problem_id (string, length 18-22) | source (string, 1 class) | task_type (string, 1 class) | in_source_id (string, length 13-58) | prompt (string, length 1.1k-25.4k) | golden_diff (string, length 145-5.13k) | verification_info (string, length 582-39.1k) | num_tokens (int64, 271-4.1k) | num_tokens_diff (int64, 47-1.02k) |
---|---|---|---|---|---|---|---|---|
gh_patches_debug_14930 | rasdani/github-patches | git_diff | mirumee__ariadne-228 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Async context_value callable is not supported
Our ASGI GraphQL app supports a callable `context_value`, but it doesn't support an `async` callable, so it's impossible to do any async work in it, like getting the user from the database. 😂
Example:
```python
async def resolve_context(request: Request) -> Dict:
    return {"user": await get_user_for_request(request)}

graphql_app = GraphQL(schema, context_value=resolve_context)
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ariadne/asgi.py`
Content:
```
1 import asyncio
2 import json
3 from typing import (
4 Any,
5 AsyncGenerator,
6 Callable,
7 Dict,
8 List,
9 Optional,
10 Union,
11 Type,
12 cast,
13 )
14
15 from graphql import GraphQLError, GraphQLSchema
16 from graphql.execution import MiddlewareManager
17 from starlette.datastructures import UploadFile
18 from starlette.requests import Request
19 from starlette.responses import HTMLResponse, JSONResponse, PlainTextResponse, Response
20 from starlette.types import Receive, Scope, Send
21 from starlette.websockets import WebSocket, WebSocketState, WebSocketDisconnect
22
23 from .constants import DATA_TYPE_JSON, DATA_TYPE_MULTIPART, PLAYGROUND_HTML
24 from .exceptions import HttpBadRequestError, HttpError
25 from .file_uploads import combine_multipart_data
26 from .format_error import format_error
27 from .graphql import graphql, subscribe
28 from .logger import log_error
29 from .types import ContextValue, ErrorFormatter, Extension, RootValue
30
31 GQL_CONNECTION_INIT = "connection_init" # Client -> Server
32 GQL_CONNECTION_ACK = "connection_ack" # Server -> Client
33 GQL_CONNECTION_ERROR = "connection_error" # Server -> Client
34
35 # NOTE: The keep alive message type does not follow the standard due to connection optimizations
36 GQL_CONNECTION_KEEP_ALIVE = "ka" # Server -> Client
37
38 GQL_CONNECTION_TERMINATE = "connection_terminate" # Client -> Server
39 GQL_START = "start" # Client -> Server
40 GQL_DATA = "data" # Server -> Client
41 GQL_ERROR = "error" # Server -> Client
42 GQL_COMPLETE = "complete" # Server -> Client
43 GQL_STOP = "stop" # Client -> Server
44
45 ExtensionList = Optional[List[Type[Extension]]]
46
47
48 class GraphQL:
49 def __init__(
50 self,
51 schema: GraphQLSchema,
52 *,
53 context_value: Optional[ContextValue] = None,
54 root_value: Optional[RootValue] = None,
55 debug: bool = False,
56 logger: Optional[str] = None,
57 error_formatter: ErrorFormatter = format_error,
58 extensions: Union[Callable[[Any], ExtensionList], ExtensionList] = None,
59 middleware: Optional[MiddlewareManager] = None,
60 keepalive: float = None,
61 ):
62 self.context_value = context_value
63 self.root_value = root_value
64 self.debug = debug
65 self.logger = logger
66 self.error_formatter = error_formatter
67 self.extensions = extensions
68 self.middleware = middleware
69 self.keepalive = keepalive
70 self.schema = schema
71
72 async def __call__(self, scope: Scope, receive: Receive, send: Send):
73 if scope["type"] == "http":
74 await self.handle_http(scope=scope, receive=receive, send=send)
75 elif scope["type"] == "websocket":
76 await self.handle_websocket(scope=scope, receive=receive, send=send)
77 else:
78 raise ValueError("Unknown scope type: %r" % (scope["type"],))
79
80 async def get_context_for_request(self, request: Any) -> Any:
81 if callable(self.context_value):
82 return self.context_value(request)
83 return self.context_value or {"request": request}
84
85 async def get_extensions_for_request(self, request: Any) -> ExtensionList:
86 if callable(self.extensions):
87 return self.extensions(request)
88 return self.extensions
89
90 async def handle_http(self, scope: Scope, receive: Receive, send: Send):
91 request = Request(scope=scope, receive=receive)
92 if request.method == "GET":
93 response = await self.render_playground(request)
94 elif request.method == "POST":
95 response = await self.graphql_http_server(request)
96 else:
97 response = Response(status_code=405)
98 await response(scope, receive, send)
99
100 async def handle_websocket(self, scope: Scope, receive: Receive, send: Send):
101 websocket = WebSocket(scope=scope, receive=receive, send=send)
102 await self.websocket_server(websocket)
103
104 async def render_playground( # pylint: disable=unused-argument
105 self, request: Request
106 ) -> Response:
107 return HTMLResponse(PLAYGROUND_HTML)
108
109 async def graphql_http_server(self, request: Request) -> Response:
110 try:
111 data = await self.extract_data_from_request(request)
112 except HttpError as error:
113 return PlainTextResponse(error.message or error.status, status_code=400)
114
115 context_value = await self.get_context_for_request(request)
116 extensions = await self.get_extensions_for_request(request)
117
118 success, response = await graphql(
119 self.schema,
120 data,
121 context_value=context_value,
122 root_value=self.root_value,
123 debug=self.debug,
124 logger=self.logger,
125 error_formatter=self.error_formatter,
126 extensions=extensions,
127 )
128 status_code = 200 if success else 400
129 return JSONResponse(response, status_code=status_code)
130
131 async def extract_data_from_request(self, request: Request):
132 content_type = request.headers.get("Content-Type", "")
133 content_type = content_type.split(";")[0]
134
135 if content_type == DATA_TYPE_JSON:
136 return await self.extract_data_from_json_request(request)
137 if content_type == DATA_TYPE_MULTIPART:
138 return await self.extract_data_from_multipart_request(request)
139
140 raise HttpBadRequestError(
141 "Posted content must be of type {} or {}".format(
142 DATA_TYPE_JSON, DATA_TYPE_MULTIPART
143 )
144 )
145
146 async def extract_data_from_json_request(self, request: Request):
147 try:
148 return await request.json()
149 except (TypeError, ValueError):
150 raise HttpBadRequestError("Request body is not a valid JSON")
151
152 async def extract_data_from_multipart_request(self, request: Request):
153 try:
154 request_body = await request.form()
155 except ValueError:
156 raise HttpBadRequestError("Request body is not a valid multipart/form-data")
157
158 try:
159 operations = json.loads(request_body.get("operations"))
160 except (TypeError, ValueError):
161 raise HttpBadRequestError(
162 "Request 'operations' multipart field is not a valid JSON"
163 )
164 try:
165 files_map = json.loads(request_body.get("map"))
166 except (TypeError, ValueError):
167 raise HttpBadRequestError(
168 "Request 'map' multipart field is not a valid JSON"
169 )
170
171 request_files = {
172 key: value
173 for key, value in request_body.items()
174 if isinstance(value, UploadFile)
175 }
176
177 return combine_multipart_data(operations, files_map, request_files)
178
179 async def websocket_server(self, websocket: WebSocket) -> None:
180 subscriptions: Dict[str, AsyncGenerator] = {}
181 await websocket.accept("graphql-ws")
182 try:
183 while (
184 websocket.client_state != WebSocketState.DISCONNECTED
185 and websocket.application_state != WebSocketState.DISCONNECTED
186 ):
187 message = await websocket.receive_json()
188 await self.handle_websocket_message(message, websocket, subscriptions)
189 except WebSocketDisconnect:
190 pass
191 finally:
192 for operation_id in subscriptions:
193 await subscriptions[operation_id].aclose()
194
195 async def handle_websocket_message(
196 self,
197 message: dict,
198 websocket: WebSocket,
199 subscriptions: Dict[str, AsyncGenerator],
200 ):
201 operation_id = cast(str, message.get("id"))
202 message_type = cast(str, message.get("type"))
203
204 if message_type == GQL_CONNECTION_INIT:
205 await websocket.send_json({"type": GQL_CONNECTION_ACK})
206 asyncio.ensure_future(self.keep_websocket_alive(websocket))
207 elif message_type == GQL_CONNECTION_TERMINATE:
208 await websocket.close()
209 elif message_type == GQL_START:
210 await self.start_websocket_subscription(
211 message.get("payload"), operation_id, websocket, subscriptions
212 )
213 elif message_type == GQL_STOP:
214 if operation_id in subscriptions:
215 await subscriptions[operation_id].aclose()
216 del subscriptions[operation_id]
217
218 async def keep_websocket_alive(self, websocket: WebSocket):
219 if not self.keepalive:
220 return
221 while websocket.application_state != WebSocketState.DISCONNECTED:
222 try:
223 await websocket.send_json({"type": GQL_CONNECTION_KEEP_ALIVE})
224 except WebSocketDisconnect:
225 return
226 await asyncio.sleep(self.keepalive)
227
228 async def start_websocket_subscription(
229 self,
230 data: Any,
231 operation_id: str,
232 websocket: WebSocket,
233 subscriptions: Dict[str, AsyncGenerator],
234 ):
235 context_value = await self.get_context_for_request(websocket)
236 success, results = await subscribe(
237 self.schema,
238 data,
239 context_value=context_value,
240 root_value=self.root_value,
241 debug=self.debug,
242 logger=self.logger,
243 error_formatter=self.error_formatter,
244 )
245 if not success:
246 results = cast(List[dict], results)
247 await websocket.send_json(
248 {"type": GQL_ERROR, "id": operation_id, "payload": results[0]}
249 )
250 else:
251 results = cast(AsyncGenerator, results)
252 subscriptions[operation_id] = results
253 asyncio.ensure_future(
254 self.observe_async_results(results, operation_id, websocket)
255 )
256
257 async def observe_async_results(
258 self, results: AsyncGenerator, operation_id: str, websocket: WebSocket
259 ) -> None:
260 try:
261 async for result in results:
262 payload = {}
263 if result.data:
264 payload["data"] = result.data
265 if result.errors:
266 for error in result.errors:
267 log_error(error, self.logger)
268 payload["errors"] = [
269 self.error_formatter(error, self.debug)
270 for error in result.errors
271 ]
272 await websocket.send_json(
273 {"type": GQL_DATA, "id": operation_id, "payload": payload}
274 )
275 except Exception as error:
276 if not isinstance(error, GraphQLError):
277 error = GraphQLError(str(error), original_error=error)
278 log_error(error, self.logger)
279 payload = {"errors": [self.error_formatter(error, self.debug)]}
280 await websocket.send_json(
281 {"type": GQL_DATA, "id": operation_id, "payload": payload}
282 )
283
284 if (
285 websocket.client_state != WebSocketState.DISCONNECTED
286 and websocket.application_state != WebSocketState.DISCONNECTED
287 ):
288 await websocket.send_json({"type": GQL_COMPLETE, "id": operation_id})
289
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ariadne/asgi.py b/ariadne/asgi.py
--- a/ariadne/asgi.py
+++ b/ariadne/asgi.py
@@ -1,5 +1,6 @@
import asyncio
import json
+from inspect import isawaitable
from typing import (
Any,
AsyncGenerator,
@@ -79,7 +80,11 @@
async def get_context_for_request(self, request: Any) -> Any:
if callable(self.context_value):
- return self.context_value(request)
+ context = self.context_value(request)
+ if isawaitable(context):
+ context = await context
+ return context
+
return self.context_value or {"request": request}
async def get_extensions_for_request(self, request: Any) -> ExtensionList:
| {"golden_diff": "diff --git a/ariadne/asgi.py b/ariadne/asgi.py\n--- a/ariadne/asgi.py\n+++ b/ariadne/asgi.py\n@@ -1,5 +1,6 @@\n import asyncio\n import json\n+from inspect import isawaitable\n from typing import (\n Any,\n AsyncGenerator,\n@@ -79,7 +80,11 @@\n \n async def get_context_for_request(self, request: Any) -> Any:\n if callable(self.context_value):\n- return self.context_value(request)\n+ context = self.context_value(request)\n+ if isawaitable(context):\n+ context = await context\n+ return context\n+\n return self.context_value or {\"request\": request}\n \n async def get_extensions_for_request(self, request: Any) -> ExtensionList:\n", "issue": "Async context_value callable is not supported\nOur ASGI GraphQL app supports callable `context_value`, but it doesnt support `async` callable, so it's impossible to do any async work in it like get user form database. \ud83d\ude02\r\n\r\nExample:\r\n\r\n```python\r\nasync def resolve_context(request: Request) -> Dict:\r\n return {\"user\": await get_user_for_request(request)}\r\n\r\n\r\ngraphql_app = GraphQL(schema, context_value=resolve_context)\r\n```\n", "before_files": [{"content": "import asyncio\nimport json\nfrom typing import (\n Any,\n AsyncGenerator,\n Callable,\n Dict,\n List,\n Optional,\n Union,\n Type,\n cast,\n)\n\nfrom graphql import GraphQLError, GraphQLSchema\nfrom graphql.execution import MiddlewareManager\nfrom starlette.datastructures import UploadFile\nfrom starlette.requests import Request\nfrom starlette.responses import HTMLResponse, JSONResponse, PlainTextResponse, Response\nfrom starlette.types import Receive, Scope, Send\nfrom starlette.websockets import WebSocket, WebSocketState, WebSocketDisconnect\n\nfrom .constants import DATA_TYPE_JSON, DATA_TYPE_MULTIPART, PLAYGROUND_HTML\nfrom .exceptions import HttpBadRequestError, HttpError\nfrom .file_uploads import combine_multipart_data\nfrom .format_error import format_error\nfrom .graphql import graphql, subscribe\nfrom .logger import log_error\nfrom .types import ContextValue, ErrorFormatter, Extension, RootValue\n\nGQL_CONNECTION_INIT = \"connection_init\" # Client -> Server\nGQL_CONNECTION_ACK = \"connection_ack\" # Server -> Client\nGQL_CONNECTION_ERROR = \"connection_error\" # Server -> Client\n\n# NOTE: The keep alive message type does not follow the standard due to connection optimizations\nGQL_CONNECTION_KEEP_ALIVE = \"ka\" # Server -> Client\n\nGQL_CONNECTION_TERMINATE = \"connection_terminate\" # Client -> Server\nGQL_START = \"start\" # Client -> Server\nGQL_DATA = \"data\" # Server -> Client\nGQL_ERROR = \"error\" # Server -> Client\nGQL_COMPLETE = \"complete\" # Server -> Client\nGQL_STOP = \"stop\" # Client -> Server\n\nExtensionList = Optional[List[Type[Extension]]]\n\n\nclass GraphQL:\n def __init__(\n self,\n schema: GraphQLSchema,\n *,\n context_value: Optional[ContextValue] = None,\n root_value: Optional[RootValue] = None,\n debug: bool = False,\n logger: Optional[str] = None,\n error_formatter: ErrorFormatter = format_error,\n extensions: Union[Callable[[Any], ExtensionList], ExtensionList] = None,\n middleware: Optional[MiddlewareManager] = None,\n keepalive: float = None,\n ):\n self.context_value = context_value\n self.root_value = root_value\n self.debug = debug\n self.logger = logger\n self.error_formatter = error_formatter\n self.extensions = extensions\n self.middleware = middleware\n self.keepalive = keepalive\n self.schema = schema\n\n async def __call__(self, scope: Scope, receive: Receive, send: Send):\n if scope[\"type\"] == 
\"http\":\n await self.handle_http(scope=scope, receive=receive, send=send)\n elif scope[\"type\"] == \"websocket\":\n await self.handle_websocket(scope=scope, receive=receive, send=send)\n else:\n raise ValueError(\"Unknown scope type: %r\" % (scope[\"type\"],))\n\n async def get_context_for_request(self, request: Any) -> Any:\n if callable(self.context_value):\n return self.context_value(request)\n return self.context_value or {\"request\": request}\n\n async def get_extensions_for_request(self, request: Any) -> ExtensionList:\n if callable(self.extensions):\n return self.extensions(request)\n return self.extensions\n\n async def handle_http(self, scope: Scope, receive: Receive, send: Send):\n request = Request(scope=scope, receive=receive)\n if request.method == \"GET\":\n response = await self.render_playground(request)\n elif request.method == \"POST\":\n response = await self.graphql_http_server(request)\n else:\n response = Response(status_code=405)\n await response(scope, receive, send)\n\n async def handle_websocket(self, scope: Scope, receive: Receive, send: Send):\n websocket = WebSocket(scope=scope, receive=receive, send=send)\n await self.websocket_server(websocket)\n\n async def render_playground( # pylint: disable=unused-argument\n self, request: Request\n ) -> Response:\n return HTMLResponse(PLAYGROUND_HTML)\n\n async def graphql_http_server(self, request: Request) -> Response:\n try:\n data = await self.extract_data_from_request(request)\n except HttpError as error:\n return PlainTextResponse(error.message or error.status, status_code=400)\n\n context_value = await self.get_context_for_request(request)\n extensions = await self.get_extensions_for_request(request)\n\n success, response = await graphql(\n self.schema,\n data,\n context_value=context_value,\n root_value=self.root_value,\n debug=self.debug,\n logger=self.logger,\n error_formatter=self.error_formatter,\n extensions=extensions,\n )\n status_code = 200 if success else 400\n return JSONResponse(response, status_code=status_code)\n\n async def extract_data_from_request(self, request: Request):\n content_type = request.headers.get(\"Content-Type\", \"\")\n content_type = content_type.split(\";\")[0]\n\n if content_type == DATA_TYPE_JSON:\n return await self.extract_data_from_json_request(request)\n if content_type == DATA_TYPE_MULTIPART:\n return await self.extract_data_from_multipart_request(request)\n\n raise HttpBadRequestError(\n \"Posted content must be of type {} or {}\".format(\n DATA_TYPE_JSON, DATA_TYPE_MULTIPART\n )\n )\n\n async def extract_data_from_json_request(self, request: Request):\n try:\n return await request.json()\n except (TypeError, ValueError):\n raise HttpBadRequestError(\"Request body is not a valid JSON\")\n\n async def extract_data_from_multipart_request(self, request: Request):\n try:\n request_body = await request.form()\n except ValueError:\n raise HttpBadRequestError(\"Request body is not a valid multipart/form-data\")\n\n try:\n operations = json.loads(request_body.get(\"operations\"))\n except (TypeError, ValueError):\n raise HttpBadRequestError(\n \"Request 'operations' multipart field is not a valid JSON\"\n )\n try:\n files_map = json.loads(request_body.get(\"map\"))\n except (TypeError, ValueError):\n raise HttpBadRequestError(\n \"Request 'map' multipart field is not a valid JSON\"\n )\n\n request_files = {\n key: value\n for key, value in request_body.items()\n if isinstance(value, UploadFile)\n }\n\n return combine_multipart_data(operations, files_map, request_files)\n\n 
async def websocket_server(self, websocket: WebSocket) -> None:\n subscriptions: Dict[str, AsyncGenerator] = {}\n await websocket.accept(\"graphql-ws\")\n try:\n while (\n websocket.client_state != WebSocketState.DISCONNECTED\n and websocket.application_state != WebSocketState.DISCONNECTED\n ):\n message = await websocket.receive_json()\n await self.handle_websocket_message(message, websocket, subscriptions)\n except WebSocketDisconnect:\n pass\n finally:\n for operation_id in subscriptions:\n await subscriptions[operation_id].aclose()\n\n async def handle_websocket_message(\n self,\n message: dict,\n websocket: WebSocket,\n subscriptions: Dict[str, AsyncGenerator],\n ):\n operation_id = cast(str, message.get(\"id\"))\n message_type = cast(str, message.get(\"type\"))\n\n if message_type == GQL_CONNECTION_INIT:\n await websocket.send_json({\"type\": GQL_CONNECTION_ACK})\n asyncio.ensure_future(self.keep_websocket_alive(websocket))\n elif message_type == GQL_CONNECTION_TERMINATE:\n await websocket.close()\n elif message_type == GQL_START:\n await self.start_websocket_subscription(\n message.get(\"payload\"), operation_id, websocket, subscriptions\n )\n elif message_type == GQL_STOP:\n if operation_id in subscriptions:\n await subscriptions[operation_id].aclose()\n del subscriptions[operation_id]\n\n async def keep_websocket_alive(self, websocket: WebSocket):\n if not self.keepalive:\n return\n while websocket.application_state != WebSocketState.DISCONNECTED:\n try:\n await websocket.send_json({\"type\": GQL_CONNECTION_KEEP_ALIVE})\n except WebSocketDisconnect:\n return\n await asyncio.sleep(self.keepalive)\n\n async def start_websocket_subscription(\n self,\n data: Any,\n operation_id: str,\n websocket: WebSocket,\n subscriptions: Dict[str, AsyncGenerator],\n ):\n context_value = await self.get_context_for_request(websocket)\n success, results = await subscribe(\n self.schema,\n data,\n context_value=context_value,\n root_value=self.root_value,\n debug=self.debug,\n logger=self.logger,\n error_formatter=self.error_formatter,\n )\n if not success:\n results = cast(List[dict], results)\n await websocket.send_json(\n {\"type\": GQL_ERROR, \"id\": operation_id, \"payload\": results[0]}\n )\n else:\n results = cast(AsyncGenerator, results)\n subscriptions[operation_id] = results\n asyncio.ensure_future(\n self.observe_async_results(results, operation_id, websocket)\n )\n\n async def observe_async_results(\n self, results: AsyncGenerator, operation_id: str, websocket: WebSocket\n ) -> None:\n try:\n async for result in results:\n payload = {}\n if result.data:\n payload[\"data\"] = result.data\n if result.errors:\n for error in result.errors:\n log_error(error, self.logger)\n payload[\"errors\"] = [\n self.error_formatter(error, self.debug)\n for error in result.errors\n ]\n await websocket.send_json(\n {\"type\": GQL_DATA, \"id\": operation_id, \"payload\": payload}\n )\n except Exception as error:\n if not isinstance(error, GraphQLError):\n error = GraphQLError(str(error), original_error=error)\n log_error(error, self.logger)\n payload = {\"errors\": [self.error_formatter(error, self.debug)]}\n await websocket.send_json(\n {\"type\": GQL_DATA, \"id\": operation_id, \"payload\": payload}\n )\n\n if (\n websocket.client_state != WebSocketState.DISCONNECTED\n and websocket.application_state != WebSocketState.DISCONNECTED\n ):\n await websocket.send_json({\"type\": GQL_COMPLETE, \"id\": operation_id})\n", "path": "ariadne/asgi.py"}], "after_files": [{"content": "import asyncio\nimport json\nfrom 
inspect import isawaitable\nfrom typing import (\n Any,\n AsyncGenerator,\n Callable,\n Dict,\n List,\n Optional,\n Union,\n Type,\n cast,\n)\n\nfrom graphql import GraphQLError, GraphQLSchema\nfrom graphql.execution import MiddlewareManager\nfrom starlette.datastructures import UploadFile\nfrom starlette.requests import Request\nfrom starlette.responses import HTMLResponse, JSONResponse, PlainTextResponse, Response\nfrom starlette.types import Receive, Scope, Send\nfrom starlette.websockets import WebSocket, WebSocketState, WebSocketDisconnect\n\nfrom .constants import DATA_TYPE_JSON, DATA_TYPE_MULTIPART, PLAYGROUND_HTML\nfrom .exceptions import HttpBadRequestError, HttpError\nfrom .file_uploads import combine_multipart_data\nfrom .format_error import format_error\nfrom .graphql import graphql, subscribe\nfrom .logger import log_error\nfrom .types import ContextValue, ErrorFormatter, Extension, RootValue\n\nGQL_CONNECTION_INIT = \"connection_init\" # Client -> Server\nGQL_CONNECTION_ACK = \"connection_ack\" # Server -> Client\nGQL_CONNECTION_ERROR = \"connection_error\" # Server -> Client\n\n# NOTE: The keep alive message type does not follow the standard due to connection optimizations\nGQL_CONNECTION_KEEP_ALIVE = \"ka\" # Server -> Client\n\nGQL_CONNECTION_TERMINATE = \"connection_terminate\" # Client -> Server\nGQL_START = \"start\" # Client -> Server\nGQL_DATA = \"data\" # Server -> Client\nGQL_ERROR = \"error\" # Server -> Client\nGQL_COMPLETE = \"complete\" # Server -> Client\nGQL_STOP = \"stop\" # Client -> Server\n\nExtensionList = Optional[List[Type[Extension]]]\n\n\nclass GraphQL:\n def __init__(\n self,\n schema: GraphQLSchema,\n *,\n context_value: Optional[ContextValue] = None,\n root_value: Optional[RootValue] = None,\n debug: bool = False,\n logger: Optional[str] = None,\n error_formatter: ErrorFormatter = format_error,\n extensions: Union[Callable[[Any], ExtensionList], ExtensionList] = None,\n middleware: Optional[MiddlewareManager] = None,\n keepalive: float = None,\n ):\n self.context_value = context_value\n self.root_value = root_value\n self.debug = debug\n self.logger = logger\n self.error_formatter = error_formatter\n self.extensions = extensions\n self.middleware = middleware\n self.keepalive = keepalive\n self.schema = schema\n\n async def __call__(self, scope: Scope, receive: Receive, send: Send):\n if scope[\"type\"] == \"http\":\n await self.handle_http(scope=scope, receive=receive, send=send)\n elif scope[\"type\"] == \"websocket\":\n await self.handle_websocket(scope=scope, receive=receive, send=send)\n else:\n raise ValueError(\"Unknown scope type: %r\" % (scope[\"type\"],))\n\n async def get_context_for_request(self, request: Any) -> Any:\n if callable(self.context_value):\n context = self.context_value(request)\n if isawaitable(context):\n context = await context\n return context\n\n return self.context_value or {\"request\": request}\n\n async def get_extensions_for_request(self, request: Any) -> ExtensionList:\n if callable(self.extensions):\n return self.extensions(request)\n return self.extensions\n\n async def handle_http(self, scope: Scope, receive: Receive, send: Send):\n request = Request(scope=scope, receive=receive)\n if request.method == \"GET\":\n response = await self.render_playground(request)\n elif request.method == \"POST\":\n response = await self.graphql_http_server(request)\n else:\n response = Response(status_code=405)\n await response(scope, receive, send)\n\n async def handle_websocket(self, scope: Scope, receive: Receive, send: 
Send):\n websocket = WebSocket(scope=scope, receive=receive, send=send)\n await self.websocket_server(websocket)\n\n async def render_playground( # pylint: disable=unused-argument\n self, request: Request\n ) -> Response:\n return HTMLResponse(PLAYGROUND_HTML)\n\n async def graphql_http_server(self, request: Request) -> Response:\n try:\n data = await self.extract_data_from_request(request)\n except HttpError as error:\n return PlainTextResponse(error.message or error.status, status_code=400)\n\n context_value = await self.get_context_for_request(request)\n extensions = await self.get_extensions_for_request(request)\n\n success, response = await graphql(\n self.schema,\n data,\n context_value=context_value,\n root_value=self.root_value,\n debug=self.debug,\n logger=self.logger,\n error_formatter=self.error_formatter,\n extensions=extensions,\n )\n status_code = 200 if success else 400\n return JSONResponse(response, status_code=status_code)\n\n async def extract_data_from_request(self, request: Request):\n content_type = request.headers.get(\"Content-Type\", \"\")\n content_type = content_type.split(\";\")[0]\n\n if content_type == DATA_TYPE_JSON:\n return await self.extract_data_from_json_request(request)\n if content_type == DATA_TYPE_MULTIPART:\n return await self.extract_data_from_multipart_request(request)\n\n raise HttpBadRequestError(\n \"Posted content must be of type {} or {}\".format(\n DATA_TYPE_JSON, DATA_TYPE_MULTIPART\n )\n )\n\n async def extract_data_from_json_request(self, request: Request):\n try:\n return await request.json()\n except (TypeError, ValueError):\n raise HttpBadRequestError(\"Request body is not a valid JSON\")\n\n async def extract_data_from_multipart_request(self, request: Request):\n try:\n request_body = await request.form()\n except ValueError:\n raise HttpBadRequestError(\"Request body is not a valid multipart/form-data\")\n\n try:\n operations = json.loads(request_body.get(\"operations\"))\n except (TypeError, ValueError):\n raise HttpBadRequestError(\n \"Request 'operations' multipart field is not a valid JSON\"\n )\n try:\n files_map = json.loads(request_body.get(\"map\"))\n except (TypeError, ValueError):\n raise HttpBadRequestError(\n \"Request 'map' multipart field is not a valid JSON\"\n )\n\n request_files = {\n key: value\n for key, value in request_body.items()\n if isinstance(value, UploadFile)\n }\n\n return combine_multipart_data(operations, files_map, request_files)\n\n async def websocket_server(self, websocket: WebSocket) -> None:\n subscriptions: Dict[str, AsyncGenerator] = {}\n await websocket.accept(\"graphql-ws\")\n try:\n while (\n websocket.client_state != WebSocketState.DISCONNECTED\n and websocket.application_state != WebSocketState.DISCONNECTED\n ):\n message = await websocket.receive_json()\n await self.handle_websocket_message(message, websocket, subscriptions)\n except WebSocketDisconnect:\n pass\n finally:\n for operation_id in subscriptions:\n await subscriptions[operation_id].aclose()\n\n async def handle_websocket_message(\n self,\n message: dict,\n websocket: WebSocket,\n subscriptions: Dict[str, AsyncGenerator],\n ):\n operation_id = cast(str, message.get(\"id\"))\n message_type = cast(str, message.get(\"type\"))\n\n if message_type == GQL_CONNECTION_INIT:\n await websocket.send_json({\"type\": GQL_CONNECTION_ACK})\n asyncio.ensure_future(self.keep_websocket_alive(websocket))\n elif message_type == GQL_CONNECTION_TERMINATE:\n await websocket.close()\n elif message_type == GQL_START:\n await 
self.start_websocket_subscription(\n message.get(\"payload\"), operation_id, websocket, subscriptions\n )\n elif message_type == GQL_STOP:\n if operation_id in subscriptions:\n await subscriptions[operation_id].aclose()\n del subscriptions[operation_id]\n\n async def keep_websocket_alive(self, websocket: WebSocket):\n if not self.keepalive:\n return\n while websocket.application_state != WebSocketState.DISCONNECTED:\n try:\n await websocket.send_json({\"type\": GQL_CONNECTION_KEEP_ALIVE})\n except WebSocketDisconnect:\n return\n await asyncio.sleep(self.keepalive)\n\n async def start_websocket_subscription(\n self,\n data: Any,\n operation_id: str,\n websocket: WebSocket,\n subscriptions: Dict[str, AsyncGenerator],\n ):\n context_value = await self.get_context_for_request(websocket)\n success, results = await subscribe(\n self.schema,\n data,\n context_value=context_value,\n root_value=self.root_value,\n debug=self.debug,\n logger=self.logger,\n error_formatter=self.error_formatter,\n )\n if not success:\n results = cast(List[dict], results)\n await websocket.send_json(\n {\"type\": GQL_ERROR, \"id\": operation_id, \"payload\": results[0]}\n )\n else:\n results = cast(AsyncGenerator, results)\n subscriptions[operation_id] = results\n asyncio.ensure_future(\n self.observe_async_results(results, operation_id, websocket)\n )\n\n async def observe_async_results(\n self, results: AsyncGenerator, operation_id: str, websocket: WebSocket\n ) -> None:\n try:\n async for result in results:\n payload = {}\n if result.data:\n payload[\"data\"] = result.data\n if result.errors:\n for error in result.errors:\n log_error(error, self.logger)\n payload[\"errors\"] = [\n self.error_formatter(error, self.debug)\n for error in result.errors\n ]\n await websocket.send_json(\n {\"type\": GQL_DATA, \"id\": operation_id, \"payload\": payload}\n )\n except Exception as error:\n if not isinstance(error, GraphQLError):\n error = GraphQLError(str(error), original_error=error)\n log_error(error, self.logger)\n payload = {\"errors\": [self.error_formatter(error, self.debug)]}\n await websocket.send_json(\n {\"type\": GQL_DATA, \"id\": operation_id, \"payload\": payload}\n )\n\n if (\n websocket.client_state != WebSocketState.DISCONNECTED\n and websocket.application_state != WebSocketState.DISCONNECTED\n ):\n await websocket.send_json({\"type\": GQL_COMPLETE, \"id\": operation_id})\n", "path": "ariadne/asgi.py"}]} | 3,305 | 179 |
gh_patches_debug_35819 | rasdani/github-patches | git_diff | apluslms__a-plus-1105 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
deviations: columns Granter and Grant time are missing info
When several deviations for the same person have been "folded behind the plus", the Granter and Grant time columns show nothing, even in the case where the time and granter are the same on all of the hidden rows.
If the information is identical for everyone behind the same plus, it could be shown.
(e.g. https://plus.cs.aalto.fi/o1/2021/teachers/deadline-deviations/)
--------------------------------------
In English
> It is extremely common that a student gets a deadline extension for multiple exercises at once. In such cases, the DL extensions table folds the exercises into one entry so that the view doesn’t get cluttered. That’s great, but it would be nice if the metadata — who granted the extension? when? — was shown for the folded items as well (assuming it’s one person and a single time, which should almost always be the case). This does not currently happen. The columns on the right are blank here:
https://github.com/apluslms/a-plus/issues/1121#issue-1517628848
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `deviations/viewbase.py`
Content:
```
1 from itertools import groupby
2 from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Type
3
4 from django.db import models
5 from django.http import HttpRequest, HttpResponse
6 from django.contrib import messages
7 from django import forms
8 from django.utils.text import format_lazy
9 from django.utils.translation import gettext_lazy as _, ngettext
10
11 from course.models import CourseModule, UserTag
12 from course.viewbase import CourseInstanceMixin, CourseInstanceBaseView
13 from deviations.models import SubmissionRuleDeviation
14 from lib.helpers import is_ajax
15 from lib.viewbase import BaseFormView, BaseRedirectView
16 from authorization.permissions import ACCESS
17 from exercise.models import BaseExercise
18 from userprofile.models import UserProfile
19
20
21 class ListDeviationsView(CourseInstanceBaseView):
22 access_mode = ACCESS.TEACHER
23 deviation_model: Type[SubmissionRuleDeviation]
24
25 def get_common_objects(self) -> None:
26 super().get_common_objects()
27 all_deviations = self.deviation_model.objects.filter(
28 exercise__course_module__course_instance=self.instance
29 )
30 self.deviation_groups = get_deviation_groups(all_deviations)
31 self.note("deviation_groups")
32
33
34 class AddDeviationsView(CourseInstanceMixin, BaseFormView):
35 access_mode = ACCESS.TEACHER
36 deviation_model: Type[SubmissionRuleDeviation]
37 session_key: str
38
39 def get_form_kwargs(self) -> Dict[str, Any]:
40 kwargs = super().get_form_kwargs()
41 kwargs["instance"] = self.instance
42 return kwargs
43
44 def get_initial_get_param_spec(self) -> Dict[str, Optional[Callable[[str], Any]]]:
45 def list_arg(arg):
46 return arg.split(",")
47
48 spec = super().get_initial_get_param_spec()
49 spec.update({
50 "module": list_arg,
51 "exercise": list_arg,
52 "submitter": list_arg,
53 "submitter_tag": list_arg,
54 })
55 return spec
56
57 def form_valid(self, form: forms.BaseForm) -> HttpResponse:
58 exercises = get_exercises(form.cleaned_data)
59 submitters = get_submitters(form.cleaned_data)
60 existing_deviations = self.deviation_model.objects.filter(
61 exercise__in=exercises,
62 submitter__in=submitters,
63 )
64
65 if existing_deviations:
66 # Some deviations already existed. Use OverrideDeviationsView to
67 # confirm which ones the user wants to override. Store the form
68 # values in the current session, so they can be used afterwards.
69 self.success_url = self.deviation_model.get_override_url(self.instance)
70 self.request.session[self.session_key] = self.serialize_session_data(form.cleaned_data)
71 else:
72 self.success_url = self.deviation_model.get_list_url(self.instance)
73 for exercise in exercises:
74 for submitter in submitters:
75 new_deviation = self.deviation_model(
76 exercise=exercise,
77 submitter=submitter,
78 granter=self.request.user.userprofile,
79 )
80 new_deviation.update_by_form(form.cleaned_data)
81 new_deviation.save()
82
83 return super().form_valid(form)
84
85 def serialize_session_data(self, form_data: Dict[str, Any]) -> Dict[str, Any]:
86 """
87 Convert input form data into serializable values that can be stored in
88 the session cache.
89 """
90 result = {}
91 for key in ('exercise', 'module', 'submitter', 'submitter_tag'):
92 result[key] = [i.id for i in form_data.get(key, [])]
93 return result
94
95
96 class OverrideDeviationsView(CourseInstanceMixin, BaseFormView):
97 access_mode = ACCESS.TEACHER
98 # form_class is not really used, but it is required by the FormView.
99 # The form contains only checkboxes and the user input is validated in
100 # the form_valid method. The form HTML is manually written in the template.
101 form_class = forms.Form
102 deviation_model: Type[SubmissionRuleDeviation]
103 session_key: str
104
105 def get_success_url(self) -> str:
106 return self.deviation_model.get_list_url(self.instance)
107
108 def get_common_objects(self) -> None:
109 super().get_common_objects()
110 self.session_data = self.deserialize_session_data(self.request.session[self.session_key])
111 self.exercises = get_exercises(self.session_data)
112 self.submitters = get_submitters(self.session_data)
113 self.existing_deviations = self.deviation_model.objects.filter(
114 exercise__in=self.exercises,
115 submitter__in=self.submitters,
116 )
117 self.deviation_groups = get_deviation_groups(self.existing_deviations)
118 self.note("session_data", "exercises", "submitters", "existing_deviations", "deviation_groups")
119
120 def form_valid(self, form: forms.BaseForm) -> HttpResponse:
121 override_deviations = set()
122 deviation_list = self.request.POST.getlist('override')
123 for id_pair in deviation_list:
124 try:
125 submitter_id, exercise_id = id_pair.split('.')
126 submitter_id, exercise_id = int(submitter_id), int(exercise_id)
127 override_deviations.add((submitter_id, exercise_id))
128 except ValueError:
129 messages.error(self.request,
130 format_lazy(
131 _("INVALID_EXERCISE_OR_SUBMITTER_ID -- {id}"),
132 id=id_pair,
133 )
134 )
135 continue
136
137 existing_deviations = {(d.submitter_id, d.exercise_id): d for d in self.existing_deviations}
138
139 for exercise in self.exercises:
140 for submitter in self.submitters:
141 existing_deviation = existing_deviations.get((submitter.id, exercise.id))
142 if existing_deviation is not None:
143 if (submitter.id, exercise.id) in override_deviations:
144 existing_deviation.granter = self.request.user.userprofile
145 existing_deviation.update_by_form(self.session_data)
146 existing_deviation.save()
147 else:
148 new_deviation = self.deviation_model(
149 exercise=exercise,
150 submitter=submitter,
151 granter=self.request.user.userprofile,
152 )
153 new_deviation.update_by_form(self.session_data)
154 new_deviation.save()
155
156 del self.request.session[self.session_key]
157 return super().form_valid(form)
158
159 def deserialize_session_data(self, session_data: Dict[str, Any]) -> Dict[str, Any]:
160 """
161 Convert serialized session data back into its original representation.
162 """
163 result = {
164 'exercise': BaseExercise.objects.filter(id__in=session_data.get('exercise', [])),
165 'module': CourseModule.objects.filter(id__in=session_data.get('module', [])),
166 'submitter': UserProfile.objects.filter(id__in=session_data.get('submitter', [])),
167 'submitter_tag': UserTag.objects.filter(id__in=session_data.get('submitter_tag', [])),
168 }
169 return result
170
171
172 class RemoveDeviationsByIDView(CourseInstanceMixin, BaseRedirectView):
173 access_mode = ACCESS.TEACHER
174 deviation_model: Type[SubmissionRuleDeviation]
175
176 def post(self, request: HttpRequest, *args: Any, **kwargs: Any) -> HttpResponse:
177 deviations = self.deviation_model.objects.filter(
178 id__in=request.POST.getlist("id"),
179 exercise__course_module__course_instance=self.instance,
180 )
181 for deviation in deviations:
182 deviation.delete()
183 if is_ajax(request):
184 return HttpResponse(status=204)
185 return self.redirect(self.deviation_model.get_list_url(self.instance))
186
187
188 class RemoveDeviationsView(CourseInstanceMixin, BaseFormView):
189 access_mode = ACCESS.TEACHER
190 deviation_model: Type[SubmissionRuleDeviation]
191
192 def get_form_kwargs(self) -> Dict[str, Any]:
193 kwargs = super().get_form_kwargs()
194 kwargs["instance"] = self.instance
195 return kwargs
196
197 def get_success_url(self) -> str:
198 return self.deviation_model.get_list_url(self.instance)
199
200 def form_valid(self, form: forms.BaseForm) -> HttpResponse:
201 number_of_removed = 0
202 deviations = self.deviation_model.objects.filter(
203 exercise__in=get_exercises(form.cleaned_data),
204 submitter__in=get_submitters(form.cleaned_data),
205 )
206 for deviation in deviations:
207 deviation.delete()
208 number_of_removed += 1
209 if number_of_removed == 0:
210 messages.warning(self.request, _("NOTHING_REMOVED"))
211 else:
212 message = ngettext(
213 'REMOVED_DEVIATION -- {count}',
214 'REMOVED_DEVIATIONS -- {count}',
215 number_of_removed,
216 ).format(count=number_of_removed)
217 messages.info(self.request, message)
218 return super().form_valid(form)
219
220
221 def get_deviation_groups(
222 all_deviations: models.QuerySet[SubmissionRuleDeviation],
223 ) -> Iterable[Tuple[List[SubmissionRuleDeviation], bool, Optional[str]]]:
224 """
225 Group the deviations by user and module.
226
227 Grouping condition: deviations can be grouped if the user has been
228 granted the same deviation (based on the `is_equal` method) for all
229 exercises in the module.
230
231 The returned tuples contain the following values:
232 1. List of deviations with the same user and module.
233 2. Boolean representing whether the deviations in the list can be
234 displayed as a group (i.e. the grouping condition is satisfied).
235 3. An id that uniquely identifies the group of deviations.
236 """
237 # Find the number of exercises in each module.
238 course_instances = (
239 all_deviations
240 .values_list('exercise__course_module__course_instance', flat=True)
241 .distinct()
242 )
243 exercise_counts = (
244 BaseExercise.objects.filter(
245 course_module__course_instance__in=course_instances
246 )
247 .order_by()
248 .values('course_module_id')
249 .annotate(count=models.Count('*'))
250 )
251 exercise_count_by_module = {row['course_module_id']: row['count'] for row in exercise_counts}
252
253 ordered_deviations = (
254 all_deviations
255 .select_related(
256 'submitter', 'submitter__user',
257 'granter', 'granter__user',
258 'exercise', 'exercise__course_module',
259 )
260 # parent is prefetched because there may be multiple ancestors, and
261 # they are needed for building the deviation's URL.
262 .prefetch_related('exercise__parent')
263 .order_by('submitter', 'exercise__course_module')
264 )
265
266 deviation_groups = groupby(
267 ordered_deviations,
268 lambda obj: (obj.submitter, obj.exercise.course_module),
269 )
270 for (_submitter, module), deviations_iter in deviation_groups:
271 deviations = list(deviations_iter)
272 can_group = True
273 if len(deviations) < 2:
274 # Group must have at least 2 deviations.
275 can_group = False
276 else:
277 group_exercises = set()
278 # Check that the same deviation has been granted for all exercises.
279 for deviation in deviations:
280 if not deviation.is_groupable(deviations[0]):
281 can_group = False
282 break
283 group_exercises.add(deviation.exercise.id)
284 else:
285 if len(group_exercises) != exercise_count_by_module[module.id]:
286 # The number of exercises that have deviations doesn't
287 # match the number of exercises in the module, so there
288 # are some exercises that don't have a deviation.
289 can_group = False
290 group_id = f"{deviations[0].submitter.id}.{module.id}" if can_group else None
291 yield (deviations, can_group, group_id)
292
293
294 def get_exercises(form_data: Dict[str, Any]) -> models.QuerySet[BaseExercise]:
295 """
296 Get the exercises that match the input form's `exercise` and `module`
297 fields.
298 """
299 return BaseExercise.objects.filter(
300 models.Q(id__in=form_data.get('exercise', []))
301 | models.Q(course_module__in=form_data.get('module', []))
302 )
303
304
305 def get_submitters(form_data: Dict[str, Any]) -> models.QuerySet[UserProfile]:
306 """
307 Get the submitters that match the input form's `submitter` and
308 `submitter_tag` fields.
309 """
310 return UserProfile.objects.filter(
311 models.Q(id__in=form_data.get('submitter', []))
312 | models.Q(taggings__tag__in=form_data.get('submitter_tag', []))
313 ).distinct()
314
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/deviations/viewbase.py b/deviations/viewbase.py
--- a/deviations/viewbase.py
+++ b/deviations/viewbase.py
@@ -218,6 +218,7 @@
return super().form_valid(form)
+# pylint: disable-next=too-many-locals
def get_deviation_groups(
all_deviations: models.QuerySet[SubmissionRuleDeviation],
) -> Iterable[Tuple[List[SubmissionRuleDeviation], bool, Optional[str]]]:
@@ -270,16 +271,23 @@
for (_submitter, module), deviations_iter in deviation_groups:
deviations = list(deviations_iter)
can_group = True
+ show_granter = True
if len(deviations) < 2:
# Group must have at least 2 deviations.
can_group = False
else:
group_exercises = set()
# Check that the same deviation has been granted for all exercises.
+ first_granter = deviations[0].granter.id
for deviation in deviations:
if not deviation.is_groupable(deviations[0]):
can_group = False
- break
+ if not show_granter:
+ break
+ if deviation.granter.id != first_granter:
+ show_granter = False
+ if not can_group:
+ break
group_exercises.add(deviation.exercise.id)
else:
if len(group_exercises) != exercise_count_by_module[module.id]:
@@ -288,7 +296,7 @@
# are some exercises that don't have a deviation.
can_group = False
group_id = f"{deviations[0].submitter.id}.{module.id}" if can_group else None
- yield (deviations, can_group, group_id)
+ yield (deviations, can_group, group_id, show_granter)
def get_exercises(form_data: Dict[str, Any]) -> models.QuerySet[BaseExercise]:
| {"golden_diff": "diff --git a/deviations/viewbase.py b/deviations/viewbase.py\n--- a/deviations/viewbase.py\n+++ b/deviations/viewbase.py\n@@ -218,6 +218,7 @@\n return super().form_valid(form)\n \n \n+# pylint: disable-next=too-many-locals\n def get_deviation_groups(\n all_deviations: models.QuerySet[SubmissionRuleDeviation],\n ) -> Iterable[Tuple[List[SubmissionRuleDeviation], bool, Optional[str]]]:\n@@ -270,16 +271,23 @@\n for (_submitter, module), deviations_iter in deviation_groups:\n deviations = list(deviations_iter)\n can_group = True\n+ show_granter = True\n if len(deviations) < 2:\n # Group must have at least 2 deviations.\n can_group = False\n else:\n group_exercises = set()\n # Check that the same deviation has been granted for all exercises.\n+ first_granter = deviations[0].granter.id\n for deviation in deviations:\n if not deviation.is_groupable(deviations[0]):\n can_group = False\n- break\n+ if not show_granter:\n+ break\n+ if deviation.granter.id != first_granter:\n+ show_granter = False\n+ if not can_group:\n+ break\n group_exercises.add(deviation.exercise.id)\n else:\n if len(group_exercises) != exercise_count_by_module[module.id]:\n@@ -288,7 +296,7 @@\n # are some exercises that don't have a deviation.\n can_group = False\n group_id = f\"{deviations[0].submitter.id}.{module.id}\" if can_group else None\n- yield (deviations, can_group, group_id)\n+ yield (deviations, can_group, group_id, show_granter)\n \n \n def get_exercises(form_data: Dict[str, Any]) -> models.QuerySet[BaseExercise]:\n", "issue": "deviations: columns Granter and Grant time are missing info\nKun monta saman henkil\u00f6n poikkeamaa on \"taiteltu plussan taakse\", ei Granter- ja Grant time -sarakkeissa lue mit\u00e4\u00e4n edes siin\u00e4 tapauksessa, ett\u00e4 aika ja my\u00f6nt\u00e4j\u00e4 olisivat kaikilla piilotetuilla riveill\u00e4 samat. \r\nJos tiedot ovat kaikille saman plussan takana olevilla identtiset, voisivat ne n\u00e4ky\u00e4.\r\n(e.g. https://plus.cs.aalto.fi/o1/2021/teachers/deadline-deviations/)\r\n\r\n--------------------------------------\r\n\r\nIn English\r\n> It is extremely common that a student gets a deadline extension for multiple exercises at once. In such cases, the DL extensions table folds the exercises into one entry so that the view doesn\u2019t get cluttered. That\u2019s great, but it would be nice if the metadata \u2014 who granted the extension? when? \u2014 was shown for the folded items as well (assuming it\u2019s one person and a single time, which should almost always be the case). This does not currently happen. 
The columns on the right are blank here:\r\n\r\nhttps://github.com/apluslms/a-plus/issues/1121#issue-1517628848\n", "before_files": [{"content": "from itertools import groupby\nfrom typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Type\n\nfrom django.db import models\nfrom django.http import HttpRequest, HttpResponse\nfrom django.contrib import messages\nfrom django import forms\nfrom django.utils.text import format_lazy\nfrom django.utils.translation import gettext_lazy as _, ngettext\n\nfrom course.models import CourseModule, UserTag\nfrom course.viewbase import CourseInstanceMixin, CourseInstanceBaseView\nfrom deviations.models import SubmissionRuleDeviation\nfrom lib.helpers import is_ajax\nfrom lib.viewbase import BaseFormView, BaseRedirectView\nfrom authorization.permissions import ACCESS\nfrom exercise.models import BaseExercise\nfrom userprofile.models import UserProfile\n\n\nclass ListDeviationsView(CourseInstanceBaseView):\n access_mode = ACCESS.TEACHER\n deviation_model: Type[SubmissionRuleDeviation]\n\n def get_common_objects(self) -> None:\n super().get_common_objects()\n all_deviations = self.deviation_model.objects.filter(\n exercise__course_module__course_instance=self.instance\n )\n self.deviation_groups = get_deviation_groups(all_deviations)\n self.note(\"deviation_groups\")\n\n\nclass AddDeviationsView(CourseInstanceMixin, BaseFormView):\n access_mode = ACCESS.TEACHER\n deviation_model: Type[SubmissionRuleDeviation]\n session_key: str\n\n def get_form_kwargs(self) -> Dict[str, Any]:\n kwargs = super().get_form_kwargs()\n kwargs[\"instance\"] = self.instance\n return kwargs\n\n def get_initial_get_param_spec(self) -> Dict[str, Optional[Callable[[str], Any]]]:\n def list_arg(arg):\n return arg.split(\",\")\n\n spec = super().get_initial_get_param_spec()\n spec.update({\n \"module\": list_arg,\n \"exercise\": list_arg,\n \"submitter\": list_arg,\n \"submitter_tag\": list_arg,\n })\n return spec\n\n def form_valid(self, form: forms.BaseForm) -> HttpResponse:\n exercises = get_exercises(form.cleaned_data)\n submitters = get_submitters(form.cleaned_data)\n existing_deviations = self.deviation_model.objects.filter(\n exercise__in=exercises,\n submitter__in=submitters,\n )\n\n if existing_deviations:\n # Some deviations already existed. Use OverrideDeviationsView to\n # confirm which ones the user wants to override. 
gh_patches_debug_19068 | rasdani/github-patches | git_diff | archlinux__archinstall-1204 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[master] Lag in menu after selecting a harddrive
Running `viztracer` generates a 2.6G trace and crashes most machines trying to analyze it.
Here's the only evidence of the issue thus far:
https://user-images.githubusercontent.com/861439/167908067-694654d5-56a3-4baa-8bcb-9378472c8677.mov
https://user-images.githubusercontent.com/861439/167908074-be9c081f-2180-42ea-84bc-7fe0fed73e20.mov
I haven't been able to track it down.
Reported by `@rez` on Discord.
It doesn't always happen; I tried a "speedrun" of `master` earlier today and it worked, with only a slight hint of the lag: https://cdn.discordapp.com/attachments/726808924401172483/973994546126024714/archinstall_speedrun.gif
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `archinstall/lib/disk/blockdevice.py`
Content:
```
1 from __future__ import annotations
2 import os
3 import json
4 import logging
5 import time
6 from typing import Optional, Dict, Any, Iterator, Tuple, List, TYPE_CHECKING
7 # https://stackoverflow.com/a/39757388/929999
8 if TYPE_CHECKING:
9 from .partition import Partition
10
11 from ..exceptions import DiskError, SysCallError
12 from ..output import log
13 from ..general import SysCommand
14 from ..storage import storage
15
16
17 class BlockDevice:
18 def __init__(self, path :str, info :Optional[Dict[str, Any]] = None):
19 if not info:
20 from .helpers import all_blockdevices
21 # If we don't give any information, we need to auto-fill it.
22 # Otherwise any subsequent usage will break.
23 info = all_blockdevices(partitions=False)[path].info
24
25 self.path = path
26 self.info = info
27 self.keep_partitions = True
28 self.part_cache = {}
29
30 # TODO: Currently disk encryption is a BIT misleading.
31 # It's actually partition-encryption, but for future-proofing this
32 # I'm placing the encryption password on a BlockDevice level.
33
34 def __repr__(self, *args :str, **kwargs :str) -> str:
35 return f"BlockDevice({self.device_or_backfile}, size={self._safe_size}GB, free_space={self._safe_free_space}, bus_type={self.bus_type})"
36
37 def __iter__(self) -> Iterator[Partition]:
38 for partition in self.partitions:
39 yield self.partitions[partition]
40
41 def __getitem__(self, key :str, *args :str, **kwargs :str) -> Any:
42 if hasattr(self, key):
43 return getattr(self, key)
44 elif key not in self.info:
45 raise KeyError(f'{self} does not contain information: "{key}"')
46 return self.info[key]
47
48 def __len__(self) -> int:
49 return len(self.partitions)
50
51 def __lt__(self, left_comparitor :'BlockDevice') -> bool:
52 return self.path < left_comparitor.path
53
54 def json(self) -> str:
55 """
56 json() has precedence over __dump__, so this is a way
57 to give less/partial information for user readability.
58 """
59 return self.path
60
61 def __dump__(self) -> Dict[str, Dict[str, Any]]:
62 return {
63 self.path : {
64 'partuuid' : self.uuid,
65 'wipe' : self.info.get('wipe', None),
66 'partitions' : [part.__dump__() for part in self.partitions.values()]
67 }
68 }
69
70 @property
71 def partition_type(self) -> str:
72 output = json.loads(SysCommand(f"lsblk --json -o+PTTYPE {self.path}").decode('UTF-8'))
73
74 for device in output['blockdevices']:
75 return device['pttype']
76
77 @property
78 def device_or_backfile(self) -> str:
79 """
80 Returns the actual device-endpoint of the BlockDevice.
81 If it's a loop-back-device it returns the back-file,
82 For other types it return self.device
83 """
84 if self.info.get('type') == 'loop':
85 return self.info['back-file']
86 else:
87 return self.device
88
89 @property
90 def mountpoint(self) -> None:
91 """
92 A dummy function to enable transparent comparisons of mountpoints.
93 As blockdevices can't be mounted directly, this will always be None
94 """
95 return None
96
97 @property
98 def device(self) -> str:
99 """
100 Returns the device file of the BlockDevice.
101 If it's a loop-back-device it returns the /dev/X device,
102 If it's a ATA-drive it returns the /dev/X device
103 And if it's a crypto-device it returns the parent device
104 """
105 if "DEVTYPE" not in self.info:
106 raise DiskError(f'Could not locate backplane info for "{self.path}"')
107
108 if self.info['DEVTYPE'] in ['disk','loop']:
109 return self.path
110 elif self.info['DEVTYPE'][:4] == 'raid':
111 # This should catch /dev/md## raid devices
112 return self.path
113 elif self.info['DEVTYPE'] == 'crypt':
114 if 'pkname' not in self.info:
115 raise DiskError(f'A crypt device ({self.path}) without a parent kernel device name.')
116 return f"/dev/{self.info['pkname']}"
117 else:
118 log(f"Unknown blockdevice type for {self.path}: {self.info['DEVTYPE']}", level=logging.DEBUG)
119
120 # if not stat.S_ISBLK(os.stat(full_path).st_mode):
121 # raise DiskError(f'Selected disk "{full_path}" is not a block device.')
122
123 @property
124 def partitions(self) -> Dict[str, Partition]:
125 from .filesystem import Partition
126
127 self.partprobe()
128 result = SysCommand(['/usr/bin/lsblk', '-J', self.path])
129
130 if b'not a block device' in result:
131 raise DiskError(f'Can not read partitions off something that isn\'t a block device: {self.path}')
132
133 if not result[:1] == b'{':
134 raise DiskError('Error getting JSON output from:', f'/usr/bin/lsblk -J {self.path}')
135
136 r = json.loads(result.decode('UTF-8'))
137 if len(r['blockdevices']) and 'children' in r['blockdevices'][0]:
138 root_path = f"/dev/{r['blockdevices'][0]['name']}"
139 for part in r['blockdevices'][0]['children']:
140 part_id = part['name'][len(os.path.basename(self.path)):]
141 if part_id not in self.part_cache:
142 # TODO: Force over-write even if in cache?
143 if part_id not in self.part_cache or self.part_cache[part_id].size != part['size']:
144 self.part_cache[part_id] = Partition(root_path + part_id, block_device=self, part_id=part_id)
145
146 return {k: self.part_cache[k] for k in sorted(self.part_cache)}
147
148 @property
149 def partition(self) -> Partition:
150 all_partitions = self.partitions
151 return [all_partitions[k] for k in all_partitions]
152
153 @property
154 def partition_table_type(self) -> int:
155 # TODO: Don't hardcode :)
156 # Remove if we don't use this function anywhere
157 from .filesystem import GPT
158 return GPT
159
160 @property
161 def uuid(self) -> str:
162 log('BlockDevice().uuid is untested!', level=logging.WARNING, fg='yellow')
163 """
164 Returns the disk UUID as returned by lsblk.
165 This is more reliable than relying on /dev/disk/by-partuuid as
166 it doesn't seam to be able to detect md raid partitions.
167 """
168 return SysCommand(f'blkid -s PTUUID -o value {self.path}').decode('UTF-8')
169
170 @property
171 def _safe_size(self) -> float:
172 from .helpers import convert_size_to_gb
173
174 try:
175 output = json.loads(SysCommand(f"lsblk --json -b -o+SIZE {self.path}").decode('UTF-8'))
176 except SysCallError:
177 return -1.0
178
179 for device in output['blockdevices']:
180 return convert_size_to_gb(device['size'])
181
182 @property
183 def size(self) -> float:
184 from .helpers import convert_size_to_gb
185
186 output = json.loads(SysCommand(f"lsblk --json -b -o+SIZE {self.path}").decode('UTF-8'))
187
188 for device in output['blockdevices']:
189 return convert_size_to_gb(device['size'])
190
191 @property
192 def bus_type(self) -> str:
193 output = json.loads(SysCommand(f"lsblk --json -o+ROTA,TRAN {self.path}").decode('UTF-8'))
194
195 for device in output['blockdevices']:
196 return device['tran']
197
198 @property
199 def spinning(self) -> bool:
200 output = json.loads(SysCommand(f"lsblk --json -o+ROTA,TRAN {self.path}").decode('UTF-8'))
201
202 for device in output['blockdevices']:
203 return device['rota'] is True
204
205 @property
206 def _safe_free_space(self) -> Tuple[str, ...]:
207 try:
208 return '+'.join(part[2] for part in self.free_space)
209 except SysCallError:
210 return '?'
211
212 @property
213 def free_space(self) -> Tuple[str, ...]:
214 # NOTE: parted -s will default to `cancel` on prompt, skipping any partition
215 # that is "outside" the disk. in /dev/sr0 this is usually the case with Archiso,
216 # so the free will ignore the ESP partition and just give the "free" space.
217 # Doesn't harm us, but worth noting in case something weird happens.
218 try:
219 for line in SysCommand(f"parted -s --machine {self.path} print free"):
220 if 'free' in (free_space := line.decode('UTF-8')):
221 _, start, end, size, *_ = free_space.strip('\r\n;').split(':')
222 yield (start, end, size)
223 except SysCallError as error:
224 log(f"Could not get free space on {self.path}: {error}", level=logging.DEBUG)
225
226 @property
227 def largest_free_space(self) -> List[str]:
228 info = []
229 for space_info in self.free_space:
230 if not info:
231 info = space_info
232 else:
233 # [-1] = size
234 if space_info[-1] > info[-1]:
235 info = space_info
236 return info
237
238 @property
239 def first_free_sector(self) -> str:
240 if info := self.largest_free_space:
241 start = info[0]
242 else:
243 start = '512MB'
244 return start
245
246 @property
247 def first_end_sector(self) -> str:
248 if info := self.largest_free_space:
249 end = info[1]
250 else:
251 end = f"{self.size}GB"
252 return end
253
254 def partprobe(self) -> bool:
255 return SysCommand(['partprobe', self.path]).exit_code == 0
256
257 def has_partitions(self) -> int:
258 return len(self.partitions)
259
260 def has_mount_point(self, mountpoint :str) -> bool:
261 for partition in self.partitions:
262 if self.partitions[partition].mountpoint == mountpoint:
263 return True
264 return False
265
266 def flush_cache(self) -> None:
267 self.part_cache = {}
268
269 def get_partition(self, uuid :str) -> Partition:
270 count = 0
271 while count < 5:
272 for partition_uuid, partition in self.partitions.items():
273 if partition.uuid.lower() == uuid.lower():
274 return partition
275 else:
276 log(f"uuid {uuid} not found. Waiting for {count +1} time",level=logging.DEBUG)
277 time.sleep(float(storage['arguments'].get('disk-sleep', 0.2)))
278 count += 1
279 else:
280 log(f"Could not find {uuid} in disk after 5 retries",level=logging.INFO)
281 print(f"Cache: {self.part_cache}")
282 print(f"Partitions: {self.partitions.items()}")
283 print(f"UUID: {[uuid]}")
284 raise DiskError(f"New partition {uuid} never showed up after adding new partition on {self}")
285
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
diff --git a/archinstall/lib/disk/blockdevice.py b/archinstall/lib/disk/blockdevice.py
--- a/archinstall/lib/disk/blockdevice.py
+++ b/archinstall/lib/disk/blockdevice.py
@@ -3,6 +3,7 @@
import json
import logging
import time
+from functools import cached_property
from typing import Optional, Dict, Any, Iterator, Tuple, List, TYPE_CHECKING
# https://stackoverflow.com/a/39757388/929999
if TYPE_CHECKING:
@@ -32,6 +33,10 @@
# I'm placing the encryption password on a BlockDevice level.
def __repr__(self, *args :str, **kwargs :str) -> str:
+ return self._str_repr
+
+ @cached_property
+ def _str_repr(self) -> str:
return f"BlockDevice({self.device_or_backfile}, size={self._safe_size}GB, free_space={self._safe_free_space}, bus_type={self.bus_type})"
def __iter__(self) -> Iterator[Partition]:
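
Why the diff above cures the reported lag: `__repr__` is presumably called every time the selection menu redraws, and the original property chain behind it (`_safe_size`, `_safe_free_space`, `bus_type`) shells out to `lsblk` and `parted` on each call. Caching the formatted string with `functools.cached_property` turns every call after the first into a plain attribute lookup. Below is a minimal, self-contained sketch of that pattern — a toy class, not archinstall's code, and it assumes `lsblk` is installed:

```python
# Toy illustration of caching an expensive repr with functools.cached_property.
import subprocess
from functools import cached_property


class SlowDevice:
    def __init__(self, path: str):
        self.path = path

    @cached_property
    def _str_repr(self) -> str:
        # The external command runs only on the first access; cached_property
        # stores the result on the instance for every later call.
        out = subprocess.run(
            ["lsblk", "--json", "-b", "-o+SIZE", self.path],
            capture_output=True, text=True, check=False,
        ).stdout
        return f"SlowDevice({self.path}, lsblk_output_bytes={len(out)})"

    def __repr__(self) -> str:
        return self._str_repr
```

Calling `repr()` on the same instance in a redraw loop now costs one external command in total rather than one per redraw; `del device._str_repr` would clear the cache if the value ever had to be refreshed.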
gh_patches_debug_5434 | rasdani/github-patches | git_diff | secdev__scapy-4403 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Scapy overrides platform
Scapy exports the platform name, and could override the platform module. This is likely the issue: https://github.com/secdev/scapy/blob/b0506a1e22321eba41d5c21d26bba418de04bc8f/scapy/consts.py#L10
Here is an example:
```shell
python issue.py
<class 'str'>
<class 'module'>
```
```python
import platform
from scapy.all import *
print(type(platform))
import platform
print(type(platform))
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scapy/consts.py`
Content:
```
1 # SPDX-License-Identifier: GPL-2.0-only
2 # This file is part of Scapy
3 # See https://scapy.net/ for more information
4 # Copyright (C) Philippe Biondi <[email protected]>
5
6 """
7 This file contains constants
8 """
9
10 from sys import byteorder, platform, maxsize
11 import platform as platform_lib
12
13 LINUX = platform.startswith("linux")
14 OPENBSD = platform.startswith("openbsd")
15 FREEBSD = "freebsd" in platform
16 NETBSD = platform.startswith("netbsd")
17 DARWIN = platform.startswith("darwin")
18 SOLARIS = platform.startswith("sunos")
19 WINDOWS = platform.startswith("win32")
20 WINDOWS_XP = platform_lib.release() == "XP"
21 BSD = DARWIN or FREEBSD or OPENBSD or NETBSD
22 # See https://docs.python.org/3/library/platform.html#cross-platform
23 IS_64BITS = maxsize > 2**32
24 BIG_ENDIAN = byteorder == 'big'
25 # LOOPBACK_NAME moved to conf.loopback_name
26
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
diff --git a/scapy/consts.py b/scapy/consts.py
--- a/scapy/consts.py
+++ b/scapy/consts.py
@@ -10,6 +10,20 @@
from sys import byteorder, platform, maxsize
import platform as platform_lib
+__all__ = [
+ "LINUX",
+ "OPENBSD",
+ "FREEBSD",
+ "NETBSD",
+ "DARWIN",
+ "SOLARIS",
+ "WINDOWS",
+ "WINDOWS_XP",
+ "BSD",
+ "IS_64BITS",
+ "BIG_ENDIAN",
+]
+
LINUX = platform.startswith("linux")
OPENBSD = platform.startswith("openbsd")
FREEBSD = "freebsd" in platform
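
Why adding `__all__` fixes the report: the reproduction does `from scapy.all import *`, and names from `scapy.consts` end up re-exported along that chain. Without `__all__`, a star import re-exports every public top-level name of a module — including the `platform` string imported from `sys` — which then shadows the standard-library `platform` module in the user's namespace. With `__all__` defined, only the listed constants are exposed. A small two-file illustration of the mechanism (the file and variable names are made up for the demo):

```python
# consts_demo.py — stands in for a module doing `from sys import platform`.
from sys import platform          # a str such as "linux" or "win32"

__all__ = ["LINUX"]               # star imports now export only LINUX

LINUX = platform.startswith("linux")
```

The importing side, which without the `__all__` line would print `<class 'str'>`, now keeps the real module:

```python
# main_demo.py
import platform                   # the standard-library module

from consts_demo import *         # respects __all__, so `platform` survives

print(type(platform))             # <class 'module'>
```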
gh_patches_debug_2296 | rasdani/github-patches | git_diff | Qiskit__qiskit-5218 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Show pi in the text drawer as π
### What is the expected enhancement?
Based on a discussion in #4931, there's interest in displaying pi in the text drawer as `π` instead of `pi`.
```
                 ┌─────────┐            ┌────────┐
Change from q_0: ┤ RX(pij) ├  to   q_0: ┤ RX(πj) ├
                 └─────────┘            └────────┘
```
To do this on all platforms requires completion of #4900 and modifying pi_check and 24 tests.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `qiskit/circuit/tools/pi_check.py`
Content:
```
1 # This code is part of Qiskit.
2 #
3 # (C) Copyright IBM 2017, 2019.
4 #
5 # This code is licensed under the Apache License, Version 2.0. You may
6 # obtain a copy of this license in the LICENSE.txt file in the root directory
7 # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
8 #
9 # Any modifications or derivative works of this code must retain this
10 # copyright notice, and modified files need to carry a notice indicating
11 # that they have been altered from the originals.
12 # pylint: disable=too-many-return-statements
13
14 """Check if number close to values of PI
15 """
16
17 import numpy as np
18 from qiskit.circuit.parameterexpression import ParameterExpression
19 from qiskit.exceptions import QiskitError
20
21 MAX_FRAC = 16
22 N, D = np.meshgrid(np.arange(1, MAX_FRAC+1), np.arange(1, MAX_FRAC+1))
23 FRAC_MESH = N / D * np.pi
24 RECIP_MESH = N / D / np.pi
25 POW_LIST = np.pi ** np.arange(2, 5)
26
27
28 def pi_check(inpt, eps=1e-6, output='text', ndigits=5):
29 """ Computes if a number is close to an integer
30 fraction or multiple of PI and returns the
31 corresponding string.
32
33 Args:
34 inpt (float): Number to check.
35 eps (float): EPS to check against.
36 output (str): Options are 'text' (default),
37 'latex', 'mpl', and 'qasm'.
38 ndigits (int): Number of digits to print
39 if returning raw inpt.
40
41 Returns:
42 str: string representation of output.
43
44 Raises:
45 QiskitError: if output is not a valid option.
46 """
47 if isinstance(inpt, ParameterExpression):
48 param_str = str(inpt)
49 syms = inpt._symbol_expr.expr_free_symbols
50 for sym in syms:
51 if not sym.is_number:
52 continue
53 pi = pi_check(float(sym), eps=eps, output=output, ndigits=ndigits)
54 try:
55 _ = float(pi)
56 except (ValueError, TypeError):
57 # Strip leading '-' from pi since must replace with abs(sym)
58 # in order to preserve spacing around minuses in expression
59 if pi[0] == '-':
60 pi = pi[1:]
61 param_str = param_str.replace(str(abs(sym)), pi)
62 return param_str
63 elif isinstance(inpt, str):
64 return inpt
65
66 def normalize(single_inpt):
67 if abs(single_inpt) < 1e-14:
68 return '0'
69
70 if output in ['text', 'qasm']:
71 pi = 'pi'
72 elif output == 'latex':
73 pi = '\\pi'
74 elif output == 'mpl':
75 pi = '$\\pi$'
76 else:
77 raise QiskitError('pi_check parameter output should be text, '
78 'latex, mpl, or qasm.')
79
80 neg_str = '-' if single_inpt < 0 else ''
81
82 # First check is for whole multiples of pi
83 val = single_inpt / np.pi
84 if abs(val) >= 1 - eps:
85 if abs(abs(val) - abs(round(val))) < eps:
86 val = int(abs(round(val)))
87 if abs(val) == 1:
88 str_out = '{}{}'.format(neg_str, pi)
89 else:
90 if output == 'qasm':
91 str_out = '{}{}*{}'.format(neg_str, val, pi)
92 else:
93 str_out = '{}{}{}'.format(neg_str, val, pi)
94 return str_out
95
96 # Second is a check for powers of pi
97 if abs(single_inpt) > np.pi:
98 power = np.where(abs(abs(single_inpt) - POW_LIST) < eps)
99 if power[0].shape[0]:
100 if output == 'qasm':
101 str_out = '{:.{}g}'.format(single_inpt, ndigits)
102 elif output == 'latex':
103 str_out = '{}{}^{}'.format(neg_str, pi, power[0][0] + 2)
104 elif output == 'mpl':
105 str_out = '{}{}$^{}$'.format(neg_str, pi, power[0][0] + 2)
106 else:
107 str_out = '{}{}**{}'.format(neg_str, pi, power[0][0] + 2)
108 return str_out
109
110 # Third is a check for a number larger than MAX_FRAC * pi, not a
111 # multiple or power of pi, since no fractions will exceed MAX_FRAC * pi
112 if abs(single_inpt) >= (MAX_FRAC * np.pi):
113 str_out = '{:.{}g}'.format(single_inpt, ndigits)
114 return str_out
115
116 # Fourth check is for fractions for 1*pi in the numer and any
117 # number in the denom.
118 val = np.pi / single_inpt
119 if abs(abs(val) - abs(round(val))) < eps:
120 val = int(abs(round(val)))
121 if output == 'latex':
122 str_out = '\\frac{%s%s}{%s}' % (neg_str, pi, val)
123 else:
124 str_out = '{}{}/{}'.format(neg_str, pi, val)
125 return str_out
126
127 # Fifth check is for fractions where the numer > 1*pi and numer
128 # is up to MAX_FRAC*pi and denom is up to MAX_FRAC and all
129 # fractions are reduced. Ex. 15pi/16, 2pi/5, 15pi/2, 16pi/9.
130 frac = np.where(np.abs(abs(single_inpt) - FRAC_MESH) < eps)
131 if frac[0].shape[0]:
132 numer = int(frac[1][0]) + 1
133 denom = int(frac[0][0]) + 1
134 if output == 'latex':
135 str_out = '\\frac{%s%s%s}{%s}' % (neg_str, numer, pi, denom)
136 elif output == 'qasm':
137 str_out = '{}{}*{}/{}'.format(neg_str, numer, pi, denom)
138 else:
139 str_out = '{}{}{}/{}'.format(neg_str, numer, pi, denom)
140 return str_out
141
142 # Sixth check is for fractions where the numer > 1 and numer
143 # is up to MAX_FRAC and denom is up to MAX_FRAC*pi and all
144 # fractions are reduced. Ex. 15/16pi, 2/5pi, 15/2pi, 16/9pi
145 frac = np.where(np.abs(abs(single_inpt) - RECIP_MESH) < eps)
146 if frac[0].shape[0]:
147 numer = int(frac[1][0]) + 1
148 denom = int(frac[0][0]) + 1
149 if denom == 1 and output != 'qasm':
150 denom = ''
151 if output == 'latex':
152 str_out = '\\frac{%s%s}{%s%s}' % (neg_str, numer, denom, pi)
153 elif output == 'qasm':
154 str_out = '{}{}/({}*{})'.format(neg_str, numer, denom, pi)
155 else:
156 str_out = '{}{}/{}{}'.format(neg_str, numer, denom, pi)
157 return str_out
158
159 # Nothing found
160 str_out = '{:.{}g}'.format(single_inpt, ndigits)
161 return str_out
162
163 complex_inpt = complex(inpt)
164 real, imag = map(normalize, [complex_inpt.real, complex_inpt.imag])
165
166 jstr = '\\jmath' if output == 'latex' else 'j'
167 if real == '0' and imag != '0':
168 str_out = imag + jstr
169 elif real != '0' and imag != '0':
170 op_str = '+'
171 # Remove + if imag negative except for latex fractions
172 if complex_inpt.imag < 0 and (output != 'latex' or '\\frac' not in imag):
173 op_str = ''
174 str_out = '{}{}{}{}'.format(real, op_str, imag, jstr)
175 else:
176 str_out = real
177 return str_out
178
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/qiskit/circuit/tools/pi_check.py b/qiskit/circuit/tools/pi_check.py
--- a/qiskit/circuit/tools/pi_check.py
+++ b/qiskit/circuit/tools/pi_check.py
@@ -67,7 +67,9 @@
if abs(single_inpt) < 1e-14:
return '0'
- if output in ['text', 'qasm']:
+ if output == 'text':
+ pi = 'π'
+ elif output == 'qasm':
pi = 'pi'
elif output == 'latex':
pi = '\\pi'
| {"golden_diff": "diff --git a/qiskit/circuit/tools/pi_check.py b/qiskit/circuit/tools/pi_check.py\n--- a/qiskit/circuit/tools/pi_check.py\n+++ b/qiskit/circuit/tools/pi_check.py\n@@ -67,7 +67,9 @@\n if abs(single_inpt) < 1e-14:\n return '0'\n \n- if output in ['text', 'qasm']:\n+ if output == 'text':\n+ pi = '\u03c0'\n+ elif output == 'qasm':\n pi = 'pi'\n elif output == 'latex':\n pi = '\\\\pi'\n", "issue": "Show pi in the text drawer as \u03c0\n<!-- \u26a0\ufe0f If you do not respect this template, your issue will be closed -->\r\n<!-- \u26a0\ufe0f Make sure to browse the opened and closed issues to confirm this idea does not exist. -->\r\n\r\n### What is the expected enhancement?\r\n\r\nBased on a discussion in #4931, there's interest in displaying pi in the text drawer as `\u03c0 `instead of `pi`.\r\n``` \r\n \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510 \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510\r\nChange from q_0: \u2524 RX(pij) \u251c to q_0: \u2524 RX(\u03c0j) \u251c\r\n \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518 \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518\r\n```\r\nTo do this on all platforms requires completion of #4900 and modifying pi_check and 24 tests.\n", "before_files": [{"content": "# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2017, 2019.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n# pylint: disable=too-many-return-statements\n\n\"\"\"Check if number close to values of PI\n\"\"\"\n\nimport numpy as np\nfrom qiskit.circuit.parameterexpression import ParameterExpression\nfrom qiskit.exceptions import QiskitError\n\nMAX_FRAC = 16\nN, D = np.meshgrid(np.arange(1, MAX_FRAC+1), np.arange(1, MAX_FRAC+1))\nFRAC_MESH = N / D * np.pi\nRECIP_MESH = N / D / np.pi\nPOW_LIST = np.pi ** np.arange(2, 5)\n\n\ndef pi_check(inpt, eps=1e-6, output='text', ndigits=5):\n \"\"\" Computes if a number is close to an integer\n fraction or multiple of PI and returns the\n corresponding string.\n\n Args:\n inpt (float): Number to check.\n eps (float): EPS to check against.\n output (str): Options are 'text' (default),\n 'latex', 'mpl', and 'qasm'.\n ndigits (int): Number of digits to print\n if returning raw inpt.\n\n Returns:\n str: string representation of output.\n\n Raises:\n QiskitError: if output is not a valid option.\n \"\"\"\n if isinstance(inpt, ParameterExpression):\n param_str = str(inpt)\n syms = inpt._symbol_expr.expr_free_symbols\n for sym in syms:\n if not sym.is_number:\n continue\n pi = pi_check(float(sym), eps=eps, output=output, ndigits=ndigits)\n try:\n _ = float(pi)\n except (ValueError, TypeError):\n # Strip leading '-' from pi since must replace with abs(sym)\n # in order to preserve spacing around minuses in expression\n if pi[0] == '-':\n pi = pi[1:]\n param_str = param_str.replace(str(abs(sym)), pi)\n return param_str\n elif isinstance(inpt, str):\n return inpt\n\n def normalize(single_inpt):\n if abs(single_inpt) < 1e-14:\n return '0'\n\n if output in ['text', 'qasm']:\n pi = 'pi'\n elif output == 'latex':\n pi = '\\\\pi'\n elif output == 'mpl':\n pi = '$\\\\pi$'\n else:\n raise 
QiskitError('pi_check parameter output should be text, '\n 'latex, mpl, or qasm.')\n\n neg_str = '-' if single_inpt < 0 else ''\n\n # First check is for whole multiples of pi\n val = single_inpt / np.pi\n if abs(val) >= 1 - eps:\n if abs(abs(val) - abs(round(val))) < eps:\n val = int(abs(round(val)))\n if abs(val) == 1:\n str_out = '{}{}'.format(neg_str, pi)\n else:\n if output == 'qasm':\n str_out = '{}{}*{}'.format(neg_str, val, pi)\n else:\n str_out = '{}{}{}'.format(neg_str, val, pi)\n return str_out\n\n # Second is a check for powers of pi\n if abs(single_inpt) > np.pi:\n power = np.where(abs(abs(single_inpt) - POW_LIST) < eps)\n if power[0].shape[0]:\n if output == 'qasm':\n str_out = '{:.{}g}'.format(single_inpt, ndigits)\n elif output == 'latex':\n str_out = '{}{}^{}'.format(neg_str, pi, power[0][0] + 2)\n elif output == 'mpl':\n str_out = '{}{}$^{}$'.format(neg_str, pi, power[0][0] + 2)\n else:\n str_out = '{}{}**{}'.format(neg_str, pi, power[0][0] + 2)\n return str_out\n\n # Third is a check for a number larger than MAX_FRAC * pi, not a\n # multiple or power of pi, since no fractions will exceed MAX_FRAC * pi\n if abs(single_inpt) >= (MAX_FRAC * np.pi):\n str_out = '{:.{}g}'.format(single_inpt, ndigits)\n return str_out\n\n # Fourth check is for fractions for 1*pi in the numer and any\n # number in the denom.\n val = np.pi / single_inpt\n if abs(abs(val) - abs(round(val))) < eps:\n val = int(abs(round(val)))\n if output == 'latex':\n str_out = '\\\\frac{%s%s}{%s}' % (neg_str, pi, val)\n else:\n str_out = '{}{}/{}'.format(neg_str, pi, val)\n return str_out\n\n # Fifth check is for fractions where the numer > 1*pi and numer\n # is up to MAX_FRAC*pi and denom is up to MAX_FRAC and all\n # fractions are reduced. Ex. 15pi/16, 2pi/5, 15pi/2, 16pi/9.\n frac = np.where(np.abs(abs(single_inpt) - FRAC_MESH) < eps)\n if frac[0].shape[0]:\n numer = int(frac[1][0]) + 1\n denom = int(frac[0][0]) + 1\n if output == 'latex':\n str_out = '\\\\frac{%s%s%s}{%s}' % (neg_str, numer, pi, denom)\n elif output == 'qasm':\n str_out = '{}{}*{}/{}'.format(neg_str, numer, pi, denom)\n else:\n str_out = '{}{}{}/{}'.format(neg_str, numer, pi, denom)\n return str_out\n\n # Sixth check is for fractions where the numer > 1 and numer\n # is up to MAX_FRAC and denom is up to MAX_FRAC*pi and all\n # fractions are reduced. Ex. 
15/16pi, 2/5pi, 15/2pi, 16/9pi\n frac = np.where(np.abs(abs(single_inpt) - RECIP_MESH) < eps)\n if frac[0].shape[0]:\n numer = int(frac[1][0]) + 1\n denom = int(frac[0][0]) + 1\n if denom == 1 and output != 'qasm':\n denom = ''\n if output == 'latex':\n str_out = '\\\\frac{%s%s}{%s%s}' % (neg_str, numer, denom, pi)\n elif output == 'qasm':\n str_out = '{}{}/({}*{})'.format(neg_str, numer, denom, pi)\n else:\n str_out = '{}{}/{}{}'.format(neg_str, numer, denom, pi)\n return str_out\n\n # Nothing found\n str_out = '{:.{}g}'.format(single_inpt, ndigits)\n return str_out\n\n complex_inpt = complex(inpt)\n real, imag = map(normalize, [complex_inpt.real, complex_inpt.imag])\n\n jstr = '\\\\jmath' if output == 'latex' else 'j'\n if real == '0' and imag != '0':\n str_out = imag + jstr\n elif real != '0' and imag != '0':\n op_str = '+'\n # Remove + if imag negative except for latex fractions\n if complex_inpt.imag < 0 and (output != 'latex' or '\\\\frac' not in imag):\n op_str = ''\n str_out = '{}{}{}{}'.format(real, op_str, imag, jstr)\n else:\n str_out = real\n return str_out\n", "path": "qiskit/circuit/tools/pi_check.py"}], "after_files": [{"content": "# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2017, 2019.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n# pylint: disable=too-many-return-statements\n\n\"\"\"Check if number close to values of PI\n\"\"\"\n\nimport numpy as np\nfrom qiskit.circuit.parameterexpression import ParameterExpression\nfrom qiskit.exceptions import QiskitError\n\nMAX_FRAC = 16\nN, D = np.meshgrid(np.arange(1, MAX_FRAC+1), np.arange(1, MAX_FRAC+1))\nFRAC_MESH = N / D * np.pi\nRECIP_MESH = N / D / np.pi\nPOW_LIST = np.pi ** np.arange(2, 5)\n\n\ndef pi_check(inpt, eps=1e-6, output='text', ndigits=5):\n \"\"\" Computes if a number is close to an integer\n fraction or multiple of PI and returns the\n corresponding string.\n\n Args:\n inpt (float): Number to check.\n eps (float): EPS to check against.\n output (str): Options are 'text' (default),\n 'latex', 'mpl', and 'qasm'.\n ndigits (int): Number of digits to print\n if returning raw inpt.\n\n Returns:\n str: string representation of output.\n\n Raises:\n QiskitError: if output is not a valid option.\n \"\"\"\n if isinstance(inpt, ParameterExpression):\n param_str = str(inpt)\n syms = inpt._symbol_expr.expr_free_symbols\n for sym in syms:\n if not sym.is_number:\n continue\n pi = pi_check(float(sym), eps=eps, output=output, ndigits=ndigits)\n try:\n _ = float(pi)\n except (ValueError, TypeError):\n # Strip leading '-' from pi since must replace with abs(sym)\n # in order to preserve spacing around minuses in expression\n if pi[0] == '-':\n pi = pi[1:]\n param_str = param_str.replace(str(abs(sym)), pi)\n return param_str\n elif isinstance(inpt, str):\n return inpt\n\n def normalize(single_inpt):\n if abs(single_inpt) < 1e-14:\n return '0'\n\n if output == 'text':\n pi = '\u03c0'\n elif output == 'qasm':\n pi = 'pi'\n elif output == 'latex':\n pi = '\\\\pi'\n elif output == 'mpl':\n pi = '$\\\\pi$'\n else:\n raise QiskitError('pi_check parameter output should be text, '\n 'latex, mpl, or qasm.')\n\n neg_str = '-' if single_inpt < 0 else ''\n\n # 
First check is for whole multiples of pi\n val = single_inpt / np.pi\n if abs(val) >= 1 - eps:\n if abs(abs(val) - abs(round(val))) < eps:\n val = int(abs(round(val)))\n if abs(val) == 1:\n str_out = '{}{}'.format(neg_str, pi)\n else:\n if output == 'qasm':\n str_out = '{}{}*{}'.format(neg_str, val, pi)\n else:\n str_out = '{}{}{}'.format(neg_str, val, pi)\n return str_out\n\n # Second is a check for powers of pi\n if abs(single_inpt) > np.pi:\n power = np.where(abs(abs(single_inpt) - POW_LIST) < eps)\n if power[0].shape[0]:\n if output == 'qasm':\n str_out = '{:.{}g}'.format(single_inpt, ndigits)\n elif output == 'latex':\n str_out = '{}{}^{}'.format(neg_str, pi, power[0][0] + 2)\n elif output == 'mpl':\n str_out = '{}{}$^{}$'.format(neg_str, pi, power[0][0] + 2)\n else:\n str_out = '{}{}**{}'.format(neg_str, pi, power[0][0] + 2)\n return str_out\n\n # Third is a check for a number larger than MAX_FRAC * pi, not a\n # multiple or power of pi, since no fractions will exceed MAX_FRAC * pi\n if abs(single_inpt) >= (MAX_FRAC * np.pi):\n str_out = '{:.{}g}'.format(single_inpt, ndigits)\n return str_out\n\n # Fourth check is for fractions for 1*pi in the numer and any\n # number in the denom.\n val = np.pi / single_inpt\n if abs(abs(val) - abs(round(val))) < eps:\n val = int(abs(round(val)))\n if output == 'latex':\n str_out = '\\\\frac{%s%s}{%s}' % (neg_str, pi, val)\n else:\n str_out = '{}{}/{}'.format(neg_str, pi, val)\n return str_out\n\n # Fifth check is for fractions where the numer > 1*pi and numer\n # is up to MAX_FRAC*pi and denom is up to MAX_FRAC and all\n # fractions are reduced. Ex. 15pi/16, 2pi/5, 15pi/2, 16pi/9.\n frac = np.where(np.abs(abs(single_inpt) - FRAC_MESH) < eps)\n if frac[0].shape[0]:\n numer = int(frac[1][0]) + 1\n denom = int(frac[0][0]) + 1\n if output == 'latex':\n str_out = '\\\\frac{%s%s%s}{%s}' % (neg_str, numer, pi, denom)\n elif output == 'qasm':\n str_out = '{}{}*{}/{}'.format(neg_str, numer, pi, denom)\n else:\n str_out = '{}{}{}/{}'.format(neg_str, numer, pi, denom)\n return str_out\n\n # Sixth check is for fractions where the numer > 1 and numer\n # is up to MAX_FRAC and denom is up to MAX_FRAC*pi and all\n # fractions are reduced. Ex. 15/16pi, 2/5pi, 15/2pi, 16/9pi\n frac = np.where(np.abs(abs(single_inpt) - RECIP_MESH) < eps)\n if frac[0].shape[0]:\n numer = int(frac[1][0]) + 1\n denom = int(frac[0][0]) + 1\n if denom == 1 and output != 'qasm':\n denom = ''\n if output == 'latex':\n str_out = '\\\\frac{%s%s}{%s%s}' % (neg_str, numer, denom, pi)\n elif output == 'qasm':\n str_out = '{}{}/({}*{})'.format(neg_str, numer, denom, pi)\n else:\n str_out = '{}{}/{}{}'.format(neg_str, numer, denom, pi)\n return str_out\n\n # Nothing found\n str_out = '{:.{}g}'.format(single_inpt, ndigits)\n return str_out\n\n complex_inpt = complex(inpt)\n real, imag = map(normalize, [complex_inpt.real, complex_inpt.imag])\n\n jstr = '\\\\jmath' if output == 'latex' else 'j'\n if real == '0' and imag != '0':\n str_out = imag + jstr\n elif real != '0' and imag != '0':\n op_str = '+'\n # Remove + if imag negative except for latex fractions\n if complex_inpt.imag < 0 and (output != 'latex' or '\\\\frac' not in imag):\n op_str = ''\n str_out = '{}{}{}{}'.format(real, op_str, imag, jstr)\n else:\n str_out = real\n return str_out\n", "path": "qiskit/circuit/tools/pi_check.py"}]} | 2,689 | 136 |
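The Qiskit record above comes down to choosing a different pi symbol per output target. A minimal sketch of that branch (a standalone helper written for illustration, not the actual `pi_check` signature or Qiskit API):

```python
# Minimal sketch of the symbol selection introduced by the patch above.
def pi_symbol(output: str) -> str:
    if output == "text":
        return "π"        # text drawer now shows the glyph
    if output == "qasm":
        return "pi"       # OpenQASM output keeps the ASCII keyword
    if output == "latex":
        return "\\pi"
    if output == "mpl":
        return "$\\pi$"
    raise ValueError("output should be text, latex, mpl, or qasm")


print(pi_symbol("text"))  # π
print(pi_symbol("qasm"))  # pi
```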
gh_patches_debug_22411 | rasdani/github-patches | git_diff | wagtail__wagtail-730 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Allow use of MPO formatted JPEG images
Just tried loading some JPEG images into a website and was given an error "Not a valid JPEG image please use blah blah".
The images were from my Nikon D3300, which seems to create JPEG files in MPO format. This format is supported by Pillow, but Wagtail is blocking them from being uploaded. I disabled the format validation and everything seemed to work fine.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `wagtail/wagtailimages/fields.py`
Content:
```
1 import os
2
3 from PIL import Image
4
5 from django.forms.fields import ImageField
6 from django.core.exceptions import ValidationError
7 from django.utils.translation import ugettext_lazy as _
8 from django.template.defaultfilters import filesizeformat
9 from django.conf import settings
10
11
12 ALLOWED_EXTENSIONS = ['gif', 'jpg', 'jpeg', 'png']
13 SUPPORTED_FORMATS_TEXT = _("GIF, JPEG, PNG")
14
15 INVALID_IMAGE_ERROR = _(
16 "Not a supported image format. Supported formats: %s."
17 ) % SUPPORTED_FORMATS_TEXT
18
19 INVALID_IMAGE_KNOWN_FORMAT_ERROR = _(
20 "Not a valid %s image."
21 )
22
23 MAX_UPLOAD_SIZE = getattr(settings, 'WAGTAILIMAGES_MAX_UPLOAD_SIZE', 10 * 1024 * 1024)
24
25 if MAX_UPLOAD_SIZE is not None:
26 MAX_UPLOAD_SIZE_TEXT = filesizeformat(MAX_UPLOAD_SIZE)
27
28 FILE_TOO_LARGE_ERROR = _(
29 "This file is too big. Maximum filesize %s."
30 ) % (MAX_UPLOAD_SIZE_TEXT, )
31
32 FILE_TOO_LARGE_KNOWN_SIZE_ERROR = _(
33 "This file is too big (%%s). Maximum filesize %s."
34 ) % (MAX_UPLOAD_SIZE_TEXT, )
35
36 IMAGE_FIELD_HELP_TEXT = _(
37 "Supported formats: %s. Maximum filesize: %s."
38 ) % (SUPPORTED_FORMATS_TEXT, MAX_UPLOAD_SIZE_TEXT, )
39 else:
40 MAX_UPLOAD_SIZE_TEXT = ""
41 FILE_TOO_LARGE_ERROR = ""
42 FILE_TOO_LARGE_KNOWN_SIZE_ERROR = ""
43
44 IMAGE_FIELD_HELP_TEXT = _(
45 "Supported formats: %s."
46 ) % (SUPPORTED_FORMATS_TEXT, )
47
48
49 class WagtailImageField(ImageField):
50 default_error_messages = {
51 'invalid_image': INVALID_IMAGE_ERROR,
52 'invalid_image_known_format': INVALID_IMAGE_KNOWN_FORMAT_ERROR,
53 'file_too_large': FILE_TOO_LARGE_KNOWN_SIZE_ERROR,
54 }
55
56 def __init__(self, *args, **kwargs):
57 super(WagtailImageField, self).__init__(*args, **kwargs)
58
59 self.help_text = IMAGE_FIELD_HELP_TEXT
60
61 def check_image_file_format(self, f):
62 # Check file extension
63 extension = os.path.splitext(f.name)[1].lower()[1:]
64
65 if extension not in ALLOWED_EXTENSIONS:
66 raise ValidationError(self.error_messages['invalid_image'], code='invalid_image')
67
68 if hasattr(f, 'image'):
69 # Django 1.8 annotates the file object with the PIL image
70 image = f.image
71 elif not f.closed:
72 # Open image file
73 file_position = f.tell()
74 f.seek(0)
75
76 try:
77 image = Image.open(f)
78 except IOError:
79 # Uploaded file is not even an image file (or corrupted)
80 raise ValidationError(self.error_messages['invalid_image_known_format'],
81 code='invalid_image_known_format')
82
83 f.seek(file_position)
84 else:
85 # Couldn't get the PIL image, skip checking the internal file format
86 return
87
88 image_format = extension
89 if extension == 'jpg':
90 image_format = 'jpeg'
91
92 # Check that the internal format matches the extension
93 # It is possible to upload PSD files if their extension is set to jpg, png or gif. This should catch them out
94 if image.format.upper() != image_format.upper():
95 raise ValidationError(self.error_messages['invalid_image_known_format'] % (
96 image_format.upper()
97 ), code='invalid_image_known_format')
98
99 def check_image_file_size(self, f):
100 # Upload size checking can be disabled by setting max upload size to None
101 if MAX_UPLOAD_SIZE is None:
102 return
103
104 # Check the filesize
105 if f.size > MAX_UPLOAD_SIZE:
106 raise ValidationError(self.error_messages['file_too_large'] % (
107 filesizeformat(f.size),
108 ), code='file_too_large')
109
110 def to_python(self, data):
111 f = super(WagtailImageField, self).to_python(data)
112
113 if f is not None:
114 self.check_image_file_size(f)
115 self.check_image_file_format(f)
116
117 return f
118
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/wagtail/wagtailimages/fields.py b/wagtail/wagtailimages/fields.py
--- a/wagtail/wagtailimages/fields.py
+++ b/wagtail/wagtailimages/fields.py
@@ -85,15 +85,19 @@
# Couldn't get the PIL image, skip checking the internal file format
return
- image_format = extension
- if extension == 'jpg':
- image_format = 'jpeg'
+ image_format = extension.upper()
+ if image_format == 'JPG':
+ image_format = 'JPEG'
+
+ internal_image_format = image.format.upper()
+ if internal_image_format == 'MPO':
+ internal_image_format = 'JPEG'
# Check that the internal format matches the extension
# It is possible to upload PSD files if their extension is set to jpg, png or gif. This should catch them out
- if image.format.upper() != image_format.upper():
+ if internal_image_format != image_format:
raise ValidationError(self.error_messages['invalid_image_known_format'] % (
- image_format.upper()
+ image_format,
), code='invalid_image_known_format')
def check_image_file_size(self, f):
| {"golden_diff": "diff --git a/wagtail/wagtailimages/fields.py b/wagtail/wagtailimages/fields.py\n--- a/wagtail/wagtailimages/fields.py\n+++ b/wagtail/wagtailimages/fields.py\n@@ -85,15 +85,19 @@\n # Couldn't get the PIL image, skip checking the internal file format\n return\n \n- image_format = extension\n- if extension == 'jpg':\n- image_format = 'jpeg'\n+ image_format = extension.upper()\n+ if image_format == 'JPG':\n+ image_format = 'JPEG'\n+\n+ internal_image_format = image.format.upper()\n+ if internal_image_format == 'MPO':\n+ internal_image_format = 'JPEG'\n \n # Check that the internal format matches the extension\n # It is possible to upload PSD files if their extension is set to jpg, png or gif. This should catch them out\n- if image.format.upper() != image_format.upper():\n+ if internal_image_format != image_format:\n raise ValidationError(self.error_messages['invalid_image_known_format'] % (\n- image_format.upper()\n+ image_format,\n ), code='invalid_image_known_format')\n \n def check_image_file_size(self, f):\n", "issue": "Allow use of MPO formatted JPEG images\nJust tried loading some JPEG images into a website and was given an error \"Not a valid JPEG image please use blah blah\".\n\nThe images were from my Nikon D3300 which seems to create JPEG files in MPO format. This format is supported by Pillow but Wagtail is blocking them from being uploaded. I disabled the format validation and everything seemed to work fine.\n\n", "before_files": [{"content": "import os\n\nfrom PIL import Image\n\nfrom django.forms.fields import ImageField\nfrom django.core.exceptions import ValidationError\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.template.defaultfilters import filesizeformat\nfrom django.conf import settings\n\n\nALLOWED_EXTENSIONS = ['gif', 'jpg', 'jpeg', 'png']\nSUPPORTED_FORMATS_TEXT = _(\"GIF, JPEG, PNG\")\n\nINVALID_IMAGE_ERROR = _(\n \"Not a supported image format. Supported formats: %s.\"\n) % SUPPORTED_FORMATS_TEXT\n\nINVALID_IMAGE_KNOWN_FORMAT_ERROR = _(\n \"Not a valid %s image.\"\n)\n\nMAX_UPLOAD_SIZE = getattr(settings, 'WAGTAILIMAGES_MAX_UPLOAD_SIZE', 10 * 1024 * 1024)\n\nif MAX_UPLOAD_SIZE is not None:\n MAX_UPLOAD_SIZE_TEXT = filesizeformat(MAX_UPLOAD_SIZE)\n\n FILE_TOO_LARGE_ERROR = _(\n \"This file is too big. Maximum filesize %s.\"\n ) % (MAX_UPLOAD_SIZE_TEXT, )\n\n FILE_TOO_LARGE_KNOWN_SIZE_ERROR = _(\n \"This file is too big (%%s). Maximum filesize %s.\"\n ) % (MAX_UPLOAD_SIZE_TEXT, )\n\n IMAGE_FIELD_HELP_TEXT = _(\n \"Supported formats: %s. 
Maximum filesize: %s.\"\n ) % (SUPPORTED_FORMATS_TEXT, MAX_UPLOAD_SIZE_TEXT, )\nelse:\n MAX_UPLOAD_SIZE_TEXT = \"\"\n FILE_TOO_LARGE_ERROR = \"\"\n FILE_TOO_LARGE_KNOWN_SIZE_ERROR = \"\"\n\n IMAGE_FIELD_HELP_TEXT = _(\n \"Supported formats: %s.\"\n ) % (SUPPORTED_FORMATS_TEXT, )\n\n\nclass WagtailImageField(ImageField):\n default_error_messages = {\n 'invalid_image': INVALID_IMAGE_ERROR,\n 'invalid_image_known_format': INVALID_IMAGE_KNOWN_FORMAT_ERROR,\n 'file_too_large': FILE_TOO_LARGE_KNOWN_SIZE_ERROR,\n }\n\n def __init__(self, *args, **kwargs):\n super(WagtailImageField, self).__init__(*args, **kwargs)\n\n self.help_text = IMAGE_FIELD_HELP_TEXT\n\n def check_image_file_format(self, f):\n # Check file extension\n extension = os.path.splitext(f.name)[1].lower()[1:]\n\n if extension not in ALLOWED_EXTENSIONS:\n raise ValidationError(self.error_messages['invalid_image'], code='invalid_image')\n\n if hasattr(f, 'image'):\n # Django 1.8 annotates the file object with the PIL image\n image = f.image\n elif not f.closed:\n # Open image file\n file_position = f.tell()\n f.seek(0)\n\n try:\n image = Image.open(f)\n except IOError:\n # Uploaded file is not even an image file (or corrupted)\n raise ValidationError(self.error_messages['invalid_image_known_format'],\n code='invalid_image_known_format')\n\n f.seek(file_position)\n else:\n # Couldn't get the PIL image, skip checking the internal file format\n return\n\n image_format = extension\n if extension == 'jpg':\n image_format = 'jpeg'\n\n # Check that the internal format matches the extension\n # It is possible to upload PSD files if their extension is set to jpg, png or gif. This should catch them out\n if image.format.upper() != image_format.upper():\n raise ValidationError(self.error_messages['invalid_image_known_format'] % (\n image_format.upper()\n ), code='invalid_image_known_format')\n\n def check_image_file_size(self, f):\n # Upload size checking can be disabled by setting max upload size to None\n if MAX_UPLOAD_SIZE is None:\n return\n\n # Check the filesize\n if f.size > MAX_UPLOAD_SIZE:\n raise ValidationError(self.error_messages['file_too_large'] % (\n filesizeformat(f.size),\n ), code='file_too_large')\n\n def to_python(self, data):\n f = super(WagtailImageField, self).to_python(data)\n\n if f is not None:\n self.check_image_file_size(f)\n self.check_image_file_format(f)\n\n return f\n", "path": "wagtail/wagtailimages/fields.py"}], "after_files": [{"content": "import os\n\nfrom PIL import Image\n\nfrom django.forms.fields import ImageField\nfrom django.core.exceptions import ValidationError\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.template.defaultfilters import filesizeformat\nfrom django.conf import settings\n\n\nALLOWED_EXTENSIONS = ['gif', 'jpg', 'jpeg', 'png']\nSUPPORTED_FORMATS_TEXT = _(\"GIF, JPEG, PNG\")\n\nINVALID_IMAGE_ERROR = _(\n \"Not a supported image format. Supported formats: %s.\"\n) % SUPPORTED_FORMATS_TEXT\n\nINVALID_IMAGE_KNOWN_FORMAT_ERROR = _(\n \"Not a valid %s image.\"\n)\n\nMAX_UPLOAD_SIZE = getattr(settings, 'WAGTAILIMAGES_MAX_UPLOAD_SIZE', 10 * 1024 * 1024)\n\nif MAX_UPLOAD_SIZE is not None:\n MAX_UPLOAD_SIZE_TEXT = filesizeformat(MAX_UPLOAD_SIZE)\n\n FILE_TOO_LARGE_ERROR = _(\n \"This file is too big. Maximum filesize %s.\"\n ) % (MAX_UPLOAD_SIZE_TEXT, )\n\n FILE_TOO_LARGE_KNOWN_SIZE_ERROR = _(\n \"This file is too big (%%s). Maximum filesize %s.\"\n ) % (MAX_UPLOAD_SIZE_TEXT, )\n\n IMAGE_FIELD_HELP_TEXT = _(\n \"Supported formats: %s. 
Maximum filesize: %s.\"\n ) % (SUPPORTED_FORMATS_TEXT, MAX_UPLOAD_SIZE_TEXT, )\nelse:\n MAX_UPLOAD_SIZE_TEXT = \"\"\n FILE_TOO_LARGE_ERROR = \"\"\n FILE_TOO_LARGE_KNOWN_SIZE_ERROR = \"\"\n\n IMAGE_FIELD_HELP_TEXT = _(\n \"Supported formats: %s.\"\n ) % (SUPPORTED_FORMATS_TEXT, )\n\n\nclass WagtailImageField(ImageField):\n default_error_messages = {\n 'invalid_image': INVALID_IMAGE_ERROR,\n 'invalid_image_known_format': INVALID_IMAGE_KNOWN_FORMAT_ERROR,\n 'file_too_large': FILE_TOO_LARGE_KNOWN_SIZE_ERROR,\n }\n\n def __init__(self, *args, **kwargs):\n super(WagtailImageField, self).__init__(*args, **kwargs)\n\n self.help_text = IMAGE_FIELD_HELP_TEXT\n\n def check_image_file_format(self, f):\n # Check file extension\n extension = os.path.splitext(f.name)[1].lower()[1:]\n\n if extension not in ALLOWED_EXTENSIONS:\n raise ValidationError(self.error_messages['invalid_image'], code='invalid_image')\n\n if hasattr(f, 'image'):\n # Django 1.8 annotates the file object with the PIL image\n image = f.image\n elif not f.closed:\n # Open image file\n file_position = f.tell()\n f.seek(0)\n\n try:\n image = Image.open(f)\n except IOError:\n # Uploaded file is not even an image file (or corrupted)\n raise ValidationError(self.error_messages['invalid_image_known_format'],\n code='invalid_image_known_format')\n\n f.seek(file_position)\n else:\n # Couldn't get the PIL image, skip checking the internal file format\n return\n\n image_format = extension.upper()\n if image_format == 'JPG':\n image_format = 'JPEG'\n\n internal_image_format = image.format.upper()\n if internal_image_format == 'MPO':\n internal_image_format = 'JPEG'\n\n # Check that the internal format matches the extension\n # It is possible to upload PSD files if their extension is set to jpg, png or gif. This should catch them out\n if internal_image_format != image_format:\n raise ValidationError(self.error_messages['invalid_image_known_format'] % (\n image_format,\n ), code='invalid_image_known_format')\n\n def check_image_file_size(self, f):\n # Upload size checking can be disabled by setting max upload size to None\n if MAX_UPLOAD_SIZE is None:\n return\n\n # Check the filesize\n if f.size > MAX_UPLOAD_SIZE:\n raise ValidationError(self.error_messages['file_too_large'] % (\n filesizeformat(f.size),\n ), code='file_too_large')\n\n def to_python(self, data):\n f = super(WagtailImageField, self).to_python(data)\n\n if f is not None:\n self.check_image_file_size(f)\n self.check_image_file_format(f)\n\n return f\n", "path": "wagtail/wagtailimages/fields.py"}]} | 1,474 | 271 |
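The Wagtail record above reduces to normalising both sides of the extension/format comparison before checking them. A self-contained sketch of that check (the helper name is an assumption for illustration, not Wagtail API):

```python
# Minimal sketch of the reconciliation logic from the patch above.
def formats_match(extension: str, pil_format: str) -> bool:
    expected = extension.upper()
    if expected == "JPG":
        expected = "JPEG"
    detected = pil_format.upper()
    if detected == "MPO":      # multi-picture JPEGs report themselves as MPO
        detected = "JPEG"
    return detected == expected


print(formats_match("jpg", "MPO"))  # True, previously rejected
print(formats_match("jpg", "PSD"))  # False, still caught
```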
gh_patches_debug_14539 | rasdani/github-patches | git_diff | cloudtools__troposphere-1589 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add support for DataLocationResource & TableWithColumnsResource in AWS::LakeFormation::Permissions (2020, Jan 16 update)
waiting for the doc to be updated
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `troposphere/lakeformation.py`
Content:
```
1 # Copyright (c) 2012-2019, Mark Peek <[email protected]>
2 # All rights reserved.
3 #
4 # See LICENSE file for full license.
5 #
6 # *** Do not modify - this file is autogenerated ***
7 # Resource specification version: 5.3.0
8
9
10 from . import AWSObject
11 from . import AWSProperty
12
13
14 class Admins(AWSProperty):
15 props = {
16 }
17
18
19 class DataLakeSettings(AWSObject):
20 resource_type = "AWS::LakeFormation::DataLakeSettings"
21
22 props = {
23 'Admins': (Admins, False),
24 }
25
26
27 class DataLakePrincipal(AWSProperty):
28 props = {
29 'DataLakePrincipalIdentifier': (basestring, False),
30 }
31
32
33 class DatabaseResource(AWSProperty):
34 props = {
35 'Name': (basestring, False),
36 }
37
38
39 class TableResource(AWSProperty):
40 props = {
41 'DatabaseName': (basestring, False),
42 'Name': (basestring, False),
43 }
44
45
46 class Resource(AWSProperty):
47 props = {
48 'DatabaseResource': (DatabaseResource, False),
49 'TableResource': (TableResource, False),
50 }
51
52
53 class Permissions(AWSObject):
54 resource_type = "AWS::LakeFormation::Permissions"
55
56 props = {
57 'DataLakePrincipal': (DataLakePrincipal, True),
58 'Permissions': ([basestring], False),
59 'PermissionsWithGrantOption': ([basestring], False),
60 'Resource': (Resource, True),
61 }
62
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/troposphere/lakeformation.py b/troposphere/lakeformation.py
--- a/troposphere/lakeformation.py
+++ b/troposphere/lakeformation.py
@@ -43,10 +43,33 @@
}
+class DataLocationResource(AWSProperty):
+ props = {
+ 'S3Resource': (basestring, False),
+ }
+
+
+class ColumnWildcard(AWSProperty):
+ props = {
+ 'ExcludedColumnNames': ([basestring], False),
+ }
+
+
+class TableWithColumnsResource(AWSProperty):
+ props = {
+ 'ColumnNames': ([basestring], False),
+ 'ColumnWildcard': (ColumnWildcard, False),
+ 'DatabaseName': (basestring, False),
+ 'Name': (basestring, False),
+ }
+
+
class Resource(AWSProperty):
props = {
'DatabaseResource': (DatabaseResource, False),
+ 'DataLocationResource': (DataLocationResource, False),
'TableResource': (TableResource, False),
+ 'TableWithColumnsResource': (TableWithColumnsResource, False),
}
| {"golden_diff": "diff --git a/troposphere/lakeformation.py b/troposphere/lakeformation.py\n--- a/troposphere/lakeformation.py\n+++ b/troposphere/lakeformation.py\n@@ -43,10 +43,33 @@\n }\n \n \n+class DataLocationResource(AWSProperty):\n+ props = {\n+ 'S3Resource': (basestring, False),\n+ }\n+\n+\n+class ColumnWildcard(AWSProperty):\n+ props = {\n+ 'ExcludedColumnNames': ([basestring], False),\n+ }\n+\n+\n+class TableWithColumnsResource(AWSProperty):\n+ props = {\n+ 'ColumnNames': ([basestring], False),\n+ 'ColumnWildcard': (ColumnWildcard, False),\n+ 'DatabaseName': (basestring, False),\n+ 'Name': (basestring, False),\n+ }\n+\n+\n class Resource(AWSProperty):\n props = {\n 'DatabaseResource': (DatabaseResource, False),\n+ 'DataLocationResource': (DataLocationResource, False),\n 'TableResource': (TableResource, False),\n+ 'TableWithColumnsResource': (TableWithColumnsResource, False),\n }\n", "issue": "Add support for DataLocationResource & TableWithColumnsResource in AWS::LakeFormation::Permissions (2020, Jan 16 update)\nwaiting for the doc to be updated\n", "before_files": [{"content": "# Copyright (c) 2012-2019, Mark Peek <[email protected]>\n# All rights reserved.\n#\n# See LICENSE file for full license.\n#\n# *** Do not modify - this file is autogenerated ***\n# Resource specification version: 5.3.0\n\n\nfrom . import AWSObject\nfrom . import AWSProperty\n\n\nclass Admins(AWSProperty):\n props = {\n }\n\n\nclass DataLakeSettings(AWSObject):\n resource_type = \"AWS::LakeFormation::DataLakeSettings\"\n\n props = {\n 'Admins': (Admins, False),\n }\n\n\nclass DataLakePrincipal(AWSProperty):\n props = {\n 'DataLakePrincipalIdentifier': (basestring, False),\n }\n\n\nclass DatabaseResource(AWSProperty):\n props = {\n 'Name': (basestring, False),\n }\n\n\nclass TableResource(AWSProperty):\n props = {\n 'DatabaseName': (basestring, False),\n 'Name': (basestring, False),\n }\n\n\nclass Resource(AWSProperty):\n props = {\n 'DatabaseResource': (DatabaseResource, False),\n 'TableResource': (TableResource, False),\n }\n\n\nclass Permissions(AWSObject):\n resource_type = \"AWS::LakeFormation::Permissions\"\n\n props = {\n 'DataLakePrincipal': (DataLakePrincipal, True),\n 'Permissions': ([basestring], False),\n 'PermissionsWithGrantOption': ([basestring], False),\n 'Resource': (Resource, True),\n }\n", "path": "troposphere/lakeformation.py"}], "after_files": [{"content": "# Copyright (c) 2012-2019, Mark Peek <[email protected]>\n# All rights reserved.\n#\n# See LICENSE file for full license.\n#\n# *** Do not modify - this file is autogenerated ***\n# Resource specification version: 5.3.0\n\n\nfrom . import AWSObject\nfrom . 
import AWSProperty\n\n\nclass Admins(AWSProperty):\n props = {\n }\n\n\nclass DataLakeSettings(AWSObject):\n resource_type = \"AWS::LakeFormation::DataLakeSettings\"\n\n props = {\n 'Admins': (Admins, False),\n }\n\n\nclass DataLakePrincipal(AWSProperty):\n props = {\n 'DataLakePrincipalIdentifier': (basestring, False),\n }\n\n\nclass DatabaseResource(AWSProperty):\n props = {\n 'Name': (basestring, False),\n }\n\n\nclass TableResource(AWSProperty):\n props = {\n 'DatabaseName': (basestring, False),\n 'Name': (basestring, False),\n }\n\n\nclass DataLocationResource(AWSProperty):\n props = {\n 'S3Resource': (basestring, False),\n }\n\n\nclass ColumnWildcard(AWSProperty):\n props = {\n 'ExcludedColumnNames': ([basestring], False),\n }\n\n\nclass TableWithColumnsResource(AWSProperty):\n props = {\n 'ColumnNames': ([basestring], False),\n 'ColumnWildcard': (ColumnWildcard, False),\n 'DatabaseName': (basestring, False),\n 'Name': (basestring, False),\n }\n\n\nclass Resource(AWSProperty):\n props = {\n 'DatabaseResource': (DatabaseResource, False),\n 'DataLocationResource': (DataLocationResource, False),\n 'TableResource': (TableResource, False),\n 'TableWithColumnsResource': (TableWithColumnsResource, False),\n }\n\n\nclass Permissions(AWSObject):\n resource_type = \"AWS::LakeFormation::Permissions\"\n\n props = {\n 'DataLakePrincipal': (DataLakePrincipal, True),\n 'Permissions': ([basestring], False),\n 'PermissionsWithGrantOption': ([basestring], False),\n 'Resource': (Resource, True),\n }\n", "path": "troposphere/lakeformation.py"}]} | 745 | 252 |
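For the troposphere record above, hypothetical usage of the newly added property classes might look like the following. This assumes the patched `troposphere.lakeformation` module is importable and follows troposphere's usual title-plus-keyword constructor pattern; the resource title, ARN, database, table, and column names are made-up example values:

```python
from troposphere.lakeformation import (
    ColumnWildcard,
    DataLakePrincipal,
    Permissions,
    Resource,
    TableWithColumnsResource,
)

# Grant SELECT on a hypothetical "sales.orders" table, hiding one column.
permissions = Permissions(
    "AnalystOrdersAccess",
    DataLakePrincipal=DataLakePrincipal(
        DataLakePrincipalIdentifier="arn:aws:iam::123456789012:role/analyst"
    ),
    Resource=Resource(
        TableWithColumnsResource=TableWithColumnsResource(
            DatabaseName="sales",
            Name="orders",
            ColumnWildcard=ColumnWildcard(ExcludedColumnNames=["ssn"]),
        )
    ),
    Permissions=["SELECT"],
)

print(permissions.to_dict())
```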
gh_patches_debug_35727 | rasdani/github-patches | git_diff | CTFd__CTFd-319 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Awards are not accounted for in `user.place()`
[`user.place()`][user_place] does not account for awards, while [`get_standings()`][get_standings] does. This causes different ordering between the scoreboard and the team profile. Is this by design?
[user_place]: https://github.com/CTFd/CTFd/blob/master/CTFd/models.py#L200
[get_standings]: https://github.com/CTFd/CTFd/blob/master/CTFd/scoreboard.py#L11
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `CTFd/models.py`
Content:
```
1 import datetime
2 import hashlib
3 import json
4 import netaddr
5 from socket import inet_pton, inet_ntop, AF_INET, AF_INET6
6 from struct import unpack, pack, error as struct_error
7
8 from flask_sqlalchemy import SQLAlchemy
9 from passlib.hash import bcrypt_sha256
10 from sqlalchemy.exc import DatabaseError
11
12
13 def sha512(string):
14 return str(hashlib.sha512(string).hexdigest())
15
16
17 def ip2long(ip):
18 '''Converts a user's IP address into an integer/long'''
19 return int(netaddr.IPAddress(ip))
20
21
22 def long2ip(ip_int):
23 '''Converts a saved integer/long back into an IP address'''
24 return str(netaddr.IPAddress(ip_int))
25
26
27 db = SQLAlchemy()
28
29
30 class Pages(db.Model):
31 id = db.Column(db.Integer, primary_key=True)
32 route = db.Column(db.String(80), unique=True)
33 html = db.Column(db.Text)
34
35 def __init__(self, route, html):
36 self.route = route
37 self.html = html
38
39 def __repr__(self):
40 return "<Pages route {0}>".format(self.route)
41
42
43 class Containers(db.Model):
44 id = db.Column(db.Integer, primary_key=True)
45 name = db.Column(db.String(80))
46 buildfile = db.Column(db.Text)
47
48 def __init__(self, name, buildfile):
49 self.name = name
50 self.buildfile = buildfile
51
52 def __repr__(self):
53 return "<Container ID:(0) {1}>".format(self.id, self.name)
54
55
56 class Challenges(db.Model):
57 id = db.Column(db.Integer, primary_key=True)
58 name = db.Column(db.String(80))
59 description = db.Column(db.Text)
60 max_attempts = db.Column(db.Integer, default=0)
61 value = db.Column(db.Integer)
62 category = db.Column(db.String(80))
63 type = db.Column(db.Integer)
64 hidden = db.Column(db.Boolean)
65
66 def __init__(self, name, description, value, category, type=0):
67 self.name = name
68 self.description = description
69 self.value = value
70 self.category = category
71 self.type = type
72 # self.flags = json.dumps(flags)
73
74 def __repr__(self):
75 return '<chal %r>' % self.name
76
77
78 class Hints(db.Model):
79 id = db.Column(db.Integer, primary_key=True)
80 type = db.Column(db.Integer, default=0)
81 chal = db.Column(db.Integer, db.ForeignKey('challenges.id'))
82 hint = db.Column(db.Text)
83 cost = db.Column(db.Integer, default=0)
84
85 def __init__(self, chal, hint, cost=0, type=0):
86 self.chal = chal
87 self.hint = hint
88 self.cost = cost
89 self.type = type
90
91 def __repr__(self):
92 return '<hint %r>' % self.hint
93
94
95 class Awards(db.Model):
96 id = db.Column(db.Integer, primary_key=True)
97 teamid = db.Column(db.Integer, db.ForeignKey('teams.id'))
98 name = db.Column(db.String(80))
99 description = db.Column(db.Text)
100 date = db.Column(db.DateTime, default=datetime.datetime.utcnow)
101 value = db.Column(db.Integer)
102 category = db.Column(db.String(80))
103 icon = db.Column(db.Text)
104
105 def __init__(self, teamid, name, value):
106 self.teamid = teamid
107 self.name = name
108 self.value = value
109
110 def __repr__(self):
111 return '<award %r>' % self.name
112
113
114 class Tags(db.Model):
115 id = db.Column(db.Integer, primary_key=True)
116 chal = db.Column(db.Integer, db.ForeignKey('challenges.id'))
117 tag = db.Column(db.String(80))
118
119 def __init__(self, chal, tag):
120 self.chal = chal
121 self.tag = tag
122
123 def __repr__(self):
124 return "<Tag {0} for challenge {1}>".format(self.tag, self.chal)
125
126
127 class Files(db.Model):
128 id = db.Column(db.Integer, primary_key=True)
129 chal = db.Column(db.Integer, db.ForeignKey('challenges.id'))
130 location = db.Column(db.Text)
131
132 def __init__(self, chal, location):
133 self.chal = chal
134 self.location = location
135
136 def __repr__(self):
137 return "<File {0} for challenge {1}>".format(self.location, self.chal)
138
139
140 class Keys(db.Model):
141 id = db.Column(db.Integer, primary_key=True)
142 chal = db.Column(db.Integer, db.ForeignKey('challenges.id'))
143 key_type = db.Column(db.Integer)
144 flag = db.Column(db.Text)
145 data = db.Column(db.Text)
146
147 def __init__(self, chal, flag, key_type):
148 self.chal = chal
149 self.flag = flag
150 self.key_type = key_type
151
152 def __repr__(self):
153 return "<Flag {0} for challenge {1}>".format(self.flag, self.chal)
154
155
156 class Teams(db.Model):
157 id = db.Column(db.Integer, primary_key=True)
158 name = db.Column(db.String(128), unique=True)
159 email = db.Column(db.String(128), unique=True)
160 password = db.Column(db.String(128))
161 website = db.Column(db.String(128))
162 affiliation = db.Column(db.String(128))
163 country = db.Column(db.String(32))
164 bracket = db.Column(db.String(32))
165 banned = db.Column(db.Boolean, default=False)
166 verified = db.Column(db.Boolean, default=False)
167 admin = db.Column(db.Boolean, default=False)
168 joined = db.Column(db.DateTime, default=datetime.datetime.utcnow)
169
170 def __init__(self, name, email, password):
171 self.name = name
172 self.email = email
173 self.password = bcrypt_sha256.encrypt(str(password))
174
175 def __repr__(self):
176 return '<team %r>' % self.name
177
178 def score(self, admin=False):
179 score = db.func.sum(Challenges.value).label('score')
180 team = db.session.query(Solves.teamid, score).join(Teams).join(Challenges).filter(Teams.banned == False, Teams.id == self.id)
181 award_score = db.func.sum(Awards.value).label('award_score')
182 award = db.session.query(award_score).filter_by(teamid=self.id)
183
184 if not admin:
185 freeze = Config.query.filter_by(key='freeze').first()
186 if freeze and freeze.value:
187 freeze = int(freeze.value)
188 freeze = datetime.datetime.utcfromtimestamp(freeze)
189 team = team.filter(Solves.date < freeze)
190 award = award.filter(Awards.date < freeze)
191
192 team = team.group_by(Solves.teamid).first()
193 award = award.first()
194
195 if team:
196 return int(team.score or 0) + int(award.award_score or 0)
197 else:
198 return 0
199
200 def place(self, admin=False):
201 score = db.func.sum(Challenges.value).label('score')
202 quickest = db.func.max(Solves.date).label('quickest')
203 teams = db.session.query(Solves.teamid).join(Teams).join(Challenges).filter(Teams.banned == False)
204
205 if not admin:
206 freeze = Config.query.filter_by(key='freeze').first()
207 if freeze and freeze.value:
208 freeze = int(freeze.value)
209 freeze = datetime.datetime.utcfromtimestamp(freeze)
210 teams = teams.filter(Solves.date < freeze)
211
212 teams = teams.group_by(Solves.teamid).order_by(score.desc(), quickest).all()
213
214 # http://codegolf.stackexchange.com/a/4712
215 try:
216 i = teams.index((self.id,)) + 1
217 k = i % 10
218 return "%d%s" % (i, "tsnrhtdd"[(i / 10 % 10 != 1) * (k < 4) * k::4])
219 except ValueError:
220 return 0
221
222
223 class Solves(db.Model):
224 __table_args__ = (db.UniqueConstraint('chalid', 'teamid'), {})
225 id = db.Column(db.Integer, primary_key=True)
226 chalid = db.Column(db.Integer, db.ForeignKey('challenges.id'))
227 teamid = db.Column(db.Integer, db.ForeignKey('teams.id'))
228 ip = db.Column(db.String(46))
229 flag = db.Column(db.Text)
230 date = db.Column(db.DateTime, default=datetime.datetime.utcnow)
231 team = db.relationship('Teams', foreign_keys="Solves.teamid", lazy='joined')
232 chal = db.relationship('Challenges', foreign_keys="Solves.chalid", lazy='joined')
233 # value = db.Column(db.Integer)
234
235 def __init__(self, teamid, chalid, ip, flag):
236 self.ip = ip
237 self.chalid = chalid
238 self.teamid = teamid
239 self.flag = flag
240 # self.value = value
241
242 def __repr__(self):
243 return '<solve {}, {}, {}, {}>'.format(self.teamid, self.chalid, self.ip, self.flag)
244
245
246 class WrongKeys(db.Model):
247 id = db.Column(db.Integer, primary_key=True)
248 chalid = db.Column(db.Integer, db.ForeignKey('challenges.id'))
249 teamid = db.Column(db.Integer, db.ForeignKey('teams.id'))
250 ip = db.Column(db.String(46))
251 date = db.Column(db.DateTime, default=datetime.datetime.utcnow)
252 flag = db.Column(db.Text)
253 chal = db.relationship('Challenges', foreign_keys="WrongKeys.chalid", lazy='joined')
254
255 def __init__(self, teamid, chalid, ip, flag):
256 self.ip = ip
257 self.teamid = teamid
258 self.chalid = chalid
259 self.flag = flag
260
261 def __repr__(self):
262 return '<wrong {}, {}, {}, {}>'.format(self.teamid, self.chalid, self.ip, self.flag)
263
264
265 class Unlocks(db.Model):
266 id = db.Column(db.Integer, primary_key=True)
267 teamid = db.Column(db.Integer, db.ForeignKey('teams.id'))
268 date = db.Column(db.DateTime, default=datetime.datetime.utcnow)
269 itemid = db.Column(db.Integer)
270 model = db.Column(db.String(32))
271
272 def __init__(self, model, teamid, itemid):
273 self.model = model
274 self.teamid = teamid
275 self.itemid = itemid
276
277 def __repr__(self):
278 return '<unlock %r>' % self.teamid
279
280
281 class Tracking(db.Model):
282 id = db.Column(db.Integer, primary_key=True)
283 ip = db.Column(db.String(46))
284 team = db.Column(db.Integer, db.ForeignKey('teams.id'))
285 date = db.Column(db.DateTime, default=datetime.datetime.utcnow)
286
287 def __init__(self, ip, team):
288 self.ip = ip
289 self.team = team
290
291 def __repr__(self):
292 return '<ip %r>' % self.team
293
294
295 class Config(db.Model):
296 id = db.Column(db.Integer, primary_key=True)
297 key = db.Column(db.Text)
298 value = db.Column(db.Text)
299
300 def __init__(self, key, value):
301 self.key = key
302 self.value = value
303
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/CTFd/models.py b/CTFd/models.py
--- a/CTFd/models.py
+++ b/CTFd/models.py
@@ -8,6 +8,7 @@
from flask_sqlalchemy import SQLAlchemy
from passlib.hash import bcrypt_sha256
from sqlalchemy.exc import DatabaseError
+from sqlalchemy.sql.expression import union_all
def sha512(string):
@@ -198,22 +199,44 @@
return 0
def place(self, admin=False):
- score = db.func.sum(Challenges.value).label('score')
- quickest = db.func.max(Solves.date).label('quickest')
- teams = db.session.query(Solves.teamid).join(Teams).join(Challenges).filter(Teams.banned == False)
+ scores = db.session.query(
+ Solves.teamid.label('teamid'),
+ db.func.sum(Challenges.value).label('score'),
+ db.func.max(Solves.date).label('date')
+ ).join(Challenges).group_by(Solves.teamid)
+
+ awards = db.session.query(
+ Awards.teamid.label('teamid'),
+ db.func.sum(Awards.value).label('score'),
+ db.func.max(Awards.date).label('date')
+ ).group_by(Awards.teamid)
if not admin:
freeze = Config.query.filter_by(key='freeze').first()
if freeze and freeze.value:
freeze = int(freeze.value)
freeze = datetime.datetime.utcfromtimestamp(freeze)
- teams = teams.filter(Solves.date < freeze)
+ scores = scores.filter(Solves.date < freeze)
+ awards = awards.filter(Awards.date < freeze)
+
+ results = union_all(scores, awards).alias('results')
+
+ sumscore = db.func.sum(results.columns.score).label('sumscore')
+ quickest = db.func.max(results.columns.date).label('quickest')
+
+ standings_query = db.session.query(results.columns.teamid)\
+ .join(Teams)\
+ .group_by(results.columns.teamid)\
+ .order_by(sumscore.desc(), quickest)
+
+ if not admin:
+ standings_query = standings_query.filter(Teams.banned == False)
- teams = teams.group_by(Solves.teamid).order_by(score.desc(), quickest).all()
+ standings = standings_query.all()
# http://codegolf.stackexchange.com/a/4712
try:
- i = teams.index((self.id,)) + 1
+ i = standings.index((self.id,)) + 1
k = i % 10
return "%d%s" % (i, "tsnrhtdd"[(i / 10 % 10 != 1) * (k < 4) * k::4])
except ValueError:
| {"golden_diff": "diff --git a/CTFd/models.py b/CTFd/models.py\n--- a/CTFd/models.py\n+++ b/CTFd/models.py\n@@ -8,6 +8,7 @@\n from flask_sqlalchemy import SQLAlchemy\n from passlib.hash import bcrypt_sha256\n from sqlalchemy.exc import DatabaseError\n+from sqlalchemy.sql.expression import union_all\n \n \n def sha512(string):\n@@ -198,22 +199,44 @@\n return 0\n \n def place(self, admin=False):\n- score = db.func.sum(Challenges.value).label('score')\n- quickest = db.func.max(Solves.date).label('quickest')\n- teams = db.session.query(Solves.teamid).join(Teams).join(Challenges).filter(Teams.banned == False)\n+ scores = db.session.query(\n+ Solves.teamid.label('teamid'),\n+ db.func.sum(Challenges.value).label('score'),\n+ db.func.max(Solves.date).label('date')\n+ ).join(Challenges).group_by(Solves.teamid)\n+\n+ awards = db.session.query(\n+ Awards.teamid.label('teamid'),\n+ db.func.sum(Awards.value).label('score'),\n+ db.func.max(Awards.date).label('date')\n+ ).group_by(Awards.teamid)\n \n if not admin:\n freeze = Config.query.filter_by(key='freeze').first()\n if freeze and freeze.value:\n freeze = int(freeze.value)\n freeze = datetime.datetime.utcfromtimestamp(freeze)\n- teams = teams.filter(Solves.date < freeze)\n+ scores = scores.filter(Solves.date < freeze)\n+ awards = awards.filter(Awards.date < freeze)\n+\n+ results = union_all(scores, awards).alias('results')\n+\n+ sumscore = db.func.sum(results.columns.score).label('sumscore')\n+ quickest = db.func.max(results.columns.date).label('quickest')\n+\n+ standings_query = db.session.query(results.columns.teamid)\\\n+ .join(Teams)\\\n+ .group_by(results.columns.teamid)\\\n+ .order_by(sumscore.desc(), quickest)\n+\n+ if not admin:\n+ standings_query = standings_query.filter(Teams.banned == False)\n \n- teams = teams.group_by(Solves.teamid).order_by(score.desc(), quickest).all()\n+ standings = standings_query.all()\n \n # http://codegolf.stackexchange.com/a/4712\n try:\n- i = teams.index((self.id,)) + 1\n+ i = standings.index((self.id,)) + 1\n k = i % 10\n return \"%d%s\" % (i, \"tsnrhtdd\"[(i / 10 % 10 != 1) * (k < 4) * k::4])\n except ValueError:\n", "issue": "Awards is not accounted in `user.place()`\n[`user.place()`][user_place] is not accounting awards while [`get_standings()`][get_standings] does. This causes different ordering in scoreboard and team profile. 
Is this by design?\r\n\r\n[user_place]: https://github.com/CTFd/CTFd/blob/master/CTFd/models.py#L200\r\n[get_standings]: https://github.com/CTFd/CTFd/blob/master/CTFd/scoreboard.py#L11\r\n\n", "before_files": [{"content": "import datetime\nimport hashlib\nimport json\nimport netaddr\nfrom socket import inet_pton, inet_ntop, AF_INET, AF_INET6\nfrom struct import unpack, pack, error as struct_error\n\nfrom flask_sqlalchemy import SQLAlchemy\nfrom passlib.hash import bcrypt_sha256\nfrom sqlalchemy.exc import DatabaseError\n\n\ndef sha512(string):\n return str(hashlib.sha512(string).hexdigest())\n\n\ndef ip2long(ip):\n '''Converts a user's IP address into an integer/long'''\n return int(netaddr.IPAddress(ip))\n\n\ndef long2ip(ip_int):\n '''Converts a saved integer/long back into an IP address'''\n return str(netaddr.IPAddress(ip_int))\n\n\ndb = SQLAlchemy()\n\n\nclass Pages(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n route = db.Column(db.String(80), unique=True)\n html = db.Column(db.Text)\n\n def __init__(self, route, html):\n self.route = route\n self.html = html\n\n def __repr__(self):\n return \"<Pages route {0}>\".format(self.route)\n\n\nclass Containers(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(80))\n buildfile = db.Column(db.Text)\n\n def __init__(self, name, buildfile):\n self.name = name\n self.buildfile = buildfile\n\n def __repr__(self):\n return \"<Container ID:(0) {1}>\".format(self.id, self.name)\n\n\nclass Challenges(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(80))\n description = db.Column(db.Text)\n max_attempts = db.Column(db.Integer, default=0)\n value = db.Column(db.Integer)\n category = db.Column(db.String(80))\n type = db.Column(db.Integer)\n hidden = db.Column(db.Boolean)\n\n def __init__(self, name, description, value, category, type=0):\n self.name = name\n self.description = description\n self.value = value\n self.category = category\n self.type = type\n # self.flags = json.dumps(flags)\n\n def __repr__(self):\n return '<chal %r>' % self.name\n\n\nclass Hints(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n type = db.Column(db.Integer, default=0)\n chal = db.Column(db.Integer, db.ForeignKey('challenges.id'))\n hint = db.Column(db.Text)\n cost = db.Column(db.Integer, default=0)\n\n def __init__(self, chal, hint, cost=0, type=0):\n self.chal = chal\n self.hint = hint\n self.cost = cost\n self.type = type\n\n def __repr__(self):\n return '<hint %r>' % self.hint\n\n\nclass Awards(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n teamid = db.Column(db.Integer, db.ForeignKey('teams.id'))\n name = db.Column(db.String(80))\n description = db.Column(db.Text)\n date = db.Column(db.DateTime, default=datetime.datetime.utcnow)\n value = db.Column(db.Integer)\n category = db.Column(db.String(80))\n icon = db.Column(db.Text)\n\n def __init__(self, teamid, name, value):\n self.teamid = teamid\n self.name = name\n self.value = value\n\n def __repr__(self):\n return '<award %r>' % self.name\n\n\nclass Tags(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n chal = db.Column(db.Integer, db.ForeignKey('challenges.id'))\n tag = db.Column(db.String(80))\n\n def __init__(self, chal, tag):\n self.chal = chal\n self.tag = tag\n\n def __repr__(self):\n return \"<Tag {0} for challenge {1}>\".format(self.tag, self.chal)\n\n\nclass Files(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n chal = db.Column(db.Integer, db.ForeignKey('challenges.id'))\n 
location = db.Column(db.Text)\n\n def __init__(self, chal, location):\n self.chal = chal\n self.location = location\n\n def __repr__(self):\n return \"<File {0} for challenge {1}>\".format(self.location, self.chal)\n\n\nclass Keys(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n chal = db.Column(db.Integer, db.ForeignKey('challenges.id'))\n key_type = db.Column(db.Integer)\n flag = db.Column(db.Text)\n data = db.Column(db.Text)\n\n def __init__(self, chal, flag, key_type):\n self.chal = chal\n self.flag = flag\n self.key_type = key_type\n\n def __repr__(self):\n return \"<Flag {0} for challenge {1}>\".format(self.flag, self.chal)\n\n\nclass Teams(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(128), unique=True)\n email = db.Column(db.String(128), unique=True)\n password = db.Column(db.String(128))\n website = db.Column(db.String(128))\n affiliation = db.Column(db.String(128))\n country = db.Column(db.String(32))\n bracket = db.Column(db.String(32))\n banned = db.Column(db.Boolean, default=False)\n verified = db.Column(db.Boolean, default=False)\n admin = db.Column(db.Boolean, default=False)\n joined = db.Column(db.DateTime, default=datetime.datetime.utcnow)\n\n def __init__(self, name, email, password):\n self.name = name\n self.email = email\n self.password = bcrypt_sha256.encrypt(str(password))\n\n def __repr__(self):\n return '<team %r>' % self.name\n\n def score(self, admin=False):\n score = db.func.sum(Challenges.value).label('score')\n team = db.session.query(Solves.teamid, score).join(Teams).join(Challenges).filter(Teams.banned == False, Teams.id == self.id)\n award_score = db.func.sum(Awards.value).label('award_score')\n award = db.session.query(award_score).filter_by(teamid=self.id)\n\n if not admin:\n freeze = Config.query.filter_by(key='freeze').first()\n if freeze and freeze.value:\n freeze = int(freeze.value)\n freeze = datetime.datetime.utcfromtimestamp(freeze)\n team = team.filter(Solves.date < freeze)\n award = award.filter(Awards.date < freeze)\n\n team = team.group_by(Solves.teamid).first()\n award = award.first()\n\n if team:\n return int(team.score or 0) + int(award.award_score or 0)\n else:\n return 0\n\n def place(self, admin=False):\n score = db.func.sum(Challenges.value).label('score')\n quickest = db.func.max(Solves.date).label('quickest')\n teams = db.session.query(Solves.teamid).join(Teams).join(Challenges).filter(Teams.banned == False)\n\n if not admin:\n freeze = Config.query.filter_by(key='freeze').first()\n if freeze and freeze.value:\n freeze = int(freeze.value)\n freeze = datetime.datetime.utcfromtimestamp(freeze)\n teams = teams.filter(Solves.date < freeze)\n\n teams = teams.group_by(Solves.teamid).order_by(score.desc(), quickest).all()\n\n # http://codegolf.stackexchange.com/a/4712\n try:\n i = teams.index((self.id,)) + 1\n k = i % 10\n return \"%d%s\" % (i, \"tsnrhtdd\"[(i / 10 % 10 != 1) * (k < 4) * k::4])\n except ValueError:\n return 0\n\n\nclass Solves(db.Model):\n __table_args__ = (db.UniqueConstraint('chalid', 'teamid'), {})\n id = db.Column(db.Integer, primary_key=True)\n chalid = db.Column(db.Integer, db.ForeignKey('challenges.id'))\n teamid = db.Column(db.Integer, db.ForeignKey('teams.id'))\n ip = db.Column(db.String(46))\n flag = db.Column(db.Text)\n date = db.Column(db.DateTime, default=datetime.datetime.utcnow)\n team = db.relationship('Teams', foreign_keys=\"Solves.teamid\", lazy='joined')\n chal = db.relationship('Challenges', foreign_keys=\"Solves.chalid\", lazy='joined')\n # value = 
db.Column(db.Integer)\n\n def __init__(self, teamid, chalid, ip, flag):\n self.ip = ip\n self.chalid = chalid\n self.teamid = teamid\n self.flag = flag\n # self.value = value\n\n def __repr__(self):\n return '<solve {}, {}, {}, {}>'.format(self.teamid, self.chalid, self.ip, self.flag)\n\n\nclass WrongKeys(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n chalid = db.Column(db.Integer, db.ForeignKey('challenges.id'))\n teamid = db.Column(db.Integer, db.ForeignKey('teams.id'))\n ip = db.Column(db.String(46))\n date = db.Column(db.DateTime, default=datetime.datetime.utcnow)\n flag = db.Column(db.Text)\n chal = db.relationship('Challenges', foreign_keys=\"WrongKeys.chalid\", lazy='joined')\n\n def __init__(self, teamid, chalid, ip, flag):\n self.ip = ip\n self.teamid = teamid\n self.chalid = chalid\n self.flag = flag\n\n def __repr__(self):\n return '<wrong {}, {}, {}, {}>'.format(self.teamid, self.chalid, self.ip, self.flag)\n\n\nclass Unlocks(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n teamid = db.Column(db.Integer, db.ForeignKey('teams.id'))\n date = db.Column(db.DateTime, default=datetime.datetime.utcnow)\n itemid = db.Column(db.Integer)\n model = db.Column(db.String(32))\n\n def __init__(self, model, teamid, itemid):\n self.model = model\n self.teamid = teamid\n self.itemid = itemid\n\n def __repr__(self):\n return '<unlock %r>' % self.teamid\n\n\nclass Tracking(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n ip = db.Column(db.String(46))\n team = db.Column(db.Integer, db.ForeignKey('teams.id'))\n date = db.Column(db.DateTime, default=datetime.datetime.utcnow)\n\n def __init__(self, ip, team):\n self.ip = ip\n self.team = team\n\n def __repr__(self):\n return '<ip %r>' % self.team\n\n\nclass Config(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n key = db.Column(db.Text)\n value = db.Column(db.Text)\n\n def __init__(self, key, value):\n self.key = key\n self.value = value\n", "path": "CTFd/models.py"}], "after_files": [{"content": "import datetime\nimport hashlib\nimport json\nimport netaddr\nfrom socket import inet_pton, inet_ntop, AF_INET, AF_INET6\nfrom struct import unpack, pack, error as struct_error\n\nfrom flask_sqlalchemy import SQLAlchemy\nfrom passlib.hash import bcrypt_sha256\nfrom sqlalchemy.exc import DatabaseError\nfrom sqlalchemy.sql.expression import union_all\n\n\ndef sha512(string):\n return str(hashlib.sha512(string).hexdigest())\n\n\ndef ip2long(ip):\n '''Converts a user's IP address into an integer/long'''\n return int(netaddr.IPAddress(ip))\n\n\ndef long2ip(ip_int):\n '''Converts a saved integer/long back into an IP address'''\n return str(netaddr.IPAddress(ip_int))\n\n\ndb = SQLAlchemy()\n\n\nclass Pages(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n route = db.Column(db.String(80), unique=True)\n html = db.Column(db.Text)\n\n def __init__(self, route, html):\n self.route = route\n self.html = html\n\n def __repr__(self):\n return \"<Pages route {0}>\".format(self.route)\n\n\nclass Containers(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(80))\n buildfile = db.Column(db.Text)\n\n def __init__(self, name, buildfile):\n self.name = name\n self.buildfile = buildfile\n\n def __repr__(self):\n return \"<Container ID:(0) {1}>\".format(self.id, self.name)\n\n\nclass Challenges(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(80))\n description = db.Column(db.Text)\n max_attempts = db.Column(db.Integer, default=0)\n value = 
db.Column(db.Integer)\n category = db.Column(db.String(80))\n type = db.Column(db.Integer)\n hidden = db.Column(db.Boolean)\n\n def __init__(self, name, description, value, category, type=0):\n self.name = name\n self.description = description\n self.value = value\n self.category = category\n self.type = type\n # self.flags = json.dumps(flags)\n\n def __repr__(self):\n return '<chal %r>' % self.name\n\n\nclass Hints(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n type = db.Column(db.Integer, default=0)\n chal = db.Column(db.Integer, db.ForeignKey('challenges.id'))\n hint = db.Column(db.Text)\n cost = db.Column(db.Integer, default=0)\n\n def __init__(self, chal, hint, cost=0, type=0):\n self.chal = chal\n self.hint = hint\n self.cost = cost\n self.type = type\n\n def __repr__(self):\n return '<hint %r>' % self.hint\n\n\nclass Awards(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n teamid = db.Column(db.Integer, db.ForeignKey('teams.id'))\n name = db.Column(db.String(80))\n description = db.Column(db.Text)\n date = db.Column(db.DateTime, default=datetime.datetime.utcnow)\n value = db.Column(db.Integer)\n category = db.Column(db.String(80))\n icon = db.Column(db.Text)\n\n def __init__(self, teamid, name, value):\n self.teamid = teamid\n self.name = name\n self.value = value\n\n def __repr__(self):\n return '<award %r>' % self.name\n\n\nclass Tags(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n chal = db.Column(db.Integer, db.ForeignKey('challenges.id'))\n tag = db.Column(db.String(80))\n\n def __init__(self, chal, tag):\n self.chal = chal\n self.tag = tag\n\n def __repr__(self):\n return \"<Tag {0} for challenge {1}>\".format(self.tag, self.chal)\n\n\nclass Files(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n chal = db.Column(db.Integer, db.ForeignKey('challenges.id'))\n location = db.Column(db.Text)\n\n def __init__(self, chal, location):\n self.chal = chal\n self.location = location\n\n def __repr__(self):\n return \"<File {0} for challenge {1}>\".format(self.location, self.chal)\n\n\nclass Keys(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n chal = db.Column(db.Integer, db.ForeignKey('challenges.id'))\n key_type = db.Column(db.Integer)\n flag = db.Column(db.Text)\n data = db.Column(db.Text)\n\n def __init__(self, chal, flag, key_type):\n self.chal = chal\n self.flag = flag\n self.key_type = key_type\n\n def __repr__(self):\n return \"<Flag {0} for challenge {1}>\".format(self.flag, self.chal)\n\n\nclass Teams(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(128), unique=True)\n email = db.Column(db.String(128), unique=True)\n password = db.Column(db.String(128))\n website = db.Column(db.String(128))\n affiliation = db.Column(db.String(128))\n country = db.Column(db.String(32))\n bracket = db.Column(db.String(32))\n banned = db.Column(db.Boolean, default=False)\n verified = db.Column(db.Boolean, default=False)\n admin = db.Column(db.Boolean, default=False)\n joined = db.Column(db.DateTime, default=datetime.datetime.utcnow)\n\n def __init__(self, name, email, password):\n self.name = name\n self.email = email\n self.password = bcrypt_sha256.encrypt(str(password))\n\n def __repr__(self):\n return '<team %r>' % self.name\n\n def score(self, admin=False):\n score = db.func.sum(Challenges.value).label('score')\n team = db.session.query(Solves.teamid, score).join(Teams).join(Challenges).filter(Teams.banned == False, Teams.id == self.id)\n award_score = db.func.sum(Awards.value).label('award_score')\n 
award = db.session.query(award_score).filter_by(teamid=self.id)\n\n if not admin:\n freeze = Config.query.filter_by(key='freeze').first()\n if freeze and freeze.value:\n freeze = int(freeze.value)\n freeze = datetime.datetime.utcfromtimestamp(freeze)\n team = team.filter(Solves.date < freeze)\n award = award.filter(Awards.date < freeze)\n\n team = team.group_by(Solves.teamid).first()\n award = award.first()\n\n if team:\n return int(team.score or 0) + int(award.award_score or 0)\n else:\n return 0\n\n def place(self, admin=False):\n scores = db.session.query(\n Solves.teamid.label('teamid'),\n db.func.sum(Challenges.value).label('score'),\n db.func.max(Solves.date).label('date')\n ).join(Challenges).group_by(Solves.teamid)\n\n awards = db.session.query(\n Awards.teamid.label('teamid'),\n db.func.sum(Awards.value).label('score'),\n db.func.max(Awards.date).label('date')\n ).group_by(Awards.teamid)\n\n if not admin:\n freeze = Config.query.filter_by(key='freeze').first()\n if freeze and freeze.value:\n freeze = int(freeze.value)\n freeze = datetime.datetime.utcfromtimestamp(freeze)\n scores = scores.filter(Solves.date < freeze)\n awards = awards.filter(Awards.date < freeze)\n\n results = union_all(scores, awards).alias('results')\n\n sumscore = db.func.sum(results.columns.score).label('sumscore')\n quickest = db.func.max(results.columns.date).label('quickest')\n\n standings_query = db.session.query(results.columns.teamid)\\\n .join(Teams)\\\n .group_by(results.columns.teamid)\\\n .order_by(sumscore.desc(), quickest)\n\n if not admin:\n standings_query = standings_query.filter(Teams.banned == False)\n\n standings = standings_query.all()\n\n # http://codegolf.stackexchange.com/a/4712\n try:\n i = standings.index((self.id,)) + 1\n k = i % 10\n return \"%d%s\" % (i, \"tsnrhtdd\"[(i / 10 % 10 != 1) * (k < 4) * k::4])\n except ValueError:\n return 0\n\n\nclass Solves(db.Model):\n __table_args__ = (db.UniqueConstraint('chalid', 'teamid'), {})\n id = db.Column(db.Integer, primary_key=True)\n chalid = db.Column(db.Integer, db.ForeignKey('challenges.id'))\n teamid = db.Column(db.Integer, db.ForeignKey('teams.id'))\n ip = db.Column(db.String(46))\n flag = db.Column(db.Text)\n date = db.Column(db.DateTime, default=datetime.datetime.utcnow)\n team = db.relationship('Teams', foreign_keys=\"Solves.teamid\", lazy='joined')\n chal = db.relationship('Challenges', foreign_keys=\"Solves.chalid\", lazy='joined')\n # value = db.Column(db.Integer)\n\n def __init__(self, teamid, chalid, ip, flag):\n self.ip = ip\n self.chalid = chalid\n self.teamid = teamid\n self.flag = flag\n # self.value = value\n\n def __repr__(self):\n return '<solve {}, {}, {}, {}>'.format(self.teamid, self.chalid, self.ip, self.flag)\n\n\nclass WrongKeys(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n chalid = db.Column(db.Integer, db.ForeignKey('challenges.id'))\n teamid = db.Column(db.Integer, db.ForeignKey('teams.id'))\n ip = db.Column(db.String(46))\n date = db.Column(db.DateTime, default=datetime.datetime.utcnow)\n flag = db.Column(db.Text)\n chal = db.relationship('Challenges', foreign_keys=\"WrongKeys.chalid\", lazy='joined')\n\n def __init__(self, teamid, chalid, ip, flag):\n self.ip = ip\n self.teamid = teamid\n self.chalid = chalid\n self.flag = flag\n\n def __repr__(self):\n return '<wrong {}, {}, {}, {}>'.format(self.teamid, self.chalid, self.ip, self.flag)\n\n\nclass Unlocks(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n teamid = db.Column(db.Integer, db.ForeignKey('teams.id'))\n date = 
db.Column(db.DateTime, default=datetime.datetime.utcnow)\n itemid = db.Column(db.Integer)\n model = db.Column(db.String(32))\n\n def __init__(self, model, teamid, itemid):\n self.model = model\n self.teamid = teamid\n self.itemid = itemid\n\n def __repr__(self):\n return '<unlock %r>' % self.teamid\n\n\nclass Tracking(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n ip = db.Column(db.String(46))\n team = db.Column(db.Integer, db.ForeignKey('teams.id'))\n date = db.Column(db.DateTime, default=datetime.datetime.utcnow)\n\n def __init__(self, ip, team):\n self.ip = ip\n self.team = team\n\n def __repr__(self):\n return '<ip %r>' % self.team\n\n\nclass Config(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n key = db.Column(db.Text)\n value = db.Column(db.Text)\n\n def __init__(self, key, value):\n self.key = key\n self.value = value\n", "path": "CTFd/models.py"}]} | 3,669 | 625 |
gh_patches_debug_31851 | rasdani/github-patches | git_diff | coala__coala-2865 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add more strings to Constants
Our Constants are located in coala/coalib/misc/Constants.py and there we hold options for `TRUE_STRINGS`. I think we should expand these true strings with more options, such as: `yep`, `ja` or even `hell yeah` (who knows what the user might come up with). Feel free to add your own suggestions if you think they fit.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `coalib/misc/Constants.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 import appdirs
4 import os
5 import re
6
7 # Start ignoring PyImportSortBear, PyLintBear as BUS_NAME is imported as a
8 # constant from other files.
9 from coalib import BUS_NAME
10 from coalib import VERSION
11 # Stop ignoring
12
13
14 THIS_IS_A_BUG = ('This is a bug. We are sorry for the inconvenience. '
15 'Please contact the developers for assistance.')
16
17 CRASH_MESSAGE = ('An unknown error occurred. This is a bug. We are '
18 'sorry for the inconvenience. Please contact the '
19 'developers for assistance. During execution of '
20 'coala an exception was raised. This should never '
21 'happen. When asked for, the following information '
22 'may help investigating:')
23
24 VERSION_CONFLICT_MESSAGE = ('There is a conflict in the version of a '
25 'dependency you have installed and the '
26 'requirements of coala. This may be resolved by '
27 'creating a separate virtual environment for '
28 'coala or running `pip install "%s"`. Be aware '
29 'that the latter solution might break other '
30 'python packages that depend on the currently '
31 'installed version.')
32
33 OBJ_NOT_ACCESSIBLE = '{} is not accessible and will be ignored!'
34
35 TRUE_STRINGS = ['1',
36 'on',
37 'y',
38 'yes',
39 'yeah',
40 'sure',
41 'true',
42 'definitely',
43 'yup',
44 'right',
45 'aye',
46 'positive']
47
48 FALSE_STRINGS = ['0',
49 'off',
50 'n',
51 'no',
52 'nope',
53 'nah',
54 'false',
55 'wrong',
56 'none',
57 'nay',
58 'negative']
59
60 # This string contains many unicode characters to challenge tests.
61 COMPLEX_TEST_STRING = ('4 r34l ch4ll3n63: 123 ÄÖü ABc @€¥ §&% {[( ←↓→↑ '
62 'ĦŊħ ß°^ \\\n\u2192')
63
64 # Path to the coalib directory
65 coalib_root = os.path.join(os.path.dirname(__file__),
66 os.path.pardir)
67
68 # Path to the language definition files
69 language_definitions = os.path.join(coalib_root,
70 'bearlib',
71 'languages',
72 'definitions')
73
74 system_coafile = os.path.join(coalib_root, 'default_coafile')
75
76 user_coafile = os.path.join(os.path.expanduser('~'), '.coarc')
77
78 default_coafile = '.coafile'
79
80 USER_DATA_DIR = appdirs.user_data_dir('coala', version=VERSION)
81
82 GLOBBING_SPECIAL_CHARS = '()[]|?*'
83
84 URL_REGEX = re.compile(
85 r'^(?:(?:http|ftp)[s]?://)?' # scheme
86 r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+' # domain name
87 r'(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|'
88 r'localhost|' # OR localhost
89 r'(?:\d{1,3}\.){3}\d{1,3})' # OR an ip
90 r'(?::\d+)?' # optional port number
91 r'(?:/?|[/?]\S+)$', # path
92 re.IGNORECASE)
93
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/coalib/misc/Constants.py b/coalib/misc/Constants.py
--- a/coalib/misc/Constants.py
+++ b/coalib/misc/Constants.py
@@ -34,27 +34,69 @@
TRUE_STRINGS = ['1',
'on',
+ 'okay',
+ 'ok',
+ 'okey-dokey',
'y',
'yes',
'yeah',
+ 'yea',
+ 'ya',
+ 'ye',
+ 'yessir',
'sure',
'true',
+ 'tru',
+ 'uh-huh',
'definitely',
'yup',
+ 'yep',
'right',
'aye',
+ 'alright',
+ 'alrighty',
+ 'hell yeah',
+ 'affirmative',
+ 'certainly',
+ 'definitely',
+ 'absolutely',
+ 'roger',
+ 'righto',
+ 'ja',
+ 'da',
+ 'si',
+ 'oui',
+ 'amen',
+ 'totally',
+ '10-4',
'positive']
FALSE_STRINGS = ['0',
'off',
'n',
'no',
+ 'nix',
'nope',
+ 'nop',
'nah',
+ 'nay',
'false',
+ 'uh-uh',
'wrong',
'none',
'nay',
+ 'hell no',
+ 'fat chance',
+ 'not a chance in hell',
+ 'not in a million years',
+ 'out of the question',
+ 'no siree',
+ 'no way',
+ 'nein',
+ 'njet',
+ 'nee',
+ 'non',
+ 'hakuna',
'negative']
# This string contains many unicode characters to challenge tests.
| {"golden_diff": "diff --git a/coalib/misc/Constants.py b/coalib/misc/Constants.py\n--- a/coalib/misc/Constants.py\n+++ b/coalib/misc/Constants.py\n@@ -34,27 +34,69 @@\n \n TRUE_STRINGS = ['1',\n 'on',\n+ 'okay',\n+ 'ok',\n+ 'okey-dokey',\n 'y',\n 'yes',\n 'yeah',\n+ 'yea',\n+ 'ya',\n+ 'ye',\n+ 'yessir',\n 'sure',\n 'true',\n+ 'tru',\n+ 'uh-huh',\n 'definitely',\n 'yup',\n+ 'yep',\n 'right',\n 'aye',\n+ 'alright',\n+ 'alrighty',\n+ 'hell yeah',\n+ 'affirmative',\n+ 'certainly',\n+ 'definitely',\n+ 'absolutely',\n+ 'roger',\n+ 'righto',\n+ 'ja',\n+ 'da',\n+ 'si',\n+ 'oui',\n+ 'amen',\n+ 'totally',\n+ '10-4',\n 'positive']\n \n FALSE_STRINGS = ['0',\n 'off',\n 'n',\n 'no',\n+ 'nix',\n 'nope',\n+ 'nop',\n 'nah',\n+ 'nay',\n 'false',\n+ 'uh-uh',\n 'wrong',\n 'none',\n 'nay',\n+ 'hell no',\n+ 'fat chance',\n+ 'not a chance in hell',\n+ 'not in a million years',\n+ 'out of the question',\n+ 'no siree',\n+ 'no way',\n+ 'nein',\n+ 'njet',\n+ 'nee',\n+ 'non',\n+ 'hakuna',\n 'negative']\n \n # This string contains many unicode characters to challenge tests.\n", "issue": "Add more strings to Constants\nOur Constants are located in coala/coalib/misc/Constants.py and there we hold options for `TRUE_STRINGS`. I think we should expand these true strings with more options, such as : `yep`,`ja` or even `hell yeah` (who knows what the user might come up with). Feel free to add your own suggestions if you think they fit.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport appdirs\nimport os\nimport re\n\n# Start ignoring PyImportSortBear, PyLintBear as BUS_NAME is imported as a\n# constant from other files.\nfrom coalib import BUS_NAME\nfrom coalib import VERSION\n# Stop ignoring\n\n\nTHIS_IS_A_BUG = ('This is a bug. We are sorry for the inconvenience. '\n 'Please contact the developers for assistance.')\n\nCRASH_MESSAGE = ('An unknown error occurred. This is a bug. We are '\n 'sorry for the inconvenience. Please contact the '\n 'developers for assistance. During execution of '\n 'coala an exception was raised. This should never '\n 'happen. When asked for, the following information '\n 'may help investigating:')\n\nVERSION_CONFLICT_MESSAGE = ('There is a conflict in the version of a '\n 'dependency you have installed and the '\n 'requirements of coala. This may be resolved by '\n 'creating a separate virtual environment for '\n 'coala or running `pip install \"%s\"`. 
Be aware '\n 'that the latter solution might break other '\n 'python packages that depend on the currently '\n 'installed version.')\n\nOBJ_NOT_ACCESSIBLE = '{} is not accessible and will be ignored!'\n\nTRUE_STRINGS = ['1',\n 'on',\n 'y',\n 'yes',\n 'yeah',\n 'sure',\n 'true',\n 'definitely',\n 'yup',\n 'right',\n 'aye',\n 'positive']\n\nFALSE_STRINGS = ['0',\n 'off',\n 'n',\n 'no',\n 'nope',\n 'nah',\n 'false',\n 'wrong',\n 'none',\n 'nay',\n 'negative']\n\n# This string contains many unicode characters to challenge tests.\nCOMPLEX_TEST_STRING = ('4 r34l ch4ll3n63: 123 \u00c4\u00d6\u00fc ABc @\u20ac\u00a5 \u00a7&% {[( \u2190\u2193\u2192\u2191 '\n '\u0126\u014a\u0127 \u00df\u00b0^ \\\\\\n\\u2192')\n\n# Path to the coalib directory\ncoalib_root = os.path.join(os.path.dirname(__file__),\n os.path.pardir)\n\n# Path to the language definition files\nlanguage_definitions = os.path.join(coalib_root,\n 'bearlib',\n 'languages',\n 'definitions')\n\nsystem_coafile = os.path.join(coalib_root, 'default_coafile')\n\nuser_coafile = os.path.join(os.path.expanduser('~'), '.coarc')\n\ndefault_coafile = '.coafile'\n\nUSER_DATA_DIR = appdirs.user_data_dir('coala', version=VERSION)\n\nGLOBBING_SPECIAL_CHARS = '()[]|?*'\n\nURL_REGEX = re.compile(\n r'^(?:(?:http|ftp)[s]?://)?' # scheme\n r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\\.)+' # domain name\n r'(?:[A-Z]{2,6}\\.?|[A-Z0-9-]{2,}\\.?)|'\n r'localhost|' # OR localhost\n r'(?:\\d{1,3}\\.){3}\\d{1,3})' # OR an ip\n r'(?::\\d+)?' # optional port number\n r'(?:/?|[/?]\\S+)$', # path\n re.IGNORECASE)\n", "path": "coalib/misc/Constants.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport appdirs\nimport os\nimport re\n\n# Start ignoring PyImportSortBear, PyLintBear as BUS_NAME is imported as a\n# constant from other files.\nfrom coalib import BUS_NAME\nfrom coalib import VERSION\n# Stop ignoring\n\n\nTHIS_IS_A_BUG = ('This is a bug. We are sorry for the inconvenience. '\n 'Please contact the developers for assistance.')\n\nCRASH_MESSAGE = ('An unknown error occurred. This is a bug. We are '\n 'sorry for the inconvenience. Please contact the '\n 'developers for assistance. During execution of '\n 'coala an exception was raised. This should never '\n 'happen. When asked for, the following information '\n 'may help investigating:')\n\nVERSION_CONFLICT_MESSAGE = ('There is a conflict in the version of a '\n 'dependency you have installed and the '\n 'requirements of coala. This may be resolved by '\n 'creating a separate virtual environment for '\n 'coala or running `pip install \"%s\"`. 
Be aware '\n 'that the latter solution might break other '\n 'python packages that depend on the currently '\n 'installed version.')\n\nOBJ_NOT_ACCESSIBLE = '{} is not accessible and will be ignored!'\n\nTRUE_STRINGS = ['1',\n 'on',\n 'okay',\n 'ok',\n 'okey-dokey',\n 'y',\n 'yes',\n 'yeah',\n 'yea',\n 'ya',\n 'ye',\n 'yessir',\n 'sure',\n 'true',\n 'tru',\n 'uh-huh',\n 'definitely',\n 'yup',\n 'yep',\n 'right',\n 'aye',\n 'alright',\n 'alrighty',\n 'hell yeah',\n 'affirmative',\n 'certainly',\n 'definitely',\n 'absolutely',\n 'roger',\n 'righto',\n 'ja',\n 'da',\n 'si',\n 'oui',\n 'amen',\n 'totally',\n '10-4',\n 'positive']\n\nFALSE_STRINGS = ['0',\n 'off',\n 'n',\n 'no',\n 'nix',\n 'nope',\n 'nop',\n 'nah',\n 'nay',\n 'false',\n 'uh-uh',\n 'wrong',\n 'none',\n 'nay',\n 'hell no',\n 'fat chance',\n 'not a chance in hell',\n 'not in a million years',\n 'out of the question',\n 'no siree',\n 'no way',\n 'nein',\n 'njet',\n 'nee',\n 'non',\n 'hakuna',\n 'negative']\n\n# This string contains many unicode characters to challenge tests.\nCOMPLEX_TEST_STRING = ('4 r34l ch4ll3n63: 123 \u00c4\u00d6\u00fc ABc @\u20ac\u00a5 \u00a7&% {[( \u2190\u2193\u2192\u2191 '\n '\u0126\u014a\u0127 \u00df\u00b0^ \\\\\\n\\u2192')\n\n# Path to the coalib directory\ncoalib_root = os.path.join(os.path.dirname(__file__),\n os.path.pardir)\n\n# Path to the language definition files\nlanguage_definitions = os.path.join(coalib_root,\n 'bearlib',\n 'languages',\n 'definitions')\n\nsystem_coafile = os.path.join(coalib_root, 'default_coafile')\n\nuser_coafile = os.path.join(os.path.expanduser('~'), '.coarc')\n\ndefault_coafile = '.coafile'\n\nUSER_DATA_DIR = appdirs.user_data_dir('coala', version=VERSION)\n\nGLOBBING_SPECIAL_CHARS = '()[]|?*'\n\nURL_REGEX = re.compile(\n r'^(?:(?:http|ftp)[s]?://)?' # scheme\n r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\\.)+' # domain name\n r'(?:[A-Z]{2,6}\\.?|[A-Z0-9-]{2,}\\.?)|'\n r'localhost|' # OR localhost\n r'(?:\\d{1,3}\\.){3}\\d{1,3})' # OR an ip\n r'(?::\\d+)?' # optional port number\n r'(?:/?|[/?]\\S+)$', # path\n re.IGNORECASE)\n", "path": "coalib/misc/Constants.py"}]} | 1,281 | 429 |
gh_patches_debug_37713 | rasdani/github-patches | git_diff | coala__coala-964 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
glob/collecting: Accept strings too
The methods there want a list, but `glob("string")` makes sense too (and currently behaves very strangely); we should support that.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `coalib/collecting/Collectors.py`
Content:
```
1 import os
2
3 from coalib.collecting.Importers import iimport_objects
4 from coalib.misc.Decorators import yield_once
5 from coalib.misc.i18n import _
6 from coalib.parsing.Globbing import iglob
7
8
9 def _yield_if_right_kind(bear_class, kinds):
10 try:
11 if bear_class.kind() in kinds:
12 yield bear_class
13 except NotImplementedError:
14 pass
15
16
17 def _import_bears(file_path, kinds):
18 # recursive imports:
19 for bear_list in iimport_objects(file_path,
20 names='__additional_bears__',
21 types=list):
22 for bear_class in bear_list:
23 for valid_bear_class in _yield_if_right_kind(bear_class, kinds):
24 yield valid_bear_class
25 # normal import
26 for bear_class in iimport_objects(file_path,
27 attributes='kind',
28 local=True):
29 for valid_bear_class in _yield_if_right_kind(bear_class, kinds):
30 yield valid_bear_class
31
32
33 @yield_once
34 def icollect(file_paths):
35 """
36 Evaluate globs in file paths and return all matching files.
37
38 :param file_paths: list of file paths that can include globs
39 :return: iterator that yields paths of all matching files
40 """
41 for file_path in file_paths:
42 for match in iglob(file_path):
43 yield match
44
45
46 def collect_files(file_paths):
47 """
48 Evaluate globs in file paths and return all matching files
49
50 :param file_paths: list of file paths that can include globs
51 :return: list of paths of all matching files
52 """
53 return list(filter(os.path.isfile, icollect(file_paths)))
54
55
56 def collect_dirs(dir_paths):
57 """
58 Evaluate globs in directory paths and return all matching directories
59
60 :param dir_paths: list of file paths that can include globs
61 :return: list of paths of all matching directories
62 """
63 return list(filter(os.path.isdir, icollect(dir_paths)))
64
65
66 @yield_once
67 def icollect_bears(bear_dirs, bear_names, kinds, log_printer):
68 """
69 Collect all bears from bear directories that have a matching kind.
70
71 :param bear_dirs: directories that can contain bears
72 :param bear_names: names of bears
73 :param kinds: list of bear kinds to be collected
74 :param log_printer: log_printer to handle logging
75 :return: iterator that yields bear classes
76 """
77 for bear_dir in filter(os.path.isdir, icollect(bear_dirs)):
78 for bear_name in bear_names:
79 for matching_file in iglob(
80 os.path.join(bear_dir, bear_name + '.py')):
81
82 try:
83 for bear in _import_bears(matching_file, kinds):
84 yield bear
85 except:
86 log_printer.warn(_("Unable to collect bears from {file}. "
87 "Probably the file is malformed or "
88 "the module code raises an exception.")
89 .format(file=matching_file))
90
91
92 def collect_bears(bear_dirs, bear_names, kinds, log_printer):
93 """
94 Collect all bears from bear directories that have a matching kind.
95
96 :param bear_dirs: directories that can contain bears
97 :param bear_names: names of bears
98 :param kinds: list of bear kinds to be collected
99 :param log_printer: log_printer to handle logging
100 :return: list of matching bear classes
101 """
102 return list(icollect_bears(bear_dirs, bear_names, kinds, log_printer))
103
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/coalib/collecting/Collectors.py b/coalib/collecting/Collectors.py
--- a/coalib/collecting/Collectors.py
+++ b/coalib/collecting/Collectors.py
@@ -35,9 +35,12 @@
"""
Evaluate globs in file paths and return all matching files.
- :param file_paths: list of file paths that can include globs
+ :param file_paths: file path or list of such that can include globs
:return: iterator that yields paths of all matching files
"""
+ if isinstance(file_paths, str):
+ file_paths = [file_paths]
+
for file_path in file_paths:
for match in iglob(file_path):
yield match
@@ -47,7 +50,7 @@
"""
Evaluate globs in file paths and return all matching files
- :param file_paths: list of file paths that can include globs
+ :param file_paths: file path or list of such that can include globs
:return: list of paths of all matching files
"""
return list(filter(os.path.isfile, icollect(file_paths)))
@@ -57,7 +60,7 @@
"""
Evaluate globs in directory paths and return all matching directories
- :param dir_paths: list of file paths that can include globs
+ :param dir_paths: file path or list of such that can include globs
:return: list of paths of all matching directories
"""
return list(filter(os.path.isdir, icollect(dir_paths)))
@@ -68,7 +71,7 @@
"""
Collect all bears from bear directories that have a matching kind.
- :param bear_dirs: directories that can contain bears
+ :param bear_dirs: directory name or list of such that can contain bears
:param bear_names: names of bears
:param kinds: list of bear kinds to be collected
:param log_printer: log_printer to handle logging
@@ -93,7 +96,7 @@
"""
Collect all bears from bear directories that have a matching kind.
- :param bear_dirs: directories that can contain bears
+ :param bear_dirs: directory name or list of such that can contain bears
:param bear_names: names of bears
:param kinds: list of bear kinds to be collected
:param log_printer: log_printer to handle logging
| {"golden_diff": "diff --git a/coalib/collecting/Collectors.py b/coalib/collecting/Collectors.py\n--- a/coalib/collecting/Collectors.py\n+++ b/coalib/collecting/Collectors.py\n@@ -35,9 +35,12 @@\n \"\"\"\n Evaluate globs in file paths and return all matching files.\n \n- :param file_paths: list of file paths that can include globs\n+ :param file_paths: file path or list of such that can include globs\n :return: iterator that yields paths of all matching files\n \"\"\"\n+ if isinstance(file_paths, str):\n+ file_paths = [file_paths]\n+\n for file_path in file_paths:\n for match in iglob(file_path):\n yield match\n@@ -47,7 +50,7 @@\n \"\"\"\n Evaluate globs in file paths and return all matching files\n \n- :param file_paths: list of file paths that can include globs\n+ :param file_paths: file path or list of such that can include globs\n :return: list of paths of all matching files\n \"\"\"\n return list(filter(os.path.isfile, icollect(file_paths)))\n@@ -57,7 +60,7 @@\n \"\"\"\n Evaluate globs in directory paths and return all matching directories\n \n- :param dir_paths: list of file paths that can include globs\n+ :param dir_paths: file path or list of such that can include globs\n :return: list of paths of all matching directories\n \"\"\"\n return list(filter(os.path.isdir, icollect(dir_paths)))\n@@ -68,7 +71,7 @@\n \"\"\"\n Collect all bears from bear directories that have a matching kind.\n \n- :param bear_dirs: directories that can contain bears\n+ :param bear_dirs: directory name or list of such that can contain bears\n :param bear_names: names of bears\n :param kinds: list of bear kinds to be collected\n :param log_printer: log_printer to handle logging\n@@ -93,7 +96,7 @@\n \"\"\"\n Collect all bears from bear directories that have a matching kind.\n \n- :param bear_dirs: directories that can contain bears\n+ :param bear_dirs: directory name or list of such that can contain bears\n :param bear_names: names of bears\n :param kinds: list of bear kinds to be collected\n :param log_printer: log_printer to handle logging\n", "issue": "glob/collecting: Accept strings too\nthe methods there want a list but `glob(\"string\")` makes sense too (and behaves very strangely), we should support that.\n\n", "before_files": [{"content": "import os\n\nfrom coalib.collecting.Importers import iimport_objects\nfrom coalib.misc.Decorators import yield_once\nfrom coalib.misc.i18n import _\nfrom coalib.parsing.Globbing import iglob\n\n\ndef _yield_if_right_kind(bear_class, kinds):\n try:\n if bear_class.kind() in kinds:\n yield bear_class\n except NotImplementedError:\n pass\n\n\ndef _import_bears(file_path, kinds):\n # recursive imports:\n for bear_list in iimport_objects(file_path,\n names='__additional_bears__',\n types=list):\n for bear_class in bear_list:\n for valid_bear_class in _yield_if_right_kind(bear_class, kinds):\n yield valid_bear_class\n # normal import\n for bear_class in iimport_objects(file_path,\n attributes='kind',\n local=True):\n for valid_bear_class in _yield_if_right_kind(bear_class, kinds):\n yield valid_bear_class\n\n\n@yield_once\ndef icollect(file_paths):\n \"\"\"\n Evaluate globs in file paths and return all matching files.\n\n :param file_paths: list of file paths that can include globs\n :return: iterator that yields paths of all matching files\n \"\"\"\n for file_path in file_paths:\n for match in iglob(file_path):\n yield match\n\n\ndef collect_files(file_paths):\n \"\"\"\n Evaluate globs in file paths and return all matching files\n\n :param file_paths: list of file paths 
that can include globs\n :return: list of paths of all matching files\n \"\"\"\n return list(filter(os.path.isfile, icollect(file_paths)))\n\n\ndef collect_dirs(dir_paths):\n \"\"\"\n Evaluate globs in directory paths and return all matching directories\n\n :param dir_paths: list of file paths that can include globs\n :return: list of paths of all matching directories\n \"\"\"\n return list(filter(os.path.isdir, icollect(dir_paths)))\n\n\n@yield_once\ndef icollect_bears(bear_dirs, bear_names, kinds, log_printer):\n \"\"\"\n Collect all bears from bear directories that have a matching kind.\n\n :param bear_dirs: directories that can contain bears\n :param bear_names: names of bears\n :param kinds: list of bear kinds to be collected\n :param log_printer: log_printer to handle logging\n :return: iterator that yields bear classes\n \"\"\"\n for bear_dir in filter(os.path.isdir, icollect(bear_dirs)):\n for bear_name in bear_names:\n for matching_file in iglob(\n os.path.join(bear_dir, bear_name + '.py')):\n\n try:\n for bear in _import_bears(matching_file, kinds):\n yield bear\n except:\n log_printer.warn(_(\"Unable to collect bears from {file}. \"\n \"Probably the file is malformed or \"\n \"the module code raises an exception.\")\n .format(file=matching_file))\n\n\ndef collect_bears(bear_dirs, bear_names, kinds, log_printer):\n \"\"\"\n Collect all bears from bear directories that have a matching kind.\n\n :param bear_dirs: directories that can contain bears\n :param bear_names: names of bears\n :param kinds: list of bear kinds to be collected\n :param log_printer: log_printer to handle logging\n :return: list of matching bear classes\n \"\"\"\n return list(icollect_bears(bear_dirs, bear_names, kinds, log_printer))\n", "path": "coalib/collecting/Collectors.py"}], "after_files": [{"content": "import os\n\nfrom coalib.collecting.Importers import iimport_objects\nfrom coalib.misc.Decorators import yield_once\nfrom coalib.misc.i18n import _\nfrom coalib.parsing.Globbing import iglob\n\n\ndef _yield_if_right_kind(bear_class, kinds):\n try:\n if bear_class.kind() in kinds:\n yield bear_class\n except NotImplementedError:\n pass\n\n\ndef _import_bears(file_path, kinds):\n # recursive imports:\n for bear_list in iimport_objects(file_path,\n names='__additional_bears__',\n types=list):\n for bear_class in bear_list:\n for valid_bear_class in _yield_if_right_kind(bear_class, kinds):\n yield valid_bear_class\n # normal import\n for bear_class in iimport_objects(file_path,\n attributes='kind',\n local=True):\n for valid_bear_class in _yield_if_right_kind(bear_class, kinds):\n yield valid_bear_class\n\n\n@yield_once\ndef icollect(file_paths):\n \"\"\"\n Evaluate globs in file paths and return all matching files.\n\n :param file_paths: file path or list of such that can include globs\n :return: iterator that yields paths of all matching files\n \"\"\"\n if isinstance(file_paths, str):\n file_paths = [file_paths]\n\n for file_path in file_paths:\n for match in iglob(file_path):\n yield match\n\n\ndef collect_files(file_paths):\n \"\"\"\n Evaluate globs in file paths and return all matching files\n\n :param file_paths: file path or list of such that can include globs\n :return: list of paths of all matching files\n \"\"\"\n return list(filter(os.path.isfile, icollect(file_paths)))\n\n\ndef collect_dirs(dir_paths):\n \"\"\"\n Evaluate globs in directory paths and return all matching directories\n\n :param dir_paths: file path or list of such that can include globs\n :return: list of paths of all matching 
directories\n \"\"\"\n return list(filter(os.path.isdir, icollect(dir_paths)))\n\n\n@yield_once\ndef icollect_bears(bear_dirs, bear_names, kinds, log_printer):\n \"\"\"\n Collect all bears from bear directories that have a matching kind.\n\n :param bear_dirs: directory name or list of such that can contain bears\n :param bear_names: names of bears\n :param kinds: list of bear kinds to be collected\n :param log_printer: log_printer to handle logging\n :return: iterator that yields bear classes\n \"\"\"\n for bear_dir in filter(os.path.isdir, icollect(bear_dirs)):\n for bear_name in bear_names:\n for matching_file in iglob(\n os.path.join(bear_dir, bear_name + '.py')):\n\n try:\n for bear in _import_bears(matching_file, kinds):\n yield bear\n except:\n log_printer.warn(_(\"Unable to collect bears from {file}. \"\n \"Probably the file is malformed or \"\n \"the module code raises an exception.\")\n .format(file=matching_file))\n\n\ndef collect_bears(bear_dirs, bear_names, kinds, log_printer):\n \"\"\"\n Collect all bears from bear directories that have a matching kind.\n\n :param bear_dirs: directory name or list of such that can contain bears\n :param bear_names: names of bears\n :param kinds: list of bear kinds to be collected\n :param log_printer: log_printer to handle logging\n :return: list of matching bear classes\n \"\"\"\n return list(icollect_bears(bear_dirs, bear_names, kinds, log_printer))\n", "path": "coalib/collecting/Collectors.py"}]} | 1,258 | 553 |
gh_patches_debug_11204 | rasdani/github-patches | git_diff | svthalia__concrexit-1736 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
AttributeError: 'Event' object has no attribute 'title_en'
Sentry Issue: [CONCREXIT-6D](https://sentry.io/organizations/thalia/issues/2465590057/?referrer=github_integration)
```
AttributeError: 'Event' object has no attribute 'title_en'
(9 additional frame(s) were not displayed)
...
File "rest_framework/mixins.py", line 68, in update
self.perform_update(serializer)
File "pizzas/api/v1/viewsets.py", line 105, in perform_update
self._update_payment(
File "pizzas/api/v1/viewsets.py", line 114, in _update_payment
order.payment = create_payment(order, processed_by, payment_type)
File "payments/services.py", line 67, in create_payment
notes=payable.payment_notes,
File "pizzas/payables.py", line 21, in payment_notes
f"Food order by {self.model.member_name} "
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `website/pizzas/payables.py`
Content:
```
1 from django.template.defaultfilters import date
2
3 from payments import Payable, payables
4 from pizzas.models import FoodOrder
5 from pizzas.services import can_change_order
6
7
8 class FoodOrderPayable(Payable):
9 @property
10 def payment_amount(self):
11 return self.model.product.price
12
13 @property
14 def payment_topic(self):
15 start_date = date(self.model.food_event.start, "Y-m-d")
16 return f"Food {self.model.food_event.event.title_en} [{start_date}]"
17
18 @property
19 def payment_notes(self):
20 return (
21 f"Food order by {self.model.member_name} "
22 f"for {self.model.food_event.event.title_en}"
23 )
24
25 @property
26 def payment_payer(self):
27 return self.model.member
28
29 def can_manage_payment(self, member):
30 return can_change_order(member, self.model.food_event)
31
32
33 def register():
34 payables.register(FoodOrder, FoodOrderPayable)
35
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/website/pizzas/payables.py b/website/pizzas/payables.py
--- a/website/pizzas/payables.py
+++ b/website/pizzas/payables.py
@@ -13,13 +13,13 @@
@property
def payment_topic(self):
start_date = date(self.model.food_event.start, "Y-m-d")
- return f"Food {self.model.food_event.event.title_en} [{start_date}]"
+ return f"Food {self.model.food_event.event.title} [{start_date}]"
@property
def payment_notes(self):
return (
f"Food order by {self.model.member_name} "
- f"for {self.model.food_event.event.title_en}"
+ f"for {self.model.food_event.event.title}"
)
@property
| {"golden_diff": "diff --git a/website/pizzas/payables.py b/website/pizzas/payables.py\n--- a/website/pizzas/payables.py\n+++ b/website/pizzas/payables.py\n@@ -13,13 +13,13 @@\n @property\n def payment_topic(self):\n start_date = date(self.model.food_event.start, \"Y-m-d\")\n- return f\"Food {self.model.food_event.event.title_en} [{start_date}]\"\n+ return f\"Food {self.model.food_event.event.title} [{start_date}]\"\n \n @property\n def payment_notes(self):\n return (\n f\"Food order by {self.model.member_name} \"\n- f\"for {self.model.food_event.event.title_en}\"\n+ f\"for {self.model.food_event.event.title}\"\n )\n \n @property\n", "issue": "AttributeError: 'Event' object has no attribute 'title_en'\nSentry Issue: [CONCREXIT-6D](https://sentry.io/organizations/thalia/issues/2465590057/?referrer=github_integration)\n\n```\nAttributeError: 'Event' object has no attribute 'title_en'\n(9 additional frame(s) were not displayed)\n...\n File \"rest_framework/mixins.py\", line 68, in update\n self.perform_update(serializer)\n File \"pizzas/api/v1/viewsets.py\", line 105, in perform_update\n self._update_payment(\n File \"pizzas/api/v1/viewsets.py\", line 114, in _update_payment\n order.payment = create_payment(order, processed_by, payment_type)\n File \"payments/services.py\", line 67, in create_payment\n notes=payable.payment_notes,\n File \"pizzas/payables.py\", line 21, in payment_notes\n f\"Food order by {self.model.member_name} \"\n```\n", "before_files": [{"content": "from django.template.defaultfilters import date\n\nfrom payments import Payable, payables\nfrom pizzas.models import FoodOrder\nfrom pizzas.services import can_change_order\n\n\nclass FoodOrderPayable(Payable):\n @property\n def payment_amount(self):\n return self.model.product.price\n\n @property\n def payment_topic(self):\n start_date = date(self.model.food_event.start, \"Y-m-d\")\n return f\"Food {self.model.food_event.event.title_en} [{start_date}]\"\n\n @property\n def payment_notes(self):\n return (\n f\"Food order by {self.model.member_name} \"\n f\"for {self.model.food_event.event.title_en}\"\n )\n\n @property\n def payment_payer(self):\n return self.model.member\n\n def can_manage_payment(self, member):\n return can_change_order(member, self.model.food_event)\n\n\ndef register():\n payables.register(FoodOrder, FoodOrderPayable)\n", "path": "website/pizzas/payables.py"}], "after_files": [{"content": "from django.template.defaultfilters import date\n\nfrom payments import Payable, payables\nfrom pizzas.models import FoodOrder\nfrom pizzas.services import can_change_order\n\n\nclass FoodOrderPayable(Payable):\n @property\n def payment_amount(self):\n return self.model.product.price\n\n @property\n def payment_topic(self):\n start_date = date(self.model.food_event.start, \"Y-m-d\")\n return f\"Food {self.model.food_event.event.title} [{start_date}]\"\n\n @property\n def payment_notes(self):\n return (\n f\"Food order by {self.model.member_name} \"\n f\"for {self.model.food_event.event.title}\"\n )\n\n @property\n def payment_payer(self):\n return self.model.member\n\n def can_manage_payment(self, member):\n return can_change_order(member, self.model.food_event)\n\n\ndef register():\n payables.register(FoodOrder, FoodOrderPayable)\n", "path": "website/pizzas/payables.py"}]} | 755 | 180 |
gh_patches_debug_25989 | rasdani/github-patches | git_diff | readthedocs__readthedocs.org-5974 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
gitpython sometimes fails when checking for submodules
## Details
I noticed the other week that our builds have actually been failing for a few months, but I'm not sure how to debug it. Locally the docs generate as expected when I run `sphinx-build -b dirhtml . _build/dirhtml`
* Read the Docs project URL: https://readthedocs.org/projects/chassis/
* Build URL (if applicable): https://readthedocs.org/projects/chassis/builds/7462561/
* Read the Docs username (if applicable): BronsonQuick
## Expected Result
A successful build.
## Actual Result
>There was a problem with Read the Docs while building your documentation. Please report this to us with your build id (7462561).
Thanks very much for all your work on this project!
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `readthedocs/projects/exceptions.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 """Project exceptions."""
4
5 from django.conf import settings
6 from django.utils.translation import ugettext_noop as _
7
8 from readthedocs.doc_builder.exceptions import BuildEnvironmentError
9
10
11 class ProjectConfigurationError(BuildEnvironmentError):
12
13 """Error raised trying to configure a project for build."""
14
15 NOT_FOUND = _(
16 'A configuration file was not found. '
17 'Make sure you have a conf.py file in your repository.',
18 )
19
20 MULTIPLE_CONF_FILES = _(
21 'We found more than one conf.py and are not sure which one to use. '
22 'Please, specify the correct file under the Advanced settings tab '
23 "in the project's Admin.",
24 )
25
26
27 class RepositoryError(BuildEnvironmentError):
28
29 """Failure during repository operation."""
30
31 PRIVATE_ALLOWED = _(
32 'There was a problem connecting to your repository, '
33 'ensure that your repository URL is correct.',
34 )
35 PRIVATE_NOT_ALLOWED = _(
36 'There was a problem connecting to your repository, '
37 'ensure that your repository URL is correct and your repository is public. '
38 'Private repositories are not supported.',
39 )
40
41 INVALID_SUBMODULES = _(
42 'One or more submodule URLs are not valid: {}, '
43 'git/ssh URL schemas for submodules are not supported.'
44 )
45 INVALID_SUBMODULES_PATH = _(
46 'One or more submodule paths are not valid. '
47 'Check that all your submodules in .gitmodules are used.'
48 )
49
50 DUPLICATED_RESERVED_VERSIONS = _(
51 'You can not have two versions with the name latest or stable.',
52 )
53
54 FAILED_TO_CHECKOUT = _('Failed to checkout revision: {}')
55
56 def get_default_message(self):
57 if settings.ALLOW_PRIVATE_REPOS:
58 return self.PRIVATE_ALLOWED
59 return self.PRIVATE_NOT_ALLOWED
60
61
62 class ProjectSpamError(Exception):
63
64 """
65 Error raised when a project field has detected spam.
66
67 This error is not raised to users, we use this for banning users in the
68 background.
69 """
70
```
Path: `readthedocs/vcs_support/backends/git.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 """Git-related utilities."""
4
5 import logging
6 import os
7 import re
8
9 import git
10 from django.core.exceptions import ValidationError
11 from git.exc import BadName, InvalidGitRepositoryError
12
13 from readthedocs.builds.constants import EXTERNAL
14 from readthedocs.config import ALL
15 from readthedocs.projects.constants import GITHUB_PR_PULL_PATTERN
16 from readthedocs.projects.exceptions import RepositoryError
17 from readthedocs.projects.validators import validate_submodule_url
18 from readthedocs.vcs_support.base import BaseVCS, VCSVersion
19
20
21 log = logging.getLogger(__name__)
22
23
24 class Backend(BaseVCS):
25
26 """Git VCS backend."""
27
28 supports_tags = True
29 supports_branches = True
30 supports_submodules = True
31 fallback_branch = 'master' # default branch
32 repo_depth = 50
33
34 def __init__(self, *args, **kwargs):
35 super().__init__(*args, **kwargs)
36 self.token = kwargs.get('token', None)
37 self.repo_url = self._get_clone_url()
38
39 def _get_clone_url(self):
40 if '://' in self.repo_url:
41 hacked_url = self.repo_url.split('://')[1]
42 hacked_url = re.sub('.git$', '', hacked_url)
43 clone_url = 'https://%s' % hacked_url
44 if self.token:
45 clone_url = 'https://{}@{}'.format(self.token, hacked_url)
46 return clone_url
47 # Don't edit URL because all hosts aren't the same
48 # else:
49 # clone_url = 'git://%s' % (hacked_url)
50 return self.repo_url
51
52 def set_remote_url(self, url):
53 return self.run('git', 'remote', 'set-url', 'origin', url)
54
55 def update(self):
56 """Clone or update the repository."""
57 super().update()
58 if self.repo_exists():
59 self.set_remote_url(self.repo_url)
60 return self.fetch()
61 self.make_clean_working_dir()
62 # A fetch is always required to get external versions properly
63 if self.version_type == EXTERNAL:
64 self.clone()
65 return self.fetch()
66 return self.clone()
67
68 def repo_exists(self):
69 try:
70 git.Repo(self.working_dir)
71 except InvalidGitRepositoryError:
72 return False
73 return True
74
75 def are_submodules_available(self, config):
76 """Test whether git submodule checkout step should be performed."""
77 # TODO remove this after users migrate to a config file
78 from readthedocs.projects.models import Feature
79 submodules_in_config = (
80 config.submodules.exclude != ALL or config.submodules.include
81 )
82 if (
83 self.project.has_feature(Feature.SKIP_SUBMODULES) or
84 not submodules_in_config
85 ):
86 return False
87
88 # Keep compatibility with previous projects
89 return bool(self.submodules)
90
91 def validate_submodules(self, config):
92 """
93 Returns the submodules and check that its URLs are valid.
94
95 .. note::
96
97 Allways call after `self.are_submodules_available`.
98
99 :returns: tuple(bool, list)
100
101 Returns `True` if all required submodules URLs are valid.
102 Returns a list of all required submodules:
103 - Include is `ALL`, returns all submodules available.
104 - Include is a list, returns just those.
105 - Exclude is `ALL` - this should never happen.
106 - Exlude is a list, returns all available submodules
107 but those from the list.
108
109 Returns `False` if at least one submodule is invalid.
110 Returns the list of invalid submodules.
111 """
112 submodules = {sub.path: sub for sub in self.submodules}
113
114 for sub_path in config.submodules.exclude:
115 path = sub_path.rstrip('/')
116 if path in submodules:
117 del submodules[path]
118
119 if config.submodules.include != ALL and config.submodules.include:
120 submodules_include = {}
121 for sub_path in config.submodules.include:
122 path = sub_path.rstrip('/')
123 submodules_include[path] = submodules[path]
124 submodules = submodules_include
125
126 invalid_submodules = []
127 for path, submodule in submodules.items():
128 try:
129 validate_submodule_url(submodule.url)
130 except ValidationError:
131 invalid_submodules.append(path)
132
133 if invalid_submodules:
134 return False, invalid_submodules
135 return True, submodules.keys()
136
137 def use_shallow_clone(self):
138 """
139 Test whether shallow clone should be performed.
140
141 .. note::
142
143 Temporarily, we support skipping this option as builds that rely on
144 git history can fail if using shallow clones. This should
145 eventually be configurable via the web UI.
146 """
147 from readthedocs.projects.models import Feature
148 return not self.project.has_feature(Feature.DONT_SHALLOW_CLONE)
149
150 def fetch(self):
151 cmd = ['git', 'fetch', 'origin',
152 '--tags', '--prune', '--prune-tags']
153
154 if self.use_shallow_clone():
155 cmd.extend(['--depth', str(self.repo_depth)])
156
157 if (
158 self.verbose_name and
159 self.version_type == EXTERNAL and
160 'github.com' in self.repo_url
161 ):
162 cmd.append(
163 GITHUB_PR_PULL_PATTERN.format(id=self.verbose_name)
164 )
165
166 code, stdout, stderr = self.run(*cmd)
167 if code != 0:
168 raise RepositoryError
169 return code, stdout, stderr
170
171 def checkout_revision(self, revision=None):
172 if not revision:
173 branch = self.default_branch or self.fallback_branch
174 revision = 'origin/%s' % branch
175
176 code, out, err = self.run('git', 'checkout', '--force', revision)
177 if code != 0:
178 raise RepositoryError(
179 RepositoryError.FAILED_TO_CHECKOUT.format(revision),
180 )
181 return [code, out, err]
182
183 def clone(self):
184 """Clones the repository."""
185 cmd = ['git', 'clone', '--no-single-branch']
186
187 if self.use_shallow_clone():
188 cmd.extend(['--depth', str(self.repo_depth)])
189
190 cmd.extend([self.repo_url, '.'])
191
192 code, stdout, stderr = self.run(*cmd)
193 if code != 0:
194 raise RepositoryError
195 return code, stdout, stderr
196
197 @property
198 def tags(self):
199 versions = []
200 repo = git.Repo(self.working_dir)
201 for tag in repo.tags:
202 try:
203 versions.append(VCSVersion(self, str(tag.commit), str(tag)))
204 except ValueError:
205 # ValueError: Cannot resolve commit as tag TAGNAME points to a
206 # blob object - use the `.object` property instead to access it
207 # This is not a real tag for us, so we skip it
208 # https://github.com/rtfd/readthedocs.org/issues/4440
209 log.warning('Git tag skipped: %s', tag, exc_info=True)
210 continue
211 return versions
212
213 @property
214 def branches(self):
215 repo = git.Repo(self.working_dir)
216 versions = []
217 branches = []
218
219 # ``repo.remotes.origin.refs`` returns remote branches
220 if repo.remotes:
221 branches += repo.remotes.origin.refs
222
223 for branch in branches:
224 verbose_name = branch.name
225 if verbose_name.startswith('origin/'):
226 verbose_name = verbose_name.replace('origin/', '')
227 if verbose_name == 'HEAD':
228 continue
229 versions.append(VCSVersion(self, str(branch), verbose_name))
230 return versions
231
232 @property
233 def commit(self):
234 if self.repo_exists():
235 _, stdout, _ = self.run('git', 'rev-parse', 'HEAD')
236 return stdout.strip()
237 return None
238
239 @property
240 def submodules(self):
241 try:
242 repo = git.Repo(self.working_dir)
243 return list(repo.submodules)
244 except InvalidGitRepositoryError:
245 raise RepositoryError(
246 RepositoryError.INVALID_SUBMODULES_PATH,
247 )
248
249 def checkout(self, identifier=None):
250 """Checkout to identifier or latest."""
251 super().checkout()
252 # Find proper identifier
253 if not identifier:
254 identifier = self.default_branch or self.fallback_branch
255
256 identifier = self.find_ref(identifier)
257
258 # Checkout the correct identifier for this branch.
259 code, out, err = self.checkout_revision(identifier)
260 if code != 0:
261 return code, out, err
262
263 # Clean any remains of previous checkouts
264 self.run('git', 'clean', '-d', '-f', '-f')
265 return code, out, err
266
267 def update_submodules(self, config):
268 if self.are_submodules_available(config):
269 valid, submodules = self.validate_submodules(config)
270 if valid:
271 self.checkout_submodules(submodules, config)
272 else:
273 raise RepositoryError(
274 RepositoryError.INVALID_SUBMODULES.format(submodules),
275 )
276
277 def checkout_submodules(self, submodules, config):
278 """Checkout all repository submodules."""
279 self.run('git', 'submodule', 'sync')
280 cmd = [
281 'git',
282 'submodule',
283 'update',
284 '--init',
285 '--force',
286 ]
287 if config.submodules.recursive:
288 cmd.append('--recursive')
289 cmd += submodules
290 self.run(*cmd)
291
292 def find_ref(self, ref):
293 # Check if ref starts with 'origin/'
294 if ref.startswith('origin/'):
295 return ref
296
297 # Check if ref is a branch of the origin remote
298 if self.ref_exists('remotes/origin/' + ref):
299 return 'origin/' + ref
300
301 return ref
302
303 def ref_exists(self, ref):
304 try:
305 r = git.Repo(self.working_dir)
306 if r.commit(ref):
307 return True
308 except (BadName, ValueError):
309 return False
310 return False
311
312 @property
313 def env(self):
314 env = super().env
315 env['GIT_DIR'] = os.path.join(self.working_dir, '.git')
316 # Don't prompt for username, this requires Git 2.3+
317 env['GIT_TERMINAL_PROMPT'] = '0'
318 return env
319
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/readthedocs/projects/exceptions.py b/readthedocs/projects/exceptions.py
--- a/readthedocs/projects/exceptions.py
+++ b/readthedocs/projects/exceptions.py
@@ -37,16 +37,10 @@
'ensure that your repository URL is correct and your repository is public. '
'Private repositories are not supported.',
)
-
INVALID_SUBMODULES = _(
'One or more submodule URLs are not valid: {}, '
'git/ssh URL schemas for submodules are not supported.'
)
- INVALID_SUBMODULES_PATH = _(
- 'One or more submodule paths are not valid. '
- 'Check that all your submodules in .gitmodules are used.'
- )
-
DUPLICATED_RESERVED_VERSIONS = _(
'You can not have two versions with the name latest or stable.',
)
diff --git a/readthedocs/vcs_support/backends/git.py b/readthedocs/vcs_support/backends/git.py
--- a/readthedocs/vcs_support/backends/git.py
+++ b/readthedocs/vcs_support/backends/git.py
@@ -238,13 +238,8 @@
@property
def submodules(self):
- try:
- repo = git.Repo(self.working_dir)
- return list(repo.submodules)
- except InvalidGitRepositoryError:
- raise RepositoryError(
- RepositoryError.INVALID_SUBMODULES_PATH,
- )
+ repo = git.Repo(self.working_dir)
+ return list(repo.submodules)
def checkout(self, identifier=None):
"""Checkout to identifier or latest."""
| {"golden_diff": "diff --git a/readthedocs/projects/exceptions.py b/readthedocs/projects/exceptions.py\n--- a/readthedocs/projects/exceptions.py\n+++ b/readthedocs/projects/exceptions.py\n@@ -37,16 +37,10 @@\n 'ensure that your repository URL is correct and your repository is public. '\n 'Private repositories are not supported.',\n )\n-\n INVALID_SUBMODULES = _(\n 'One or more submodule URLs are not valid: {}, '\n 'git/ssh URL schemas for submodules are not supported.'\n )\n- INVALID_SUBMODULES_PATH = _(\n- 'One or more submodule paths are not valid. '\n- 'Check that all your submodules in .gitmodules are used.'\n- )\n-\n DUPLICATED_RESERVED_VERSIONS = _(\n 'You can not have two versions with the name latest or stable.',\n )\ndiff --git a/readthedocs/vcs_support/backends/git.py b/readthedocs/vcs_support/backends/git.py\n--- a/readthedocs/vcs_support/backends/git.py\n+++ b/readthedocs/vcs_support/backends/git.py\n@@ -238,13 +238,8 @@\n \n @property\n def submodules(self):\n- try:\n- repo = git.Repo(self.working_dir)\n- return list(repo.submodules)\n- except InvalidGitRepositoryError:\n- raise RepositoryError(\n- RepositoryError.INVALID_SUBMODULES_PATH,\n- )\n+ repo = git.Repo(self.working_dir)\n+ return list(repo.submodules)\n \n def checkout(self, identifier=None):\n \"\"\"Checkout to identifier or latest.\"\"\"\n", "issue": "gitpython sometimes fails when checking for submodules\n## Details\r\n\r\nI noticed the other week that our builds have actually been failing for a few months but I'm not sure how to debug it. Locally the docs generate as expected when I run `sphinx-build -b dirhtml . _build/dirhtml`\r\n\r\n* Read the Docs project URL: https://readthedocs.org/projects/chassis/\r\n* Build URL (if applicable): https://readthedocs.org/projects/chassis/builds/7462561/\r\n* Read the Docs username (if applicable): BronsonQuick\r\n\r\n## Expected Result\r\n\r\nA successful build.\r\n\r\n## Actual Result\r\n\r\n>There was a problem with Read the Docs while building your documentation. Please report this to us with your build id (7462561).\r\n\r\nThanks very much for all your work on this project!\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\"Project exceptions.\"\"\"\n\nfrom django.conf import settings\nfrom django.utils.translation import ugettext_noop as _\n\nfrom readthedocs.doc_builder.exceptions import BuildEnvironmentError\n\n\nclass ProjectConfigurationError(BuildEnvironmentError):\n\n \"\"\"Error raised trying to configure a project for build.\"\"\"\n\n NOT_FOUND = _(\n 'A configuration file was not found. '\n 'Make sure you have a conf.py file in your repository.',\n )\n\n MULTIPLE_CONF_FILES = _(\n 'We found more than one conf.py and are not sure which one to use. '\n 'Please, specify the correct file under the Advanced settings tab '\n \"in the project's Admin.\",\n )\n\n\nclass RepositoryError(BuildEnvironmentError):\n\n \"\"\"Failure during repository operation.\"\"\"\n\n PRIVATE_ALLOWED = _(\n 'There was a problem connecting to your repository, '\n 'ensure that your repository URL is correct.',\n )\n PRIVATE_NOT_ALLOWED = _(\n 'There was a problem connecting to your repository, '\n 'ensure that your repository URL is correct and your repository is public. '\n 'Private repositories are not supported.',\n )\n\n INVALID_SUBMODULES = _(\n 'One or more submodule URLs are not valid: {}, '\n 'git/ssh URL schemas for submodules are not supported.'\n )\n INVALID_SUBMODULES_PATH = _(\n 'One or more submodule paths are not valid. 
'\n 'Check that all your submodules in .gitmodules are used.'\n )\n\n DUPLICATED_RESERVED_VERSIONS = _(\n 'You can not have two versions with the name latest or stable.',\n )\n\n FAILED_TO_CHECKOUT = _('Failed to checkout revision: {}')\n\n def get_default_message(self):\n if settings.ALLOW_PRIVATE_REPOS:\n return self.PRIVATE_ALLOWED\n return self.PRIVATE_NOT_ALLOWED\n\n\nclass ProjectSpamError(Exception):\n\n \"\"\"\n Error raised when a project field has detected spam.\n\n This error is not raised to users, we use this for banning users in the\n background.\n \"\"\"\n", "path": "readthedocs/projects/exceptions.py"}, {"content": "# -*- coding: utf-8 -*-\n\n\"\"\"Git-related utilities.\"\"\"\n\nimport logging\nimport os\nimport re\n\nimport git\nfrom django.core.exceptions import ValidationError\nfrom git.exc import BadName, InvalidGitRepositoryError\n\nfrom readthedocs.builds.constants import EXTERNAL\nfrom readthedocs.config import ALL\nfrom readthedocs.projects.constants import GITHUB_PR_PULL_PATTERN\nfrom readthedocs.projects.exceptions import RepositoryError\nfrom readthedocs.projects.validators import validate_submodule_url\nfrom readthedocs.vcs_support.base import BaseVCS, VCSVersion\n\n\nlog = logging.getLogger(__name__)\n\n\nclass Backend(BaseVCS):\n\n \"\"\"Git VCS backend.\"\"\"\n\n supports_tags = True\n supports_branches = True\n supports_submodules = True\n fallback_branch = 'master' # default branch\n repo_depth = 50\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.token = kwargs.get('token', None)\n self.repo_url = self._get_clone_url()\n\n def _get_clone_url(self):\n if '://' in self.repo_url:\n hacked_url = self.repo_url.split('://')[1]\n hacked_url = re.sub('.git$', '', hacked_url)\n clone_url = 'https://%s' % hacked_url\n if self.token:\n clone_url = 'https://{}@{}'.format(self.token, hacked_url)\n return clone_url\n # Don't edit URL because all hosts aren't the same\n # else:\n # clone_url = 'git://%s' % (hacked_url)\n return self.repo_url\n\n def set_remote_url(self, url):\n return self.run('git', 'remote', 'set-url', 'origin', url)\n\n def update(self):\n \"\"\"Clone or update the repository.\"\"\"\n super().update()\n if self.repo_exists():\n self.set_remote_url(self.repo_url)\n return self.fetch()\n self.make_clean_working_dir()\n # A fetch is always required to get external versions properly\n if self.version_type == EXTERNAL:\n self.clone()\n return self.fetch()\n return self.clone()\n\n def repo_exists(self):\n try:\n git.Repo(self.working_dir)\n except InvalidGitRepositoryError:\n return False\n return True\n\n def are_submodules_available(self, config):\n \"\"\"Test whether git submodule checkout step should be performed.\"\"\"\n # TODO remove this after users migrate to a config file\n from readthedocs.projects.models import Feature\n submodules_in_config = (\n config.submodules.exclude != ALL or config.submodules.include\n )\n if (\n self.project.has_feature(Feature.SKIP_SUBMODULES) or\n not submodules_in_config\n ):\n return False\n\n # Keep compatibility with previous projects\n return bool(self.submodules)\n\n def validate_submodules(self, config):\n \"\"\"\n Returns the submodules and check that its URLs are valid.\n\n .. 
note::\n\n Allways call after `self.are_submodules_available`.\n\n :returns: tuple(bool, list)\n\n Returns `True` if all required submodules URLs are valid.\n Returns a list of all required submodules:\n - Include is `ALL`, returns all submodules available.\n - Include is a list, returns just those.\n - Exclude is `ALL` - this should never happen.\n - Exlude is a list, returns all available submodules\n but those from the list.\n\n Returns `False` if at least one submodule is invalid.\n Returns the list of invalid submodules.\n \"\"\"\n submodules = {sub.path: sub for sub in self.submodules}\n\n for sub_path in config.submodules.exclude:\n path = sub_path.rstrip('/')\n if path in submodules:\n del submodules[path]\n\n if config.submodules.include != ALL and config.submodules.include:\n submodules_include = {}\n for sub_path in config.submodules.include:\n path = sub_path.rstrip('/')\n submodules_include[path] = submodules[path]\n submodules = submodules_include\n\n invalid_submodules = []\n for path, submodule in submodules.items():\n try:\n validate_submodule_url(submodule.url)\n except ValidationError:\n invalid_submodules.append(path)\n\n if invalid_submodules:\n return False, invalid_submodules\n return True, submodules.keys()\n\n def use_shallow_clone(self):\n \"\"\"\n Test whether shallow clone should be performed.\n\n .. note::\n\n Temporarily, we support skipping this option as builds that rely on\n git history can fail if using shallow clones. This should\n eventually be configurable via the web UI.\n \"\"\"\n from readthedocs.projects.models import Feature\n return not self.project.has_feature(Feature.DONT_SHALLOW_CLONE)\n\n def fetch(self):\n cmd = ['git', 'fetch', 'origin',\n '--tags', '--prune', '--prune-tags']\n\n if self.use_shallow_clone():\n cmd.extend(['--depth', str(self.repo_depth)])\n\n if (\n self.verbose_name and\n self.version_type == EXTERNAL and\n 'github.com' in self.repo_url\n ):\n cmd.append(\n GITHUB_PR_PULL_PATTERN.format(id=self.verbose_name)\n )\n\n code, stdout, stderr = self.run(*cmd)\n if code != 0:\n raise RepositoryError\n return code, stdout, stderr\n\n def checkout_revision(self, revision=None):\n if not revision:\n branch = self.default_branch or self.fallback_branch\n revision = 'origin/%s' % branch\n\n code, out, err = self.run('git', 'checkout', '--force', revision)\n if code != 0:\n raise RepositoryError(\n RepositoryError.FAILED_TO_CHECKOUT.format(revision),\n )\n return [code, out, err]\n\n def clone(self):\n \"\"\"Clones the repository.\"\"\"\n cmd = ['git', 'clone', '--no-single-branch']\n\n if self.use_shallow_clone():\n cmd.extend(['--depth', str(self.repo_depth)])\n\n cmd.extend([self.repo_url, '.'])\n\n code, stdout, stderr = self.run(*cmd)\n if code != 0:\n raise RepositoryError\n return code, stdout, stderr\n\n @property\n def tags(self):\n versions = []\n repo = git.Repo(self.working_dir)\n for tag in repo.tags:\n try:\n versions.append(VCSVersion(self, str(tag.commit), str(tag)))\n except ValueError:\n # ValueError: Cannot resolve commit as tag TAGNAME points to a\n # blob object - use the `.object` property instead to access it\n # This is not a real tag for us, so we skip it\n # https://github.com/rtfd/readthedocs.org/issues/4440\n log.warning('Git tag skipped: %s', tag, exc_info=True)\n continue\n return versions\n\n @property\n def branches(self):\n repo = git.Repo(self.working_dir)\n versions = []\n branches = []\n\n # ``repo.remotes.origin.refs`` returns remote branches\n if repo.remotes:\n branches += 
repo.remotes.origin.refs\n\n for branch in branches:\n verbose_name = branch.name\n if verbose_name.startswith('origin/'):\n verbose_name = verbose_name.replace('origin/', '')\n if verbose_name == 'HEAD':\n continue\n versions.append(VCSVersion(self, str(branch), verbose_name))\n return versions\n\n @property\n def commit(self):\n if self.repo_exists():\n _, stdout, _ = self.run('git', 'rev-parse', 'HEAD')\n return stdout.strip()\n return None\n\n @property\n def submodules(self):\n try:\n repo = git.Repo(self.working_dir)\n return list(repo.submodules)\n except InvalidGitRepositoryError:\n raise RepositoryError(\n RepositoryError.INVALID_SUBMODULES_PATH,\n )\n\n def checkout(self, identifier=None):\n \"\"\"Checkout to identifier or latest.\"\"\"\n super().checkout()\n # Find proper identifier\n if not identifier:\n identifier = self.default_branch or self.fallback_branch\n\n identifier = self.find_ref(identifier)\n\n # Checkout the correct identifier for this branch.\n code, out, err = self.checkout_revision(identifier)\n if code != 0:\n return code, out, err\n\n # Clean any remains of previous checkouts\n self.run('git', 'clean', '-d', '-f', '-f')\n return code, out, err\n\n def update_submodules(self, config):\n if self.are_submodules_available(config):\n valid, submodules = self.validate_submodules(config)\n if valid:\n self.checkout_submodules(submodules, config)\n else:\n raise RepositoryError(\n RepositoryError.INVALID_SUBMODULES.format(submodules),\n )\n\n def checkout_submodules(self, submodules, config):\n \"\"\"Checkout all repository submodules.\"\"\"\n self.run('git', 'submodule', 'sync')\n cmd = [\n 'git',\n 'submodule',\n 'update',\n '--init',\n '--force',\n ]\n if config.submodules.recursive:\n cmd.append('--recursive')\n cmd += submodules\n self.run(*cmd)\n\n def find_ref(self, ref):\n # Check if ref starts with 'origin/'\n if ref.startswith('origin/'):\n return ref\n\n # Check if ref is a branch of the origin remote\n if self.ref_exists('remotes/origin/' + ref):\n return 'origin/' + ref\n\n return ref\n\n def ref_exists(self, ref):\n try:\n r = git.Repo(self.working_dir)\n if r.commit(ref):\n return True\n except (BadName, ValueError):\n return False\n return False\n\n @property\n def env(self):\n env = super().env\n env['GIT_DIR'] = os.path.join(self.working_dir, '.git')\n # Don't prompt for username, this requires Git 2.3+\n env['GIT_TERMINAL_PROMPT'] = '0'\n return env\n", "path": "readthedocs/vcs_support/backends/git.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\"Project exceptions.\"\"\"\n\nfrom django.conf import settings\nfrom django.utils.translation import ugettext_noop as _\n\nfrom readthedocs.doc_builder.exceptions import BuildEnvironmentError\n\n\nclass ProjectConfigurationError(BuildEnvironmentError):\n\n \"\"\"Error raised trying to configure a project for build.\"\"\"\n\n NOT_FOUND = _(\n 'A configuration file was not found. '\n 'Make sure you have a conf.py file in your repository.',\n )\n\n MULTIPLE_CONF_FILES = _(\n 'We found more than one conf.py and are not sure which one to use. 
'\n 'Please, specify the correct file under the Advanced settings tab '\n \"in the project's Admin.\",\n )\n\n\nclass RepositoryError(BuildEnvironmentError):\n\n \"\"\"Failure during repository operation.\"\"\"\n\n PRIVATE_ALLOWED = _(\n 'There was a problem connecting to your repository, '\n 'ensure that your repository URL is correct.',\n )\n PRIVATE_NOT_ALLOWED = _(\n 'There was a problem connecting to your repository, '\n 'ensure that your repository URL is correct and your repository is public. '\n 'Private repositories are not supported.',\n )\n INVALID_SUBMODULES = _(\n 'One or more submodule URLs are not valid: {}, '\n 'git/ssh URL schemas for submodules are not supported.'\n )\n DUPLICATED_RESERVED_VERSIONS = _(\n 'You can not have two versions with the name latest or stable.',\n )\n\n FAILED_TO_CHECKOUT = _('Failed to checkout revision: {}')\n\n def get_default_message(self):\n if settings.ALLOW_PRIVATE_REPOS:\n return self.PRIVATE_ALLOWED\n return self.PRIVATE_NOT_ALLOWED\n\n\nclass ProjectSpamError(Exception):\n\n \"\"\"\n Error raised when a project field has detected spam.\n\n This error is not raised to users, we use this for banning users in the\n background.\n \"\"\"\n", "path": "readthedocs/projects/exceptions.py"}, {"content": "# -*- coding: utf-8 -*-\n\n\"\"\"Git-related utilities.\"\"\"\n\nimport logging\nimport os\nimport re\n\nimport git\nfrom django.core.exceptions import ValidationError\nfrom git.exc import BadName, InvalidGitRepositoryError\n\nfrom readthedocs.builds.constants import EXTERNAL\nfrom readthedocs.config import ALL\nfrom readthedocs.projects.constants import GITHUB_PR_PULL_PATTERN\nfrom readthedocs.projects.exceptions import RepositoryError\nfrom readthedocs.projects.validators import validate_submodule_url\nfrom readthedocs.vcs_support.base import BaseVCS, VCSVersion\n\n\nlog = logging.getLogger(__name__)\n\n\nclass Backend(BaseVCS):\n\n \"\"\"Git VCS backend.\"\"\"\n\n supports_tags = True\n supports_branches = True\n supports_submodules = True\n fallback_branch = 'master' # default branch\n repo_depth = 50\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.token = kwargs.get('token', None)\n self.repo_url = self._get_clone_url()\n\n def _get_clone_url(self):\n if '://' in self.repo_url:\n hacked_url = self.repo_url.split('://')[1]\n hacked_url = re.sub('.git$', '', hacked_url)\n clone_url = 'https://%s' % hacked_url\n if self.token:\n clone_url = 'https://{}@{}'.format(self.token, hacked_url)\n return clone_url\n # Don't edit URL because all hosts aren't the same\n # else:\n # clone_url = 'git://%s' % (hacked_url)\n return self.repo_url\n\n def set_remote_url(self, url):\n return self.run('git', 'remote', 'set-url', 'origin', url)\n\n def update(self):\n \"\"\"Clone or update the repository.\"\"\"\n super().update()\n if self.repo_exists():\n self.set_remote_url(self.repo_url)\n return self.fetch()\n self.make_clean_working_dir()\n # A fetch is always required to get external versions properly\n if self.version_type == EXTERNAL:\n self.clone()\n return self.fetch()\n return self.clone()\n\n def repo_exists(self):\n try:\n git.Repo(self.working_dir)\n except InvalidGitRepositoryError:\n return False\n return True\n\n def are_submodules_available(self, config):\n \"\"\"Test whether git submodule checkout step should be performed.\"\"\"\n # TODO remove this after users migrate to a config file\n from readthedocs.projects.models import Feature\n submodules_in_config = (\n config.submodules.exclude != ALL or 
config.submodules.include\n )\n if (\n self.project.has_feature(Feature.SKIP_SUBMODULES) or\n not submodules_in_config\n ):\n return False\n\n # Keep compatibility with previous projects\n return bool(self.submodules)\n\n def validate_submodules(self, config):\n \"\"\"\n Returns the submodules and check that its URLs are valid.\n\n .. note::\n\n Allways call after `self.are_submodules_available`.\n\n :returns: tuple(bool, list)\n\n Returns `True` if all required submodules URLs are valid.\n Returns a list of all required submodules:\n - Include is `ALL`, returns all submodules available.\n - Include is a list, returns just those.\n - Exclude is `ALL` - this should never happen.\n - Exlude is a list, returns all available submodules\n but those from the list.\n\n Returns `False` if at least one submodule is invalid.\n Returns the list of invalid submodules.\n \"\"\"\n submodules = {sub.path: sub for sub in self.submodules}\n\n for sub_path in config.submodules.exclude:\n path = sub_path.rstrip('/')\n if path in submodules:\n del submodules[path]\n\n if config.submodules.include != ALL and config.submodules.include:\n submodules_include = {}\n for sub_path in config.submodules.include:\n path = sub_path.rstrip('/')\n submodules_include[path] = submodules[path]\n submodules = submodules_include\n\n invalid_submodules = []\n for path, submodule in submodules.items():\n try:\n validate_submodule_url(submodule.url)\n except ValidationError:\n invalid_submodules.append(path)\n\n if invalid_submodules:\n return False, invalid_submodules\n return True, submodules.keys()\n\n def use_shallow_clone(self):\n \"\"\"\n Test whether shallow clone should be performed.\n\n .. note::\n\n Temporarily, we support skipping this option as builds that rely on\n git history can fail if using shallow clones. 
This should\n eventually be configurable via the web UI.\n \"\"\"\n from readthedocs.projects.models import Feature\n return not self.project.has_feature(Feature.DONT_SHALLOW_CLONE)\n\n def fetch(self):\n cmd = ['git', 'fetch', 'origin',\n '--tags', '--prune', '--prune-tags']\n\n if self.use_shallow_clone():\n cmd.extend(['--depth', str(self.repo_depth)])\n\n if (\n self.verbose_name and\n self.version_type == EXTERNAL and\n 'github.com' in self.repo_url\n ):\n cmd.append(\n GITHUB_PR_PULL_PATTERN.format(id=self.verbose_name)\n )\n\n code, stdout, stderr = self.run(*cmd)\n if code != 0:\n raise RepositoryError\n return code, stdout, stderr\n\n def checkout_revision(self, revision=None):\n if not revision:\n branch = self.default_branch or self.fallback_branch\n revision = 'origin/%s' % branch\n\n code, out, err = self.run('git', 'checkout', '--force', revision)\n if code != 0:\n raise RepositoryError(\n RepositoryError.FAILED_TO_CHECKOUT.format(revision),\n )\n return [code, out, err]\n\n def clone(self):\n \"\"\"Clones the repository.\"\"\"\n cmd = ['git', 'clone', '--no-single-branch']\n\n if self.use_shallow_clone():\n cmd.extend(['--depth', str(self.repo_depth)])\n\n cmd.extend([self.repo_url, '.'])\n\n code, stdout, stderr = self.run(*cmd)\n if code != 0:\n raise RepositoryError\n return code, stdout, stderr\n\n @property\n def tags(self):\n versions = []\n repo = git.Repo(self.working_dir)\n for tag in repo.tags:\n try:\n versions.append(VCSVersion(self, str(tag.commit), str(tag)))\n except ValueError:\n # ValueError: Cannot resolve commit as tag TAGNAME points to a\n # blob object - use the `.object` property instead to access it\n # This is not a real tag for us, so we skip it\n # https://github.com/rtfd/readthedocs.org/issues/4440\n log.warning('Git tag skipped: %s', tag, exc_info=True)\n continue\n return versions\n\n @property\n def branches(self):\n repo = git.Repo(self.working_dir)\n versions = []\n branches = []\n\n # ``repo.remotes.origin.refs`` returns remote branches\n if repo.remotes:\n branches += repo.remotes.origin.refs\n\n for branch in branches:\n verbose_name = branch.name\n if verbose_name.startswith('origin/'):\n verbose_name = verbose_name.replace('origin/', '')\n if verbose_name == 'HEAD':\n continue\n versions.append(VCSVersion(self, str(branch), verbose_name))\n return versions\n\n @property\n def commit(self):\n if self.repo_exists():\n _, stdout, _ = self.run('git', 'rev-parse', 'HEAD')\n return stdout.strip()\n return None\n\n @property\n def submodules(self):\n repo = git.Repo(self.working_dir)\n return list(repo.submodules)\n\n def checkout(self, identifier=None):\n \"\"\"Checkout to identifier or latest.\"\"\"\n super().checkout()\n # Find proper identifier\n if not identifier:\n identifier = self.default_branch or self.fallback_branch\n\n identifier = self.find_ref(identifier)\n\n # Checkout the correct identifier for this branch.\n code, out, err = self.checkout_revision(identifier)\n if code != 0:\n return code, out, err\n\n # Clean any remains of previous checkouts\n self.run('git', 'clean', '-d', '-f', '-f')\n return code, out, err\n\n def update_submodules(self, config):\n if self.are_submodules_available(config):\n valid, submodules = self.validate_submodules(config)\n if valid:\n self.checkout_submodules(submodules, config)\n else:\n raise RepositoryError(\n RepositoryError.INVALID_SUBMODULES.format(submodules),\n )\n\n def checkout_submodules(self, submodules, config):\n \"\"\"Checkout all repository submodules.\"\"\"\n self.run('git', 
'submodule', 'sync')\n cmd = [\n 'git',\n 'submodule',\n 'update',\n '--init',\n '--force',\n ]\n if config.submodules.recursive:\n cmd.append('--recursive')\n cmd += submodules\n self.run(*cmd)\n\n def find_ref(self, ref):\n # Check if ref starts with 'origin/'\n if ref.startswith('origin/'):\n return ref\n\n # Check if ref is a branch of the origin remote\n if self.ref_exists('remotes/origin/' + ref):\n return 'origin/' + ref\n\n return ref\n\n def ref_exists(self, ref):\n try:\n r = git.Repo(self.working_dir)\n if r.commit(ref):\n return True\n except (BadName, ValueError):\n return False\n return False\n\n @property\n def env(self):\n env = super().env\n env['GIT_DIR'] = os.path.join(self.working_dir, '.git')\n # Don't prompt for username, this requires Git 2.3+\n env['GIT_TERMINAL_PROMPT'] = '0'\n return env\n", "path": "readthedocs/vcs_support/backends/git.py"}]} | 4,078 | 354 |
gh_patches_debug_21261 | rasdani/github-patches | git_diff | cookiecutter__cookiecutter-62 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
UnicodeEncodeError when the default value is used and contains non-ASCII characters.
The error occurs when the user accepts the default unicode string.
Code:
```
if PY3:
cookiecutter_dict[key] = new_val
else:
cookiecutter_dict[key] = new_val.decode('utf-8')
```
Everything is okay in Python 3, but `new_val` is already unicode in 2.x.
--- END ISSUE ---
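The failure is easy to reproduce outside cookiecutter. A minimal sketch, assuming Python 2.7 and a hypothetical non-ASCII default such as `u'café'`: calling `.decode('utf-8')` on a value that is already `unicode` makes Python 2 first encode it with the implicit ASCII codec, which is exactly where the `UnicodeEncodeError` comes from.
```python
# -*- coding: utf-8 -*-
# Python 2.7 repro sketch; the default value below is hypothetical.
default = u'café'            # non-ASCII default, already a unicode object
new_val = default            # user pressed <Enter>, so the default is kept as-is
try:
    new_val.decode('utf-8')  # Py2 implicitly encodes unicode with the ASCII codec first
except UnicodeEncodeError as exc:
    print('reproduced: %s' % exc)
```
This is why the fix needs to decode the raw `raw_input()` bytes on Python 2 instead of re-decoding a default that is already unicode.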
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cookiecutter/prompt.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3
4 """
5 cookiecutter.prompt
6 ---------------------
7
8 Functions for prompting the user for project info.
9 """
10
11 from __future__ import unicode_literals
12 import sys
13
14 PY3 = sys.version > '3'
15 if PY3:
16 iteritems = lambda d: iter(d.items())
17 else:
18 input = raw_input
19 iteritems = lambda d: d.iteritems()
20
21 def prompt_for_config(context):
22 """
23 Prompts the user to enter new config, using context as a source for the
24 field names and sample values.
25 """
26 cookiecutter_dict = {}
27
28 for key, val in iteritems(context['cookiecutter']):
29 prompt = "{0} (default is \"{1}\")? ".format(key, val)
30 new_val = input(prompt.encode('utf-8'))
31 new_val = new_val.strip()
32
33 if new_val == '':
34 new_val = val
35
36 if PY3:
37 cookiecutter_dict[key] = new_val
38 else:
39 cookiecutter_dict[key] = new_val.decode('utf-8')
40 return cookiecutter_dict
41
42
43 def query_yes_no(question, default="yes"):
44 """
45 Ask a yes/no question via `raw_input()` and return their answer.
46
47 :param question: A string that is presented to the user.
48 :param default: The presumed answer if the user just hits <Enter>.
49 It must be "yes" (the default), "no" or None (meaning
50 an answer is required of the user).
51
52 The "answer" return value is one of "yes" or "no".
53
54 Adapted from
55 http://stackoverflow.com/questions/3041986/python-command-line-yes-no-input
56 http://code.activestate.com/recipes/577058/
57
58 """
59 valid = {"yes": True, "y": True, "ye": True, "no": False, "n": False}
60 if default is None:
61 prompt = " [y/n] "
62 elif default == "yes":
63 prompt = " [Y/n] "
64 elif default == "no":
65 prompt = " [y/N] "
66 else:
67 raise ValueError("invalid default answer: '%s'" % default)
68
69 while True:
70 sys.stdout.write(question + prompt)
71 choice = input().lower()
72
73 if default is not None and choice == '':
74 return valid[default]
75 elif choice in valid:
76 return valid[choice]
77 else:
78 sys.stdout.write("Please respond with 'yes' or 'no' "
79 "(or 'y' or 'n').\n")
80
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/cookiecutter/prompt.py b/cookiecutter/prompt.py
--- a/cookiecutter/prompt.py
+++ b/cookiecutter/prompt.py
@@ -18,6 +18,7 @@
input = raw_input
iteritems = lambda d: d.iteritems()
+
def prompt_for_config(context):
"""
Prompts the user to enter new config, using context as a source for the
@@ -27,16 +28,18 @@
for key, val in iteritems(context['cookiecutter']):
prompt = "{0} (default is \"{1}\")? ".format(key, val)
- new_val = input(prompt.encode('utf-8'))
+
+ if PY3:
+ new_val = input(prompt.encode('utf-8'))
+ else:
+ new_val = input(prompt.encode('utf-8')).decode('utf-8')
+
new_val = new_val.strip()
if new_val == '':
new_val = val
- if PY3:
- cookiecutter_dict[key] = new_val
- else:
- cookiecutter_dict[key] = new_val.decode('utf-8')
+ cookiecutter_dict[key] = new_val
return cookiecutter_dict
| {"golden_diff": "diff --git a/cookiecutter/prompt.py b/cookiecutter/prompt.py\n--- a/cookiecutter/prompt.py\n+++ b/cookiecutter/prompt.py\n@@ -18,6 +18,7 @@\n input = raw_input\n iteritems = lambda d: d.iteritems()\n \n+\n def prompt_for_config(context):\n \"\"\"\n Prompts the user to enter new config, using context as a source for the\n@@ -27,16 +28,18 @@\n \n for key, val in iteritems(context['cookiecutter']):\n prompt = \"{0} (default is \\\"{1}\\\")? \".format(key, val)\n- new_val = input(prompt.encode('utf-8'))\n+\n+ if PY3:\n+ new_val = input(prompt.encode('utf-8'))\n+ else:\n+ new_val = input(prompt.encode('utf-8')).decode('utf-8')\n+\n new_val = new_val.strip()\n \n if new_val == '':\n new_val = val\n \n- if PY3:\n- cookiecutter_dict[key] = new_val\n- else:\n- cookiecutter_dict[key] = new_val.decode('utf-8')\n+ cookiecutter_dict[key] = new_val\n return cookiecutter_dict\n", "issue": "UnicodeEncodeError when the defualt value is used and contains non ascii characters.\nError occurs when the user uses the default unicode string.\n\nCode:\n\n```\nif PY3:\n cookiecutter_dict[key] = new_val\nelse:\n cookiecutter_dict[key] = new_val.decode('utf-8')\n```\n\nEverything is okay in Python 3, but `new_val` is already unicode in 2.x.\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\ncookiecutter.prompt\n---------------------\n\nFunctions for prompting the user for project info.\n\"\"\"\n\nfrom __future__ import unicode_literals\nimport sys\n\nPY3 = sys.version > '3'\nif PY3:\n iteritems = lambda d: iter(d.items())\nelse:\n input = raw_input\n iteritems = lambda d: d.iteritems()\n\ndef prompt_for_config(context):\n \"\"\"\n Prompts the user to enter new config, using context as a source for the\n field names and sample values.\n \"\"\"\n cookiecutter_dict = {}\n\n for key, val in iteritems(context['cookiecutter']):\n prompt = \"{0} (default is \\\"{1}\\\")? 
\".format(key, val)\n new_val = input(prompt.encode('utf-8'))\n new_val = new_val.strip()\n\n if new_val == '':\n new_val = val\n\n if PY3:\n cookiecutter_dict[key] = new_val\n else:\n cookiecutter_dict[key] = new_val.decode('utf-8')\n return cookiecutter_dict\n\n\ndef query_yes_no(question, default=\"yes\"):\n \"\"\"\n Ask a yes/no question via `raw_input()` and return their answer.\n\n :param question: A string that is presented to the user.\n :param default: The presumed answer if the user just hits <Enter>.\n It must be \"yes\" (the default), \"no\" or None (meaning\n an answer is required of the user).\n\n The \"answer\" return value is one of \"yes\" or \"no\".\n\n Adapted from\n http://stackoverflow.com/questions/3041986/python-command-line-yes-no-input\n http://code.activestate.com/recipes/577058/\n\n \"\"\"\n valid = {\"yes\": True, \"y\": True, \"ye\": True, \"no\": False, \"n\": False}\n if default is None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n sys.stdout.write(question + prompt)\n choice = input().lower()\n\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\n \"(or 'y' or 'n').\\n\")\n", "path": "cookiecutter/prompt.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\ncookiecutter.prompt\n---------------------\n\nFunctions for prompting the user for project info.\n\"\"\"\n\nfrom __future__ import unicode_literals\nimport sys\n\nPY3 = sys.version > '3'\nif PY3:\n iteritems = lambda d: iter(d.items())\nelse:\n input = raw_input\n iteritems = lambda d: d.iteritems()\n\n\ndef prompt_for_config(context):\n \"\"\"\n Prompts the user to enter new config, using context as a source for the\n field names and sample values.\n \"\"\"\n cookiecutter_dict = {}\n\n for key, val in iteritems(context['cookiecutter']):\n prompt = \"{0} (default is \\\"{1}\\\")? 
\".format(key, val)\n\n if PY3:\n new_val = input(prompt.encode('utf-8'))\n else:\n new_val = input(prompt.encode('utf-8')).decode('utf-8')\n\n new_val = new_val.strip()\n\n if new_val == '':\n new_val = val\n\n cookiecutter_dict[key] = new_val\n return cookiecutter_dict\n\n\ndef query_yes_no(question, default=\"yes\"):\n \"\"\"\n Ask a yes/no question via `raw_input()` and return their answer.\n\n :param question: A string that is presented to the user.\n :param default: The presumed answer if the user just hits <Enter>.\n It must be \"yes\" (the default), \"no\" or None (meaning\n an answer is required of the user).\n\n The \"answer\" return value is one of \"yes\" or \"no\".\n\n Adapted from\n http://stackoverflow.com/questions/3041986/python-command-line-yes-no-input\n http://code.activestate.com/recipes/577058/\n\n \"\"\"\n valid = {\"yes\": True, \"y\": True, \"ye\": True, \"no\": False, \"n\": False}\n if default is None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n sys.stdout.write(question + prompt)\n choice = input().lower()\n\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\n \"(or 'y' or 'n').\\n\")\n", "path": "cookiecutter/prompt.py"}]} | 1,071 | 281 |
gh_patches_debug_36110 | rasdani/github-patches | git_diff | DataDog__dd-agent-3643 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Missing info on fixing: Using events for service checks is deprecated in favor of monitors
It seems that you deprecated a feature without putting any information regarding how to fix it. I even googled and the only reference is the source code where you added this warning.
Please provide some information regarding what changes we are supposed to implement in order not to get these warnings.
These warnings are showing in the UI:
https://www.dropbox.com/s/xpjpmgqtk71hqze/Screenshot%202014-09-24%2015.43.22.png?dl=0
--- END ISSUE ---
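Until the event path is removed, the warning is gated on the per-instance `skip_event` flag read in `_process_results()` below, so enabling that flag on each instance and alerting through monitors on the reported service checks avoids the warning. A minimal stand-alone sketch of that gate — `_is_affirmative` is simplified here and the instance values are hypothetical, not taken from the report:
```python
def _is_affirmative(value):
    # simplified stand-in for the agent's config._is_affirmative helper
    return str(value).lower() in ('1', 'true', 'yes')

# hypothetical instance entry as it would appear in a check's YAML config
instance = {'name': 'my-endpoint', 'skip_event': True}

if _is_affirmative(instance.get('skip_event', False)):
    print('service check only: no event, no deprecation warning')
else:
    print('legacy path: deprecation warning + servicecheck.state_change event')
```
With `skip_event` enabled, alerting moves entirely to monitors built on the service checks, which is also the direction the eventual patch takes by dropping the event-producing branch.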
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `checks/network_checks.py`
Content:
```
1 # (C) Datadog, Inc. 2010-2016
2 # All rights reserved
3 # Licensed under Simplified BSD License (see LICENSE)
4
5 # stdlib
6 from collections import defaultdict
7 from Queue import Empty, Queue
8 import threading
9 import time
10
11 # project
12 from checks import AgentCheck
13 from checks.libs.thread_pool import Pool
14 from config import _is_affirmative
15
16 TIMEOUT = 180
17 DEFAULT_SIZE_POOL = 6
18 MAX_LOOP_ITERATIONS = 1000
19 FAILURE = "FAILURE"
20
21 class Status:
22 DOWN = "DOWN"
23 WARNING = "WARNING"
24 CRITICAL = "CRITICAL"
25 UP = "UP"
26
27
28 class EventType:
29 DOWN = "servicecheck.state_change.down"
30 UP = "servicecheck.state_change.up"
31
32
33 class NetworkCheck(AgentCheck):
34 SOURCE_TYPE_NAME = 'servicecheck'
35 SERVICE_CHECK_PREFIX = 'network_check'
36 _global_current_pool_size = 0
37
38 STATUS_TO_SERVICE_CHECK = {
39 Status.UP : AgentCheck.OK,
40 Status.WARNING : AgentCheck.WARNING,
41 Status.CRITICAL : AgentCheck.CRITICAL,
42 Status.DOWN : AgentCheck.CRITICAL,
43 }
44
45 """
46 Services checks inherits from this class.
47 This class should never be directly instanciated.
48
49 Work flow:
50 The main agent loop will call the check function for each instance for
51 each iteration of the loop.
52 The check method will make an asynchronous call to the _process method in
53 one of the thread initiated in the thread pool created in this class constructor.
54 The _process method will call the _check method of the inherited class
55 which will perform the actual check.
56
57 The _check method must return a tuple which first element is either
58 Status.UP or Status.DOWN.
59 The second element is a short error message that will be displayed
60 when the service turns down.
61
62 """
63
64 def __init__(self, name, init_config, agentConfig, instances):
65 AgentCheck.__init__(self, name, init_config, agentConfig, instances)
66
67 # A dictionary to keep track of service statuses
68 self.statuses = {}
69 self.notified = {}
70 self.nb_failures = 0
71 self.pool_size = 0
72 self.pool_started = False
73
74 # Make sure every instance has a name that we use as a unique key
75 # to keep track of statuses
76 names = []
77 for inst in instances:
78 inst_name = inst.get('name', None)
79 if not inst_name:
80 raise Exception("All instances should have a 'name' parameter,"
81 " error on instance: {0}".format(inst))
82 if inst_name in names:
83 raise Exception("Duplicate names for instances with name {0}"
84 .format(inst_name))
85 names.append(inst_name)
86
87 def stop(self):
88 self.stop_pool()
89 self.pool_started = False
90
91 def start_pool(self):
92 # The pool size should be the minimum between the number of instances
93 # and the DEFAULT_SIZE_POOL. It can also be overridden by the 'threads_count'
94 # parameter in the init_config of the check
95 self.log.info("Starting Thread Pool")
96 default_size = min(self.instance_count(), DEFAULT_SIZE_POOL)
97 self.pool_size = int(self.init_config.get('threads_count', default_size))
98
99 # To keep track on the total number of threads we should have running
100 NetworkCheck._global_current_pool_size += self.pool_size
101
102 self.pool = Pool(self.pool_size)
103
104 self.resultsq = Queue()
105 self.jobs_status = {}
106 self.jobs_results = {}
107 self.pool_started = True
108
109 def stop_pool(self):
110 self.log.info("Stopping Thread Pool")
111
112 # To keep track on the total number of threads we should have running
113 NetworkCheck._global_current_pool_size -= self.pool_size
114
115 if self.pool_started:
116 self.pool.terminate()
117 self.pool.join()
118 self.jobs_status.clear()
119 assert self.pool.get_nworkers() == 0
120
121 def restart_pool(self):
122 self.stop_pool()
123 self.start_pool()
124
125 def check(self, instance):
126 if not self.pool_started:
127 self.start_pool()
128 if threading.activeCount() > 5 * NetworkCheck._global_current_pool_size + 6:
129 # On Windows the agent runs on multiple threads because of WMI so we need an offset of 6
130 raise Exception("Thread number (%s) is exploding. Skipping this check" % threading.activeCount())
131 self._process_results()
132 self._clean()
133 name = instance.get('name', None)
134 if name is None:
135 self.log.error('Each service check must have a name')
136 return
137
138 if name not in self.jobs_status:
139 # A given instance should be processed one at a time
140 self.jobs_status[name] = time.time()
141 self.jobs_results[name] = self.pool.apply_async(self._process, args=(instance,))
142 else:
143 self.log.error("Instance: %s skipped because it's already running." % name)
144
145 def _process(self, instance):
146 try:
147 statuses = self._check(instance)
148
149 if isinstance(statuses, tuple):
150 # Assume the check only returns one service check
151 status, msg = statuses
152 self.resultsq.put((status, msg, None, instance))
153
154 elif isinstance(statuses, list):
155 for status in statuses:
156 sc_name, status, msg = status
157 self.resultsq.put((status, msg, sc_name, instance))
158
159 except Exception:
160 self.log.exception(
161 u"Failed to process instance '%s'.", instance.get('name', u"")
162 )
163 result = (FAILURE, FAILURE, FAILURE, instance)
164 self.resultsq.put(result)
165
166 def _process_results(self):
167 for i in xrange(MAX_LOOP_ITERATIONS):
168 try:
169 # We want to fetch the result in a non blocking way
170 status, msg, sc_name, instance = self.resultsq.get_nowait()
171 except Empty:
172 break
173
174 instance_name = instance['name']
175 if status == FAILURE:
176 self.nb_failures += 1
177 if self.nb_failures >= self.pool_size - 1:
178 self.nb_failures = 0
179 self.restart_pool()
180
181 # clean failed job
182 self._clean_job(instance_name)
183 continue
184
185 self.report_as_service_check(sc_name, status, instance, msg)
186
187 # FIXME: 5.3, this has been deprecated before, get rid of events
188 # Don't create any event to avoid duplicates with server side
189 # service_checks
190 skip_event = _is_affirmative(instance.get('skip_event', False))
191 if not skip_event:
192 self.warning("Using events for service checks is deprecated in favor of monitors and will be removed in future versions of the Datadog Agent.")
193 event = None
194
195 if instance_name not in self.statuses:
196 self.statuses[instance_name] = defaultdict(list)
197
198 self.statuses[instance_name][sc_name].append(status)
199
200 window = int(instance.get('window', 1))
201
202 if window > 256:
203 self.log.warning("Maximum window size (256) exceeded, defaulting it to 256")
204 window = 256
205
206 threshold = instance.get('threshold', 1)
207
208 if len(self.statuses[instance_name][sc_name]) > window:
209 self.statuses[instance_name][sc_name].pop(0)
210
211 nb_failures = self.statuses[instance_name][sc_name].count(Status.DOWN)
212
213 if nb_failures >= threshold:
214 if self.notified.get((instance_name, sc_name), Status.UP) != Status.DOWN:
215 event = self._create_status_event(sc_name, status, msg, instance)
216 self.notified[(instance_name, sc_name)] = Status.DOWN
217 else:
218 if self.notified.get((instance_name, sc_name), Status.UP) != Status.UP:
219 event = self._create_status_event(sc_name, status, msg, instance)
220 self.notified[(instance_name, sc_name)] = Status.UP
221
222 if event is not None:
223 self.events.append(event)
224
225 self._clean_job(instance_name)
226
227 def _clean_job(self, instance_name):
228 # The job is finished here, this instance can be re processed
229 if instance_name in self.jobs_status:
230 self.log.debug("Instance: %s cleaned from jobs status." % instance_name)
231 del self.jobs_status[instance_name]
232
233 # if an exception happened, log it
234 if instance_name in self.jobs_results:
235 self.log.debug("Instance: %s cleaned from jobs results." % instance_name)
236 ret = self.jobs_results[instance_name].get()
237 if isinstance(ret, Exception):
238 self.log.exception("Exception in worker thread: {0}".format(ret))
239 del self.jobs_results[instance_name]
240
241
242 def _check(self, instance):
243 """This function should be implemented by inherited classes"""
244 raise NotImplementedError
245
246
247 def _clean(self):
248 now = time.time()
249 for name, start_time in self.jobs_status.iteritems():
250 if now - start_time > TIMEOUT:
251 self.log.critical("Restarting Pool. One check is stuck: %s" % name)
252 self.restart_pool()
253 break
254
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/checks/network_checks.py b/checks/network_checks.py
--- a/checks/network_checks.py
+++ b/checks/network_checks.py
@@ -3,7 +3,6 @@
# Licensed under Simplified BSD License (see LICENSE)
# stdlib
-from collections import defaultdict
from Queue import Empty, Queue
import threading
import time
@@ -11,7 +10,6 @@
# project
from checks import AgentCheck
from checks.libs.thread_pool import Pool
-from config import _is_affirmative
TIMEOUT = 180
DEFAULT_SIZE_POOL = 6
@@ -183,45 +181,6 @@
continue
self.report_as_service_check(sc_name, status, instance, msg)
-
- # FIXME: 5.3, this has been deprecated before, get rid of events
- # Don't create any event to avoid duplicates with server side
- # service_checks
- skip_event = _is_affirmative(instance.get('skip_event', False))
- if not skip_event:
- self.warning("Using events for service checks is deprecated in favor of monitors and will be removed in future versions of the Datadog Agent.")
- event = None
-
- if instance_name not in self.statuses:
- self.statuses[instance_name] = defaultdict(list)
-
- self.statuses[instance_name][sc_name].append(status)
-
- window = int(instance.get('window', 1))
-
- if window > 256:
- self.log.warning("Maximum window size (256) exceeded, defaulting it to 256")
- window = 256
-
- threshold = instance.get('threshold', 1)
-
- if len(self.statuses[instance_name][sc_name]) > window:
- self.statuses[instance_name][sc_name].pop(0)
-
- nb_failures = self.statuses[instance_name][sc_name].count(Status.DOWN)
-
- if nb_failures >= threshold:
- if self.notified.get((instance_name, sc_name), Status.UP) != Status.DOWN:
- event = self._create_status_event(sc_name, status, msg, instance)
- self.notified[(instance_name, sc_name)] = Status.DOWN
- else:
- if self.notified.get((instance_name, sc_name), Status.UP) != Status.UP:
- event = self._create_status_event(sc_name, status, msg, instance)
- self.notified[(instance_name, sc_name)] = Status.UP
-
- if event is not None:
- self.events.append(event)
-
self._clean_job(instance_name)
def _clean_job(self, instance_name):
| {"golden_diff": "diff --git a/checks/network_checks.py b/checks/network_checks.py\n--- a/checks/network_checks.py\n+++ b/checks/network_checks.py\n@@ -3,7 +3,6 @@\n # Licensed under Simplified BSD License (see LICENSE)\n \n # stdlib\n-from collections import defaultdict\n from Queue import Empty, Queue\n import threading\n import time\n@@ -11,7 +10,6 @@\n # project\n from checks import AgentCheck\n from checks.libs.thread_pool import Pool\n-from config import _is_affirmative\n \n TIMEOUT = 180\n DEFAULT_SIZE_POOL = 6\n@@ -183,45 +181,6 @@\n continue\n \n self.report_as_service_check(sc_name, status, instance, msg)\n-\n- # FIXME: 5.3, this has been deprecated before, get rid of events\n- # Don't create any event to avoid duplicates with server side\n- # service_checks\n- skip_event = _is_affirmative(instance.get('skip_event', False))\n- if not skip_event:\n- self.warning(\"Using events for service checks is deprecated in favor of monitors and will be removed in future versions of the Datadog Agent.\")\n- event = None\n-\n- if instance_name not in self.statuses:\n- self.statuses[instance_name] = defaultdict(list)\n-\n- self.statuses[instance_name][sc_name].append(status)\n-\n- window = int(instance.get('window', 1))\n-\n- if window > 256:\n- self.log.warning(\"Maximum window size (256) exceeded, defaulting it to 256\")\n- window = 256\n-\n- threshold = instance.get('threshold', 1)\n-\n- if len(self.statuses[instance_name][sc_name]) > window:\n- self.statuses[instance_name][sc_name].pop(0)\n-\n- nb_failures = self.statuses[instance_name][sc_name].count(Status.DOWN)\n-\n- if nb_failures >= threshold:\n- if self.notified.get((instance_name, sc_name), Status.UP) != Status.DOWN:\n- event = self._create_status_event(sc_name, status, msg, instance)\n- self.notified[(instance_name, sc_name)] = Status.DOWN\n- else:\n- if self.notified.get((instance_name, sc_name), Status.UP) != Status.UP:\n- event = self._create_status_event(sc_name, status, msg, instance)\n- self.notified[(instance_name, sc_name)] = Status.UP\n-\n- if event is not None:\n- self.events.append(event)\n-\n self._clean_job(instance_name)\n \n def _clean_job(self, instance_name):\n", "issue": "Missing info on fixing: Using events for service checks is deprecated in favor of monitors\nIt seems that you deprecated a feature without putting any information regarding how to fix it. I even googled and the only reference is the source code where you added this warning.\n\nPlease provide some information regarding what changes we are supposed to implement in order not to get these warnings. \n\nThese warnings are showing in the UI:\nhttps://www.dropbox.com/s/xpjpmgqtk71hqze/Screenshot%202014-09-24%2015.43.22.png?dl=0\n\n", "before_files": [{"content": "# (C) Datadog, Inc. 
2010-2016\n# All rights reserved\n# Licensed under Simplified BSD License (see LICENSE)\n\n# stdlib\nfrom collections import defaultdict\nfrom Queue import Empty, Queue\nimport threading\nimport time\n\n# project\nfrom checks import AgentCheck\nfrom checks.libs.thread_pool import Pool\nfrom config import _is_affirmative\n\nTIMEOUT = 180\nDEFAULT_SIZE_POOL = 6\nMAX_LOOP_ITERATIONS = 1000\nFAILURE = \"FAILURE\"\n\nclass Status:\n DOWN = \"DOWN\"\n WARNING = \"WARNING\"\n CRITICAL = \"CRITICAL\"\n UP = \"UP\"\n\n\nclass EventType:\n DOWN = \"servicecheck.state_change.down\"\n UP = \"servicecheck.state_change.up\"\n\n\nclass NetworkCheck(AgentCheck):\n SOURCE_TYPE_NAME = 'servicecheck'\n SERVICE_CHECK_PREFIX = 'network_check'\n _global_current_pool_size = 0\n\n STATUS_TO_SERVICE_CHECK = {\n Status.UP : AgentCheck.OK,\n Status.WARNING : AgentCheck.WARNING,\n Status.CRITICAL : AgentCheck.CRITICAL,\n Status.DOWN : AgentCheck.CRITICAL,\n }\n\n \"\"\"\n Services checks inherits from this class.\n This class should never be directly instanciated.\n\n Work flow:\n The main agent loop will call the check function for each instance for\n each iteration of the loop.\n The check method will make an asynchronous call to the _process method in\n one of the thread initiated in the thread pool created in this class constructor.\n The _process method will call the _check method of the inherited class\n which will perform the actual check.\n\n The _check method must return a tuple which first element is either\n Status.UP or Status.DOWN.\n The second element is a short error message that will be displayed\n when the service turns down.\n\n \"\"\"\n\n def __init__(self, name, init_config, agentConfig, instances):\n AgentCheck.__init__(self, name, init_config, agentConfig, instances)\n\n # A dictionary to keep track of service statuses\n self.statuses = {}\n self.notified = {}\n self.nb_failures = 0\n self.pool_size = 0\n self.pool_started = False\n\n # Make sure every instance has a name that we use as a unique key\n # to keep track of statuses\n names = []\n for inst in instances:\n inst_name = inst.get('name', None)\n if not inst_name:\n raise Exception(\"All instances should have a 'name' parameter,\"\n \" error on instance: {0}\".format(inst))\n if inst_name in names:\n raise Exception(\"Duplicate names for instances with name {0}\"\n .format(inst_name))\n names.append(inst_name)\n\n def stop(self):\n self.stop_pool()\n self.pool_started = False\n\n def start_pool(self):\n # The pool size should be the minimum between the number of instances\n # and the DEFAULT_SIZE_POOL. 
It can also be overridden by the 'threads_count'\n # parameter in the init_config of the check\n self.log.info(\"Starting Thread Pool\")\n default_size = min(self.instance_count(), DEFAULT_SIZE_POOL)\n self.pool_size = int(self.init_config.get('threads_count', default_size))\n\n # To keep track on the total number of threads we should have running\n NetworkCheck._global_current_pool_size += self.pool_size\n\n self.pool = Pool(self.pool_size)\n\n self.resultsq = Queue()\n self.jobs_status = {}\n self.jobs_results = {}\n self.pool_started = True\n\n def stop_pool(self):\n self.log.info(\"Stopping Thread Pool\")\n\n # To keep track on the total number of threads we should have running\n NetworkCheck._global_current_pool_size -= self.pool_size\n\n if self.pool_started:\n self.pool.terminate()\n self.pool.join()\n self.jobs_status.clear()\n assert self.pool.get_nworkers() == 0\n\n def restart_pool(self):\n self.stop_pool()\n self.start_pool()\n\n def check(self, instance):\n if not self.pool_started:\n self.start_pool()\n if threading.activeCount() > 5 * NetworkCheck._global_current_pool_size + 6:\n # On Windows the agent runs on multiple threads because of WMI so we need an offset of 6\n raise Exception(\"Thread number (%s) is exploding. Skipping this check\" % threading.activeCount())\n self._process_results()\n self._clean()\n name = instance.get('name', None)\n if name is None:\n self.log.error('Each service check must have a name')\n return\n\n if name not in self.jobs_status:\n # A given instance should be processed one at a time\n self.jobs_status[name] = time.time()\n self.jobs_results[name] = self.pool.apply_async(self._process, args=(instance,))\n else:\n self.log.error(\"Instance: %s skipped because it's already running.\" % name)\n\n def _process(self, instance):\n try:\n statuses = self._check(instance)\n\n if isinstance(statuses, tuple):\n # Assume the check only returns one service check\n status, msg = statuses\n self.resultsq.put((status, msg, None, instance))\n\n elif isinstance(statuses, list):\n for status in statuses:\n sc_name, status, msg = status\n self.resultsq.put((status, msg, sc_name, instance))\n\n except Exception:\n self.log.exception(\n u\"Failed to process instance '%s'.\", instance.get('name', u\"\")\n )\n result = (FAILURE, FAILURE, FAILURE, instance)\n self.resultsq.put(result)\n\n def _process_results(self):\n for i in xrange(MAX_LOOP_ITERATIONS):\n try:\n # We want to fetch the result in a non blocking way\n status, msg, sc_name, instance = self.resultsq.get_nowait()\n except Empty:\n break\n\n instance_name = instance['name']\n if status == FAILURE:\n self.nb_failures += 1\n if self.nb_failures >= self.pool_size - 1:\n self.nb_failures = 0\n self.restart_pool()\n\n # clean failed job\n self._clean_job(instance_name)\n continue\n\n self.report_as_service_check(sc_name, status, instance, msg)\n\n # FIXME: 5.3, this has been deprecated before, get rid of events\n # Don't create any event to avoid duplicates with server side\n # service_checks\n skip_event = _is_affirmative(instance.get('skip_event', False))\n if not skip_event:\n self.warning(\"Using events for service checks is deprecated in favor of monitors and will be removed in future versions of the Datadog Agent.\")\n event = None\n\n if instance_name not in self.statuses:\n self.statuses[instance_name] = defaultdict(list)\n\n self.statuses[instance_name][sc_name].append(status)\n\n window = int(instance.get('window', 1))\n\n if window > 256:\n self.log.warning(\"Maximum window size (256) exceeded, 
defaulting it to 256\")\n window = 256\n\n threshold = instance.get('threshold', 1)\n\n if len(self.statuses[instance_name][sc_name]) > window:\n self.statuses[instance_name][sc_name].pop(0)\n\n nb_failures = self.statuses[instance_name][sc_name].count(Status.DOWN)\n\n if nb_failures >= threshold:\n if self.notified.get((instance_name, sc_name), Status.UP) != Status.DOWN:\n event = self._create_status_event(sc_name, status, msg, instance)\n self.notified[(instance_name, sc_name)] = Status.DOWN\n else:\n if self.notified.get((instance_name, sc_name), Status.UP) != Status.UP:\n event = self._create_status_event(sc_name, status, msg, instance)\n self.notified[(instance_name, sc_name)] = Status.UP\n\n if event is not None:\n self.events.append(event)\n\n self._clean_job(instance_name)\n\n def _clean_job(self, instance_name):\n # The job is finished here, this instance can be re processed\n if instance_name in self.jobs_status:\n self.log.debug(\"Instance: %s cleaned from jobs status.\" % instance_name)\n del self.jobs_status[instance_name]\n\n # if an exception happened, log it\n if instance_name in self.jobs_results:\n self.log.debug(\"Instance: %s cleaned from jobs results.\" % instance_name)\n ret = self.jobs_results[instance_name].get()\n if isinstance(ret, Exception):\n self.log.exception(\"Exception in worker thread: {0}\".format(ret))\n del self.jobs_results[instance_name]\n\n\n def _check(self, instance):\n \"\"\"This function should be implemented by inherited classes\"\"\"\n raise NotImplementedError\n\n\n def _clean(self):\n now = time.time()\n for name, start_time in self.jobs_status.iteritems():\n if now - start_time > TIMEOUT:\n self.log.critical(\"Restarting Pool. One check is stuck: %s\" % name)\n self.restart_pool()\n break\n", "path": "checks/network_checks.py"}], "after_files": [{"content": "# (C) Datadog, Inc. 
2010-2016\n# All rights reserved\n# Licensed under Simplified BSD License (see LICENSE)\n\n# stdlib\nfrom Queue import Empty, Queue\nimport threading\nimport time\n\n# project\nfrom checks import AgentCheck\nfrom checks.libs.thread_pool import Pool\n\nTIMEOUT = 180\nDEFAULT_SIZE_POOL = 6\nMAX_LOOP_ITERATIONS = 1000\nFAILURE = \"FAILURE\"\n\nclass Status:\n DOWN = \"DOWN\"\n WARNING = \"WARNING\"\n CRITICAL = \"CRITICAL\"\n UP = \"UP\"\n\n\nclass EventType:\n DOWN = \"servicecheck.state_change.down\"\n UP = \"servicecheck.state_change.up\"\n\n\nclass NetworkCheck(AgentCheck):\n SOURCE_TYPE_NAME = 'servicecheck'\n SERVICE_CHECK_PREFIX = 'network_check'\n _global_current_pool_size = 0\n\n STATUS_TO_SERVICE_CHECK = {\n Status.UP : AgentCheck.OK,\n Status.WARNING : AgentCheck.WARNING,\n Status.CRITICAL : AgentCheck.CRITICAL,\n Status.DOWN : AgentCheck.CRITICAL,\n }\n\n \"\"\"\n Services checks inherits from this class.\n This class should never be directly instanciated.\n\n Work flow:\n The main agent loop will call the check function for each instance for\n each iteration of the loop.\n The check method will make an asynchronous call to the _process method in\n one of the thread initiated in the thread pool created in this class constructor.\n The _process method will call the _check method of the inherited class\n which will perform the actual check.\n\n The _check method must return a tuple which first element is either\n Status.UP or Status.DOWN.\n The second element is a short error message that will be displayed\n when the service turns down.\n\n \"\"\"\n\n def __init__(self, name, init_config, agentConfig, instances):\n AgentCheck.__init__(self, name, init_config, agentConfig, instances)\n\n # A dictionary to keep track of service statuses\n self.statuses = {}\n self.notified = {}\n self.nb_failures = 0\n self.pool_size = 0\n self.pool_started = False\n\n # Make sure every instance has a name that we use as a unique key\n # to keep track of statuses\n names = []\n for inst in instances:\n inst_name = inst.get('name', None)\n if not inst_name:\n raise Exception(\"All instances should have a 'name' parameter,\"\n \" error on instance: {0}\".format(inst))\n if inst_name in names:\n raise Exception(\"Duplicate names for instances with name {0}\"\n .format(inst_name))\n names.append(inst_name)\n\n def stop(self):\n self.stop_pool()\n self.pool_started = False\n\n def start_pool(self):\n # The pool size should be the minimum between the number of instances\n # and the DEFAULT_SIZE_POOL. 
It can also be overridden by the 'threads_count'\n # parameter in the init_config of the check\n self.log.info(\"Starting Thread Pool\")\n default_size = min(self.instance_count(), DEFAULT_SIZE_POOL)\n self.pool_size = int(self.init_config.get('threads_count', default_size))\n\n # To keep track on the total number of threads we should have running\n NetworkCheck._global_current_pool_size += self.pool_size\n\n self.pool = Pool(self.pool_size)\n\n self.resultsq = Queue()\n self.jobs_status = {}\n self.jobs_results = {}\n self.pool_started = True\n\n def stop_pool(self):\n self.log.info(\"Stopping Thread Pool\")\n\n # To keep track on the total number of threads we should have running\n NetworkCheck._global_current_pool_size -= self.pool_size\n\n if self.pool_started:\n self.pool.terminate()\n self.pool.join()\n self.jobs_status.clear()\n assert self.pool.get_nworkers() == 0\n\n def restart_pool(self):\n self.stop_pool()\n self.start_pool()\n\n def check(self, instance):\n if not self.pool_started:\n self.start_pool()\n if threading.activeCount() > 5 * NetworkCheck._global_current_pool_size + 6:\n # On Windows the agent runs on multiple threads because of WMI so we need an offset of 6\n raise Exception(\"Thread number (%s) is exploding. Skipping this check\" % threading.activeCount())\n self._process_results()\n self._clean()\n name = instance.get('name', None)\n if name is None:\n self.log.error('Each service check must have a name')\n return\n\n if name not in self.jobs_status:\n # A given instance should be processed one at a time\n self.jobs_status[name] = time.time()\n self.jobs_results[name] = self.pool.apply_async(self._process, args=(instance,))\n else:\n self.log.error(\"Instance: %s skipped because it's already running.\" % name)\n\n def _process(self, instance):\n try:\n statuses = self._check(instance)\n\n if isinstance(statuses, tuple):\n # Assume the check only returns one service check\n status, msg = statuses\n self.resultsq.put((status, msg, None, instance))\n\n elif isinstance(statuses, list):\n for status in statuses:\n sc_name, status, msg = status\n self.resultsq.put((status, msg, sc_name, instance))\n\n except Exception:\n self.log.exception(\n u\"Failed to process instance '%s'.\", instance.get('name', u\"\")\n )\n result = (FAILURE, FAILURE, FAILURE, instance)\n self.resultsq.put(result)\n\n def _process_results(self):\n for i in xrange(MAX_LOOP_ITERATIONS):\n try:\n # We want to fetch the result in a non blocking way\n status, msg, sc_name, instance = self.resultsq.get_nowait()\n except Empty:\n break\n\n instance_name = instance['name']\n if status == FAILURE:\n self.nb_failures += 1\n if self.nb_failures >= self.pool_size - 1:\n self.nb_failures = 0\n self.restart_pool()\n\n # clean failed job\n self._clean_job(instance_name)\n continue\n\n self.report_as_service_check(sc_name, status, instance, msg)\n self._clean_job(instance_name)\n\n def _clean_job(self, instance_name):\n # The job is finished here, this instance can be re processed\n if instance_name in self.jobs_status:\n self.log.debug(\"Instance: %s cleaned from jobs status.\" % instance_name)\n del self.jobs_status[instance_name]\n\n # if an exception happened, log it\n if instance_name in self.jobs_results:\n self.log.debug(\"Instance: %s cleaned from jobs results.\" % instance_name)\n ret = self.jobs_results[instance_name].get()\n if isinstance(ret, Exception):\n self.log.exception(\"Exception in worker thread: {0}\".format(ret))\n del self.jobs_results[instance_name]\n\n\n def _check(self, instance):\n 
\"\"\"This function should be implemented by inherited classes\"\"\"\n raise NotImplementedError\n\n\n def _clean(self):\n now = time.time()\n for name, start_time in self.jobs_status.iteritems():\n if now - start_time > TIMEOUT:\n self.log.critical(\"Restarting Pool. One check is stuck: %s\" % name)\n self.restart_pool()\n break\n", "path": "checks/network_checks.py"}]} | 3,042 | 593 |
gh_patches_debug_27241 | rasdani/github-patches | git_diff | Kinto__kinto-1025 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Should the cache backend even use transactions?
mozilla-services/kinto-fxa#33 seems to be related to the fact that we're opening a transaction to consult the authentication cache. Is this really necessary? It seems like for a cache, we don't need any of the ACID properties. /cc @leplatrem
--- END ISSUE ---
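To make the question concrete, here is a minimal standalone sketch (not Kinto code) of cache access that never joins a request-scoped transaction: each statement runs on a pooled SQLAlchemy connection and commits on its own. The connection URL and the shape of the `cache` table are assumptions for illustration only.
```
from sqlalchemy import create_engine, text

engine = create_engine("postgresql://user:pass@localhost:5432/dbname")  # assumed URL

def cache_get(key):
    # A single SELECT on a pooled connection; no surrounding transaction is needed
    # for cache semantics, and a stale read is acceptable here.
    with engine.connect() as conn:
        row = conn.execute(
            text("SELECT value FROM cache WHERE key = :key"), {"key": key}
        ).fetchone()
    return row[0] if row else None

def cache_set(key, value):
    # engine.begin() commits this one statement and returns the connection to the pool.
    with engine.begin() as conn:
        conn.execute(
            text(
                "INSERT INTO cache (key, value) VALUES (:key, :value) "
                "ON CONFLICT (key) DO UPDATE SET value = :value"
            ),
            {"key": key, "value": value},
        )
```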
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kinto/core/storage/postgresql/client.py`
Content:
```
1 import contextlib
2 import warnings
3 from collections import defaultdict
4
5 from kinto.core import logger
6 from kinto.core.storage import exceptions
7 from kinto.core.utils import sqlalchemy
8 import transaction as zope_transaction
9
10
11 class PostgreSQLClient(object):
12 def __init__(self, session_factory, commit_manually=True, invalidate=None):
13 self.session_factory = session_factory
14 self.commit_manually = commit_manually
15 self.invalidate = invalidate or (lambda session: None)
16
17 # # Register ujson, globally for all futur cursors
18 # with self.connect() as cursor:
19 # psycopg2.extras.register_json(cursor,
20 # globally=True,
21 # loads=json.loads)
22
23 @contextlib.contextmanager
24 def connect(self, readonly=False, force_commit=False):
25 """
26 Pulls a connection from the pool when context is entered and
27 returns it when context is exited.
28
29 A COMMIT is performed on the current transaction if everything went
30 well. Otherwise transaction is ROLLBACK, and everything cleaned up.
31 """
32 with_transaction = not readonly and self.commit_manually
33 session = None
34 try:
35 # Pull connection from pool.
36 session = self.session_factory()
37 # Start context
38 yield session
39 if not readonly and not self.commit_manually:
40 # Mark session as dirty.
41 self.invalidate(session)
42 # Success
43 if with_transaction:
44 session.commit()
45 elif force_commit:
46 # Commit like would do a succesful request.
47 zope_transaction.commit()
48
49 except sqlalchemy.exc.SQLAlchemyError as e:
50 logger.error(e)
51 if session and with_transaction:
52 session.rollback()
53 raise exceptions.BackendError(original=e)
54
55 finally:
56 if session and self.commit_manually:
57 # Give back to pool if commit done manually.
58 session.close()
59
60
61 # Reuse existing client if same URL.
62 _CLIENTS = defaultdict(dict)
63
64
65 def create_from_config(config, prefix=''):
66 """Create a PostgreSQLClient client using settings in the provided config.
67 """
68 if sqlalchemy is None:
69 message = ("PostgreSQL SQLAlchemy dependency missing. "
70 "Refer to installation section in documentation.")
71 raise ImportWarning(message)
72
73 from zope.sqlalchemy import ZopeTransactionExtension, invalidate
74 from sqlalchemy.orm import sessionmaker, scoped_session
75
76 settings = config.get_settings().copy()
77 # Custom Kinto settings, unsupported by SQLAlchemy.
78 settings.pop(prefix + 'backend', None)
79 settings.pop(prefix + 'max_fetch_size', None)
80 settings.pop(prefix + 'prefix', None)
81 transaction_per_request = settings.pop('transaction_per_request', False)
82
83 url = settings[prefix + 'url']
84 existing_client = _CLIENTS[transaction_per_request].get(url)
85 if existing_client:
86 msg = ("Reuse existing PostgreSQL connection. "
87 "Parameters %s* will be ignored." % prefix)
88 warnings.warn(msg)
89 return existing_client
90
91 # Initialize SQLAlchemy engine from settings.
92 poolclass_key = prefix + 'poolclass'
93 settings.setdefault(poolclass_key, ('kinto.core.storage.postgresql.'
94 'pool.QueuePoolWithMaxBacklog'))
95 settings[poolclass_key] = config.maybe_dotted(settings[poolclass_key])
96 engine = sqlalchemy.engine_from_config(settings, prefix=prefix, url=url)
97
98 # Initialize thread-safe session factory.
99 options = {}
100 if transaction_per_request:
101 # Plug with Pyramid transaction manager
102 options['extension'] = ZopeTransactionExtension()
103 session_factory = scoped_session(sessionmaker(bind=engine, **options))
104
105 # Store one client per URI.
106 commit_manually = (not transaction_per_request)
107 client = PostgreSQLClient(session_factory, commit_manually, invalidate)
108 _CLIENTS[transaction_per_request][url] = client
109 return client
110
```
Path: `kinto/core/cache/postgresql/__init__.py`
Content:
```
1 from __future__ import absolute_import
2
3 import os
4
5 from kinto.core import logger
6 from kinto.core.cache import CacheBase
7 from kinto.core.storage.postgresql.client import create_from_config
8 from kinto.core.utils import json
9
10
11 class Cache(CacheBase):
12 """Cache backend using PostgreSQL.
13
14 Enable in configuration::
15
16 kinto.cache_backend = kinto.core.cache.postgresql
17
18 Database location URI can be customized::
19
20 kinto.cache_url = postgres://user:[email protected]:5432/dbname
21
22 Alternatively, username and password could also rely on system user ident
23 or even specified in :file:`~/.pgpass` (*see PostgreSQL documentation*).
24
25 .. note::
26
27 Some tables and indices are created when ``kinto migrate`` is run.
28 This requires some privileges on the database, or some error will
29 be raised.
30
31 **Alternatively**, the schema can be initialized outside the
32 python application, using the SQL file located in
33 :file:`kinto/core/cache/postgresql/schema.sql`. This allows to
34 distinguish schema manipulation privileges from schema usage.
35
36
37 A connection pool is enabled by default::
38
39 kinto.cache_pool_size = 10
40 kinto.cache_maxoverflow = 10
41 kinto.cache_max_backlog = -1
42 kinto.cache_pool_recycle = -1
43 kinto.cache_pool_timeout = 30
44 kinto.cache_poolclass =
45 kinto.core.storage.postgresql.pool.QueuePoolWithMaxBacklog
46
47 The ``max_backlog`` limits the number of threads that can be in the queue
48 waiting for a connection. Once this limit has been reached, any further
49 attempts to acquire a connection will be rejected immediately, instead of
50 locking up all threads by keeping them waiting in the queue.
51
52 See `dedicated section in SQLAlchemy documentation
53 <http://docs.sqlalchemy.org/en/rel_1_0/core/engines.html>`_
54 for default values and behaviour.
55
56 .. note::
57
58 Using a `dedicated connection pool <http://pgpool.net>`_ is still
59 recommended to allow load balancing, replication or limit the number
60 of connections used in a multi-process deployment.
61
62 :noindex:
63 """ # NOQA
64 def __init__(self, client, *args, **kwargs):
65 super(Cache, self).__init__(*args, **kwargs)
66 self.client = client
67
68 def initialize_schema(self, dry_run=False):
69 # Check if cache table exists.
70 query = """
71 SELECT 1
72 FROM information_schema.tables
73 WHERE table_name = 'cache';
74 """
75 with self.client.connect(readonly=True) as conn:
76 result = conn.execute(query)
77 if result.rowcount > 0:
78 logger.info("PostgreSQL cache schema is up-to-date.")
79 return
80
81 # Create schema
82 here = os.path.abspath(os.path.dirname(__file__))
83 sql_file = os.path.join(here, 'schema.sql')
84
85 if dry_run:
86 logger.info("Create cache schema from %s" % sql_file)
87 return
88
89 # Since called outside request, force commit.
90 schema = open(sql_file).read()
91 with self.client.connect(force_commit=True) as conn:
92 conn.execute(schema)
93 logger.info('Created PostgreSQL cache tables')
94
95 def flush(self):
96 query = """
97 DELETE FROM cache;
98 """
99 # Since called outside request (e.g. tests), force commit.
100 with self.client.connect(force_commit=True) as conn:
101 conn.execute(query)
102 logger.debug('Flushed PostgreSQL cache tables')
103
104 def ttl(self, key):
105 query = """
106 SELECT EXTRACT(SECOND FROM (ttl - now())) AS ttl
107 FROM cache
108 WHERE key = :key
109 AND ttl IS NOT NULL;
110 """
111 with self.client.connect(readonly=True) as conn:
112 result = conn.execute(query, dict(key=self.prefix + key))
113 if result.rowcount > 0:
114 return result.fetchone()['ttl']
115 return -1
116
117 def expire(self, key, ttl):
118 query = """
119 UPDATE cache SET ttl = sec2ttl(:ttl) WHERE key = :key;
120 """
121 with self.client.connect() as conn:
122 conn.execute(query, dict(ttl=ttl, key=self.prefix + key))
123
124 def set(self, key, value, ttl=None):
125 if ttl is None:
126 logger.warning("No TTL for cache key %r" % key)
127 query = """
128 WITH upsert AS (
129 UPDATE cache SET value = :value, ttl = sec2ttl(:ttl)
130 WHERE key=:key
131 RETURNING *)
132 INSERT INTO cache (key, value, ttl)
133 SELECT :key, :value, sec2ttl(:ttl)
134 WHERE NOT EXISTS (SELECT * FROM upsert)
135 """
136 value = json.dumps(value)
137 with self.client.connect() as conn:
138 conn.execute(query, dict(key=self.prefix + key,
139 value=value, ttl=ttl))
140
141 def get(self, key):
142 purge = "DELETE FROM cache WHERE ttl IS NOT NULL AND now() > ttl;"
143 query = "SELECT value FROM cache WHERE key = :key;"
144 with self.client.connect() as conn:
145 conn.execute(purge)
146 result = conn.execute(query, dict(key=self.prefix + key))
147 if result.rowcount > 0:
148 value = result.fetchone()['value']
149 return json.loads(value)
150
151 def delete(self, key):
152 query = "DELETE FROM cache WHERE key = :key"
153 with self.client.connect() as conn:
154 conn.execute(query, dict(key=self.prefix + key))
155
156
157 def load_from_config(config):
158 settings = config.get_settings()
159 client = create_from_config(config, prefix='cache_')
160 return Cache(client=client, cache_prefix=settings['cache_prefix'])
161
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/kinto/core/cache/postgresql/__init__.py b/kinto/core/cache/postgresql/__init__.py
--- a/kinto/core/cache/postgresql/__init__.py
+++ b/kinto/core/cache/postgresql/__init__.py
@@ -156,5 +156,5 @@
def load_from_config(config):
settings = config.get_settings()
- client = create_from_config(config, prefix='cache_')
+ client = create_from_config(config, prefix='cache_', with_transaction=False)
return Cache(client=client, cache_prefix=settings['cache_prefix'])
diff --git a/kinto/core/storage/postgresql/client.py b/kinto/core/storage/postgresql/client.py
--- a/kinto/core/storage/postgresql/client.py
+++ b/kinto/core/storage/postgresql/client.py
@@ -62,7 +62,7 @@
_CLIENTS = defaultdict(dict)
-def create_from_config(config, prefix=''):
+def create_from_config(config, prefix='', with_transaction=True):
"""Create a PostgreSQLClient client using settings in the provided config.
"""
if sqlalchemy is None:
@@ -77,8 +77,9 @@
# Custom Kinto settings, unsupported by SQLAlchemy.
settings.pop(prefix + 'backend', None)
settings.pop(prefix + 'max_fetch_size', None)
+ settings.pop(prefix + 'max_size_bytes', None)
settings.pop(prefix + 'prefix', None)
- transaction_per_request = settings.pop('transaction_per_request', False)
+ transaction_per_request = with_transaction and settings.pop('transaction_per_request', False)
url = settings[prefix + 'url']
existing_client = _CLIENTS[transaction_per_request].get(url)
| {"golden_diff": "diff --git a/kinto/core/cache/postgresql/__init__.py b/kinto/core/cache/postgresql/__init__.py\n--- a/kinto/core/cache/postgresql/__init__.py\n+++ b/kinto/core/cache/postgresql/__init__.py\n@@ -156,5 +156,5 @@\n \n def load_from_config(config):\n settings = config.get_settings()\n- client = create_from_config(config, prefix='cache_')\n+ client = create_from_config(config, prefix='cache_', with_transaction=False)\n return Cache(client=client, cache_prefix=settings['cache_prefix'])\ndiff --git a/kinto/core/storage/postgresql/client.py b/kinto/core/storage/postgresql/client.py\n--- a/kinto/core/storage/postgresql/client.py\n+++ b/kinto/core/storage/postgresql/client.py\n@@ -62,7 +62,7 @@\n _CLIENTS = defaultdict(dict)\n \n \n-def create_from_config(config, prefix=''):\n+def create_from_config(config, prefix='', with_transaction=True):\n \"\"\"Create a PostgreSQLClient client using settings in the provided config.\n \"\"\"\n if sqlalchemy is None:\n@@ -77,8 +77,9 @@\n # Custom Kinto settings, unsupported by SQLAlchemy.\n settings.pop(prefix + 'backend', None)\n settings.pop(prefix + 'max_fetch_size', None)\n+ settings.pop(prefix + 'max_size_bytes', None)\n settings.pop(prefix + 'prefix', None)\n- transaction_per_request = settings.pop('transaction_per_request', False)\n+ transaction_per_request = with_transaction and settings.pop('transaction_per_request', False)\n \n url = settings[prefix + 'url']\n existing_client = _CLIENTS[transaction_per_request].get(url)\n", "issue": "Should the cache backend even use transactions?\nmozilla-services/kinto-fxa#33 seems to be related to the fact that we're opening a transaction to consult the authentication cache. Is this really necessary? It seems like for a cache, we don't need any of the ACID properties. /cc @leplatrem \n\nShould the cache backend even use transactions?\nmozilla-services/kinto-fxa#33 seems to be related to the fact that we're opening a transaction to consult the authentication cache. Is this really necessary? It seems like for a cache, we don't need any of the ACID properties. /cc @leplatrem \n\n", "before_files": [{"content": "import contextlib\nimport warnings\nfrom collections import defaultdict\n\nfrom kinto.core import logger\nfrom kinto.core.storage import exceptions\nfrom kinto.core.utils import sqlalchemy\nimport transaction as zope_transaction\n\n\nclass PostgreSQLClient(object):\n def __init__(self, session_factory, commit_manually=True, invalidate=None):\n self.session_factory = session_factory\n self.commit_manually = commit_manually\n self.invalidate = invalidate or (lambda session: None)\n\n # # Register ujson, globally for all futur cursors\n # with self.connect() as cursor:\n # psycopg2.extras.register_json(cursor,\n # globally=True,\n # loads=json.loads)\n\n @contextlib.contextmanager\n def connect(self, readonly=False, force_commit=False):\n \"\"\"\n Pulls a connection from the pool when context is entered and\n returns it when context is exited.\n\n A COMMIT is performed on the current transaction if everything went\n well. 
Otherwise transaction is ROLLBACK, and everything cleaned up.\n \"\"\"\n with_transaction = not readonly and self.commit_manually\n session = None\n try:\n # Pull connection from pool.\n session = self.session_factory()\n # Start context\n yield session\n if not readonly and not self.commit_manually:\n # Mark session as dirty.\n self.invalidate(session)\n # Success\n if with_transaction:\n session.commit()\n elif force_commit:\n # Commit like would do a succesful request.\n zope_transaction.commit()\n\n except sqlalchemy.exc.SQLAlchemyError as e:\n logger.error(e)\n if session and with_transaction:\n session.rollback()\n raise exceptions.BackendError(original=e)\n\n finally:\n if session and self.commit_manually:\n # Give back to pool if commit done manually.\n session.close()\n\n\n# Reuse existing client if same URL.\n_CLIENTS = defaultdict(dict)\n\n\ndef create_from_config(config, prefix=''):\n \"\"\"Create a PostgreSQLClient client using settings in the provided config.\n \"\"\"\n if sqlalchemy is None:\n message = (\"PostgreSQL SQLAlchemy dependency missing. \"\n \"Refer to installation section in documentation.\")\n raise ImportWarning(message)\n\n from zope.sqlalchemy import ZopeTransactionExtension, invalidate\n from sqlalchemy.orm import sessionmaker, scoped_session\n\n settings = config.get_settings().copy()\n # Custom Kinto settings, unsupported by SQLAlchemy.\n settings.pop(prefix + 'backend', None)\n settings.pop(prefix + 'max_fetch_size', None)\n settings.pop(prefix + 'prefix', None)\n transaction_per_request = settings.pop('transaction_per_request', False)\n\n url = settings[prefix + 'url']\n existing_client = _CLIENTS[transaction_per_request].get(url)\n if existing_client:\n msg = (\"Reuse existing PostgreSQL connection. \"\n \"Parameters %s* will be ignored.\" % prefix)\n warnings.warn(msg)\n return existing_client\n\n # Initialize SQLAlchemy engine from settings.\n poolclass_key = prefix + 'poolclass'\n settings.setdefault(poolclass_key, ('kinto.core.storage.postgresql.'\n 'pool.QueuePoolWithMaxBacklog'))\n settings[poolclass_key] = config.maybe_dotted(settings[poolclass_key])\n engine = sqlalchemy.engine_from_config(settings, prefix=prefix, url=url)\n\n # Initialize thread-safe session factory.\n options = {}\n if transaction_per_request:\n # Plug with Pyramid transaction manager\n options['extension'] = ZopeTransactionExtension()\n session_factory = scoped_session(sessionmaker(bind=engine, **options))\n\n # Store one client per URI.\n commit_manually = (not transaction_per_request)\n client = PostgreSQLClient(session_factory, commit_manually, invalidate)\n _CLIENTS[transaction_per_request][url] = client\n return client\n", "path": "kinto/core/storage/postgresql/client.py"}, {"content": "from __future__ import absolute_import\n\nimport os\n\nfrom kinto.core import logger\nfrom kinto.core.cache import CacheBase\nfrom kinto.core.storage.postgresql.client import create_from_config\nfrom kinto.core.utils import json\n\n\nclass Cache(CacheBase):\n \"\"\"Cache backend using PostgreSQL.\n\n Enable in configuration::\n\n kinto.cache_backend = kinto.core.cache.postgresql\n\n Database location URI can be customized::\n\n kinto.cache_url = postgres://user:[email protected]:5432/dbname\n\n Alternatively, username and password could also rely on system user ident\n or even specified in :file:`~/.pgpass` (*see PostgreSQL documentation*).\n\n .. 
note::\n\n Some tables and indices are created when ``kinto migrate`` is run.\n This requires some privileges on the database, or some error will\n be raised.\n\n **Alternatively**, the schema can be initialized outside the\n python application, using the SQL file located in\n :file:`kinto/core/cache/postgresql/schema.sql`. This allows to\n distinguish schema manipulation privileges from schema usage.\n\n\n A connection pool is enabled by default::\n\n kinto.cache_pool_size = 10\n kinto.cache_maxoverflow = 10\n kinto.cache_max_backlog = -1\n kinto.cache_pool_recycle = -1\n kinto.cache_pool_timeout = 30\n kinto.cache_poolclass =\n kinto.core.storage.postgresql.pool.QueuePoolWithMaxBacklog\n\n The ``max_backlog`` limits the number of threads that can be in the queue\n waiting for a connection. Once this limit has been reached, any further\n attempts to acquire a connection will be rejected immediately, instead of\n locking up all threads by keeping them waiting in the queue.\n\n See `dedicated section in SQLAlchemy documentation\n <http://docs.sqlalchemy.org/en/rel_1_0/core/engines.html>`_\n for default values and behaviour.\n\n .. note::\n\n Using a `dedicated connection pool <http://pgpool.net>`_ is still\n recommended to allow load balancing, replication or limit the number\n of connections used in a multi-process deployment.\n\n :noindex:\n \"\"\" # NOQA\n def __init__(self, client, *args, **kwargs):\n super(Cache, self).__init__(*args, **kwargs)\n self.client = client\n\n def initialize_schema(self, dry_run=False):\n # Check if cache table exists.\n query = \"\"\"\n SELECT 1\n FROM information_schema.tables\n WHERE table_name = 'cache';\n \"\"\"\n with self.client.connect(readonly=True) as conn:\n result = conn.execute(query)\n if result.rowcount > 0:\n logger.info(\"PostgreSQL cache schema is up-to-date.\")\n return\n\n # Create schema\n here = os.path.abspath(os.path.dirname(__file__))\n sql_file = os.path.join(here, 'schema.sql')\n\n if dry_run:\n logger.info(\"Create cache schema from %s\" % sql_file)\n return\n\n # Since called outside request, force commit.\n schema = open(sql_file).read()\n with self.client.connect(force_commit=True) as conn:\n conn.execute(schema)\n logger.info('Created PostgreSQL cache tables')\n\n def flush(self):\n query = \"\"\"\n DELETE FROM cache;\n \"\"\"\n # Since called outside request (e.g. 
tests), force commit.\n with self.client.connect(force_commit=True) as conn:\n conn.execute(query)\n logger.debug('Flushed PostgreSQL cache tables')\n\n def ttl(self, key):\n query = \"\"\"\n SELECT EXTRACT(SECOND FROM (ttl - now())) AS ttl\n FROM cache\n WHERE key = :key\n AND ttl IS NOT NULL;\n \"\"\"\n with self.client.connect(readonly=True) as conn:\n result = conn.execute(query, dict(key=self.prefix + key))\n if result.rowcount > 0:\n return result.fetchone()['ttl']\n return -1\n\n def expire(self, key, ttl):\n query = \"\"\"\n UPDATE cache SET ttl = sec2ttl(:ttl) WHERE key = :key;\n \"\"\"\n with self.client.connect() as conn:\n conn.execute(query, dict(ttl=ttl, key=self.prefix + key))\n\n def set(self, key, value, ttl=None):\n if ttl is None:\n logger.warning(\"No TTL for cache key %r\" % key)\n query = \"\"\"\n WITH upsert AS (\n UPDATE cache SET value = :value, ttl = sec2ttl(:ttl)\n WHERE key=:key\n RETURNING *)\n INSERT INTO cache (key, value, ttl)\n SELECT :key, :value, sec2ttl(:ttl)\n WHERE NOT EXISTS (SELECT * FROM upsert)\n \"\"\"\n value = json.dumps(value)\n with self.client.connect() as conn:\n conn.execute(query, dict(key=self.prefix + key,\n value=value, ttl=ttl))\n\n def get(self, key):\n purge = \"DELETE FROM cache WHERE ttl IS NOT NULL AND now() > ttl;\"\n query = \"SELECT value FROM cache WHERE key = :key;\"\n with self.client.connect() as conn:\n conn.execute(purge)\n result = conn.execute(query, dict(key=self.prefix + key))\n if result.rowcount > 0:\n value = result.fetchone()['value']\n return json.loads(value)\n\n def delete(self, key):\n query = \"DELETE FROM cache WHERE key = :key\"\n with self.client.connect() as conn:\n conn.execute(query, dict(key=self.prefix + key))\n\n\ndef load_from_config(config):\n settings = config.get_settings()\n client = create_from_config(config, prefix='cache_')\n return Cache(client=client, cache_prefix=settings['cache_prefix'])\n", "path": "kinto/core/cache/postgresql/__init__.py"}], "after_files": [{"content": "import contextlib\nimport warnings\nfrom collections import defaultdict\n\nfrom kinto.core import logger\nfrom kinto.core.storage import exceptions\nfrom kinto.core.utils import sqlalchemy\nimport transaction as zope_transaction\n\n\nclass PostgreSQLClient(object):\n def __init__(self, session_factory, commit_manually=True, invalidate=None):\n self.session_factory = session_factory\n self.commit_manually = commit_manually\n self.invalidate = invalidate or (lambda session: None)\n\n # # Register ujson, globally for all futur cursors\n # with self.connect() as cursor:\n # psycopg2.extras.register_json(cursor,\n # globally=True,\n # loads=json.loads)\n\n @contextlib.contextmanager\n def connect(self, readonly=False, force_commit=False):\n \"\"\"\n Pulls a connection from the pool when context is entered and\n returns it when context is exited.\n\n A COMMIT is performed on the current transaction if everything went\n well. 
Otherwise transaction is ROLLBACK, and everything cleaned up.\n \"\"\"\n with_transaction = not readonly and self.commit_manually\n session = None\n try:\n # Pull connection from pool.\n session = self.session_factory()\n # Start context\n yield session\n if not readonly and not self.commit_manually:\n # Mark session as dirty.\n self.invalidate(session)\n # Success\n if with_transaction:\n session.commit()\n elif force_commit:\n # Commit like would do a succesful request.\n zope_transaction.commit()\n\n except sqlalchemy.exc.SQLAlchemyError as e:\n logger.error(e)\n if session and with_transaction:\n session.rollback()\n raise exceptions.BackendError(original=e)\n\n finally:\n if session and self.commit_manually:\n # Give back to pool if commit done manually.\n session.close()\n\n\n# Reuse existing client if same URL.\n_CLIENTS = defaultdict(dict)\n\n\ndef create_from_config(config, prefix='', with_transaction=True):\n \"\"\"Create a PostgreSQLClient client using settings in the provided config.\n \"\"\"\n if sqlalchemy is None:\n message = (\"PostgreSQL SQLAlchemy dependency missing. \"\n \"Refer to installation section in documentation.\")\n raise ImportWarning(message)\n\n from zope.sqlalchemy import ZopeTransactionExtension, invalidate\n from sqlalchemy.orm import sessionmaker, scoped_session\n\n settings = config.get_settings().copy()\n # Custom Kinto settings, unsupported by SQLAlchemy.\n settings.pop(prefix + 'backend', None)\n settings.pop(prefix + 'max_fetch_size', None)\n settings.pop(prefix + 'max_size_bytes', None)\n settings.pop(prefix + 'prefix', None)\n transaction_per_request = with_transaction and settings.pop('transaction_per_request', False)\n\n url = settings[prefix + 'url']\n existing_client = _CLIENTS[transaction_per_request].get(url)\n if existing_client:\n msg = (\"Reuse existing PostgreSQL connection. \"\n \"Parameters %s* will be ignored.\" % prefix)\n warnings.warn(msg)\n return existing_client\n\n # Initialize SQLAlchemy engine from settings.\n poolclass_key = prefix + 'poolclass'\n settings.setdefault(poolclass_key, ('kinto.core.storage.postgresql.'\n 'pool.QueuePoolWithMaxBacklog'))\n settings[poolclass_key] = config.maybe_dotted(settings[poolclass_key])\n engine = sqlalchemy.engine_from_config(settings, prefix=prefix, url=url)\n\n # Initialize thread-safe session factory.\n options = {}\n if transaction_per_request:\n # Plug with Pyramid transaction manager\n options['extension'] = ZopeTransactionExtension()\n session_factory = scoped_session(sessionmaker(bind=engine, **options))\n\n # Store one client per URI.\n commit_manually = (not transaction_per_request)\n client = PostgreSQLClient(session_factory, commit_manually, invalidate)\n _CLIENTS[transaction_per_request][url] = client\n return client\n", "path": "kinto/core/storage/postgresql/client.py"}, {"content": "from __future__ import absolute_import\n\nimport os\n\nfrom kinto.core import logger\nfrom kinto.core.cache import CacheBase\nfrom kinto.core.storage.postgresql.client import create_from_config\nfrom kinto.core.utils import json\n\n\nclass Cache(CacheBase):\n \"\"\"Cache backend using PostgreSQL.\n\n Enable in configuration::\n\n kinto.cache_backend = kinto.core.cache.postgresql\n\n Database location URI can be customized::\n\n kinto.cache_url = postgres://user:[email protected]:5432/dbname\n\n Alternatively, username and password could also rely on system user ident\n or even specified in :file:`~/.pgpass` (*see PostgreSQL documentation*).\n\n .. 
note::\n\n Some tables and indices are created when ``kinto migrate`` is run.\n This requires some privileges on the database, or some error will\n be raised.\n\n **Alternatively**, the schema can be initialized outside the\n python application, using the SQL file located in\n :file:`kinto/core/cache/postgresql/schema.sql`. This allows to\n distinguish schema manipulation privileges from schema usage.\n\n\n A connection pool is enabled by default::\n\n kinto.cache_pool_size = 10\n kinto.cache_maxoverflow = 10\n kinto.cache_max_backlog = -1\n kinto.cache_pool_recycle = -1\n kinto.cache_pool_timeout = 30\n kinto.cache_poolclass =\n kinto.core.storage.postgresql.pool.QueuePoolWithMaxBacklog\n\n The ``max_backlog`` limits the number of threads that can be in the queue\n waiting for a connection. Once this limit has been reached, any further\n attempts to acquire a connection will be rejected immediately, instead of\n locking up all threads by keeping them waiting in the queue.\n\n See `dedicated section in SQLAlchemy documentation\n <http://docs.sqlalchemy.org/en/rel_1_0/core/engines.html>`_\n for default values and behaviour.\n\n .. note::\n\n Using a `dedicated connection pool <http://pgpool.net>`_ is still\n recommended to allow load balancing, replication or limit the number\n of connections used in a multi-process deployment.\n\n :noindex:\n \"\"\" # NOQA\n def __init__(self, client, *args, **kwargs):\n super(Cache, self).__init__(*args, **kwargs)\n self.client = client\n\n def initialize_schema(self, dry_run=False):\n # Check if cache table exists.\n query = \"\"\"\n SELECT 1\n FROM information_schema.tables\n WHERE table_name = 'cache';\n \"\"\"\n with self.client.connect(readonly=True) as conn:\n result = conn.execute(query)\n if result.rowcount > 0:\n logger.info(\"PostgreSQL cache schema is up-to-date.\")\n return\n\n # Create schema\n here = os.path.abspath(os.path.dirname(__file__))\n sql_file = os.path.join(here, 'schema.sql')\n\n if dry_run:\n logger.info(\"Create cache schema from %s\" % sql_file)\n return\n\n # Since called outside request, force commit.\n schema = open(sql_file).read()\n with self.client.connect(force_commit=True) as conn:\n conn.execute(schema)\n logger.info('Created PostgreSQL cache tables')\n\n def flush(self):\n query = \"\"\"\n DELETE FROM cache;\n \"\"\"\n # Since called outside request (e.g. 
tests), force commit.\n with self.client.connect(force_commit=True) as conn:\n conn.execute(query)\n logger.debug('Flushed PostgreSQL cache tables')\n\n def ttl(self, key):\n query = \"\"\"\n SELECT EXTRACT(SECOND FROM (ttl - now())) AS ttl\n FROM cache\n WHERE key = :key\n AND ttl IS NOT NULL;\n \"\"\"\n with self.client.connect(readonly=True) as conn:\n result = conn.execute(query, dict(key=self.prefix + key))\n if result.rowcount > 0:\n return result.fetchone()['ttl']\n return -1\n\n def expire(self, key, ttl):\n query = \"\"\"\n UPDATE cache SET ttl = sec2ttl(:ttl) WHERE key = :key;\n \"\"\"\n with self.client.connect() as conn:\n conn.execute(query, dict(ttl=ttl, key=self.prefix + key))\n\n def set(self, key, value, ttl=None):\n if ttl is None:\n logger.warning(\"No TTL for cache key %r\" % key)\n query = \"\"\"\n WITH upsert AS (\n UPDATE cache SET value = :value, ttl = sec2ttl(:ttl)\n WHERE key=:key\n RETURNING *)\n INSERT INTO cache (key, value, ttl)\n SELECT :key, :value, sec2ttl(:ttl)\n WHERE NOT EXISTS (SELECT * FROM upsert)\n \"\"\"\n value = json.dumps(value)\n with self.client.connect() as conn:\n conn.execute(query, dict(key=self.prefix + key,\n value=value, ttl=ttl))\n\n def get(self, key):\n purge = \"DELETE FROM cache WHERE ttl IS NOT NULL AND now() > ttl;\"\n query = \"SELECT value FROM cache WHERE key = :key;\"\n with self.client.connect() as conn:\n conn.execute(purge)\n result = conn.execute(query, dict(key=self.prefix + key))\n if result.rowcount > 0:\n value = result.fetchone()['value']\n return json.loads(value)\n\n def delete(self, key):\n query = \"DELETE FROM cache WHERE key = :key\"\n with self.client.connect() as conn:\n conn.execute(query, dict(key=self.prefix + key))\n\n\ndef load_from_config(config):\n settings = config.get_settings()\n client = create_from_config(config, prefix='cache_', with_transaction=False)\n return Cache(client=client, cache_prefix=settings['cache_prefix'])\n", "path": "kinto/core/cache/postgresql/__init__.py"}]} | 3,073 | 359 |
gh_patches_debug_32478 | rasdani/github-patches | git_diff | open-telemetry__opentelemetry-python-2080 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
format_baggage does not escape non-ascii in baggage keys
https://github.com/open-telemetry/opentelemetry-python/blob/4250078e43ddb24c88e19270c7af01ae63336fb9/opentelemetry-api/src/opentelemetry/baggage/propagation/__init__.py#L100
The C++ implementation does escape the keys, and the Python test strings appear to rely on URL encoding already.
https://github.com/open-telemetry/opentelemetry-cpp/blob/61d3c5e318830d10a0859befa046aa4847593764/api/include/opentelemetry/baggage/baggage.h#L174
--- END ISSUE ---
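For context, here is a small standalone sketch (not the library's code) of percent-encoding both baggage keys and values with the standard library, so that a non-ASCII key survives the round trip. The example entries are invented.
```
from urllib.parse import quote_plus, unquote_plus

entries = {"ключ": "значение", "user id": "42"}  # invented entries, one non-ASCII key

# Encoding the key as well as the value keeps the header pure ASCII.
header = ",".join(f"{quote_plus(k)}={quote_plus(str(v))}" for k, v in entries.items())

# Decoding reverses it: split each pair on the first "=", then unquote both sides.
decoded = {
    unquote_plus(name).strip(): unquote_plus(value).strip()
    for name, value in (pair.split("=", 1) for pair in header.split(","))
}
assert decoded == entries
```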
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `opentelemetry-api/src/opentelemetry/baggage/propagation/__init__.py`
Content:
```
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 #
15 import typing
16 import urllib.parse
17
18 from opentelemetry import baggage
19 from opentelemetry.context import get_current
20 from opentelemetry.context.context import Context
21 from opentelemetry.propagators import textmap
22
23
24 class W3CBaggagePropagator(textmap.TextMapPropagator):
25 """Extracts and injects Baggage which is used to annotate telemetry."""
26
27 _MAX_HEADER_LENGTH = 8192
28 _MAX_PAIR_LENGTH = 4096
29 _MAX_PAIRS = 180
30 _BAGGAGE_HEADER_NAME = "baggage"
31
32 def extract(
33 self,
34 carrier: textmap.CarrierT,
35 context: typing.Optional[Context] = None,
36 getter: textmap.Getter = textmap.default_getter,
37 ) -> Context:
38 """Extract Baggage from the carrier.
39
40 See
41 `opentelemetry.propagators.textmap.TextMapPropagator.extract`
42 """
43
44 if context is None:
45 context = get_current()
46
47 header = _extract_first_element(
48 getter.get(carrier, self._BAGGAGE_HEADER_NAME)
49 )
50
51 if not header or len(header) > self._MAX_HEADER_LENGTH:
52 return context
53
54 baggage_entries = header.split(",")
55 total_baggage_entries = self._MAX_PAIRS
56 for entry in baggage_entries:
57 if total_baggage_entries <= 0:
58 return context
59 total_baggage_entries -= 1
60 if len(entry) > self._MAX_PAIR_LENGTH:
61 continue
62 try:
63 name, value = entry.split("=", 1)
64 except Exception: # pylint: disable=broad-except
65 continue
66 context = baggage.set_baggage(
67 urllib.parse.unquote(name).strip(),
68 urllib.parse.unquote(value).strip(),
69 context=context,
70 )
71
72 return context
73
74 def inject(
75 self,
76 carrier: textmap.CarrierT,
77 context: typing.Optional[Context] = None,
78 setter: textmap.Setter = textmap.default_setter,
79 ) -> None:
80 """Injects Baggage into the carrier.
81
82 See
83 `opentelemetry.propagators.textmap.TextMapPropagator.inject`
84 """
85 baggage_entries = baggage.get_all(context=context)
86 if not baggage_entries:
87 return
88
89 baggage_string = _format_baggage(baggage_entries)
90 setter.set(carrier, self._BAGGAGE_HEADER_NAME, baggage_string)
91
92 @property
93 def fields(self) -> typing.Set[str]:
94 """Returns a set with the fields set in `inject`."""
95 return {self._BAGGAGE_HEADER_NAME}
96
97
98 def _format_baggage(baggage_entries: typing.Mapping[str, object]) -> str:
99 return ",".join(
100 key + "=" + urllib.parse.quote_plus(str(value))
101 for key, value in baggage_entries.items()
102 )
103
104
105 def _extract_first_element(
106 items: typing.Optional[typing.Iterable[textmap.CarrierT]],
107 ) -> typing.Optional[textmap.CarrierT]:
108 if items is None:
109 return None
110 return next(iter(items), None)
111
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/opentelemetry-api/src/opentelemetry/baggage/propagation/__init__.py b/opentelemetry-api/src/opentelemetry/baggage/propagation/__init__.py
--- a/opentelemetry-api/src/opentelemetry/baggage/propagation/__init__.py
+++ b/opentelemetry-api/src/opentelemetry/baggage/propagation/__init__.py
@@ -13,9 +13,9 @@
# limitations under the License.
#
import typing
-import urllib.parse
+from urllib.parse import quote_plus, unquote_plus
-from opentelemetry import baggage
+from opentelemetry.baggage import get_all, set_baggage
from opentelemetry.context import get_current
from opentelemetry.context.context import Context
from opentelemetry.propagators import textmap
@@ -63,9 +63,9 @@
name, value = entry.split("=", 1)
except Exception: # pylint: disable=broad-except
continue
- context = baggage.set_baggage(
- urllib.parse.unquote(name).strip(),
- urllib.parse.unquote(value).strip(),
+ context = set_baggage(
+ unquote_plus(name).strip(),
+ unquote_plus(value).strip(),
context=context,
)
@@ -82,7 +82,7 @@
See
`opentelemetry.propagators.textmap.TextMapPropagator.inject`
"""
- baggage_entries = baggage.get_all(context=context)
+ baggage_entries = get_all(context=context)
if not baggage_entries:
return
@@ -97,7 +97,7 @@
def _format_baggage(baggage_entries: typing.Mapping[str, object]) -> str:
return ",".join(
- key + "=" + urllib.parse.quote_plus(str(value))
+ quote_plus(str(key)) + "=" + quote_plus(str(value))
for key, value in baggage_entries.items()
)
| {"golden_diff": "diff --git a/opentelemetry-api/src/opentelemetry/baggage/propagation/__init__.py b/opentelemetry-api/src/opentelemetry/baggage/propagation/__init__.py\n--- a/opentelemetry-api/src/opentelemetry/baggage/propagation/__init__.py\n+++ b/opentelemetry-api/src/opentelemetry/baggage/propagation/__init__.py\n@@ -13,9 +13,9 @@\n # limitations under the License.\n #\n import typing\n-import urllib.parse\n+from urllib.parse import quote_plus, unquote_plus\n \n-from opentelemetry import baggage\n+from opentelemetry.baggage import get_all, set_baggage\n from opentelemetry.context import get_current\n from opentelemetry.context.context import Context\n from opentelemetry.propagators import textmap\n@@ -63,9 +63,9 @@\n name, value = entry.split(\"=\", 1)\n except Exception: # pylint: disable=broad-except\n continue\n- context = baggage.set_baggage(\n- urllib.parse.unquote(name).strip(),\n- urllib.parse.unquote(value).strip(),\n+ context = set_baggage(\n+ unquote_plus(name).strip(),\n+ unquote_plus(value).strip(),\n context=context,\n )\n \n@@ -82,7 +82,7 @@\n See\n `opentelemetry.propagators.textmap.TextMapPropagator.inject`\n \"\"\"\n- baggage_entries = baggage.get_all(context=context)\n+ baggage_entries = get_all(context=context)\n if not baggage_entries:\n return\n \n@@ -97,7 +97,7 @@\n \n def _format_baggage(baggage_entries: typing.Mapping[str, object]) -> str:\n return \",\".join(\n- key + \"=\" + urllib.parse.quote_plus(str(value))\n+ quote_plus(str(key)) + \"=\" + quote_plus(str(value))\n for key, value in baggage_entries.items()\n )\n", "issue": "format_baggage does not escape non-ascii in baggage keys\nhttps://github.com/open-telemetry/opentelemetry-python/blob/4250078e43ddb24c88e19270c7af01ae63336fb9/opentelemetry-api/src/opentelemetry/baggage/propagation/__init__.py#L100\r\n\r\nThe cpp implementation does this, and it looks like in python test strings there is url encoding.\r\nhttps://github.com/open-telemetry/opentelemetry-cpp/blob/61d3c5e318830d10a0859befa046aa4847593764/api/include/opentelemetry/baggage/baggage.h#L174\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nimport typing\nimport urllib.parse\n\nfrom opentelemetry import baggage\nfrom opentelemetry.context import get_current\nfrom opentelemetry.context.context import Context\nfrom opentelemetry.propagators import textmap\n\n\nclass W3CBaggagePropagator(textmap.TextMapPropagator):\n \"\"\"Extracts and injects Baggage which is used to annotate telemetry.\"\"\"\n\n _MAX_HEADER_LENGTH = 8192\n _MAX_PAIR_LENGTH = 4096\n _MAX_PAIRS = 180\n _BAGGAGE_HEADER_NAME = \"baggage\"\n\n def extract(\n self,\n carrier: textmap.CarrierT,\n context: typing.Optional[Context] = None,\n getter: textmap.Getter = textmap.default_getter,\n ) -> Context:\n \"\"\"Extract Baggage from the carrier.\n\n See\n `opentelemetry.propagators.textmap.TextMapPropagator.extract`\n \"\"\"\n\n if context is None:\n context = get_current()\n\n header = _extract_first_element(\n 
getter.get(carrier, self._BAGGAGE_HEADER_NAME)\n )\n\n if not header or len(header) > self._MAX_HEADER_LENGTH:\n return context\n\n baggage_entries = header.split(\",\")\n total_baggage_entries = self._MAX_PAIRS\n for entry in baggage_entries:\n if total_baggage_entries <= 0:\n return context\n total_baggage_entries -= 1\n if len(entry) > self._MAX_PAIR_LENGTH:\n continue\n try:\n name, value = entry.split(\"=\", 1)\n except Exception: # pylint: disable=broad-except\n continue\n context = baggage.set_baggage(\n urllib.parse.unquote(name).strip(),\n urllib.parse.unquote(value).strip(),\n context=context,\n )\n\n return context\n\n def inject(\n self,\n carrier: textmap.CarrierT,\n context: typing.Optional[Context] = None,\n setter: textmap.Setter = textmap.default_setter,\n ) -> None:\n \"\"\"Injects Baggage into the carrier.\n\n See\n `opentelemetry.propagators.textmap.TextMapPropagator.inject`\n \"\"\"\n baggage_entries = baggage.get_all(context=context)\n if not baggage_entries:\n return\n\n baggage_string = _format_baggage(baggage_entries)\n setter.set(carrier, self._BAGGAGE_HEADER_NAME, baggage_string)\n\n @property\n def fields(self) -> typing.Set[str]:\n \"\"\"Returns a set with the fields set in `inject`.\"\"\"\n return {self._BAGGAGE_HEADER_NAME}\n\n\ndef _format_baggage(baggage_entries: typing.Mapping[str, object]) -> str:\n return \",\".join(\n key + \"=\" + urllib.parse.quote_plus(str(value))\n for key, value in baggage_entries.items()\n )\n\n\ndef _extract_first_element(\n items: typing.Optional[typing.Iterable[textmap.CarrierT]],\n) -> typing.Optional[textmap.CarrierT]:\n if items is None:\n return None\n return next(iter(items), None)\n", "path": "opentelemetry-api/src/opentelemetry/baggage/propagation/__init__.py"}], "after_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nimport typing\nfrom urllib.parse import quote_plus, unquote_plus\n\nfrom opentelemetry.baggage import get_all, set_baggage\nfrom opentelemetry.context import get_current\nfrom opentelemetry.context.context import Context\nfrom opentelemetry.propagators import textmap\n\n\nclass W3CBaggagePropagator(textmap.TextMapPropagator):\n \"\"\"Extracts and injects Baggage which is used to annotate telemetry.\"\"\"\n\n _MAX_HEADER_LENGTH = 8192\n _MAX_PAIR_LENGTH = 4096\n _MAX_PAIRS = 180\n _BAGGAGE_HEADER_NAME = \"baggage\"\n\n def extract(\n self,\n carrier: textmap.CarrierT,\n context: typing.Optional[Context] = None,\n getter: textmap.Getter = textmap.default_getter,\n ) -> Context:\n \"\"\"Extract Baggage from the carrier.\n\n See\n `opentelemetry.propagators.textmap.TextMapPropagator.extract`\n \"\"\"\n\n if context is None:\n context = get_current()\n\n header = _extract_first_element(\n getter.get(carrier, self._BAGGAGE_HEADER_NAME)\n )\n\n if not header or len(header) > self._MAX_HEADER_LENGTH:\n return context\n\n baggage_entries = header.split(\",\")\n total_baggage_entries = self._MAX_PAIRS\n for entry in baggage_entries:\n if 
total_baggage_entries <= 0:\n return context\n total_baggage_entries -= 1\n if len(entry) > self._MAX_PAIR_LENGTH:\n continue\n try:\n name, value = entry.split(\"=\", 1)\n except Exception: # pylint: disable=broad-except\n continue\n context = set_baggage(\n unquote_plus(name).strip(),\n unquote_plus(value).strip(),\n context=context,\n )\n\n return context\n\n def inject(\n self,\n carrier: textmap.CarrierT,\n context: typing.Optional[Context] = None,\n setter: textmap.Setter = textmap.default_setter,\n ) -> None:\n \"\"\"Injects Baggage into the carrier.\n\n See\n `opentelemetry.propagators.textmap.TextMapPropagator.inject`\n \"\"\"\n baggage_entries = get_all(context=context)\n if not baggage_entries:\n return\n\n baggage_string = _format_baggage(baggage_entries)\n setter.set(carrier, self._BAGGAGE_HEADER_NAME, baggage_string)\n\n @property\n def fields(self) -> typing.Set[str]:\n \"\"\"Returns a set with the fields set in `inject`.\"\"\"\n return {self._BAGGAGE_HEADER_NAME}\n\n\ndef _format_baggage(baggage_entries: typing.Mapping[str, object]) -> str:\n return \",\".join(\n quote_plus(str(key)) + \"=\" + quote_plus(str(value))\n for key, value in baggage_entries.items()\n )\n\n\ndef _extract_first_element(\n items: typing.Optional[typing.Iterable[textmap.CarrierT]],\n) -> typing.Optional[textmap.CarrierT]:\n if items is None:\n return None\n return next(iter(items), None)\n", "path": "opentelemetry-api/src/opentelemetry/baggage/propagation/__init__.py"}]} | 1,471 | 419 |
gh_patches_debug_3291 | rasdani/github-patches | git_diff | DataDog__dd-trace-py-2708 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
0.50 bug sanic APM
Thanks for taking the time for reporting an issue!
Before reporting an issue on dd-trace-py, please be sure to provide all
necessary information.
If you're hitting a bug, make sure that you're using the latest version of this
library.
### Which version of dd-trace-py are you using?
0.50.0
### Which version of pip are you using?
19.0.3
### Which version of the libraries are you using?
Package Version
------------------- ---------
aiofiles 0.7.0
aiohttp 3.6.2
aiomysql 0.0.20
astroid 2.3.3
async-timeout 3.0.1
attrs 19.3.0
certifi 2019.9.11
cffi 1.13.2
chardet 3.0.4
Click 7.0
cryptography 2.8
ddtrace 0.50.0
Deprecated 1.2.11
elasticsearch 7.5.1
elasticsearch-async 6.2.0
h11 0.8.1
h2 3.1.1
hpack 3.0.0
hstspreload 2020.1.7
httpcore 0.3.0
httptools 0.0.13
httpx 0.9.3
hyperframe 5.2.0
idna 2.8
isort 4.3.21
lazy-object-proxy 1.4.3
mccabe 0.6.1
motor 2.4.0
multidict 5.1.0
packaging 21.0
peewee 3.13.1
pip 19.0.3
protobuf 3.17.3
pycparser 2.19
PyJWT 1.7.1
pymongo 3.11.4
PyMySQL 0.9.2
pyparsing 2.4.7
pytz 2019.3
PyYAML 5.3
requests 2.22.0
requests-async 0.5.0
rfc3986 1.3.2
sanic 21.3.4
sanic-motor 0.5.0
sanic-routing 0.6.2
sanic-scheduler 1.0.7
setuptools 40.8.0
six 1.14.0
sniffio 1.1.0
stringcase 1.2.0
tenacity 8.0.1
typed-ast 1.4.1
ujson 1.35
urllib3 1.25.6
uvloop 0.13.0
websockets 8.1
wrapt 1.11.2
yarl 1.4.2
### How can we reproduce your problem?
#### Description
The APM patch does not work on Sanic
when a route path parameter is declared as int.
Example code:
```
@app.route('/<gam_id:int>/slot/count', methods=['GET'])
async def slot_count(request, gam_id):
try:
pass
except Exception as e:
abort(500, e)
return json(response(200, 'Complete Successfully', {}))
```
Error
```
[2021-07-13 19:50:48 +0000] [13] [ERROR] Exception occurred while handling uri: 'http://xxxxxxxxx.xxx/25/slot/count'
NoneType: None
```
### What is the result that you get?
My production Sanic environment does not work when APM is enabled.
### What is the result that you expected?
I want to use Datadog APM with my production Sanic service.
I have already opened a pull request:
- https://github.com/DataDog/dd-trace-py/pull/2662
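For context, the failure traces back to `_get_path` (the helper changed in the accompanying diff), which builds the span resource name: with an `<gam_id:int>` route parameter, Sanic puts an `int` into `match_info`, and `str.replace()` only accepts string arguments. A minimal, self-contained sketch of the failure mode (illustrative values, not dd-trace-py code):
```
# Reproduces the underlying error outside of Sanic / dd-trace-py.
path = "/25/slot/count"
match_info = {"gam_id": 25}  # an <gam_id:int> parameter arrives as an int

for key, value in match_info.items():
    # _get_path does the equivalent of this call; it raises TypeError
    # because str.replace() expects str arguments, not int.
    path = path.replace(value, f"<{key}>")
```
Converting `value` to `str` first (or skipping values that cannot be converted) avoids the error.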
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ddtrace/contrib/sanic/patch.py`
Content:
```
1 import asyncio
2
3 import sanic
4
5 import ddtrace
6 from ddtrace import config
7 from ddtrace.constants import ANALYTICS_SAMPLE_RATE_KEY
8 from ddtrace.ext import SpanTypes
9 from ddtrace.pin import Pin
10 from ddtrace.utils.wrappers import unwrap as _u
11 from ddtrace.vendor import wrapt
12 from ddtrace.vendor.wrapt import wrap_function_wrapper as _w
13
14 from .. import trace_utils
15 from ...internal.logger import get_logger
16
17
18 log = get_logger(__name__)
19
20 config._add("sanic", dict(_default_service="sanic", distributed_tracing=True))
21
22 SANIC_PRE_21 = None
23
24
25 def update_span(span, response):
26 if isinstance(response, sanic.response.BaseHTTPResponse):
27 status_code = response.status
28 response_headers = response.headers
29 else:
30 # invalid response causes ServerError exception which must be handled
31 status_code = 500
32 response_headers = None
33 trace_utils.set_http_meta(span, config.sanic, status_code=status_code, response_headers=response_headers)
34
35
36 def _wrap_response_callback(span, callback):
37 # Only for sanic 20 and older
38 # Wrap response callbacks (either sync or async function) to set HTTP
39 # response span tags
40
41 @wrapt.function_wrapper
42 def wrap_sync(wrapped, instance, args, kwargs):
43 r = wrapped(*args, **kwargs)
44 response = args[0]
45 update_span(span, response)
46 return r
47
48 @wrapt.function_wrapper
49 async def wrap_async(wrapped, instance, args, kwargs):
50 r = await wrapped(*args, **kwargs)
51 response = args[0]
52 update_span(span, response)
53 return r
54
55 if asyncio.iscoroutinefunction(callback):
56 return wrap_async(callback)
57
58 return wrap_sync(callback)
59
60
61 async def patch_request_respond(wrapped, instance, args, kwargs):
62 # Only for sanic 21 and newer
63 # Wrap the framework response to set HTTP response span tags
64 response = await wrapped(*args, **kwargs)
65 pin = Pin._find(instance.ctx)
66 if pin is not None and pin.enabled():
67 span = pin.tracer.current_span()
68 if span is not None:
69 update_span(span, response)
70 return response
71
72
73 def _get_path(request):
74 """Get path and replace path parameter values with names if route exists."""
75 path = request.path
76 try:
77 match_info = request.match_info
78 except sanic.exceptions.SanicException:
79 return path
80 for key, value in match_info.items():
81 path = path.replace(value, f"<{key}>")
82 return path
83
84
85 async def patch_run_request_middleware(wrapped, instance, args, kwargs):
86 # Set span resource from the framework request
87 request = args[0]
88 pin = Pin._find(request.ctx)
89 if pin is not None and pin.enabled():
90 span = pin.tracer.current_span()
91 if span is not None:
92 span.resource = "{} {}".format(request.method, _get_path(request))
93 return await wrapped(*args, **kwargs)
94
95
96 def patch():
97 """Patch the instrumented methods."""
98 global SANIC_PRE_21
99
100 if getattr(sanic, "__datadog_patch", False):
101 return
102 setattr(sanic, "__datadog_patch", True)
103
104 SANIC_PRE_21 = sanic.__version__[:2] < "21"
105
106 _w("sanic", "Sanic.handle_request", patch_handle_request)
107 if not SANIC_PRE_21:
108 _w("sanic", "Sanic._run_request_middleware", patch_run_request_middleware)
109 _w(sanic.request, "Request.respond", patch_request_respond)
110
111
112 def unpatch():
113 """Unpatch the instrumented methods."""
114 _u(sanic.Sanic, "handle_request")
115 if not SANIC_PRE_21:
116 _u(sanic.Sanic, "_run_request_middleware")
117 _u(sanic.request.Request, "respond")
118 if not getattr(sanic, "__datadog_patch", False):
119 return
120 setattr(sanic, "__datadog_patch", False)
121
122
123 async def patch_handle_request(wrapped, instance, args, kwargs):
124 """Wrapper for Sanic.handle_request"""
125
126 def unwrap(request, write_callback=None, stream_callback=None, **kwargs):
127 return request, write_callback, stream_callback, kwargs
128
129 request, write_callback, stream_callback, new_kwargs = unwrap(*args, **kwargs)
130
131 if request.scheme not in ("http", "https"):
132 return await wrapped(*args, **kwargs)
133
134 pin = Pin()
135 if SANIC_PRE_21:
136 # Set span resource from the framework request
137 resource = "{} {}".format(request.method, _get_path(request))
138 else:
139 # The path is not available anymore in 21.x. Get it from
140 # the _run_request_middleware instrumented method.
141 resource = None
142 pin.onto(request.ctx)
143
144 headers = request.headers.copy()
145
146 trace_utils.activate_distributed_headers(ddtrace.tracer, int_config=config.sanic, request_headers=headers)
147
148 with pin.tracer.trace(
149 "sanic.request",
150 service=trace_utils.int_service(None, config.sanic),
151 resource=resource,
152 span_type=SpanTypes.WEB,
153 ) as span:
154 sample_rate = config.sanic.get_analytics_sample_rate(use_global_config=True)
155 if sample_rate is not None:
156 span.set_tag(ANALYTICS_SAMPLE_RATE_KEY, sample_rate)
157
158 method = request.method
159 url = "{scheme}://{host}{path}".format(scheme=request.scheme, host=request.host, path=request.path)
160 query_string = request.query_string
161 if isinstance(query_string, bytes):
162 query_string = query_string.decode()
163 trace_utils.set_http_meta(
164 span, config.sanic, method=method, url=url, query=query_string, request_headers=headers
165 )
166
167 if write_callback is not None:
168 new_kwargs["write_callback"] = _wrap_response_callback(span, write_callback)
169 if stream_callback is not None:
170 new_kwargs["stream_callback"] = _wrap_response_callback(span, stream_callback)
171
172 return await wrapped(request, **new_kwargs)
173
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ddtrace/contrib/sanic/patch.py b/ddtrace/contrib/sanic/patch.py
--- a/ddtrace/contrib/sanic/patch.py
+++ b/ddtrace/contrib/sanic/patch.py
@@ -78,6 +78,11 @@
except sanic.exceptions.SanicException:
return path
for key, value in match_info.items():
+ try:
+ value = str(value)
+ except Exception:
+ # Best effort
+ continue
path = path.replace(value, f"<{key}>")
return path
| {"golden_diff": "diff --git a/ddtrace/contrib/sanic/patch.py b/ddtrace/contrib/sanic/patch.py\n--- a/ddtrace/contrib/sanic/patch.py\n+++ b/ddtrace/contrib/sanic/patch.py\n@@ -78,6 +78,11 @@\n except sanic.exceptions.SanicException:\n return path\n for key, value in match_info.items():\n+ try:\n+ value = str(value)\n+ except Exception:\n+ # Best effort\n+ continue\n path = path.replace(value, f\"<{key}>\")\n return path\n", "issue": "0.50 bug sanic APM\nThanks for taking the time for reporting an issue!\r\n\r\nBefore reporting an issue on dd-trace-py, please be sure to provide all\r\nnecessary information.\r\n\r\nIf you're hitting a bug, make sure that you're using the latest version of this\r\nlibrary.\r\n\r\n### Which version of dd-trace-py are you using?\r\n0.50.0\r\n\r\n### Which version of pip are you using?\r\n19.0.3\r\n\r\n### Which version of the libraries are you using?\r\nPackage Version\r\n------------------- ---------\r\naiofiles 0.7.0\r\naiohttp 3.6.2\r\naiomysql 0.0.20\r\nastroid 2.3.3\r\nasync-timeout 3.0.1\r\nattrs 19.3.0\r\ncertifi 2019.9.11\r\ncffi 1.13.2\r\nchardet 3.0.4\r\nClick 7.0\r\ncryptography 2.8\r\nddtrace 0.50.0\r\nDeprecated 1.2.11\r\nelasticsearch 7.5.1\r\nelasticsearch-async 6.2.0\r\nh11 0.8.1\r\nh2 3.1.1\r\nhpack 3.0.0\r\nhstspreload 2020.1.7\r\nhttpcore 0.3.0\r\nhttptools 0.0.13\r\nhttpx 0.9.3\r\nhyperframe 5.2.0\r\nidna 2.8\r\nisort 4.3.21\r\nlazy-object-proxy 1.4.3\r\nmccabe 0.6.1\r\nmotor 2.4.0\r\nmultidict 5.1.0\r\npackaging 21.0\r\npeewee 3.13.1\r\npip 19.0.3\r\nprotobuf 3.17.3\r\npycparser 2.19\r\nPyJWT 1.7.1\r\npymongo 3.11.4\r\nPyMySQL 0.9.2\r\npyparsing 2.4.7\r\npytz 2019.3\r\nPyYAML 5.3\r\nrequests 2.22.0\r\nrequests-async 0.5.0\r\nrfc3986 1.3.2\r\nsanic 21.3.4\r\nsanic-motor 0.5.0\r\nsanic-routing 0.6.2\r\nsanic-scheduler 1.0.7\r\nsetuptools 40.8.0\r\nsix 1.14.0\r\nsniffio 1.1.0\r\nstringcase 1.2.0\r\ntenacity 8.0.1\r\ntyped-ast 1.4.1\r\nujson 1.35\r\nurllib3 1.25.6\r\nuvloop 0.13.0\r\nwebsockets 8.1\r\nwrapt 1.11.2\r\nyarl 1.4.2\r\n\r\n### How can we reproduce your problem?\r\n#### Description\r\nIt's not working patch when apply APM on Sanic\r\nIf path variable type is int on sanic route\r\n\r\nCase code\r\n```\r\[email protected]('/<gam_id:int>/slot/count', methods=['GET'])\r\nasync def slot_count(request, gam_id):\r\n try:\r\n pass\r\n except Exception as e:\r\n abort(500, e)\r\n return json(response(200, 'Complete Successfully', {}))\r\n\r\n```\r\n\r\nError\r\n```\r\n[2021-07-13 19:50:48 +0000] [13] [ERROR] Exception occurred while handling uri: 'http://xxxxxxxxx.xxx/25/slot/count'\r\nNoneType: None\r\n\r\n```\r\n\r\n### What is the result that you get?\r\nmy production env is not working on Sanic\r\n\r\n\r\n### What is the result that you expected?\r\nI wanna use datadog APM on my production SANIC\r\n\r\nI made already pull request\r\n- https://github.com/DataDog/dd-trace-py/pull/2662\r\n\n", "before_files": [{"content": "import asyncio\n\nimport sanic\n\nimport ddtrace\nfrom ddtrace import config\nfrom ddtrace.constants import ANALYTICS_SAMPLE_RATE_KEY\nfrom ddtrace.ext import SpanTypes\nfrom ddtrace.pin import Pin\nfrom ddtrace.utils.wrappers import unwrap as _u\nfrom ddtrace.vendor import wrapt\nfrom ddtrace.vendor.wrapt import wrap_function_wrapper as _w\n\nfrom .. 
import trace_utils\nfrom ...internal.logger import get_logger\n\n\nlog = get_logger(__name__)\n\nconfig._add(\"sanic\", dict(_default_service=\"sanic\", distributed_tracing=True))\n\nSANIC_PRE_21 = None\n\n\ndef update_span(span, response):\n if isinstance(response, sanic.response.BaseHTTPResponse):\n status_code = response.status\n response_headers = response.headers\n else:\n # invalid response causes ServerError exception which must be handled\n status_code = 500\n response_headers = None\n trace_utils.set_http_meta(span, config.sanic, status_code=status_code, response_headers=response_headers)\n\n\ndef _wrap_response_callback(span, callback):\n # Only for sanic 20 and older\n # Wrap response callbacks (either sync or async function) to set HTTP\n # response span tags\n\n @wrapt.function_wrapper\n def wrap_sync(wrapped, instance, args, kwargs):\n r = wrapped(*args, **kwargs)\n response = args[0]\n update_span(span, response)\n return r\n\n @wrapt.function_wrapper\n async def wrap_async(wrapped, instance, args, kwargs):\n r = await wrapped(*args, **kwargs)\n response = args[0]\n update_span(span, response)\n return r\n\n if asyncio.iscoroutinefunction(callback):\n return wrap_async(callback)\n\n return wrap_sync(callback)\n\n\nasync def patch_request_respond(wrapped, instance, args, kwargs):\n # Only for sanic 21 and newer\n # Wrap the framework response to set HTTP response span tags\n response = await wrapped(*args, **kwargs)\n pin = Pin._find(instance.ctx)\n if pin is not None and pin.enabled():\n span = pin.tracer.current_span()\n if span is not None:\n update_span(span, response)\n return response\n\n\ndef _get_path(request):\n \"\"\"Get path and replace path parameter values with names if route exists.\"\"\"\n path = request.path\n try:\n match_info = request.match_info\n except sanic.exceptions.SanicException:\n return path\n for key, value in match_info.items():\n path = path.replace(value, f\"<{key}>\")\n return path\n\n\nasync def patch_run_request_middleware(wrapped, instance, args, kwargs):\n # Set span resource from the framework request\n request = args[0]\n pin = Pin._find(request.ctx)\n if pin is not None and pin.enabled():\n span = pin.tracer.current_span()\n if span is not None:\n span.resource = \"{} {}\".format(request.method, _get_path(request))\n return await wrapped(*args, **kwargs)\n\n\ndef patch():\n \"\"\"Patch the instrumented methods.\"\"\"\n global SANIC_PRE_21\n\n if getattr(sanic, \"__datadog_patch\", False):\n return\n setattr(sanic, \"__datadog_patch\", True)\n\n SANIC_PRE_21 = sanic.__version__[:2] < \"21\"\n\n _w(\"sanic\", \"Sanic.handle_request\", patch_handle_request)\n if not SANIC_PRE_21:\n _w(\"sanic\", \"Sanic._run_request_middleware\", patch_run_request_middleware)\n _w(sanic.request, \"Request.respond\", patch_request_respond)\n\n\ndef unpatch():\n \"\"\"Unpatch the instrumented methods.\"\"\"\n _u(sanic.Sanic, \"handle_request\")\n if not SANIC_PRE_21:\n _u(sanic.Sanic, \"_run_request_middleware\")\n _u(sanic.request.Request, \"respond\")\n if not getattr(sanic, \"__datadog_patch\", False):\n return\n setattr(sanic, \"__datadog_patch\", False)\n\n\nasync def patch_handle_request(wrapped, instance, args, kwargs):\n \"\"\"Wrapper for Sanic.handle_request\"\"\"\n\n def unwrap(request, write_callback=None, stream_callback=None, **kwargs):\n return request, write_callback, stream_callback, kwargs\n\n request, write_callback, stream_callback, new_kwargs = unwrap(*args, **kwargs)\n\n if request.scheme not in (\"http\", \"https\"):\n return await 
wrapped(*args, **kwargs)\n\n pin = Pin()\n if SANIC_PRE_21:\n # Set span resource from the framework request\n resource = \"{} {}\".format(request.method, _get_path(request))\n else:\n # The path is not available anymore in 21.x. Get it from\n # the _run_request_middleware instrumented method.\n resource = None\n pin.onto(request.ctx)\n\n headers = request.headers.copy()\n\n trace_utils.activate_distributed_headers(ddtrace.tracer, int_config=config.sanic, request_headers=headers)\n\n with pin.tracer.trace(\n \"sanic.request\",\n service=trace_utils.int_service(None, config.sanic),\n resource=resource,\n span_type=SpanTypes.WEB,\n ) as span:\n sample_rate = config.sanic.get_analytics_sample_rate(use_global_config=True)\n if sample_rate is not None:\n span.set_tag(ANALYTICS_SAMPLE_RATE_KEY, sample_rate)\n\n method = request.method\n url = \"{scheme}://{host}{path}\".format(scheme=request.scheme, host=request.host, path=request.path)\n query_string = request.query_string\n if isinstance(query_string, bytes):\n query_string = query_string.decode()\n trace_utils.set_http_meta(\n span, config.sanic, method=method, url=url, query=query_string, request_headers=headers\n )\n\n if write_callback is not None:\n new_kwargs[\"write_callback\"] = _wrap_response_callback(span, write_callback)\n if stream_callback is not None:\n new_kwargs[\"stream_callback\"] = _wrap_response_callback(span, stream_callback)\n\n return await wrapped(request, **new_kwargs)\n", "path": "ddtrace/contrib/sanic/patch.py"}], "after_files": [{"content": "import asyncio\n\nimport sanic\n\nimport ddtrace\nfrom ddtrace import config\nfrom ddtrace.constants import ANALYTICS_SAMPLE_RATE_KEY\nfrom ddtrace.ext import SpanTypes\nfrom ddtrace.pin import Pin\nfrom ddtrace.utils.wrappers import unwrap as _u\nfrom ddtrace.vendor import wrapt\nfrom ddtrace.vendor.wrapt import wrap_function_wrapper as _w\n\nfrom .. 
import trace_utils\nfrom ...internal.logger import get_logger\n\n\nlog = get_logger(__name__)\n\nconfig._add(\"sanic\", dict(_default_service=\"sanic\", distributed_tracing=True))\n\nSANIC_PRE_21 = None\n\n\ndef update_span(span, response):\n if isinstance(response, sanic.response.BaseHTTPResponse):\n status_code = response.status\n response_headers = response.headers\n else:\n # invalid response causes ServerError exception which must be handled\n status_code = 500\n response_headers = None\n trace_utils.set_http_meta(span, config.sanic, status_code=status_code, response_headers=response_headers)\n\n\ndef _wrap_response_callback(span, callback):\n # Only for sanic 20 and older\n # Wrap response callbacks (either sync or async function) to set HTTP\n # response span tags\n\n @wrapt.function_wrapper\n def wrap_sync(wrapped, instance, args, kwargs):\n r = wrapped(*args, **kwargs)\n response = args[0]\n update_span(span, response)\n return r\n\n @wrapt.function_wrapper\n async def wrap_async(wrapped, instance, args, kwargs):\n r = await wrapped(*args, **kwargs)\n response = args[0]\n update_span(span, response)\n return r\n\n if asyncio.iscoroutinefunction(callback):\n return wrap_async(callback)\n\n return wrap_sync(callback)\n\n\nasync def patch_request_respond(wrapped, instance, args, kwargs):\n # Only for sanic 21 and newer\n # Wrap the framework response to set HTTP response span tags\n response = await wrapped(*args, **kwargs)\n pin = Pin._find(instance.ctx)\n if pin is not None and pin.enabled():\n span = pin.tracer.current_span()\n if span is not None:\n update_span(span, response)\n return response\n\n\ndef _get_path(request):\n \"\"\"Get path and replace path parameter values with names if route exists.\"\"\"\n path = request.path\n try:\n match_info = request.match_info\n except sanic.exceptions.SanicException:\n return path\n for key, value in match_info.items():\n try:\n value = str(value)\n except Exception:\n # Best effort\n continue\n path = path.replace(value, f\"<{key}>\")\n return path\n\n\nasync def patch_run_request_middleware(wrapped, instance, args, kwargs):\n # Set span resource from the framework request\n request = args[0]\n pin = Pin._find(request.ctx)\n if pin is not None and pin.enabled():\n span = pin.tracer.current_span()\n if span is not None:\n span.resource = \"{} {}\".format(request.method, _get_path(request))\n return await wrapped(*args, **kwargs)\n\n\ndef patch():\n \"\"\"Patch the instrumented methods.\"\"\"\n global SANIC_PRE_21\n\n if getattr(sanic, \"__datadog_patch\", False):\n return\n setattr(sanic, \"__datadog_patch\", True)\n\n SANIC_PRE_21 = sanic.__version__[:2] < \"21\"\n\n _w(\"sanic\", \"Sanic.handle_request\", patch_handle_request)\n if not SANIC_PRE_21:\n _w(\"sanic\", \"Sanic._run_request_middleware\", patch_run_request_middleware)\n _w(sanic.request, \"Request.respond\", patch_request_respond)\n\n\ndef unpatch():\n \"\"\"Unpatch the instrumented methods.\"\"\"\n _u(sanic.Sanic, \"handle_request\")\n if not SANIC_PRE_21:\n _u(sanic.Sanic, \"_run_request_middleware\")\n _u(sanic.request.Request, \"respond\")\n if not getattr(sanic, \"__datadog_patch\", False):\n return\n setattr(sanic, \"__datadog_patch\", False)\n\n\nasync def patch_handle_request(wrapped, instance, args, kwargs):\n \"\"\"Wrapper for Sanic.handle_request\"\"\"\n\n def unwrap(request, write_callback=None, stream_callback=None, **kwargs):\n return request, write_callback, stream_callback, kwargs\n\n request, write_callback, stream_callback, new_kwargs = unwrap(*args, 
**kwargs)\n\n if request.scheme not in (\"http\", \"https\"):\n return await wrapped(*args, **kwargs)\n\n pin = Pin()\n if SANIC_PRE_21:\n # Set span resource from the framework request\n resource = \"{} {}\".format(request.method, _get_path(request))\n else:\n # The path is not available anymore in 21.x. Get it from\n # the _run_request_middleware instrumented method.\n resource = None\n pin.onto(request.ctx)\n\n headers = request.headers.copy()\n\n trace_utils.activate_distributed_headers(ddtrace.tracer, int_config=config.sanic, request_headers=headers)\n\n with pin.tracer.trace(\n \"sanic.request\",\n service=trace_utils.int_service(None, config.sanic),\n resource=resource,\n span_type=SpanTypes.WEB,\n ) as span:\n sample_rate = config.sanic.get_analytics_sample_rate(use_global_config=True)\n if sample_rate is not None:\n span.set_tag(ANALYTICS_SAMPLE_RATE_KEY, sample_rate)\n\n method = request.method\n url = \"{scheme}://{host}{path}\".format(scheme=request.scheme, host=request.host, path=request.path)\n query_string = request.query_string\n if isinstance(query_string, bytes):\n query_string = query_string.decode()\n trace_utils.set_http_meta(\n span, config.sanic, method=method, url=url, query=query_string, request_headers=headers\n )\n\n if write_callback is not None:\n new_kwargs[\"write_callback\"] = _wrap_response_callback(span, write_callback)\n if stream_callback is not None:\n new_kwargs[\"stream_callback\"] = _wrap_response_callback(span, stream_callback)\n\n return await wrapped(request, **new_kwargs)\n", "path": "ddtrace/contrib/sanic/patch.py"}]} | 2,997 | 127 |
gh_patches_debug_35121 | rasdani/github-patches | git_diff | dbt-labs__dbt-core-1129 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
deprecate sql_where
See also:
The `sql_where` config isn't actually useful and people shouldn't use it. We should:
- add a deprecation warning when `sql_where` is provided
- make `sql_where` no longer required
- revamp docs around incremental models (https://github.com/fishtown-analytics/dbt/issues/1036)
- make it easier/faster to do the "advanced" version of incremental models
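A deprecation warning for the first item can follow the existing `DBTDeprecation` pattern in `dbt/deprecations.py` (included in the files below); a rough sketch, with the class name and wording only illustrative:
```
# Sketch only -- in dbt this would live inside dbt/deprecations.py next to
# SeedDropExistingDeprecation and be registered in deprecations_list there.
from dbt.deprecations import DBTDeprecation


class SqlWhereDeprecation(DBTDeprecation):
    name = "sql_where"
    description = """The `sql_where` option for incremental models is deprecated
    and will be removed in a future release. Check the docs on incremental
    models for the recommended replacement."""
```
Callers would then emit the warning with `dbt.deprecations.warn("sql_where")` wherever a parsed model config still contains `sql_where`.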
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `dbt/links.py`
Content:
```
1
2 SnowflakeQuotingDocs = 'https://docs.getdbt.com/v0.10/docs/configuring-quoting'
3
```
Path: `dbt/compilation.py`
Content:
```
1 import itertools
2 import os
3 import json
4 from collections import OrderedDict, defaultdict
5 import sqlparse
6
7 import dbt.utils
8 import dbt.include
9 import dbt.tracking
10
11 from dbt.utils import get_materialization, NodeType, is_type
12
13 from dbt.linker import Linker
14
15 import dbt.compat
16 import dbt.context.runtime
17 import dbt.contracts.project
18 import dbt.exceptions
19 import dbt.flags
20 import dbt.loader
21 import dbt.config
22 from dbt.contracts.graph.compiled import CompiledNode, CompiledGraph
23
24 from dbt.clients.system import write_json
25 from dbt.logger import GLOBAL_LOGGER as logger
26
27 graph_file_name = 'graph.gpickle'
28 manifest_file_name = 'manifest.json'
29
30
31 def print_compile_stats(stats):
32 names = {
33 NodeType.Model: 'models',
34 NodeType.Test: 'tests',
35 NodeType.Archive: 'archives',
36 NodeType.Analysis: 'analyses',
37 NodeType.Macro: 'macros',
38 NodeType.Operation: 'operations',
39 NodeType.Seed: 'seed files',
40 }
41
42 results = {k: 0 for k in names.keys()}
43 results.update(stats)
44
45 stat_line = ", ".join(
46 ["{} {}".format(ct, names.get(t)) for t, ct in results.items()])
47
48 logger.info("Found {}".format(stat_line))
49
50
51 def _add_prepended_cte(prepended_ctes, new_cte):
52 for dct in prepended_ctes:
53 if dct['id'] == new_cte['id']:
54 dct['sql'] = new_cte['sql']
55 return
56 prepended_ctes.append(new_cte)
57
58
59 def _extend_prepended_ctes(prepended_ctes, new_prepended_ctes):
60 for new_cte in new_prepended_ctes:
61 _add_prepended_cte(prepended_ctes, new_cte)
62
63
64 def prepend_ctes(model, manifest):
65 model, _, manifest = recursively_prepend_ctes(model, manifest)
66
67 return (model, manifest)
68
69
70 def recursively_prepend_ctes(model, manifest):
71 if model.extra_ctes_injected:
72 return (model, model.extra_ctes, manifest)
73
74 if dbt.flags.STRICT_MODE:
75 # ensure that all the nodes in this manifest are compiled
76 CompiledGraph(**manifest.to_flat_graph())
77
78 prepended_ctes = []
79
80 for cte in model.extra_ctes:
81 cte_id = cte['id']
82 cte_to_add = manifest.nodes.get(cte_id)
83 cte_to_add, new_prepended_ctes, manifest = recursively_prepend_ctes(
84 cte_to_add, manifest)
85
86 _extend_prepended_ctes(prepended_ctes, new_prepended_ctes)
87 new_cte_name = '__dbt__CTE__{}'.format(cte_to_add.get('name'))
88 sql = ' {} as (\n{}\n)'.format(new_cte_name, cte_to_add.compiled_sql)
89 _add_prepended_cte(prepended_ctes, {'id': cte_id, 'sql': sql})
90
91 model.prepend_ctes(prepended_ctes)
92
93 manifest.nodes[model.unique_id] = model
94
95 return (model, prepended_ctes, manifest)
96
97
98 class Compiler(object):
99 def __init__(self, config):
100 self.config = config
101
102 def initialize(self):
103 dbt.clients.system.make_directory(self.config.target_path)
104 dbt.clients.system.make_directory(self.config.modules_path)
105
106 def compile_node(self, node, manifest, extra_context=None):
107 if extra_context is None:
108 extra_context = {}
109
110 logger.debug("Compiling {}".format(node.get('unique_id')))
111
112 data = node.to_dict()
113 data.update({
114 'compiled': False,
115 'compiled_sql': None,
116 'extra_ctes_injected': False,
117 'extra_ctes': [],
118 'injected_sql': None,
119 })
120 compiled_node = CompiledNode(**data)
121
122 context = dbt.context.runtime.generate(
123 compiled_node, self.config, manifest)
124 context.update(extra_context)
125
126 compiled_node.compiled_sql = dbt.clients.jinja.get_rendered(
127 node.get('raw_sql'),
128 context,
129 node)
130
131 compiled_node.compiled = True
132
133 injected_node, _ = prepend_ctes(compiled_node, manifest)
134
135 should_wrap = {NodeType.Test, NodeType.Operation}
136 if injected_node.resource_type in should_wrap:
137 # data tests get wrapped in count(*)
138 # TODO : move this somewhere more reasonable
139 if 'data' in injected_node.tags and \
140 is_type(injected_node, NodeType.Test):
141 injected_node.wrapped_sql = (
142 "select count(*) from (\n{test_sql}\n) sbq").format(
143 test_sql=injected_node.injected_sql)
144 else:
145 # don't wrap schema tests or analyses.
146 injected_node.wrapped_sql = injected_node.injected_sql
147
148 elif is_type(injected_node, NodeType.Archive):
149 # unfortunately we do everything automagically for
150 # archives. in the future it'd be nice to generate
151 # the SQL at the parser level.
152 pass
153
154 elif(is_type(injected_node, NodeType.Model) and
155 get_materialization(injected_node) == 'ephemeral'):
156 pass
157
158 else:
159 injected_node.wrapped_sql = None
160
161 return injected_node
162
163 def write_manifest_file(self, manifest):
164 """Write the manifest file to disk.
165
166 manifest should be a Manifest.
167 """
168 filename = manifest_file_name
169 manifest_path = os.path.join(self.config.target_path, filename)
170 write_json(manifest_path, manifest.serialize())
171
172 def write_graph_file(self, linker, manifest):
173 filename = graph_file_name
174 graph_path = os.path.join(self.config.target_path, filename)
175 linker.write_graph(graph_path, manifest)
176
177 def link_node(self, linker, node, manifest):
178 linker.add_node(node.unique_id)
179
180 for dependency in node.depends_on_nodes:
181 if manifest.nodes.get(dependency):
182 linker.dependency(
183 node.unique_id,
184 (manifest.nodes.get(dependency).unique_id))
185
186 else:
187 dbt.exceptions.dependency_not_found(node, dependency)
188
189 def link_graph(self, linker, manifest):
190 for node in manifest.nodes.values():
191 self.link_node(linker, node, manifest)
192
193 cycle = linker.find_cycles()
194
195 if cycle:
196 raise RuntimeError("Found a cycle: {}".format(cycle))
197
198 def get_all_projects(self):
199 all_projects = {self.config.project_name: self.config}
200 dependency_projects = dbt.utils.dependency_projects(self.config)
201
202 for project_cfg in dependency_projects:
203 name = project_cfg.project_name
204 all_projects[name] = project_cfg
205
206 if dbt.flags.STRICT_MODE:
207 dbt.contracts.project.ProjectList(**all_projects)
208
209 return all_projects
210
211 def _check_resource_uniqueness(cls, manifest):
212 names_resources = {}
213 alias_resources = {}
214
215 for resource, node in manifest.nodes.items():
216 if node.resource_type not in NodeType.refable():
217 continue
218
219 name = node.name
220 alias = "{}.{}".format(node.schema, node.alias)
221
222 existing_node = names_resources.get(name)
223 if existing_node is not None:
224 dbt.exceptions.raise_duplicate_resource_name(
225 existing_node, node)
226
227 existing_alias = alias_resources.get(alias)
228 if existing_alias is not None:
229 dbt.exceptions.raise_ambiguous_alias(
230 existing_alias, node)
231
232 names_resources[name] = node
233 alias_resources[alias] = node
234
235 def compile(self):
236 linker = Linker()
237
238 all_projects = self.get_all_projects()
239
240 manifest = dbt.loader.GraphLoader.load_all(self.config, all_projects)
241
242 self.write_manifest_file(manifest)
243
244 self._check_resource_uniqueness(manifest)
245
246 resource_fqns = manifest.get_resource_fqns()
247 disabled_fqns = [n.fqn for n in manifest.disabled]
248 self.config.warn_for_unused_resource_config_paths(resource_fqns,
249 disabled_fqns)
250
251 self.link_graph(linker, manifest)
252
253 stats = defaultdict(int)
254
255 for node_name, node in itertools.chain(
256 manifest.nodes.items(),
257 manifest.macros.items()):
258 stats[node.resource_type] += 1
259
260 self.write_graph_file(linker, manifest)
261 print_compile_stats(stats)
262
263 return manifest, linker
264
```
Path: `dbt/deprecations.py`
Content:
```
1 from dbt.logger import GLOBAL_LOGGER as logger
2
3
4 class DBTDeprecation(object):
5 name = None
6 description = None
7
8 def show(self, *args, **kwargs):
9 if self.name not in active_deprecations:
10 desc = self.description.format(**kwargs)
11 logger.info("* Deprecation Warning: {}\n".format(desc))
12 active_deprecations.add(self.name)
13
14
15 class DBTRepositoriesDeprecation(DBTDeprecation):
16 name = "repositories"
17 description = """The dbt_project.yml configuration option 'repositories' is
18 deprecated. Please place dependencies in the `packages.yml` file instead.
19 The 'repositories' option will be removed in a future version of dbt.
20
21 For more information, see: https://docs.getdbt.com/docs/package-management
22
23 # Example packages.yml contents:
24
25 {recommendation}
26 """
27
28
29 class SeedDropExistingDeprecation(DBTDeprecation):
30 name = 'drop-existing'
31 description = """The --drop-existing argument to `dbt seed` has been
32 deprecated. Please use --full-refresh instead. The --drop-existing option
33 will be removed in a future version of dbt."""
34
35
36 def warn(name, *args, **kwargs):
37 if name not in deprecations:
38 # this should (hopefully) never happen
39 raise RuntimeError(
40 "Error showing deprecation warning: {}".format(name)
41 )
42
43 deprecations[name].show(*args, **kwargs)
44
45
46 # these are globally available
47 # since modules are only imported once, active_deprecations is a singleton
48
49 active_deprecations = set()
50
51 deprecations_list = [
52 DBTRepositoriesDeprecation(),
53 SeedDropExistingDeprecation()
54 ]
55
56 deprecations = {d.name: d for d in deprecations_list}
57
58
59 def reset_deprecations():
60 active_deprecations.clear()
61
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/dbt/compilation.py b/dbt/compilation.py
--- a/dbt/compilation.py
+++ b/dbt/compilation.py
@@ -232,6 +232,12 @@
names_resources[name] = node
alias_resources[alias] = node
+ def warn_for_deprecated_configs(self, manifest):
+ for unique_id, node in manifest.nodes.items():
+ is_model = node.resource_type == NodeType.Model
+ if is_model and 'sql_where' in node.config:
+ dbt.deprecations.warn('sql_where')
+
def compile(self):
linker = Linker()
@@ -247,6 +253,7 @@
disabled_fqns = [n.fqn for n in manifest.disabled]
self.config.warn_for_unused_resource_config_paths(resource_fqns,
disabled_fqns)
+ self.warn_for_deprecated_configs(manifest)
self.link_graph(linker, manifest)
diff --git a/dbt/deprecations.py b/dbt/deprecations.py
--- a/dbt/deprecations.py
+++ b/dbt/deprecations.py
@@ -1,4 +1,5 @@
from dbt.logger import GLOBAL_LOGGER as logger
+import dbt.links
class DBTDeprecation(object):
@@ -26,6 +27,16 @@
"""
+class SqlWhereDeprecation(DBTDeprecation):
+ name = "sql_where"
+ description = """\
+The `sql_where` option for incremental models is deprecated and will be
+ removed in a future release. Check the docs for more information
+
+ {}
+ """.format(dbt.links.IncrementalDocs)
+
+
class SeedDropExistingDeprecation(DBTDeprecation):
name = 'drop-existing'
description = """The --drop-existing argument to `dbt seed` has been
@@ -50,7 +61,8 @@
deprecations_list = [
DBTRepositoriesDeprecation(),
- SeedDropExistingDeprecation()
+ SeedDropExistingDeprecation(),
+ SqlWhereDeprecation(),
]
deprecations = {d.name: d for d in deprecations_list}
diff --git a/dbt/links.py b/dbt/links.py
--- a/dbt/links.py
+++ b/dbt/links.py
@@ -1,2 +1,3 @@
SnowflakeQuotingDocs = 'https://docs.getdbt.com/v0.10/docs/configuring-quoting'
+IncrementalDocs = 'https://docs.getdbt.com/docs/configuring-incremental-models'
| {"golden_diff": "diff --git a/dbt/compilation.py b/dbt/compilation.py\n--- a/dbt/compilation.py\n+++ b/dbt/compilation.py\n@@ -232,6 +232,12 @@\n names_resources[name] = node\n alias_resources[alias] = node\n \n+ def warn_for_deprecated_configs(self, manifest):\n+ for unique_id, node in manifest.nodes.items():\n+ is_model = node.resource_type == NodeType.Model\n+ if is_model and 'sql_where' in node.config:\n+ dbt.deprecations.warn('sql_where')\n+\n def compile(self):\n linker = Linker()\n \n@@ -247,6 +253,7 @@\n disabled_fqns = [n.fqn for n in manifest.disabled]\n self.config.warn_for_unused_resource_config_paths(resource_fqns,\n disabled_fqns)\n+ self.warn_for_deprecated_configs(manifest)\n \n self.link_graph(linker, manifest)\n \ndiff --git a/dbt/deprecations.py b/dbt/deprecations.py\n--- a/dbt/deprecations.py\n+++ b/dbt/deprecations.py\n@@ -1,4 +1,5 @@\n from dbt.logger import GLOBAL_LOGGER as logger\n+import dbt.links\n \n \n class DBTDeprecation(object):\n@@ -26,6 +27,16 @@\n \"\"\"\n \n \n+class SqlWhereDeprecation(DBTDeprecation):\n+ name = \"sql_where\"\n+ description = \"\"\"\\\n+The `sql_where` option for incremental models is deprecated and will be\n+ removed in a future release. Check the docs for more information\n+\n+ {}\n+ \"\"\".format(dbt.links.IncrementalDocs)\n+\n+\n class SeedDropExistingDeprecation(DBTDeprecation):\n name = 'drop-existing'\n description = \"\"\"The --drop-existing argument to `dbt seed` has been\n@@ -50,7 +61,8 @@\n \n deprecations_list = [\n DBTRepositoriesDeprecation(),\n- SeedDropExistingDeprecation()\n+ SeedDropExistingDeprecation(),\n+ SqlWhereDeprecation(),\n ]\n \n deprecations = {d.name: d for d in deprecations_list}\ndiff --git a/dbt/links.py b/dbt/links.py\n--- a/dbt/links.py\n+++ b/dbt/links.py\n@@ -1,2 +1,3 @@\n \n SnowflakeQuotingDocs = 'https://docs.getdbt.com/v0.10/docs/configuring-quoting'\n+IncrementalDocs = 'https://docs.getdbt.com/docs/configuring-incremental-models'\n", "issue": "deprecate sql_where\nSee also: \r\n\r\nThe `sql_where` config isn't actually useful and people shouldn't use it. 
We should:\r\n- add a deprecation warning when `sql_where` is provided\r\n- make `sql_where` no longer required\r\n- revamp docs around incremental models (https://github.com/fishtown-analytics/dbt/issues/1036)\r\n- make it easier/faster to do the \"advanced\" version of incremental models\n", "before_files": [{"content": "\nSnowflakeQuotingDocs = 'https://docs.getdbt.com/v0.10/docs/configuring-quoting'\n", "path": "dbt/links.py"}, {"content": "import itertools\nimport os\nimport json\nfrom collections import OrderedDict, defaultdict\nimport sqlparse\n\nimport dbt.utils\nimport dbt.include\nimport dbt.tracking\n\nfrom dbt.utils import get_materialization, NodeType, is_type\n\nfrom dbt.linker import Linker\n\nimport dbt.compat\nimport dbt.context.runtime\nimport dbt.contracts.project\nimport dbt.exceptions\nimport dbt.flags\nimport dbt.loader\nimport dbt.config\nfrom dbt.contracts.graph.compiled import CompiledNode, CompiledGraph\n\nfrom dbt.clients.system import write_json\nfrom dbt.logger import GLOBAL_LOGGER as logger\n\ngraph_file_name = 'graph.gpickle'\nmanifest_file_name = 'manifest.json'\n\n\ndef print_compile_stats(stats):\n names = {\n NodeType.Model: 'models',\n NodeType.Test: 'tests',\n NodeType.Archive: 'archives',\n NodeType.Analysis: 'analyses',\n NodeType.Macro: 'macros',\n NodeType.Operation: 'operations',\n NodeType.Seed: 'seed files',\n }\n\n results = {k: 0 for k in names.keys()}\n results.update(stats)\n\n stat_line = \", \".join(\n [\"{} {}\".format(ct, names.get(t)) for t, ct in results.items()])\n\n logger.info(\"Found {}\".format(stat_line))\n\n\ndef _add_prepended_cte(prepended_ctes, new_cte):\n for dct in prepended_ctes:\n if dct['id'] == new_cte['id']:\n dct['sql'] = new_cte['sql']\n return\n prepended_ctes.append(new_cte)\n\n\ndef _extend_prepended_ctes(prepended_ctes, new_prepended_ctes):\n for new_cte in new_prepended_ctes:\n _add_prepended_cte(prepended_ctes, new_cte)\n\n\ndef prepend_ctes(model, manifest):\n model, _, manifest = recursively_prepend_ctes(model, manifest)\n\n return (model, manifest)\n\n\ndef recursively_prepend_ctes(model, manifest):\n if model.extra_ctes_injected:\n return (model, model.extra_ctes, manifest)\n\n if dbt.flags.STRICT_MODE:\n # ensure that all the nodes in this manifest are compiled\n CompiledGraph(**manifest.to_flat_graph())\n\n prepended_ctes = []\n\n for cte in model.extra_ctes:\n cte_id = cte['id']\n cte_to_add = manifest.nodes.get(cte_id)\n cte_to_add, new_prepended_ctes, manifest = recursively_prepend_ctes(\n cte_to_add, manifest)\n\n _extend_prepended_ctes(prepended_ctes, new_prepended_ctes)\n new_cte_name = '__dbt__CTE__{}'.format(cte_to_add.get('name'))\n sql = ' {} as (\\n{}\\n)'.format(new_cte_name, cte_to_add.compiled_sql)\n _add_prepended_cte(prepended_ctes, {'id': cte_id, 'sql': sql})\n\n model.prepend_ctes(prepended_ctes)\n\n manifest.nodes[model.unique_id] = model\n\n return (model, prepended_ctes, manifest)\n\n\nclass Compiler(object):\n def __init__(self, config):\n self.config = config\n\n def initialize(self):\n dbt.clients.system.make_directory(self.config.target_path)\n dbt.clients.system.make_directory(self.config.modules_path)\n\n def compile_node(self, node, manifest, extra_context=None):\n if extra_context is None:\n extra_context = {}\n\n logger.debug(\"Compiling {}\".format(node.get('unique_id')))\n\n data = node.to_dict()\n data.update({\n 'compiled': False,\n 'compiled_sql': None,\n 'extra_ctes_injected': False,\n 'extra_ctes': [],\n 'injected_sql': None,\n })\n compiled_node = 
CompiledNode(**data)\n\n context = dbt.context.runtime.generate(\n compiled_node, self.config, manifest)\n context.update(extra_context)\n\n compiled_node.compiled_sql = dbt.clients.jinja.get_rendered(\n node.get('raw_sql'),\n context,\n node)\n\n compiled_node.compiled = True\n\n injected_node, _ = prepend_ctes(compiled_node, manifest)\n\n should_wrap = {NodeType.Test, NodeType.Operation}\n if injected_node.resource_type in should_wrap:\n # data tests get wrapped in count(*)\n # TODO : move this somewhere more reasonable\n if 'data' in injected_node.tags and \\\n is_type(injected_node, NodeType.Test):\n injected_node.wrapped_sql = (\n \"select count(*) from (\\n{test_sql}\\n) sbq\").format(\n test_sql=injected_node.injected_sql)\n else:\n # don't wrap schema tests or analyses.\n injected_node.wrapped_sql = injected_node.injected_sql\n\n elif is_type(injected_node, NodeType.Archive):\n # unfortunately we do everything automagically for\n # archives. in the future it'd be nice to generate\n # the SQL at the parser level.\n pass\n\n elif(is_type(injected_node, NodeType.Model) and\n get_materialization(injected_node) == 'ephemeral'):\n pass\n\n else:\n injected_node.wrapped_sql = None\n\n return injected_node\n\n def write_manifest_file(self, manifest):\n \"\"\"Write the manifest file to disk.\n\n manifest should be a Manifest.\n \"\"\"\n filename = manifest_file_name\n manifest_path = os.path.join(self.config.target_path, filename)\n write_json(manifest_path, manifest.serialize())\n\n def write_graph_file(self, linker, manifest):\n filename = graph_file_name\n graph_path = os.path.join(self.config.target_path, filename)\n linker.write_graph(graph_path, manifest)\n\n def link_node(self, linker, node, manifest):\n linker.add_node(node.unique_id)\n\n for dependency in node.depends_on_nodes:\n if manifest.nodes.get(dependency):\n linker.dependency(\n node.unique_id,\n (manifest.nodes.get(dependency).unique_id))\n\n else:\n dbt.exceptions.dependency_not_found(node, dependency)\n\n def link_graph(self, linker, manifest):\n for node in manifest.nodes.values():\n self.link_node(linker, node, manifest)\n\n cycle = linker.find_cycles()\n\n if cycle:\n raise RuntimeError(\"Found a cycle: {}\".format(cycle))\n\n def get_all_projects(self):\n all_projects = {self.config.project_name: self.config}\n dependency_projects = dbt.utils.dependency_projects(self.config)\n\n for project_cfg in dependency_projects:\n name = project_cfg.project_name\n all_projects[name] = project_cfg\n\n if dbt.flags.STRICT_MODE:\n dbt.contracts.project.ProjectList(**all_projects)\n\n return all_projects\n\n def _check_resource_uniqueness(cls, manifest):\n names_resources = {}\n alias_resources = {}\n\n for resource, node in manifest.nodes.items():\n if node.resource_type not in NodeType.refable():\n continue\n\n name = node.name\n alias = \"{}.{}\".format(node.schema, node.alias)\n\n existing_node = names_resources.get(name)\n if existing_node is not None:\n dbt.exceptions.raise_duplicate_resource_name(\n existing_node, node)\n\n existing_alias = alias_resources.get(alias)\n if existing_alias is not None:\n dbt.exceptions.raise_ambiguous_alias(\n existing_alias, node)\n\n names_resources[name] = node\n alias_resources[alias] = node\n\n def compile(self):\n linker = Linker()\n\n all_projects = self.get_all_projects()\n\n manifest = dbt.loader.GraphLoader.load_all(self.config, all_projects)\n\n self.write_manifest_file(manifest)\n\n self._check_resource_uniqueness(manifest)\n\n resource_fqns = manifest.get_resource_fqns()\n 
disabled_fqns = [n.fqn for n in manifest.disabled]\n self.config.warn_for_unused_resource_config_paths(resource_fqns,\n disabled_fqns)\n\n self.link_graph(linker, manifest)\n\n stats = defaultdict(int)\n\n for node_name, node in itertools.chain(\n manifest.nodes.items(),\n manifest.macros.items()):\n stats[node.resource_type] += 1\n\n self.write_graph_file(linker, manifest)\n print_compile_stats(stats)\n\n return manifest, linker\n", "path": "dbt/compilation.py"}, {"content": "from dbt.logger import GLOBAL_LOGGER as logger\n\n\nclass DBTDeprecation(object):\n name = None\n description = None\n\n def show(self, *args, **kwargs):\n if self.name not in active_deprecations:\n desc = self.description.format(**kwargs)\n logger.info(\"* Deprecation Warning: {}\\n\".format(desc))\n active_deprecations.add(self.name)\n\n\nclass DBTRepositoriesDeprecation(DBTDeprecation):\n name = \"repositories\"\n description = \"\"\"The dbt_project.yml configuration option 'repositories' is\n deprecated. Please place dependencies in the `packages.yml` file instead.\n The 'repositories' option will be removed in a future version of dbt.\n\n For more information, see: https://docs.getdbt.com/docs/package-management\n\n # Example packages.yml contents:\n\n{recommendation}\n \"\"\"\n\n\nclass SeedDropExistingDeprecation(DBTDeprecation):\n name = 'drop-existing'\n description = \"\"\"The --drop-existing argument to `dbt seed` has been\n deprecated. Please use --full-refresh instead. The --drop-existing option\n will be removed in a future version of dbt.\"\"\"\n\n\ndef warn(name, *args, **kwargs):\n if name not in deprecations:\n # this should (hopefully) never happen\n raise RuntimeError(\n \"Error showing deprecation warning: {}\".format(name)\n )\n\n deprecations[name].show(*args, **kwargs)\n\n\n# these are globally available\n# since modules are only imported once, active_deprecations is a singleton\n\nactive_deprecations = set()\n\ndeprecations_list = [\n DBTRepositoriesDeprecation(),\n SeedDropExistingDeprecation()\n]\n\ndeprecations = {d.name: d for d in deprecations_list}\n\n\ndef reset_deprecations():\n active_deprecations.clear()\n", "path": "dbt/deprecations.py"}], "after_files": [{"content": "\nSnowflakeQuotingDocs = 'https://docs.getdbt.com/v0.10/docs/configuring-quoting'\nIncrementalDocs = 'https://docs.getdbt.com/docs/configuring-incremental-models'\n", "path": "dbt/links.py"}, {"content": "import itertools\nimport os\nimport json\nfrom collections import OrderedDict, defaultdict\nimport sqlparse\n\nimport dbt.utils\nimport dbt.include\nimport dbt.tracking\n\nfrom dbt.utils import get_materialization, NodeType, is_type\n\nfrom dbt.linker import Linker\n\nimport dbt.compat\nimport dbt.context.runtime\nimport dbt.contracts.project\nimport dbt.exceptions\nimport dbt.flags\nimport dbt.loader\nimport dbt.config\nfrom dbt.contracts.graph.compiled import CompiledNode, CompiledGraph\n\nfrom dbt.clients.system import write_json\nfrom dbt.logger import GLOBAL_LOGGER as logger\n\ngraph_file_name = 'graph.gpickle'\nmanifest_file_name = 'manifest.json'\n\n\ndef print_compile_stats(stats):\n names = {\n NodeType.Model: 'models',\n NodeType.Test: 'tests',\n NodeType.Archive: 'archives',\n NodeType.Analysis: 'analyses',\n NodeType.Macro: 'macros',\n NodeType.Operation: 'operations',\n NodeType.Seed: 'seed files',\n }\n\n results = {k: 0 for k in names.keys()}\n results.update(stats)\n\n stat_line = \", \".join(\n [\"{} {}\".format(ct, names.get(t)) for t, ct in results.items()])\n\n logger.info(\"Found 
{}\".format(stat_line))\n\n\ndef _add_prepended_cte(prepended_ctes, new_cte):\n for dct in prepended_ctes:\n if dct['id'] == new_cte['id']:\n dct['sql'] = new_cte['sql']\n return\n prepended_ctes.append(new_cte)\n\n\ndef _extend_prepended_ctes(prepended_ctes, new_prepended_ctes):\n for new_cte in new_prepended_ctes:\n _add_prepended_cte(prepended_ctes, new_cte)\n\n\ndef prepend_ctes(model, manifest):\n model, _, manifest = recursively_prepend_ctes(model, manifest)\n\n return (model, manifest)\n\n\ndef recursively_prepend_ctes(model, manifest):\n if model.extra_ctes_injected:\n return (model, model.extra_ctes, manifest)\n\n if dbt.flags.STRICT_MODE:\n # ensure that all the nodes in this manifest are compiled\n CompiledGraph(**manifest.to_flat_graph())\n\n prepended_ctes = []\n\n for cte in model.extra_ctes:\n cte_id = cte['id']\n cte_to_add = manifest.nodes.get(cte_id)\n cte_to_add, new_prepended_ctes, manifest = recursively_prepend_ctes(\n cte_to_add, manifest)\n\n _extend_prepended_ctes(prepended_ctes, new_prepended_ctes)\n new_cte_name = '__dbt__CTE__{}'.format(cte_to_add.get('name'))\n sql = ' {} as (\\n{}\\n)'.format(new_cte_name, cte_to_add.compiled_sql)\n _add_prepended_cte(prepended_ctes, {'id': cte_id, 'sql': sql})\n\n model.prepend_ctes(prepended_ctes)\n\n manifest.nodes[model.unique_id] = model\n\n return (model, prepended_ctes, manifest)\n\n\nclass Compiler(object):\n def __init__(self, config):\n self.config = config\n\n def initialize(self):\n dbt.clients.system.make_directory(self.config.target_path)\n dbt.clients.system.make_directory(self.config.modules_path)\n\n def compile_node(self, node, manifest, extra_context=None):\n if extra_context is None:\n extra_context = {}\n\n logger.debug(\"Compiling {}\".format(node.get('unique_id')))\n\n data = node.to_dict()\n data.update({\n 'compiled': False,\n 'compiled_sql': None,\n 'extra_ctes_injected': False,\n 'extra_ctes': [],\n 'injected_sql': None,\n })\n compiled_node = CompiledNode(**data)\n\n context = dbt.context.runtime.generate(\n compiled_node, self.config, manifest)\n context.update(extra_context)\n\n compiled_node.compiled_sql = dbt.clients.jinja.get_rendered(\n node.get('raw_sql'),\n context,\n node)\n\n compiled_node.compiled = True\n\n injected_node, _ = prepend_ctes(compiled_node, manifest)\n\n should_wrap = {NodeType.Test, NodeType.Operation}\n if injected_node.resource_type in should_wrap:\n # data tests get wrapped in count(*)\n # TODO : move this somewhere more reasonable\n if 'data' in injected_node.tags and \\\n is_type(injected_node, NodeType.Test):\n injected_node.wrapped_sql = (\n \"select count(*) from (\\n{test_sql}\\n) sbq\").format(\n test_sql=injected_node.injected_sql)\n else:\n # don't wrap schema tests or analyses.\n injected_node.wrapped_sql = injected_node.injected_sql\n\n elif is_type(injected_node, NodeType.Archive):\n # unfortunately we do everything automagically for\n # archives. 
in the future it'd be nice to generate\n # the SQL at the parser level.\n pass\n\n elif(is_type(injected_node, NodeType.Model) and\n get_materialization(injected_node) == 'ephemeral'):\n pass\n\n else:\n injected_node.wrapped_sql = None\n\n return injected_node\n\n def write_manifest_file(self, manifest):\n \"\"\"Write the manifest file to disk.\n\n manifest should be a Manifest.\n \"\"\"\n filename = manifest_file_name\n manifest_path = os.path.join(self.config.target_path, filename)\n write_json(manifest_path, manifest.serialize())\n\n def write_graph_file(self, linker, manifest):\n filename = graph_file_name\n graph_path = os.path.join(self.config.target_path, filename)\n linker.write_graph(graph_path, manifest)\n\n def link_node(self, linker, node, manifest):\n linker.add_node(node.unique_id)\n\n for dependency in node.depends_on_nodes:\n if manifest.nodes.get(dependency):\n linker.dependency(\n node.unique_id,\n (manifest.nodes.get(dependency).unique_id))\n\n else:\n dbt.exceptions.dependency_not_found(node, dependency)\n\n def link_graph(self, linker, manifest):\n for node in manifest.nodes.values():\n self.link_node(linker, node, manifest)\n\n cycle = linker.find_cycles()\n\n if cycle:\n raise RuntimeError(\"Found a cycle: {}\".format(cycle))\n\n def get_all_projects(self):\n all_projects = {self.config.project_name: self.config}\n dependency_projects = dbt.utils.dependency_projects(self.config)\n\n for project_cfg in dependency_projects:\n name = project_cfg.project_name\n all_projects[name] = project_cfg\n\n if dbt.flags.STRICT_MODE:\n dbt.contracts.project.ProjectList(**all_projects)\n\n return all_projects\n\n def _check_resource_uniqueness(cls, manifest):\n names_resources = {}\n alias_resources = {}\n\n for resource, node in manifest.nodes.items():\n if node.resource_type not in NodeType.refable():\n continue\n\n name = node.name\n alias = \"{}.{}\".format(node.schema, node.alias)\n\n existing_node = names_resources.get(name)\n if existing_node is not None:\n dbt.exceptions.raise_duplicate_resource_name(\n existing_node, node)\n\n existing_alias = alias_resources.get(alias)\n if existing_alias is not None:\n dbt.exceptions.raise_ambiguous_alias(\n existing_alias, node)\n\n names_resources[name] = node\n alias_resources[alias] = node\n\n def warn_for_deprecated_configs(self, manifest):\n for unique_id, node in manifest.nodes.items():\n is_model = node.resource_type == NodeType.Model\n if is_model and 'sql_where' in node.config:\n dbt.deprecations.warn('sql_where')\n\n def compile(self):\n linker = Linker()\n\n all_projects = self.get_all_projects()\n\n manifest = dbt.loader.GraphLoader.load_all(self.config, all_projects)\n\n self.write_manifest_file(manifest)\n\n self._check_resource_uniqueness(manifest)\n\n resource_fqns = manifest.get_resource_fqns()\n disabled_fqns = [n.fqn for n in manifest.disabled]\n self.config.warn_for_unused_resource_config_paths(resource_fqns,\n disabled_fqns)\n self.warn_for_deprecated_configs(manifest)\n\n self.link_graph(linker, manifest)\n\n stats = defaultdict(int)\n\n for node_name, node in itertools.chain(\n manifest.nodes.items(),\n manifest.macros.items()):\n stats[node.resource_type] += 1\n\n self.write_graph_file(linker, manifest)\n print_compile_stats(stats)\n\n return manifest, linker\n", "path": "dbt/compilation.py"}, {"content": "from dbt.logger import GLOBAL_LOGGER as logger\nimport dbt.links\n\n\nclass DBTDeprecation(object):\n name = None\n description = None\n\n def show(self, *args, **kwargs):\n if self.name not in 
active_deprecations:\n desc = self.description.format(**kwargs)\n logger.info(\"* Deprecation Warning: {}\\n\".format(desc))\n active_deprecations.add(self.name)\n\n\nclass DBTRepositoriesDeprecation(DBTDeprecation):\n name = \"repositories\"\n description = \"\"\"The dbt_project.yml configuration option 'repositories' is\n deprecated. Please place dependencies in the `packages.yml` file instead.\n The 'repositories' option will be removed in a future version of dbt.\n\n For more information, see: https://docs.getdbt.com/docs/package-management\n\n # Example packages.yml contents:\n\n{recommendation}\n \"\"\"\n\n\nclass SqlWhereDeprecation(DBTDeprecation):\n name = \"sql_where\"\n description = \"\"\"\\\nThe `sql_where` option for incremental models is deprecated and will be\n removed in a future release. Check the docs for more information\n\n {}\n \"\"\".format(dbt.links.IncrementalDocs)\n\n\nclass SeedDropExistingDeprecation(DBTDeprecation):\n name = 'drop-existing'\n description = \"\"\"The --drop-existing argument to `dbt seed` has been\n deprecated. Please use --full-refresh instead. The --drop-existing option\n will be removed in a future version of dbt.\"\"\"\n\n\ndef warn(name, *args, **kwargs):\n if name not in deprecations:\n # this should (hopefully) never happen\n raise RuntimeError(\n \"Error showing deprecation warning: {}\".format(name)\n )\n\n deprecations[name].show(*args, **kwargs)\n\n\n# these are globally available\n# since modules are only imported once, active_deprecations is a singleton\n\nactive_deprecations = set()\n\ndeprecations_list = [\n DBTRepositoriesDeprecation(),\n SeedDropExistingDeprecation(),\n SqlWhereDeprecation(),\n]\n\ndeprecations = {d.name: d for d in deprecations_list}\n\n\ndef reset_deprecations():\n active_deprecations.clear()\n", "path": "dbt/deprecations.py"}]} | 3,439 | 578 |
gh_patches_debug_42248 | rasdani/github-patches | git_diff | bridgecrewio__checkov-2168 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
CKV_GCP_26 is not working as expected when using terraform plan output.
**Describe the bug**
The CKV_GCP_26 check does not produce the expected output when it is run against a Terraform plan output JSON file. The check reports PASSED even though flow logs are not enabled for the subnet. However, if we run it against the directory containing the .tf files, the check returns FAILED as expected.
checkov version used: `2.0.236`
Terraform version used: `1.0.1`
**To Reproduce**
1. Create a sample subnet resource in a network.tf file.
```
resource "google_compute_network" "vpc" {
name = "test-vpc"
auto_create_subnetworks = "false"
routing_mode = "REGIONAL"
}
resource "google_compute_subnetwork" "private-subnet" {
name = "test-sub"
ip_cidr_range = "10.1.0.0/16"
network = google_compute_network.vpc.self_link
private_ip_google_access = true
}
```
2. terraform plan -out tf.plan
3. terraform show -json tf.plan > tf.json
4. checkov -f tf.json
5. Now run checkov -f network.tf
The two commands give conflicting results:
```
Check: CKV_GCP_26: "Ensure that VPC Flow Logs is enabled for every subnet in a VPC Network"
PASSED for resource: google_compute_subnetwork.private-subnet
File: /tf/tfplan.json:0-0
Guide: https://docs.bridgecrew.io/docs/bc_gcp_logging_1
```
```
Check: CKV_GCP_26: "Ensure that VPC Flow Logs is enabled for every subnet in a VPC Network"
FAILED for resource: google_compute_subnetwork.private-subnet
File: /network.tf:6-11
Guide: https://docs.bridgecrew.io/docs/bc_gcp_logging_1
6 | resource "google_compute_subnetwork" "private-subnet" {
7 | name = var.private_subnet_name
8 | ip_cidr_range = var.private_subnet_cidr
9 | network = google_compute_network.vpc.self_link
10 | private_ip_google_access = true
11 | }
```
**Expected behavior**
The check should also return FAILED when run against the Terraform plan output JSON.
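For reference, the check looks for VPC flow logs being enabled on the subnet, which in Terraform is expressed with a `log_config` block; a subnet that should genuinely PASS would look roughly like this (attribute values are illustrative):
```
resource "google_compute_subnetwork" "private-subnet" {
  name                     = "test-sub"
  ip_cidr_range            = "10.1.0.0/16"
  network                  = google_compute_network.vpc.self_link
  private_ip_google_access = true

  log_config {
    aggregation_interval = "INTERVAL_5_SEC"
    flow_sampling        = 0.5
    metadata             = "INCLUDE_ALL_METADATA"
  }
}
```
The subnet in the reproduction above has no `log_config` block, so both scan modes should report FAILED for it.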
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `checkov/terraform/checks/resource/azure/AKSApiServerAuthorizedIpRanges.py`
Content:
```
1 from checkov.common.models.enums import CheckCategories
2 from checkov.common.models.consts import ANY_VALUE
3 from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck
4
5
6 class AKSApiServerAuthorizedIpRanges(BaseResourceValueCheck):
7 def __init__(self):
8 name = "Ensure AKS has an API Server Authorized IP Ranges enabled"
9 id = "CKV_AZURE_6"
10 supported_resources = ['azurerm_kubernetes_cluster']
11 categories = [CheckCategories.KUBERNETES]
12 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
13
14 def get_inspected_key(self):
15 return 'api_server_authorized_ip_ranges/[0]'
16
17 def get_expected_value(self):
18 return ANY_VALUE
19
20
21 check = AKSApiServerAuthorizedIpRanges()
22
```
Path: `checkov/terraform/checks/resource/base_resource_value_check.py`
Content:
```
1 from abc import abstractmethod
2 from collections.abc import Iterable
3 from typing import List, Dict, Any
4
5 import dpath.util
6 import re
7 from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck
8 from checkov.common.models.enums import CheckResult, CheckCategories
9 from checkov.common.models.consts import ANY_VALUE
10 from checkov.common.util.type_forcers import force_list
11 from checkov.terraform.graph_builder.utils import get_referenced_vertices_in_value
12 from checkov.terraform.parser_functions import handle_dynamic_values
13 from checkov.terraform.parser_utils import find_var_blocks
14
15
16
17 class BaseResourceValueCheck(BaseResourceCheck):
18 def __init__(
19 self,
20 name: str,
21 id: str,
22 categories: "Iterable[CheckCategories]",
23 supported_resources: "Iterable[str]",
24 missing_block_result: CheckResult = CheckResult.FAILED,
25 ) -> None:
26 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
27 self.missing_block_result = missing_block_result
28
29 @staticmethod
30 def _filter_key_path(path: str) -> List[str]:
31 """
32 Filter an attribute path to contain only named attributes by dropping array indices from the path)
33 :param path: valid JSONPath of an attribute
34 :return: List of named attributes with respect to the input JSONPath order
35 """
36 return [x for x in path.split("/") if not re.search(re.compile(r"^\[?\d+]?$"), x)]
37
38 @staticmethod
39 def _is_variable_dependant(value: Any) -> bool:
40 if not isinstance(value, str):
41 return False
42 if "${" not in value:
43 return False
44
45 if find_var_blocks(value):
46 return True
47 return False
48
49 @staticmethod
50 def _is_nesting_key(inspected_attributes: List[str], key: List[str]) -> bool:
51 """
52 Resolves whether a key is a subset of the inspected nesting attributes
53 :param inspected_attributes: list of nesting attributes
54 :param key: JSONPath key of an attribute
55 :return: True/False
56 """
57 return any(x in key for x in inspected_attributes)
58
59 def scan_resource_conf(self, conf: Dict[str, List[Any]]) -> CheckResult:
60 handle_dynamic_values(conf)
61 inspected_key = self.get_inspected_key()
62 expected_values = self.get_expected_values()
63 if dpath.search(conf, inspected_key) != {}:
64 # Inspected key exists
65 value = dpath.get(conf, inspected_key)
66 if isinstance(value, list) and len(value) == 1:
67 value = value[0]
68 if value is None:
69 return self.missing_block_result
70 if ANY_VALUE in expected_values and value is not None and (not isinstance(value, str) or value):
71 # Key is found on the configuration - if it accepts any value, the check is PASSED
72 return CheckResult.PASSED
73 if self._is_variable_dependant(value):
74 # If the tested attribute is variable-dependant, then result is PASSED
75 return CheckResult.PASSED
76 if value in expected_values:
77 return CheckResult.PASSED
78 if get_referenced_vertices_in_value(value=value, aliases={}, resources_types=[]):
79 # we don't provide resources_types as we want to stay provider agnostic
80 return CheckResult.UNKNOWN
81 return CheckResult.FAILED
82 else:
83 # Look for the configuration in a bottom-up fashion
84 inspected_attributes = self._filter_key_path(inspected_key)
85 for attribute in reversed(inspected_attributes):
86 for sub_key, sub_conf in dpath.search(conf, f"**/{attribute}", yielded=True):
87 filtered_sub_key = self._filter_key_path(sub_key)
88 # Only proceed with check if full path for key is similar - not partial match
89 if inspected_attributes == filtered_sub_key:
90 if self._is_nesting_key(inspected_attributes, filtered_sub_key):
91 if isinstance(sub_conf, list) and len(sub_conf) == 1:
92 sub_conf = sub_conf[0]
93 if sub_conf in self.get_expected_values():
94 return CheckResult.PASSED
95 if self._is_variable_dependant(sub_conf):
96 # If the tested attribute is variable-dependant, then result is PASSED
97 return CheckResult.PASSED
98
99 return self.missing_block_result
100
101 @abstractmethod
102 def get_inspected_key(self) -> str:
103 """
104 :return: JSONPath syntax path of the checked attribute
105 """
106 raise NotImplementedError()
107
108 def get_expected_values(self) -> List[Any]:
109 """
110 Override the method with the list of acceptable values if the check has more than one possible expected value, given
111 the inspected key
112 :return: List of expected values, defaults to a list of the expected value
113 """
114 return [self.get_expected_value()]
115
116 def get_expected_value(self) -> Any:
117 """
118 Returns the default expected value, governed by provider best practices
119 """
120 return True
121
122 def get_evaluated_keys(self) -> List[str]:
123 return force_list(self.get_inspected_key())
124
```
Path: `checkov/terraform/checks/resource/gcp/GoogleSubnetworkLoggingEnabled.py`
Content:
```
1 from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck
2 from checkov.common.models.enums import CheckCategories
3 from checkov.common.models.consts import ANY_VALUE
4
5
6 class GoogleSubnetworkLoggingEnabled(BaseResourceValueCheck):
7 def __init__(self):
8 name = "Ensure that VPC Flow Logs is enabled for every subnet in a VPC Network"
9 id = "CKV_GCP_26"
10 supported_resources = ['google_compute_subnetwork']
11 categories = [CheckCategories.LOGGING]
12 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
13
14 def get_inspected_key(self):
15 return 'log_config'
16
17 def get_expected_values(self):
18 return [ANY_VALUE]
19
20
21 check = GoogleSubnetworkLoggingEnabled()
22
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/checkov/terraform/checks/resource/azure/AKSApiServerAuthorizedIpRanges.py b/checkov/terraform/checks/resource/azure/AKSApiServerAuthorizedIpRanges.py
--- a/checkov/terraform/checks/resource/azure/AKSApiServerAuthorizedIpRanges.py
+++ b/checkov/terraform/checks/resource/azure/AKSApiServerAuthorizedIpRanges.py
@@ -1,21 +1,23 @@
-from checkov.common.models.enums import CheckCategories
+from typing import List, Any
+
from checkov.common.models.consts import ANY_VALUE
+from checkov.common.models.enums import CheckCategories
from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck
class AKSApiServerAuthorizedIpRanges(BaseResourceValueCheck):
- def __init__(self):
+ def __init__(self) -> None:
name = "Ensure AKS has an API Server Authorized IP Ranges enabled"
id = "CKV_AZURE_6"
- supported_resources = ['azurerm_kubernetes_cluster']
- categories = [CheckCategories.KUBERNETES]
+ supported_resources = ("azurerm_kubernetes_cluster",)
+ categories = (CheckCategories.KUBERNETES,)
super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
- def get_inspected_key(self):
- return 'api_server_authorized_ip_ranges/[0]'
+ def get_inspected_key(self) -> str:
+ return "api_server_authorized_ip_ranges/[0]"
- def get_expected_value(self):
- return ANY_VALUE
+ def get_expected_values(self) -> List[Any]:
+ return [ANY_VALUE]
check = AKSApiServerAuthorizedIpRanges()
diff --git a/checkov/terraform/checks/resource/base_resource_value_check.py b/checkov/terraform/checks/resource/base_resource_value_check.py
--- a/checkov/terraform/checks/resource/base_resource_value_check.py
+++ b/checkov/terraform/checks/resource/base_resource_value_check.py
@@ -65,7 +65,7 @@
value = dpath.get(conf, inspected_key)
if isinstance(value, list) and len(value) == 1:
value = value[0]
- if value is None:
+ if value is None or (isinstance(value, list) and not value):
return self.missing_block_result
if ANY_VALUE in expected_values and value is not None and (not isinstance(value, str) or value):
# Key is found on the configuration - if it accepts any value, the check is PASSED
diff --git a/checkov/terraform/checks/resource/gcp/GoogleSubnetworkLoggingEnabled.py b/checkov/terraform/checks/resource/gcp/GoogleSubnetworkLoggingEnabled.py
--- a/checkov/terraform/checks/resource/gcp/GoogleSubnetworkLoggingEnabled.py
+++ b/checkov/terraform/checks/resource/gcp/GoogleSubnetworkLoggingEnabled.py
@@ -1,20 +1,30 @@
-from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck
-from checkov.common.models.enums import CheckCategories
+from typing import Any, List, Dict
+
from checkov.common.models.consts import ANY_VALUE
+from checkov.common.models.enums import CheckCategories, CheckResult
+from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck
class GoogleSubnetworkLoggingEnabled(BaseResourceValueCheck):
- def __init__(self):
+ def __init__(self) -> None:
name = "Ensure that VPC Flow Logs is enabled for every subnet in a VPC Network"
id = "CKV_GCP_26"
- supported_resources = ['google_compute_subnetwork']
- categories = [CheckCategories.LOGGING]
+ supported_resources = ("google_compute_subnetwork",)
+ categories = (CheckCategories.LOGGING,)
super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
- def get_inspected_key(self):
- return 'log_config'
+ def scan_resource_conf(self, conf: Dict[str, List[Any]]) -> CheckResult:
+ # flow logs can't be enabled for `INTERNAL_HTTPS_LOAD_BALANCER` subnetworks
+ purpose = conf.get("purpose")
+ if purpose and purpose[0] == "INTERNAL_HTTPS_LOAD_BALANCER":
+ return CheckResult.UNKNOWN
+
+ return super().scan_resource_conf(conf)
+
+ def get_inspected_key(self) -> str:
+ return "log_config"
- def get_expected_values(self):
+ def get_expected_values(self) -> List[Any]:
return [ANY_VALUE]
| {"golden_diff": "diff --git a/checkov/terraform/checks/resource/azure/AKSApiServerAuthorizedIpRanges.py b/checkov/terraform/checks/resource/azure/AKSApiServerAuthorizedIpRanges.py\n--- a/checkov/terraform/checks/resource/azure/AKSApiServerAuthorizedIpRanges.py\n+++ b/checkov/terraform/checks/resource/azure/AKSApiServerAuthorizedIpRanges.py\n@@ -1,21 +1,23 @@\n-from checkov.common.models.enums import CheckCategories\n+from typing import List, Any\n+\n from checkov.common.models.consts import ANY_VALUE\n+from checkov.common.models.enums import CheckCategories\n from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck\n \n \n class AKSApiServerAuthorizedIpRanges(BaseResourceValueCheck):\n- def __init__(self):\n+ def __init__(self) -> None:\n name = \"Ensure AKS has an API Server Authorized IP Ranges enabled\"\n id = \"CKV_AZURE_6\"\n- supported_resources = ['azurerm_kubernetes_cluster']\n- categories = [CheckCategories.KUBERNETES]\n+ supported_resources = (\"azurerm_kubernetes_cluster\",)\n+ categories = (CheckCategories.KUBERNETES,)\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n \n- def get_inspected_key(self):\n- return 'api_server_authorized_ip_ranges/[0]'\n+ def get_inspected_key(self) -> str:\n+ return \"api_server_authorized_ip_ranges/[0]\"\n \n- def get_expected_value(self):\n- return ANY_VALUE\n+ def get_expected_values(self) -> List[Any]:\n+ return [ANY_VALUE]\n \n \n check = AKSApiServerAuthorizedIpRanges()\ndiff --git a/checkov/terraform/checks/resource/base_resource_value_check.py b/checkov/terraform/checks/resource/base_resource_value_check.py\n--- a/checkov/terraform/checks/resource/base_resource_value_check.py\n+++ b/checkov/terraform/checks/resource/base_resource_value_check.py\n@@ -65,7 +65,7 @@\n value = dpath.get(conf, inspected_key)\n if isinstance(value, list) and len(value) == 1:\n value = value[0]\n- if value is None:\n+ if value is None or (isinstance(value, list) and not value):\n return self.missing_block_result\n if ANY_VALUE in expected_values and value is not None and (not isinstance(value, str) or value):\n # Key is found on the configuration - if it accepts any value, the check is PASSED\ndiff --git a/checkov/terraform/checks/resource/gcp/GoogleSubnetworkLoggingEnabled.py b/checkov/terraform/checks/resource/gcp/GoogleSubnetworkLoggingEnabled.py\n--- a/checkov/terraform/checks/resource/gcp/GoogleSubnetworkLoggingEnabled.py\n+++ b/checkov/terraform/checks/resource/gcp/GoogleSubnetworkLoggingEnabled.py\n@@ -1,20 +1,30 @@\n-from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck\n-from checkov.common.models.enums import CheckCategories\n+from typing import Any, List, Dict\n+\n from checkov.common.models.consts import ANY_VALUE\n+from checkov.common.models.enums import CheckCategories, CheckResult\n+from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck\n \n \n class GoogleSubnetworkLoggingEnabled(BaseResourceValueCheck):\n- def __init__(self):\n+ def __init__(self) -> None:\n name = \"Ensure that VPC Flow Logs is enabled for every subnet in a VPC Network\"\n id = \"CKV_GCP_26\"\n- supported_resources = ['google_compute_subnetwork']\n- categories = [CheckCategories.LOGGING]\n+ supported_resources = (\"google_compute_subnetwork\",)\n+ categories = (CheckCategories.LOGGING,)\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n \n- def 
get_inspected_key(self):\n- return 'log_config'\n+ def scan_resource_conf(self, conf: Dict[str, List[Any]]) -> CheckResult:\n+ # flow logs can't be enabled for `INTERNAL_HTTPS_LOAD_BALANCER` subnetworks\n+ purpose = conf.get(\"purpose\")\n+ if purpose and purpose[0] == \"INTERNAL_HTTPS_LOAD_BALANCER\":\n+ return CheckResult.UNKNOWN\n+\n+ return super().scan_resource_conf(conf)\n+\n+ def get_inspected_key(self) -> str:\n+ return \"log_config\"\n \n- def get_expected_values(self):\n+ def get_expected_values(self) -> List[Any]:\n return [ANY_VALUE]\n", "issue": "CKV_GCP_26 is not working as expected when using terraform plan output.\n**Describe the bug**\r\nCKV_GCP_26 check is not providing expected output when it is ran against terraform output json file. Check is gives the result as PASSED even if we don't have the flow logs enabled for the subnet. However if we run it against the directory where we have the .tf files then the check returns the FAILED status which is expected. \r\n\r\ncheckov version used: `2.0.236`\r\nTerraform version used: `1.0.1`\r\n\r\n**To Reproduce**\r\n1. Create a sample subnet resource in a network.tf file. \r\n```\r\nresource \"google_compute_network\" \"vpc\" {\r\n name = \"test-vpc\"\r\n auto_create_subnetworks = \"false\"\r\n routing_mode = \"REGIONAL\"\r\n}\r\n\r\nresource \"google_compute_subnetwork\" \"private-subnet\" {\r\n name = \"test-sub\"\r\n ip_cidr_range = \"10.1.0.0/16\"\r\n network = google_compute_network.vpc.self_link\r\n private_ip_google_access = true\r\n}\r\n```\r\n2. terraform plan -out tf.plan\r\n3. terraform show -json tf.plan > tf.json \r\n4. checkov -f tf.json\r\n5. Now run checkov -f network.tf\r\n\r\nBoth the commands will give conflicting results\r\n\r\n```\r\nCheck: CKV_GCP_26: \"Ensure that VPC Flow Logs is enabled for every subnet in a VPC Network\"\r\n PASSED for resource: google_compute_subnetwork.private-subnet\r\n File: /tf/tfplan.json:0-0\r\n Guide: https://docs.bridgecrew.io/docs/bc_gcp_logging_1\r\n```\r\n```\r\nCheck: CKV_GCP_26: \"Ensure that VPC Flow Logs is enabled for every subnet in a VPC Network\"\r\n FAILED for resource: google_compute_subnetwork.private-subnet\r\n File: /network.tf:6-11\r\n Guide: https://docs.bridgecrew.io/docs/bc_gcp_logging_1\r\n\r\n 6 | resource \"google_compute_subnetwork\" \"private-subnet\" {\r\n 7 | name = var.private_subnet_name\r\n 8 | ip_cidr_range = var.private_subnet_cidr\r\n 9 | network = google_compute_network.vpc.self_link\r\n 10 | private_ip_google_access = true\r\n 11 | }\r\n```\r\n**Expected behavior**\r\nThe check should return the status as FAILED when running it against terraform plan output json\r\n\n", "before_files": [{"content": "from checkov.common.models.enums import CheckCategories\nfrom checkov.common.models.consts import ANY_VALUE\nfrom checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck\n\n\nclass AKSApiServerAuthorizedIpRanges(BaseResourceValueCheck):\n def __init__(self):\n name = \"Ensure AKS has an API Server Authorized IP Ranges enabled\"\n id = \"CKV_AZURE_6\"\n supported_resources = ['azurerm_kubernetes_cluster']\n categories = [CheckCategories.KUBERNETES]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def get_inspected_key(self):\n return 'api_server_authorized_ip_ranges/[0]'\n\n def get_expected_value(self):\n return ANY_VALUE\n\n\ncheck = AKSApiServerAuthorizedIpRanges()\n", "path": "checkov/terraform/checks/resource/azure/AKSApiServerAuthorizedIpRanges.py"}, 
{"content": "from abc import abstractmethod\nfrom collections.abc import Iterable\nfrom typing import List, Dict, Any\n\nimport dpath.util\nimport re\nfrom checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck\nfrom checkov.common.models.enums import CheckResult, CheckCategories\nfrom checkov.common.models.consts import ANY_VALUE\nfrom checkov.common.util.type_forcers import force_list\nfrom checkov.terraform.graph_builder.utils import get_referenced_vertices_in_value\nfrom checkov.terraform.parser_functions import handle_dynamic_values\nfrom checkov.terraform.parser_utils import find_var_blocks\n\n\n\nclass BaseResourceValueCheck(BaseResourceCheck):\n def __init__(\n self,\n name: str,\n id: str,\n categories: \"Iterable[CheckCategories]\",\n supported_resources: \"Iterable[str]\",\n missing_block_result: CheckResult = CheckResult.FAILED,\n ) -> None:\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n self.missing_block_result = missing_block_result\n\n @staticmethod\n def _filter_key_path(path: str) -> List[str]:\n \"\"\"\n Filter an attribute path to contain only named attributes by dropping array indices from the path)\n :param path: valid JSONPath of an attribute\n :return: List of named attributes with respect to the input JSONPath order\n \"\"\"\n return [x for x in path.split(\"/\") if not re.search(re.compile(r\"^\\[?\\d+]?$\"), x)]\n\n @staticmethod\n def _is_variable_dependant(value: Any) -> bool:\n if not isinstance(value, str):\n return False\n if \"${\" not in value:\n return False\n\n if find_var_blocks(value):\n return True\n return False\n\n @staticmethod\n def _is_nesting_key(inspected_attributes: List[str], key: List[str]) -> bool:\n \"\"\"\n Resolves whether a key is a subset of the inspected nesting attributes\n :param inspected_attributes: list of nesting attributes\n :param key: JSONPath key of an attribute\n :return: True/False\n \"\"\"\n return any(x in key for x in inspected_attributes)\n\n def scan_resource_conf(self, conf: Dict[str, List[Any]]) -> CheckResult:\n handle_dynamic_values(conf)\n inspected_key = self.get_inspected_key()\n expected_values = self.get_expected_values()\n if dpath.search(conf, inspected_key) != {}:\n # Inspected key exists\n value = dpath.get(conf, inspected_key)\n if isinstance(value, list) and len(value) == 1:\n value = value[0]\n if value is None:\n return self.missing_block_result\n if ANY_VALUE in expected_values and value is not None and (not isinstance(value, str) or value):\n # Key is found on the configuration - if it accepts any value, the check is PASSED\n return CheckResult.PASSED\n if self._is_variable_dependant(value):\n # If the tested attribute is variable-dependant, then result is PASSED\n return CheckResult.PASSED\n if value in expected_values:\n return CheckResult.PASSED\n if get_referenced_vertices_in_value(value=value, aliases={}, resources_types=[]):\n # we don't provide resources_types as we want to stay provider agnostic\n return CheckResult.UNKNOWN\n return CheckResult.FAILED\n else:\n # Look for the configuration in a bottom-up fashion\n inspected_attributes = self._filter_key_path(inspected_key)\n for attribute in reversed(inspected_attributes):\n for sub_key, sub_conf in dpath.search(conf, f\"**/{attribute}\", yielded=True):\n filtered_sub_key = self._filter_key_path(sub_key)\n # Only proceed with check if full path for key is similar - not partial match\n if inspected_attributes == filtered_sub_key:\n if 
self._is_nesting_key(inspected_attributes, filtered_sub_key):\n if isinstance(sub_conf, list) and len(sub_conf) == 1:\n sub_conf = sub_conf[0]\n if sub_conf in self.get_expected_values():\n return CheckResult.PASSED\n if self._is_variable_dependant(sub_conf):\n # If the tested attribute is variable-dependant, then result is PASSED\n return CheckResult.PASSED\n\n return self.missing_block_result\n\n @abstractmethod\n def get_inspected_key(self) -> str:\n \"\"\"\n :return: JSONPath syntax path of the checked attribute\n \"\"\"\n raise NotImplementedError()\n\n def get_expected_values(self) -> List[Any]:\n \"\"\"\n Override the method with the list of acceptable values if the check has more than one possible expected value, given\n the inspected key\n :return: List of expected values, defaults to a list of the expected value\n \"\"\"\n return [self.get_expected_value()]\n\n def get_expected_value(self) -> Any:\n \"\"\"\n Returns the default expected value, governed by provider best practices\n \"\"\"\n return True\n\n def get_evaluated_keys(self) -> List[str]:\n return force_list(self.get_inspected_key())\n", "path": "checkov/terraform/checks/resource/base_resource_value_check.py"}, {"content": "from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck\nfrom checkov.common.models.enums import CheckCategories\nfrom checkov.common.models.consts import ANY_VALUE\n\n\nclass GoogleSubnetworkLoggingEnabled(BaseResourceValueCheck):\n def __init__(self):\n name = \"Ensure that VPC Flow Logs is enabled for every subnet in a VPC Network\"\n id = \"CKV_GCP_26\"\n supported_resources = ['google_compute_subnetwork']\n categories = [CheckCategories.LOGGING]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def get_inspected_key(self):\n return 'log_config'\n\n def get_expected_values(self):\n return [ANY_VALUE]\n\n\ncheck = GoogleSubnetworkLoggingEnabled()\n", "path": "checkov/terraform/checks/resource/gcp/GoogleSubnetworkLoggingEnabled.py"}], "after_files": [{"content": "from typing import List, Any\n\nfrom checkov.common.models.consts import ANY_VALUE\nfrom checkov.common.models.enums import CheckCategories\nfrom checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck\n\n\nclass AKSApiServerAuthorizedIpRanges(BaseResourceValueCheck):\n def __init__(self) -> None:\n name = \"Ensure AKS has an API Server Authorized IP Ranges enabled\"\n id = \"CKV_AZURE_6\"\n supported_resources = (\"azurerm_kubernetes_cluster\",)\n categories = (CheckCategories.KUBERNETES,)\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def get_inspected_key(self) -> str:\n return \"api_server_authorized_ip_ranges/[0]\"\n\n def get_expected_values(self) -> List[Any]:\n return [ANY_VALUE]\n\n\ncheck = AKSApiServerAuthorizedIpRanges()\n", "path": "checkov/terraform/checks/resource/azure/AKSApiServerAuthorizedIpRanges.py"}, {"content": "from abc import abstractmethod\nfrom collections.abc import Iterable\nfrom typing import List, Dict, Any\n\nimport dpath.util\nimport re\nfrom checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck\nfrom checkov.common.models.enums import CheckResult, CheckCategories\nfrom checkov.common.models.consts import ANY_VALUE\nfrom checkov.common.util.type_forcers import force_list\nfrom checkov.terraform.graph_builder.utils import get_referenced_vertices_in_value\nfrom checkov.terraform.parser_functions import 
handle_dynamic_values\nfrom checkov.terraform.parser_utils import find_var_blocks\n\n\n\nclass BaseResourceValueCheck(BaseResourceCheck):\n def __init__(\n self,\n name: str,\n id: str,\n categories: \"Iterable[CheckCategories]\",\n supported_resources: \"Iterable[str]\",\n missing_block_result: CheckResult = CheckResult.FAILED,\n ) -> None:\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n self.missing_block_result = missing_block_result\n\n @staticmethod\n def _filter_key_path(path: str) -> List[str]:\n \"\"\"\n Filter an attribute path to contain only named attributes by dropping array indices from the path)\n :param path: valid JSONPath of an attribute\n :return: List of named attributes with respect to the input JSONPath order\n \"\"\"\n return [x for x in path.split(\"/\") if not re.search(re.compile(r\"^\\[?\\d+]?$\"), x)]\n\n @staticmethod\n def _is_variable_dependant(value: Any) -> bool:\n if not isinstance(value, str):\n return False\n if \"${\" not in value:\n return False\n\n if find_var_blocks(value):\n return True\n return False\n\n @staticmethod\n def _is_nesting_key(inspected_attributes: List[str], key: List[str]) -> bool:\n \"\"\"\n Resolves whether a key is a subset of the inspected nesting attributes\n :param inspected_attributes: list of nesting attributes\n :param key: JSONPath key of an attribute\n :return: True/False\n \"\"\"\n return any(x in key for x in inspected_attributes)\n\n def scan_resource_conf(self, conf: Dict[str, List[Any]]) -> CheckResult:\n handle_dynamic_values(conf)\n inspected_key = self.get_inspected_key()\n expected_values = self.get_expected_values()\n if dpath.search(conf, inspected_key) != {}:\n # Inspected key exists\n value = dpath.get(conf, inspected_key)\n if isinstance(value, list) and len(value) == 1:\n value = value[0]\n if value is None or (isinstance(value, list) and not value):\n return self.missing_block_result\n if ANY_VALUE in expected_values and value is not None and (not isinstance(value, str) or value):\n # Key is found on the configuration - if it accepts any value, the check is PASSED\n return CheckResult.PASSED\n if self._is_variable_dependant(value):\n # If the tested attribute is variable-dependant, then result is PASSED\n return CheckResult.PASSED\n if value in expected_values:\n return CheckResult.PASSED\n if get_referenced_vertices_in_value(value=value, aliases={}, resources_types=[]):\n # we don't provide resources_types as we want to stay provider agnostic\n return CheckResult.UNKNOWN\n return CheckResult.FAILED\n else:\n # Look for the configuration in a bottom-up fashion\n inspected_attributes = self._filter_key_path(inspected_key)\n for attribute in reversed(inspected_attributes):\n for sub_key, sub_conf in dpath.search(conf, f\"**/{attribute}\", yielded=True):\n filtered_sub_key = self._filter_key_path(sub_key)\n # Only proceed with check if full path for key is similar - not partial match\n if inspected_attributes == filtered_sub_key:\n if self._is_nesting_key(inspected_attributes, filtered_sub_key):\n if isinstance(sub_conf, list) and len(sub_conf) == 1:\n sub_conf = sub_conf[0]\n if sub_conf in self.get_expected_values():\n return CheckResult.PASSED\n if self._is_variable_dependant(sub_conf):\n # If the tested attribute is variable-dependant, then result is PASSED\n return CheckResult.PASSED\n\n return self.missing_block_result\n\n @abstractmethod\n def get_inspected_key(self) -> str:\n \"\"\"\n :return: JSONPath syntax path of the checked attribute\n \"\"\"\n 
raise NotImplementedError()\n\n def get_expected_values(self) -> List[Any]:\n \"\"\"\n Override the method with the list of acceptable values if the check has more than one possible expected value, given\n the inspected key\n :return: List of expected values, defaults to a list of the expected value\n \"\"\"\n return [self.get_expected_value()]\n\n def get_expected_value(self) -> Any:\n \"\"\"\n Returns the default expected value, governed by provider best practices\n \"\"\"\n return True\n\n def get_evaluated_keys(self) -> List[str]:\n return force_list(self.get_inspected_key())\n", "path": "checkov/terraform/checks/resource/base_resource_value_check.py"}, {"content": "from typing import Any, List, Dict\n\nfrom checkov.common.models.consts import ANY_VALUE\nfrom checkov.common.models.enums import CheckCategories, CheckResult\nfrom checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck\n\n\nclass GoogleSubnetworkLoggingEnabled(BaseResourceValueCheck):\n def __init__(self) -> None:\n name = \"Ensure that VPC Flow Logs is enabled for every subnet in a VPC Network\"\n id = \"CKV_GCP_26\"\n supported_resources = (\"google_compute_subnetwork\",)\n categories = (CheckCategories.LOGGING,)\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def scan_resource_conf(self, conf: Dict[str, List[Any]]) -> CheckResult:\n # flow logs can't be enabled for `INTERNAL_HTTPS_LOAD_BALANCER` subnetworks\n purpose = conf.get(\"purpose\")\n if purpose and purpose[0] == \"INTERNAL_HTTPS_LOAD_BALANCER\":\n return CheckResult.UNKNOWN\n\n return super().scan_resource_conf(conf)\n\n def get_inspected_key(self) -> str:\n return \"log_config\"\n\n def get_expected_values(self) -> List[Any]:\n return [ANY_VALUE]\n\n\ncheck = GoogleSubnetworkLoggingEnabled()\n", "path": "checkov/terraform/checks/resource/gcp/GoogleSubnetworkLoggingEnabled.py"}]} | 2,665 | 1,014 |
gh_patches_debug_28446 | rasdani/github-patches | git_diff | wagtail__wagtail-11577 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add system checks for `WAGTAIL_DATE_FORMAT`, `WAGTAIL_DATETIME_FORMAT`, `WAGTAIL_TIME_FORMAT` being listed in Django configured formats
### Is your proposal related to a problem?
When configuring any of the `WAGTAIL_*_FORMAT` configs, these must be included in the relevant Django formats.
https://docs.wagtail.org/en/stable/reference/settings.html#wagtail-date-format-wagtail-datetime-format-wagtail-time-format
> ...must be one of the recognised formats listed in the DATE_INPUT_FORMATS, TIME_INPUT_FORMATS, or DATETIME_INPUT_FORMATS setting respectively
This config coordination is easy to get wrong, causing non-intuitive behaviour, as Wagtail/Django will fall back to defaults.
### Describe the solution you'd like
* Using the [Django system check framework](https://docs.djangoproject.com/en/5.0/topics/checks/) or similar, ensure that if there are misconfiguration issues there will be an error thrown or report at app startup.
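
A minimal sketch of what such a check might look like — illustrative only: the tag, check id, and message text are assumptions, and this naive version ignores the per-language (L10N) format files a real implementation would need to handle:
```python
# Hypothetical sketch of a Django system check; not Wagtail's actual implementation.
from django.conf import settings
from django.core.checks import Error, register


@register("datetime_format")
def wagtail_format_check(app_configs, **kwargs):
    errors = []
    pairs = [
        ("WAGTAIL_DATE_FORMAT", "DATE_INPUT_FORMATS"),
        ("WAGTAIL_DATETIME_FORMAT", "DATETIME_INPUT_FORMATS"),
        ("WAGTAIL_TIME_FORMAT", "TIME_INPUT_FORMATS"),
    ]
    for wagtail_setting, django_setting in pairs:
        value = getattr(settings, wagtail_setting, None)
        if value is None:
            continue  # setting not configured, nothing to validate
        if value not in getattr(settings, django_setting, []):
            errors.append(
                Error(
                    f"{wagtail_setting} is not listed in {django_setting}",
                    hint=f"Add {value!r} to {django_setting} or use one of its existing formats.",
                    id="wagtailadmin.E00X",  # placeholder id
                )
            )
    return errors
```
Registering it under a dedicated tag also makes it easy to run in isolation with `manage.py check --tag datetime_format`.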
### Describe alternatives you've considered
* Leave as is, the documentation advises what to do.
### Additional context
* An older PR had a really solid start to this but had a few questions and was only a draft. https://github.com/wagtail/wagtail/pull/6168
* It may make sense to start with a simple version of these checks as a smaller scoped PR.
### Working on this
* This would be good for anyone willing to dig deep into these configs & their Django equivalents.
* Some knowledge or learning of the way the Django system check framework works and exploration of the current usage in Wagtail.
* The PR linked above would be a good start but may not be the right way forward; it also did not have tests, and tests are a must.
* View our [contributing guidelines](https://docs.wagtail.org/en/latest/contributing/index.html), add a comment to the issue once you’re ready to start.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `wagtail/admin/checks.py`
Content:
```
1 import os
2
3 from django.core.checks import Error, Tags, Warning, register
4
5
6 @register("staticfiles")
7 def css_install_check(app_configs, **kwargs):
8 errors = []
9
10 css_path = os.path.join(
11 os.path.dirname(__file__), "static", "wagtailadmin", "css", "core.css"
12 )
13
14 if not os.path.isfile(css_path):
15 error_hint = (
16 """
17 Most likely you are running a development (non-packaged) copy of
18 Wagtail and have not built the static assets -
19 see https://docs.wagtail.org/en/latest/contributing/developing.html
20
21 File not found: %s
22 """
23 % css_path
24 )
25
26 errors.append(
27 Warning(
28 "CSS for the Wagtail admin is missing",
29 hint=error_hint,
30 id="wagtailadmin.W001",
31 )
32 )
33 return errors
34
35
36 @register(Tags.admin)
37 def base_form_class_check(app_configs, **kwargs):
38 from wagtail.admin.forms import WagtailAdminPageForm
39 from wagtail.models import get_page_models
40
41 errors = []
42
43 for cls in get_page_models():
44 if not issubclass(cls.base_form_class, WagtailAdminPageForm):
45 errors.append(
46 Error(
47 "{}.base_form_class does not extend WagtailAdminPageForm".format(
48 cls.__name__
49 ),
50 hint="Ensure that {}.{} extends WagtailAdminPageForm".format(
51 cls.base_form_class.__module__, cls.base_form_class.__name__
52 ),
53 obj=cls,
54 id="wagtailadmin.E001",
55 )
56 )
57
58 return errors
59
60
61 @register(Tags.admin)
62 def get_form_class_check(app_configs, **kwargs):
63 from wagtail.admin.forms import WagtailAdminPageForm
64 from wagtail.models import get_page_models
65
66 errors = []
67
68 for cls in get_page_models():
69 edit_handler = cls.get_edit_handler()
70 if not issubclass(edit_handler.get_form_class(), WagtailAdminPageForm):
71 errors.append(
72 Error(
73 "{cls}.get_edit_handler().get_form_class() does not extend WagtailAdminPageForm".format(
74 cls=cls.__name__
75 ),
76 hint="Ensure that the panel definition for {cls} creates a subclass of WagtailAdminPageForm".format(
77 cls=cls.__name__
78 ),
79 obj=cls,
80 id="wagtailadmin.E002",
81 )
82 )
83
84 return errors
85
86
87 @register("panels")
88 def inline_panel_model_panels_check(app_configs, **kwargs):
89 from wagtail.models import get_page_models
90
91 errors = []
92 page_models = get_page_models()
93
94 for cls in page_models:
95 errors.extend(check_panels_in_model(cls))
96
97 # filter out duplicate errors found for the same model
98 unique_errors = []
99 for error in errors:
100 if error.msg not in [e.msg for e in unique_errors]:
101 unique_errors.append(error)
102 return unique_errors
103
104
105 def check_panels_in_model(cls, context="model"):
106 """Check panels configuration uses `panels` when `edit_handler` not in use."""
107 from wagtail.admin.panels import InlinePanel, PanelGroup
108 from wagtail.models import Page
109
110 errors = []
111
112 if hasattr(cls, "get_edit_handler"):
113 # must check the InlinePanel related models
114 edit_handler = cls.get_edit_handler()
115 for tab in edit_handler.children:
116 if isinstance(tab, PanelGroup):
117 inline_panel_children = [
118 panel for panel in tab.children if isinstance(panel, InlinePanel)
119 ]
120 for inline_panel_child in inline_panel_children:
121 errors.extend(
122 check_panels_in_model(
123 inline_panel_child.db_field.related_model,
124 context="InlinePanel model",
125 )
126 )
127
128 if issubclass(cls, Page) or hasattr(cls, "edit_handler"):
129 # Pages do not need to be checked for standalone tabbed_panel usage
130 # if edit_handler is used on any model, assume config is correct
131 return errors
132
133 tabbed_panels = [
134 "content_panels",
135 "promote_panels",
136 "settings_panels",
137 ]
138
139 for panel_name in tabbed_panels:
140 class_name = cls.__name__
141 if not hasattr(cls, panel_name):
142 continue
143
144 panel_name_short = panel_name.replace("_panels", "").title()
145 error_title = "{}.{} will have no effect on {} editing".format(
146 class_name, panel_name, context
147 )
148
149 if "InlinePanel" in context:
150 error_hint = """Ensure that {} uses `panels` instead of `{}`.
151 There are no tabs on non-Page model editing within InlinePanels.""".format(
152 class_name, panel_name
153 )
154 else:
155 error_hint = """Ensure that {} uses `panels` instead of `{}`\
156 or set up an `edit_handler` if you want a tabbed editing interface.
157 There are no default tabs on non-Page models so there will be no \
158 {} tab for the {} to render in.""".format(
159 class_name, panel_name, panel_name_short, panel_name
160 )
161
162 error = Warning(error_title, hint=error_hint, obj=cls, id="wagtailadmin.W002")
163
164 errors.append(error)
165
166 return errors
167
168
169 @register("wagtailadmin_base_url")
170 def wagtail_admin_base_url_check(app_configs, **kwargs):
171 from django.conf import settings
172
173 errors = []
174
175 if getattr(settings, "WAGTAILADMIN_BASE_URL", None) is None:
176 errors.append(
177 Warning(
178 "The WAGTAILADMIN_BASE_URL setting is not defined",
179 hint="This should be the base URL used to access the Wagtail admin site. "
180 "Without this, URLs in notification emails will not display correctly.",
181 id="wagtailadmin.W003",
182 )
183 )
184
185 return errors
186
187
188 @register("file_overwrite")
189 def file_overwrite_check(app_configs, **kwargs):
190 from django import VERSION as DJANGO_VERSION
191 from django.conf import settings
192
193 if DJANGO_VERSION >= (5, 1):
194 file_storage = getattr(settings, "STORAGES")["default"]["BACKEND"]
195 else:
196 try:
197 file_storage = getattr(settings, "STORAGES")["default"]["BACKEND"]
198 except AttributeError:
199 file_storage = getattr(settings, "DEFAULT_FILE_STORAGE", None)
200
201 errors = []
202
203 if file_storage == "storages.backends.s3boto3.S3Boto3Storage" and getattr(
204 settings, "AWS_S3_FILE_OVERWRITE", True
205 ):
206 errors.append(
207 Warning(
208 "The AWS_S3_FILE_OVERWRITE setting is set to True",
209 hint="This should be set to False. The incorrect setting can cause documents and "
210 "other user-uploaded files to be silently overwritten or deleted.",
211 id="wagtailadmin.W004",
212 )
213 )
214 if file_storage == "storages.backends.azure_storage.AzureStorage" and getattr(
215 settings, "AZURE_OVERWRITE_FILES", False
216 ):
217 errors.append(
218 Warning(
219 "The AZURE_OVERWRITE_FILES setting is set to True",
220 hint="This should be set to False. The incorrect setting can cause documents and "
221 "other user-uploaded files to be silently overwritten or deleted.",
222 id="wagtailadmin.W004",
223 )
224 )
225 if file_storage == "storages.backends.gcloud.GoogleCloudStorage" and getattr(
226 settings, "GS_FILE_OVERWRITE", True
227 ):
228 errors.append(
229 Warning(
230 "The GS_FILE_OVERWRITE setting is set to True",
231 hint="This should be set to False. The incorrect setting can cause documents and "
232 "other user-uploaded files to be silently overwritten or deleted.",
233 id="wagtailadmin.W004",
234 )
235 )
236
237 return errors
238
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/wagtail/admin/checks.py b/wagtail/admin/checks.py
--- a/wagtail/admin/checks.py
+++ b/wagtail/admin/checks.py
@@ -235,3 +235,54 @@
)
return errors
+
+
+@register("datetime_format")
+def datetime_format_check(app_configs, **kwargs):
+ """
+ If L10N is enabled, check if WAGTAIL_* formats are compatible with Django input formats.
+ See https://docs.djangoproject.com/en/stable/topics/i18n/formatting/#creating-custom-format-files
+ See https://docs.wagtail.org/en/stable/reference/settings.html#wagtail-date-format-wagtail-datetime-format-wagtail-time-format
+ """
+
+ from django.conf import settings
+ from django.utils import formats, translation
+
+ errors = []
+
+ if not getattr(settings, "USE_L10N", False):
+ return errors
+
+ formats.FORMAT_SETTINGS = formats.FORMAT_SETTINGS.union(
+ [
+ "WAGTAIL_DATE_FORMAT",
+ "WAGTAIL_DATETIME_FORMAT",
+ "WAGTAIL_TIME_FORMAT",
+ ]
+ )
+
+ for code, label in settings.LANGUAGES:
+ with translation.override(code):
+ for wagtail_format, django_formats in [
+ ("WAGTAIL_DATE_FORMAT", "DATE_INPUT_FORMATS"),
+ ("WAGTAIL_DATETIME_FORMAT", "DATETIME_INPUT_FORMATS"),
+ ("WAGTAIL_TIME_FORMAT", "TIME_INPUT_FORMATS"),
+ ]:
+ wagtail_format_value = getattr(settings, wagtail_format, None)
+ django_formats_value = getattr(settings, django_formats, None)
+
+ if wagtail_format_value is None:
+ # Skip the iteration if wagtail_format is not present
+ continue
+
+ input_format = formats.get_format_lazy(wagtail_format_value)
+ input_formats = formats.get_format_lazy(django_formats_value)
+ if str(input_format) not in str(input_formats):
+ errors.append(
+ Error(
+ "Configuration error",
+ hint=f"{wagtail_format} {input_format} must be in {django_formats} for language {label} ({code}).",
+ )
+ )
+
+ return errors
| {"golden_diff": "diff --git a/wagtail/admin/checks.py b/wagtail/admin/checks.py\n--- a/wagtail/admin/checks.py\n+++ b/wagtail/admin/checks.py\n@@ -235,3 +235,54 @@\n )\n \n return errors\n+\n+\n+@register(\"datetime_format\")\n+def datetime_format_check(app_configs, **kwargs):\n+ \"\"\"\n+ If L10N is enabled, check if WAGTAIL_* formats are compatible with Django input formats.\n+ See https://docs.djangoproject.com/en/stable/topics/i18n/formatting/#creating-custom-format-files\n+ See https://docs.wagtail.org/en/stable/reference/settings.html#wagtail-date-format-wagtail-datetime-format-wagtail-time-format\n+ \"\"\"\n+\n+ from django.conf import settings\n+ from django.utils import formats, translation\n+\n+ errors = []\n+\n+ if not getattr(settings, \"USE_L10N\", False):\n+ return errors\n+\n+ formats.FORMAT_SETTINGS = formats.FORMAT_SETTINGS.union(\n+ [\n+ \"WAGTAIL_DATE_FORMAT\",\n+ \"WAGTAIL_DATETIME_FORMAT\",\n+ \"WAGTAIL_TIME_FORMAT\",\n+ ]\n+ )\n+\n+ for code, label in settings.LANGUAGES:\n+ with translation.override(code):\n+ for wagtail_format, django_formats in [\n+ (\"WAGTAIL_DATE_FORMAT\", \"DATE_INPUT_FORMATS\"),\n+ (\"WAGTAIL_DATETIME_FORMAT\", \"DATETIME_INPUT_FORMATS\"),\n+ (\"WAGTAIL_TIME_FORMAT\", \"TIME_INPUT_FORMATS\"),\n+ ]:\n+ wagtail_format_value = getattr(settings, wagtail_format, None)\n+ django_formats_value = getattr(settings, django_formats, None)\n+\n+ if wagtail_format_value is None:\n+ # Skip the iteration if wagtail_format is not present\n+ continue\n+\n+ input_format = formats.get_format_lazy(wagtail_format_value)\n+ input_formats = formats.get_format_lazy(django_formats_value)\n+ if str(input_format) not in str(input_formats):\n+ errors.append(\n+ Error(\n+ \"Configuration error\",\n+ hint=f\"{wagtail_format} {input_format} must be in {django_formats} for language {label} ({code}).\",\n+ )\n+ )\n+\n+ return errors\n", "issue": "Add system checks for `WAGTAIL_DATE_FORMAT`, `WAGTAIL_DATETIME_FORMAT`, `WAGTAIL_TIME_FORMAT` being listed in Django configured formats\n### Is your proposal related to a problem?\r\n\r\nWhen configuring any of the `WAGTAIL_*_FORMAT` configs, these must be included in the relevant Django formats.\r\n\r\nhttps://docs.wagtail.org/en/stable/reference/settings.html#wagtail-date-format-wagtail-datetime-format-wagtail-time-format\r\n\r\n> ...must be one of the recognised formats listed in the DATE_INPUT_FORMATS, TIME_INPUT_FORMATS, or DATETIME_INPUT_FORMATS setting respectively \r\n\r\nThis config coordination is easy to to get wrong causing non-intuitive behaviour as Wagtail/Django will fallback to defaults.\r\n\r\n### Describe the solution you'd like\r\n\r\n* Using the [Django system check framework](https://docs.djangoproject.com/en/5.0/topics/checks/) or similar, ensure that if there are misconfiguration issues there will be an error thrown or report at app startup.\r\n\r\n### Describe alternatives you've considered\r\n\r\n* Leave as is, the documentation advises what to do.\r\n\r\n### Additional context\r\n\r\n* An older PR had a really solid start to this but had a few questions and was only a draft. 
https://github.com/wagtail/wagtail/pull/6168\r\n* It may make sense to start with a simple version of these checks as a smaller scoped PR.\r\n\r\n### Working on this\r\n\r\n* This would be good for anyone willing to dig deep into these configs & their Django equivalents.\r\n* Some knowledge or learning of the way the Django system check framework works and exploration of the current usage in Wagtail.\r\n* The PR linked above would be a good start but may not be the right way forward, it also did not have tests and tests are a must.\r\n* View our [contributing guidelines](https://docs.wagtail.org/en/latest/contributing/index.html), add a comment to the issue once you\u2019re ready to start.\r\n\n", "before_files": [{"content": "import os\n\nfrom django.core.checks import Error, Tags, Warning, register\n\n\n@register(\"staticfiles\")\ndef css_install_check(app_configs, **kwargs):\n errors = []\n\n css_path = os.path.join(\n os.path.dirname(__file__), \"static\", \"wagtailadmin\", \"css\", \"core.css\"\n )\n\n if not os.path.isfile(css_path):\n error_hint = (\n \"\"\"\n Most likely you are running a development (non-packaged) copy of\n Wagtail and have not built the static assets -\n see https://docs.wagtail.org/en/latest/contributing/developing.html\n\n File not found: %s\n \"\"\"\n % css_path\n )\n\n errors.append(\n Warning(\n \"CSS for the Wagtail admin is missing\",\n hint=error_hint,\n id=\"wagtailadmin.W001\",\n )\n )\n return errors\n\n\n@register(Tags.admin)\ndef base_form_class_check(app_configs, **kwargs):\n from wagtail.admin.forms import WagtailAdminPageForm\n from wagtail.models import get_page_models\n\n errors = []\n\n for cls in get_page_models():\n if not issubclass(cls.base_form_class, WagtailAdminPageForm):\n errors.append(\n Error(\n \"{}.base_form_class does not extend WagtailAdminPageForm\".format(\n cls.__name__\n ),\n hint=\"Ensure that {}.{} extends WagtailAdminPageForm\".format(\n cls.base_form_class.__module__, cls.base_form_class.__name__\n ),\n obj=cls,\n id=\"wagtailadmin.E001\",\n )\n )\n\n return errors\n\n\n@register(Tags.admin)\ndef get_form_class_check(app_configs, **kwargs):\n from wagtail.admin.forms import WagtailAdminPageForm\n from wagtail.models import get_page_models\n\n errors = []\n\n for cls in get_page_models():\n edit_handler = cls.get_edit_handler()\n if not issubclass(edit_handler.get_form_class(), WagtailAdminPageForm):\n errors.append(\n Error(\n \"{cls}.get_edit_handler().get_form_class() does not extend WagtailAdminPageForm\".format(\n cls=cls.__name__\n ),\n hint=\"Ensure that the panel definition for {cls} creates a subclass of WagtailAdminPageForm\".format(\n cls=cls.__name__\n ),\n obj=cls,\n id=\"wagtailadmin.E002\",\n )\n )\n\n return errors\n\n\n@register(\"panels\")\ndef inline_panel_model_panels_check(app_configs, **kwargs):\n from wagtail.models import get_page_models\n\n errors = []\n page_models = get_page_models()\n\n for cls in page_models:\n errors.extend(check_panels_in_model(cls))\n\n # filter out duplicate errors found for the same model\n unique_errors = []\n for error in errors:\n if error.msg not in [e.msg for e in unique_errors]:\n unique_errors.append(error)\n return unique_errors\n\n\ndef check_panels_in_model(cls, context=\"model\"):\n \"\"\"Check panels configuration uses `panels` when `edit_handler` not in use.\"\"\"\n from wagtail.admin.panels import InlinePanel, PanelGroup\n from wagtail.models import Page\n\n errors = []\n\n if hasattr(cls, \"get_edit_handler\"):\n # must check the InlinePanel related models\n 
edit_handler = cls.get_edit_handler()\n for tab in edit_handler.children:\n if isinstance(tab, PanelGroup):\n inline_panel_children = [\n panel for panel in tab.children if isinstance(panel, InlinePanel)\n ]\n for inline_panel_child in inline_panel_children:\n errors.extend(\n check_panels_in_model(\n inline_panel_child.db_field.related_model,\n context=\"InlinePanel model\",\n )\n )\n\n if issubclass(cls, Page) or hasattr(cls, \"edit_handler\"):\n # Pages do not need to be checked for standalone tabbed_panel usage\n # if edit_handler is used on any model, assume config is correct\n return errors\n\n tabbed_panels = [\n \"content_panels\",\n \"promote_panels\",\n \"settings_panels\",\n ]\n\n for panel_name in tabbed_panels:\n class_name = cls.__name__\n if not hasattr(cls, panel_name):\n continue\n\n panel_name_short = panel_name.replace(\"_panels\", \"\").title()\n error_title = \"{}.{} will have no effect on {} editing\".format(\n class_name, panel_name, context\n )\n\n if \"InlinePanel\" in context:\n error_hint = \"\"\"Ensure that {} uses `panels` instead of `{}`.\nThere are no tabs on non-Page model editing within InlinePanels.\"\"\".format(\n class_name, panel_name\n )\n else:\n error_hint = \"\"\"Ensure that {} uses `panels` instead of `{}`\\\nor set up an `edit_handler` if you want a tabbed editing interface.\nThere are no default tabs on non-Page models so there will be no \\\n{} tab for the {} to render in.\"\"\".format(\n class_name, panel_name, panel_name_short, panel_name\n )\n\n error = Warning(error_title, hint=error_hint, obj=cls, id=\"wagtailadmin.W002\")\n\n errors.append(error)\n\n return errors\n\n\n@register(\"wagtailadmin_base_url\")\ndef wagtail_admin_base_url_check(app_configs, **kwargs):\n from django.conf import settings\n\n errors = []\n\n if getattr(settings, \"WAGTAILADMIN_BASE_URL\", None) is None:\n errors.append(\n Warning(\n \"The WAGTAILADMIN_BASE_URL setting is not defined\",\n hint=\"This should be the base URL used to access the Wagtail admin site. \"\n \"Without this, URLs in notification emails will not display correctly.\",\n id=\"wagtailadmin.W003\",\n )\n )\n\n return errors\n\n\n@register(\"file_overwrite\")\ndef file_overwrite_check(app_configs, **kwargs):\n from django import VERSION as DJANGO_VERSION\n from django.conf import settings\n\n if DJANGO_VERSION >= (5, 1):\n file_storage = getattr(settings, \"STORAGES\")[\"default\"][\"BACKEND\"]\n else:\n try:\n file_storage = getattr(settings, \"STORAGES\")[\"default\"][\"BACKEND\"]\n except AttributeError:\n file_storage = getattr(settings, \"DEFAULT_FILE_STORAGE\", None)\n\n errors = []\n\n if file_storage == \"storages.backends.s3boto3.S3Boto3Storage\" and getattr(\n settings, \"AWS_S3_FILE_OVERWRITE\", True\n ):\n errors.append(\n Warning(\n \"The AWS_S3_FILE_OVERWRITE setting is set to True\",\n hint=\"This should be set to False. The incorrect setting can cause documents and \"\n \"other user-uploaded files to be silently overwritten or deleted.\",\n id=\"wagtailadmin.W004\",\n )\n )\n if file_storage == \"storages.backends.azure_storage.AzureStorage\" and getattr(\n settings, \"AZURE_OVERWRITE_FILES\", False\n ):\n errors.append(\n Warning(\n \"The AZURE_OVERWRITE_FILES setting is set to True\",\n hint=\"This should be set to False. 
The incorrect setting can cause documents and \"\n \"other user-uploaded files to be silently overwritten or deleted.\",\n id=\"wagtailadmin.W004\",\n )\n )\n if file_storage == \"storages.backends.gcloud.GoogleCloudStorage\" and getattr(\n settings, \"GS_FILE_OVERWRITE\", True\n ):\n errors.append(\n Warning(\n \"The GS_FILE_OVERWRITE setting is set to True\",\n hint=\"This should be set to False. The incorrect setting can cause documents and \"\n \"other user-uploaded files to be silently overwritten or deleted.\",\n id=\"wagtailadmin.W004\",\n )\n )\n\n return errors\n", "path": "wagtail/admin/checks.py"}], "after_files": [{"content": "import os\n\nfrom django.core.checks import Error, Tags, Warning, register\n\n\n@register(\"staticfiles\")\ndef css_install_check(app_configs, **kwargs):\n errors = []\n\n css_path = os.path.join(\n os.path.dirname(__file__), \"static\", \"wagtailadmin\", \"css\", \"core.css\"\n )\n\n if not os.path.isfile(css_path):\n error_hint = (\n \"\"\"\n Most likely you are running a development (non-packaged) copy of\n Wagtail and have not built the static assets -\n see https://docs.wagtail.org/en/latest/contributing/developing.html\n\n File not found: %s\n \"\"\"\n % css_path\n )\n\n errors.append(\n Warning(\n \"CSS for the Wagtail admin is missing\",\n hint=error_hint,\n id=\"wagtailadmin.W001\",\n )\n )\n return errors\n\n\n@register(Tags.admin)\ndef base_form_class_check(app_configs, **kwargs):\n from wagtail.admin.forms import WagtailAdminPageForm\n from wagtail.models import get_page_models\n\n errors = []\n\n for cls in get_page_models():\n if not issubclass(cls.base_form_class, WagtailAdminPageForm):\n errors.append(\n Error(\n \"{}.base_form_class does not extend WagtailAdminPageForm\".format(\n cls.__name__\n ),\n hint=\"Ensure that {}.{} extends WagtailAdminPageForm\".format(\n cls.base_form_class.__module__, cls.base_form_class.__name__\n ),\n obj=cls,\n id=\"wagtailadmin.E001\",\n )\n )\n\n return errors\n\n\n@register(Tags.admin)\ndef get_form_class_check(app_configs, **kwargs):\n from wagtail.admin.forms import WagtailAdminPageForm\n from wagtail.models import get_page_models\n\n errors = []\n\n for cls in get_page_models():\n edit_handler = cls.get_edit_handler()\n if not issubclass(edit_handler.get_form_class(), WagtailAdminPageForm):\n errors.append(\n Error(\n \"{cls}.get_edit_handler().get_form_class() does not extend WagtailAdminPageForm\".format(\n cls=cls.__name__\n ),\n hint=\"Ensure that the panel definition for {cls} creates a subclass of WagtailAdminPageForm\".format(\n cls=cls.__name__\n ),\n obj=cls,\n id=\"wagtailadmin.E002\",\n )\n )\n\n return errors\n\n\n@register(\"panels\")\ndef inline_panel_model_panels_check(app_configs, **kwargs):\n from wagtail.models import get_page_models\n\n errors = []\n page_models = get_page_models()\n\n for cls in page_models:\n errors.extend(check_panels_in_model(cls))\n\n # filter out duplicate errors found for the same model\n unique_errors = []\n for error in errors:\n if error.msg not in [e.msg for e in unique_errors]:\n unique_errors.append(error)\n return unique_errors\n\n\ndef check_panels_in_model(cls, context=\"model\"):\n \"\"\"Check panels configuration uses `panels` when `edit_handler` not in use.\"\"\"\n from wagtail.admin.panels import InlinePanel, PanelGroup\n from wagtail.models import Page\n\n errors = []\n\n if hasattr(cls, \"get_edit_handler\"):\n # must check the InlinePanel related models\n edit_handler = cls.get_edit_handler()\n for tab in edit_handler.children:\n if 
isinstance(tab, PanelGroup):\n inline_panel_children = [\n panel for panel in tab.children if isinstance(panel, InlinePanel)\n ]\n for inline_panel_child in inline_panel_children:\n errors.extend(\n check_panels_in_model(\n inline_panel_child.db_field.related_model,\n context=\"InlinePanel model\",\n )\n )\n\n if issubclass(cls, Page) or hasattr(cls, \"edit_handler\"):\n # Pages do not need to be checked for standalone tabbed_panel usage\n # if edit_handler is used on any model, assume config is correct\n return errors\n\n tabbed_panels = [\n \"content_panels\",\n \"promote_panels\",\n \"settings_panels\",\n ]\n\n for panel_name in tabbed_panels:\n class_name = cls.__name__\n if not hasattr(cls, panel_name):\n continue\n\n panel_name_short = panel_name.replace(\"_panels\", \"\").title()\n error_title = \"{}.{} will have no effect on {} editing\".format(\n class_name, panel_name, context\n )\n\n if \"InlinePanel\" in context:\n error_hint = \"\"\"Ensure that {} uses `panels` instead of `{}`.\nThere are no tabs on non-Page model editing within InlinePanels.\"\"\".format(\n class_name, panel_name\n )\n else:\n error_hint = \"\"\"Ensure that {} uses `panels` instead of `{}`\\\nor set up an `edit_handler` if you want a tabbed editing interface.\nThere are no default tabs on non-Page models so there will be no \\\n{} tab for the {} to render in.\"\"\".format(\n class_name, panel_name, panel_name_short, panel_name\n )\n\n error = Warning(error_title, hint=error_hint, obj=cls, id=\"wagtailadmin.W002\")\n\n errors.append(error)\n\n return errors\n\n\n@register(\"wagtailadmin_base_url\")\ndef wagtail_admin_base_url_check(app_configs, **kwargs):\n from django.conf import settings\n\n errors = []\n\n if getattr(settings, \"WAGTAILADMIN_BASE_URL\", None) is None:\n errors.append(\n Warning(\n \"The WAGTAILADMIN_BASE_URL setting is not defined\",\n hint=\"This should be the base URL used to access the Wagtail admin site. \"\n \"Without this, URLs in notification emails will not display correctly.\",\n id=\"wagtailadmin.W003\",\n )\n )\n\n return errors\n\n\n@register(\"file_overwrite\")\ndef file_overwrite_check(app_configs, **kwargs):\n from django import VERSION as DJANGO_VERSION\n from django.conf import settings\n\n if DJANGO_VERSION >= (5, 1):\n file_storage = getattr(settings, \"STORAGES\")[\"default\"][\"BACKEND\"]\n else:\n try:\n file_storage = getattr(settings, \"STORAGES\")[\"default\"][\"BACKEND\"]\n except AttributeError:\n file_storage = getattr(settings, \"DEFAULT_FILE_STORAGE\", None)\n\n errors = []\n\n if file_storage == \"storages.backends.s3boto3.S3Boto3Storage\" and getattr(\n settings, \"AWS_S3_FILE_OVERWRITE\", True\n ):\n errors.append(\n Warning(\n \"The AWS_S3_FILE_OVERWRITE setting is set to True\",\n hint=\"This should be set to False. The incorrect setting can cause documents and \"\n \"other user-uploaded files to be silently overwritten or deleted.\",\n id=\"wagtailadmin.W004\",\n )\n )\n if file_storage == \"storages.backends.azure_storage.AzureStorage\" and getattr(\n settings, \"AZURE_OVERWRITE_FILES\", False\n ):\n errors.append(\n Warning(\n \"The AZURE_OVERWRITE_FILES setting is set to True\",\n hint=\"This should be set to False. 
The incorrect setting can cause documents and \"\n \"other user-uploaded files to be silently overwritten or deleted.\",\n id=\"wagtailadmin.W004\",\n )\n )\n if file_storage == \"storages.backends.gcloud.GoogleCloudStorage\" and getattr(\n settings, \"GS_FILE_OVERWRITE\", True\n ):\n errors.append(\n Warning(\n \"The GS_FILE_OVERWRITE setting is set to True\",\n hint=\"This should be set to False. The incorrect setting can cause documents and \"\n \"other user-uploaded files to be silently overwritten or deleted.\",\n id=\"wagtailadmin.W004\",\n )\n )\n\n return errors\n\n\n@register(\"datetime_format\")\ndef datetime_format_check(app_configs, **kwargs):\n \"\"\"\n If L10N is enabled, check if WAGTAIL_* formats are compatible with Django input formats.\n See https://docs.djangoproject.com/en/stable/topics/i18n/formatting/#creating-custom-format-files\n See https://docs.wagtail.org/en/stable/reference/settings.html#wagtail-date-format-wagtail-datetime-format-wagtail-time-format\n \"\"\"\n\n from django.conf import settings\n from django.utils import formats, translation\n\n errors = []\n\n if not getattr(settings, \"USE_L10N\", False):\n return errors\n\n formats.FORMAT_SETTINGS = formats.FORMAT_SETTINGS.union(\n [\n \"WAGTAIL_DATE_FORMAT\",\n \"WAGTAIL_DATETIME_FORMAT\",\n \"WAGTAIL_TIME_FORMAT\",\n ]\n )\n\n for code, label in settings.LANGUAGES:\n with translation.override(code):\n for wagtail_format, django_formats in [\n (\"WAGTAIL_DATE_FORMAT\", \"DATE_INPUT_FORMATS\"),\n (\"WAGTAIL_DATETIME_FORMAT\", \"DATETIME_INPUT_FORMATS\"),\n (\"WAGTAIL_TIME_FORMAT\", \"TIME_INPUT_FORMATS\"),\n ]:\n wagtail_format_value = getattr(settings, wagtail_format, None)\n django_formats_value = getattr(settings, django_formats, None)\n\n if wagtail_format_value is None:\n # Skip the iteration if wagtail_format is not present\n continue\n\n input_format = formats.get_format_lazy(wagtail_format_value)\n input_formats = formats.get_format_lazy(django_formats_value)\n if str(input_format) not in str(input_formats):\n errors.append(\n Error(\n \"Configuration error\",\n hint=f\"{wagtail_format} {input_format} must be in {django_formats} for language {label} ({code}).\",\n )\n )\n\n return errors\n", "path": "wagtail/admin/checks.py"}]} | 2,986 | 509 |
gh_patches_debug_4595 | rasdani/github-patches | git_diff | ocf__ocfweb-45 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Links in headings are broken on docs
For example:
https://dev.ocf.berkeley.edu/docs/faq/

I think this is a side-effect of overreaching CSS rules intended for the link icons.
--- END ISSUE ---
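For orientation, here is a minimal Python sketch of the markup shapes involved. The heading text and id below are hand-written for illustration, the stylesheet itself is not part of this record, and the CSS-scoping remark is an assumption drawn from the issue description rather than from the repository.

```python
# Illustrative only: the slug/id is made up, not produced by the renderer.

# Shape of what OcfMarkdownRenderer.header() emits today -- the trailing
# permalink anchor carries an empty class attribute, so any icon styling has
# to match something like "h2 a", which also hits ordinary links that happen
# to sit inside a heading:
current = '<h2 id="h2_services">Services <a class="" href="#h2_services"><span></span></a></h2>'

# With a dedicated class on the permalink, the icon styling can target
# "a.anchor" instead, leaving regular links in headings alone:
proposed = '<h2 id="h2_services">Services <a class="anchor" href="#h2_services"><span></span></a></h2>'

print(current)
print(proposed)
```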
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ocfweb/component/markdown.py`
Content:
```
1 import re
2
3 import mistune
4 from django.core.urlresolvers import reverse
5
6 from ocfweb.caching import lru_cache
7
8 # tags of a format like: [[!meta title="Backups"]]
9 META_REGEX = re.compile('\[\[!meta ([a-z]+)="([^"]*)"\]\]')
10
11
12 class HtmlCommentsInlineLexerMixin:
13 """Strip HTML comments inside lines."""
14
15 def enable_html_comments(self):
16 self.rules.html_comment = re.compile(
17 '^<!--(.*?)-->'
18 )
19 self.default_rules.insert(0, 'html_comment')
20
21 def output_html_comment(self, m):
22 return ''
23
24
25 class HtmlCommentsBlockLexerMixin:
26 """Strip blocks which consist entirely of HTML comments."""
27
28 def enable_html_comments(self):
29 self.rules.html_comment = re.compile(
30 '^<!--(.*?)-->'
31 )
32 self.default_rules.insert(0, 'html_comment')
33
34 def parse_html_comment(self, m):
35 pass
36
37
38 class DjangoLinkInlineLexerMixin:
39 """Turn special Markdown link syntax into Django links.
40
41 In Django templates, we can use `url` tags, such as:
42 {% url 'staff-hours' %}
43 {% url 'doc' 'staff/backend/backups' %}
44
45 In Markdown, we use the following fake syntax to generate Django links:
46 [[human readable name|staff-hours]]
47 [[human readable name|doc staff/backend/backups]]
48
49 You can link to fragments with a # at the very end:
50 [[human readable name|staff-hours#something]]
51 [[human readable name|doc staff/backend/backups#something]]
52 """
53
54 split_words = re.compile('((?:\S|\\\\ )+)')
55
56 def enable_django_links(self):
57 self.rules.django_link = re.compile(
58 '^\[\[(?!\!)'
59 '([\s\S]+?)'
60 '\|'
61 '([^#]+?)'
62 '(?:#(.*?))?'
63 '\]\]'
64 )
65 self.default_rules.insert(0, 'django_link')
66
67 def output_django_link(self, m):
68 text, target, fragment = m.group(1), m.group(2), m.group(3)
69
70 def href(link, fragment):
71 if fragment:
72 return link + '#' + fragment
73 return link
74
75 words = DjangoLinkInlineLexerMixin.split_words.findall(target)
76 name, *params = words
77 return self.renderer.link(
78 link=href(reverse(name, args=params), fragment),
79 title=None,
80 text=text,
81 )
82
83
84 class HeaderRendererMixin:
85 """Mixin to render headers with auto-generated IDs (or provided IDs).
86
87 If headers are written as usual, they'll be given automatically-generated
88 IDs based on their header level and text.
89
90 Headers can also be specified with an ID at the end wrapped in curly braces:
91
92 ### My Header {my_id}
93
94 This ID will be used directly without further manipulation, and can be
95 relied on for linking.
96
97 Custom IDs can consist only of lowercase a-z, 0-9, dash, and underscore.
98
99 IDs are tracked into a table of contents which should be reset before
100 rendering a document and read afterwards.
101 """
102
103 def reset_toc(self):
104 self.toc = []
105 self.toc_ids = set()
106
107 def get_toc(self):
108 return self.toc
109
110 def header(self, text, level, raw=None):
111 custom_id_match = re.match(r'^(.*?)\s+{([a-z0-9\-_]+)}\s*$', text)
112 if custom_id_match:
113 text = custom_id_match.group(1)
114 id = custom_id_match.group(2)
115
116 if id in self.toc_ids:
117 raise ValueError('Duplicate header ID in Markdown: "{}"'.format(id))
118 else:
119 id = 'h{level}_{title}'.format(
120 level=level,
121 title=re.sub('[^a-z0-9\-_ ]', '', text.lower()).strip().replace(' ', '-'),
122 )
123
124 # dumb collision avoidance
125 while id in self.toc_ids:
126 id += '_'
127
128 self.toc.append((level, text, id))
129 self.toc_ids.add(id)
130 return '<h{level} id="{id}">{text} <a class="" href="#{id}"><span></span></a></h{level}>\n'.format(
131 level=level,
132 id=id,
133 text=text,
134 )
135
136
137 class OcfMarkdownRenderer(
138 HeaderRendererMixin,
139 mistune.Renderer,
140 ):
141 pass
142
143
144 class OcfMarkdownInlineLexer(
145 mistune.InlineLexer,
146 DjangoLinkInlineLexerMixin,
147 HtmlCommentsInlineLexerMixin,
148 ):
149 pass
150
151
152 class OcfMarkdownBlockLexer(
153 mistune.BlockLexer,
154 HtmlCommentsBlockLexerMixin,
155 ):
156 pass
157
158
159 _renderer = OcfMarkdownRenderer(
160 escape=True,
161 hard_wrap=False,
162 )
163
164 _inline = OcfMarkdownInlineLexer(_renderer)
165 _inline.enable_html_comments()
166 _inline.enable_django_links()
167
168 _block = OcfMarkdownBlockLexer(mistune.BlockGrammar())
169 _block.enable_html_comments()
170
171 _markdown = mistune.Markdown(
172 renderer=_renderer,
173 inline=_inline,
174 block=_block,
175 )
176
177
178 def markdown(text):
179 _renderer.reset_toc()
180 return _markdown(text)
181
182
183 def text_and_meta(f):
184 """Return tuple (text, meta dict) for the given file.
185
186 Meta tags are stripped from the Markdown source, but the Markdown is
187 not rendered.
188 """
189 text = f.read()
190 meta = {}
191
192 def repl(match):
193 meta[match.group(1)] = match.group(2)
194 return ''
195
196 text = META_REGEX.sub(repl, text)
197 return text, meta
198
199
200 @lru_cache()
201 def markdown_and_toc(text):
202 """Return tuple (html, toc) for the given text."""
203 html = markdown(text)
204 return html, _renderer.get_toc()
205
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ocfweb/component/markdown.py b/ocfweb/component/markdown.py
--- a/ocfweb/component/markdown.py
+++ b/ocfweb/component/markdown.py
@@ -127,7 +127,7 @@
self.toc.append((level, text, id))
self.toc_ids.add(id)
- return '<h{level} id="{id}">{text} <a class="" href="#{id}"><span></span></a></h{level}>\n'.format(
+ return '<h{level} id="{id}">{text} <a class="anchor" href="#{id}"><span></span></a></h{level}>\n'.format(
level=level,
id=id,
text=text,
| {"golden_diff": "diff --git a/ocfweb/component/markdown.py b/ocfweb/component/markdown.py\n--- a/ocfweb/component/markdown.py\n+++ b/ocfweb/component/markdown.py\n@@ -127,7 +127,7 @@\n \n self.toc.append((level, text, id))\n self.toc_ids.add(id)\n- return '<h{level} id=\"{id}\">{text} <a class=\"\" href=\"#{id}\"><span></span></a></h{level}>\\n'.format(\n+ return '<h{level} id=\"{id}\">{text} <a class=\"anchor\" href=\"#{id}\"><span></span></a></h{level}>\\n'.format(\n level=level,\n id=id,\n text=text,\n", "issue": "Links in headings are broken on docs\nFor example:\nhttps://dev.ocf.berkeley.edu/docs/faq/\n\n\nI think this is a side-effect of overreaching CSS rules intended for the link icons.\n\n", "before_files": [{"content": "import re\n\nimport mistune\nfrom django.core.urlresolvers import reverse\n\nfrom ocfweb.caching import lru_cache\n\n# tags of a format like: [[!meta title=\"Backups\"]]\nMETA_REGEX = re.compile('\\[\\[!meta ([a-z]+)=\"([^\"]*)\"\\]\\]')\n\n\nclass HtmlCommentsInlineLexerMixin:\n \"\"\"Strip HTML comments inside lines.\"\"\"\n\n def enable_html_comments(self):\n self.rules.html_comment = re.compile(\n '^<!--(.*?)-->'\n )\n self.default_rules.insert(0, 'html_comment')\n\n def output_html_comment(self, m):\n return ''\n\n\nclass HtmlCommentsBlockLexerMixin:\n \"\"\"Strip blocks which consist entirely of HTML comments.\"\"\"\n\n def enable_html_comments(self):\n self.rules.html_comment = re.compile(\n '^<!--(.*?)-->'\n )\n self.default_rules.insert(0, 'html_comment')\n\n def parse_html_comment(self, m):\n pass\n\n\nclass DjangoLinkInlineLexerMixin:\n \"\"\"Turn special Markdown link syntax into Django links.\n\n In Django templates, we can use `url` tags, such as:\n {% url 'staff-hours' %}\n {% url 'doc' 'staff/backend/backups' %}\n\n In Markdown, we use the following fake syntax to generate Django links:\n [[human readable name|staff-hours]]\n [[human readable name|doc staff/backend/backups]]\n\n You can link to fragments with a # at the very end:\n [[human readable name|staff-hours#something]]\n [[human readable name|doc staff/backend/backups#something]]\n \"\"\"\n\n split_words = re.compile('((?:\\S|\\\\\\\\ )+)')\n\n def enable_django_links(self):\n self.rules.django_link = re.compile(\n '^\\[\\[(?!\\!)'\n '([\\s\\S]+?)'\n '\\|'\n '([^#]+?)'\n '(?:#(.*?))?'\n '\\]\\]'\n )\n self.default_rules.insert(0, 'django_link')\n\n def output_django_link(self, m):\n text, target, fragment = m.group(1), m.group(2), m.group(3)\n\n def href(link, fragment):\n if fragment:\n return link + '#' + fragment\n return link\n\n words = DjangoLinkInlineLexerMixin.split_words.findall(target)\n name, *params = words\n return self.renderer.link(\n link=href(reverse(name, args=params), fragment),\n title=None,\n text=text,\n )\n\n\nclass HeaderRendererMixin:\n \"\"\"Mixin to render headers with auto-generated IDs (or provided IDs).\n\n If headers are written as usual, they'll be given automatically-generated\n IDs based on their header level and text.\n\n Headers can also be specified with an ID at the end wrapped in curly braces:\n\n ### My Header {my_id}\n\n This ID will be used directly without further manipulation, and can be\n relied on for linking.\n\n Custom IDs can consist only of lowercase a-z, 0-9, dash, and underscore.\n\n IDs are tracked into a table of contents which should be reset before\n rendering a document and read afterwards.\n \"\"\"\n\n def reset_toc(self):\n self.toc = []\n self.toc_ids = set()\n\n def get_toc(self):\n return self.toc\n\n def header(self, text, 
level, raw=None):\n custom_id_match = re.match(r'^(.*?)\\s+{([a-z0-9\\-_]+)}\\s*$', text)\n if custom_id_match:\n text = custom_id_match.group(1)\n id = custom_id_match.group(2)\n\n if id in self.toc_ids:\n raise ValueError('Duplicate header ID in Markdown: \"{}\"'.format(id))\n else:\n id = 'h{level}_{title}'.format(\n level=level,\n title=re.sub('[^a-z0-9\\-_ ]', '', text.lower()).strip().replace(' ', '-'),\n )\n\n # dumb collision avoidance\n while id in self.toc_ids:\n id += '_'\n\n self.toc.append((level, text, id))\n self.toc_ids.add(id)\n return '<h{level} id=\"{id}\">{text} <a class=\"\" href=\"#{id}\"><span></span></a></h{level}>\\n'.format(\n level=level,\n id=id,\n text=text,\n )\n\n\nclass OcfMarkdownRenderer(\n HeaderRendererMixin,\n mistune.Renderer,\n):\n pass\n\n\nclass OcfMarkdownInlineLexer(\n mistune.InlineLexer,\n DjangoLinkInlineLexerMixin,\n HtmlCommentsInlineLexerMixin,\n):\n pass\n\n\nclass OcfMarkdownBlockLexer(\n mistune.BlockLexer,\n HtmlCommentsBlockLexerMixin,\n):\n pass\n\n\n_renderer = OcfMarkdownRenderer(\n escape=True,\n hard_wrap=False,\n)\n\n_inline = OcfMarkdownInlineLexer(_renderer)\n_inline.enable_html_comments()\n_inline.enable_django_links()\n\n_block = OcfMarkdownBlockLexer(mistune.BlockGrammar())\n_block.enable_html_comments()\n\n_markdown = mistune.Markdown(\n renderer=_renderer,\n inline=_inline,\n block=_block,\n)\n\n\ndef markdown(text):\n _renderer.reset_toc()\n return _markdown(text)\n\n\ndef text_and_meta(f):\n \"\"\"Return tuple (text, meta dict) for the given file.\n\n Meta tags are stripped from the Markdown source, but the Markdown is\n not rendered.\n \"\"\"\n text = f.read()\n meta = {}\n\n def repl(match):\n meta[match.group(1)] = match.group(2)\n return ''\n\n text = META_REGEX.sub(repl, text)\n return text, meta\n\n\n@lru_cache()\ndef markdown_and_toc(text):\n \"\"\"Return tuple (html, toc) for the given text.\"\"\"\n html = markdown(text)\n return html, _renderer.get_toc()\n", "path": "ocfweb/component/markdown.py"}], "after_files": [{"content": "import re\n\nimport mistune\nfrom django.core.urlresolvers import reverse\n\nfrom ocfweb.caching import lru_cache\n\n# tags of a format like: [[!meta title=\"Backups\"]]\nMETA_REGEX = re.compile('\\[\\[!meta ([a-z]+)=\"([^\"]*)\"\\]\\]')\n\n\nclass HtmlCommentsInlineLexerMixin:\n \"\"\"Strip HTML comments inside lines.\"\"\"\n\n def enable_html_comments(self):\n self.rules.html_comment = re.compile(\n '^<!--(.*?)-->'\n )\n self.default_rules.insert(0, 'html_comment')\n\n def output_html_comment(self, m):\n return ''\n\n\nclass HtmlCommentsBlockLexerMixin:\n \"\"\"Strip blocks which consist entirely of HTML comments.\"\"\"\n\n def enable_html_comments(self):\n self.rules.html_comment = re.compile(\n '^<!--(.*?)-->'\n )\n self.default_rules.insert(0, 'html_comment')\n\n def parse_html_comment(self, m):\n pass\n\n\nclass DjangoLinkInlineLexerMixin:\n \"\"\"Turn special Markdown link syntax into Django links.\n\n In Django templates, we can use `url` tags, such as:\n {% url 'staff-hours' %}\n {% url 'doc' 'staff/backend/backups' %}\n\n In Markdown, we use the following fake syntax to generate Django links:\n [[human readable name|staff-hours]]\n [[human readable name|doc staff/backend/backups]]\n\n You can link to fragments with a # at the very end:\n [[human readable name|staff-hours#something]]\n [[human readable name|doc staff/backend/backups#something]]\n \"\"\"\n\n split_words = re.compile('((?:\\S|\\\\\\\\ )+)')\n\n def enable_django_links(self):\n self.rules.django_link = re.compile(\n 
'^\\[\\[(?!\\!)'\n '([\\s\\S]+?)'\n '\\|'\n '([^#]+?)'\n '(?:#(.*?))?'\n '\\]\\]'\n )\n self.default_rules.insert(0, 'django_link')\n\n def output_django_link(self, m):\n text, target, fragment = m.group(1), m.group(2), m.group(3)\n\n def href(link, fragment):\n if fragment:\n return link + '#' + fragment\n return link\n\n words = DjangoLinkInlineLexerMixin.split_words.findall(target)\n name, *params = words\n return self.renderer.link(\n link=href(reverse(name, args=params), fragment),\n title=None,\n text=text,\n )\n\n\nclass HeaderRendererMixin:\n \"\"\"Mixin to render headers with auto-generated IDs (or provided IDs).\n\n If headers are written as usual, they'll be given automatically-generated\n IDs based on their header level and text.\n\n Headers can also be specified with an ID at the end wrapped in curly braces:\n\n ### My Header {my_id}\n\n This ID will be used directly without further manipulation, and can be\n relied on for linking.\n\n Custom IDs can consist only of lowercase a-z, 0-9, dash, and underscore.\n\n IDs are tracked into a table of contents which should be reset before\n rendering a document and read afterwards.\n \"\"\"\n\n def reset_toc(self):\n self.toc = []\n self.toc_ids = set()\n\n def get_toc(self):\n return self.toc\n\n def header(self, text, level, raw=None):\n custom_id_match = re.match(r'^(.*?)\\s+{([a-z0-9\\-_]+)}\\s*$', text)\n if custom_id_match:\n text = custom_id_match.group(1)\n id = custom_id_match.group(2)\n\n if id in self.toc_ids:\n raise ValueError('Duplicate header ID in Markdown: \"{}\"'.format(id))\n else:\n id = 'h{level}_{title}'.format(\n level=level,\n title=re.sub('[^a-z0-9\\-_ ]', '', text.lower()).strip().replace(' ', '-'),\n )\n\n # dumb collision avoidance\n while id in self.toc_ids:\n id += '_'\n\n self.toc.append((level, text, id))\n self.toc_ids.add(id)\n return '<h{level} id=\"{id}\">{text} <a class=\"anchor\" href=\"#{id}\"><span></span></a></h{level}>\\n'.format(\n level=level,\n id=id,\n text=text,\n )\n\n\nclass OcfMarkdownRenderer(\n HeaderRendererMixin,\n mistune.Renderer,\n):\n pass\n\n\nclass OcfMarkdownInlineLexer(\n mistune.InlineLexer,\n DjangoLinkInlineLexerMixin,\n HtmlCommentsInlineLexerMixin,\n):\n pass\n\n\nclass OcfMarkdownBlockLexer(\n mistune.BlockLexer,\n HtmlCommentsBlockLexerMixin,\n):\n pass\n\n\n_renderer = OcfMarkdownRenderer(\n escape=True,\n hard_wrap=False,\n)\n\n_inline = OcfMarkdownInlineLexer(_renderer)\n_inline.enable_html_comments()\n_inline.enable_django_links()\n\n_block = OcfMarkdownBlockLexer(mistune.BlockGrammar())\n_block.enable_html_comments()\n\n_markdown = mistune.Markdown(\n renderer=_renderer,\n inline=_inline,\n block=_block,\n)\n\n\ndef markdown(text):\n _renderer.reset_toc()\n return _markdown(text)\n\n\ndef text_and_meta(f):\n \"\"\"Return tuple (text, meta dict) for the given file.\n\n Meta tags are stripped from the Markdown source, but the Markdown is\n not rendered.\n \"\"\"\n text = f.read()\n meta = {}\n\n def repl(match):\n meta[match.group(1)] = match.group(2)\n return ''\n\n text = META_REGEX.sub(repl, text)\n return text, meta\n\n\n@lru_cache()\ndef markdown_and_toc(text):\n \"\"\"Return tuple (html, toc) for the given text.\"\"\"\n html = markdown(text)\n return html, _renderer.get_toc()\n", "path": "ocfweb/component/markdown.py"}]} | 2,142 | 174 |
gh_patches_debug_28036 | rasdani/github-patches | git_diff | open-mmlab__mmdetection-3112 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
maybe a bug in version 2.1.0
I upgraded mmdetection from 2.0.0 to 2.1.0, but I can't train with this error: ValueError: need at least one array to concatenate.
So I use version 2.0.0 and everything works well, so I think maybe there is a bug in version 2.1.0
--- END ISSUE ---
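For readers unfamiliar with the message: it is what NumPy raises when `np.concatenate` is handed an empty sequence, which usually means some list of per-image or per-class arrays ended up empty (for instance, when no ground-truth annotations survive filtering). The issue does not include a traceback, so the snippet below only reproduces the failure mode, not the user's actual training pipeline.

```python
import numpy as np

# Nothing was collected -- e.g. every annotation was filtered out, so the
# list of arrays to stack is empty.
per_class_labels = []

try:
    np.concatenate(per_class_labels)
except ValueError as err:
    print(err)  # -> need at least one array to concatenate
```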
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mmdet/datasets/cityscapes.py`
Content:
```
1 # Modified from https://github.com/facebookresearch/detectron2/blob/master/detectron2/data/datasets/cityscapes.py # noqa
2 # and https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/evaluation/evalInstanceLevelSemanticLabeling.py # noqa
3
4 import glob
5 import os
6 import os.path as osp
7 import tempfile
8
9 import mmcv
10 import numpy as np
11 import pycocotools.mask as maskUtils
12 from mmcv.utils import print_log
13
14 from .builder import DATASETS
15 from .coco import CocoDataset
16
17
18 @DATASETS.register_module()
19 class CityscapesDataset(CocoDataset):
20
21 CLASSES = ('person', 'rider', 'car', 'truck', 'bus', 'train', 'motorcycle',
22 'bicycle')
23
24 def _filter_imgs(self, min_size=32):
25 """Filter images too small or without ground truths."""
26 valid_inds = []
27 ids_with_ann = set(_['image_id'] for _ in self.coco.anns.values())
28 for i, img_info in enumerate(self.data_infos):
29 img_id = img_info['id']
30 ann_ids = self.coco.getAnnIds(imgIds=[img_id])
31 ann_info = self.coco.loadAnns(ann_ids)
32 all_iscrowd = all([_['iscrowd'] for _ in ann_info])
33 if self.filter_empty_gt and (self.img_ids[i] not in ids_with_ann
34 or all_iscrowd):
35 continue
36 if min(img_info['width'], img_info['height']) >= min_size:
37 valid_inds.append(i)
38 return valid_inds
39
40 def _parse_ann_info(self, img_info, ann_info):
41 """Parse bbox and mask annotation.
42
43 Args:
44 img_info (dict): Image info of an image.
45 ann_info (list[dict]): Annotation info of an image.
46
47 Returns:
48 dict: A dict containing the following keys: bboxes, bboxes_ignore,
49 labels, masks, seg_map.
50 "masks" are already decoded into binary masks.
51 """
52 gt_bboxes = []
53 gt_labels = []
54 gt_bboxes_ignore = []
55 gt_masks_ann = []
56
57 for i, ann in enumerate(ann_info):
58 if ann.get('ignore', False):
59 continue
60 x1, y1, w, h = ann['bbox']
61 if ann['area'] <= 0 or w < 1 or h < 1:
62 continue
63 if ann['category_id'] not in self.cat_ids:
64 continue
65 bbox = [x1, y1, x1 + w, y1 + h]
66 if ann.get('iscrowd', False):
67 gt_bboxes_ignore.append(bbox)
68 else:
69 gt_bboxes.append(bbox)
70 gt_labels.append(self.cat2label[ann['category_id']])
71 gt_masks_ann.append(ann['segmentation'])
72
73 if gt_bboxes:
74 gt_bboxes = np.array(gt_bboxes, dtype=np.float32)
75 gt_labels = np.array(gt_labels, dtype=np.int64)
76 else:
77 gt_bboxes = np.zeros((0, 4), dtype=np.float32)
78 gt_labels = np.array([], dtype=np.int64)
79
80 if gt_bboxes_ignore:
81 gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32)
82 else:
83 gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32)
84
85 ann = dict(
86 bboxes=gt_bboxes,
87 labels=gt_labels,
88 bboxes_ignore=gt_bboxes_ignore,
89 masks=gt_masks_ann,
90 seg_map=img_info['segm_file'])
91
92 return ann
93
94 def results2txt(self, results, outfile_prefix):
95 """Dump the detection results to a txt file.
96
97 Args:
98 results (list[list | tuple | ndarray]): Testing results of the
99 dataset.
100 outfile_prefix (str): The filename prefix of the json files.
101 If the prefix is "somepath/xxx",
102 the txt files will be named "somepath/xxx.txt".
103
104 Returns:
105 list[str: str]: result txt files which contains corresponding
106 instance segmentation images.
107 """
108 try:
109 import cityscapesscripts.helpers.labels as CSLabels
110 except ImportError:
111 raise ImportError('Please run "pip install citscapesscripts" to '
112 'install cityscapesscripts first.')
113 result_files = []
114 os.makedirs(outfile_prefix, exist_ok=True)
115 prog_bar = mmcv.ProgressBar(len(self))
116 for idx in range(len(self)):
117 result = results[idx]
118 filename = self.data_infos[idx]['filename']
119 basename = osp.splitext(osp.basename(filename))[0]
120 pred_txt = osp.join(outfile_prefix, basename + '_pred.txt')
121
122 bbox_result, segm_result = result
123 bboxes = np.vstack(bbox_result)
124 segms = mmcv.concat_list(segm_result)
125 labels = [
126 np.full(bbox.shape[0], i, dtype=np.int32)
127 for i, bbox in enumerate(bbox_result)
128 ]
129 labels = np.concatenate(labels)
130
131 assert len(bboxes) == len(segms) == len(labels)
132 num_instances = len(bboxes)
133 prog_bar.update()
134 with open(pred_txt, 'w') as fout:
135 for i in range(num_instances):
136 pred_class = labels[i]
137 classes = self.CLASSES[pred_class]
138 class_id = CSLabels.name2label[classes].id
139 score = bboxes[i, -1]
140 mask = maskUtils.decode(segms[i]).astype(np.uint8)
141 png_filename = osp.join(outfile_prefix,
142 basename + f'_{i}_{classes}.png')
143 mmcv.imwrite(mask, png_filename)
144 fout.write(f'{osp.basename(png_filename)} {class_id} '
145 f'{score}\n')
146 result_files.append(pred_txt)
147
148 return result_files
149
150 def format_results(self, results, txtfile_prefix=None):
151 """Format the results to txt (standard format for Cityscapes evaluation).
152
153 Args:
154 results (list): Testing results of the dataset.
155 txtfile_prefix (str | None): The prefix of txt files. It includes
156 the file path and the prefix of filename, e.g., "a/b/prefix".
157 If not specified, a temp file will be created. Default: None.
158
159 Returns:
160 tuple: (result_files, tmp_dir), result_files is a dict containing
161 the json filepaths, tmp_dir is the temporal directory created
162 for saving txt/png files when txtfile_prefix is not specified.
163 """
164 assert isinstance(results, list), 'results must be a list'
165 assert len(results) == len(self), (
166 'The length of results is not equal to the dataset len: {} != {}'.
167 format(len(results), len(self)))
168
169 assert isinstance(results, list), 'results must be a list'
170 assert len(results) == len(self), (
171 'The length of results is not equal to the dataset len: {} != {}'.
172 format(len(results), len(self)))
173
174 if txtfile_prefix is None:
175 tmp_dir = tempfile.TemporaryDirectory()
176 txtfile_prefix = osp.join(tmp_dir.name, 'results')
177 else:
178 tmp_dir = None
179 result_files = self.results2txt(results, txtfile_prefix)
180
181 return result_files, tmp_dir
182
183 def evaluate(self,
184 results,
185 metric='bbox',
186 logger=None,
187 outfile_prefix=None,
188 classwise=False,
189 proposal_nums=(100, 300, 1000),
190 iou_thrs=np.arange(0.5, 0.96, 0.05)):
191 """Evaluation in Cityscapes protocol.
192
193 Args:
194 results (list): Testing results of the dataset.
195 metric (str | list[str]): Metrics to be evaluated.
196 logger (logging.Logger | str | None): Logger used for printing
197 related information during evaluation. Default: None.
198 outfile_prefix (str | None):
199 classwise (bool): Whether to evaluating the AP for each class.
200 proposal_nums (Sequence[int]): Proposal number used for evaluating
201 recalls, such as recall@100, recall@1000.
202 Default: (100, 300, 1000).
203 iou_thrs (Sequence[float]): IoU threshold used for evaluating
204 recalls. If set to a list, the average recall of all IoUs will
205 also be computed. Default: 0.5.
206
207 Returns:
208 dict[str: float]
209 """
210 eval_results = dict()
211
212 metrics = metric.copy() if isinstance(metric, list) else [metric]
213
214 if 'cityscapes' in metrics:
215 eval_results.update(
216 self._evaluate_cityscapes(results, outfile_prefix, logger))
217 metrics.remove('cityscapes')
218
219 # left metrics are all coco metric
220 if len(metrics) > 0:
221 # create CocoDataset with CityscapesDataset annotation
222 self_coco = CocoDataset(self.ann_file, self.pipeline.transforms,
223 None, self.data_root, self.img_prefix,
224 self.seg_prefix, self.proposal_file,
225 self.test_mode, self.filter_empty_gt)
226 # TODO: remove this in the future
227 # reload annotations of correct class
228 self_coco.CLASSES = self.CLASSES
229 self_coco.data_infos = self_coco.load_annotations(self.ann_file)
230 eval_results.update(
231 self_coco.evaluate(results, metrics, logger, outfile_prefix,
232 classwise, proposal_nums, iou_thrs))
233
234 return eval_results
235
236 def _evaluate_cityscapes(self, results, txtfile_prefix, logger):
237 try:
238 import cityscapesscripts.evaluation.evalInstanceLevelSemanticLabeling as CSEval # noqa
239 except ImportError:
240 raise ImportError('Please run "pip install citscapesscripts" to '
241 'install cityscapesscripts first.')
242 msg = 'Evaluating in Cityscapes style'
243 if logger is None:
244 msg = '\n' + msg
245 print_log(msg, logger=logger)
246
247 result_files, tmp_dir = self.format_results(results, txtfile_prefix)
248
249 if tmp_dir is None:
250 result_dir = osp.join(txtfile_prefix, 'results')
251 else:
252 result_dir = osp.join(tmp_dir.name, 'results')
253
254 eval_results = {}
255 print_log(f'Evaluating results under {result_dir} ...', logger=logger)
256
257 # set global states in cityscapes evaluation API
258 CSEval.args.cityscapesPath = os.path.join(self.img_prefix, '../..')
259 CSEval.args.predictionPath = os.path.abspath(result_dir)
260 CSEval.args.predictionWalk = None
261 CSEval.args.JSONOutput = False
262 CSEval.args.colorized = False
263 CSEval.args.gtInstancesFile = os.path.join(result_dir,
264 'gtInstances.json')
265 CSEval.args.groundTruthSearch = os.path.join(
266 self.img_prefix.replace('leftImg8bit', 'gtFine'),
267 '*/*_gtFine_instanceIds.png')
268
269 groundTruthImgList = glob.glob(CSEval.args.groundTruthSearch)
270 assert len(groundTruthImgList), 'Cannot find ground truth images' \
271 f' in {CSEval.args.groundTruthSearch}.'
272 predictionImgList = []
273 for gt in groundTruthImgList:
274 predictionImgList.append(CSEval.getPrediction(gt, CSEval.args))
275 CSEval_results = CSEval.evaluateImgLists(predictionImgList,
276 groundTruthImgList,
277 CSEval.args)['averages']
278
279 eval_results['mAP'] = CSEval_results['allAp']
280 eval_results['AP@50'] = CSEval_results['allAp50%']
281 if tmp_dir is not None:
282 tmp_dir.cleanup()
283 return eval_results
284
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mmdet/datasets/cityscapes.py b/mmdet/datasets/cityscapes.py
--- a/mmdet/datasets/cityscapes.py
+++ b/mmdet/datasets/cityscapes.py
@@ -121,7 +121,17 @@
bbox_result, segm_result = result
bboxes = np.vstack(bbox_result)
- segms = mmcv.concat_list(segm_result)
+ # segm results
+ if isinstance(segm_result, tuple):
+ # Some detectors use different scores for bbox and mask,
+ # like Mask Scoring R-CNN. Score of segm will be used instead
+ # of bbox score.
+ segms = mmcv.concat_list(segm_result[0])
+ mask_score = segm_result[1]
+ else:
+ # use bbox score for mask score
+ segms = mmcv.concat_list(segm_result)
+ mask_score = [bbox[-1] for bbox in bboxes]
labels = [
np.full(bbox.shape[0], i, dtype=np.int32)
for i, bbox in enumerate(bbox_result)
@@ -136,7 +146,7 @@
pred_class = labels[i]
classes = self.CLASSES[pred_class]
class_id = CSLabels.name2label[classes].id
- score = bboxes[i, -1]
+ score = mask_score[i]
mask = maskUtils.decode(segms[i]).astype(np.uint8)
png_filename = osp.join(outfile_prefix,
basename + f'_{i}_{classes}.png')
| {"golden_diff": "diff --git a/mmdet/datasets/cityscapes.py b/mmdet/datasets/cityscapes.py\n--- a/mmdet/datasets/cityscapes.py\n+++ b/mmdet/datasets/cityscapes.py\n@@ -121,7 +121,17 @@\n \n bbox_result, segm_result = result\n bboxes = np.vstack(bbox_result)\n- segms = mmcv.concat_list(segm_result)\n+ # segm results\n+ if isinstance(segm_result, tuple):\n+ # Some detectors use different scores for bbox and mask,\n+ # like Mask Scoring R-CNN. Score of segm will be used instead\n+ # of bbox score.\n+ segms = mmcv.concat_list(segm_result[0])\n+ mask_score = segm_result[1]\n+ else:\n+ # use bbox score for mask score\n+ segms = mmcv.concat_list(segm_result)\n+ mask_score = [bbox[-1] for bbox in bboxes]\n labels = [\n np.full(bbox.shape[0], i, dtype=np.int32)\n for i, bbox in enumerate(bbox_result)\n@@ -136,7 +146,7 @@\n pred_class = labels[i]\n classes = self.CLASSES[pred_class]\n class_id = CSLabels.name2label[classes].id\n- score = bboxes[i, -1]\n+ score = mask_score[i]\n mask = maskUtils.decode(segms[i]).astype(np.uint8)\n png_filename = osp.join(outfile_prefix,\n basename + f'_{i}_{classes}.png')\n", "issue": "mabe a bug in version 2.1.0\nI upgrade mmdetection from 2.0.0 to 2.1.0, but I can't train with this error:ValueError: need at least one array to concatenate.\r\nSo I use version 2.0.0 and all work well, so I think maybe there is a bug in version 2.1.0\n", "before_files": [{"content": "# Modified from https://github.com/facebookresearch/detectron2/blob/master/detectron2/data/datasets/cityscapes.py # noqa\n# and https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/evaluation/evalInstanceLevelSemanticLabeling.py # noqa\n\nimport glob\nimport os\nimport os.path as osp\nimport tempfile\n\nimport mmcv\nimport numpy as np\nimport pycocotools.mask as maskUtils\nfrom mmcv.utils import print_log\n\nfrom .builder import DATASETS\nfrom .coco import CocoDataset\n\n\[email protected]_module()\nclass CityscapesDataset(CocoDataset):\n\n CLASSES = ('person', 'rider', 'car', 'truck', 'bus', 'train', 'motorcycle',\n 'bicycle')\n\n def _filter_imgs(self, min_size=32):\n \"\"\"Filter images too small or without ground truths.\"\"\"\n valid_inds = []\n ids_with_ann = set(_['image_id'] for _ in self.coco.anns.values())\n for i, img_info in enumerate(self.data_infos):\n img_id = img_info['id']\n ann_ids = self.coco.getAnnIds(imgIds=[img_id])\n ann_info = self.coco.loadAnns(ann_ids)\n all_iscrowd = all([_['iscrowd'] for _ in ann_info])\n if self.filter_empty_gt and (self.img_ids[i] not in ids_with_ann\n or all_iscrowd):\n continue\n if min(img_info['width'], img_info['height']) >= min_size:\n valid_inds.append(i)\n return valid_inds\n\n def _parse_ann_info(self, img_info, ann_info):\n \"\"\"Parse bbox and mask annotation.\n\n Args:\n img_info (dict): Image info of an image.\n ann_info (list[dict]): Annotation info of an image.\n\n Returns:\n dict: A dict containing the following keys: bboxes, bboxes_ignore,\n labels, masks, seg_map.\n \"masks\" are already decoded into binary masks.\n \"\"\"\n gt_bboxes = []\n gt_labels = []\n gt_bboxes_ignore = []\n gt_masks_ann = []\n\n for i, ann in enumerate(ann_info):\n if ann.get('ignore', False):\n continue\n x1, y1, w, h = ann['bbox']\n if ann['area'] <= 0 or w < 1 or h < 1:\n continue\n if ann['category_id'] not in self.cat_ids:\n continue\n bbox = [x1, y1, x1 + w, y1 + h]\n if ann.get('iscrowd', False):\n gt_bboxes_ignore.append(bbox)\n else:\n gt_bboxes.append(bbox)\n gt_labels.append(self.cat2label[ann['category_id']])\n 
gt_masks_ann.append(ann['segmentation'])\n\n if gt_bboxes:\n gt_bboxes = np.array(gt_bboxes, dtype=np.float32)\n gt_labels = np.array(gt_labels, dtype=np.int64)\n else:\n gt_bboxes = np.zeros((0, 4), dtype=np.float32)\n gt_labels = np.array([], dtype=np.int64)\n\n if gt_bboxes_ignore:\n gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32)\n else:\n gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32)\n\n ann = dict(\n bboxes=gt_bboxes,\n labels=gt_labels,\n bboxes_ignore=gt_bboxes_ignore,\n masks=gt_masks_ann,\n seg_map=img_info['segm_file'])\n\n return ann\n\n def results2txt(self, results, outfile_prefix):\n \"\"\"Dump the detection results to a txt file.\n\n Args:\n results (list[list | tuple | ndarray]): Testing results of the\n dataset.\n outfile_prefix (str): The filename prefix of the json files.\n If the prefix is \"somepath/xxx\",\n the txt files will be named \"somepath/xxx.txt\".\n\n Returns:\n list[str: str]: result txt files which contains corresponding\n instance segmentation images.\n \"\"\"\n try:\n import cityscapesscripts.helpers.labels as CSLabels\n except ImportError:\n raise ImportError('Please run \"pip install citscapesscripts\" to '\n 'install cityscapesscripts first.')\n result_files = []\n os.makedirs(outfile_prefix, exist_ok=True)\n prog_bar = mmcv.ProgressBar(len(self))\n for idx in range(len(self)):\n result = results[idx]\n filename = self.data_infos[idx]['filename']\n basename = osp.splitext(osp.basename(filename))[0]\n pred_txt = osp.join(outfile_prefix, basename + '_pred.txt')\n\n bbox_result, segm_result = result\n bboxes = np.vstack(bbox_result)\n segms = mmcv.concat_list(segm_result)\n labels = [\n np.full(bbox.shape[0], i, dtype=np.int32)\n for i, bbox in enumerate(bbox_result)\n ]\n labels = np.concatenate(labels)\n\n assert len(bboxes) == len(segms) == len(labels)\n num_instances = len(bboxes)\n prog_bar.update()\n with open(pred_txt, 'w') as fout:\n for i in range(num_instances):\n pred_class = labels[i]\n classes = self.CLASSES[pred_class]\n class_id = CSLabels.name2label[classes].id\n score = bboxes[i, -1]\n mask = maskUtils.decode(segms[i]).astype(np.uint8)\n png_filename = osp.join(outfile_prefix,\n basename + f'_{i}_{classes}.png')\n mmcv.imwrite(mask, png_filename)\n fout.write(f'{osp.basename(png_filename)} {class_id} '\n f'{score}\\n')\n result_files.append(pred_txt)\n\n return result_files\n\n def format_results(self, results, txtfile_prefix=None):\n \"\"\"Format the results to txt (standard format for Cityscapes evaluation).\n\n Args:\n results (list): Testing results of the dataset.\n txtfile_prefix (str | None): The prefix of txt files. It includes\n the file path and the prefix of filename, e.g., \"a/b/prefix\".\n If not specified, a temp file will be created. 
Default: None.\n\n Returns:\n tuple: (result_files, tmp_dir), result_files is a dict containing\n the json filepaths, tmp_dir is the temporal directory created\n for saving txt/png files when txtfile_prefix is not specified.\n \"\"\"\n assert isinstance(results, list), 'results must be a list'\n assert len(results) == len(self), (\n 'The length of results is not equal to the dataset len: {} != {}'.\n format(len(results), len(self)))\n\n assert isinstance(results, list), 'results must be a list'\n assert len(results) == len(self), (\n 'The length of results is not equal to the dataset len: {} != {}'.\n format(len(results), len(self)))\n\n if txtfile_prefix is None:\n tmp_dir = tempfile.TemporaryDirectory()\n txtfile_prefix = osp.join(tmp_dir.name, 'results')\n else:\n tmp_dir = None\n result_files = self.results2txt(results, txtfile_prefix)\n\n return result_files, tmp_dir\n\n def evaluate(self,\n results,\n metric='bbox',\n logger=None,\n outfile_prefix=None,\n classwise=False,\n proposal_nums=(100, 300, 1000),\n iou_thrs=np.arange(0.5, 0.96, 0.05)):\n \"\"\"Evaluation in Cityscapes protocol.\n\n Args:\n results (list): Testing results of the dataset.\n metric (str | list[str]): Metrics to be evaluated.\n logger (logging.Logger | str | None): Logger used for printing\n related information during evaluation. Default: None.\n outfile_prefix (str | None):\n classwise (bool): Whether to evaluating the AP for each class.\n proposal_nums (Sequence[int]): Proposal number used for evaluating\n recalls, such as recall@100, recall@1000.\n Default: (100, 300, 1000).\n iou_thrs (Sequence[float]): IoU threshold used for evaluating\n recalls. If set to a list, the average recall of all IoUs will\n also be computed. Default: 0.5.\n\n Returns:\n dict[str: float]\n \"\"\"\n eval_results = dict()\n\n metrics = metric.copy() if isinstance(metric, list) else [metric]\n\n if 'cityscapes' in metrics:\n eval_results.update(\n self._evaluate_cityscapes(results, outfile_prefix, logger))\n metrics.remove('cityscapes')\n\n # left metrics are all coco metric\n if len(metrics) > 0:\n # create CocoDataset with CityscapesDataset annotation\n self_coco = CocoDataset(self.ann_file, self.pipeline.transforms,\n None, self.data_root, self.img_prefix,\n self.seg_prefix, self.proposal_file,\n self.test_mode, self.filter_empty_gt)\n # TODO: remove this in the future\n # reload annotations of correct class\n self_coco.CLASSES = self.CLASSES\n self_coco.data_infos = self_coco.load_annotations(self.ann_file)\n eval_results.update(\n self_coco.evaluate(results, metrics, logger, outfile_prefix,\n classwise, proposal_nums, iou_thrs))\n\n return eval_results\n\n def _evaluate_cityscapes(self, results, txtfile_prefix, logger):\n try:\n import cityscapesscripts.evaluation.evalInstanceLevelSemanticLabeling as CSEval # noqa\n except ImportError:\n raise ImportError('Please run \"pip install citscapesscripts\" to '\n 'install cityscapesscripts first.')\n msg = 'Evaluating in Cityscapes style'\n if logger is None:\n msg = '\\n' + msg\n print_log(msg, logger=logger)\n\n result_files, tmp_dir = self.format_results(results, txtfile_prefix)\n\n if tmp_dir is None:\n result_dir = osp.join(txtfile_prefix, 'results')\n else:\n result_dir = osp.join(tmp_dir.name, 'results')\n\n eval_results = {}\n print_log(f'Evaluating results under {result_dir} ...', logger=logger)\n\n # set global states in cityscapes evaluation API\n CSEval.args.cityscapesPath = os.path.join(self.img_prefix, '../..')\n CSEval.args.predictionPath = os.path.abspath(result_dir)\n 
CSEval.args.predictionWalk = None\n CSEval.args.JSONOutput = False\n CSEval.args.colorized = False\n CSEval.args.gtInstancesFile = os.path.join(result_dir,\n 'gtInstances.json')\n CSEval.args.groundTruthSearch = os.path.join(\n self.img_prefix.replace('leftImg8bit', 'gtFine'),\n '*/*_gtFine_instanceIds.png')\n\n groundTruthImgList = glob.glob(CSEval.args.groundTruthSearch)\n assert len(groundTruthImgList), 'Cannot find ground truth images' \\\n f' in {CSEval.args.groundTruthSearch}.'\n predictionImgList = []\n for gt in groundTruthImgList:\n predictionImgList.append(CSEval.getPrediction(gt, CSEval.args))\n CSEval_results = CSEval.evaluateImgLists(predictionImgList,\n groundTruthImgList,\n CSEval.args)['averages']\n\n eval_results['mAP'] = CSEval_results['allAp']\n eval_results['AP@50'] = CSEval_results['allAp50%']\n if tmp_dir is not None:\n tmp_dir.cleanup()\n return eval_results\n", "path": "mmdet/datasets/cityscapes.py"}], "after_files": [{"content": "# Modified from https://github.com/facebookresearch/detectron2/blob/master/detectron2/data/datasets/cityscapes.py # noqa\n# and https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/evaluation/evalInstanceLevelSemanticLabeling.py # noqa\n\nimport glob\nimport os\nimport os.path as osp\nimport tempfile\n\nimport mmcv\nimport numpy as np\nimport pycocotools.mask as maskUtils\nfrom mmcv.utils import print_log\n\nfrom .builder import DATASETS\nfrom .coco import CocoDataset\n\n\[email protected]_module()\nclass CityscapesDataset(CocoDataset):\n\n CLASSES = ('person', 'rider', 'car', 'truck', 'bus', 'train', 'motorcycle',\n 'bicycle')\n\n def _filter_imgs(self, min_size=32):\n \"\"\"Filter images too small or without ground truths.\"\"\"\n valid_inds = []\n ids_with_ann = set(_['image_id'] for _ in self.coco.anns.values())\n for i, img_info in enumerate(self.data_infos):\n img_id = img_info['id']\n ann_ids = self.coco.getAnnIds(imgIds=[img_id])\n ann_info = self.coco.loadAnns(ann_ids)\n all_iscrowd = all([_['iscrowd'] for _ in ann_info])\n if self.filter_empty_gt and (self.img_ids[i] not in ids_with_ann\n or all_iscrowd):\n continue\n if min(img_info['width'], img_info['height']) >= min_size:\n valid_inds.append(i)\n return valid_inds\n\n def _parse_ann_info(self, img_info, ann_info):\n \"\"\"Parse bbox and mask annotation.\n\n Args:\n img_info (dict): Image info of an image.\n ann_info (list[dict]): Annotation info of an image.\n\n Returns:\n dict: A dict containing the following keys: bboxes, bboxes_ignore,\n labels, masks, seg_map.\n \"masks\" are already decoded into binary masks.\n \"\"\"\n gt_bboxes = []\n gt_labels = []\n gt_bboxes_ignore = []\n gt_masks_ann = []\n\n for i, ann in enumerate(ann_info):\n if ann.get('ignore', False):\n continue\n x1, y1, w, h = ann['bbox']\n if ann['area'] <= 0 or w < 1 or h < 1:\n continue\n if ann['category_id'] not in self.cat_ids:\n continue\n bbox = [x1, y1, x1 + w, y1 + h]\n if ann.get('iscrowd', False):\n gt_bboxes_ignore.append(bbox)\n else:\n gt_bboxes.append(bbox)\n gt_labels.append(self.cat2label[ann['category_id']])\n gt_masks_ann.append(ann['segmentation'])\n\n if gt_bboxes:\n gt_bboxes = np.array(gt_bboxes, dtype=np.float32)\n gt_labels = np.array(gt_labels, dtype=np.int64)\n else:\n gt_bboxes = np.zeros((0, 4), dtype=np.float32)\n gt_labels = np.array([], dtype=np.int64)\n\n if gt_bboxes_ignore:\n gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32)\n else:\n gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32)\n\n ann = dict(\n bboxes=gt_bboxes,\n 
labels=gt_labels,\n bboxes_ignore=gt_bboxes_ignore,\n masks=gt_masks_ann,\n seg_map=img_info['segm_file'])\n\n return ann\n\n def results2txt(self, results, outfile_prefix):\n \"\"\"Dump the detection results to a txt file.\n\n Args:\n results (list[list | tuple | ndarray]): Testing results of the\n dataset.\n outfile_prefix (str): The filename prefix of the json files.\n If the prefix is \"somepath/xxx\",\n the txt files will be named \"somepath/xxx.txt\".\n\n Returns:\n list[str: str]: result txt files which contains corresponding\n instance segmentation images.\n \"\"\"\n try:\n import cityscapesscripts.helpers.labels as CSLabels\n except ImportError:\n raise ImportError('Please run \"pip install citscapesscripts\" to '\n 'install cityscapesscripts first.')\n result_files = []\n os.makedirs(outfile_prefix, exist_ok=True)\n prog_bar = mmcv.ProgressBar(len(self))\n for idx in range(len(self)):\n result = results[idx]\n filename = self.data_infos[idx]['filename']\n basename = osp.splitext(osp.basename(filename))[0]\n pred_txt = osp.join(outfile_prefix, basename + '_pred.txt')\n\n bbox_result, segm_result = result\n bboxes = np.vstack(bbox_result)\n # segm results\n if isinstance(segm_result, tuple):\n # Some detectors use different scores for bbox and mask,\n # like Mask Scoring R-CNN. Score of segm will be used instead\n # of bbox score.\n segms = mmcv.concat_list(segm_result[0])\n mask_score = segm_result[1]\n else:\n # use bbox score for mask score\n segms = mmcv.concat_list(segm_result)\n mask_score = [bbox[-1] for bbox in bboxes]\n labels = [\n np.full(bbox.shape[0], i, dtype=np.int32)\n for i, bbox in enumerate(bbox_result)\n ]\n labels = np.concatenate(labels)\n\n assert len(bboxes) == len(segms) == len(labels)\n num_instances = len(bboxes)\n prog_bar.update()\n with open(pred_txt, 'w') as fout:\n for i in range(num_instances):\n pred_class = labels[i]\n classes = self.CLASSES[pred_class]\n class_id = CSLabels.name2label[classes].id\n score = mask_score[i]\n mask = maskUtils.decode(segms[i]).astype(np.uint8)\n png_filename = osp.join(outfile_prefix,\n basename + f'_{i}_{classes}.png')\n mmcv.imwrite(mask, png_filename)\n fout.write(f'{osp.basename(png_filename)} {class_id} '\n f'{score}\\n')\n result_files.append(pred_txt)\n\n return result_files\n\n def format_results(self, results, txtfile_prefix=None):\n \"\"\"Format the results to txt (standard format for Cityscapes evaluation).\n\n Args:\n results (list): Testing results of the dataset.\n txtfile_prefix (str | None): The prefix of txt files. It includes\n the file path and the prefix of filename, e.g., \"a/b/prefix\".\n If not specified, a temp file will be created. 
Default: None.\n\n Returns:\n tuple: (result_files, tmp_dir), result_files is a dict containing\n the json filepaths, tmp_dir is the temporal directory created\n for saving txt/png files when txtfile_prefix is not specified.\n \"\"\"\n assert isinstance(results, list), 'results must be a list'\n assert len(results) == len(self), (\n 'The length of results is not equal to the dataset len: {} != {}'.\n format(len(results), len(self)))\n\n assert isinstance(results, list), 'results must be a list'\n assert len(results) == len(self), (\n 'The length of results is not equal to the dataset len: {} != {}'.\n format(len(results), len(self)))\n\n if txtfile_prefix is None:\n tmp_dir = tempfile.TemporaryDirectory()\n txtfile_prefix = osp.join(tmp_dir.name, 'results')\n else:\n tmp_dir = None\n result_files = self.results2txt(results, txtfile_prefix)\n\n return result_files, tmp_dir\n\n def evaluate(self,\n results,\n metric='bbox',\n logger=None,\n outfile_prefix=None,\n classwise=False,\n proposal_nums=(100, 300, 1000),\n iou_thrs=np.arange(0.5, 0.96, 0.05)):\n \"\"\"Evaluation in Cityscapes protocol.\n\n Args:\n results (list): Testing results of the dataset.\n metric (str | list[str]): Metrics to be evaluated.\n logger (logging.Logger | str | None): Logger used for printing\n related information during evaluation. Default: None.\n outfile_prefix (str | None):\n classwise (bool): Whether to evaluating the AP for each class.\n proposal_nums (Sequence[int]): Proposal number used for evaluating\n recalls, such as recall@100, recall@1000.\n Default: (100, 300, 1000).\n iou_thrs (Sequence[float]): IoU threshold used for evaluating\n recalls. If set to a list, the average recall of all IoUs will\n also be computed. Default: 0.5.\n\n Returns:\n dict[str: float]\n \"\"\"\n eval_results = dict()\n\n metrics = metric.copy() if isinstance(metric, list) else [metric]\n\n if 'cityscapes' in metrics:\n eval_results.update(\n self._evaluate_cityscapes(results, outfile_prefix, logger))\n metrics.remove('cityscapes')\n\n # left metrics are all coco metric\n if len(metrics) > 0:\n # create CocoDataset with CityscapesDataset annotation\n self_coco = CocoDataset(self.ann_file, self.pipeline.transforms,\n None, self.data_root, self.img_prefix,\n self.seg_prefix, self.proposal_file,\n self.test_mode, self.filter_empty_gt)\n # TODO: remove this in the future\n # reload annotations of correct class\n self_coco.CLASSES = self.CLASSES\n self_coco.data_infos = self_coco.load_annotations(self.ann_file)\n eval_results.update(\n self_coco.evaluate(results, metrics, logger, outfile_prefix,\n classwise, proposal_nums, iou_thrs))\n\n return eval_results\n\n def _evaluate_cityscapes(self, results, txtfile_prefix, logger):\n try:\n import cityscapesscripts.evaluation.evalInstanceLevelSemanticLabeling as CSEval # noqa\n except ImportError:\n raise ImportError('Please run \"pip install citscapesscripts\" to '\n 'install cityscapesscripts first.')\n msg = 'Evaluating in Cityscapes style'\n if logger is None:\n msg = '\\n' + msg\n print_log(msg, logger=logger)\n\n result_files, tmp_dir = self.format_results(results, txtfile_prefix)\n\n if tmp_dir is None:\n result_dir = osp.join(txtfile_prefix, 'results')\n else:\n result_dir = osp.join(tmp_dir.name, 'results')\n\n eval_results = {}\n print_log(f'Evaluating results under {result_dir} ...', logger=logger)\n\n # set global states in cityscapes evaluation API\n CSEval.args.cityscapesPath = os.path.join(self.img_prefix, '../..')\n CSEval.args.predictionPath = os.path.abspath(result_dir)\n 
CSEval.args.predictionWalk = None\n CSEval.args.JSONOutput = False\n CSEval.args.colorized = False\n CSEval.args.gtInstancesFile = os.path.join(result_dir,\n 'gtInstances.json')\n CSEval.args.groundTruthSearch = os.path.join(\n self.img_prefix.replace('leftImg8bit', 'gtFine'),\n '*/*_gtFine_instanceIds.png')\n\n groundTruthImgList = glob.glob(CSEval.args.groundTruthSearch)\n assert len(groundTruthImgList), 'Cannot find ground truth images' \\\n f' in {CSEval.args.groundTruthSearch}.'\n predictionImgList = []\n for gt in groundTruthImgList:\n predictionImgList.append(CSEval.getPrediction(gt, CSEval.args))\n CSEval_results = CSEval.evaluateImgLists(predictionImgList,\n groundTruthImgList,\n CSEval.args)['averages']\n\n eval_results['mAP'] = CSEval_results['allAp']\n eval_results['AP@50'] = CSEval_results['allAp50%']\n if tmp_dir is not None:\n tmp_dir.cleanup()\n return eval_results\n", "path": "mmdet/datasets/cityscapes.py"}]} | 3,675 | 356 |
gh_patches_debug_27904 | rasdani/github-patches | git_diff | wright-group__WrightTools-746 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
test kit.smooth_1D
write tests for `wt.kit.smooth_1D`
--- END ISSUE ---
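As a starting point, here is a sketch of what such tests might look like, written against the behaviour of `smooth_1D` as listed below (a running average over `arr[i - n : i + n]` applied in place, leaving the first and last `n` points untouched). The test names and layout are assumptions — the project's actual test conventions are not shown in this excerpt.

```python
import numpy as np
import WrightTools as wt


def test_constant_array_is_unchanged():
    # every window of a constant array has the same mean, so smoothing is a no-op
    arr = np.full(100, 3.14)
    out = wt.kit.smooth_1D(arr, n=10)
    assert np.allclose(out, 3.14)


def test_edges_are_left_untouched():
    # the loop only runs over indices n .. len(arr) - n - 1,
    # so the first and last n points must come back unchanged
    arr = np.arange(50, dtype=float)
    original = arr.copy()
    out = wt.kit.smooth_1D(arr, n=5)
    assert np.allclose(out[:5], original[:5])
    assert np.allclose(out[-5:], original[-5:])
```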
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `WrightTools/kit/_array.py`
Content:
```
1 """Array interaction tools."""
2
3
4 # --- import --------------------------------------------------------------------------------------
5
6
7 import numpy as np
8
9 from .. import exceptions as wt_exceptions
10
11
12 # --- define --------------------------------------------------------------------------------------
13
14
15 __all__ = [
16 "closest_pair",
17 "diff",
18 "fft",
19 "joint_shape",
20 "orthogonal",
21 "remove_nans_1D",
22 "share_nans",
23 "smooth_1D",
24 "svd",
25 "unique",
26 "valid_index",
27 "mask_reduce",
28 "enforce_mask_shape",
29 ]
30
31
32 # --- functions -----------------------------------------------------------------------------------
33
34
35 def closest_pair(arr, give="indicies"):
36 """Find the pair of indices corresponding to the closest elements in an array.
37
38 If multiple pairs are equally close, both pairs of indicies are returned.
39 Optionally returns the closest distance itself.
40
41 I am sure that this could be written as a cheaper operation. I
42 wrote this as a quick and dirty method because I need it now to use on some
43 relatively small arrays. Feel free to refactor if you need this operation
44 done as fast as possible. - Blaise 2016-02-07
45
46 Parameters
47 ----------
48 arr : numpy.ndarray
49 The array to search.
50 give : {'indicies', 'distance'} (optional)
51 Toggle return behavior. If 'distance', returns a single float - the
52 closest distance itself. Default is indicies.
53
54 Returns
55 -------
56 list of lists of two tuples
57 List containing lists of two tuples: indicies the nearest pair in the
58 array.
59
60 >>> arr = np.array([0, 1, 2, 3, 3, 4, 5, 6, 1])
61 >>> closest_pair(arr)
62 [[(1,), (8,)], [(3,), (4,)]]
63
64 """
65 idxs = [idx for idx in np.ndindex(arr.shape)]
66 outs = []
67 min_dist = arr.max() - arr.min()
68 for idxa in idxs:
69 for idxb in idxs:
70 if idxa == idxb:
71 continue
72 dist = abs(arr[idxa] - arr[idxb])
73 if dist == min_dist:
74 if not [idxb, idxa] in outs:
75 outs.append([idxa, idxb])
76 elif dist < min_dist:
77 min_dist = dist
78 outs = [[idxa, idxb]]
79 if give == "indicies":
80 return outs
81 elif give == "distance":
82 return min_dist
83 else:
84 raise KeyError("give not recognized in closest_pair")
85
86
87 def diff(xi, yi, order=1) -> np.ndarray:
88 """Take the numerical derivative of a 1D array.
89
90 Output is mapped onto the original coordinates using linear interpolation.
91 Expects monotonic xi values.
92
93 Parameters
94 ----------
95 xi : 1D array-like
96 Coordinates.
97 yi : 1D array-like
98 Values.
99 order : positive integer (optional)
100 Order of differentiation.
101
102 Returns
103 -------
104 1D numpy array
105 Numerical derivative. Has the same shape as the input arrays.
106 """
107 yi = np.array(yi).copy()
108 flip = False
109 if xi[-1] < xi[0]:
110 xi = np.flipud(xi.copy())
111 yi = np.flipud(yi)
112 flip = True
113 midpoints = (xi[1:] + xi[:-1]) / 2
114 for _ in range(order):
115 d = np.diff(yi)
116 d /= np.diff(xi)
117 yi = np.interp(xi, midpoints, d)
118 if flip:
119 yi = np.flipud(yi)
120 return yi
121
122
123 def fft(xi, yi, axis=0) -> tuple:
124 """Take the 1D FFT of an N-dimensional array and return "sensible" properly shifted arrays.
125
126 Parameters
127 ----------
128 xi : numpy.ndarray
129 1D array over which the points to be FFT'ed are defined
130 yi : numpy.ndarray
131 ND array with values to FFT
132 axis : int
133 axis of yi to perform FFT over
134
135 Returns
136 -------
137 xi : 1D numpy.ndarray
138 1D array. Conjugate to input xi. Example: if input xi is in the time
139 domain, output xi is in frequency domain.
140 yi : ND numpy.ndarray
141 FFT. Has the same shape as the input array (yi).
142 """
143 # xi must be 1D
144 if xi.ndim != 1:
145 raise wt_exceptions.DimensionalityError(1, xi.ndim)
146 # xi must be evenly spaced
147 spacing = np.diff(xi)
148 if not np.allclose(spacing, spacing.mean()):
149 raise RuntimeError("WrightTools.kit.fft: argument xi must be evenly spaced")
150 # fft
151 yi = np.fft.fft(yi, axis=axis)
152 d = (xi.max() - xi.min()) / (xi.size - 1)
153 xi = np.fft.fftfreq(xi.size, d=d)
154 # shift
155 xi = np.fft.fftshift(xi)
156 yi = np.fft.fftshift(yi, axes=axis)
157 return xi, yi
158
159
160 def joint_shape(*args) -> tuple:
161 """Given a set of arrays, return the joint shape.
162
163 Parameters
164 ----------
165 args : array-likes
166
167 Returns
168 -------
169 tuple of int
170 Joint shape.
171 """
172 if len(args) == 0:
173 return ()
174 shape = []
175 shapes = [a.shape for a in args]
176 ndim = args[0].ndim
177 for i in range(ndim):
178 shape.append(max([s[i] for s in shapes]))
179 return tuple(shape)
180
181
182 def orthogonal(*args) -> bool:
183 """Determine if a set of arrays are orthogonal.
184
185 Parameters
186 ----------
187 args : array-likes or array shapes
188
189 Returns
190 -------
191 bool
192 Array orthogonality condition.
193 """
194 for i, arg in enumerate(args):
195 if hasattr(arg, "shape"):
196 args[i] = arg.shape
197 for s in zip(*args):
198 if np.product(s) != max(s):
199 return False
200 return True
201
202
203 def remove_nans_1D(*args) -> tuple:
204 """Remove nans in a set of 1D arrays.
205
206 Removes indicies in all arrays if any array is nan at that index.
207 All input arrays must have the same size.
208
209 Parameters
210 ----------
211 args : 1D arrays
212
213 Returns
214 -------
215 tuple
216 Tuple of 1D arrays in same order as given, with nan indicies removed.
217 """
218 vals = np.isnan(args[0])
219 for a in args:
220 vals |= np.isnan(a)
221 return tuple(np.array(a)[~vals] for a in args)
222
223
224 def share_nans(*arrs) -> tuple:
225 """Take a list of nD arrays and return a new list of nD arrays.
226
227 The new list is in the same order as the old list.
228 If one indexed element in an old array is nan then every element for that
229 index in all new arrays in the list is then nan.
230
231 Parameters
232 ----------
233 *arrs : nD arrays.
234
235 Returns
236 -------
237 list
238 List of nD arrays in same order as given, with nan indicies syncronized.
239 """
240 nans = np.zeros(joint_shape(*arrs))
241 for arr in arrs:
242 nans *= arr
243 return tuple([a + nans for a in arrs])
244
245
246 def smooth_1D(arr, n=10) -> np.ndarray:
247 """Smooth 1D data by 'running average'.
248
249 Parameters
250 ----------
251 n : int
252 number of points to average
253 """
254 for i in range(n, len(arr) - n):
255 window = arr[i - n : i + n].copy()
256 arr[i] = window.mean()
257 return arr
258
259
260 def svd(a, i=None) -> tuple:
261 """Singular Value Decomposition.
262
263 Factors the matrix `a` as ``u * np.diag(s) * v``, where `u` and `v`
264 are unitary and `s` is a 1D array of `a`'s singular values.
265
266 Parameters
267 ----------
268 a : array_like
269 Input array.
270 i : int or slice (optional)
271 What singular value "slice" to return.
272 Default is None which returns unitary 2D arrays.
273
274 Returns
275 -------
276 tuple
277 Decomposed arrays in order `u`, `v`, `s`
278 """
279 u, s, v = np.linalg.svd(a, full_matrices=False, compute_uv=True)
280 u = u.T
281 if i is None:
282 return u, v, s
283 else:
284 return u[i], v[i], s[i]
285
286
287 def unique(arr, tolerance=1e-6) -> np.ndarray:
288 """Return unique elements in 1D array, within tolerance.
289
290 Parameters
291 ----------
292 arr : array_like
293 Input array. This will be flattened if it is not already 1D.
294 tolerance : number (optional)
295 The tolerance for uniqueness.
296
297 Returns
298 -------
299 array
300 The sorted unique values.
301 """
302 arr = sorted(arr.flatten())
303 unique = []
304 while len(arr) > 0:
305 current = arr[0]
306 lis = [xi for xi in arr if np.abs(current - xi) < tolerance]
307 arr = [xi for xi in arr if not np.abs(lis[0] - xi) < tolerance]
308 xi_lis_average = sum(lis) / len(lis)
309 unique.append(xi_lis_average)
310 return np.array(unique)
311
312
313 def valid_index(index, shape) -> tuple:
314 """Get a valid index for a broadcastable shape.
315
316 Parameters
317 ----------
318 index : tuple
319 Given index.
320 shape : tuple of int
321 Shape.
322
323 Returns
324 -------
325 tuple
326 Valid index.
327 """
328 # append slices to index
329 index = list(index)
330 while len(index) < len(shape):
331 index.append(slice(None))
332 # fill out, in reverse
333 out = []
334 for i, s in zip(index[::-1], shape[::-1]):
335 if s == 1:
336 if isinstance(i, slice):
337 out.append(slice(None))
338 else:
339 out.append(0)
340 else:
341 out.append(i)
342 return tuple(out[::-1])
343
344
345 def mask_reduce(mask):
346 """Reduce a boolean mask, removing all false slices in any dimension.
347
348 Parameters
349 ----------
350 mask : ndarray with bool dtype
351 The mask which is to be reduced
352
353 Returns
354 -------
355 A boolean mask with no all False slices.
356 """
357 mask = mask.copy()
358 for i in range(len(mask.shape)):
359 a = mask.copy()
360 j = list(range(len(mask.shape)))
361 j.remove(i)
362 j = tuple(j)
363 a = a.max(axis=j, keepdims=True)
364 idx = [slice(None)] * len(mask.shape)
365 a = a.flatten()
366 idx[i] = [k for k in range(len(a)) if a[k]]
367 mask = mask[tuple(idx)]
368 return mask
369
370
371 def enforce_mask_shape(mask, shape):
372 """Reduce a boolean mask to fit a given shape.
373
374 Parameters
375 ----------
376 mask : ndarray with bool dtype
377 The mask which is to be reduced
378 shape : tuple of int
379 Shape which broadcasts to the mask shape.
380
381 Returns
382 -------
383 A boolean mask, collapsed along axes where the shape given has one element.
384 """
385 red = tuple([i for i in range(len(shape)) if shape[i] == 1])
386 return mask.max(axis=red, keepdims=True)
387
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/WrightTools/kit/_array.py b/WrightTools/kit/_array.py
--- a/WrightTools/kit/_array.py
+++ b/WrightTools/kit/_array.py
@@ -243,18 +243,52 @@
return tuple([a + nans for a in arrs])
-def smooth_1D(arr, n=10) -> np.ndarray:
- """Smooth 1D data by 'running average'.
+def smooth_1D(arr, n=10, smooth_type="flat") -> np.ndarray:
+ """Smooth 1D data using a window function.
+
+ Edge effects will be present.
Parameters
----------
- n : int
- number of points to average
+ arr : array_like
+ Input array, 1D.
+ n : int (optional)
+ Window length.
+ smooth_type : {'flat', 'hanning', 'hamming', 'bartlett', 'blackman'} (optional)
+ Type of window function to convolve data with.
+ 'flat' window will produce a moving average smoothing.
+
+ Returns
+ -------
+ array_like
+ Smoothed 1D array.
"""
- for i in range(n, len(arr) - n):
- window = arr[i - n : i + n].copy()
- arr[i] = window.mean()
- return arr
+
+ # check array input
+ if arr.ndim != 1:
+ raise wt_exceptions.DimensionalityError(1, arr.ndim)
+ if arr.size < n:
+ message = "Input array size must be larger than window size."
+ raise wt_exceptions.ValueError(message)
+ if n < 3:
+ return arr
+ # construct window array
+ if smooth_type == "flat":
+ w = np.ones(n, dtype=arr.dtype)
+ elif smooth_type == "hanning":
+ w = np.hanning(n)
+ elif smooth_type == "hamming":
+ w = np.hamming(n)
+ elif smooth_type == "bartlett":
+ w = np.bartlett(n)
+ elif smooth_type == "blackman":
+ w = np.blackman(n)
+ else:
+ message = "Given smooth_type, {0}, not available.".format(str(smooth_type))
+ raise wt_exceptions.ValueError(message)
+ # convolve reflected array with window function
+ out = np.convolve(w / w.sum(), arr, mode="same")
+ return out
def svd(a, i=None) -> tuple:
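The issue in this record asks for tests of `wt.kit.smooth_1D`; a minimal sketch of such tests, assuming the patched signature shown in the diff above and that `WrightTools` exposes `kit` and `exceptions` the way the patch uses them, might look like:

```python
import numpy as np
import pytest

import WrightTools as wt
from WrightTools import exceptions as wt_exceptions


def test_flat_window_preserves_constant_signal():
    # away from the edges, smoothing a constant signal should return the same values
    out = wt.kit.smooth_1D(np.ones(50), n=5, smooth_type="flat")
    assert np.allclose(out[10:-10], 1.0)


def test_window_longer_than_array_raises():
    # arr.size < n is rejected by the patched implementation
    with pytest.raises(wt_exceptions.ValueError):
        wt.kit.smooth_1D(np.ones(4), n=10)


def test_unknown_window_type_raises():
    # an unrecognized smooth_type is rejected
    with pytest.raises(wt_exceptions.ValueError):
        wt.kit.smooth_1D(np.ones(50), n=5, smooth_type="triangular")
```

Because the patch uses `np.convolve(..., mode="same")` with a normalized window, edge samples are damped, which is why the constant-signal check deliberately skips the first and last few points.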
| {"golden_diff": "diff --git a/WrightTools/kit/_array.py b/WrightTools/kit/_array.py\n--- a/WrightTools/kit/_array.py\n+++ b/WrightTools/kit/_array.py\n@@ -243,18 +243,52 @@\n return tuple([a + nans for a in arrs])\n \n \n-def smooth_1D(arr, n=10) -> np.ndarray:\n- \"\"\"Smooth 1D data by 'running average'.\n+def smooth_1D(arr, n=10, smooth_type=\"flat\") -> np.ndarray:\n+ \"\"\"Smooth 1D data using a window function.\n+ \n+ Edge effects will be present. \n \n Parameters\n ----------\n- n : int\n- number of points to average\n+ arr : array_like\n+ Input array, 1D.\n+ n : int (optional)\n+ Window length.\n+ smooth_type : {'flat', 'hanning', 'hamming', 'bartlett', 'blackman'} (optional)\n+ Type of window function to convolve data with.\n+ 'flat' window will produce a moving average smoothing.\n+ \n+ Returns\n+ -------\n+ array_like\n+ Smoothed 1D array.\n \"\"\"\n- for i in range(n, len(arr) - n):\n- window = arr[i - n : i + n].copy()\n- arr[i] = window.mean()\n- return arr\n+\n+ # check array input\n+ if arr.ndim != 1:\n+ raise wt_exceptions.DimensionalityError(1, arr.ndim)\n+ if arr.size < n:\n+ message = \"Input array size must be larger than window size.\"\n+ raise wt_exceptions.ValueError(message)\n+ if n < 3:\n+ return arr\n+ # construct window array\n+ if smooth_type == \"flat\":\n+ w = np.ones(n, dtype=arr.dtype)\n+ elif smooth_type == \"hanning\":\n+ w = np.hanning(n)\n+ elif smooth_type == \"hamming\":\n+ w = np.hamming(n)\n+ elif smooth_type == \"bartlett\":\n+ w = np.bartlett(n)\n+ elif smooth_type == \"blackman\":\n+ w = np.blackman(n)\n+ else:\n+ message = \"Given smooth_type, {0}, not available.\".format(str(smooth_type))\n+ raise wt_exceptions.ValueError(message)\n+ # convolve reflected array with window function\n+ out = np.convolve(w / w.sum(), arr, mode=\"same\")\n+ return out\n \n \n def svd(a, i=None) -> tuple:\n", "issue": "test kit.smooth_1D\nwrite tests for `wt.kit.smooth_1D`\n", "before_files": [{"content": "\"\"\"Array interaction tools.\"\"\"\n\n\n# --- import --------------------------------------------------------------------------------------\n\n\nimport numpy as np\n\nfrom .. import exceptions as wt_exceptions\n\n\n# --- define --------------------------------------------------------------------------------------\n\n\n__all__ = [\n \"closest_pair\",\n \"diff\",\n \"fft\",\n \"joint_shape\",\n \"orthogonal\",\n \"remove_nans_1D\",\n \"share_nans\",\n \"smooth_1D\",\n \"svd\",\n \"unique\",\n \"valid_index\",\n \"mask_reduce\",\n \"enforce_mask_shape\",\n]\n\n\n# --- functions -----------------------------------------------------------------------------------\n\n\ndef closest_pair(arr, give=\"indicies\"):\n \"\"\"Find the pair of indices corresponding to the closest elements in an array.\n\n If multiple pairs are equally close, both pairs of indicies are returned.\n Optionally returns the closest distance itself.\n\n I am sure that this could be written as a cheaper operation. I\n wrote this as a quick and dirty method because I need it now to use on some\n relatively small arrays. Feel free to refactor if you need this operation\n done as fast as possible. - Blaise 2016-02-07\n\n Parameters\n ----------\n arr : numpy.ndarray\n The array to search.\n give : {'indicies', 'distance'} (optional)\n Toggle return behavior. If 'distance', returns a single float - the\n closest distance itself. 
Default is indicies.\n\n Returns\n -------\n list of lists of two tuples\n List containing lists of two tuples: indicies the nearest pair in the\n array.\n\n >>> arr = np.array([0, 1, 2, 3, 3, 4, 5, 6, 1])\n >>> closest_pair(arr)\n [[(1,), (8,)], [(3,), (4,)]]\n\n \"\"\"\n idxs = [idx for idx in np.ndindex(arr.shape)]\n outs = []\n min_dist = arr.max() - arr.min()\n for idxa in idxs:\n for idxb in idxs:\n if idxa == idxb:\n continue\n dist = abs(arr[idxa] - arr[idxb])\n if dist == min_dist:\n if not [idxb, idxa] in outs:\n outs.append([idxa, idxb])\n elif dist < min_dist:\n min_dist = dist\n outs = [[idxa, idxb]]\n if give == \"indicies\":\n return outs\n elif give == \"distance\":\n return min_dist\n else:\n raise KeyError(\"give not recognized in closest_pair\")\n\n\ndef diff(xi, yi, order=1) -> np.ndarray:\n \"\"\"Take the numerical derivative of a 1D array.\n\n Output is mapped onto the original coordinates using linear interpolation.\n Expects monotonic xi values.\n\n Parameters\n ----------\n xi : 1D array-like\n Coordinates.\n yi : 1D array-like\n Values.\n order : positive integer (optional)\n Order of differentiation.\n\n Returns\n -------\n 1D numpy array\n Numerical derivative. Has the same shape as the input arrays.\n \"\"\"\n yi = np.array(yi).copy()\n flip = False\n if xi[-1] < xi[0]:\n xi = np.flipud(xi.copy())\n yi = np.flipud(yi)\n flip = True\n midpoints = (xi[1:] + xi[:-1]) / 2\n for _ in range(order):\n d = np.diff(yi)\n d /= np.diff(xi)\n yi = np.interp(xi, midpoints, d)\n if flip:\n yi = np.flipud(yi)\n return yi\n\n\ndef fft(xi, yi, axis=0) -> tuple:\n \"\"\"Take the 1D FFT of an N-dimensional array and return \"sensible\" properly shifted arrays.\n\n Parameters\n ----------\n xi : numpy.ndarray\n 1D array over which the points to be FFT'ed are defined\n yi : numpy.ndarray\n ND array with values to FFT\n axis : int\n axis of yi to perform FFT over\n\n Returns\n -------\n xi : 1D numpy.ndarray\n 1D array. Conjugate to input xi. Example: if input xi is in the time\n domain, output xi is in frequency domain.\n yi : ND numpy.ndarray\n FFT. 
Has the same shape as the input array (yi).\n \"\"\"\n # xi must be 1D\n if xi.ndim != 1:\n raise wt_exceptions.DimensionalityError(1, xi.ndim)\n # xi must be evenly spaced\n spacing = np.diff(xi)\n if not np.allclose(spacing, spacing.mean()):\n raise RuntimeError(\"WrightTools.kit.fft: argument xi must be evenly spaced\")\n # fft\n yi = np.fft.fft(yi, axis=axis)\n d = (xi.max() - xi.min()) / (xi.size - 1)\n xi = np.fft.fftfreq(xi.size, d=d)\n # shift\n xi = np.fft.fftshift(xi)\n yi = np.fft.fftshift(yi, axes=axis)\n return xi, yi\n\n\ndef joint_shape(*args) -> tuple:\n \"\"\"Given a set of arrays, return the joint shape.\n\n Parameters\n ----------\n args : array-likes\n\n Returns\n -------\n tuple of int\n Joint shape.\n \"\"\"\n if len(args) == 0:\n return ()\n shape = []\n shapes = [a.shape for a in args]\n ndim = args[0].ndim\n for i in range(ndim):\n shape.append(max([s[i] for s in shapes]))\n return tuple(shape)\n\n\ndef orthogonal(*args) -> bool:\n \"\"\"Determine if a set of arrays are orthogonal.\n\n Parameters\n ----------\n args : array-likes or array shapes\n\n Returns\n -------\n bool\n Array orthogonality condition.\n \"\"\"\n for i, arg in enumerate(args):\n if hasattr(arg, \"shape\"):\n args[i] = arg.shape\n for s in zip(*args):\n if np.product(s) != max(s):\n return False\n return True\n\n\ndef remove_nans_1D(*args) -> tuple:\n \"\"\"Remove nans in a set of 1D arrays.\n\n Removes indicies in all arrays if any array is nan at that index.\n All input arrays must have the same size.\n\n Parameters\n ----------\n args : 1D arrays\n\n Returns\n -------\n tuple\n Tuple of 1D arrays in same order as given, with nan indicies removed.\n \"\"\"\n vals = np.isnan(args[0])\n for a in args:\n vals |= np.isnan(a)\n return tuple(np.array(a)[~vals] for a in args)\n\n\ndef share_nans(*arrs) -> tuple:\n \"\"\"Take a list of nD arrays and return a new list of nD arrays.\n\n The new list is in the same order as the old list.\n If one indexed element in an old array is nan then every element for that\n index in all new arrays in the list is then nan.\n\n Parameters\n ----------\n *arrs : nD arrays.\n\n Returns\n -------\n list\n List of nD arrays in same order as given, with nan indicies syncronized.\n \"\"\"\n nans = np.zeros(joint_shape(*arrs))\n for arr in arrs:\n nans *= arr\n return tuple([a + nans for a in arrs])\n\n\ndef smooth_1D(arr, n=10) -> np.ndarray:\n \"\"\"Smooth 1D data by 'running average'.\n\n Parameters\n ----------\n n : int\n number of points to average\n \"\"\"\n for i in range(n, len(arr) - n):\n window = arr[i - n : i + n].copy()\n arr[i] = window.mean()\n return arr\n\n\ndef svd(a, i=None) -> tuple:\n \"\"\"Singular Value Decomposition.\n\n Factors the matrix `a` as ``u * np.diag(s) * v``, where `u` and `v`\n are unitary and `s` is a 1D array of `a`'s singular values.\n\n Parameters\n ----------\n a : array_like\n Input array.\n i : int or slice (optional)\n What singular value \"slice\" to return.\n Default is None which returns unitary 2D arrays.\n\n Returns\n -------\n tuple\n Decomposed arrays in order `u`, `v`, `s`\n \"\"\"\n u, s, v = np.linalg.svd(a, full_matrices=False, compute_uv=True)\n u = u.T\n if i is None:\n return u, v, s\n else:\n return u[i], v[i], s[i]\n\n\ndef unique(arr, tolerance=1e-6) -> np.ndarray:\n \"\"\"Return unique elements in 1D array, within tolerance.\n\n Parameters\n ----------\n arr : array_like\n Input array. 
This will be flattened if it is not already 1D.\n tolerance : number (optional)\n The tolerance for uniqueness.\n\n Returns\n -------\n array\n The sorted unique values.\n \"\"\"\n arr = sorted(arr.flatten())\n unique = []\n while len(arr) > 0:\n current = arr[0]\n lis = [xi for xi in arr if np.abs(current - xi) < tolerance]\n arr = [xi for xi in arr if not np.abs(lis[0] - xi) < tolerance]\n xi_lis_average = sum(lis) / len(lis)\n unique.append(xi_lis_average)\n return np.array(unique)\n\n\ndef valid_index(index, shape) -> tuple:\n \"\"\"Get a valid index for a broadcastable shape.\n\n Parameters\n ----------\n index : tuple\n Given index.\n shape : tuple of int\n Shape.\n\n Returns\n -------\n tuple\n Valid index.\n \"\"\"\n # append slices to index\n index = list(index)\n while len(index) < len(shape):\n index.append(slice(None))\n # fill out, in reverse\n out = []\n for i, s in zip(index[::-1], shape[::-1]):\n if s == 1:\n if isinstance(i, slice):\n out.append(slice(None))\n else:\n out.append(0)\n else:\n out.append(i)\n return tuple(out[::-1])\n\n\ndef mask_reduce(mask):\n \"\"\"Reduce a boolean mask, removing all false slices in any dimension.\n\n Parameters\n ----------\n mask : ndarray with bool dtype\n The mask which is to be reduced\n\n Returns\n -------\n A boolean mask with no all False slices.\n \"\"\"\n mask = mask.copy()\n for i in range(len(mask.shape)):\n a = mask.copy()\n j = list(range(len(mask.shape)))\n j.remove(i)\n j = tuple(j)\n a = a.max(axis=j, keepdims=True)\n idx = [slice(None)] * len(mask.shape)\n a = a.flatten()\n idx[i] = [k for k in range(len(a)) if a[k]]\n mask = mask[tuple(idx)]\n return mask\n\n\ndef enforce_mask_shape(mask, shape):\n \"\"\"Reduce a boolean mask to fit a given shape.\n\n Parameters\n ----------\n mask : ndarray with bool dtype\n The mask which is to be reduced\n shape : tuple of int\n Shape which broadcasts to the mask shape.\n\n Returns\n -------\n A boolean mask, collapsed along axes where the shape given has one element.\n \"\"\"\n red = tuple([i for i in range(len(shape)) if shape[i] == 1])\n return mask.max(axis=red, keepdims=True)\n", "path": "WrightTools/kit/_array.py"}], "after_files": [{"content": "\"\"\"Array interaction tools.\"\"\"\n\n\n# --- import --------------------------------------------------------------------------------------\n\n\nimport numpy as np\n\nfrom .. import exceptions as wt_exceptions\n\n\n# --- define --------------------------------------------------------------------------------------\n\n\n__all__ = [\n \"closest_pair\",\n \"diff\",\n \"fft\",\n \"joint_shape\",\n \"orthogonal\",\n \"remove_nans_1D\",\n \"share_nans\",\n \"smooth_1D\",\n \"svd\",\n \"unique\",\n \"valid_index\",\n \"mask_reduce\",\n \"enforce_mask_shape\",\n]\n\n\n# --- functions -----------------------------------------------------------------------------------\n\n\ndef closest_pair(arr, give=\"indicies\"):\n \"\"\"Find the pair of indices corresponding to the closest elements in an array.\n\n If multiple pairs are equally close, both pairs of indicies are returned.\n Optionally returns the closest distance itself.\n\n I am sure that this could be written as a cheaper operation. I\n wrote this as a quick and dirty method because I need it now to use on some\n relatively small arrays. Feel free to refactor if you need this operation\n done as fast as possible. - Blaise 2016-02-07\n\n Parameters\n ----------\n arr : numpy.ndarray\n The array to search.\n give : {'indicies', 'distance'} (optional)\n Toggle return behavior. 
If 'distance', returns a single float - the\n closest distance itself. Default is indicies.\n\n Returns\n -------\n list of lists of two tuples\n List containing lists of two tuples: indicies the nearest pair in the\n array.\n\n >>> arr = np.array([0, 1, 2, 3, 3, 4, 5, 6, 1])\n >>> closest_pair(arr)\n [[(1,), (8,)], [(3,), (4,)]]\n\n \"\"\"\n idxs = [idx for idx in np.ndindex(arr.shape)]\n outs = []\n min_dist = arr.max() - arr.min()\n for idxa in idxs:\n for idxb in idxs:\n if idxa == idxb:\n continue\n dist = abs(arr[idxa] - arr[idxb])\n if dist == min_dist:\n if not [idxb, idxa] in outs:\n outs.append([idxa, idxb])\n elif dist < min_dist:\n min_dist = dist\n outs = [[idxa, idxb]]\n if give == \"indicies\":\n return outs\n elif give == \"distance\":\n return min_dist\n else:\n raise KeyError(\"give not recognized in closest_pair\")\n\n\ndef diff(xi, yi, order=1) -> np.ndarray:\n \"\"\"Take the numerical derivative of a 1D array.\n\n Output is mapped onto the original coordinates using linear interpolation.\n Expects monotonic xi values.\n\n Parameters\n ----------\n xi : 1D array-like\n Coordinates.\n yi : 1D array-like\n Values.\n order : positive integer (optional)\n Order of differentiation.\n\n Returns\n -------\n 1D numpy array\n Numerical derivative. Has the same shape as the input arrays.\n \"\"\"\n yi = np.array(yi).copy()\n flip = False\n if xi[-1] < xi[0]:\n xi = np.flipud(xi.copy())\n yi = np.flipud(yi)\n flip = True\n midpoints = (xi[1:] + xi[:-1]) / 2\n for _ in range(order):\n d = np.diff(yi)\n d /= np.diff(xi)\n yi = np.interp(xi, midpoints, d)\n if flip:\n yi = np.flipud(yi)\n return yi\n\n\ndef fft(xi, yi, axis=0) -> tuple:\n \"\"\"Take the 1D FFT of an N-dimensional array and return \"sensible\" properly shifted arrays.\n\n Parameters\n ----------\n xi : numpy.ndarray\n 1D array over which the points to be FFT'ed are defined\n yi : numpy.ndarray\n ND array with values to FFT\n axis : int\n axis of yi to perform FFT over\n\n Returns\n -------\n xi : 1D numpy.ndarray\n 1D array. Conjugate to input xi. Example: if input xi is in the time\n domain, output xi is in frequency domain.\n yi : ND numpy.ndarray\n FFT. 
Has the same shape as the input array (yi).\n \"\"\"\n # xi must be 1D\n if xi.ndim != 1:\n raise wt_exceptions.DimensionalityError(1, xi.ndim)\n # xi must be evenly spaced\n spacing = np.diff(xi)\n if not np.allclose(spacing, spacing.mean()):\n raise RuntimeError(\"WrightTools.kit.fft: argument xi must be evenly spaced\")\n # fft\n yi = np.fft.fft(yi, axis=axis)\n d = (xi.max() - xi.min()) / (xi.size - 1)\n xi = np.fft.fftfreq(xi.size, d=d)\n # shift\n xi = np.fft.fftshift(xi)\n yi = np.fft.fftshift(yi, axes=axis)\n return xi, yi\n\n\ndef joint_shape(*args) -> tuple:\n \"\"\"Given a set of arrays, return the joint shape.\n\n Parameters\n ----------\n args : array-likes\n\n Returns\n -------\n tuple of int\n Joint shape.\n \"\"\"\n if len(args) == 0:\n return ()\n shape = []\n shapes = [a.shape for a in args]\n ndim = args[0].ndim\n for i in range(ndim):\n shape.append(max([s[i] for s in shapes]))\n return tuple(shape)\n\n\ndef orthogonal(*args) -> bool:\n \"\"\"Determine if a set of arrays are orthogonal.\n\n Parameters\n ----------\n args : array-likes or array shapes\n\n Returns\n -------\n bool\n Array orthogonality condition.\n \"\"\"\n for i, arg in enumerate(args):\n if hasattr(arg, \"shape\"):\n args[i] = arg.shape\n for s in zip(*args):\n if np.product(s) != max(s):\n return False\n return True\n\n\ndef remove_nans_1D(*args) -> tuple:\n \"\"\"Remove nans in a set of 1D arrays.\n\n Removes indicies in all arrays if any array is nan at that index.\n All input arrays must have the same size.\n\n Parameters\n ----------\n args : 1D arrays\n\n Returns\n -------\n tuple\n Tuple of 1D arrays in same order as given, with nan indicies removed.\n \"\"\"\n vals = np.isnan(args[0])\n for a in args:\n vals |= np.isnan(a)\n return tuple(np.array(a)[~vals] for a in args)\n\n\ndef share_nans(*arrs) -> tuple:\n \"\"\"Take a list of nD arrays and return a new list of nD arrays.\n\n The new list is in the same order as the old list.\n If one indexed element in an old array is nan then every element for that\n index in all new arrays in the list is then nan.\n\n Parameters\n ----------\n *arrs : nD arrays.\n\n Returns\n -------\n list\n List of nD arrays in same order as given, with nan indicies syncronized.\n \"\"\"\n nans = np.zeros(joint_shape(*arrs))\n for arr in arrs:\n nans *= arr\n return tuple([a + nans for a in arrs])\n\n\ndef smooth_1D(arr, n=10, smooth_type=\"flat\") -> np.ndarray:\n \"\"\"Smooth 1D data using a window function.\n \n Edge effects will be present. 
\n\n Parameters\n ----------\n arr : array_like\n Input array, 1D.\n n : int (optional)\n Window length.\n smooth_type : {'flat', 'hanning', 'hamming', 'bartlett', 'blackman'} (optional)\n Type of window function to convolve data with.\n 'flat' window will produce a moving average smoothing.\n \n Returns\n -------\n array_like\n Smoothed 1D array.\n \"\"\"\n\n # check array input\n if arr.ndim != 1:\n raise wt_exceptions.DimensionalityError(1, arr.ndim)\n if arr.size < n:\n message = \"Input array size must be larger than window size.\"\n raise wt_exceptions.ValueError(message)\n if n < 3:\n return arr\n # construct window array\n if smooth_type == \"flat\":\n w = np.ones(n, dtype=arr.dtype)\n elif smooth_type == \"hanning\":\n w = np.hanning(n)\n elif smooth_type == \"hamming\":\n w = np.hamming(n)\n elif smooth_type == \"bartlett\":\n w = np.bartlett(n)\n elif smooth_type == \"blackman\":\n w = np.blackman(n)\n else:\n message = \"Given smooth_type, {0}, not available.\".format(str(smooth_type))\n raise wt_exceptions.ValueError(message)\n # convolve reflected array with window function\n out = np.convolve(w / w.sum(), arr, mode=\"same\")\n return out\n\n\ndef svd(a, i=None) -> tuple:\n \"\"\"Singular Value Decomposition.\n\n Factors the matrix `a` as ``u * np.diag(s) * v``, where `u` and `v`\n are unitary and `s` is a 1D array of `a`'s singular values.\n\n Parameters\n ----------\n a : array_like\n Input array.\n i : int or slice (optional)\n What singular value \"slice\" to return.\n Default is None which returns unitary 2D arrays.\n\n Returns\n -------\n tuple\n Decomposed arrays in order `u`, `v`, `s`\n \"\"\"\n u, s, v = np.linalg.svd(a, full_matrices=False, compute_uv=True)\n u = u.T\n if i is None:\n return u, v, s\n else:\n return u[i], v[i], s[i]\n\n\ndef unique(arr, tolerance=1e-6) -> np.ndarray:\n \"\"\"Return unique elements in 1D array, within tolerance.\n\n Parameters\n ----------\n arr : array_like\n Input array. 
This will be flattened if it is not already 1D.\n tolerance : number (optional)\n The tolerance for uniqueness.\n\n Returns\n -------\n array\n The sorted unique values.\n \"\"\"\n arr = sorted(arr.flatten())\n unique = []\n while len(arr) > 0:\n current = arr[0]\n lis = [xi for xi in arr if np.abs(current - xi) < tolerance]\n arr = [xi for xi in arr if not np.abs(lis[0] - xi) < tolerance]\n xi_lis_average = sum(lis) / len(lis)\n unique.append(xi_lis_average)\n return np.array(unique)\n\n\ndef valid_index(index, shape) -> tuple:\n \"\"\"Get a valid index for a broadcastable shape.\n\n Parameters\n ----------\n index : tuple\n Given index.\n shape : tuple of int\n Shape.\n\n Returns\n -------\n tuple\n Valid index.\n \"\"\"\n # append slices to index\n index = list(index)\n while len(index) < len(shape):\n index.append(slice(None))\n # fill out, in reverse\n out = []\n for i, s in zip(index[::-1], shape[::-1]):\n if s == 1:\n if isinstance(i, slice):\n out.append(slice(None))\n else:\n out.append(0)\n else:\n out.append(i)\n return tuple(out[::-1])\n\n\ndef mask_reduce(mask):\n \"\"\"Reduce a boolean mask, removing all false slices in any dimension.\n\n Parameters\n ----------\n mask : ndarray with bool dtype\n The mask which is to be reduced\n\n Returns\n -------\n A boolean mask with no all False slices.\n \"\"\"\n mask = mask.copy()\n for i in range(len(mask.shape)):\n a = mask.copy()\n j = list(range(len(mask.shape)))\n j.remove(i)\n j = tuple(j)\n a = a.max(axis=j, keepdims=True)\n idx = [slice(None)] * len(mask.shape)\n a = a.flatten()\n idx[i] = [k for k in range(len(a)) if a[k]]\n mask = mask[tuple(idx)]\n return mask\n\n\ndef enforce_mask_shape(mask, shape):\n \"\"\"Reduce a boolean mask to fit a given shape.\n\n Parameters\n ----------\n mask : ndarray with bool dtype\n The mask which is to be reduced\n shape : tuple of int\n Shape which broadcasts to the mask shape.\n\n Returns\n -------\n A boolean mask, collapsed along axes where the shape given has one element.\n \"\"\"\n red = tuple([i for i in range(len(shape)) if shape[i] == 1])\n return mask.max(axis=red, keepdims=True)\n", "path": "WrightTools/kit/_array.py"}]} | 3,909 | 580 |
gh_patches_debug_9536 | rasdani/github-patches | git_diff | ibis-project__ibis-1949 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
More informative IntegrityError on duplicate columns
```
~/Workspace/ibis/ibis/expr/schema.py in __init__(self, names, types)
32
33 if len(self._name_locs) < len(self.names):
---> 34 raise com.IntegrityError('Duplicate column names')
35
36 def __repr__(self):
IntegrityError: Duplicate column names
```
List the particular columns at least.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ibis/expr/schema.py`
Content:
```
1 import collections
2
3 from multipledispatch import Dispatcher
4
5 import ibis.common.exceptions as com
6 import ibis.expr.datatypes as dt
7 import ibis.util as util
8
9
10 class Schema:
11
12 """An object for holding table schema information, i.e., column names and
13 types.
14
15 Parameters
16 ----------
17 names : Sequence[str]
18 A sequence of ``str`` indicating the name of each column.
19 types : Sequence[DataType]
20 A sequence of :class:`ibis.expr.datatypes.DataType` objects
21 representing type of each column.
22 """
23
24 __slots__ = 'names', 'types', '_name_locs'
25
26 def __init__(self, names, types):
27 if not isinstance(names, list):
28 names = list(names)
29
30 self.names = names
31 self.types = list(map(dt.dtype, types))
32
33 self._name_locs = dict((v, i) for i, v in enumerate(self.names))
34
35 if len(self._name_locs) < len(self.names):
36 raise com.IntegrityError('Duplicate column names')
37
38 def __repr__(self):
39 space = 2 + max(map(len, self.names), default=0)
40 return "ibis.Schema {{{}\n}}".format(
41 util.indent(
42 ''.join(
43 '\n{}{}'.format(name.ljust(space), str(type))
44 for name, type in zip(self.names, self.types)
45 ),
46 2,
47 )
48 )
49
50 def __hash__(self):
51 return hash((type(self), tuple(self.names), tuple(self.types)))
52
53 def __len__(self):
54 return len(self.names)
55
56 def __iter__(self):
57 return iter(self.names)
58
59 def __contains__(self, name):
60 return name in self._name_locs
61
62 def __getitem__(self, name):
63 return self.types[self._name_locs[name]]
64
65 def __getstate__(self):
66 return {slot: getattr(self, slot) for slot in self.__class__.__slots__}
67
68 def __setstate__(self, instance_dict):
69 for key, value in instance_dict.items():
70 setattr(self, key, value)
71
72 def delete(self, names_to_delete):
73 for name in names_to_delete:
74 if name not in self:
75 raise KeyError(name)
76
77 new_names, new_types = [], []
78 for name, type_ in zip(self.names, self.types):
79 if name in names_to_delete:
80 continue
81 new_names.append(name)
82 new_types.append(type_)
83
84 return Schema(new_names, new_types)
85
86 @classmethod
87 def from_tuples(cls, values):
88 if not isinstance(values, (list, tuple)):
89 values = list(values)
90
91 names, types = zip(*values) if values else ([], [])
92 return Schema(names, types)
93
94 @classmethod
95 def from_dict(cls, dictionary):
96 return Schema(*zip(*dictionary.items()))
97
98 def equals(self, other, cache=None):
99 return self.names == other.names and self.types == other.types
100
101 def __eq__(self, other):
102 return self.equals(other)
103
104 def __gt__(self, other):
105 return set(self.items()) > set(other.items())
106
107 def __ge__(self, other):
108 return set(self.items()) >= set(other.items())
109
110 def append(self, schema):
111 return Schema(self.names + schema.names, self.types + schema.types)
112
113 def items(self):
114 return zip(self.names, self.types)
115
116 def name_at_position(self, i):
117 """
118 """
119 upper = len(self.names) - 1
120 if not 0 <= i <= upper:
121 raise ValueError(
122 'Column index must be between 0 and {:d}, inclusive'.format(
123 upper
124 )
125 )
126 return self.names[i]
127
128
129 class HasSchema:
130
131 """
132 Base class representing a structured dataset with a well-defined
133 schema.
134
135 Base implementation is for tables that do not reference a particular
136 concrete dataset or database table.
137 """
138
139 def __repr__(self):
140 return '{}({})'.format(type(self).__name__, repr(self.schema))
141
142 def has_schema(self):
143 return True
144
145 def equals(self, other, cache=None):
146 return type(self) == type(other) and self.schema.equals(
147 other.schema, cache=cache
148 )
149
150 def root_tables(self):
151 return [self]
152
153 @property
154 def schema(self):
155 raise NotImplementedError
156
157
158 schema = Dispatcher('schema')
159 infer = Dispatcher('infer')
160
161
162 @schema.register(Schema)
163 def identity(s):
164 return s
165
166
167 @schema.register(collections.abc.Mapping)
168 def schema_from_mapping(d):
169 return Schema.from_dict(d)
170
171
172 @schema.register(collections.abc.Iterable)
173 def schema_from_pairs(lst):
174 return Schema.from_tuples(lst)
175
176
177 @schema.register(collections.abc.Iterable, collections.abc.Iterable)
178 def schema_from_names_types(names, types):
179 return Schema(names, types)
180
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ibis/expr/schema.py b/ibis/expr/schema.py
--- a/ibis/expr/schema.py
+++ b/ibis/expr/schema.py
@@ -33,7 +33,11 @@
self._name_locs = dict((v, i) for i, v in enumerate(self.names))
if len(self._name_locs) < len(self.names):
- raise com.IntegrityError('Duplicate column names')
+ duplicate_names = list(self.names)
+ for v in self._name_locs.keys():
+ duplicate_names.remove(v)
+ raise com.IntegrityError(
+ 'Duplicate column name(s): {}'.format(duplicate_names))
def __repr__(self):
space = 2 + max(map(len, self.names), default=0)
| {"golden_diff": "diff --git a/ibis/expr/schema.py b/ibis/expr/schema.py\n--- a/ibis/expr/schema.py\n+++ b/ibis/expr/schema.py\n@@ -33,7 +33,11 @@\n self._name_locs = dict((v, i) for i, v in enumerate(self.names))\n \n if len(self._name_locs) < len(self.names):\n- raise com.IntegrityError('Duplicate column names')\n+ duplicate_names = list(self.names)\n+ for v in self._name_locs.keys():\n+ duplicate_names.remove(v)\n+ raise com.IntegrityError(\n+ 'Duplicate column name(s): {}'.format(duplicate_names))\n \n def __repr__(self):\n space = 2 + max(map(len, self.names), default=0)\n", "issue": "More informative IntegrityError on duplicate columns\n```\r\n~/Workspace/ibis/ibis/expr/schema.py in __init__(self, names, types)\r\n 32 \r\n 33 if len(self._name_locs) < len(self.names):\r\n---> 34 raise com.IntegrityError('Duplicate column names')\r\n 35 \r\n 36 def __repr__(self):\r\n\r\nIntegrityError: Duplicate column names\r\n```\r\n\r\nList the particular columns at least.\n", "before_files": [{"content": "import collections\n\nfrom multipledispatch import Dispatcher\n\nimport ibis.common.exceptions as com\nimport ibis.expr.datatypes as dt\nimport ibis.util as util\n\n\nclass Schema:\n\n \"\"\"An object for holding table schema information, i.e., column names and\n types.\n\n Parameters\n ----------\n names : Sequence[str]\n A sequence of ``str`` indicating the name of each column.\n types : Sequence[DataType]\n A sequence of :class:`ibis.expr.datatypes.DataType` objects\n representing type of each column.\n \"\"\"\n\n __slots__ = 'names', 'types', '_name_locs'\n\n def __init__(self, names, types):\n if not isinstance(names, list):\n names = list(names)\n\n self.names = names\n self.types = list(map(dt.dtype, types))\n\n self._name_locs = dict((v, i) for i, v in enumerate(self.names))\n\n if len(self._name_locs) < len(self.names):\n raise com.IntegrityError('Duplicate column names')\n\n def __repr__(self):\n space = 2 + max(map(len, self.names), default=0)\n return \"ibis.Schema {{{}\\n}}\".format(\n util.indent(\n ''.join(\n '\\n{}{}'.format(name.ljust(space), str(type))\n for name, type in zip(self.names, self.types)\n ),\n 2,\n )\n )\n\n def __hash__(self):\n return hash((type(self), tuple(self.names), tuple(self.types)))\n\n def __len__(self):\n return len(self.names)\n\n def __iter__(self):\n return iter(self.names)\n\n def __contains__(self, name):\n return name in self._name_locs\n\n def __getitem__(self, name):\n return self.types[self._name_locs[name]]\n\n def __getstate__(self):\n return {slot: getattr(self, slot) for slot in self.__class__.__slots__}\n\n def __setstate__(self, instance_dict):\n for key, value in instance_dict.items():\n setattr(self, key, value)\n\n def delete(self, names_to_delete):\n for name in names_to_delete:\n if name not in self:\n raise KeyError(name)\n\n new_names, new_types = [], []\n for name, type_ in zip(self.names, self.types):\n if name in names_to_delete:\n continue\n new_names.append(name)\n new_types.append(type_)\n\n return Schema(new_names, new_types)\n\n @classmethod\n def from_tuples(cls, values):\n if not isinstance(values, (list, tuple)):\n values = list(values)\n\n names, types = zip(*values) if values else ([], [])\n return Schema(names, types)\n\n @classmethod\n def from_dict(cls, dictionary):\n return Schema(*zip(*dictionary.items()))\n\n def equals(self, other, cache=None):\n return self.names == other.names and self.types == other.types\n\n def __eq__(self, other):\n return self.equals(other)\n\n def __gt__(self, other):\n return 
set(self.items()) > set(other.items())\n\n def __ge__(self, other):\n return set(self.items()) >= set(other.items())\n\n def append(self, schema):\n return Schema(self.names + schema.names, self.types + schema.types)\n\n def items(self):\n return zip(self.names, self.types)\n\n def name_at_position(self, i):\n \"\"\"\n \"\"\"\n upper = len(self.names) - 1\n if not 0 <= i <= upper:\n raise ValueError(\n 'Column index must be between 0 and {:d}, inclusive'.format(\n upper\n )\n )\n return self.names[i]\n\n\nclass HasSchema:\n\n \"\"\"\n Base class representing a structured dataset with a well-defined\n schema.\n\n Base implementation is for tables that do not reference a particular\n concrete dataset or database table.\n \"\"\"\n\n def __repr__(self):\n return '{}({})'.format(type(self).__name__, repr(self.schema))\n\n def has_schema(self):\n return True\n\n def equals(self, other, cache=None):\n return type(self) == type(other) and self.schema.equals(\n other.schema, cache=cache\n )\n\n def root_tables(self):\n return [self]\n\n @property\n def schema(self):\n raise NotImplementedError\n\n\nschema = Dispatcher('schema')\ninfer = Dispatcher('infer')\n\n\[email protected](Schema)\ndef identity(s):\n return s\n\n\[email protected](collections.abc.Mapping)\ndef schema_from_mapping(d):\n return Schema.from_dict(d)\n\n\[email protected](collections.abc.Iterable)\ndef schema_from_pairs(lst):\n return Schema.from_tuples(lst)\n\n\[email protected](collections.abc.Iterable, collections.abc.Iterable)\ndef schema_from_names_types(names, types):\n return Schema(names, types)\n", "path": "ibis/expr/schema.py"}], "after_files": [{"content": "import collections\n\nfrom multipledispatch import Dispatcher\n\nimport ibis.common.exceptions as com\nimport ibis.expr.datatypes as dt\nimport ibis.util as util\n\n\nclass Schema:\n\n \"\"\"An object for holding table schema information, i.e., column names and\n types.\n\n Parameters\n ----------\n names : Sequence[str]\n A sequence of ``str`` indicating the name of each column.\n types : Sequence[DataType]\n A sequence of :class:`ibis.expr.datatypes.DataType` objects\n representing type of each column.\n \"\"\"\n\n __slots__ = 'names', 'types', '_name_locs'\n\n def __init__(self, names, types):\n if not isinstance(names, list):\n names = list(names)\n\n self.names = names\n self.types = list(map(dt.dtype, types))\n\n self._name_locs = dict((v, i) for i, v in enumerate(self.names))\n\n if len(self._name_locs) < len(self.names):\n duplicate_names = list(self.names)\n for v in self._name_locs.keys():\n duplicate_names.remove(v)\n raise com.IntegrityError(\n 'Duplicate column name(s): {}'.format(duplicate_names))\n\n def __repr__(self):\n space = 2 + max(map(len, self.names), default=0)\n return \"ibis.Schema {{{}\\n}}\".format(\n util.indent(\n ''.join(\n '\\n{}{}'.format(name.ljust(space), str(type))\n for name, type in zip(self.names, self.types)\n ),\n 2,\n )\n )\n\n def __hash__(self):\n return hash((type(self), tuple(self.names), tuple(self.types)))\n\n def __len__(self):\n return len(self.names)\n\n def __iter__(self):\n return iter(self.names)\n\n def __contains__(self, name):\n return name in self._name_locs\n\n def __getitem__(self, name):\n return self.types[self._name_locs[name]]\n\n def __getstate__(self):\n return {slot: getattr(self, slot) for slot in self.__class__.__slots__}\n\n def __setstate__(self, instance_dict):\n for key, value in instance_dict.items():\n setattr(self, key, value)\n\n def delete(self, names_to_delete):\n for name in 
names_to_delete:\n if name not in self:\n raise KeyError(name)\n\n new_names, new_types = [], []\n for name, type_ in zip(self.names, self.types):\n if name in names_to_delete:\n continue\n new_names.append(name)\n new_types.append(type_)\n\n return Schema(new_names, new_types)\n\n @classmethod\n def from_tuples(cls, values):\n if not isinstance(values, (list, tuple)):\n values = list(values)\n\n names, types = zip(*values) if values else ([], [])\n return Schema(names, types)\n\n @classmethod\n def from_dict(cls, dictionary):\n return Schema(*zip(*dictionary.items()))\n\n def equals(self, other, cache=None):\n return self.names == other.names and self.types == other.types\n\n def __eq__(self, other):\n return self.equals(other)\n\n def __gt__(self, other):\n return set(self.items()) > set(other.items())\n\n def __ge__(self, other):\n return set(self.items()) >= set(other.items())\n\n def append(self, schema):\n return Schema(self.names + schema.names, self.types + schema.types)\n\n def items(self):\n return zip(self.names, self.types)\n\n def name_at_position(self, i):\n \"\"\"\n \"\"\"\n upper = len(self.names) - 1\n if not 0 <= i <= upper:\n raise ValueError(\n 'Column index must be between 0 and {:d}, inclusive'.format(\n upper\n )\n )\n return self.names[i]\n\n\nclass HasSchema:\n\n \"\"\"\n Base class representing a structured dataset with a well-defined\n schema.\n\n Base implementation is for tables that do not reference a particular\n concrete dataset or database table.\n \"\"\"\n\n def __repr__(self):\n return '{}({})'.format(type(self).__name__, repr(self.schema))\n\n def has_schema(self):\n return True\n\n def equals(self, other, cache=None):\n return type(self) == type(other) and self.schema.equals(\n other.schema, cache=cache\n )\n\n def root_tables(self):\n return [self]\n\n @property\n def schema(self):\n raise NotImplementedError\n\n\nschema = Dispatcher('schema')\ninfer = Dispatcher('infer')\n\n\[email protected](Schema)\ndef identity(s):\n return s\n\n\[email protected](collections.abc.Mapping)\ndef schema_from_mapping(d):\n return Schema.from_dict(d)\n\n\[email protected](collections.abc.Iterable)\ndef schema_from_pairs(lst):\n return Schema.from_tuples(lst)\n\n\[email protected](collections.abc.Iterable, collections.abc.Iterable)\ndef schema_from_names_types(names, types):\n return Schema(names, types)\n", "path": "ibis/expr/schema.py"}]} | 1,863 | 174 |
gh_patches_debug_2006 | rasdani/github-patches | git_diff | psf__black-1892 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
s390x: test_python2/test_python2_unicode_literals can't assign to () INTERNAL ERROR
During the build of 19.10b0 in Fedora, the following test failure occurs on s390x (Big Endian) architecture:
```
======================================================================
FAIL: test_python2 (tests.test_black.BlackTestCase)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/builddir/build/BUILD/black-19.10b0/black.py", line 3754, in assert_equivalent
src_ast = parse_ast(src)
File "/builddir/build/BUILD/black-19.10b0/black.py", line 3686, in parse_ast
return ast27.parse(src)
File "/usr/lib64/python3.8/site-packages/typed_ast/ast27.py", line 50, in parse
return _ast27.parse(source, filename, mode)
File "<unknown>", line 10
SyntaxError: can't assign to ()
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/lib64/python3.8/unittest/mock.py", line 1342, in patched
return func(*newargs, **newkeywargs)
File "/builddir/build/BUILD/black-19.10b0/tests/test_black.py", line 543, in test_python2
black.assert_equivalent(source, actual)
File "/builddir/build/BUILD/black-19.10b0/black.py", line 3756, in assert_equivalent
raise AssertionError(
AssertionError: cannot use --safe with this file; failed to parse source file. AST error message: can't assign to () (<unknown>, line 10)
======================================================================
FAIL: test_python2_unicode_literals (tests.test_black.BlackTestCase)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/lib64/python3.8/unittest/mock.py", line 1342, in patched
return func(*newargs, **newkeywargs)
File "/builddir/build/BUILD/black-19.10b0/tests/test_black.py", line 560, in test_python2_unicode_literals
black.assert_equivalent(source, actual)
File "/builddir/build/BUILD/black-19.10b0/black.py", line 3775, in assert_equivalent
raise AssertionError(
AssertionError: INTERNAL ERROR: Black produced code that is not equivalent to the source. Please report a bug on https://github.com/psf/black/issues. This diff might be helpful:
--- src
+++ dst
@@ -1,4 +1,70 @@
Module(
body=
+ ImportFrom(
+ level=
+ 0, # int
+ module=
+ '__future__', # str
+ names=
+ alias(
+ asname=
+ '_unicode_literals', # str
+ name=
+ 'unicode_literals', # str
+ ) # /alias
+ ) # /ImportFrom
+ ImportFrom(
+ level=
+ 0, # int
+ module=
+ '__future__', # str
+ names=
+ alias(
+ asname=
+ None, # NoneType
+ name=
+ 'absolute_import', # str
+ ) # /alias
+ ) # /ImportFrom
+ ImportFrom(
+ level=
+ 0, # int
+ module=
+ '__future__', # str
+ names=
+ alias(
+ asname=
+ 'lol', # str
+ name=
+ 'print_function', # str
+ ) # /alias
+ alias(
+ asname=
+ None, # NoneType
+ name=
+ 'with_function', # str
+ ) # /alias
+ ) # /ImportFrom
+ Expr(
+ value=
+ Constant(
+ value=
+ 'hello', # str
+ ) # /Constant
+ ) # /Expr
+ Expr(
+ value=
+ Constant(
+ value=
+ 'hello', # str
+ ) # /Constant
+ ) # /Expr
+ Expr(
+ value=
+ Constant(
+ value=
+ 'hello', # str
+ ) # /Constant
+ ) # /Expr
type_ignores=
) # /Module
----------------------------------------------------------------------
Ran 119 tests in 18.012s
FAILED (failures=2)
```
**To Reproduce**, run the test suite on s390x.
Here is the build log with all the commands: [build.log](https://github.com/psf/black/files/3782557/build.log)
Here is the root log with all the package versions: [root.log](https://github.com/psf/black/files/3782561/root.log)
**Expected behavior**
Tests succeed on all architectures.
**Environment:**
- Version: 19.10b0
- OS and Python version: Linux, Fedora 32 on s390x, Python 3.8.0
**Does this bug also happen on master?** yes, on 6bedb5c58a7d8c25aa9509f8217bc24e9797e90d
**Additional context** The problem does not happen on the same build system with armv7hl or ppc64le.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 # Copyright (C) 2020 Łukasz Langa
2 from setuptools import setup
3 import sys
4 import os
5
6 assert sys.version_info >= (3, 6, 0), "black requires Python 3.6+"
7 from pathlib import Path # noqa E402
8
9 CURRENT_DIR = Path(__file__).parent
10 sys.path.insert(0, str(CURRENT_DIR)) # for setuptools.build_meta
11
12
13 def get_long_description() -> str:
14 return (
15 (CURRENT_DIR / "README.md").read_text(encoding="utf8")
16 + "\n\n"
17 + (CURRENT_DIR / "CHANGES.md").read_text(encoding="utf8")
18 )
19
20
21 USE_MYPYC = False
22 # To compile with mypyc, a mypyc checkout must be present on the PYTHONPATH
23 if len(sys.argv) > 1 and sys.argv[1] == "--use-mypyc":
24 sys.argv.pop(1)
25 USE_MYPYC = True
26 if os.getenv("BLACK_USE_MYPYC", None) == "1":
27 USE_MYPYC = True
28
29 if USE_MYPYC:
30 mypyc_targets = [
31 "src/black/__init__.py",
32 "src/blib2to3/pytree.py",
33 "src/blib2to3/pygram.py",
34 "src/blib2to3/pgen2/parse.py",
35 "src/blib2to3/pgen2/grammar.py",
36 "src/blib2to3/pgen2/token.py",
37 "src/blib2to3/pgen2/driver.py",
38 "src/blib2to3/pgen2/pgen.py",
39 ]
40
41 from mypyc.build import mypycify
42
43 opt_level = os.getenv("MYPYC_OPT_LEVEL", "3")
44 ext_modules = mypycify(mypyc_targets, opt_level=opt_level)
45 else:
46 ext_modules = []
47
48 setup(
49 name="black",
50 use_scm_version={
51 "write_to": "src/_black_version.py",
52 "write_to_template": 'version = "{version}"\n',
53 },
54 description="The uncompromising code formatter.",
55 long_description=get_long_description(),
56 long_description_content_type="text/markdown",
57 keywords="automation formatter yapf autopep8 pyfmt gofmt rustfmt",
58 author="Łukasz Langa",
59 author_email="[email protected]",
60 url="https://github.com/psf/black",
61 project_urls={"Changelog": "https://github.com/psf/black/blob/master/CHANGES.md"},
62 license="MIT",
63 py_modules=["_black_version"],
64 ext_modules=ext_modules,
65 packages=["blackd", "black", "blib2to3", "blib2to3.pgen2", "black_primer"],
66 package_dir={"": "src"},
67 package_data={"blib2to3": ["*.txt"], "black": ["py.typed"]},
68 python_requires=">=3.6",
69 zip_safe=False,
70 install_requires=[
71 "click>=7.1.2",
72 "appdirs",
73 "toml>=0.10.1",
74 "typed-ast>=1.4.0",
75 "regex>=2020.1.8",
76 "pathspec>=0.6, <1",
77 "dataclasses>=0.6; python_version < '3.7'",
78 "typing_extensions>=3.7.4",
79 "mypy_extensions>=0.4.3",
80 ],
81 extras_require={
82 "d": ["aiohttp>=3.3.2", "aiohttp-cors"],
83 "colorama": ["colorama>=0.4.3"],
84 },
85 test_suite="tests.test_black",
86 classifiers=[
87 "Development Status :: 4 - Beta",
88 "Environment :: Console",
89 "Intended Audience :: Developers",
90 "License :: OSI Approved :: MIT License",
91 "Operating System :: OS Independent",
92 "Programming Language :: Python",
93 "Programming Language :: Python :: 3.6",
94 "Programming Language :: Python :: 3.7",
95 "Programming Language :: Python :: 3.8",
96 "Programming Language :: Python :: 3.9",
97 "Programming Language :: Python :: 3 :: Only",
98 "Topic :: Software Development :: Libraries :: Python Modules",
99 "Topic :: Software Development :: Quality Assurance",
100 ],
101 entry_points={
102 "console_scripts": [
103 "black=black:patched_main",
104 "blackd=blackd:patched_main [d]",
105 "black-primer=black_primer.cli:main",
106 ]
107 },
108 )
109
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -71,7 +71,7 @@
"click>=7.1.2",
"appdirs",
"toml>=0.10.1",
- "typed-ast>=1.4.0",
+ "typed-ast>=1.4.2",
"regex>=2020.1.8",
"pathspec>=0.6, <1",
"dataclasses>=0.6; python_version < '3.7'",
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -71,7 +71,7 @@\n \"click>=7.1.2\",\n \"appdirs\",\n \"toml>=0.10.1\",\n- \"typed-ast>=1.4.0\",\n+ \"typed-ast>=1.4.2\",\n \"regex>=2020.1.8\",\n \"pathspec>=0.6, <1\",\n \"dataclasses>=0.6; python_version < '3.7'\",\n", "issue": "s390x: test_python2/test_python2_unicode_literals can't assign to () INTERNAL ERROR\nDuring the build of 19.10b0 in Fedora, the following test failure occurs on s390x (Big Endian) architecture:\r\n\r\n```\r\n======================================================================\r\nFAIL: test_python2 (tests.test_black.BlackTestCase)\r\n----------------------------------------------------------------------\r\nTraceback (most recent call last):\r\n File \"/builddir/build/BUILD/black-19.10b0/black.py\", line 3754, in assert_equivalent\r\n src_ast = parse_ast(src)\r\n File \"/builddir/build/BUILD/black-19.10b0/black.py\", line 3686, in parse_ast\r\n return ast27.parse(src)\r\n File \"/usr/lib64/python3.8/site-packages/typed_ast/ast27.py\", line 50, in parse\r\n return _ast27.parse(source, filename, mode)\r\n File \"<unknown>\", line 10\r\nSyntaxError: can't assign to ()\r\nDuring handling of the above exception, another exception occurred:\r\nTraceback (most recent call last):\r\n File \"/usr/lib64/python3.8/unittest/mock.py\", line 1342, in patched\r\n return func(*newargs, **newkeywargs)\r\n File \"/builddir/build/BUILD/black-19.10b0/tests/test_black.py\", line 543, in test_python2\r\n black.assert_equivalent(source, actual)\r\n File \"/builddir/build/BUILD/black-19.10b0/black.py\", line 3756, in assert_equivalent\r\n raise AssertionError(\r\nAssertionError: cannot use --safe with this file; failed to parse source file. AST error message: can't assign to () (<unknown>, line 10)\r\n======================================================================\r\nFAIL: test_python2_unicode_literals (tests.test_black.BlackTestCase)\r\n----------------------------------------------------------------------\r\nTraceback (most recent call last):\r\n File \"/usr/lib64/python3.8/unittest/mock.py\", line 1342, in patched\r\n return func(*newargs, **newkeywargs)\r\n File \"/builddir/build/BUILD/black-19.10b0/tests/test_black.py\", line 560, in test_python2_unicode_literals\r\n black.assert_equivalent(source, actual)\r\n File \"/builddir/build/BUILD/black-19.10b0/black.py\", line 3775, in assert_equivalent\r\n raise AssertionError(\r\nAssertionError: INTERNAL ERROR: Black produced code that is not equivalent to the source. Please report a bug on https://github.com/psf/black/issues. 
This diff might be helpful: \r\n--- src\r\n+++ dst\r\n@@ -1,4 +1,70 @@\r\n Module(\r\n body=\r\n+ ImportFrom(\r\n+ level=\r\n+ 0, # int\r\n+ module=\r\n+ '__future__', # str\r\n+ names=\r\n+ alias(\r\n+ asname=\r\n+ '_unicode_literals', # str\r\n+ name=\r\n+ 'unicode_literals', # str\r\n+ ) # /alias\r\n+ ) # /ImportFrom\r\n+ ImportFrom(\r\n+ level=\r\n+ 0, # int\r\n+ module=\r\n+ '__future__', # str\r\n+ names=\r\n+ alias(\r\n+ asname=\r\n+ None, # NoneType\r\n+ name=\r\n+ 'absolute_import', # str\r\n+ ) # /alias\r\n+ ) # /ImportFrom\r\n+ ImportFrom(\r\n+ level=\r\n+ 0, # int\r\n+ module=\r\n+ '__future__', # str\r\n+ names=\r\n+ alias(\r\n+ asname=\r\n+ 'lol', # str\r\n+ name=\r\n+ 'print_function', # str\r\n+ ) # /alias\r\n+ alias(\r\n+ asname=\r\n+ None, # NoneType\r\n+ name=\r\n+ 'with_function', # str\r\n+ ) # /alias\r\n+ ) # /ImportFrom\r\n+ Expr(\r\n+ value=\r\n+ Constant(\r\n+ value=\r\n+ 'hello', # str\r\n+ ) # /Constant\r\n+ ) # /Expr\r\n+ Expr(\r\n+ value=\r\n+ Constant(\r\n+ value=\r\n+ 'hello', # str\r\n+ ) # /Constant\r\n+ ) # /Expr\r\n+ Expr(\r\n+ value=\r\n+ Constant(\r\n+ value=\r\n+ 'hello', # str\r\n+ ) # /Constant\r\n+ ) # /Expr\r\n type_ignores=\r\n ) # /Module\r\n----------------------------------------------------------------------\r\nRan 119 tests in 18.012s\r\nFAILED (failures=2)\r\n```\r\n\r\n**To Reproduce**, run the test suite on s390x. \r\n\r\nHere is the build log with all the commands: [build.log](https://github.com/psf/black/files/3782557/build.log)\r\n\r\nHere is the root log with all the package versions: [root.log](https://github.com/psf/black/files/3782561/root.log)\r\n\r\n\r\n**Expected behavior**\r\nTest succeed an all architectures.\r\n\r\n**Environment:**\r\n\r\n- Version: 19.10b0\r\n- OS and Python version: Linux, Fedora 32 on s390x, Python 3.8.0\r\n\r\n**Does this bug also happen on master?** yes, on 6bedb5c58a7d8c25aa9509f8217bc24e9797e90d\r\n\r\n**Additional context** The problem does not happen on the same build system with armv7hl or ppc64le.\r\n\n", "before_files": [{"content": "# Copyright (C) 2020 \u0141ukasz Langa\nfrom setuptools import setup\nimport sys\nimport os\n\nassert sys.version_info >= (3, 6, 0), \"black requires Python 3.6+\"\nfrom pathlib import Path # noqa E402\n\nCURRENT_DIR = Path(__file__).parent\nsys.path.insert(0, str(CURRENT_DIR)) # for setuptools.build_meta\n\n\ndef get_long_description() -> str:\n return (\n (CURRENT_DIR / \"README.md\").read_text(encoding=\"utf8\")\n + \"\\n\\n\"\n + (CURRENT_DIR / \"CHANGES.md\").read_text(encoding=\"utf8\")\n )\n\n\nUSE_MYPYC = False\n# To compile with mypyc, a mypyc checkout must be present on the PYTHONPATH\nif len(sys.argv) > 1 and sys.argv[1] == \"--use-mypyc\":\n sys.argv.pop(1)\n USE_MYPYC = True\nif os.getenv(\"BLACK_USE_MYPYC\", None) == \"1\":\n USE_MYPYC = True\n\nif USE_MYPYC:\n mypyc_targets = [\n \"src/black/__init__.py\",\n \"src/blib2to3/pytree.py\",\n \"src/blib2to3/pygram.py\",\n \"src/blib2to3/pgen2/parse.py\",\n \"src/blib2to3/pgen2/grammar.py\",\n \"src/blib2to3/pgen2/token.py\",\n \"src/blib2to3/pgen2/driver.py\",\n \"src/blib2to3/pgen2/pgen.py\",\n ]\n\n from mypyc.build import mypycify\n\n opt_level = os.getenv(\"MYPYC_OPT_LEVEL\", \"3\")\n ext_modules = mypycify(mypyc_targets, opt_level=opt_level)\nelse:\n ext_modules = []\n\nsetup(\n name=\"black\",\n use_scm_version={\n \"write_to\": \"src/_black_version.py\",\n \"write_to_template\": 'version = \"{version}\"\\n',\n },\n description=\"The uncompromising code formatter.\",\n 
long_description=get_long_description(),\n long_description_content_type=\"text/markdown\",\n keywords=\"automation formatter yapf autopep8 pyfmt gofmt rustfmt\",\n author=\"\u0141ukasz Langa\",\n author_email=\"[email protected]\",\n url=\"https://github.com/psf/black\",\n project_urls={\"Changelog\": \"https://github.com/psf/black/blob/master/CHANGES.md\"},\n license=\"MIT\",\n py_modules=[\"_black_version\"],\n ext_modules=ext_modules,\n packages=[\"blackd\", \"black\", \"blib2to3\", \"blib2to3.pgen2\", \"black_primer\"],\n package_dir={\"\": \"src\"},\n package_data={\"blib2to3\": [\"*.txt\"], \"black\": [\"py.typed\"]},\n python_requires=\">=3.6\",\n zip_safe=False,\n install_requires=[\n \"click>=7.1.2\",\n \"appdirs\",\n \"toml>=0.10.1\",\n \"typed-ast>=1.4.0\",\n \"regex>=2020.1.8\",\n \"pathspec>=0.6, <1\",\n \"dataclasses>=0.6; python_version < '3.7'\",\n \"typing_extensions>=3.7.4\",\n \"mypy_extensions>=0.4.3\",\n ],\n extras_require={\n \"d\": [\"aiohttp>=3.3.2\", \"aiohttp-cors\"],\n \"colorama\": [\"colorama>=0.4.3\"],\n },\n test_suite=\"tests.test_black\",\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Environment :: Console\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n \"Topic :: Software Development :: Quality Assurance\",\n ],\n entry_points={\n \"console_scripts\": [\n \"black=black:patched_main\",\n \"blackd=blackd:patched_main [d]\",\n \"black-primer=black_primer.cli:main\",\n ]\n },\n)\n", "path": "setup.py"}], "after_files": [{"content": "# Copyright (C) 2020 \u0141ukasz Langa\nfrom setuptools import setup\nimport sys\nimport os\n\nassert sys.version_info >= (3, 6, 0), \"black requires Python 3.6+\"\nfrom pathlib import Path # noqa E402\n\nCURRENT_DIR = Path(__file__).parent\nsys.path.insert(0, str(CURRENT_DIR)) # for setuptools.build_meta\n\n\ndef get_long_description() -> str:\n return (\n (CURRENT_DIR / \"README.md\").read_text(encoding=\"utf8\")\n + \"\\n\\n\"\n + (CURRENT_DIR / \"CHANGES.md\").read_text(encoding=\"utf8\")\n )\n\n\nUSE_MYPYC = False\n# To compile with mypyc, a mypyc checkout must be present on the PYTHONPATH\nif len(sys.argv) > 1 and sys.argv[1] == \"--use-mypyc\":\n sys.argv.pop(1)\n USE_MYPYC = True\nif os.getenv(\"BLACK_USE_MYPYC\", None) == \"1\":\n USE_MYPYC = True\n\nif USE_MYPYC:\n mypyc_targets = [\n \"src/black/__init__.py\",\n \"src/blib2to3/pytree.py\",\n \"src/blib2to3/pygram.py\",\n \"src/blib2to3/pgen2/parse.py\",\n \"src/blib2to3/pgen2/grammar.py\",\n \"src/blib2to3/pgen2/token.py\",\n \"src/blib2to3/pgen2/driver.py\",\n \"src/blib2to3/pgen2/pgen.py\",\n ]\n\n from mypyc.build import mypycify\n\n opt_level = os.getenv(\"MYPYC_OPT_LEVEL\", \"3\")\n ext_modules = mypycify(mypyc_targets, opt_level=opt_level)\nelse:\n ext_modules = []\n\nsetup(\n name=\"black\",\n use_scm_version={\n \"write_to\": \"src/_black_version.py\",\n \"write_to_template\": 'version = \"{version}\"\\n',\n },\n description=\"The uncompromising code formatter.\",\n long_description=get_long_description(),\n long_description_content_type=\"text/markdown\",\n keywords=\"automation formatter yapf autopep8 pyfmt gofmt rustfmt\",\n 
author=\"\u0141ukasz Langa\",\n author_email=\"[email protected]\",\n url=\"https://github.com/psf/black\",\n project_urls={\"Changelog\": \"https://github.com/psf/black/blob/master/CHANGES.md\"},\n license=\"MIT\",\n py_modules=[\"_black_version\"],\n ext_modules=ext_modules,\n packages=[\"blackd\", \"black\", \"blib2to3\", \"blib2to3.pgen2\", \"black_primer\"],\n package_dir={\"\": \"src\"},\n package_data={\"blib2to3\": [\"*.txt\"], \"black\": [\"py.typed\"]},\n python_requires=\">=3.6\",\n zip_safe=False,\n install_requires=[\n \"click>=7.1.2\",\n \"appdirs\",\n \"toml>=0.10.1\",\n \"typed-ast>=1.4.2\",\n \"regex>=2020.1.8\",\n \"pathspec>=0.6, <1\",\n \"dataclasses>=0.6; python_version < '3.7'\",\n \"typing_extensions>=3.7.4\",\n \"mypy_extensions>=0.4.3\",\n ],\n extras_require={\n \"d\": [\"aiohttp>=3.3.2\", \"aiohttp-cors\"],\n \"colorama\": [\"colorama>=0.4.3\"],\n },\n test_suite=\"tests.test_black\",\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Environment :: Console\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n \"Topic :: Software Development :: Quality Assurance\",\n ],\n entry_points={\n \"console_scripts\": [\n \"black=black:patched_main\",\n \"blackd=blackd:patched_main [d]\",\n \"black-primer=black_primer.cli:main\",\n ]\n },\n)\n", "path": "setup.py"}]} | 2,792 | 127 |
gh_patches_debug_39177 | rasdani/github-patches | git_diff | scikit-hep__pyhf-944 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
API: change CLs to be scalar
# Description
right now it's returning (1,) vectors
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/pyhf/cli/infer.py`
Content:
```
1 """The inference CLI group."""
2 import logging
3
4 import click
5 import json
6
7 from ..utils import EqDelimStringParamType
8 from ..infer import hypotest
9 from ..workspace import Workspace
10 from .. import get_backend, set_backend, optimize
11
12 log = logging.getLogger(__name__)
13
14
15 @click.group(name='infer')
16 def cli():
17 """Infererence CLI group."""
18
19
20 @cli.command()
21 @click.argument('workspace', default='-')
22 @click.option(
23 '--output-file',
24 help='The location of the output json file. If not specified, prints to screen.',
25 default=None,
26 )
27 @click.option('--measurement', default=None)
28 @click.option('-p', '--patch', multiple=True)
29 @click.option('--testpoi', default=1.0)
30 @click.option('--teststat', type=click.Choice(['q', 'qtilde']), default='qtilde')
31 @click.option(
32 '--backend',
33 type=click.Choice(['numpy', 'pytorch', 'tensorflow', 'jax', 'np', 'torch', 'tf']),
34 help='The tensor backend used for the calculation.',
35 default='numpy',
36 )
37 @click.option('--optimizer')
38 @click.option('--optconf', type=EqDelimStringParamType(), multiple=True)
39 def cls(
40 workspace,
41 output_file,
42 measurement,
43 patch,
44 testpoi,
45 teststat,
46 backend,
47 optimizer,
48 optconf,
49 ):
50 """
51 Compute CLs value(s) for a given pyhf workspace.
52
53 Example:
54
55 .. code-block:: shell
56
57 $ curl -sL https://git.io/JJYDE | pyhf cls
58
59 \b
60 {
61 "CLs_exp": [
62 0.07807427911686156,
63 0.17472571775474618,
64 0.35998495263681285,
65 0.6343568235898907,
66 0.8809947004472013
67 ],
68 "CLs_obs": 0.3599845631401915
69 }
70 """
71 with click.open_file(workspace, 'r') as specstream:
72 spec = json.load(specstream)
73
74 ws = Workspace(spec)
75
76 is_qtilde = teststat == 'qtilde'
77
78 patches = [json.loads(click.open_file(pfile, 'r').read()) for pfile in patch]
79 model = ws.model(
80 measurement_name=measurement,
81 patches=patches,
82 modifier_settings={
83 'normsys': {'interpcode': 'code4'},
84 'histosys': {'interpcode': 'code4p'},
85 },
86 )
87
88 # set the backend if not NumPy
89 if backend in ['pytorch', 'torch']:
90 set_backend("pytorch", precision="64b")
91 elif backend in ['tensorflow', 'tf']:
92 set_backend("tensorflow", precision="64b")
93 elif backend in ['jax']:
94 set_backend("jax")
95 tensorlib, _ = get_backend()
96
97 optconf = {k: v for item in optconf for k, v in item.items()}
98
99 # set the new optimizer
100 if optimizer:
101 new_optimizer = getattr(optimize, optimizer) or getattr(
102 optimize, f'{optimizer}_optimizer'
103 )
104 set_backend(tensorlib, new_optimizer(**optconf))
105
106 result = hypotest(
107 testpoi, ws.data(model), model, qtilde=is_qtilde, return_expected_set=True
108 )
109 result = {
110 'CLs_obs': tensorlib.tolist(result[0])[0],
111 'CLs_exp': tensorlib.tolist(tensorlib.reshape(result[-1], [-1])),
112 }
113
114 if output_file is None:
115 click.echo(json.dumps(result, indent=4, sort_keys=True))
116 else:
117 with open(output_file, 'w+') as out_file:
118 json.dump(result, out_file, indent=4, sort_keys=True)
119 log.debug("Written to {0:s}".format(output_file))
120
```
Path: `src/pyhf/infer/__init__.py`
Content:
```
1 """Inference for Statistical Models."""
2
3 from .test_statistics import qmu
4 from .. import get_backend
5 from .calculators import AsymptoticCalculator
6
7
8 def hypotest(
9 poi_test, data, pdf, init_pars=None, par_bounds=None, qtilde=False, **kwargs
10 ):
11 r"""
12 Compute :math:`p`-values and test statistics for a single value of the parameter of interest.
13
14 Example:
15 >>> import pyhf
16 >>> pyhf.set_backend("numpy")
17 >>> model = pyhf.simplemodels.hepdata_like(
18 ... signal_data=[12.0, 11.0], bkg_data=[50.0, 52.0], bkg_uncerts=[3.0, 7.0]
19 ... )
20 >>> observations = [51, 48]
21 >>> data = pyhf.tensorlib.astensor(observations + model.config.auxdata)
22 >>> test_poi = 1.0
23 >>> CLs_obs, CLs_exp_band = pyhf.infer.hypotest(
24 ... test_poi, data, model, qtilde=True, return_expected_set=True
25 ... )
26 >>> print(CLs_obs)
27 [0.05251554]
28 >>> print(CLs_exp_band)
29 [[0.00260641]
30 [0.01382066]
31 [0.06445521]
32 [0.23526104]
33 [0.57304182]]
34
35 Args:
36 poi_test (Number or Tensor): The value of the parameter of interest (POI)
37 data (Number or Tensor): The root of the calculated test statistic given the Asimov data, :math:`\sqrt{q_{\mu,A}}`
38 pdf (~pyhf.pdf.Model): The HistFactory statistical model
39 init_pars (Array or Tensor): The initial parameter values to be used for minimization
40 par_bounds (Array or Tensor): The parameter value bounds to be used for minimization
41 qtilde (Bool): When ``True`` perform the calculation using the alternative test statistic, :math:`\tilde{q}`, as defined in Equation (62) of :xref:`arXiv:1007.1727`
42
43 Keyword Args:
44 return_tail_probs (bool): Bool for returning :math:`\textrm{CL}_{s+b}` and :math:`\textrm{CL}_{b}`
45 return_expected (bool): Bool for returning :math:`\textrm{CL}_{\textrm{exp}}`
46 return_expected_set (bool): Bool for returning the :math:`(-2,-1,0,1,2)\sigma` :math:`\textrm{CL}_{\textrm{exp}}` --- the "Brazil band"
47
48 Returns:
49 Tuple of Floats and lists of Floats:
50
51 - :math:`\textrm{CL}_{s}`: The :math:`p`-value compared to the given threshold :math:`\alpha`, typically taken to be :math:`0.05`, defined in :xref:`arXiv:1007.1727` as
52
53 .. math::
54
55 \textrm{CL}_{s} = \frac{\textrm{CL}_{s+b}}{\textrm{CL}_{b}} = \frac{p_{s+b}}{1-p_{b}}
56
57 to protect against excluding signal models in which there is little sensitivity. In the case that :math:`\textrm{CL}_{s} \leq \alpha` the given signal model is excluded.
58
59 - :math:`\left[\textrm{CL}_{s+b}, \textrm{CL}_{b}\right]`: The signal + background :math:`p`-value and 1 minus the background only :math:`p`-value as defined in Equations (75) and (76) of :xref:`arXiv:1007.1727`
60
61 .. math::
62
63 \textrm{CL}_{s+b} = p_{s+b} = \int\limits_{q_{\textrm{obs}}}^{\infty} f\left(q\,\middle|s+b\right)\,dq = 1 - \Phi\left(\frac{q_{\textrm{obs}} + 1/\sigma_{s+b}^{2}}{2/\sigma_{s+b}}\right)
64
65 .. math::
66
67 \textrm{CL}_{b} = 1- p_{b} = 1 - \int\limits_{-\infty}^{q_{\textrm{obs}}} f\left(q\,\middle|b\right)\,dq = 1 - \Phi\left(\frac{q_{\textrm{obs}} - 1/\sigma_{b}^{2}}{2/\sigma_{b}}\right)
68
69 with Equations (73) and (74) for the mean
70
71 .. math::
72
73 E\left[q\right] = \frac{1 - 2\mu}{\sigma^{2}}
74
75 and variance
76
77 .. math::
78
79 V\left[q\right] = \frac{4}{\sigma^{2}}
80
81 of the test statistic :math:`q` under the background only and and signal + background hypotheses. Only returned when ``return_tail_probs`` is ``True``.
82
83 - :math:`\textrm{CL}_{s,\textrm{exp}}`: The expected :math:`\textrm{CL}_{s}` value corresponding to the test statistic under the background only hypothesis :math:`\left(\mu=0\right)`. Only returned when ``return_expected`` is ``True``.
84
85 - :math:`\textrm{CL}_{s,\textrm{exp}}` band: The set of expected :math:`\textrm{CL}_{s}` values corresponding to the median significance of variations of the signal strength from the background only hypothesis :math:`\left(\mu=0\right)` at :math:`(-2,-1,0,1,2)\sigma`. That is, the :math:`p`-values that satisfy Equation (89) of :xref:`arXiv:1007.1727`
86
87 .. math::
88
89 \textrm{band}_{N\sigma} = \mu' + \sigma\,\Phi^{-1}\left(1-\alpha\right) \pm N\sigma
90
91 for :math:`\mu'=0` and :math:`N \in \left\{-2, -1, 0, 1, 2\right\}`. These values define the boundaries of an uncertainty band sometimes referred to as the "Brazil band". Only returned when ``return_expected_set`` is ``True``.
92
93 """
94 init_pars = init_pars or pdf.config.suggested_init()
95 par_bounds = par_bounds or pdf.config.suggested_bounds()
96 tensorlib, _ = get_backend()
97
98 calc = AsymptoticCalculator(data, pdf, init_pars, par_bounds, qtilde=qtilde)
99 teststat = calc.teststatistic(poi_test)
100 sig_plus_bkg_distribution, b_only_distribution = calc.distributions(poi_test)
101
102 CLsb = sig_plus_bkg_distribution.pvalue(teststat)
103 CLb = b_only_distribution.pvalue(teststat)
104 CLs = CLsb / CLb
105 CLsb, CLb, CLs = (
106 tensorlib.reshape(CLsb, (1,)),
107 tensorlib.reshape(CLb, (1,)),
108 tensorlib.reshape(CLs, (1,)),
109 )
110
111 _returns = [CLs]
112 if kwargs.get('return_tail_probs'):
113 _returns.append([CLsb, CLb])
114 if kwargs.get('return_expected_set'):
115 CLs_exp = []
116 for n_sigma in [2, 1, 0, -1, -2]:
117
118 expected_bonly_teststat = b_only_distribution.expected_value(n_sigma)
119
120 CLs = sig_plus_bkg_distribution.pvalue(
121 expected_bonly_teststat
122 ) / b_only_distribution.pvalue(expected_bonly_teststat)
123 CLs_exp.append(tensorlib.reshape(CLs, (1,)))
124 CLs_exp = tensorlib.astensor(CLs_exp)
125 if kwargs.get('return_expected'):
126 _returns.append(CLs_exp[2])
127 _returns.append(CLs_exp)
128 elif kwargs.get('return_expected'):
129 n_sigma = 0
130 expected_bonly_teststat = b_only_distribution.expected_value(n_sigma)
131
132 CLs = sig_plus_bkg_distribution.pvalue(
133 expected_bonly_teststat
134 ) / b_only_distribution.pvalue(expected_bonly_teststat)
135 _returns.append(tensorlib.reshape(CLs, (1,)))
136 # Enforce a consistent return type of the observed CLs
137 return tuple(_returns) if len(_returns) > 1 else _returns[0]
138
139
140 __all__ = ['qmu', 'hypotest']
141
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/pyhf/cli/infer.py b/src/pyhf/cli/infer.py
--- a/src/pyhf/cli/infer.py
+++ b/src/pyhf/cli/infer.py
@@ -107,8 +107,8 @@
testpoi, ws.data(model), model, qtilde=is_qtilde, return_expected_set=True
)
result = {
- 'CLs_obs': tensorlib.tolist(result[0])[0],
- 'CLs_exp': tensorlib.tolist(tensorlib.reshape(result[-1], [-1])),
+ 'CLs_obs': tensorlib.tolist(result[0]),
+ 'CLs_exp': [tensorlib.tolist(tensor) for tensor in result[-1]],
}
if output_file is None:
diff --git a/src/pyhf/infer/__init__.py b/src/pyhf/infer/__init__.py
--- a/src/pyhf/infer/__init__.py
+++ b/src/pyhf/infer/__init__.py
@@ -23,14 +23,10 @@
>>> CLs_obs, CLs_exp_band = pyhf.infer.hypotest(
... test_poi, data, model, qtilde=True, return_expected_set=True
... )
- >>> print(CLs_obs)
- [0.05251554]
- >>> print(CLs_exp_band)
- [[0.00260641]
- [0.01382066]
- [0.06445521]
- [0.23526104]
- [0.57304182]]
+ >>> CLs_obs
+ array(0.05251554)
+ >>> CLs_exp_band
+ [array(0.00260641), array(0.01382066), array(0.06445521), array(0.23526104), array(0.57304182)]
Args:
poi_test (Number or Tensor): The value of the parameter of interest (POI)
@@ -102,10 +98,11 @@
CLsb = sig_plus_bkg_distribution.pvalue(teststat)
CLb = b_only_distribution.pvalue(teststat)
CLs = CLsb / CLb
+ # Ensure that all CL values are 0-d tensors
CLsb, CLb, CLs = (
- tensorlib.reshape(CLsb, (1,)),
- tensorlib.reshape(CLb, (1,)),
- tensorlib.reshape(CLs, (1,)),
+ tensorlib.astensor(CLsb),
+ tensorlib.astensor(CLb),
+ tensorlib.astensor(CLs),
)
_returns = [CLs]
@@ -120,8 +117,7 @@
CLs = sig_plus_bkg_distribution.pvalue(
expected_bonly_teststat
) / b_only_distribution.pvalue(expected_bonly_teststat)
- CLs_exp.append(tensorlib.reshape(CLs, (1,)))
- CLs_exp = tensorlib.astensor(CLs_exp)
+ CLs_exp.append(tensorlib.astensor(CLs))
if kwargs.get('return_expected'):
_returns.append(CLs_exp[2])
_returns.append(CLs_exp)
@@ -132,7 +128,7 @@
CLs = sig_plus_bkg_distribution.pvalue(
expected_bonly_teststat
) / b_only_distribution.pvalue(expected_bonly_teststat)
- _returns.append(tensorlib.reshape(CLs, (1,)))
+ _returns.append(tensorlib.astensor(CLs))
# Enforce a consistent return type of the observed CLs
return tuple(_returns) if len(_returns) > 1 else _returns[0]
| {"golden_diff": "diff --git a/src/pyhf/cli/infer.py b/src/pyhf/cli/infer.py\n--- a/src/pyhf/cli/infer.py\n+++ b/src/pyhf/cli/infer.py\n@@ -107,8 +107,8 @@\n testpoi, ws.data(model), model, qtilde=is_qtilde, return_expected_set=True\n )\n result = {\n- 'CLs_obs': tensorlib.tolist(result[0])[0],\n- 'CLs_exp': tensorlib.tolist(tensorlib.reshape(result[-1], [-1])),\n+ 'CLs_obs': tensorlib.tolist(result[0]),\n+ 'CLs_exp': [tensorlib.tolist(tensor) for tensor in result[-1]],\n }\n \n if output_file is None:\ndiff --git a/src/pyhf/infer/__init__.py b/src/pyhf/infer/__init__.py\n--- a/src/pyhf/infer/__init__.py\n+++ b/src/pyhf/infer/__init__.py\n@@ -23,14 +23,10 @@\n >>> CLs_obs, CLs_exp_band = pyhf.infer.hypotest(\n ... test_poi, data, model, qtilde=True, return_expected_set=True\n ... )\n- >>> print(CLs_obs)\n- [0.05251554]\n- >>> print(CLs_exp_band)\n- [[0.00260641]\n- [0.01382066]\n- [0.06445521]\n- [0.23526104]\n- [0.57304182]]\n+ >>> CLs_obs\n+ array(0.05251554)\n+ >>> CLs_exp_band\n+ [array(0.00260641), array(0.01382066), array(0.06445521), array(0.23526104), array(0.57304182)]\n \n Args:\n poi_test (Number or Tensor): The value of the parameter of interest (POI)\n@@ -102,10 +98,11 @@\n CLsb = sig_plus_bkg_distribution.pvalue(teststat)\n CLb = b_only_distribution.pvalue(teststat)\n CLs = CLsb / CLb\n+ # Ensure that all CL values are 0-d tensors\n CLsb, CLb, CLs = (\n- tensorlib.reshape(CLsb, (1,)),\n- tensorlib.reshape(CLb, (1,)),\n- tensorlib.reshape(CLs, (1,)),\n+ tensorlib.astensor(CLsb),\n+ tensorlib.astensor(CLb),\n+ tensorlib.astensor(CLs),\n )\n \n _returns = [CLs]\n@@ -120,8 +117,7 @@\n CLs = sig_plus_bkg_distribution.pvalue(\n expected_bonly_teststat\n ) / b_only_distribution.pvalue(expected_bonly_teststat)\n- CLs_exp.append(tensorlib.reshape(CLs, (1,)))\n- CLs_exp = tensorlib.astensor(CLs_exp)\n+ CLs_exp.append(tensorlib.astensor(CLs))\n if kwargs.get('return_expected'):\n _returns.append(CLs_exp[2])\n _returns.append(CLs_exp)\n@@ -132,7 +128,7 @@\n CLs = sig_plus_bkg_distribution.pvalue(\n expected_bonly_teststat\n ) / b_only_distribution.pvalue(expected_bonly_teststat)\n- _returns.append(tensorlib.reshape(CLs, (1,)))\n+ _returns.append(tensorlib.astensor(CLs))\n # Enforce a consistent return type of the observed CLs\n return tuple(_returns) if len(_returns) > 1 else _returns[0]\n", "issue": "API: change CLs to be scalar\n# Description\r\n\r\nright now it's returning (1,) vectors\r\n\n", "before_files": [{"content": "\"\"\"The inference CLI group.\"\"\"\nimport logging\n\nimport click\nimport json\n\nfrom ..utils import EqDelimStringParamType\nfrom ..infer import hypotest\nfrom ..workspace import Workspace\nfrom .. import get_backend, set_backend, optimize\n\nlog = logging.getLogger(__name__)\n\n\[email protected](name='infer')\ndef cli():\n \"\"\"Infererence CLI group.\"\"\"\n\n\[email protected]()\[email protected]('workspace', default='-')\[email protected](\n '--output-file',\n help='The location of the output json file. 
If not specified, prints to screen.',\n default=None,\n)\[email protected]('--measurement', default=None)\[email protected]('-p', '--patch', multiple=True)\[email protected]('--testpoi', default=1.0)\[email protected]('--teststat', type=click.Choice(['q', 'qtilde']), default='qtilde')\[email protected](\n '--backend',\n type=click.Choice(['numpy', 'pytorch', 'tensorflow', 'jax', 'np', 'torch', 'tf']),\n help='The tensor backend used for the calculation.',\n default='numpy',\n)\[email protected]('--optimizer')\[email protected]('--optconf', type=EqDelimStringParamType(), multiple=True)\ndef cls(\n workspace,\n output_file,\n measurement,\n patch,\n testpoi,\n teststat,\n backend,\n optimizer,\n optconf,\n):\n \"\"\"\n Compute CLs value(s) for a given pyhf workspace.\n\n Example:\n\n .. code-block:: shell\n\n $ curl -sL https://git.io/JJYDE | pyhf cls\n\n \\b\n {\n \"CLs_exp\": [\n 0.07807427911686156,\n 0.17472571775474618,\n 0.35998495263681285,\n 0.6343568235898907,\n 0.8809947004472013\n ],\n \"CLs_obs\": 0.3599845631401915\n }\n \"\"\"\n with click.open_file(workspace, 'r') as specstream:\n spec = json.load(specstream)\n\n ws = Workspace(spec)\n\n is_qtilde = teststat == 'qtilde'\n\n patches = [json.loads(click.open_file(pfile, 'r').read()) for pfile in patch]\n model = ws.model(\n measurement_name=measurement,\n patches=patches,\n modifier_settings={\n 'normsys': {'interpcode': 'code4'},\n 'histosys': {'interpcode': 'code4p'},\n },\n )\n\n # set the backend if not NumPy\n if backend in ['pytorch', 'torch']:\n set_backend(\"pytorch\", precision=\"64b\")\n elif backend in ['tensorflow', 'tf']:\n set_backend(\"tensorflow\", precision=\"64b\")\n elif backend in ['jax']:\n set_backend(\"jax\")\n tensorlib, _ = get_backend()\n\n optconf = {k: v for item in optconf for k, v in item.items()}\n\n # set the new optimizer\n if optimizer:\n new_optimizer = getattr(optimize, optimizer) or getattr(\n optimize, f'{optimizer}_optimizer'\n )\n set_backend(tensorlib, new_optimizer(**optconf))\n\n result = hypotest(\n testpoi, ws.data(model), model, qtilde=is_qtilde, return_expected_set=True\n )\n result = {\n 'CLs_obs': tensorlib.tolist(result[0])[0],\n 'CLs_exp': tensorlib.tolist(tensorlib.reshape(result[-1], [-1])),\n }\n\n if output_file is None:\n click.echo(json.dumps(result, indent=4, sort_keys=True))\n else:\n with open(output_file, 'w+') as out_file:\n json.dump(result, out_file, indent=4, sort_keys=True)\n log.debug(\"Written to {0:s}\".format(output_file))\n", "path": "src/pyhf/cli/infer.py"}, {"content": "\"\"\"Inference for Statistical Models.\"\"\"\n\nfrom .test_statistics import qmu\nfrom .. import get_backend\nfrom .calculators import AsymptoticCalculator\n\n\ndef hypotest(\n poi_test, data, pdf, init_pars=None, par_bounds=None, qtilde=False, **kwargs\n):\n r\"\"\"\n Compute :math:`p`-values and test statistics for a single value of the parameter of interest.\n\n Example:\n >>> import pyhf\n >>> pyhf.set_backend(\"numpy\")\n >>> model = pyhf.simplemodels.hepdata_like(\n ... signal_data=[12.0, 11.0], bkg_data=[50.0, 52.0], bkg_uncerts=[3.0, 7.0]\n ... )\n >>> observations = [51, 48]\n >>> data = pyhf.tensorlib.astensor(observations + model.config.auxdata)\n >>> test_poi = 1.0\n >>> CLs_obs, CLs_exp_band = pyhf.infer.hypotest(\n ... test_poi, data, model, qtilde=True, return_expected_set=True\n ... 
)\n >>> print(CLs_obs)\n [0.05251554]\n >>> print(CLs_exp_band)\n [[0.00260641]\n [0.01382066]\n [0.06445521]\n [0.23526104]\n [0.57304182]]\n\n Args:\n poi_test (Number or Tensor): The value of the parameter of interest (POI)\n data (Number or Tensor): The root of the calculated test statistic given the Asimov data, :math:`\\sqrt{q_{\\mu,A}}`\n pdf (~pyhf.pdf.Model): The HistFactory statistical model\n init_pars (Array or Tensor): The initial parameter values to be used for minimization\n par_bounds (Array or Tensor): The parameter value bounds to be used for minimization\n qtilde (Bool): When ``True`` perform the calculation using the alternative test statistic, :math:`\\tilde{q}`, as defined in Equation (62) of :xref:`arXiv:1007.1727`\n\n Keyword Args:\n return_tail_probs (bool): Bool for returning :math:`\\textrm{CL}_{s+b}` and :math:`\\textrm{CL}_{b}`\n return_expected (bool): Bool for returning :math:`\\textrm{CL}_{\\textrm{exp}}`\n return_expected_set (bool): Bool for returning the :math:`(-2,-1,0,1,2)\\sigma` :math:`\\textrm{CL}_{\\textrm{exp}}` --- the \"Brazil band\"\n\n Returns:\n Tuple of Floats and lists of Floats:\n\n - :math:`\\textrm{CL}_{s}`: The :math:`p`-value compared to the given threshold :math:`\\alpha`, typically taken to be :math:`0.05`, defined in :xref:`arXiv:1007.1727` as\n\n .. math::\n\n \\textrm{CL}_{s} = \\frac{\\textrm{CL}_{s+b}}{\\textrm{CL}_{b}} = \\frac{p_{s+b}}{1-p_{b}}\n\n to protect against excluding signal models in which there is little sensitivity. In the case that :math:`\\textrm{CL}_{s} \\leq \\alpha` the given signal model is excluded.\n\n - :math:`\\left[\\textrm{CL}_{s+b}, \\textrm{CL}_{b}\\right]`: The signal + background :math:`p`-value and 1 minus the background only :math:`p`-value as defined in Equations (75) and (76) of :xref:`arXiv:1007.1727`\n\n .. math::\n\n \\textrm{CL}_{s+b} = p_{s+b} = \\int\\limits_{q_{\\textrm{obs}}}^{\\infty} f\\left(q\\,\\middle|s+b\\right)\\,dq = 1 - \\Phi\\left(\\frac{q_{\\textrm{obs}} + 1/\\sigma_{s+b}^{2}}{2/\\sigma_{s+b}}\\right)\n\n .. math::\n\n \\textrm{CL}_{b} = 1- p_{b} = 1 - \\int\\limits_{-\\infty}^{q_{\\textrm{obs}}} f\\left(q\\,\\middle|b\\right)\\,dq = 1 - \\Phi\\left(\\frac{q_{\\textrm{obs}} - 1/\\sigma_{b}^{2}}{2/\\sigma_{b}}\\right)\n\n with Equations (73) and (74) for the mean\n\n .. math::\n\n E\\left[q\\right] = \\frac{1 - 2\\mu}{\\sigma^{2}}\n\n and variance\n\n .. math::\n\n V\\left[q\\right] = \\frac{4}{\\sigma^{2}}\n\n of the test statistic :math:`q` under the background only and and signal + background hypotheses. Only returned when ``return_tail_probs`` is ``True``.\n\n - :math:`\\textrm{CL}_{s,\\textrm{exp}}`: The expected :math:`\\textrm{CL}_{s}` value corresponding to the test statistic under the background only hypothesis :math:`\\left(\\mu=0\\right)`. Only returned when ``return_expected`` is ``True``.\n\n - :math:`\\textrm{CL}_{s,\\textrm{exp}}` band: The set of expected :math:`\\textrm{CL}_{s}` values corresponding to the median significance of variations of the signal strength from the background only hypothesis :math:`\\left(\\mu=0\\right)` at :math:`(-2,-1,0,1,2)\\sigma`. That is, the :math:`p`-values that satisfy Equation (89) of :xref:`arXiv:1007.1727`\n\n .. math::\n\n \\textrm{band}_{N\\sigma} = \\mu' + \\sigma\\,\\Phi^{-1}\\left(1-\\alpha\\right) \\pm N\\sigma\n\n for :math:`\\mu'=0` and :math:`N \\in \\left\\{-2, -1, 0, 1, 2\\right\\}`. These values define the boundaries of an uncertainty band sometimes referred to as the \"Brazil band\". 
Only returned when ``return_expected_set`` is ``True``.\n\n \"\"\"\n init_pars = init_pars or pdf.config.suggested_init()\n par_bounds = par_bounds or pdf.config.suggested_bounds()\n tensorlib, _ = get_backend()\n\n calc = AsymptoticCalculator(data, pdf, init_pars, par_bounds, qtilde=qtilde)\n teststat = calc.teststatistic(poi_test)\n sig_plus_bkg_distribution, b_only_distribution = calc.distributions(poi_test)\n\n CLsb = sig_plus_bkg_distribution.pvalue(teststat)\n CLb = b_only_distribution.pvalue(teststat)\n CLs = CLsb / CLb\n CLsb, CLb, CLs = (\n tensorlib.reshape(CLsb, (1,)),\n tensorlib.reshape(CLb, (1,)),\n tensorlib.reshape(CLs, (1,)),\n )\n\n _returns = [CLs]\n if kwargs.get('return_tail_probs'):\n _returns.append([CLsb, CLb])\n if kwargs.get('return_expected_set'):\n CLs_exp = []\n for n_sigma in [2, 1, 0, -1, -2]:\n\n expected_bonly_teststat = b_only_distribution.expected_value(n_sigma)\n\n CLs = sig_plus_bkg_distribution.pvalue(\n expected_bonly_teststat\n ) / b_only_distribution.pvalue(expected_bonly_teststat)\n CLs_exp.append(tensorlib.reshape(CLs, (1,)))\n CLs_exp = tensorlib.astensor(CLs_exp)\n if kwargs.get('return_expected'):\n _returns.append(CLs_exp[2])\n _returns.append(CLs_exp)\n elif kwargs.get('return_expected'):\n n_sigma = 0\n expected_bonly_teststat = b_only_distribution.expected_value(n_sigma)\n\n CLs = sig_plus_bkg_distribution.pvalue(\n expected_bonly_teststat\n ) / b_only_distribution.pvalue(expected_bonly_teststat)\n _returns.append(tensorlib.reshape(CLs, (1,)))\n # Enforce a consistent return type of the observed CLs\n return tuple(_returns) if len(_returns) > 1 else _returns[0]\n\n\n__all__ = ['qmu', 'hypotest']\n", "path": "src/pyhf/infer/__init__.py"}], "after_files": [{"content": "\"\"\"The inference CLI group.\"\"\"\nimport logging\n\nimport click\nimport json\n\nfrom ..utils import EqDelimStringParamType\nfrom ..infer import hypotest\nfrom ..workspace import Workspace\nfrom .. import get_backend, set_backend, optimize\n\nlog = logging.getLogger(__name__)\n\n\[email protected](name='infer')\ndef cli():\n \"\"\"Infererence CLI group.\"\"\"\n\n\[email protected]()\[email protected]('workspace', default='-')\[email protected](\n '--output-file',\n help='The location of the output json file. If not specified, prints to screen.',\n default=None,\n)\[email protected]('--measurement', default=None)\[email protected]('-p', '--patch', multiple=True)\[email protected]('--testpoi', default=1.0)\[email protected]('--teststat', type=click.Choice(['q', 'qtilde']), default='qtilde')\[email protected](\n '--backend',\n type=click.Choice(['numpy', 'pytorch', 'tensorflow', 'jax', 'np', 'torch', 'tf']),\n help='The tensor backend used for the calculation.',\n default='numpy',\n)\[email protected]('--optimizer')\[email protected]('--optconf', type=EqDelimStringParamType(), multiple=True)\ndef cls(\n workspace,\n output_file,\n measurement,\n patch,\n testpoi,\n teststat,\n backend,\n optimizer,\n optconf,\n):\n \"\"\"\n Compute CLs value(s) for a given pyhf workspace.\n\n Example:\n\n .. 
code-block:: shell\n\n $ curl -sL https://git.io/JJYDE | pyhf cls\n\n \\b\n {\n \"CLs_exp\": [\n 0.07807427911686156,\n 0.17472571775474618,\n 0.35998495263681285,\n 0.6343568235898907,\n 0.8809947004472013\n ],\n \"CLs_obs\": 0.3599845631401915\n }\n \"\"\"\n with click.open_file(workspace, 'r') as specstream:\n spec = json.load(specstream)\n\n ws = Workspace(spec)\n\n is_qtilde = teststat == 'qtilde'\n\n patches = [json.loads(click.open_file(pfile, 'r').read()) for pfile in patch]\n model = ws.model(\n measurement_name=measurement,\n patches=patches,\n modifier_settings={\n 'normsys': {'interpcode': 'code4'},\n 'histosys': {'interpcode': 'code4p'},\n },\n )\n\n # set the backend if not NumPy\n if backend in ['pytorch', 'torch']:\n set_backend(\"pytorch\", precision=\"64b\")\n elif backend in ['tensorflow', 'tf']:\n set_backend(\"tensorflow\", precision=\"64b\")\n elif backend in ['jax']:\n set_backend(\"jax\")\n tensorlib, _ = get_backend()\n\n optconf = {k: v for item in optconf for k, v in item.items()}\n\n # set the new optimizer\n if optimizer:\n new_optimizer = getattr(optimize, optimizer) or getattr(\n optimize, f'{optimizer}_optimizer'\n )\n set_backend(tensorlib, new_optimizer(**optconf))\n\n result = hypotest(\n testpoi, ws.data(model), model, qtilde=is_qtilde, return_expected_set=True\n )\n result = {\n 'CLs_obs': tensorlib.tolist(result[0]),\n 'CLs_exp': [tensorlib.tolist(tensor) for tensor in result[-1]],\n }\n\n if output_file is None:\n click.echo(json.dumps(result, indent=4, sort_keys=True))\n else:\n with open(output_file, 'w+') as out_file:\n json.dump(result, out_file, indent=4, sort_keys=True)\n log.debug(\"Written to {0:s}\".format(output_file))\n", "path": "src/pyhf/cli/infer.py"}, {"content": "\"\"\"Inference for Statistical Models.\"\"\"\n\nfrom .test_statistics import qmu\nfrom .. import get_backend\nfrom .calculators import AsymptoticCalculator\n\n\ndef hypotest(\n poi_test, data, pdf, init_pars=None, par_bounds=None, qtilde=False, **kwargs\n):\n r\"\"\"\n Compute :math:`p`-values and test statistics for a single value of the parameter of interest.\n\n Example:\n >>> import pyhf\n >>> pyhf.set_backend(\"numpy\")\n >>> model = pyhf.simplemodels.hepdata_like(\n ... signal_data=[12.0, 11.0], bkg_data=[50.0, 52.0], bkg_uncerts=[3.0, 7.0]\n ... )\n >>> observations = [51, 48]\n >>> data = pyhf.tensorlib.astensor(observations + model.config.auxdata)\n >>> test_poi = 1.0\n >>> CLs_obs, CLs_exp_band = pyhf.infer.hypotest(\n ... test_poi, data, model, qtilde=True, return_expected_set=True\n ... 
)\n >>> CLs_obs\n array(0.05251554)\n >>> CLs_exp_band\n [array(0.00260641), array(0.01382066), array(0.06445521), array(0.23526104), array(0.57304182)]\n\n Args:\n poi_test (Number or Tensor): The value of the parameter of interest (POI)\n data (Number or Tensor): The root of the calculated test statistic given the Asimov data, :math:`\\sqrt{q_{\\mu,A}}`\n pdf (~pyhf.pdf.Model): The HistFactory statistical model\n init_pars (Array or Tensor): The initial parameter values to be used for minimization\n par_bounds (Array or Tensor): The parameter value bounds to be used for minimization\n qtilde (Bool): When ``True`` perform the calculation using the alternative test statistic, :math:`\\tilde{q}`, as defined in Equation (62) of :xref:`arXiv:1007.1727`\n\n Keyword Args:\n return_tail_probs (bool): Bool for returning :math:`\\textrm{CL}_{s+b}` and :math:`\\textrm{CL}_{b}`\n return_expected (bool): Bool for returning :math:`\\textrm{CL}_{\\textrm{exp}}`\n return_expected_set (bool): Bool for returning the :math:`(-2,-1,0,1,2)\\sigma` :math:`\\textrm{CL}_{\\textrm{exp}}` --- the \"Brazil band\"\n\n Returns:\n Tuple of Floats and lists of Floats:\n\n - :math:`\\textrm{CL}_{s}`: The :math:`p`-value compared to the given threshold :math:`\\alpha`, typically taken to be :math:`0.05`, defined in :xref:`arXiv:1007.1727` as\n\n .. math::\n\n \\textrm{CL}_{s} = \\frac{\\textrm{CL}_{s+b}}{\\textrm{CL}_{b}} = \\frac{p_{s+b}}{1-p_{b}}\n\n to protect against excluding signal models in which there is little sensitivity. In the case that :math:`\\textrm{CL}_{s} \\leq \\alpha` the given signal model is excluded.\n\n - :math:`\\left[\\textrm{CL}_{s+b}, \\textrm{CL}_{b}\\right]`: The signal + background :math:`p`-value and 1 minus the background only :math:`p`-value as defined in Equations (75) and (76) of :xref:`arXiv:1007.1727`\n\n .. math::\n\n \\textrm{CL}_{s+b} = p_{s+b} = \\int\\limits_{q_{\\textrm{obs}}}^{\\infty} f\\left(q\\,\\middle|s+b\\right)\\,dq = 1 - \\Phi\\left(\\frac{q_{\\textrm{obs}} + 1/\\sigma_{s+b}^{2}}{2/\\sigma_{s+b}}\\right)\n\n .. math::\n\n \\textrm{CL}_{b} = 1- p_{b} = 1 - \\int\\limits_{-\\infty}^{q_{\\textrm{obs}}} f\\left(q\\,\\middle|b\\right)\\,dq = 1 - \\Phi\\left(\\frac{q_{\\textrm{obs}} - 1/\\sigma_{b}^{2}}{2/\\sigma_{b}}\\right)\n\n with Equations (73) and (74) for the mean\n\n .. math::\n\n E\\left[q\\right] = \\frac{1 - 2\\mu}{\\sigma^{2}}\n\n and variance\n\n .. math::\n\n V\\left[q\\right] = \\frac{4}{\\sigma^{2}}\n\n of the test statistic :math:`q` under the background only and and signal + background hypotheses. Only returned when ``return_tail_probs`` is ``True``.\n\n - :math:`\\textrm{CL}_{s,\\textrm{exp}}`: The expected :math:`\\textrm{CL}_{s}` value corresponding to the test statistic under the background only hypothesis :math:`\\left(\\mu=0\\right)`. Only returned when ``return_expected`` is ``True``.\n\n - :math:`\\textrm{CL}_{s,\\textrm{exp}}` band: The set of expected :math:`\\textrm{CL}_{s}` values corresponding to the median significance of variations of the signal strength from the background only hypothesis :math:`\\left(\\mu=0\\right)` at :math:`(-2,-1,0,1,2)\\sigma`. That is, the :math:`p`-values that satisfy Equation (89) of :xref:`arXiv:1007.1727`\n\n .. math::\n\n \\textrm{band}_{N\\sigma} = \\mu' + \\sigma\\,\\Phi^{-1}\\left(1-\\alpha\\right) \\pm N\\sigma\n\n for :math:`\\mu'=0` and :math:`N \\in \\left\\{-2, -1, 0, 1, 2\\right\\}`. These values define the boundaries of an uncertainty band sometimes referred to as the \"Brazil band\". 
Only returned when ``return_expected_set`` is ``True``.\n\n \"\"\"\n init_pars = init_pars or pdf.config.suggested_init()\n par_bounds = par_bounds or pdf.config.suggested_bounds()\n tensorlib, _ = get_backend()\n\n calc = AsymptoticCalculator(data, pdf, init_pars, par_bounds, qtilde=qtilde)\n teststat = calc.teststatistic(poi_test)\n sig_plus_bkg_distribution, b_only_distribution = calc.distributions(poi_test)\n\n CLsb = sig_plus_bkg_distribution.pvalue(teststat)\n CLb = b_only_distribution.pvalue(teststat)\n CLs = CLsb / CLb\n # Ensure that all CL values are 0-d tensors\n CLsb, CLb, CLs = (\n tensorlib.astensor(CLsb),\n tensorlib.astensor(CLb),\n tensorlib.astensor(CLs),\n )\n\n _returns = [CLs]\n if kwargs.get('return_tail_probs'):\n _returns.append([CLsb, CLb])\n if kwargs.get('return_expected_set'):\n CLs_exp = []\n for n_sigma in [2, 1, 0, -1, -2]:\n\n expected_bonly_teststat = b_only_distribution.expected_value(n_sigma)\n\n CLs = sig_plus_bkg_distribution.pvalue(\n expected_bonly_teststat\n ) / b_only_distribution.pvalue(expected_bonly_teststat)\n CLs_exp.append(tensorlib.astensor(CLs))\n if kwargs.get('return_expected'):\n _returns.append(CLs_exp[2])\n _returns.append(CLs_exp)\n elif kwargs.get('return_expected'):\n n_sigma = 0\n expected_bonly_teststat = b_only_distribution.expected_value(n_sigma)\n\n CLs = sig_plus_bkg_distribution.pvalue(\n expected_bonly_teststat\n ) / b_only_distribution.pvalue(expected_bonly_teststat)\n _returns.append(tensorlib.astensor(CLs))\n # Enforce a consistent return type of the observed CLs\n return tuple(_returns) if len(_returns) > 1 else _returns[0]\n\n\n__all__ = ['qmu', 'hypotest']\n", "path": "src/pyhf/infer/__init__.py"}]} | 3,789 | 865 |
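To make the behavioural change in this pyhf record concrete: after the patch, `hypotest` returns 0-d tensors rather than shape-`(1,)` vectors. The sketch below simply replays the example from the patched docstring in the golden diff above (same model, data and expected numbers); nothing in it goes beyond what the record already shows.

```python
import pyhf

pyhf.set_backend("numpy")
model = pyhf.simplemodels.hepdata_like(
    signal_data=[12.0, 11.0], bkg_data=[50.0, 52.0], bkg_uncerts=[3.0, 7.0]
)
data = pyhf.tensorlib.astensor([51, 48] + model.config.auxdata)

CLs_obs, CLs_exp_band = pyhf.infer.hypotest(
    1.0, data, model, qtilde=True, return_expected_set=True
)
print(CLs_obs.shape)  # () with the patch applied; (1,) beforehand
print(CLs_obs)        # array(0.05251554), per the updated docstring
```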
gh_patches_debug_66455 | rasdani/github-patches | git_diff | pyca__cryptography-8319 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Incorrect docstrings in x25519 and x448 `.public_key()` methods
See:
https://github.com/pyca/cryptography/blob/127a2860740c77f45362e68e0ed7d2d108a39033/src/cryptography/hazmat/primitives/asymmetric/x25519.py#L60-L64
https://github.com/pyca/cryptography/blob/127a2860740c77f45362e68e0ed7d2d108a39033/src/cryptography/hazmat/primitives/asymmetric/x448.py#L60-L64
In both instances, the method does not return serialised bytes, but a public key object. The full [generated documentation](https://cryptography.io/en/latest/hazmat/primitives/asymmetric/x25519/#cryptography.hazmat.primitives.asymmetric.x25519.X25519PrivateKey.public_key) is correct, as are the Ed* docstrings.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/cryptography/hazmat/primitives/asymmetric/x448.py`
Content:
```
1 # This file is dual licensed under the terms of the Apache License, Version
2 # 2.0, and the BSD License. See the LICENSE file in the root of this repository
3 # for complete details.
4
5
6 import abc
7
8 from cryptography.exceptions import UnsupportedAlgorithm, _Reasons
9 from cryptography.hazmat.primitives import _serialization
10
11
12 class X448PublicKey(metaclass=abc.ABCMeta):
13 @classmethod
14 def from_public_bytes(cls, data: bytes) -> "X448PublicKey":
15 from cryptography.hazmat.backends.openssl.backend import backend
16
17 if not backend.x448_supported():
18 raise UnsupportedAlgorithm(
19 "X448 is not supported by this version of OpenSSL.",
20 _Reasons.UNSUPPORTED_EXCHANGE_ALGORITHM,
21 )
22
23 return backend.x448_load_public_bytes(data)
24
25 @abc.abstractmethod
26 def public_bytes(
27 self,
28 encoding: _serialization.Encoding,
29 format: _serialization.PublicFormat,
30 ) -> bytes:
31 """
32 The serialized bytes of the public key.
33 """
34
35
36 class X448PrivateKey(metaclass=abc.ABCMeta):
37 @classmethod
38 def generate(cls) -> "X448PrivateKey":
39 from cryptography.hazmat.backends.openssl.backend import backend
40
41 if not backend.x448_supported():
42 raise UnsupportedAlgorithm(
43 "X448 is not supported by this version of OpenSSL.",
44 _Reasons.UNSUPPORTED_EXCHANGE_ALGORITHM,
45 )
46 return backend.x448_generate_key()
47
48 @classmethod
49 def from_private_bytes(cls, data: bytes) -> "X448PrivateKey":
50 from cryptography.hazmat.backends.openssl.backend import backend
51
52 if not backend.x448_supported():
53 raise UnsupportedAlgorithm(
54 "X448 is not supported by this version of OpenSSL.",
55 _Reasons.UNSUPPORTED_EXCHANGE_ALGORITHM,
56 )
57
58 return backend.x448_load_private_bytes(data)
59
60 @abc.abstractmethod
61 def public_key(self) -> X448PublicKey:
62 """
63 The serialized bytes of the public key.
64 """
65
66 @abc.abstractmethod
67 def private_bytes(
68 self,
69 encoding: _serialization.Encoding,
70 format: _serialization.PrivateFormat,
71 encryption_algorithm: _serialization.KeySerializationEncryption,
72 ) -> bytes:
73 """
74 The serialized bytes of the private key.
75 """
76
77 @abc.abstractmethod
78 def exchange(self, peer_public_key: X448PublicKey) -> bytes:
79 """
80 Performs a key exchange operation using the provided peer's public key.
81 """
82
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/cryptography/hazmat/primitives/asymmetric/x448.py b/src/cryptography/hazmat/primitives/asymmetric/x448.py
--- a/src/cryptography/hazmat/primitives/asymmetric/x448.py
+++ b/src/cryptography/hazmat/primitives/asymmetric/x448.py
@@ -60,7 +60,7 @@
@abc.abstractmethod
def public_key(self) -> X448PublicKey:
"""
- The serialized bytes of the public key.
+ Returns the public key associated with this private key
"""
@abc.abstractmethod
| {"golden_diff": "diff --git a/src/cryptography/hazmat/primitives/asymmetric/x448.py b/src/cryptography/hazmat/primitives/asymmetric/x448.py\n--- a/src/cryptography/hazmat/primitives/asymmetric/x448.py\n+++ b/src/cryptography/hazmat/primitives/asymmetric/x448.py\n@@ -60,7 +60,7 @@\n @abc.abstractmethod\n def public_key(self) -> X448PublicKey:\n \"\"\"\n- The serialized bytes of the public key.\n+ Returns the public key associated with this private key\n \"\"\"\n \n @abc.abstractmethod\n", "issue": "Incorrect docstrings in x25519 and x448 `.public_key()` methods\nSee:\r\n\r\nhttps://github.com/pyca/cryptography/blob/127a2860740c77f45362e68e0ed7d2d108a39033/src/cryptography/hazmat/primitives/asymmetric/x25519.py#L60-L64\r\n\r\nhttps://github.com/pyca/cryptography/blob/127a2860740c77f45362e68e0ed7d2d108a39033/src/cryptography/hazmat/primitives/asymmetric/x448.py#L60-L64\r\n\r\nIn both instances, the method does not return serialised bytes, but a public key object. The full [generated documentation](https://cryptography.io/en/latest/hazmat/primitives/asymmetric/x25519/#cryptography.hazmat.primitives.asymmetric.x25519.X25519PrivateKey.public_key) is correct, as are the Ed* docstrings.\n", "before_files": [{"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\n\nimport abc\n\nfrom cryptography.exceptions import UnsupportedAlgorithm, _Reasons\nfrom cryptography.hazmat.primitives import _serialization\n\n\nclass X448PublicKey(metaclass=abc.ABCMeta):\n @classmethod\n def from_public_bytes(cls, data: bytes) -> \"X448PublicKey\":\n from cryptography.hazmat.backends.openssl.backend import backend\n\n if not backend.x448_supported():\n raise UnsupportedAlgorithm(\n \"X448 is not supported by this version of OpenSSL.\",\n _Reasons.UNSUPPORTED_EXCHANGE_ALGORITHM,\n )\n\n return backend.x448_load_public_bytes(data)\n\n @abc.abstractmethod\n def public_bytes(\n self,\n encoding: _serialization.Encoding,\n format: _serialization.PublicFormat,\n ) -> bytes:\n \"\"\"\n The serialized bytes of the public key.\n \"\"\"\n\n\nclass X448PrivateKey(metaclass=abc.ABCMeta):\n @classmethod\n def generate(cls) -> \"X448PrivateKey\":\n from cryptography.hazmat.backends.openssl.backend import backend\n\n if not backend.x448_supported():\n raise UnsupportedAlgorithm(\n \"X448 is not supported by this version of OpenSSL.\",\n _Reasons.UNSUPPORTED_EXCHANGE_ALGORITHM,\n )\n return backend.x448_generate_key()\n\n @classmethod\n def from_private_bytes(cls, data: bytes) -> \"X448PrivateKey\":\n from cryptography.hazmat.backends.openssl.backend import backend\n\n if not backend.x448_supported():\n raise UnsupportedAlgorithm(\n \"X448 is not supported by this version of OpenSSL.\",\n _Reasons.UNSUPPORTED_EXCHANGE_ALGORITHM,\n )\n\n return backend.x448_load_private_bytes(data)\n\n @abc.abstractmethod\n def public_key(self) -> X448PublicKey:\n \"\"\"\n The serialized bytes of the public key.\n \"\"\"\n\n @abc.abstractmethod\n def private_bytes(\n self,\n encoding: _serialization.Encoding,\n format: _serialization.PrivateFormat,\n encryption_algorithm: _serialization.KeySerializationEncryption,\n ) -> bytes:\n \"\"\"\n The serialized bytes of the private key.\n \"\"\"\n\n @abc.abstractmethod\n def exchange(self, peer_public_key: X448PublicKey) -> bytes:\n \"\"\"\n Performs a key exchange operation using the provided peer's public key.\n \"\"\"\n", "path": 
"src/cryptography/hazmat/primitives/asymmetric/x448.py"}], "after_files": [{"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\n\nimport abc\n\nfrom cryptography.exceptions import UnsupportedAlgorithm, _Reasons\nfrom cryptography.hazmat.primitives import _serialization\n\n\nclass X448PublicKey(metaclass=abc.ABCMeta):\n @classmethod\n def from_public_bytes(cls, data: bytes) -> \"X448PublicKey\":\n from cryptography.hazmat.backends.openssl.backend import backend\n\n if not backend.x448_supported():\n raise UnsupportedAlgorithm(\n \"X448 is not supported by this version of OpenSSL.\",\n _Reasons.UNSUPPORTED_EXCHANGE_ALGORITHM,\n )\n\n return backend.x448_load_public_bytes(data)\n\n @abc.abstractmethod\n def public_bytes(\n self,\n encoding: _serialization.Encoding,\n format: _serialization.PublicFormat,\n ) -> bytes:\n \"\"\"\n The serialized bytes of the public key.\n \"\"\"\n\n\nclass X448PrivateKey(metaclass=abc.ABCMeta):\n @classmethod\n def generate(cls) -> \"X448PrivateKey\":\n from cryptography.hazmat.backends.openssl.backend import backend\n\n if not backend.x448_supported():\n raise UnsupportedAlgorithm(\n \"X448 is not supported by this version of OpenSSL.\",\n _Reasons.UNSUPPORTED_EXCHANGE_ALGORITHM,\n )\n return backend.x448_generate_key()\n\n @classmethod\n def from_private_bytes(cls, data: bytes) -> \"X448PrivateKey\":\n from cryptography.hazmat.backends.openssl.backend import backend\n\n if not backend.x448_supported():\n raise UnsupportedAlgorithm(\n \"X448 is not supported by this version of OpenSSL.\",\n _Reasons.UNSUPPORTED_EXCHANGE_ALGORITHM,\n )\n\n return backend.x448_load_private_bytes(data)\n\n @abc.abstractmethod\n def public_key(self) -> X448PublicKey:\n \"\"\"\n Returns the public key associated with this private key\n \"\"\"\n\n @abc.abstractmethod\n def private_bytes(\n self,\n encoding: _serialization.Encoding,\n format: _serialization.PrivateFormat,\n encryption_algorithm: _serialization.KeySerializationEncryption,\n ) -> bytes:\n \"\"\"\n The serialized bytes of the private key.\n \"\"\"\n\n @abc.abstractmethod\n def exchange(self, peer_public_key: X448PublicKey) -> bytes:\n \"\"\"\n Performs a key exchange operation using the provided peer's public key.\n \"\"\"\n", "path": "src/cryptography/hazmat/primitives/asymmetric/x448.py"}]} | 1,236 | 136 |
gh_patches_debug_30497 | rasdani/github-patches | git_diff | quantumlib__Cirq-5786 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
merge_single_qubit_gates_to_phxz raises IndexError when encountering GlobalPhaseGate
**Description of the issue**
**How to reproduce the issue**
```
cirq.merge_single_qubit_gates_to_phxz(cirq.Circuit(cirq.GlobalPhaseGate(1j).on()))
...
/src/cirq/cirq-core/cirq/linalg/decompositions.py in deconstruct_single_qubit_matrix_into_angles(mat)
88 """
89 # Anti-cancel left-vs-right phase along top row.
---> 90 right_phase = cmath.phase(mat[0, 1] * np.conj(mat[0, 0])) + math.pi
91 mat = np.dot(mat, _phase_matrix(-right_phase))
92
IndexError: index 1 is out of bounds for axis 1 with size 1
```
I also checked `merge_single_qubit_gates_to_phased_x_and_z` and it suffers from the same issue.
The problem appears to be that the rewriter passes a 1x1 matrix to `single_qubit_matrix_to_phxz` which expects a 2x2 matrix.
**Cirq version**
0.16.0.dev
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cirq-core/cirq/transformers/merge_single_qubit_gates.py`
Content:
```
1 # Copyright 2022 The Cirq Developers
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # https://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Transformer passes to combine adjacent single-qubit rotations."""
16
17 from typing import Optional, TYPE_CHECKING
18
19 from cirq import protocols, circuits
20 from cirq.transformers.analytical_decompositions import single_qubit_decompositions
21 from cirq.transformers import transformer_api, transformer_primitives, merge_k_qubit_gates
22
23 if TYPE_CHECKING:
24 import cirq
25
26
27 @transformer_api.transformer
28 def merge_single_qubit_gates_to_phased_x_and_z(
29 circuit: 'cirq.AbstractCircuit',
30 *,
31 context: Optional['cirq.TransformerContext'] = None,
32 atol: float = 1e-8,
33 ) -> 'cirq.Circuit':
34 """Replaces runs of single qubit rotations with `cirq.PhasedXPowGate` and `cirq.ZPowGate`.
35
36 Specifically, any run of non-parameterized single-qubit unitaries will be replaced by an
37 optional PhasedX operation followed by an optional Z operation.
38
39 Args:
40 circuit: Input circuit to transform. It will not be modified.
41 context: `cirq.TransformerContext` storing common configurable options for transformers.
42 atol: Absolute tolerance to angle error. Larger values allow more negligible gates to be
43 dropped, smaller values increase accuracy.
44
45 Returns:
46 Copy of the transformed input circuit.
47 """
48
49 def rewriter(op: 'cirq.CircuitOperation') -> 'cirq.OP_TREE':
50 return [
51 g(op.qubits[0])
52 for g in single_qubit_decompositions.single_qubit_matrix_to_phased_x_z(
53 protocols.unitary(op), atol
54 )
55 ]
56
57 return merge_k_qubit_gates.merge_k_qubit_unitaries(
58 circuit, k=1, context=context, rewriter=rewriter
59 )
60
61
62 @transformer_api.transformer
63 def merge_single_qubit_gates_to_phxz(
64 circuit: 'cirq.AbstractCircuit',
65 *,
66 context: Optional['cirq.TransformerContext'] = None,
67 atol: float = 1e-8,
68 ) -> 'cirq.Circuit':
69 """Replaces runs of single qubit rotations with a single optional `cirq.PhasedXZGate`.
70
71 Specifically, any run of non-parameterized single-qubit unitaries will be replaced by an
72 optional PhasedXZ.
73
74 Args:
75 circuit: Input circuit to transform. It will not be modified.
76 context: `cirq.TransformerContext` storing common configurable options for transformers.
77 atol: Absolute tolerance to angle error. Larger values allow more negligible gates to be
78 dropped, smaller values increase accuracy.
79
80 Returns:
81 Copy of the transformed input circuit.
82 """
83
84 def rewriter(op: 'cirq.CircuitOperation') -> 'cirq.OP_TREE':
85 gate = single_qubit_decompositions.single_qubit_matrix_to_phxz(protocols.unitary(op), atol)
86 return gate(op.qubits[0]) if gate else []
87
88 return merge_k_qubit_gates.merge_k_qubit_unitaries(
89 circuit, k=1, context=context, rewriter=rewriter
90 )
91
92
93 @transformer_api.transformer
94 def merge_single_qubit_moments_to_phxz(
95 circuit: 'cirq.AbstractCircuit',
96 *,
97 context: Optional['cirq.TransformerContext'] = None,
98 atol: float = 1e-8,
99 ) -> 'cirq.Circuit':
100 """Merges adjacent moments with only 1-qubit rotations to a single moment with PhasedXZ gates.
101
102 Args:
103 circuit: Input circuit to transform. It will not be modified.
104 context: `cirq.TransformerContext` storing common configurable options for transformers.
105 atol: Absolute tolerance to angle error. Larger values allow more negligible gates to be
106 dropped, smaller values increase accuracy.
107
108 Returns:
109 Copy of the transformed input circuit.
110 """
111 tags_to_ignore = set(context.tags_to_ignore) if context else set()
112
113 def can_merge_moment(m: 'cirq.Moment'):
114 return all(
115 protocols.num_qubits(op) == 1
116 and protocols.has_unitary(op)
117 and tags_to_ignore.isdisjoint(op.tags)
118 for op in m
119 )
120
121 def merge_func(m1: 'cirq.Moment', m2: 'cirq.Moment') -> Optional['cirq.Moment']:
122 if not (can_merge_moment(m1) and can_merge_moment(m2)):
123 return None
124 ret_ops = []
125 for q in m1.qubits | m2.qubits:
126 mat = protocols.unitary(circuits.Circuit(m.operation_at(q) or [] for m in [m1, m2]))
127 gate = single_qubit_decompositions.single_qubit_matrix_to_phxz(mat, atol)
128 if gate:
129 ret_ops.append(gate(q))
130 return circuits.Moment(ret_ops)
131
132 return transformer_primitives.merge_moments(
133 circuit,
134 merge_func,
135 deep=context.deep if context else False,
136 tags_to_ignore=tuple(tags_to_ignore),
137 ).unfreeze(copy=False)
138
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/cirq-core/cirq/transformers/merge_single_qubit_gates.py b/cirq-core/cirq/transformers/merge_single_qubit_gates.py
--- a/cirq-core/cirq/transformers/merge_single_qubit_gates.py
+++ b/cirq-core/cirq/transformers/merge_single_qubit_gates.py
@@ -16,7 +16,7 @@
from typing import Optional, TYPE_CHECKING
-from cirq import protocols, circuits
+from cirq import circuits, ops, protocols
from cirq.transformers.analytical_decompositions import single_qubit_decompositions
from cirq.transformers import transformer_api, transformer_primitives, merge_k_qubit_gates
@@ -47,11 +47,12 @@
"""
def rewriter(op: 'cirq.CircuitOperation') -> 'cirq.OP_TREE':
+ u = protocols.unitary(op)
+ if protocols.num_qubits(op) == 0:
+ return ops.GlobalPhaseGate(u[0, 0]).on()
return [
g(op.qubits[0])
- for g in single_qubit_decompositions.single_qubit_matrix_to_phased_x_z(
- protocols.unitary(op), atol
- )
+ for g in single_qubit_decompositions.single_qubit_matrix_to_phased_x_z(u, atol)
]
return merge_k_qubit_gates.merge_k_qubit_unitaries(
@@ -82,7 +83,10 @@
"""
def rewriter(op: 'cirq.CircuitOperation') -> 'cirq.OP_TREE':
- gate = single_qubit_decompositions.single_qubit_matrix_to_phxz(protocols.unitary(op), atol)
+ u = protocols.unitary(op)
+ if protocols.num_qubits(op) == 0:
+ return ops.GlobalPhaseGate(u[0, 0]).on()
+ gate = single_qubit_decompositions.single_qubit_matrix_to_phxz(u, atol)
return gate(op.qubits[0]) if gate else []
return merge_k_qubit_gates.merge_k_qubit_unitaries(
| {"golden_diff": "diff --git a/cirq-core/cirq/transformers/merge_single_qubit_gates.py b/cirq-core/cirq/transformers/merge_single_qubit_gates.py\n--- a/cirq-core/cirq/transformers/merge_single_qubit_gates.py\n+++ b/cirq-core/cirq/transformers/merge_single_qubit_gates.py\n@@ -16,7 +16,7 @@\n \n from typing import Optional, TYPE_CHECKING\n \n-from cirq import protocols, circuits\n+from cirq import circuits, ops, protocols\n from cirq.transformers.analytical_decompositions import single_qubit_decompositions\n from cirq.transformers import transformer_api, transformer_primitives, merge_k_qubit_gates\n \n@@ -47,11 +47,12 @@\n \"\"\"\n \n def rewriter(op: 'cirq.CircuitOperation') -> 'cirq.OP_TREE':\n+ u = protocols.unitary(op)\n+ if protocols.num_qubits(op) == 0:\n+ return ops.GlobalPhaseGate(u[0, 0]).on()\n return [\n g(op.qubits[0])\n- for g in single_qubit_decompositions.single_qubit_matrix_to_phased_x_z(\n- protocols.unitary(op), atol\n- )\n+ for g in single_qubit_decompositions.single_qubit_matrix_to_phased_x_z(u, atol)\n ]\n \n return merge_k_qubit_gates.merge_k_qubit_unitaries(\n@@ -82,7 +83,10 @@\n \"\"\"\n \n def rewriter(op: 'cirq.CircuitOperation') -> 'cirq.OP_TREE':\n- gate = single_qubit_decompositions.single_qubit_matrix_to_phxz(protocols.unitary(op), atol)\n+ u = protocols.unitary(op)\n+ if protocols.num_qubits(op) == 0:\n+ return ops.GlobalPhaseGate(u[0, 0]).on()\n+ gate = single_qubit_decompositions.single_qubit_matrix_to_phxz(u, atol)\n return gate(op.qubits[0]) if gate else []\n \n return merge_k_qubit_gates.merge_k_qubit_unitaries(\n", "issue": "merge_single_qubit_gates_to_phxz raises IndexError when encountering GlobalPhaseGate\n**Description of the issue**\r\n\r\n**How to reproduce the issue**\r\n\r\n```\r\ncirq.merge_single_qubit_gates_to_phxz(cirq.Circuit(cirq.GlobalPhaseGate(1j).on()))\r\n...\r\n/src/cirq/cirq-core/cirq/linalg/decompositions.py in deconstruct_single_qubit_matrix_into_angles(mat)\r\n 88 \"\"\"\r\n 89 # Anti-cancel left-vs-right phase along top row.\r\n---> 90 right_phase = cmath.phase(mat[0, 1] * np.conj(mat[0, 0])) + math.pi\r\n 91 mat = np.dot(mat, _phase_matrix(-right_phase))\r\n 92 \r\n\r\nIndexError: index 1 is out of bounds for axis 1 with size 1\r\n```\r\n\r\nI also checked `merge_single_qubit_gates_to_phased_x_and_z` and it suffers from the same issue.\r\n\r\nThe problem appears to be that the rewriter passes a 1x1 matrix to `single_qubit_matrix_to_phxz` which expects a 2x2 matrix.\r\n\r\n**Cirq version**\r\n0.16.0.dev\r\n\n", "before_files": [{"content": "# Copyright 2022 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Transformer passes to combine adjacent single-qubit rotations.\"\"\"\n\nfrom typing import Optional, TYPE_CHECKING\n\nfrom cirq import protocols, circuits\nfrom cirq.transformers.analytical_decompositions import single_qubit_decompositions\nfrom cirq.transformers import transformer_api, transformer_primitives, merge_k_qubit_gates\n\nif TYPE_CHECKING:\n import 
cirq\n\n\n@transformer_api.transformer\ndef merge_single_qubit_gates_to_phased_x_and_z(\n circuit: 'cirq.AbstractCircuit',\n *,\n context: Optional['cirq.TransformerContext'] = None,\n atol: float = 1e-8,\n) -> 'cirq.Circuit':\n \"\"\"Replaces runs of single qubit rotations with `cirq.PhasedXPowGate` and `cirq.ZPowGate`.\n\n Specifically, any run of non-parameterized single-qubit unitaries will be replaced by an\n optional PhasedX operation followed by an optional Z operation.\n\n Args:\n circuit: Input circuit to transform. It will not be modified.\n context: `cirq.TransformerContext` storing common configurable options for transformers.\n atol: Absolute tolerance to angle error. Larger values allow more negligible gates to be\n dropped, smaller values increase accuracy.\n\n Returns:\n Copy of the transformed input circuit.\n \"\"\"\n\n def rewriter(op: 'cirq.CircuitOperation') -> 'cirq.OP_TREE':\n return [\n g(op.qubits[0])\n for g in single_qubit_decompositions.single_qubit_matrix_to_phased_x_z(\n protocols.unitary(op), atol\n )\n ]\n\n return merge_k_qubit_gates.merge_k_qubit_unitaries(\n circuit, k=1, context=context, rewriter=rewriter\n )\n\n\n@transformer_api.transformer\ndef merge_single_qubit_gates_to_phxz(\n circuit: 'cirq.AbstractCircuit',\n *,\n context: Optional['cirq.TransformerContext'] = None,\n atol: float = 1e-8,\n) -> 'cirq.Circuit':\n \"\"\"Replaces runs of single qubit rotations with a single optional `cirq.PhasedXZGate`.\n\n Specifically, any run of non-parameterized single-qubit unitaries will be replaced by an\n optional PhasedXZ.\n\n Args:\n circuit: Input circuit to transform. It will not be modified.\n context: `cirq.TransformerContext` storing common configurable options for transformers.\n atol: Absolute tolerance to angle error. Larger values allow more negligible gates to be\n dropped, smaller values increase accuracy.\n\n Returns:\n Copy of the transformed input circuit.\n \"\"\"\n\n def rewriter(op: 'cirq.CircuitOperation') -> 'cirq.OP_TREE':\n gate = single_qubit_decompositions.single_qubit_matrix_to_phxz(protocols.unitary(op), atol)\n return gate(op.qubits[0]) if gate else []\n\n return merge_k_qubit_gates.merge_k_qubit_unitaries(\n circuit, k=1, context=context, rewriter=rewriter\n )\n\n\n@transformer_api.transformer\ndef merge_single_qubit_moments_to_phxz(\n circuit: 'cirq.AbstractCircuit',\n *,\n context: Optional['cirq.TransformerContext'] = None,\n atol: float = 1e-8,\n) -> 'cirq.Circuit':\n \"\"\"Merges adjacent moments with only 1-qubit rotations to a single moment with PhasedXZ gates.\n\n Args:\n circuit: Input circuit to transform. It will not be modified.\n context: `cirq.TransformerContext` storing common configurable options for transformers.\n atol: Absolute tolerance to angle error. 
Larger values allow more negligible gates to be\n dropped, smaller values increase accuracy.\n\n Returns:\n Copy of the transformed input circuit.\n \"\"\"\n tags_to_ignore = set(context.tags_to_ignore) if context else set()\n\n def can_merge_moment(m: 'cirq.Moment'):\n return all(\n protocols.num_qubits(op) == 1\n and protocols.has_unitary(op)\n and tags_to_ignore.isdisjoint(op.tags)\n for op in m\n )\n\n def merge_func(m1: 'cirq.Moment', m2: 'cirq.Moment') -> Optional['cirq.Moment']:\n if not (can_merge_moment(m1) and can_merge_moment(m2)):\n return None\n ret_ops = []\n for q in m1.qubits | m2.qubits:\n mat = protocols.unitary(circuits.Circuit(m.operation_at(q) or [] for m in [m1, m2]))\n gate = single_qubit_decompositions.single_qubit_matrix_to_phxz(mat, atol)\n if gate:\n ret_ops.append(gate(q))\n return circuits.Moment(ret_ops)\n\n return transformer_primitives.merge_moments(\n circuit,\n merge_func,\n deep=context.deep if context else False,\n tags_to_ignore=tuple(tags_to_ignore),\n ).unfreeze(copy=False)\n", "path": "cirq-core/cirq/transformers/merge_single_qubit_gates.py"}], "after_files": [{"content": "# Copyright 2022 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Transformer passes to combine adjacent single-qubit rotations.\"\"\"\n\nfrom typing import Optional, TYPE_CHECKING\n\nfrom cirq import circuits, ops, protocols\nfrom cirq.transformers.analytical_decompositions import single_qubit_decompositions\nfrom cirq.transformers import transformer_api, transformer_primitives, merge_k_qubit_gates\n\nif TYPE_CHECKING:\n import cirq\n\n\n@transformer_api.transformer\ndef merge_single_qubit_gates_to_phased_x_and_z(\n circuit: 'cirq.AbstractCircuit',\n *,\n context: Optional['cirq.TransformerContext'] = None,\n atol: float = 1e-8,\n) -> 'cirq.Circuit':\n \"\"\"Replaces runs of single qubit rotations with `cirq.PhasedXPowGate` and `cirq.ZPowGate`.\n\n Specifically, any run of non-parameterized single-qubit unitaries will be replaced by an\n optional PhasedX operation followed by an optional Z operation.\n\n Args:\n circuit: Input circuit to transform. It will not be modified.\n context: `cirq.TransformerContext` storing common configurable options for transformers.\n atol: Absolute tolerance to angle error. 
Larger values allow more negligible gates to be\n dropped, smaller values increase accuracy.\n\n Returns:\n Copy of the transformed input circuit.\n \"\"\"\n\n def rewriter(op: 'cirq.CircuitOperation') -> 'cirq.OP_TREE':\n u = protocols.unitary(op)\n if protocols.num_qubits(op) == 0:\n return ops.GlobalPhaseGate(u[0, 0]).on()\n return [\n g(op.qubits[0])\n for g in single_qubit_decompositions.single_qubit_matrix_to_phased_x_z(u, atol)\n ]\n\n return merge_k_qubit_gates.merge_k_qubit_unitaries(\n circuit, k=1, context=context, rewriter=rewriter\n )\n\n\n@transformer_api.transformer\ndef merge_single_qubit_gates_to_phxz(\n circuit: 'cirq.AbstractCircuit',\n *,\n context: Optional['cirq.TransformerContext'] = None,\n atol: float = 1e-8,\n) -> 'cirq.Circuit':\n \"\"\"Replaces runs of single qubit rotations with a single optional `cirq.PhasedXZGate`.\n\n Specifically, any run of non-parameterized single-qubit unitaries will be replaced by an\n optional PhasedXZ.\n\n Args:\n circuit: Input circuit to transform. It will not be modified.\n context: `cirq.TransformerContext` storing common configurable options for transformers.\n atol: Absolute tolerance to angle error. Larger values allow more negligible gates to be\n dropped, smaller values increase accuracy.\n\n Returns:\n Copy of the transformed input circuit.\n \"\"\"\n\n def rewriter(op: 'cirq.CircuitOperation') -> 'cirq.OP_TREE':\n u = protocols.unitary(op)\n if protocols.num_qubits(op) == 0:\n return ops.GlobalPhaseGate(u[0, 0]).on()\n gate = single_qubit_decompositions.single_qubit_matrix_to_phxz(u, atol)\n return gate(op.qubits[0]) if gate else []\n\n return merge_k_qubit_gates.merge_k_qubit_unitaries(\n circuit, k=1, context=context, rewriter=rewriter\n )\n\n\n@transformer_api.transformer\ndef merge_single_qubit_moments_to_phxz(\n circuit: 'cirq.AbstractCircuit',\n *,\n context: Optional['cirq.TransformerContext'] = None,\n atol: float = 1e-8,\n) -> 'cirq.Circuit':\n \"\"\"Merges adjacent moments with only 1-qubit rotations to a single moment with PhasedXZ gates.\n\n Args:\n circuit: Input circuit to transform. It will not be modified.\n context: `cirq.TransformerContext` storing common configurable options for transformers.\n atol: Absolute tolerance to angle error. Larger values allow more negligible gates to be\n dropped, smaller values increase accuracy.\n\n Returns:\n Copy of the transformed input circuit.\n \"\"\"\n tags_to_ignore = set(context.tags_to_ignore) if context else set()\n\n def can_merge_moment(m: 'cirq.Moment'):\n return all(\n protocols.num_qubits(op) == 1\n and protocols.has_unitary(op)\n and tags_to_ignore.isdisjoint(op.tags)\n for op in m\n )\n\n def merge_func(m1: 'cirq.Moment', m2: 'cirq.Moment') -> Optional['cirq.Moment']:\n if not (can_merge_moment(m1) and can_merge_moment(m2)):\n return None\n ret_ops = []\n for q in m1.qubits | m2.qubits:\n mat = protocols.unitary(circuits.Circuit(m.operation_at(q) or [] for m in [m1, m2]))\n gate = single_qubit_decompositions.single_qubit_matrix_to_phxz(mat, atol)\n if gate:\n ret_ops.append(gate(q))\n return circuits.Moment(ret_ops)\n\n return transformer_primitives.merge_moments(\n circuit,\n merge_func,\n deep=context.deep if context else False,\n tags_to_ignore=tuple(tags_to_ignore),\n ).unfreeze(copy=False)\n", "path": "cirq-core/cirq/transformers/merge_single_qubit_gates.py"}]} | 2,059 | 466 |
gh_patches_debug_38604 | rasdani/github-patches | git_diff | tensorlayer__TensorLayer-189 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Issues with ops.exit_tf function
So far I've had 2 issues with this function:
- ops.exit_tf() returns an error because a session is expected as an argument.
- ops.exit_tf(sess) returns "NameError: name 'exit' is not defined" when launched from an IPython console - in my case it's Spyder's internal IPython console (tested both on Mac and Windows)
Both issues are easy to solve:
- add `if sess != None:` before `sess.close()`
- use `from sys import exit as _exit` to avoid whatever interference is throwing the error
Before I propose a pull request, I just wanted to check that I'm not misusing the function: I tried to use the function without an argument to easily exit tensorboard (which is a nice feature), and I'm launching it from IPython instead of the terminal because it seems like a useful function to be able to include in a script (and I run my scripts from Spyder).
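For clarity, here is a minimal sketch of the two fixes above (illustrative only, not the final patch; the platform-specific cleanup in `exit_tf` is assumed to stay as it is in `tensorlayer/ops.py`):

```python
from sys import exit as _exit  # sidesteps the "name 'exit' is not defined" error under IPython


def exit_tf(sess=None):
    """Close the session if one was passed, then clean up tensorboard/nvidia processes."""
    if sess != None:  # allow calling without a session, e.g. just to close tensorboard
        sess.close()
    # ... keep the existing platform-specific tensorboard/nvidia cleanup here ...
    _exit()
```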
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `tensorlayer/ops.py`
Content:
```
1 #! /usr/bin/python
2 # -*- coding: utf8 -*-
3
4
5
6
7 import tensorflow as tf
8 import os
9 import sys
10 from sys import platform as _platform
11
12
13 def exit_tf(sess=None):
14 """Close tensorboard and nvidia-process if available
15
16 Parameters
17 ----------
18 sess : a session instance of TensorFlow
19 TensorFlow session
20 """
21 text = "[tl] Close tensorboard and nvidia-process if available"
22 sess.close()
23 # import time
24 # time.sleep(2)
25 if _platform == "linux" or _platform == "linux2":
26 print('linux: %s' % text)
27 os.system('nvidia-smi')
28 os.system('fuser 6006/tcp -k') # kill tensorboard 6006
29 os.system("nvidia-smi | grep python |awk '{print $3}'|xargs kill") # kill all nvidia-smi python process
30 elif _platform == "darwin":
31 print('OS X: %s' % text)
32 os.system("lsof -i tcp:6006 | grep -v PID | awk '{print $2}' | xargs kill") # kill tensorboard 6006
33 elif _platform == "win32":
34 print('Windows: %s' % text)
35 else:
36 print(_platform)
37 exit()
38
39 def clear_all(printable=True):
40 """Clears all the placeholder variables of keep prob,
41 including keeping probabilities of all dropout, denoising, dropconnect etc.
42
43 Parameters
44 ----------
45 printable : boolean
46 If True, print all deleted variables.
47 """
48 print('clear all .....................................')
49 gl = globals().copy()
50 for var in gl:
51 if var[0] == '_': continue
52 if 'func' in str(globals()[var]): continue
53 if 'module' in str(globals()[var]): continue
54 if 'class' in str(globals()[var]): continue
55
56 if printable:
57 print(" clear_all ------- %s" % str(globals()[var]))
58
59 del globals()[var]
60
61 # def clear_all2(vars, printable=True):
62 # """
63 # The :function:`clear_all()` Clears all the placeholder variables of keep prob,
64 # including keeping probabilities of all dropout, denoising, dropconnect
65 # Parameters
66 # ----------
67 # printable : if True, print all deleted variables.
68 # """
69 # print('clear all .....................................')
70 # for var in vars:
71 # if var[0] == '_': continue
72 # if 'func' in str(var): continue
73 # if 'module' in str(var): continue
74 # if 'class' in str(var): continue
75 #
76 # if printable:
77 # print(" clear_all ------- %s" % str(var))
78 #
79 # del var
80
81 def set_gpu_fraction(sess=None, gpu_fraction=0.3):
82 """Set the GPU memory fraction for the application.
83
84 Parameters
85 ----------
86 sess : a session instance of TensorFlow
87 TensorFlow session
88 gpu_fraction : a float
89 Fraction of GPU memory, (0 ~ 1]
90
91 References
92 ----------
93 - `TensorFlow using GPU <https://www.tensorflow.org/versions/r0.9/how_tos/using_gpu/index.html>`_
94 """
95 print(" tensorlayer: GPU MEM Fraction %f" % gpu_fraction)
96 gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_fraction)
97 sess = tf.Session(config = tf.ConfigProto(gpu_options = gpu_options))
98 return sess
99
100
101
102
103
104 def disable_print():
105 """Disable console output, ``suppress_stdout`` is recommended.
106
107 Examples
108 ---------
109 >>> print("You can see me")
110 >>> tl.ops.disable_print()
111 >>> print(" You can't see me")
112 >>> tl.ops.enable_print()
113 >>> print("You can see me")
114 """
115 # sys.stdout = os.devnull # this one kill the process
116 sys.stdout = None
117 sys.stderr = os.devnull
118
119 def enable_print():
120 """Enable console output, ``suppress_stdout`` is recommended.
121
122 Examples
123 --------
124 - see tl.ops.disable_print()
125 """
126 sys.stdout = sys.__stdout__
127 sys.stderr = sys.__stderr__
128
129
130 # class temporary_disable_print:
131 # """Temporarily disable console output.
132 #
133 # Examples
134 # ---------
135 # >>> print("You can see me")
136 # >>> with tl.ops.temporary_disable_print() as t:
137 # >>> print("You can't see me")
138 # >>> print("You can see me")
139 # """
140 # def __init__(self):
141 # pass
142 # def __enter__(self):
143 # sys.stdout = None
144 # sys.stderr = os.devnull
145 # def __exit__(self, type, value, traceback):
146 # sys.stdout = sys.__stdout__
147 # sys.stderr = sys.__stderr__
148 # return isinstance(value, TypeError)
149
150
151 from contextlib import contextmanager
152 @contextmanager
153 def suppress_stdout():
154 """Temporarily disable console output.
155
156 Examples
157 ---------
158 >>> print("You can see me")
159 >>> with tl.ops.suppress_stdout():
160 >>> print("You can't see me")
161 >>> print("You can see me")
162
163 References
164 -----------
165 - `stackoverflow <http://stackoverflow.com/questions/2125702/how-to-suppress-console-output-in-python>`_
166 """
167 with open(os.devnull, "w") as devnull:
168 old_stdout = sys.stdout
169 sys.stdout = devnull
170 try:
171 yield
172 finally:
173 sys.stdout = old_stdout
174
175
176
177 def get_site_packages_directory():
178 """Print and return the site-packages directory.
179
180 Examples
181 ---------
182 >>> loc = tl.ops.get_site_packages_directory()
183 """
184 import site
185 try:
186 loc = site.getsitepackages()
187 print(" tl.ops : site-packages in ", loc)
188 return loc
189 except:
190 print(" tl.ops : Cannot find package dir from virtual environment")
191 return False
192
193
194
195 def empty_trash():
196 """Empty trash folder.
197
198 """
199 text = "[tl] Empty the trash"
200 if _platform == "linux" or _platform == "linux2":
201 print('linux: %s' % text)
202 os.system("rm -rf ~/.local/share/Trash/*")
203 elif _platform == "darwin":
204 print('OS X: %s' % text)
205 os.system("sudo rm -rf ~/.Trash/*")
206 elif _platform == "win32":
207 print('Windows: %s' % text)
208 try:
209 os.system("rd /s c:\$Recycle.Bin") # Windows 7 or Server 2008
210 except:
211 pass
212 try:
213 os.system("rd /s c:\recycler") # Windows XP, Vista, or Server 2003
214 except:
215 pass
216 else:
217 print(_platform)
218
219 #
220
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/tensorlayer/ops.py b/tensorlayer/ops.py
--- a/tensorlayer/ops.py
+++ b/tensorlayer/ops.py
@@ -5,36 +5,73 @@
import tensorflow as tf
+import tensorlayer as tl
import os
+import subprocess
import sys
from sys import platform as _platform
+from sys import exit as _exit
-def exit_tf(sess=None):
- """Close tensorboard and nvidia-process if available
+def exit_tf(sess=None, port=6006):
+ """Close tensorflow session, tensorboard and nvidia-process if available
Parameters
----------
sess : a session instance of TensorFlow
TensorFlow session
+ tb_port : an integer
+ TensorBoard port you want to close, 6006 is tensorboard default
"""
text = "[tl] Close tensorboard and nvidia-process if available"
- sess.close()
+ text2 = "[tl] Close tensorboard and nvidia-process not yet supported by this function (tl.ops.exit_tf) on "
+ if sess != None:
+ sess.close()
# import time
# time.sleep(2)
if _platform == "linux" or _platform == "linux2":
print('linux: %s' % text)
os.system('nvidia-smi')
- os.system('fuser 6006/tcp -k') # kill tensorboard 6006
+ os.system('fuser '+ port +'/tcp -k') # kill tensorboard 6006
os.system("nvidia-smi | grep python |awk '{print $3}'|xargs kill") # kill all nvidia-smi python process
+ _exit()
elif _platform == "darwin":
print('OS X: %s' % text)
- os.system("lsof -i tcp:6006 | grep -v PID | awk '{print $2}' | xargs kill") # kill tensorboard 6006
+ subprocess.Popen("lsof -i tcp:"+ str(port) +" | grep -v PID | awk '{print $2}' | xargs kill", shell=True) # kill tensorboard
elif _platform == "win32":
- print('Windows: %s' % text)
+ print(text2 + "Windows")
+ # TODO
else:
- print(_platform)
- exit()
+ print(text2 + _platform)
+
+def open_tb(logdir='/tmp/tensorflow', port=6006):
+ """Open tensorboard
+
+ Parameters
+ ----------
+ logdir : a string
+ Directory where your tensorboard logs are saved
+ port : an integer
+ TensorBoard port you want to open, 6006 is tensorboard default
+ """
+
+ text = "[tl] Open tensorboard, go to localhost:" + str(port) + " to access"
+ text2 = " not yet supported by this function (tl.ops.open_tb)"
+
+ if not tl.files.exists_or_mkdir(logdir, verbose=False):
+ print("[tl] Log reportory was created at %s" % logdir)
+
+ if _platform == "linux" or _platform == "linux2":
+ print('linux %s' % text2)
+ # TODO
+ elif _platform == "darwin":
+ print('OS X: %s' % text)
+ subprocess.Popen(sys.prefix + " | python -m tensorflow.tensorboard --logdir=" + logdir + " --port=" + str(port), shell=True) # open tensorboard in localhost:6006/ or whatever port you chose
+ elif _platform == "win32":
+ print('Windows%s' % text2)
+ # TODO
+ else:
+ print(_platform + text2)
def clear_all(printable=True):
"""Clears all the placeholder variables of keep prob,
| {"golden_diff": "diff --git a/tensorlayer/ops.py b/tensorlayer/ops.py\n--- a/tensorlayer/ops.py\n+++ b/tensorlayer/ops.py\n@@ -5,36 +5,73 @@\n \n \n import tensorflow as tf\n+import tensorlayer as tl\n import os\n+import subprocess\n import sys\n from sys import platform as _platform\n+from sys import exit as _exit\n \n \n-def exit_tf(sess=None):\n- \"\"\"Close tensorboard and nvidia-process if available\n+def exit_tf(sess=None, port=6006):\n+ \"\"\"Close tensorflow session, tensorboard and nvidia-process if available\n \n Parameters\n ----------\n sess : a session instance of TensorFlow\n TensorFlow session\n+ tb_port : an integer\n+ TensorBoard port you want to close, 6006 is tensorboard default\n \"\"\"\n text = \"[tl] Close tensorboard and nvidia-process if available\"\n- sess.close()\n+ text2 = \"[tl] Close tensorboard and nvidia-process not yet supported by this function (tl.ops.exit_tf) on \"\n+ if sess != None:\n+ sess.close()\n # import time\n # time.sleep(2)\n if _platform == \"linux\" or _platform == \"linux2\":\n print('linux: %s' % text)\n os.system('nvidia-smi')\n- os.system('fuser 6006/tcp -k') # kill tensorboard 6006\n+ os.system('fuser '+ port +'/tcp -k') # kill tensorboard 6006\n os.system(\"nvidia-smi | grep python |awk '{print $3}'|xargs kill\") # kill all nvidia-smi python process\n+ _exit()\n elif _platform == \"darwin\":\n print('OS X: %s' % text)\n- os.system(\"lsof -i tcp:6006 | grep -v PID | awk '{print $2}' | xargs kill\") # kill tensorboard 6006\n+ subprocess.Popen(\"lsof -i tcp:\"+ str(port) +\" | grep -v PID | awk '{print $2}' | xargs kill\", shell=True) # kill tensorboard\n elif _platform == \"win32\":\n- print('Windows: %s' % text)\n+ print(text2 + \"Windows\")\n+ # TODO\n else:\n- print(_platform)\n- exit()\n+ print(text2 + _platform)\n+ \n+def open_tb(logdir='/tmp/tensorflow', port=6006):\n+ \"\"\"Open tensorboard\n+ \n+ Parameters\n+ ----------\n+ logdir : a string\n+ Directory where your tensorboard logs are saved\n+ port : an integer\n+ TensorBoard port you want to open, 6006 is tensorboard default\n+ \"\"\"\n+\n+ text = \"[tl] Open tensorboard, go to localhost:\" + str(port) + \" to access\"\n+ text2 = \" not yet supported by this function (tl.ops.open_tb)\"\n+ \n+ if not tl.files.exists_or_mkdir(logdir, verbose=False):\n+ print(\"[tl] Log reportory was created at %s\" % logdir)\n+ \n+ if _platform == \"linux\" or _platform == \"linux2\":\n+ print('linux %s' % text2)\n+ # TODO\n+ elif _platform == \"darwin\":\n+ print('OS X: %s' % text)\n+ subprocess.Popen(sys.prefix + \" | python -m tensorflow.tensorboard --logdir=\" + logdir + \" --port=\" + str(port), shell=True) # open tensorboard in localhost:6006/ or whatever port you chose\n+ elif _platform == \"win32\":\n+ print('Windows%s' % text2)\n+ # TODO\n+ else:\n+ print(_platform + text2)\n \n def clear_all(printable=True):\n \"\"\"Clears all the placeholder variables of keep prob,\n", "issue": "Issues with ops.exit_tf function\nSo far I've 2 issues with this function :\r\n- ops.exit_tf() returns an error because a session is expected as argument.\r\n- ops.exit_tf(sess) returns \"NameError: name 'exit' is not defined\" when launched from an IPython console - in my case it's Spyder's internal IPython console (tested both on Mac and Windows)\r\n\r\nBoth issue are easy to solve :\r\n- add `if sess != None:` before `sess.close()`\r\n- use `from sys import exit as _exit` to avoid whatever is the interference that's throwing the error\r\n\r\nBefore I propose a pull request, I just wanted to check that I'm 
not misusing the function : I tried to use the function without an argument to easly exit tensorboard (which is a nice feature) and I'm launching it from IPyton instead of the terminal because it's seems like a usefull function to be able to include in a script (and i run my scripts from Spyder)\n", "before_files": [{"content": "#! /usr/bin/python\n# -*- coding: utf8 -*-\n\n\n\n\nimport tensorflow as tf\nimport os\nimport sys\nfrom sys import platform as _platform\n\n\ndef exit_tf(sess=None):\n \"\"\"Close tensorboard and nvidia-process if available\n\n Parameters\n ----------\n sess : a session instance of TensorFlow\n TensorFlow session\n \"\"\"\n text = \"[tl] Close tensorboard and nvidia-process if available\"\n sess.close()\n # import time\n # time.sleep(2)\n if _platform == \"linux\" or _platform == \"linux2\":\n print('linux: %s' % text)\n os.system('nvidia-smi')\n os.system('fuser 6006/tcp -k') # kill tensorboard 6006\n os.system(\"nvidia-smi | grep python |awk '{print $3}'|xargs kill\") # kill all nvidia-smi python process\n elif _platform == \"darwin\":\n print('OS X: %s' % text)\n os.system(\"lsof -i tcp:6006 | grep -v PID | awk '{print $2}' | xargs kill\") # kill tensorboard 6006\n elif _platform == \"win32\":\n print('Windows: %s' % text)\n else:\n print(_platform)\n exit()\n\ndef clear_all(printable=True):\n \"\"\"Clears all the placeholder variables of keep prob,\n including keeping probabilities of all dropout, denoising, dropconnect etc.\n\n Parameters\n ----------\n printable : boolean\n If True, print all deleted variables.\n \"\"\"\n print('clear all .....................................')\n gl = globals().copy()\n for var in gl:\n if var[0] == '_': continue\n if 'func' in str(globals()[var]): continue\n if 'module' in str(globals()[var]): continue\n if 'class' in str(globals()[var]): continue\n\n if printable:\n print(\" clear_all ------- %s\" % str(globals()[var]))\n\n del globals()[var]\n\n# def clear_all2(vars, printable=True):\n# \"\"\"\n# The :function:`clear_all()` Clears all the placeholder variables of keep prob,\n# including keeping probabilities of all dropout, denoising, dropconnect\n# Parameters\n# ----------\n# printable : if True, print all deleted variables.\n# \"\"\"\n# print('clear all .....................................')\n# for var in vars:\n# if var[0] == '_': continue\n# if 'func' in str(var): continue\n# if 'module' in str(var): continue\n# if 'class' in str(var): continue\n#\n# if printable:\n# print(\" clear_all ------- %s\" % str(var))\n#\n# del var\n\ndef set_gpu_fraction(sess=None, gpu_fraction=0.3):\n \"\"\"Set the GPU memory fraction for the application.\n\n Parameters\n ----------\n sess : a session instance of TensorFlow\n TensorFlow session\n gpu_fraction : a float\n Fraction of GPU memory, (0 ~ 1]\n\n References\n ----------\n - `TensorFlow using GPU <https://www.tensorflow.org/versions/r0.9/how_tos/using_gpu/index.html>`_\n \"\"\"\n print(\" tensorlayer: GPU MEM Fraction %f\" % gpu_fraction)\n gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_fraction)\n sess = tf.Session(config = tf.ConfigProto(gpu_options = gpu_options))\n return sess\n\n\n\n\n\ndef disable_print():\n \"\"\"Disable console output, ``suppress_stdout`` is recommended.\n\n Examples\n ---------\n >>> print(\"You can see me\")\n >>> tl.ops.disable_print()\n >>> print(\" You can't see me\")\n >>> tl.ops.enable_print()\n >>> print(\"You can see me\")\n \"\"\"\n # sys.stdout = os.devnull # this one kill the process\n sys.stdout = None\n sys.stderr = 
os.devnull\n\ndef enable_print():\n \"\"\"Enable console output, ``suppress_stdout`` is recommended.\n\n Examples\n --------\n - see tl.ops.disable_print()\n \"\"\"\n sys.stdout = sys.__stdout__\n sys.stderr = sys.__stderr__\n\n\n# class temporary_disable_print:\n# \"\"\"Temporarily disable console output.\n#\n# Examples\n# ---------\n# >>> print(\"You can see me\")\n# >>> with tl.ops.temporary_disable_print() as t:\n# >>> print(\"You can't see me\")\n# >>> print(\"You can see me\")\n# \"\"\"\n# def __init__(self):\n# pass\n# def __enter__(self):\n# sys.stdout = None\n# sys.stderr = os.devnull\n# def __exit__(self, type, value, traceback):\n# sys.stdout = sys.__stdout__\n# sys.stderr = sys.__stderr__\n# return isinstance(value, TypeError)\n\n\nfrom contextlib import contextmanager\n@contextmanager\ndef suppress_stdout():\n \"\"\"Temporarily disable console output.\n\n Examples\n ---------\n >>> print(\"You can see me\")\n >>> with tl.ops.suppress_stdout():\n >>> print(\"You can't see me\")\n >>> print(\"You can see me\")\n\n References\n -----------\n - `stackoverflow <http://stackoverflow.com/questions/2125702/how-to-suppress-console-output-in-python>`_\n \"\"\"\n with open(os.devnull, \"w\") as devnull:\n old_stdout = sys.stdout\n sys.stdout = devnull\n try:\n yield\n finally:\n sys.stdout = old_stdout\n\n\n\ndef get_site_packages_directory():\n \"\"\"Print and return the site-packages directory.\n\n Examples\n ---------\n >>> loc = tl.ops.get_site_packages_directory()\n \"\"\"\n import site\n try:\n loc = site.getsitepackages()\n print(\" tl.ops : site-packages in \", loc)\n return loc\n except:\n print(\" tl.ops : Cannot find package dir from virtual environment\")\n return False\n\n\n\ndef empty_trash():\n \"\"\"Empty trash folder.\n\n \"\"\"\n text = \"[tl] Empty the trash\"\n if _platform == \"linux\" or _platform == \"linux2\":\n print('linux: %s' % text)\n os.system(\"rm -rf ~/.local/share/Trash/*\")\n elif _platform == \"darwin\":\n print('OS X: %s' % text)\n os.system(\"sudo rm -rf ~/.Trash/*\")\n elif _platform == \"win32\":\n print('Windows: %s' % text)\n try:\n os.system(\"rd /s c:\\$Recycle.Bin\") # Windows 7 or Server 2008\n except:\n pass\n try:\n os.system(\"rd /s c:\\recycler\") # Windows XP, Vista, or Server 2003\n except:\n pass\n else:\n print(_platform)\n\n#\n", "path": "tensorlayer/ops.py"}], "after_files": [{"content": "#! 
/usr/bin/python\n# -*- coding: utf8 -*-\n\n\n\n\nimport tensorflow as tf\nimport tensorlayer as tl\nimport os\nimport subprocess\nimport sys\nfrom sys import platform as _platform\nfrom sys import exit as _exit\n\n\ndef exit_tf(sess=None, port=6006):\n \"\"\"Close tensorflow session, tensorboard and nvidia-process if available\n\n Parameters\n ----------\n sess : a session instance of TensorFlow\n TensorFlow session\n tb_port : an integer\n TensorBoard port you want to close, 6006 is tensorboard default\n \"\"\"\n text = \"[tl] Close tensorboard and nvidia-process if available\"\n text2 = \"[tl] Close tensorboard and nvidia-process not yet supported by this function (tl.ops.exit_tf) on \"\n if sess != None:\n sess.close()\n # import time\n # time.sleep(2)\n if _platform == \"linux\" or _platform == \"linux2\":\n print('linux: %s' % text)\n os.system('nvidia-smi')\n os.system('fuser '+ port +'/tcp -k') # kill tensorboard 6006\n os.system(\"nvidia-smi | grep python |awk '{print $3}'|xargs kill\") # kill all nvidia-smi python process\n _exit()\n elif _platform == \"darwin\":\n print('OS X: %s' % text)\n subprocess.Popen(\"lsof -i tcp:\"+ str(port) +\" | grep -v PID | awk '{print $2}' | xargs kill\", shell=True) # kill tensorboard\n elif _platform == \"win32\":\n print(text2 + \"Windows\")\n # TODO\n else:\n print(text2 + _platform)\n \ndef open_tb(logdir='/tmp/tensorflow', port=6006):\n \"\"\"Open tensorboard\n \n Parameters\n ----------\n logdir : a string\n Directory where your tensorboard logs are saved\n port : an integer\n TensorBoard port you want to open, 6006 is tensorboard default\n \"\"\"\n\n text = \"[tl] Open tensorboard, go to localhost:\" + str(port) + \" to access\"\n text2 = \" not yet supported by this function (tl.ops.open_tb)\"\n \n if not tl.files.exists_or_mkdir(logdir, verbose=False):\n print(\"[tl] Log reportory was created at %s\" % logdir)\n \n if _platform == \"linux\" or _platform == \"linux2\":\n print('linux %s' % text2)\n # TODO\n elif _platform == \"darwin\":\n print('OS X: %s' % text)\n subprocess.Popen(sys.prefix + \" | python -m tensorflow.tensorboard --logdir=\" + logdir + \" --port=\" + str(port), shell=True) # open tensorboard in localhost:6006/ or whatever port you chose\n elif _platform == \"win32\":\n print('Windows%s' % text2)\n # TODO\n else:\n print(_platform + text2)\n\ndef clear_all(printable=True):\n \"\"\"Clears all the placeholder variables of keep prob,\n including keeping probabilities of all dropout, denoising, dropconnect etc.\n\n Parameters\n ----------\n printable : boolean\n If True, print all deleted variables.\n \"\"\"\n print('clear all .....................................')\n gl = globals().copy()\n for var in gl:\n if var[0] == '_': continue\n if 'func' in str(globals()[var]): continue\n if 'module' in str(globals()[var]): continue\n if 'class' in str(globals()[var]): continue\n\n if printable:\n print(\" clear_all ------- %s\" % str(globals()[var]))\n\n del globals()[var]\n\n# def clear_all2(vars, printable=True):\n# \"\"\"\n# The :function:`clear_all()` Clears all the placeholder variables of keep prob,\n# including keeping probabilities of all dropout, denoising, dropconnect\n# Parameters\n# ----------\n# printable : if True, print all deleted variables.\n# \"\"\"\n# print('clear all .....................................')\n# for var in vars:\n# if var[0] == '_': continue\n# if 'func' in str(var): continue\n# if 'module' in str(var): continue\n# if 'class' in str(var): continue\n#\n# if printable:\n# print(\" clear_all ------- 
%s\" % str(var))\n#\n# del var\n\ndef set_gpu_fraction(sess=None, gpu_fraction=0.3):\n \"\"\"Set the GPU memory fraction for the application.\n\n Parameters\n ----------\n sess : a session instance of TensorFlow\n TensorFlow session\n gpu_fraction : a float\n Fraction of GPU memory, (0 ~ 1]\n\n References\n ----------\n - `TensorFlow using GPU <https://www.tensorflow.org/versions/r0.9/how_tos/using_gpu/index.html>`_\n \"\"\"\n print(\" tensorlayer: GPU MEM Fraction %f\" % gpu_fraction)\n gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_fraction)\n sess = tf.Session(config = tf.ConfigProto(gpu_options = gpu_options))\n return sess\n\n\n\n\n\ndef disable_print():\n \"\"\"Disable console output, ``suppress_stdout`` is recommended.\n\n Examples\n ---------\n >>> print(\"You can see me\")\n >>> tl.ops.disable_print()\n >>> print(\" You can't see me\")\n >>> tl.ops.enable_print()\n >>> print(\"You can see me\")\n \"\"\"\n # sys.stdout = os.devnull # this one kill the process\n sys.stdout = None\n sys.stderr = os.devnull\n\ndef enable_print():\n \"\"\"Enable console output, ``suppress_stdout`` is recommended.\n\n Examples\n --------\n - see tl.ops.disable_print()\n \"\"\"\n sys.stdout = sys.__stdout__\n sys.stderr = sys.__stderr__\n\n\n# class temporary_disable_print:\n# \"\"\"Temporarily disable console output.\n#\n# Examples\n# ---------\n# >>> print(\"You can see me\")\n# >>> with tl.ops.temporary_disable_print() as t:\n# >>> print(\"You can't see me\")\n# >>> print(\"You can see me\")\n# \"\"\"\n# def __init__(self):\n# pass\n# def __enter__(self):\n# sys.stdout = None\n# sys.stderr = os.devnull\n# def __exit__(self, type, value, traceback):\n# sys.stdout = sys.__stdout__\n# sys.stderr = sys.__stderr__\n# return isinstance(value, TypeError)\n\n\nfrom contextlib import contextmanager\n@contextmanager\ndef suppress_stdout():\n \"\"\"Temporarily disable console output.\n\n Examples\n ---------\n >>> print(\"You can see me\")\n >>> with tl.ops.suppress_stdout():\n >>> print(\"You can't see me\")\n >>> print(\"You can see me\")\n\n References\n -----------\n - `stackoverflow <http://stackoverflow.com/questions/2125702/how-to-suppress-console-output-in-python>`_\n \"\"\"\n with open(os.devnull, \"w\") as devnull:\n old_stdout = sys.stdout\n sys.stdout = devnull\n try:\n yield\n finally:\n sys.stdout = old_stdout\n\n\n\ndef get_site_packages_directory():\n \"\"\"Print and return the site-packages directory.\n\n Examples\n ---------\n >>> loc = tl.ops.get_site_packages_directory()\n \"\"\"\n import site\n try:\n loc = site.getsitepackages()\n print(\" tl.ops : site-packages in \", loc)\n return loc\n except:\n print(\" tl.ops : Cannot find package dir from virtual environment\")\n return False\n\n\n\ndef empty_trash():\n \"\"\"Empty trash folder.\n\n \"\"\"\n text = \"[tl] Empty the trash\"\n if _platform == \"linux\" or _platform == \"linux2\":\n print('linux: %s' % text)\n os.system(\"rm -rf ~/.local/share/Trash/*\")\n elif _platform == \"darwin\":\n print('OS X: %s' % text)\n os.system(\"sudo rm -rf ~/.Trash/*\")\n elif _platform == \"win32\":\n print('Windows: %s' % text)\n try:\n os.system(\"rd /s c:\\$Recycle.Bin\") # Windows 7 or Server 2008\n except:\n pass\n try:\n os.system(\"rd /s c:\\recycler\") # Windows XP, Vista, or Server 2003\n except:\n pass\n else:\n print(_platform)\n\n#\n", "path": "tensorlayer/ops.py"}]} | 2,548 | 886 |
gh_patches_debug_21467 | rasdani/github-patches | git_diff | DataDog__dd-agent-333 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Allow strings to be elements in dogstatsd sets
Right now it only counts numbers.
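For illustration (the metric name below is made up), a client could send a set sample whose element is a string; with the current parser in `MetricsAggregator.submit_packets`, a non-numeric value fails both the `int()` and `float()` casts and raises instead of being added to the set:

```python
from aggregator import MetricsAggregator

# Hypothetical sketch: counting unique user names with a set metric.
aggregator = MetricsAggregator('myhost')
aggregator.submit_packets('users.uniques:42|s')     # numeric elements already work
aggregator.submit_packets('users.uniques:alice|s')  # currently raises; should count "alice"
```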
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `aggregator.py`
Content:
```
1 import logging
2 from time import time
3
4
5 logger = logging.getLogger(__name__)
6
7
8 class Infinity(Exception): pass
9 class UnknownValue(Exception): pass
10
11 class Metric(object):
12 """
13 A base metric class that accepts points, slices them into time intervals
14 and performs roll-ups within those intervals.
15 """
16
17 def sample(self, value, sample_rate):
18 """ Add a point to the given metric. """
19 raise NotImplementedError()
20
21 def flush(self, timestamp, interval):
22 """ Flush all metrics up to the given timestamp. """
23 raise NotImplementedError()
24
25
26 class Gauge(Metric):
27 """ A metric that tracks a value at particular points in time. """
28
29 def __init__(self, formatter, name, tags, hostname, device_name):
30 self.formatter = formatter
31 self.name = name
32 self.value = None
33 self.tags = tags
34 self.hostname = hostname
35 self.device_name = device_name
36 self.last_sample_time = None
37
38 def sample(self, value, sample_rate):
39 self.value = value
40 self.last_sample_time = time()
41
42 def flush(self, timestamp, interval):
43 if self.value is not None:
44 res = [self.formatter(
45 metric=self.name,
46 timestamp=timestamp,
47 value=self.value,
48 tags=self.tags,
49 hostname=self.hostname,
50 device_name=self.device_name
51 )]
52 self.value = None
53 return res
54
55 return []
56
57
58 class Counter(Metric):
59 """ A metric that tracks a counter value. """
60
61 def __init__(self, formatter, name, tags, hostname, device_name):
62 self.formatter = formatter
63 self.name = name
64 self.value = 0
65 self.tags = tags
66 self.hostname = hostname
67 self.device_name = device_name
68
69 def sample(self, value, sample_rate):
70 self.value += value * int(1 / sample_rate)
71 self.last_sample_time = time()
72
73 def flush(self, timestamp, interval):
74 try:
75 value = self.value / interval
76 return [self.formatter(
77 metric=self.name,
78 value=value,
79 timestamp=timestamp,
80 tags=self.tags,
81 hostname=self.hostname,
82 device_name=self.device_name
83 )]
84 finally:
85 self.value = 0
86
87
88 class Histogram(Metric):
89 """ A metric to track the distribution of a set of values. """
90
91 def __init__(self, formatter, name, tags, hostname, device_name):
92 self.formatter = formatter
93 self.name = name
94 self.count = 0
95 self.samples = []
96 self.percentiles = [0.95]
97 self.tags = tags
98 self.hostname = hostname
99 self.device_name = device_name
100
101 def sample(self, value, sample_rate):
102 self.count += int(1 / sample_rate)
103 self.samples.append(value)
104 self.last_sample_time = time()
105
106 def flush(self, ts, interval):
107 if not self.count:
108 return []
109
110 self.samples.sort()
111 length = len(self.samples)
112
113 max_ = self.samples[-1]
114 med = self.samples[int(round(length/2 - 1))]
115 avg = sum(self.samples) / float(length)
116
117 metric_aggrs = [
118 ('max', max_),
119 ('median', med),
120 ('avg', avg),
121 ('count', self.count/interval)
122 ]
123
124 metrics = [self.formatter(
125 hostname=self.hostname,
126 device_name=self.device_name,
127 tags=self.tags,
128 metric='%s.%s' % (self.name, suffix),
129 value=value,
130 timestamp=ts
131 ) for suffix, value in metric_aggrs
132 ]
133
134 for p in self.percentiles:
135 val = self.samples[int(round(p * length - 1))]
136 name = '%s.%spercentile' % (self.name, int(p * 100))
137 metrics.append(self.formatter(
138 hostname=self.hostname,
139 tags=self.tags,
140 metric=name,
141 value=val,
142 timestamp=ts
143 ))
144
145 # Reset our state.
146 self.samples = []
147 self.count = 0
148
149 return metrics
150
151
152 class Set(Metric):
153 """ A metric to track the number of unique elements in a set. """
154
155 def __init__(self, formatter, name, tags, hostname, device_name):
156 self.formatter = formatter
157 self.name = name
158 self.tags = tags
159 self.hostname = hostname
160 self.device_name = device_name
161 self.values = set()
162
163 def sample(self, value, sample_rate):
164 self.values.add(value)
165 self.last_sample_time = time()
166
167 def flush(self, timestamp, interval):
168 if not self.values:
169 return []
170 try:
171 return [self.formatter(
172 hostname=self.hostname,
173 device_name=self.device_name,
174 tags=self.tags,
175 metric=self.name,
176 value=len(self.values),
177 timestamp=timestamp
178 )]
179 finally:
180 self.values = set()
181
182
183 class Rate(Metric):
184 """ Track the rate of metrics over each flush interval """
185
186 def __init__(self, formatter, name, tags, hostname, device_name):
187 self.formatter = formatter
188 self.name = name
189 self.tags = tags
190 self.hostname = hostname
191 self.device_name = device_name
192 self.samples = []
193
194 def sample(self, value, sample_rate):
195 ts = time()
196 self.samples.append((int(ts), value))
197 self.last_sample_time = ts
198
199 def _rate(self, sample1, sample2):
200 interval = sample2[0] - sample1[0]
201 if interval == 0:
202 logger.warn('Metric %s has an interval of 0. Not flushing.' % self.name)
203 raise Infinity()
204
205 delta = sample2[1] - sample1[1]
206 if delta < 0:
207 logger.warn('Metric %s has a rate < 0. Not flushing.' % self.name)
208 raise UnknownValue()
209
210 return (delta / interval)
211
212 def flush(self, timestamp, interval):
213 if len(self.samples) < 2:
214 return []
215 try:
216 try:
217 val = self._rate(self.samples[-2], self.samples[-1])
218 except:
219 return []
220
221 return [self.formatter(
222 hostname=self.hostname,
223 device_name=self.device_name,
224 tags=self.tags,
225 metric=self.name,
226 value=val,
227 timestamp=timestamp
228 )]
229 finally:
230 self.samples = self.samples[-1:]
231
232
233
234 class MetricsAggregator(object):
235 """
236 A metric aggregator class.
237 """
238
239 def __init__(self, hostname, interval=1.0, expiry_seconds=300, formatter=None):
240 self.metrics = {}
241 self.total_count = 0
242 self.count = 0
243 self.metric_type_to_class = {
244 'g': Gauge,
245 'c': Counter,
246 'h': Histogram,
247 'ms' : Histogram,
248 's' : Set,
249 '_dd-r': Rate,
250 }
251 self.hostname = hostname
252 self.expiry_seconds = expiry_seconds
253 self.formatter = formatter or self.api_formatter
254 self.interval = float(interval)
255
256 def packets_per_second(self, interval):
257 return round(float(self.count)/interval, 2)
258
259 def submit_packets(self, packets):
260
261 for packet in packets.split("\n"):
262 self.count += 1
263 # We can have colons in tags, so split once.
264 name_and_metadata = packet.split(':', 1)
265
266 if not packet.strip():
267 continue
268
269 if len(name_and_metadata) != 2:
270 raise Exception('Unparseable packet: %s' % packet)
271
272 name = name_and_metadata[0]
273 metadata = name_and_metadata[1].split('|')
274
275 if len(metadata) < 2:
276 raise Exception('Unparseable packet: %s' % packet)
277
278 # Try to cast as an int first to avoid precision issues, then as a
279 # float.
280 try:
281 value = int(metadata[0])
282 except ValueError:
283 try:
284 value = float(metadata[0])
285 except ValueError:
286 raise Exception('Metric value must be a number: %s, %s' % name, metadata[0])
287
288 # Parse the optional values - sample rate & tags.
289 sample_rate = 1
290 tags = None
291 for m in metadata[2:]:
292 # Parse the sample rate
293 if m[0] == '@':
294 sample_rate = float(m[1:])
295 assert 0 <= sample_rate <= 1
296 elif m[0] == '#':
297 tags = tuple(sorted(m[1:].split(',')))
298
299 # Submit the metric
300 mtype = metadata[1]
301 self.submit_metric(name, value, mtype, tags=tags, sample_rate=sample_rate)
302
303 def submit_metric(self, name, value, mtype, tags=None, hostname=None,
304 device_name=None, timestamp=None, sample_rate=1):
305 # Avoid calling extra functions to dedupe tags if there are none
306 if tags is None:
307 context = (name, tuple(), hostname, device_name)
308 else:
309 context = (name, tuple(sorted(set(tags))), hostname, device_name)
310 if context not in self.metrics:
311 metric_class = self.metric_type_to_class[mtype]
312 self.metrics[context] = metric_class(self.formatter, name, tags,
313 hostname or self.hostname, device_name)
314 self.metrics[context].sample(value, sample_rate)
315
316 def gauge(self, name, value, tags=None, hostname=None, device_name=None, timestamp=None):
317 self.submit_metric(name, value, 'g', tags, hostname, device_name, timestamp)
318
319 def increment(self, name, value=1, tags=None, hostname=None, device_name=None):
320 self.submit_metric(name, value, 'c', tags, hostname, device_name)
321
322 def decrement(self, name, value=-1, tags=None, hostname=None, device_name=None):
323 self.submit_metric(name, value, 'c', tags, hostname, device_name)
324
325 def rate(self, name, value, tags=None, hostname=None, device_name=None):
326 self.submit_metric(name, value, '_dd-r', tags, hostname, device_name)
327
328 def histogram(self, name, value, tags=None, hostname=None, device_name=None):
329 self.submit_metric(name, value, 'h', tags, hostname, device_name)
330
331 def set(self, name, value, tags=None, hostname=None, device_name=None):
332 self.submit_metric(name, value, 's', tags, hostname, device_name)
333
334 def flush(self):
335 timestamp = time()
336 expiry_timestamp = timestamp - self.expiry_seconds
337
338 # Flush points and remove expired metrics. We mutate this dictionary
339 # while iterating so don't use an iterator.
340 metrics = []
341 for context, metric in self.metrics.items():
342 if metric.last_sample_time < expiry_timestamp:
343 logger.debug("%s hasn't been submitted in %ss. Expiring." % (context, self.expiry_seconds))
344 del self.metrics[context]
345 else:
346 metrics += metric.flush(timestamp, self.interval)
347
348 # Save some stats.
349 logger.debug("received %s payloads since last flush" % self.count)
350 self.total_count += self.count
351 self.count = 0
352 return metrics
353
354 def send_packet_count(self, metric_name):
355 self.submit_metric(metric_name, self.count, 'g')
356
357 def api_formatter(self, metric, value, timestamp, tags, hostname, device_name=None):
358 return {
359 'metric' : metric,
360 'points' : [(timestamp, value)],
361 'tags' : tags,
362 'host' : hostname,
363 'device_name': device_name
364 }
365
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/aggregator.py b/aggregator.py
--- a/aggregator.py
+++ b/aggregator.py
@@ -236,6 +236,9 @@
A metric aggregator class.
"""
+ # Types of metrics that allow strings
+ ALLOW_STRINGS = ['s', ]
+
def __init__(self, hostname, interval=1.0, expiry_seconds=300, formatter=None):
self.metrics = {}
self.total_count = 0
@@ -283,7 +286,13 @@
try:
value = float(metadata[0])
except ValueError:
- raise Exception('Metric value must be a number: %s, %s' % name, metadata[0])
+
+ # If the data type is Set, we will allow strings
+ if metadata[1] in self.ALLOW_STRINGS:
+ value = metadata[0]
+ else:
+ # Otherwise, raise an error saying it must be a number
+ raise Exception('Metric value must be a number: %s, %s' % (name, metadata[0]))
# Parse the optional values - sample rate & tags.
sample_rate = 1
| {"golden_diff": "diff --git a/aggregator.py b/aggregator.py\n--- a/aggregator.py\n+++ b/aggregator.py\n@@ -236,6 +236,9 @@\n A metric aggregator class.\n \"\"\"\n \n+ # Types of metrics that allow strings\n+ ALLOW_STRINGS = ['s', ]\n+\n def __init__(self, hostname, interval=1.0, expiry_seconds=300, formatter=None):\n self.metrics = {}\n self.total_count = 0\n@@ -283,7 +286,13 @@\n try:\n value = float(metadata[0])\n except ValueError:\n- raise Exception('Metric value must be a number: %s, %s' % name, metadata[0])\n+\n+ # If the data type is Set, we will allow strings\n+ if metadata[1] in self.ALLOW_STRINGS:\n+ value = metadata[0]\n+ else:\n+ # Otherwise, raise an error saying it must be a number\n+ raise Exception('Metric value must be a number: %s, %s' % (name, metadata[0]))\n \n # Parse the optional values - sample rate & tags.\n sample_rate = 1\n", "issue": "Allow strings to be elements in dogstatsd sets\nRight now it only counts numbers.\n\n", "before_files": [{"content": "import logging\nfrom time import time\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass Infinity(Exception): pass\nclass UnknownValue(Exception): pass\n\nclass Metric(object):\n \"\"\"\n A base metric class that accepts points, slices them into time intervals\n and performs roll-ups within those intervals.\n \"\"\"\n\n def sample(self, value, sample_rate):\n \"\"\" Add a point to the given metric. \"\"\"\n raise NotImplementedError()\n\n def flush(self, timestamp, interval):\n \"\"\" Flush all metrics up to the given timestamp. \"\"\"\n raise NotImplementedError()\n\n\nclass Gauge(Metric):\n \"\"\" A metric that tracks a value at particular points in time. \"\"\"\n\n def __init__(self, formatter, name, tags, hostname, device_name):\n self.formatter = formatter\n self.name = name\n self.value = None\n self.tags = tags\n self.hostname = hostname\n self.device_name = device_name\n self.last_sample_time = None\n\n def sample(self, value, sample_rate):\n self.value = value\n self.last_sample_time = time()\n\n def flush(self, timestamp, interval):\n if self.value is not None:\n res = [self.formatter(\n metric=self.name,\n timestamp=timestamp,\n value=self.value,\n tags=self.tags,\n hostname=self.hostname,\n device_name=self.device_name\n )]\n self.value = None\n return res\n\n return []\n\n\nclass Counter(Metric):\n \"\"\" A metric that tracks a counter value. \"\"\"\n\n def __init__(self, formatter, name, tags, hostname, device_name):\n self.formatter = formatter\n self.name = name\n self.value = 0\n self.tags = tags\n self.hostname = hostname\n self.device_name = device_name\n\n def sample(self, value, sample_rate):\n self.value += value * int(1 / sample_rate)\n self.last_sample_time = time()\n\n def flush(self, timestamp, interval):\n try:\n value = self.value / interval\n return [self.formatter(\n metric=self.name,\n value=value,\n timestamp=timestamp,\n tags=self.tags,\n hostname=self.hostname,\n device_name=self.device_name\n )]\n finally:\n self.value = 0\n\n\nclass Histogram(Metric):\n \"\"\" A metric to track the distribution of a set of values. 
\"\"\"\n\n def __init__(self, formatter, name, tags, hostname, device_name):\n self.formatter = formatter\n self.name = name\n self.count = 0\n self.samples = []\n self.percentiles = [0.95]\n self.tags = tags\n self.hostname = hostname\n self.device_name = device_name\n\n def sample(self, value, sample_rate):\n self.count += int(1 / sample_rate)\n self.samples.append(value)\n self.last_sample_time = time()\n\n def flush(self, ts, interval):\n if not self.count:\n return []\n\n self.samples.sort()\n length = len(self.samples)\n\n max_ = self.samples[-1]\n med = self.samples[int(round(length/2 - 1))]\n avg = sum(self.samples) / float(length)\n\n metric_aggrs = [\n ('max', max_),\n ('median', med),\n ('avg', avg),\n ('count', self.count/interval)\n ]\n\n metrics = [self.formatter(\n hostname=self.hostname,\n device_name=self.device_name,\n tags=self.tags,\n metric='%s.%s' % (self.name, suffix),\n value=value,\n timestamp=ts\n ) for suffix, value in metric_aggrs\n ]\n\n for p in self.percentiles:\n val = self.samples[int(round(p * length - 1))]\n name = '%s.%spercentile' % (self.name, int(p * 100))\n metrics.append(self.formatter(\n hostname=self.hostname,\n tags=self.tags,\n metric=name,\n value=val,\n timestamp=ts\n ))\n\n # Reset our state.\n self.samples = []\n self.count = 0\n\n return metrics\n\n\nclass Set(Metric):\n \"\"\" A metric to track the number of unique elements in a set. \"\"\"\n\n def __init__(self, formatter, name, tags, hostname, device_name):\n self.formatter = formatter\n self.name = name\n self.tags = tags\n self.hostname = hostname\n self.device_name = device_name\n self.values = set()\n\n def sample(self, value, sample_rate):\n self.values.add(value)\n self.last_sample_time = time()\n\n def flush(self, timestamp, interval):\n if not self.values:\n return []\n try:\n return [self.formatter(\n hostname=self.hostname,\n device_name=self.device_name,\n tags=self.tags,\n metric=self.name,\n value=len(self.values),\n timestamp=timestamp\n )]\n finally:\n self.values = set()\n\n\nclass Rate(Metric):\n \"\"\" Track the rate of metrics over each flush interval \"\"\"\n\n def __init__(self, formatter, name, tags, hostname, device_name):\n self.formatter = formatter\n self.name = name\n self.tags = tags\n self.hostname = hostname\n self.device_name = device_name\n self.samples = []\n\n def sample(self, value, sample_rate):\n ts = time()\n self.samples.append((int(ts), value))\n self.last_sample_time = ts\n\n def _rate(self, sample1, sample2):\n interval = sample2[0] - sample1[0]\n if interval == 0:\n logger.warn('Metric %s has an interval of 0. Not flushing.' % self.name)\n raise Infinity()\n\n delta = sample2[1] - sample1[1]\n if delta < 0:\n logger.warn('Metric %s has a rate < 0. Not flushing.' 
% self.name)\n raise UnknownValue()\n\n return (delta / interval)\n\n def flush(self, timestamp, interval):\n if len(self.samples) < 2:\n return []\n try:\n try:\n val = self._rate(self.samples[-2], self.samples[-1])\n except:\n return []\n\n return [self.formatter(\n hostname=self.hostname,\n device_name=self.device_name,\n tags=self.tags,\n metric=self.name,\n value=val,\n timestamp=timestamp\n )]\n finally:\n self.samples = self.samples[-1:]\n\n\n\nclass MetricsAggregator(object):\n \"\"\"\n A metric aggregator class.\n \"\"\"\n\n def __init__(self, hostname, interval=1.0, expiry_seconds=300, formatter=None):\n self.metrics = {}\n self.total_count = 0\n self.count = 0\n self.metric_type_to_class = {\n 'g': Gauge,\n 'c': Counter,\n 'h': Histogram,\n 'ms' : Histogram,\n 's' : Set,\n '_dd-r': Rate,\n }\n self.hostname = hostname\n self.expiry_seconds = expiry_seconds\n self.formatter = formatter or self.api_formatter\n self.interval = float(interval)\n\n def packets_per_second(self, interval):\n return round(float(self.count)/interval, 2)\n\n def submit_packets(self, packets):\n\n for packet in packets.split(\"\\n\"):\n self.count += 1\n # We can have colons in tags, so split once.\n name_and_metadata = packet.split(':', 1)\n\n if not packet.strip():\n continue\n\n if len(name_and_metadata) != 2:\n raise Exception('Unparseable packet: %s' % packet)\n\n name = name_and_metadata[0]\n metadata = name_and_metadata[1].split('|')\n\n if len(metadata) < 2:\n raise Exception('Unparseable packet: %s' % packet)\n\n # Try to cast as an int first to avoid precision issues, then as a\n # float.\n try:\n value = int(metadata[0])\n except ValueError:\n try:\n value = float(metadata[0])\n except ValueError:\n raise Exception('Metric value must be a number: %s, %s' % name, metadata[0])\n\n # Parse the optional values - sample rate & tags.\n sample_rate = 1\n tags = None\n for m in metadata[2:]:\n # Parse the sample rate\n if m[0] == '@':\n sample_rate = float(m[1:])\n assert 0 <= sample_rate <= 1\n elif m[0] == '#':\n tags = tuple(sorted(m[1:].split(',')))\n\n # Submit the metric\n mtype = metadata[1]\n self.submit_metric(name, value, mtype, tags=tags, sample_rate=sample_rate)\n\n def submit_metric(self, name, value, mtype, tags=None, hostname=None,\n device_name=None, timestamp=None, sample_rate=1):\n # Avoid calling extra functions to dedupe tags if there are none\n if tags is None:\n context = (name, tuple(), hostname, device_name)\n else:\n context = (name, tuple(sorted(set(tags))), hostname, device_name)\n if context not in self.metrics:\n metric_class = self.metric_type_to_class[mtype]\n self.metrics[context] = metric_class(self.formatter, name, tags,\n hostname or self.hostname, device_name)\n self.metrics[context].sample(value, sample_rate)\n\n def gauge(self, name, value, tags=None, hostname=None, device_name=None, timestamp=None):\n self.submit_metric(name, value, 'g', tags, hostname, device_name, timestamp)\n\n def increment(self, name, value=1, tags=None, hostname=None, device_name=None):\n self.submit_metric(name, value, 'c', tags, hostname, device_name)\n\n def decrement(self, name, value=-1, tags=None, hostname=None, device_name=None):\n self.submit_metric(name, value, 'c', tags, hostname, device_name)\n\n def rate(self, name, value, tags=None, hostname=None, device_name=None):\n self.submit_metric(name, value, '_dd-r', tags, hostname, device_name)\n\n def histogram(self, name, value, tags=None, hostname=None, device_name=None):\n self.submit_metric(name, value, 'h', tags, hostname, 
device_name)\n\n def set(self, name, value, tags=None, hostname=None, device_name=None):\n self.submit_metric(name, value, 's', tags, hostname, device_name)\n\n def flush(self):\n timestamp = time()\n expiry_timestamp = timestamp - self.expiry_seconds\n\n # Flush points and remove expired metrics. We mutate this dictionary\n # while iterating so don't use an iterator.\n metrics = []\n for context, metric in self.metrics.items():\n if metric.last_sample_time < expiry_timestamp:\n logger.debug(\"%s hasn't been submitted in %ss. Expiring.\" % (context, self.expiry_seconds))\n del self.metrics[context]\n else:\n metrics += metric.flush(timestamp, self.interval)\n\n # Save some stats.\n logger.debug(\"received %s payloads since last flush\" % self.count)\n self.total_count += self.count\n self.count = 0\n return metrics\n\n def send_packet_count(self, metric_name):\n self.submit_metric(metric_name, self.count, 'g')\n\n def api_formatter(self, metric, value, timestamp, tags, hostname, device_name=None):\n return {\n 'metric' : metric,\n 'points' : [(timestamp, value)],\n 'tags' : tags,\n 'host' : hostname,\n 'device_name': device_name\n }\n", "path": "aggregator.py"}], "after_files": [{"content": "import logging\nfrom time import time\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass Infinity(Exception): pass\nclass UnknownValue(Exception): pass\n\nclass Metric(object):\n \"\"\"\n A base metric class that accepts points, slices them into time intervals\n and performs roll-ups within those intervals.\n \"\"\"\n\n def sample(self, value, sample_rate):\n \"\"\" Add a point to the given metric. \"\"\"\n raise NotImplementedError()\n\n def flush(self, timestamp, interval):\n \"\"\" Flush all metrics up to the given timestamp. \"\"\"\n raise NotImplementedError()\n\n\nclass Gauge(Metric):\n \"\"\" A metric that tracks a value at particular points in time. \"\"\"\n\n def __init__(self, formatter, name, tags, hostname, device_name):\n self.formatter = formatter\n self.name = name\n self.value = None\n self.tags = tags\n self.hostname = hostname\n self.device_name = device_name\n self.last_sample_time = None\n\n def sample(self, value, sample_rate):\n self.value = value\n self.last_sample_time = time()\n\n def flush(self, timestamp, interval):\n if self.value is not None:\n res = [self.formatter(\n metric=self.name,\n timestamp=timestamp,\n value=self.value,\n tags=self.tags,\n hostname=self.hostname,\n device_name=self.device_name\n )]\n self.value = None\n return res\n\n return []\n\n\nclass Counter(Metric):\n \"\"\" A metric that tracks a counter value. \"\"\"\n\n def __init__(self, formatter, name, tags, hostname, device_name):\n self.formatter = formatter\n self.name = name\n self.value = 0\n self.tags = tags\n self.hostname = hostname\n self.device_name = device_name\n\n def sample(self, value, sample_rate):\n self.value += value * int(1 / sample_rate)\n self.last_sample_time = time()\n\n def flush(self, timestamp, interval):\n try:\n value = self.value / interval\n return [self.formatter(\n metric=self.name,\n value=value,\n timestamp=timestamp,\n tags=self.tags,\n hostname=self.hostname,\n device_name=self.device_name\n )]\n finally:\n self.value = 0\n\n\nclass Histogram(Metric):\n \"\"\" A metric to track the distribution of a set of values. 
\"\"\"\n\n def __init__(self, formatter, name, tags, hostname, device_name):\n self.formatter = formatter\n self.name = name\n self.count = 0\n self.samples = []\n self.percentiles = [0.95]\n self.tags = tags\n self.hostname = hostname\n self.device_name = device_name\n\n def sample(self, value, sample_rate):\n self.count += int(1 / sample_rate)\n self.samples.append(value)\n self.last_sample_time = time()\n\n def flush(self, ts, interval):\n if not self.count:\n return []\n\n self.samples.sort()\n length = len(self.samples)\n\n max_ = self.samples[-1]\n med = self.samples[int(round(length/2 - 1))]\n avg = sum(self.samples) / float(length)\n\n metric_aggrs = [\n ('max', max_),\n ('median', med),\n ('avg', avg),\n ('count', self.count/interval)\n ]\n\n metrics = [self.formatter(\n hostname=self.hostname,\n device_name=self.device_name,\n tags=self.tags,\n metric='%s.%s' % (self.name, suffix),\n value=value,\n timestamp=ts\n ) for suffix, value in metric_aggrs\n ]\n\n for p in self.percentiles:\n val = self.samples[int(round(p * length - 1))]\n name = '%s.%spercentile' % (self.name, int(p * 100))\n metrics.append(self.formatter(\n hostname=self.hostname,\n tags=self.tags,\n metric=name,\n value=val,\n timestamp=ts\n ))\n\n # Reset our state.\n self.samples = []\n self.count = 0\n\n return metrics\n\n\nclass Set(Metric):\n \"\"\" A metric to track the number of unique elements in a set. \"\"\"\n\n def __init__(self, formatter, name, tags, hostname, device_name):\n self.formatter = formatter\n self.name = name\n self.tags = tags\n self.hostname = hostname\n self.device_name = device_name\n self.values = set()\n\n def sample(self, value, sample_rate):\n self.values.add(value)\n self.last_sample_time = time()\n\n def flush(self, timestamp, interval):\n if not self.values:\n return []\n try:\n return [self.formatter(\n hostname=self.hostname,\n device_name=self.device_name,\n tags=self.tags,\n metric=self.name,\n value=len(self.values),\n timestamp=timestamp\n )]\n finally:\n self.values = set()\n\n\nclass Rate(Metric):\n \"\"\" Track the rate of metrics over each flush interval \"\"\"\n\n def __init__(self, formatter, name, tags, hostname, device_name):\n self.formatter = formatter\n self.name = name\n self.tags = tags\n self.hostname = hostname\n self.device_name = device_name\n self.samples = []\n\n def sample(self, value, sample_rate):\n ts = time()\n self.samples.append((int(ts), value))\n self.last_sample_time = ts\n\n def _rate(self, sample1, sample2):\n interval = sample2[0] - sample1[0]\n if interval == 0:\n logger.warn('Metric %s has an interval of 0. Not flushing.' % self.name)\n raise Infinity()\n\n delta = sample2[1] - sample1[1]\n if delta < 0:\n logger.warn('Metric %s has a rate < 0. Not flushing.' 
% self.name)\n raise UnknownValue()\n\n return (delta / interval)\n\n def flush(self, timestamp, interval):\n if len(self.samples) < 2:\n return []\n try:\n try:\n val = self._rate(self.samples[-2], self.samples[-1])\n except:\n return []\n\n return [self.formatter(\n hostname=self.hostname,\n device_name=self.device_name,\n tags=self.tags,\n metric=self.name,\n value=val,\n timestamp=timestamp\n )]\n finally:\n self.samples = self.samples[-1:]\n\n\n\nclass MetricsAggregator(object):\n \"\"\"\n A metric aggregator class.\n \"\"\"\n\n # Types of metrics that allow strings\n ALLOW_STRINGS = ['s', ]\n\n def __init__(self, hostname, interval=1.0, expiry_seconds=300, formatter=None):\n self.metrics = {}\n self.total_count = 0\n self.count = 0\n self.metric_type_to_class = {\n 'g': Gauge,\n 'c': Counter,\n 'h': Histogram,\n 'ms' : Histogram,\n 's' : Set,\n '_dd-r': Rate,\n }\n self.hostname = hostname\n self.expiry_seconds = expiry_seconds\n self.formatter = formatter or self.api_formatter\n self.interval = float(interval)\n\n def packets_per_second(self, interval):\n return round(float(self.count)/interval, 2)\n\n def submit_packets(self, packets):\n\n for packet in packets.split(\"\\n\"):\n self.count += 1\n # We can have colons in tags, so split once.\n name_and_metadata = packet.split(':', 1)\n\n if not packet.strip():\n continue\n\n if len(name_and_metadata) != 2:\n raise Exception('Unparseable packet: %s' % packet)\n\n name = name_and_metadata[0]\n metadata = name_and_metadata[1].split('|')\n\n if len(metadata) < 2:\n raise Exception('Unparseable packet: %s' % packet)\n\n # Try to cast as an int first to avoid precision issues, then as a\n # float.\n try:\n value = int(metadata[0])\n except ValueError:\n try:\n value = float(metadata[0])\n except ValueError:\n\n # If the data type is Set, we will allow strings\n if metadata[1] in self.ALLOW_STRINGS:\n value = metadata[0]\n else:\n # Otherwise, raise an error saying it must be a number\n raise Exception('Metric value must be a number: %s, %s' % (name, metadata[0]))\n\n # Parse the optional values - sample rate & tags.\n sample_rate = 1\n tags = None\n for m in metadata[2:]:\n # Parse the sample rate\n if m[0] == '@':\n sample_rate = float(m[1:])\n assert 0 <= sample_rate <= 1\n elif m[0] == '#':\n tags = tuple(sorted(m[1:].split(',')))\n\n # Submit the metric\n mtype = metadata[1]\n self.submit_metric(name, value, mtype, tags=tags, sample_rate=sample_rate)\n\n def submit_metric(self, name, value, mtype, tags=None, hostname=None,\n device_name=None, timestamp=None, sample_rate=1):\n # If the value is not NaN, +infinity or -infinity\n if value == value and value != float('inf') and value != float('-inf'):\n # Avoid calling extra functions to dedupe tags if there are none\n if tags is None:\n context = (name, tuple(), hostname, device_name)\n else:\n context = (name, tuple(sorted(set(tags))), hostname, device_name)\n if context not in self.metrics:\n metric_class = self.metric_type_to_class[mtype]\n self.metrics[context] = metric_class(self.formatter, name, tags,\n hostname or self.hostname, device_name)\n self.metrics[context].sample(value, sample_rate)\n\n elif value != value:\n logger.warning(\"Trying to send a NaN value for metric %s\" % name)\n elif value == float('inf'):\n logger.warning(\"Trying to send an Infinity value for metric %s\" % name)\n elif value == float('-inf'):\n logger.warning(\"Trying to send an -Infinity value for metric %s\" % name)\n\n def gauge(self, name, value, tags=None, hostname=None, device_name=None, 
timestamp=None):\n self.submit_metric(name, value, 'g', tags, hostname, device_name, timestamp)\n\n def increment(self, name, value=1, tags=None, hostname=None, device_name=None):\n self.submit_metric(name, value, 'c', tags, hostname, device_name)\n\n def decrement(self, name, value=-1, tags=None, hostname=None, device_name=None):\n self.submit_metric(name, value, 'c', tags, hostname, device_name)\n\n def rate(self, name, value, tags=None, hostname=None, device_name=None):\n self.submit_metric(name, value, '_dd-r', tags, hostname, device_name)\n\n def histogram(self, name, value, tags=None, hostname=None, device_name=None):\n self.submit_metric(name, value, 'h', tags, hostname, device_name)\n\n def set(self, name, value, tags=None, hostname=None, device_name=None):\n self.submit_metric(name, value, 's', tags, hostname, device_name)\n\n def flush(self):\n timestamp = time()\n expiry_timestamp = timestamp - self.expiry_seconds\n\n # Flush points and remove expired metrics. We mutate this dictionary\n # while iterating so don't use an iterator.\n metrics = []\n for context, metric in self.metrics.items():\n if metric.last_sample_time < expiry_timestamp:\n logger.debug(\"%s hasn't been submitted in %ss. Expiring.\" % (context, self.expiry_seconds))\n del self.metrics[context]\n else:\n metrics += metric.flush(timestamp, self.interval)\n\n # Save some stats.\n logger.debug(\"received %s payloads since last flush\" % self.count)\n self.total_count += self.count\n self.count = 0\n return metrics\n\n def send_packet_count(self, metric_name):\n self.submit_metric(metric_name, self.count, 'g')\n\n def api_formatter(self, metric, value, timestamp, tags, hostname, device_name=None):\n return {\n 'metric' : metric,\n 'points' : [(timestamp, value)],\n 'tags' : tags,\n 'host' : hostname,\n 'device_name': device_name\n }\n", "path": "aggregator.py"}]} | 3,795 | 264 |
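For the dogstatsd record above (the `aggregator.py` change): with the `ALLOW_STRINGS` guard in place, a packet whose metric type is `s` (set) may carry a string member instead of a number, and the set still counts unique members at flush time; every other metric type keeps rejecting non-numeric values. A minimal sketch of that behaviour, assuming `aggregator.py` is importable from the working directory (the metric name and members are illustrative):

```python
from aggregator import MetricsAggregator

agg = MetricsAggregator("my-host")
agg.submit_packets("page.visitors:alice|s")  # string member, allowed because the type is 's'
agg.submit_packets("page.visitors:42|s")     # numeric members still work as before
agg.submit_packets("page.visitors:alice|s")  # duplicate members collapse inside the set

points = agg.flush()
assert points[0]["points"][0][1] == 2        # two unique members: 'alice' and 42
```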
gh_patches_debug_3817 | rasdani/github-patches | git_diff | opsdroid__opsdroid-1220 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Slack connector crashes after running dance several times
# Description
Slack connector crashes after running dance several times in a row
## Steps to Reproduce
Launch opsdroid with the Slack connector configured with a bot token.
Go to the connected channel and run `dance` 3+ times in a row.
## Expected Functionality
Opsdroid keeps running fine
## Experienced Functionality
Opsdroid crashes with the following error:
```
ERROR slack.rtm.client: When calling '#process_message()' in the 'opsdroid.connector.slack' module the following error was raised: 'user'
Traceback (most recent call last):
File "C:\Users\koe4945\desktop\git\personal\ops\venv\Scripts\opsdroid-script.py", line 11, in <module>
load_entry_point('opsdroid', 'console_scripts', 'opsdroid')()
File "c:\users\koe4945\desktop\git\personal\ops\venv\lib\site-packages\click\core.py", line 764, in __call__
return self.main(*args, **kwargs)
File "c:\users\koe4945\desktop\git\personal\ops\venv\lib\site-packages\click\core.py", line 717, in main
rv = self.invoke(ctx)
File "c:\users\koe4945\desktop\git\personal\ops\venv\lib\site-packages\click\core.py", line 1137, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "c:\users\koe4945\desktop\git\personal\ops\venv\lib\site-packages\click\core.py", line 956, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "c:\users\koe4945\desktop\git\personal\ops\venv\lib\site-packages\click\core.py", line 555, in invoke
return callback(*args, **kwargs)
File "c:\users\koe4945\desktop\git\personal\ops\opsdroid\opsdroid\cli\start.py", line 31, in start
opsdroid.run()
File "c:\users\koe4945\desktop\git\personal\ops\opsdroid\opsdroid\core.py", line 161, in run
self.eventloop.run_until_complete(asyncio.gather(*pending))
File "C:\Users\koe4945\AppData\Local\Programs\Python\Python37\lib\asyncio\base_events.py", line 584, in run_until_complete
return future.result()
File "c:\users\koe4945\desktop\git\personal\ops\python-slackclient\slack\rtm\client.py", line 339, in _connect_and_read
await self._read_messages()
File "c:\users\koe4945\desktop\git\personal\ops\python-slackclient\slack\rtm\client.py", line 390, in _read_messages
await self._dispatch_event(event, data=payload)
File "c:\users\koe4945\desktop\git\personal\ops\python-slackclient\slack\rtm\client.py", line 437, in _dispatch_event
rtm_client=self, web_client=self._web_client, data=data
File "c:\users\koe4945\desktop\git\personal\ops\opsdroid\opsdroid\connector\slack\__init__.py", line 104, in process_message
user_info = await self.lookup_username(message["user"])
KeyError: 'user'
ERROR: Unhandled exception in opsdroid, exiting...
```
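A hedged reading of the traceback, not stated in the report itself: the `KeyError: 'user'` means the event that reached `process_message` has no top-level `user` key. That is exactly the shape of Slack's `message_changed` sub-events, where the sender sits inside a nested `message` object, and repeated `dance` responses appear to trigger such edits. A small self-contained sketch of the payload shape and a guard that skips it (all field values are invented for illustration):

```python
# Shape of a Slack RTM "message_changed" sub-event: there is no top-level "user"
# key, so message["user"] raises KeyError inside the connector.
message_changed_event = {
    "type": "message",
    "subtype": "message_changed",
    "channel": "C0123456",
    "ts": "1571234567.000200",
    "message": {"type": "message", "user": "U0123456", "text": "edited text"},
}


def should_process(message):
    """Return True only for events that carry a top-level sender."""
    if message.get("subtype") == "message_changed":
        return False
    return "user" in message


assert should_process({"user": "U0123456", "text": "dance", "channel": "C0123456"})
assert not should_process(message_changed_event)
```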
## Versions
- **Opsdroid version:** v0.16.0+62.g620590f
- **Python version:** 3.7.3
- **OS/Docker version:** Windows 10
## Configuration File
```yaml
connectors:
- name: slack
# required
api-token: "[...bot token...]"
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `opsdroid/connector/slack/__init__.py`
Content:
```
1 """A connector for Slack."""
2 import logging
3 import re
4
5 import slack
6 from emoji import demojize
7
8 from opsdroid.connector import Connector, register_event
9 from opsdroid.events import Message, Reaction
10 from opsdroid.connector.slack.events import Blocks
11
12
13 _LOGGER = logging.getLogger(__name__)
14
15
16 class ConnectorSlack(Connector):
17 """A connector for Slack."""
18
19 def __init__(self, config, opsdroid=None):
20 """Create the connector."""
21 super().__init__(config, opsdroid=opsdroid)
22 _LOGGER.debug(_("Starting Slack connector"))
23 self.name = "slack"
24 self.default_target = config.get("default-room", "#general")
25 self.icon_emoji = config.get("icon-emoji", ":robot_face:")
26 self.token = config["api-token"]
27 self.timeout = config.get("connect-timeout", 10)
28 self.slack = slack.WebClient(token=self.token, run_async=True)
29 self.slack_rtm = slack.RTMClient(token=self.token, run_async=True)
30 self.websocket = None
31 self.bot_name = config.get("bot-name", "opsdroid")
32 self.auth_info = None
33 self.user_info = None
34 self.bot_id = None
35 self.known_users = {}
36 self.keepalive = None
37 self.reconnecting = False
38 self.listening = True
39 self._message_id = 0
40
41 # Register callbacks
42 slack.RTMClient.on(event="message", callback=self.process_message)
43
44 async def connect(self):
45 """Connect to the chat service."""
46 _LOGGER.info(_("Connecting to Slack"))
47
48 try:
49 # The slack library recommends you call `self.slack_rtm.start()`` here but it
50 # seems to mess with the event loop's signal handlers which breaks opsdroid.
51 # Therefore we need to directly call the private `_connect_and_read` method
52 # instead. This method also blocks so we need to dispatch it to the loop as a task.
53 self.opsdroid.eventloop.create_task(self.slack_rtm._connect_and_read())
54
55 self.auth_info = (await self.slack.api_call("auth.test")).data
56 self.user_info = (
57 await self.slack.api_call(
58 "users.info",
59 http_verb="GET",
60 params={"user": self.auth_info["user_id"]},
61 )
62 ).data
63 self.bot_id = self.user_info["user"]["profile"]["bot_id"]
64
65 _LOGGER.debug(_("Connected as %s"), self.bot_name)
66 _LOGGER.debug(_("Using icon %s"), self.icon_emoji)
67 _LOGGER.debug(_("Default room is %s"), self.default_target)
68 _LOGGER.info(_("Connected successfully"))
69 except slack.errors.SlackApiError as error:
70 _LOGGER.error(
71 _(
72 "Unable to connect to Slack due to %s - "
73 "The Slack Connector will not be available."
74 ),
75 error,
76 )
77 except Exception:
78 await self.disconnect()
79 raise
80
81 async def disconnect(self):
82 """Disconnect from Slack."""
83 await self.slack_rtm.stop()
84 self.listening = False
85
86 async def listen(self):
87 """Listen for and parse new messages."""
88
89 async def process_message(self, **payload):
90 """Process a raw message and pass it to the parser."""
91 message = payload["data"]
92
93 # Ignore own messages
94 if (
95 "subtype" in message
96 and message["subtype"] == "bot_message"
97 and message["bot_id"] == self.bot_id
98 ):
99 return
100
101 # Lookup username
102 _LOGGER.debug(_("Looking up sender username"))
103 try:
104 user_info = await self.lookup_username(message["user"])
105 except ValueError:
106 return
107
108 # Replace usernames in the message
109 _LOGGER.debug(_("Replacing userids in message with usernames"))
110 message["text"] = await self.replace_usernames(message["text"])
111
112 await self.opsdroid.parse(
113 Message(
114 message["text"],
115 user_info["name"],
116 message["channel"],
117 self,
118 raw_event=message,
119 )
120 )
121
122 @register_event(Message)
123 async def send_message(self, message):
124 """Respond with a message."""
125 _LOGGER.debug(
126 _("Responding with: '%s' in room %s"), message.text, message.target
127 )
128 await self.slack.api_call(
129 "chat.postMessage",
130 data={
131 "channel": message.target,
132 "text": message.text,
133 "as_user": False,
134 "username": self.bot_name,
135 "icon_emoji": self.icon_emoji,
136 },
137 )
138
139 @register_event(Blocks)
140 async def send_blocks(self, blocks):
141 """Respond with structured blocks."""
142 _LOGGER.debug(
143 _("Responding with interactive blocks in room %s"), blocks.target
144 )
145 await self.slack.api_call(
146 "chat.postMessage",
147 data={
148 "channel": blocks.target,
149 "username": self.bot_name,
150 "blocks": blocks.blocks,
151 "icon_emoji": self.icon_emoji,
152 },
153 )
154
155 @register_event(Reaction)
156 async def send_reaction(self, reaction):
157 """React to a message."""
158 emoji = demojize(reaction.emoji).replace(":", "")
159 _LOGGER.debug(_("Reacting with: %s"), emoji)
160 try:
161 await self.slack.api_call(
162 "reactions.add",
163 data={
164 "name": emoji,
165 "channel": reaction.target,
166 "timestamp": reaction.linked_event.raw_event["ts"],
167 },
168 )
169 except slack.errors.SlackApiError as error:
170 if "invalid_name" in str(error):
171 _LOGGER.warning(_("Slack does not support the emoji %s"), emoji)
172 else:
173 raise
174
175 async def lookup_username(self, userid):
176 """Lookup a username and cache it."""
177 if userid in self.known_users:
178 user_info = self.known_users[userid]
179 else:
180 response = await self.slack.users_info(user=userid)
181 user_info = response.data["user"]
182 if isinstance(user_info, dict):
183 self.known_users[userid] = user_info
184 else:
185 raise ValueError("Returned user is not a dict.")
186 return user_info
187
188 async def replace_usernames(self, message):
189 """Replace User ID with username in message text."""
190 userids = re.findall(r"\<\@([A-Z0-9]+)(?:\|.+)?\>", message)
191 for userid in userids:
192 user_info = await self.lookup_username(userid)
193 message = message.replace(
194 "<@{userid}>".format(userid=userid), user_info["name"]
195 )
196 return message
197
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/opsdroid/connector/slack/__init__.py b/opsdroid/connector/slack/__init__.py
--- a/opsdroid/connector/slack/__init__.py
+++ b/opsdroid/connector/slack/__init__.py
@@ -90,6 +90,10 @@
"""Process a raw message and pass it to the parser."""
message = payload["data"]
+ # Ignore message edits
+ if "subtype" in message and message["subtype"] == "message_changed":
+ return
+
# Ignore own messages
if (
"subtype" in message
| {"golden_diff": "diff --git a/opsdroid/connector/slack/__init__.py b/opsdroid/connector/slack/__init__.py\n--- a/opsdroid/connector/slack/__init__.py\n+++ b/opsdroid/connector/slack/__init__.py\n@@ -90,6 +90,10 @@\n \"\"\"Process a raw message and pass it to the parser.\"\"\"\n message = payload[\"data\"]\n \n+ # Ignore message edits\n+ if \"subtype\" in message and message[\"subtype\"] == \"message_changed\":\n+ return\n+\n # Ignore own messages\n if (\n \"subtype\" in message\n", "issue": "Slack connector crashes after running dance several times\n# Description\r\nSlack connector crashes after running dance several times in a row\r\n\r\n## Steps to Reproduce\r\nLaunch opsdroid with slack connector configured with bot token\r\ngo the channel connected in and run dance 3+ times in a row\r\n\r\n## Expected Functionality\r\nOpsdroid keeps running fine\r\n\r\n\r\n## Experienced Functionality\r\nOpsdroid crashes with the following error:\r\n```\r\nERROR slack.rtm.client: When calling '#process_message()' in the 'opsdroid.connector.slack' module the following error was raised: 'user'\r\nTraceback (most recent call last):\r\n File \"C:\\Users\\koe4945\\desktop\\git\\personal\\ops\\venv\\Scripts\\opsdroid-script.py\", line 11, in <module>\r\n load_entry_point('opsdroid', 'console_scripts', 'opsdroid')()\r\n File \"c:\\users\\koe4945\\desktop\\git\\personal\\ops\\venv\\lib\\site-packages\\click\\core.py\", line 764, in __call__\r\n return self.main(*args, **kwargs)\r\n File \"c:\\users\\koe4945\\desktop\\git\\personal\\ops\\venv\\lib\\site-packages\\click\\core.py\", line 717, in main\r\n rv = self.invoke(ctx)\r\n File \"c:\\users\\koe4945\\desktop\\git\\personal\\ops\\venv\\lib\\site-packages\\click\\core.py\", line 1137, in invoke\r\n return _process_result(sub_ctx.command.invoke(sub_ctx))\r\n File \"c:\\users\\koe4945\\desktop\\git\\personal\\ops\\venv\\lib\\site-packages\\click\\core.py\", line 956, in invoke\r\n return ctx.invoke(self.callback, **ctx.params)\r\n File \"c:\\users\\koe4945\\desktop\\git\\personal\\ops\\venv\\lib\\site-packages\\click\\core.py\", line 555, in invoke\r\n return callback(*args, **kwargs)\r\n File \"c:\\users\\koe4945\\desktop\\git\\personal\\ops\\opsdroid\\opsdroid\\cli\\start.py\", line 31, in start\r\n opsdroid.run()\r\n File \"c:\\users\\koe4945\\desktop\\git\\personal\\ops\\opsdroid\\opsdroid\\core.py\", line 161, in run\r\n self.eventloop.run_until_complete(asyncio.gather(*pending))\r\n File \"C:\\Users\\koe4945\\AppData\\Local\\Programs\\Python\\Python37\\lib\\asyncio\\base_events.py\", line 584, in run_until_complete\r\n return future.result()\r\n File \"c:\\users\\koe4945\\desktop\\git\\personal\\ops\\python-slackclient\\slack\\rtm\\client.py\", line 339, in _connect_and_read\r\n await self._read_messages()\r\n File \"c:\\users\\koe4945\\desktop\\git\\personal\\ops\\python-slackclient\\slack\\rtm\\client.py\", line 390, in _read_messages\r\n await self._dispatch_event(event, data=payload)\r\n File \"c:\\users\\koe4945\\desktop\\git\\personal\\ops\\python-slackclient\\slack\\rtm\\client.py\", line 437, in _dispatch_event\r\n rtm_client=self, web_client=self._web_client, data=data\r\n File \"c:\\users\\koe4945\\desktop\\git\\personal\\ops\\opsdroid\\opsdroid\\connector\\slack\\__init__.py\", line 104, in process_message\r\n user_info = await self.lookup_username(message[\"user\"])\r\nKeyError: 'user'\r\nERROR: Unhandled exception in opsdroid, exiting...\r\n```\r\n\r\n## Versions\r\n- **Opsdroid version:** v0.16.0+62.g620590f\r\n- **Python version:** 
3.7.3\r\n- **OS/Docker version:** Windows 10\r\n\r\n## Configuration File\r\nPlease include your version of the configuration file below.\r\n\r\n```yaml\r\nconnectors:\r\n - name: slack\r\n # required\r\n api-token: \"[...bot token...]\"\r\n\r\n```\r\n\r\n## Additional Details\r\nAny other details you wish to include such as screenshots, console messages, etc.\r\n\n", "before_files": [{"content": "\"\"\"A connector for Slack.\"\"\"\nimport logging\nimport re\n\nimport slack\nfrom emoji import demojize\n\nfrom opsdroid.connector import Connector, register_event\nfrom opsdroid.events import Message, Reaction\nfrom opsdroid.connector.slack.events import Blocks\n\n\n_LOGGER = logging.getLogger(__name__)\n\n\nclass ConnectorSlack(Connector):\n \"\"\"A connector for Slack.\"\"\"\n\n def __init__(self, config, opsdroid=None):\n \"\"\"Create the connector.\"\"\"\n super().__init__(config, opsdroid=opsdroid)\n _LOGGER.debug(_(\"Starting Slack connector\"))\n self.name = \"slack\"\n self.default_target = config.get(\"default-room\", \"#general\")\n self.icon_emoji = config.get(\"icon-emoji\", \":robot_face:\")\n self.token = config[\"api-token\"]\n self.timeout = config.get(\"connect-timeout\", 10)\n self.slack = slack.WebClient(token=self.token, run_async=True)\n self.slack_rtm = slack.RTMClient(token=self.token, run_async=True)\n self.websocket = None\n self.bot_name = config.get(\"bot-name\", \"opsdroid\")\n self.auth_info = None\n self.user_info = None\n self.bot_id = None\n self.known_users = {}\n self.keepalive = None\n self.reconnecting = False\n self.listening = True\n self._message_id = 0\n\n # Register callbacks\n slack.RTMClient.on(event=\"message\", callback=self.process_message)\n\n async def connect(self):\n \"\"\"Connect to the chat service.\"\"\"\n _LOGGER.info(_(\"Connecting to Slack\"))\n\n try:\n # The slack library recommends you call `self.slack_rtm.start()`` here but it\n # seems to mess with the event loop's signal handlers which breaks opsdroid.\n # Therefore we need to directly call the private `_connect_and_read` method\n # instead. 
This method also blocks so we need to dispatch it to the loop as a task.\n self.opsdroid.eventloop.create_task(self.slack_rtm._connect_and_read())\n\n self.auth_info = (await self.slack.api_call(\"auth.test\")).data\n self.user_info = (\n await self.slack.api_call(\n \"users.info\",\n http_verb=\"GET\",\n params={\"user\": self.auth_info[\"user_id\"]},\n )\n ).data\n self.bot_id = self.user_info[\"user\"][\"profile\"][\"bot_id\"]\n\n _LOGGER.debug(_(\"Connected as %s\"), self.bot_name)\n _LOGGER.debug(_(\"Using icon %s\"), self.icon_emoji)\n _LOGGER.debug(_(\"Default room is %s\"), self.default_target)\n _LOGGER.info(_(\"Connected successfully\"))\n except slack.errors.SlackApiError as error:\n _LOGGER.error(\n _(\n \"Unable to connect to Slack due to %s - \"\n \"The Slack Connector will not be available.\"\n ),\n error,\n )\n except Exception:\n await self.disconnect()\n raise\n\n async def disconnect(self):\n \"\"\"Disconnect from Slack.\"\"\"\n await self.slack_rtm.stop()\n self.listening = False\n\n async def listen(self):\n \"\"\"Listen for and parse new messages.\"\"\"\n\n async def process_message(self, **payload):\n \"\"\"Process a raw message and pass it to the parser.\"\"\"\n message = payload[\"data\"]\n\n # Ignore own messages\n if (\n \"subtype\" in message\n and message[\"subtype\"] == \"bot_message\"\n and message[\"bot_id\"] == self.bot_id\n ):\n return\n\n # Lookup username\n _LOGGER.debug(_(\"Looking up sender username\"))\n try:\n user_info = await self.lookup_username(message[\"user\"])\n except ValueError:\n return\n\n # Replace usernames in the message\n _LOGGER.debug(_(\"Replacing userids in message with usernames\"))\n message[\"text\"] = await self.replace_usernames(message[\"text\"])\n\n await self.opsdroid.parse(\n Message(\n message[\"text\"],\n user_info[\"name\"],\n message[\"channel\"],\n self,\n raw_event=message,\n )\n )\n\n @register_event(Message)\n async def send_message(self, message):\n \"\"\"Respond with a message.\"\"\"\n _LOGGER.debug(\n _(\"Responding with: '%s' in room %s\"), message.text, message.target\n )\n await self.slack.api_call(\n \"chat.postMessage\",\n data={\n \"channel\": message.target,\n \"text\": message.text,\n \"as_user\": False,\n \"username\": self.bot_name,\n \"icon_emoji\": self.icon_emoji,\n },\n )\n\n @register_event(Blocks)\n async def send_blocks(self, blocks):\n \"\"\"Respond with structured blocks.\"\"\"\n _LOGGER.debug(\n _(\"Responding with interactive blocks in room %s\"), blocks.target\n )\n await self.slack.api_call(\n \"chat.postMessage\",\n data={\n \"channel\": blocks.target,\n \"username\": self.bot_name,\n \"blocks\": blocks.blocks,\n \"icon_emoji\": self.icon_emoji,\n },\n )\n\n @register_event(Reaction)\n async def send_reaction(self, reaction):\n \"\"\"React to a message.\"\"\"\n emoji = demojize(reaction.emoji).replace(\":\", \"\")\n _LOGGER.debug(_(\"Reacting with: %s\"), emoji)\n try:\n await self.slack.api_call(\n \"reactions.add\",\n data={\n \"name\": emoji,\n \"channel\": reaction.target,\n \"timestamp\": reaction.linked_event.raw_event[\"ts\"],\n },\n )\n except slack.errors.SlackApiError as error:\n if \"invalid_name\" in str(error):\n _LOGGER.warning(_(\"Slack does not support the emoji %s\"), emoji)\n else:\n raise\n\n async def lookup_username(self, userid):\n \"\"\"Lookup a username and cache it.\"\"\"\n if userid in self.known_users:\n user_info = self.known_users[userid]\n else:\n response = await self.slack.users_info(user=userid)\n user_info = response.data[\"user\"]\n if 
isinstance(user_info, dict):\n self.known_users[userid] = user_info\n else:\n raise ValueError(\"Returned user is not a dict.\")\n return user_info\n\n async def replace_usernames(self, message):\n \"\"\"Replace User ID with username in message text.\"\"\"\n userids = re.findall(r\"\\<\\@([A-Z0-9]+)(?:\\|.+)?\\>\", message)\n for userid in userids:\n user_info = await self.lookup_username(userid)\n message = message.replace(\n \"<@{userid}>\".format(userid=userid), user_info[\"name\"]\n )\n return message\n", "path": "opsdroid/connector/slack/__init__.py"}], "after_files": [{"content": "\"\"\"A connector for Slack.\"\"\"\nimport logging\nimport re\n\nimport slack\nfrom emoji import demojize\n\nfrom opsdroid.connector import Connector, register_event\nfrom opsdroid.events import Message, Reaction\nfrom opsdroid.connector.slack.events import Blocks\n\n\n_LOGGER = logging.getLogger(__name__)\n\n\nclass ConnectorSlack(Connector):\n \"\"\"A connector for Slack.\"\"\"\n\n def __init__(self, config, opsdroid=None):\n \"\"\"Create the connector.\"\"\"\n super().__init__(config, opsdroid=opsdroid)\n _LOGGER.debug(_(\"Starting Slack connector\"))\n self.name = \"slack\"\n self.default_target = config.get(\"default-room\", \"#general\")\n self.icon_emoji = config.get(\"icon-emoji\", \":robot_face:\")\n self.token = config[\"api-token\"]\n self.timeout = config.get(\"connect-timeout\", 10)\n self.slack = slack.WebClient(token=self.token, run_async=True)\n self.slack_rtm = slack.RTMClient(token=self.token, run_async=True)\n self.websocket = None\n self.bot_name = config.get(\"bot-name\", \"opsdroid\")\n self.auth_info = None\n self.user_info = None\n self.bot_id = None\n self.known_users = {}\n self.keepalive = None\n self.reconnecting = False\n self.listening = True\n self._message_id = 0\n\n # Register callbacks\n slack.RTMClient.on(event=\"message\", callback=self.process_message)\n\n async def connect(self):\n \"\"\"Connect to the chat service.\"\"\"\n _LOGGER.info(_(\"Connecting to Slack\"))\n\n try:\n # The slack library recommends you call `self.slack_rtm.start()`` here but it\n # seems to mess with the event loop's signal handlers which breaks opsdroid.\n # Therefore we need to directly call the private `_connect_and_read` method\n # instead. 
This method also blocks so we need to dispatch it to the loop as a task.\n self.opsdroid.eventloop.create_task(self.slack_rtm._connect_and_read())\n\n self.auth_info = (await self.slack.api_call(\"auth.test\")).data\n self.user_info = (\n await self.slack.api_call(\n \"users.info\",\n http_verb=\"GET\",\n params={\"user\": self.auth_info[\"user_id\"]},\n )\n ).data\n self.bot_id = self.user_info[\"user\"][\"profile\"][\"bot_id\"]\n\n _LOGGER.debug(_(\"Connected as %s\"), self.bot_name)\n _LOGGER.debug(_(\"Using icon %s\"), self.icon_emoji)\n _LOGGER.debug(_(\"Default room is %s\"), self.default_target)\n _LOGGER.info(_(\"Connected successfully\"))\n except slack.errors.SlackApiError as error:\n _LOGGER.error(\n _(\n \"Unable to connect to Slack due to %s - \"\n \"The Slack Connector will not be available.\"\n ),\n error,\n )\n except Exception:\n await self.disconnect()\n raise\n\n async def disconnect(self):\n \"\"\"Disconnect from Slack.\"\"\"\n await self.slack_rtm.stop()\n self.listening = False\n\n async def listen(self):\n \"\"\"Listen for and parse new messages.\"\"\"\n\n async def process_message(self, **payload):\n \"\"\"Process a raw message and pass it to the parser.\"\"\"\n message = payload[\"data\"]\n\n # Ignore message edits\n if \"subtype\" in message and message[\"subtype\"] == \"message_changed\":\n return\n\n # Ignore own messages\n if (\n \"subtype\" in message\n and message[\"subtype\"] == \"bot_message\"\n and message[\"bot_id\"] == self.bot_id\n ):\n return\n\n # Lookup username\n _LOGGER.debug(_(\"Looking up sender username\"))\n try:\n user_info = await self.lookup_username(message[\"user\"])\n except ValueError:\n return\n\n # Replace usernames in the message\n _LOGGER.debug(_(\"Replacing userids in message with usernames\"))\n message[\"text\"] = await self.replace_usernames(message[\"text\"])\n\n await self.opsdroid.parse(\n Message(\n message[\"text\"],\n user_info[\"name\"],\n message[\"channel\"],\n self,\n raw_event=message,\n )\n )\n\n @register_event(Message)\n async def send_message(self, message):\n \"\"\"Respond with a message.\"\"\"\n _LOGGER.debug(\n _(\"Responding with: '%s' in room %s\"), message.text, message.target\n )\n await self.slack.api_call(\n \"chat.postMessage\",\n data={\n \"channel\": message.target,\n \"text\": message.text,\n \"as_user\": False,\n \"username\": self.bot_name,\n \"icon_emoji\": self.icon_emoji,\n },\n )\n\n @register_event(Blocks)\n async def send_blocks(self, blocks):\n \"\"\"Respond with structured blocks.\"\"\"\n _LOGGER.debug(\n _(\"Responding with interactive blocks in room %s\"), blocks.target\n )\n await self.slack.api_call(\n \"chat.postMessage\",\n data={\n \"channel\": blocks.target,\n \"username\": self.bot_name,\n \"blocks\": blocks.blocks,\n \"icon_emoji\": self.icon_emoji,\n },\n )\n\n @register_event(Reaction)\n async def send_reaction(self, reaction):\n \"\"\"React to a message.\"\"\"\n emoji = demojize(reaction.emoji).replace(\":\", \"\")\n _LOGGER.debug(_(\"Reacting with: %s\"), emoji)\n try:\n await self.slack.api_call(\n \"reactions.add\",\n data={\n \"name\": emoji,\n \"channel\": reaction.target,\n \"timestamp\": reaction.linked_event.raw_event[\"ts\"],\n },\n )\n except slack.errors.SlackApiError as error:\n if \"invalid_name\" in str(error):\n _LOGGER.warning(_(\"Slack does not support the emoji %s\"), emoji)\n else:\n raise\n\n async def lookup_username(self, userid):\n \"\"\"Lookup a username and cache it.\"\"\"\n if userid in self.known_users:\n user_info = self.known_users[userid]\n else:\n 
response = await self.slack.users_info(user=userid)\n user_info = response.data[\"user\"]\n if isinstance(user_info, dict):\n self.known_users[userid] = user_info\n else:\n raise ValueError(\"Returned user is not a dict.\")\n return user_info\n\n async def replace_usernames(self, message):\n \"\"\"Replace User ID with username in message text.\"\"\"\n userids = re.findall(r\"\\<\\@([A-Z0-9]+)(?:\\|.+)?\\>\", message)\n for userid in userids:\n user_info = await self.lookup_username(userid)\n message = message.replace(\n \"<@{userid}>\".format(userid=userid), user_info[\"name\"]\n )\n return message\n", "path": "opsdroid/connector/slack/__init__.py"}]} | 3,171 | 140 |
gh_patches_debug_26296 | rasdani/github-patches | git_diff | DataDog__dd-trace-py-4432 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
"Tracing Across Processes" example crashes
<!--
Thanks for taking the time for reporting an issue!
Before reporting an issue on dd-trace-py, please be sure to provide all
necessary information.
If you're hitting a bug, make sure that you're using the latest version of this
library.
-->
### Summary of problem
Hello, I cannot get "tracing across processes" to work. Python crashes while trying to pickle the tracer context RLock. The minimal reproduction example is [the example from the ddtrace advanced usage docs](https://ddtrace.readthedocs.io/en/stable/advanced_usage.html#tracing-across-processes). I also cannot get this to work with any prior ddtrace version.
### Which version of dd-trace-py are you using?
ddtrace: 1.5.3
python: 3.9.13
### How can we reproduce your problem?
Run the `Tracing Across Processes` example [from the docs](https://ddtrace.readthedocs.io/en/stable/advanced_usage.html#tracing-across-processes)
```python
from multiprocessing import Process
import time
from ddtrace import tracer
def _target(ctx):
tracer.context_provider.activate(ctx)
with tracer.trace("proc"):
time.sleep(1)
tracer.shutdown()
with tracer.trace("work"):
proc = Process(target=_target, args=(tracer.current_trace_context(),))
proc.start()
time.sleep(1)
proc.join()
```
### What is the result that you get?
```
Traceback (most recent call last):
File "/Users/jboulanger/Library/Application Support/JetBrains/PyCharm2021.2/scratches/scratch.py", line 15, in <module>
proc.start()
File "/usr/local/Cellar/[email protected]/3.9.13_3/Frameworks/Python.framework/Versions/3.9/lib/python3.9/multiprocessing/process.py", line 121, in start
self._popen = self._Popen(self)
File "/usr/local/Cellar/[email protected]/3.9.13_3/Frameworks/Python.framework/Versions/3.9/lib/python3.9/multiprocessing/context.py", line 224, in _Popen
return _default_context.get_context().Process._Popen(process_obj)
File "/usr/local/Cellar/[email protected]/3.9.13_3/Frameworks/Python.framework/Versions/3.9/lib/python3.9/multiprocessing/context.py", line 284, in _Popen
return Popen(process_obj)
File "/usr/local/Cellar/[email protected]/3.9.13_3/Frameworks/Python.framework/Versions/3.9/lib/python3.9/multiprocessing/popen_spawn_posix.py", line 32, in __init__
super().__init__(process_obj)
File "/usr/local/Cellar/[email protected]/3.9.13_3/Frameworks/Python.framework/Versions/3.9/lib/python3.9/multiprocessing/popen_fork.py", line 19, in __init__
self._launch(process_obj)
File "/usr/local/Cellar/[email protected]/3.9.13_3/Frameworks/Python.framework/Versions/3.9/lib/python3.9/multiprocessing/popen_spawn_posix.py", line 47, in _launch
reduction.dump(process_obj, fp)
File "/usr/local/Cellar/[email protected]/3.9.13_3/Frameworks/Python.framework/Versions/3.9/lib/python3.9/multiprocessing/reduction.py", line 60, in dump
ForkingPickler(file, protocol).dump(obj)
TypeError: cannot pickle '_thread.RLock' object
Process finished with exit code 1
```
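For context (background, not part of the report): on macOS and Windows, `multiprocessing` defaults to the spawn start method, so the `Process` arguments, including the `Context` returned by `tracer.current_trace_context()`, must be pickled, and a `threading.RLock` is not picklable; `pickle.dumps(threading.RLock())` raises exactly this `TypeError`. The usual remedy is to define `__getstate__`/`__setstate__` so the lock is dropped on serialisation and recreated on deserialisation. A minimal, generic sketch of that pattern with illustrative names (it is not ddtrace's actual code):

```python
import pickle
import threading


class TracedState:
    """Toy stand-in for an object that carries a lock, like ddtrace's Context."""

    def __init__(self, trace_id=None, span_id=None):
        self.trace_id = trace_id
        self.span_id = span_id
        self._lock = threading.RLock()

    def __getstate__(self):
        # Serialise everything except the unpicklable lock.
        return (self.trace_id, self.span_id)

    def __setstate__(self, state):
        self.trace_id, self.span_id = state
        self._lock = threading.RLock()  # recreate a fresh lock after unpickling


original = TracedState(trace_id=123, span_id=456)
restored = pickle.loads(pickle.dumps(original))
assert (restored.trace_id, restored.span_id) == (123, 456)
```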
### What is the result that you expected?
I expected this to complete without crashing
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ddtrace/context.py`
Content:
```
1 import base64
2 import threading
3 from typing import Any
4 from typing import Optional
5 from typing import TYPE_CHECKING
6 from typing import Text
7
8 from .constants import ORIGIN_KEY
9 from .constants import SAMPLING_PRIORITY_KEY
10 from .constants import USER_ID_KEY
11 from .internal.compat import NumericType
12 from .internal.compat import PY2
13 from .internal.logger import get_logger
14
15
16 if TYPE_CHECKING: # pragma: no cover
17 from .span import Span
18 from .span import _MetaDictType
19 from .span import _MetricDictType
20
21 log = get_logger(__name__)
22
23
24 class Context(object):
25 """Represents the state required to propagate a trace across execution
26 boundaries.
27 """
28
29 __slots__ = [
30 "trace_id",
31 "span_id",
32 "_lock",
33 "_meta",
34 "_metrics",
35 ]
36
37 def __init__(
38 self,
39 trace_id=None, # type: Optional[int]
40 span_id=None, # type: Optional[int]
41 dd_origin=None, # type: Optional[str]
42 sampling_priority=None, # type: Optional[float]
43 meta=None, # type: Optional[_MetaDictType]
44 metrics=None, # type: Optional[_MetricDictType]
45 lock=None, # type: Optional[threading.RLock]
46 ):
47 self._meta = meta if meta is not None else {} # type: _MetaDictType
48 self._metrics = metrics if metrics is not None else {} # type: _MetricDictType
49
50 self.trace_id = trace_id # type: Optional[int]
51 self.span_id = span_id # type: Optional[int]
52
53 if dd_origin is not None:
54 self._meta[ORIGIN_KEY] = dd_origin
55 if sampling_priority is not None:
56 self._metrics[SAMPLING_PRIORITY_KEY] = sampling_priority
57
58 if lock is not None:
59 self._lock = lock
60 else:
61 # DEV: A `forksafe.RLock` is not necessary here since Contexts
62 # are recreated by the tracer after fork
63 # https://github.com/DataDog/dd-trace-py/blob/a1932e8ddb704d259ea8a3188d30bf542f59fd8d/ddtrace/tracer.py#L489-L508
64 self._lock = threading.RLock()
65
66 def _with_span(self, span):
67 # type: (Span) -> Context
68 """Return a shallow copy of the context with the given span."""
69 return self.__class__(
70 trace_id=span.trace_id, span_id=span.span_id, meta=self._meta, metrics=self._metrics, lock=self._lock
71 )
72
73 def _update_tags(self, span):
74 # type: (Span) -> None
75 with self._lock:
76 for tag in self._meta:
77 span._meta.setdefault(tag, self._meta[tag])
78 for metric in self._metrics:
79 span._metrics.setdefault(metric, self._metrics[metric])
80
81 @property
82 def sampling_priority(self):
83 # type: () -> Optional[NumericType]
84 """Return the context sampling priority for the trace."""
85 return self._metrics.get(SAMPLING_PRIORITY_KEY)
86
87 @sampling_priority.setter
88 def sampling_priority(self, value):
89 # type: (Optional[NumericType]) -> None
90 with self._lock:
91 if value is None:
92 if SAMPLING_PRIORITY_KEY in self._metrics:
93 del self._metrics[SAMPLING_PRIORITY_KEY]
94 return
95 self._metrics[SAMPLING_PRIORITY_KEY] = value
96
97 @property
98 def _traceparent(self):
99 # type: () -> str
100 if self.trace_id is None or self.span_id is None:
101 return ""
102
103 sampled = 1 if self.sampling_priority and self.sampling_priority > 0 else 0
104 return "00-{:032x}-{:016x}-{:02x}".format(self.trace_id, self.span_id, sampled)
105
106 @property
107 def dd_origin(self):
108 # type: () -> Optional[Text]
109 """Get the origin of the trace."""
110 return self._meta.get(ORIGIN_KEY)
111
112 @dd_origin.setter
113 def dd_origin(self, value):
114 # type: (Optional[Text]) -> None
115 """Set the origin of the trace."""
116 with self._lock:
117 if value is None:
118 if ORIGIN_KEY in self._meta:
119 del self._meta[ORIGIN_KEY]
120 return
121 self._meta[ORIGIN_KEY] = value
122
123 @property
124 def dd_user_id(self):
125 # type: () -> Optional[Text]
126 """Get the user ID of the trace."""
127 user_id = self._meta.get(USER_ID_KEY)
128 if user_id:
129 if not PY2:
130 return str(base64.b64decode(user_id), encoding="utf-8")
131 else:
132 return str(base64.b64decode(user_id))
133 return None
134
135 @dd_user_id.setter
136 def dd_user_id(self, value):
137 # type: (Optional[Text]) -> None
138 """Set the user ID of the trace."""
139 with self._lock:
140 if value is None:
141 if USER_ID_KEY in self._meta:
142 del self._meta[USER_ID_KEY]
143 return
144 if not PY2:
145 value = str(base64.b64encode(bytes(value, encoding="utf-8")), encoding="utf-8")
146 else:
147 value = str(base64.b64encode(bytes(value)))
148 self._meta[USER_ID_KEY] = value
149
150 def __eq__(self, other):
151 # type: (Any) -> bool
152 if isinstance(other, Context):
153 with self._lock:
154 return (
155 self.trace_id == other.trace_id
156 and self.span_id == other.span_id
157 and self._meta == other._meta
158 and self._metrics == other._metrics
159 )
160 return False
161
162 def __repr__(self):
163 # type: () -> str
164 return "Context(trace_id=%s, span_id=%s, _meta=%s, _metrics=%s)" % (
165 self.trace_id,
166 self.span_id,
167 self._meta,
168 self._metrics,
169 )
170
171 __str__ = __repr__
172
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ddtrace/context.py b/ddtrace/context.py
--- a/ddtrace/context.py
+++ b/ddtrace/context.py
@@ -14,10 +14,20 @@
if TYPE_CHECKING: # pragma: no cover
+ from typing import Tuple
+
from .span import Span
from .span import _MetaDictType
from .span import _MetricDictType
+ _ContextState = Tuple[
+ Optional[int], # trace_id
+ Optional[int], # span_id
+ _MetaDictType, # _meta
+ _MetricDictType, # _metrics
+ ]
+
+
log = get_logger(__name__)
@@ -63,6 +73,22 @@
# https://github.com/DataDog/dd-trace-py/blob/a1932e8ddb704d259ea8a3188d30bf542f59fd8d/ddtrace/tracer.py#L489-L508
self._lock = threading.RLock()
+ def __getstate__(self):
+ # type: () -> _ContextState
+ return (
+ self.trace_id,
+ self.span_id,
+ self._meta,
+ self._metrics,
+ # Note: self._lock is not serializable
+ )
+
+ def __setstate__(self, state):
+ # type: (_ContextState) -> None
+ self.trace_id, self.span_id, self._meta, self._metrics = state
+ # We cannot serialize and lock, so we must recreate it unless we already have one
+ self._lock = threading.RLock()
+
def _with_span(self, span):
# type: (Span) -> Context
"""Return a shallow copy of the context with the given span."""
| {"golden_diff": "diff --git a/ddtrace/context.py b/ddtrace/context.py\n--- a/ddtrace/context.py\n+++ b/ddtrace/context.py\n@@ -14,10 +14,20 @@\n \n \n if TYPE_CHECKING: # pragma: no cover\n+ from typing import Tuple\n+\n from .span import Span\n from .span import _MetaDictType\n from .span import _MetricDictType\n \n+ _ContextState = Tuple[\n+ Optional[int], # trace_id\n+ Optional[int], # span_id\n+ _MetaDictType, # _meta\n+ _MetricDictType, # _metrics\n+ ]\n+\n+\n log = get_logger(__name__)\n \n \n@@ -63,6 +73,22 @@\n # https://github.com/DataDog/dd-trace-py/blob/a1932e8ddb704d259ea8a3188d30bf542f59fd8d/ddtrace/tracer.py#L489-L508\n self._lock = threading.RLock()\n \n+ def __getstate__(self):\n+ # type: () -> _ContextState\n+ return (\n+ self.trace_id,\n+ self.span_id,\n+ self._meta,\n+ self._metrics,\n+ # Note: self._lock is not serializable\n+ )\n+\n+ def __setstate__(self, state):\n+ # type: (_ContextState) -> None\n+ self.trace_id, self.span_id, self._meta, self._metrics = state\n+ # We cannot serialize and lock, so we must recreate it unless we already have one\n+ self._lock = threading.RLock()\n+\n def _with_span(self, span):\n # type: (Span) -> Context\n \"\"\"Return a shallow copy of the context with the given span.\"\"\"\n", "issue": "\"Tracing Across Processes\" example crashes\n<!--\r\nThanks for taking the time for reporting an issue!\r\n\r\nBefore reporting an issue on dd-trace-py, please be sure to provide all\r\nnecessary information.\r\n\r\nIf you're hitting a bug, make sure that you're using the latest version of this\r\nlibrary.\r\n-->\r\n\r\n### Summary of problem\r\n\r\nHello, I cannot get \"tracing across processes\" to work. Python crashes while trying to pickle the tracer context RLock. The minimal reproduction example is [the example from the ddtrace advanced usage docs](https://ddtrace.readthedocs.io/en/stable/advanced_usage.html#tracing-across-processes). 
I also cannot get this to work with any prior ddtrace version.\r\n\r\n### Which version of dd-trace-py are you using?\r\n\r\nddtrace: 1.5.3\r\npython: 3.9.13\r\n\r\n### How can we reproduce your problem?\r\n\r\nRun the `Tracing Across Processes` example [from the docs](https://ddtrace.readthedocs.io/en/stable/advanced_usage.html#tracing-across-processes)\r\n\r\n```python\r\nfrom multiprocessing import Process\r\nimport time\r\nfrom ddtrace import tracer\r\n\r\ndef _target(ctx):\r\n tracer.context_provider.activate(ctx)\r\n with tracer.trace(\"proc\"):\r\n time.sleep(1)\r\n tracer.shutdown()\r\n\r\nwith tracer.trace(\"work\"):\r\n proc = Process(target=_target, args=(tracer.current_trace_context(),))\r\n proc.start()\r\n time.sleep(1)\r\n proc.join()\r\n```\r\n\r\n### What is the result that you get?\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"/Users/jboulanger/Library/Application Support/JetBrains/PyCharm2021.2/scratches/scratch.py\", line 15, in <module>\r\n proc.start()\r\n File \"/usr/local/Cellar/[email protected]/3.9.13_3/Frameworks/Python.framework/Versions/3.9/lib/python3.9/multiprocessing/process.py\", line 121, in start\r\n self._popen = self._Popen(self)\r\n File \"/usr/local/Cellar/[email protected]/3.9.13_3/Frameworks/Python.framework/Versions/3.9/lib/python3.9/multiprocessing/context.py\", line 224, in _Popen\r\n return _default_context.get_context().Process._Popen(process_obj)\r\n File \"/usr/local/Cellar/[email protected]/3.9.13_3/Frameworks/Python.framework/Versions/3.9/lib/python3.9/multiprocessing/context.py\", line 284, in _Popen\r\n return Popen(process_obj)\r\n File \"/usr/local/Cellar/[email protected]/3.9.13_3/Frameworks/Python.framework/Versions/3.9/lib/python3.9/multiprocessing/popen_spawn_posix.py\", line 32, in __init__\r\n super().__init__(process_obj)\r\n File \"/usr/local/Cellar/[email protected]/3.9.13_3/Frameworks/Python.framework/Versions/3.9/lib/python3.9/multiprocessing/popen_fork.py\", line 19, in __init__\r\n self._launch(process_obj)\r\n File \"/usr/local/Cellar/[email protected]/3.9.13_3/Frameworks/Python.framework/Versions/3.9/lib/python3.9/multiprocessing/popen_spawn_posix.py\", line 47, in _launch\r\n reduction.dump(process_obj, fp)\r\n File \"/usr/local/Cellar/[email protected]/3.9.13_3/Frameworks/Python.framework/Versions/3.9/lib/python3.9/multiprocessing/reduction.py\", line 60, in dump\r\n ForkingPickler(file, protocol).dump(obj)\r\nTypeError: cannot pickle '_thread.RLock' object\r\nProcess finished with exit code 1\r\n```\r\n\r\n### What is the result that you expected?\r\n\r\nI expected this to complete without crashing\n", "before_files": [{"content": "import base64\nimport threading\nfrom typing import Any\nfrom typing import Optional\nfrom typing import TYPE_CHECKING\nfrom typing import Text\n\nfrom .constants import ORIGIN_KEY\nfrom .constants import SAMPLING_PRIORITY_KEY\nfrom .constants import USER_ID_KEY\nfrom .internal.compat import NumericType\nfrom .internal.compat import PY2\nfrom .internal.logger import get_logger\n\n\nif TYPE_CHECKING: # pragma: no cover\n from .span import Span\n from .span import _MetaDictType\n from .span import _MetricDictType\n\nlog = get_logger(__name__)\n\n\nclass Context(object):\n \"\"\"Represents the state required to propagate a trace across execution\n boundaries.\n \"\"\"\n\n __slots__ = [\n \"trace_id\",\n \"span_id\",\n \"_lock\",\n \"_meta\",\n \"_metrics\",\n ]\n\n def __init__(\n self,\n trace_id=None, # type: Optional[int]\n span_id=None, # type: Optional[int]\n dd_origin=None, 
# type: Optional[str]\n sampling_priority=None, # type: Optional[float]\n meta=None, # type: Optional[_MetaDictType]\n metrics=None, # type: Optional[_MetricDictType]\n lock=None, # type: Optional[threading.RLock]\n ):\n self._meta = meta if meta is not None else {} # type: _MetaDictType\n self._metrics = metrics if metrics is not None else {} # type: _MetricDictType\n\n self.trace_id = trace_id # type: Optional[int]\n self.span_id = span_id # type: Optional[int]\n\n if dd_origin is not None:\n self._meta[ORIGIN_KEY] = dd_origin\n if sampling_priority is not None:\n self._metrics[SAMPLING_PRIORITY_KEY] = sampling_priority\n\n if lock is not None:\n self._lock = lock\n else:\n # DEV: A `forksafe.RLock` is not necessary here since Contexts\n # are recreated by the tracer after fork\n # https://github.com/DataDog/dd-trace-py/blob/a1932e8ddb704d259ea8a3188d30bf542f59fd8d/ddtrace/tracer.py#L489-L508\n self._lock = threading.RLock()\n\n def _with_span(self, span):\n # type: (Span) -> Context\n \"\"\"Return a shallow copy of the context with the given span.\"\"\"\n return self.__class__(\n trace_id=span.trace_id, span_id=span.span_id, meta=self._meta, metrics=self._metrics, lock=self._lock\n )\n\n def _update_tags(self, span):\n # type: (Span) -> None\n with self._lock:\n for tag in self._meta:\n span._meta.setdefault(tag, self._meta[tag])\n for metric in self._metrics:\n span._metrics.setdefault(metric, self._metrics[metric])\n\n @property\n def sampling_priority(self):\n # type: () -> Optional[NumericType]\n \"\"\"Return the context sampling priority for the trace.\"\"\"\n return self._metrics.get(SAMPLING_PRIORITY_KEY)\n\n @sampling_priority.setter\n def sampling_priority(self, value):\n # type: (Optional[NumericType]) -> None\n with self._lock:\n if value is None:\n if SAMPLING_PRIORITY_KEY in self._metrics:\n del self._metrics[SAMPLING_PRIORITY_KEY]\n return\n self._metrics[SAMPLING_PRIORITY_KEY] = value\n\n @property\n def _traceparent(self):\n # type: () -> str\n if self.trace_id is None or self.span_id is None:\n return \"\"\n\n sampled = 1 if self.sampling_priority and self.sampling_priority > 0 else 0\n return \"00-{:032x}-{:016x}-{:02x}\".format(self.trace_id, self.span_id, sampled)\n\n @property\n def dd_origin(self):\n # type: () -> Optional[Text]\n \"\"\"Get the origin of the trace.\"\"\"\n return self._meta.get(ORIGIN_KEY)\n\n @dd_origin.setter\n def dd_origin(self, value):\n # type: (Optional[Text]) -> None\n \"\"\"Set the origin of the trace.\"\"\"\n with self._lock:\n if value is None:\n if ORIGIN_KEY in self._meta:\n del self._meta[ORIGIN_KEY]\n return\n self._meta[ORIGIN_KEY] = value\n\n @property\n def dd_user_id(self):\n # type: () -> Optional[Text]\n \"\"\"Get the user ID of the trace.\"\"\"\n user_id = self._meta.get(USER_ID_KEY)\n if user_id:\n if not PY2:\n return str(base64.b64decode(user_id), encoding=\"utf-8\")\n else:\n return str(base64.b64decode(user_id))\n return None\n\n @dd_user_id.setter\n def dd_user_id(self, value):\n # type: (Optional[Text]) -> None\n \"\"\"Set the user ID of the trace.\"\"\"\n with self._lock:\n if value is None:\n if USER_ID_KEY in self._meta:\n del self._meta[USER_ID_KEY]\n return\n if not PY2:\n value = str(base64.b64encode(bytes(value, encoding=\"utf-8\")), encoding=\"utf-8\")\n else:\n value = str(base64.b64encode(bytes(value)))\n self._meta[USER_ID_KEY] = value\n\n def __eq__(self, other):\n # type: (Any) -> bool\n if isinstance(other, Context):\n with self._lock:\n return (\n self.trace_id == other.trace_id\n and self.span_id == 
other.span_id\n and self._meta == other._meta\n and self._metrics == other._metrics\n )\n return False\n\n def __repr__(self):\n # type: () -> str\n return \"Context(trace_id=%s, span_id=%s, _meta=%s, _metrics=%s)\" % (\n self.trace_id,\n self.span_id,\n self._meta,\n self._metrics,\n )\n\n __str__ = __repr__\n", "path": "ddtrace/context.py"}], "after_files": [{"content": "import base64\nimport threading\nfrom typing import Any\nfrom typing import Optional\nfrom typing import TYPE_CHECKING\nfrom typing import Text\n\nfrom .constants import ORIGIN_KEY\nfrom .constants import SAMPLING_PRIORITY_KEY\nfrom .constants import USER_ID_KEY\nfrom .internal.compat import NumericType\nfrom .internal.compat import PY2\nfrom .internal.logger import get_logger\n\n\nif TYPE_CHECKING: # pragma: no cover\n from typing import Tuple\n\n from .span import Span\n from .span import _MetaDictType\n from .span import _MetricDictType\n\n _ContextState = Tuple[\n Optional[int], # trace_id\n Optional[int], # span_id\n _MetaDictType, # _meta\n _MetricDictType, # _metrics\n ]\n\n\nlog = get_logger(__name__)\n\n\nclass Context(object):\n \"\"\"Represents the state required to propagate a trace across execution\n boundaries.\n \"\"\"\n\n __slots__ = [\n \"trace_id\",\n \"span_id\",\n \"_lock\",\n \"_meta\",\n \"_metrics\",\n ]\n\n def __init__(\n self,\n trace_id=None, # type: Optional[int]\n span_id=None, # type: Optional[int]\n dd_origin=None, # type: Optional[str]\n sampling_priority=None, # type: Optional[float]\n meta=None, # type: Optional[_MetaDictType]\n metrics=None, # type: Optional[_MetricDictType]\n lock=None, # type: Optional[threading.RLock]\n ):\n self._meta = meta if meta is not None else {} # type: _MetaDictType\n self._metrics = metrics if metrics is not None else {} # type: _MetricDictType\n\n self.trace_id = trace_id # type: Optional[int]\n self.span_id = span_id # type: Optional[int]\n\n if dd_origin is not None:\n self._meta[ORIGIN_KEY] = dd_origin\n if sampling_priority is not None:\n self._metrics[SAMPLING_PRIORITY_KEY] = sampling_priority\n\n if lock is not None:\n self._lock = lock\n else:\n # DEV: A `forksafe.RLock` is not necessary here since Contexts\n # are recreated by the tracer after fork\n # https://github.com/DataDog/dd-trace-py/blob/a1932e8ddb704d259ea8a3188d30bf542f59fd8d/ddtrace/tracer.py#L489-L508\n self._lock = threading.RLock()\n\n def __getstate__(self):\n # type: () -> _ContextState\n return (\n self.trace_id,\n self.span_id,\n self._meta,\n self._metrics,\n # Note: self._lock is not serializable\n )\n\n def __setstate__(self, state):\n # type: (_ContextState) -> None\n self.trace_id, self.span_id, self._meta, self._metrics = state\n # We cannot serialize and lock, so we must recreate it unless we already have one\n self._lock = threading.RLock()\n\n def _with_span(self, span):\n # type: (Span) -> Context\n \"\"\"Return a shallow copy of the context with the given span.\"\"\"\n return self.__class__(\n trace_id=span.trace_id, span_id=span.span_id, meta=self._meta, metrics=self._metrics, lock=self._lock\n )\n\n def _update_tags(self, span):\n # type: (Span) -> None\n with self._lock:\n for tag in self._meta:\n span._meta.setdefault(tag, self._meta[tag])\n for metric in self._metrics:\n span._metrics.setdefault(metric, self._metrics[metric])\n\n @property\n def sampling_priority(self):\n # type: () -> Optional[NumericType]\n \"\"\"Return the context sampling priority for the trace.\"\"\"\n return self._metrics.get(SAMPLING_PRIORITY_KEY)\n\n @sampling_priority.setter\n def 
sampling_priority(self, value):\n # type: (Optional[NumericType]) -> None\n with self._lock:\n if value is None:\n if SAMPLING_PRIORITY_KEY in self._metrics:\n del self._metrics[SAMPLING_PRIORITY_KEY]\n return\n self._metrics[SAMPLING_PRIORITY_KEY] = value\n\n @property\n def _traceparent(self):\n # type: () -> str\n if self.trace_id is None or self.span_id is None:\n return \"\"\n\n sampled = 1 if self.sampling_priority and self.sampling_priority > 0 else 0\n return \"00-{:032x}-{:016x}-{:02x}\".format(self.trace_id, self.span_id, sampled)\n\n @property\n def dd_origin(self):\n # type: () -> Optional[Text]\n \"\"\"Get the origin of the trace.\"\"\"\n return self._meta.get(ORIGIN_KEY)\n\n @dd_origin.setter\n def dd_origin(self, value):\n # type: (Optional[Text]) -> None\n \"\"\"Set the origin of the trace.\"\"\"\n with self._lock:\n if value is None:\n if ORIGIN_KEY in self._meta:\n del self._meta[ORIGIN_KEY]\n return\n self._meta[ORIGIN_KEY] = value\n\n @property\n def dd_user_id(self):\n # type: () -> Optional[Text]\n \"\"\"Get the user ID of the trace.\"\"\"\n user_id = self._meta.get(USER_ID_KEY)\n if user_id:\n if not PY2:\n return str(base64.b64decode(user_id), encoding=\"utf-8\")\n else:\n return str(base64.b64decode(user_id))\n return None\n\n @dd_user_id.setter\n def dd_user_id(self, value):\n # type: (Optional[Text]) -> None\n \"\"\"Set the user ID of the trace.\"\"\"\n with self._lock:\n if value is None:\n if USER_ID_KEY in self._meta:\n del self._meta[USER_ID_KEY]\n return\n if not PY2:\n value = str(base64.b64encode(bytes(value, encoding=\"utf-8\")), encoding=\"utf-8\")\n else:\n value = str(base64.b64encode(bytes(value)))\n self._meta[USER_ID_KEY] = value\n\n def __eq__(self, other):\n # type: (Any) -> bool\n if isinstance(other, Context):\n with self._lock:\n return (\n self.trace_id == other.trace_id\n and self.span_id == other.span_id\n and self._meta == other._meta\n and self._metrics == other._metrics\n )\n return False\n\n def __repr__(self):\n # type: () -> str\n return \"Context(trace_id=%s, span_id=%s, _meta=%s, _metrics=%s)\" % (\n self.trace_id,\n self.span_id,\n self._meta,\n self._metrics,\n )\n\n __str__ = __repr__\n", "path": "ddtrace/context.py"}]} | 2,926 | 414 |
gh_patches_debug_38411 | rasdani/github-patches | git_diff | cloud-custodian__cloud-custodian-5275 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Support for EFS Lifecycles
According to this, it's not an available action: https://cloudcustodian.io/docs/aws/resources/efs.html
Console and CLI instructions from AWS are here: https://docs.aws.amazon.com/efs/latest/ug/enable-lifecycle-management.html
--- END ISSUE ---
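For orientation before the code segments below: the AWS feature referenced in the issue is exposed through a single EFS API call. The following is a minimal boto3 sketch; the file-system id and transition rule are illustrative assumptions, not values taken from the issue.

```python
# Minimal sketch of the underlying AWS call; id and rule are placeholders.
import boto3

efs = boto3.client("efs")
efs.put_lifecycle_configuration(
    FileSystemId="fs-0123456789abcdef0",  # placeholder file-system id
    LifecyclePolicies=[{"TransitionToIA": "AFTER_7_DAYS"}],
)
```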
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `c7n/resources/efs.py`
Content:
```
1 # Copyright 2015-2017 Capital One Services, LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 from __future__ import absolute_import, division, print_function, unicode_literals
15
16 from c7n.actions import Action
17 from c7n.filters.kms import KmsRelatedFilter
18 from c7n.manager import resources
19 from c7n.filters.vpc import SecurityGroupFilter, SubnetFilter
20 from c7n.query import QueryResourceManager, ChildResourceManager, TypeInfo
21 from c7n.tags import universal_augment
22 from c7n.utils import local_session, type_schema, get_retry
23
24
25 @resources.register('efs')
26 class ElasticFileSystem(QueryResourceManager):
27
28 class resource_type(TypeInfo):
29 service = 'efs'
30 enum_spec = ('describe_file_systems', 'FileSystems', None)
31 id = 'FileSystemId'
32 name = 'Name'
33 date = 'CreationTime'
34 dimension = 'FileSystemId'
35 arn_type = 'file-system'
36 permission_prefix = arn_service = 'elasticfilesystem'
37 filter_name = 'FileSystemId'
38 filter_type = 'scalar'
39 universal_taggable = True
40
41 augment = universal_augment
42
43
44 @resources.register('efs-mount-target')
45 class ElasticFileSystemMountTarget(ChildResourceManager):
46
47 class resource_type(TypeInfo):
48 service = 'efs'
49 parent_spec = ('efs', 'FileSystemId', None)
50 enum_spec = ('describe_mount_targets', 'MountTargets', None)
51 permission_prefix = 'elasticfilesystem'
52 name = id = 'MountTargetId'
53 filter_name = 'MountTargetId'
54 filter_type = 'scalar'
55 arn = False
56
57
58 @ElasticFileSystemMountTarget.filter_registry.register('subnet')
59 class Subnet(SubnetFilter):
60
61 RelatedIdsExpression = "SubnetId"
62
63
64 @ElasticFileSystemMountTarget.filter_registry.register('security-group')
65 class SecurityGroup(SecurityGroupFilter):
66
67 efs_group_cache = None
68
69 RelatedIdsExpression = ""
70
71 def get_related_ids(self, resources):
72
73 if self.efs_group_cache:
74 group_ids = set()
75 for r in resources:
76 group_ids.update(
77 self.efs_group_cache.get(r['MountTargetId'], ()))
78 return list(group_ids)
79
80 client = local_session(self.manager.session_factory).client('efs')
81 groups = {}
82 group_ids = set()
83 retry = get_retry(('Throttled',), 12)
84
85 for r in resources:
86 groups[r['MountTargetId']] = retry(
87 client.describe_mount_target_security_groups,
88 MountTargetId=r['MountTargetId'])['SecurityGroups']
89 group_ids.update(groups[r['MountTargetId']])
90
91 self.efs_group_cache = groups
92 return list(group_ids)
93
94
95 @ElasticFileSystem.filter_registry.register('kms-key')
96 class KmsFilter(KmsRelatedFilter):
97 """
98 Filter a resource by its associcated kms key and optionally the aliasname
99 of the kms key by using 'c7n:AliasName'
100
101 :example:
102
103 .. code-block:: yaml
104
105 policies:
106 - name: efs-kms-key-filters
107 resource: efs
108 filters:
109 - type: kms-key
110 key: c7n:AliasName
111 value: "^(alias/aws/)"
112 op: regex
113 """
114 RelatedIdsExpression = 'KmsKeyId'
115
116
117 @ElasticFileSystem.action_registry.register('delete')
118 class Delete(Action):
119
120 schema = type_schema('delete')
121 permissions = ('elasticfilesystem:DescribeMountTargets',
122 'elasticfilesystem:DeleteMountTarget',
123 'elasticfilesystem:DeleteFileSystem')
124
125 def process(self, resources):
126 client = local_session(self.manager.session_factory).client('efs')
127 self.unmount_filesystems(resources)
128 retry = get_retry(('FileSystemInUse',), 12)
129 for r in resources:
130 retry(client.delete_file_system, FileSystemId=r['FileSystemId'])
131
132 def unmount_filesystems(self, resources):
133 client = local_session(self.manager.session_factory).client('efs')
134 for r in resources:
135 if not r['NumberOfMountTargets']:
136 continue
137 for t in client.describe_mount_targets(
138 FileSystemId=r['FileSystemId'])['MountTargets']:
139 client.delete_mount_target(MountTargetId=t['MountTargetId'])
140
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/c7n/resources/efs.py b/c7n/resources/efs.py
--- a/c7n/resources/efs.py
+++ b/c7n/resources/efs.py
@@ -13,13 +13,15 @@
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
-from c7n.actions import Action
+from c7n.actions import Action, BaseAction
+from c7n.exceptions import PolicyValidationError
from c7n.filters.kms import KmsRelatedFilter
from c7n.manager import resources
from c7n.filters.vpc import SecurityGroupFilter, SubnetFilter
from c7n.query import QueryResourceManager, ChildResourceManager, TypeInfo
from c7n.tags import universal_augment
from c7n.utils import local_session, type_schema, get_retry
+from .aws import shape_validate
@resources.register('efs')
@@ -137,3 +139,57 @@
for t in client.describe_mount_targets(
FileSystemId=r['FileSystemId'])['MountTargets']:
client.delete_mount_target(MountTargetId=t['MountTargetId'])
+
+
[email protected]_registry.register('configure-lifecycle-policy')
+class ConfigureLifecycle(BaseAction):
+ """Enable/disable lifecycle policy for efs.
+
+ :example:
+
+ .. code-block:: yaml
+
+ policies:
+ - name: efs-apply-lifecycle
+ resource: efs
+ actions:
+ - type: configure-lifecycle-policy
+ state: enable
+ rules:
+ - 'TransitionToIA': 'AFTER_7_DAYS'
+
+ """
+ schema = type_schema(
+ 'configure-lifecycle-policy',
+ state={'enum': ['enable', 'disable']},
+ rules={
+ 'type': 'array',
+ 'items': {'type': 'object'}},
+ required=['state'])
+
+ permissions = ('elasticfilesystem:PutLifecycleConfiguration',)
+ shape = 'PutLifecycleConfigurationRequest'
+
+ def validate(self):
+ if self.data.get('state') == 'enable' and 'rules' not in self.data:
+ raise PolicyValidationError(
+ 'rules are required to enable lifecycle configuration %s' % (self.manager.data))
+ if self.data.get('state') == 'disable' and 'rules' in self.data:
+ raise PolicyValidationError(
+ 'rules not required to disable lifecycle configuration %s' % (self.manager.data))
+ if self.data.get('rules'):
+ attrs = {}
+ attrs['LifecyclePolicies'] = self.data['rules']
+ attrs['FileSystemId'] = 'PolicyValidator'
+ return shape_validate(attrs, self.shape, 'efs')
+
+ def process(self, resources):
+ client = local_session(self.manager.session_factory).client('efs')
+ op_map = {'enable': self.data.get('rules'), 'disable': []}
+ for r in resources:
+ try:
+ client.put_lifecycle_configuration(
+ FileSystemId=r['FileSystemId'],
+ LifecyclePolicies=op_map.get(self.data.get('state')))
+ except client.exceptions.FileSystemNotFound:
+ continue
| {"golden_diff": "diff --git a/c7n/resources/efs.py b/c7n/resources/efs.py\n--- a/c7n/resources/efs.py\n+++ b/c7n/resources/efs.py\n@@ -13,13 +13,15 @@\n # limitations under the License.\n from __future__ import absolute_import, division, print_function, unicode_literals\n \n-from c7n.actions import Action\n+from c7n.actions import Action, BaseAction\n+from c7n.exceptions import PolicyValidationError\n from c7n.filters.kms import KmsRelatedFilter\n from c7n.manager import resources\n from c7n.filters.vpc import SecurityGroupFilter, SubnetFilter\n from c7n.query import QueryResourceManager, ChildResourceManager, TypeInfo\n from c7n.tags import universal_augment\n from c7n.utils import local_session, type_schema, get_retry\n+from .aws import shape_validate\n \n \n @resources.register('efs')\n@@ -137,3 +139,57 @@\n for t in client.describe_mount_targets(\n FileSystemId=r['FileSystemId'])['MountTargets']:\n client.delete_mount_target(MountTargetId=t['MountTargetId'])\n+\n+\[email protected]_registry.register('configure-lifecycle-policy')\n+class ConfigureLifecycle(BaseAction):\n+ \"\"\"Enable/disable lifecycle policy for efs.\n+\n+ :example:\n+\n+ .. code-block:: yaml\n+\n+ policies:\n+ - name: efs-apply-lifecycle\n+ resource: efs\n+ actions:\n+ - type: configure-lifecycle-policy\n+ state: enable\n+ rules:\n+ - 'TransitionToIA': 'AFTER_7_DAYS'\n+\n+ \"\"\"\n+ schema = type_schema(\n+ 'configure-lifecycle-policy',\n+ state={'enum': ['enable', 'disable']},\n+ rules={\n+ 'type': 'array',\n+ 'items': {'type': 'object'}},\n+ required=['state'])\n+\n+ permissions = ('elasticfilesystem:PutLifecycleConfiguration',)\n+ shape = 'PutLifecycleConfigurationRequest'\n+\n+ def validate(self):\n+ if self.data.get('state') == 'enable' and 'rules' not in self.data:\n+ raise PolicyValidationError(\n+ 'rules are required to enable lifecycle configuration %s' % (self.manager.data))\n+ if self.data.get('state') == 'disable' and 'rules' in self.data:\n+ raise PolicyValidationError(\n+ 'rules not required to disable lifecycle configuration %s' % (self.manager.data))\n+ if self.data.get('rules'):\n+ attrs = {}\n+ attrs['LifecyclePolicies'] = self.data['rules']\n+ attrs['FileSystemId'] = 'PolicyValidator'\n+ return shape_validate(attrs, self.shape, 'efs')\n+\n+ def process(self, resources):\n+ client = local_session(self.manager.session_factory).client('efs')\n+ op_map = {'enable': self.data.get('rules'), 'disable': []}\n+ for r in resources:\n+ try:\n+ client.put_lifecycle_configuration(\n+ FileSystemId=r['FileSystemId'],\n+ LifecyclePolicies=op_map.get(self.data.get('state')))\n+ except client.exceptions.FileSystemNotFound:\n+ continue\n", "issue": "Support for EFS Lifecycles\nAccording to this, it's not an available action: https://cloudcustodian.io/docs/aws/resources/efs.html\r\n\r\nConsole and CLI instructions from AWS are here: https://docs.aws.amazon.com/efs/latest/ug/enable-lifecycle-management.html\n", "before_files": [{"content": "# Copyright 2015-2017 Capital One Services, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations 
under the License.\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom c7n.actions import Action\nfrom c7n.filters.kms import KmsRelatedFilter\nfrom c7n.manager import resources\nfrom c7n.filters.vpc import SecurityGroupFilter, SubnetFilter\nfrom c7n.query import QueryResourceManager, ChildResourceManager, TypeInfo\nfrom c7n.tags import universal_augment\nfrom c7n.utils import local_session, type_schema, get_retry\n\n\[email protected]('efs')\nclass ElasticFileSystem(QueryResourceManager):\n\n class resource_type(TypeInfo):\n service = 'efs'\n enum_spec = ('describe_file_systems', 'FileSystems', None)\n id = 'FileSystemId'\n name = 'Name'\n date = 'CreationTime'\n dimension = 'FileSystemId'\n arn_type = 'file-system'\n permission_prefix = arn_service = 'elasticfilesystem'\n filter_name = 'FileSystemId'\n filter_type = 'scalar'\n universal_taggable = True\n\n augment = universal_augment\n\n\[email protected]('efs-mount-target')\nclass ElasticFileSystemMountTarget(ChildResourceManager):\n\n class resource_type(TypeInfo):\n service = 'efs'\n parent_spec = ('efs', 'FileSystemId', None)\n enum_spec = ('describe_mount_targets', 'MountTargets', None)\n permission_prefix = 'elasticfilesystem'\n name = id = 'MountTargetId'\n filter_name = 'MountTargetId'\n filter_type = 'scalar'\n arn = False\n\n\[email protected]_registry.register('subnet')\nclass Subnet(SubnetFilter):\n\n RelatedIdsExpression = \"SubnetId\"\n\n\[email protected]_registry.register('security-group')\nclass SecurityGroup(SecurityGroupFilter):\n\n efs_group_cache = None\n\n RelatedIdsExpression = \"\"\n\n def get_related_ids(self, resources):\n\n if self.efs_group_cache:\n group_ids = set()\n for r in resources:\n group_ids.update(\n self.efs_group_cache.get(r['MountTargetId'], ()))\n return list(group_ids)\n\n client = local_session(self.manager.session_factory).client('efs')\n groups = {}\n group_ids = set()\n retry = get_retry(('Throttled',), 12)\n\n for r in resources:\n groups[r['MountTargetId']] = retry(\n client.describe_mount_target_security_groups,\n MountTargetId=r['MountTargetId'])['SecurityGroups']\n group_ids.update(groups[r['MountTargetId']])\n\n self.efs_group_cache = groups\n return list(group_ids)\n\n\[email protected]_registry.register('kms-key')\nclass KmsFilter(KmsRelatedFilter):\n \"\"\"\n Filter a resource by its associcated kms key and optionally the aliasname\n of the kms key by using 'c7n:AliasName'\n\n :example:\n\n .. 
code-block:: yaml\n\n policies:\n - name: efs-kms-key-filters\n resource: efs\n filters:\n - type: kms-key\n key: c7n:AliasName\n value: \"^(alias/aws/)\"\n op: regex\n \"\"\"\n RelatedIdsExpression = 'KmsKeyId'\n\n\[email protected]_registry.register('delete')\nclass Delete(Action):\n\n schema = type_schema('delete')\n permissions = ('elasticfilesystem:DescribeMountTargets',\n 'elasticfilesystem:DeleteMountTarget',\n 'elasticfilesystem:DeleteFileSystem')\n\n def process(self, resources):\n client = local_session(self.manager.session_factory).client('efs')\n self.unmount_filesystems(resources)\n retry = get_retry(('FileSystemInUse',), 12)\n for r in resources:\n retry(client.delete_file_system, FileSystemId=r['FileSystemId'])\n\n def unmount_filesystems(self, resources):\n client = local_session(self.manager.session_factory).client('efs')\n for r in resources:\n if not r['NumberOfMountTargets']:\n continue\n for t in client.describe_mount_targets(\n FileSystemId=r['FileSystemId'])['MountTargets']:\n client.delete_mount_target(MountTargetId=t['MountTargetId'])\n", "path": "c7n/resources/efs.py"}], "after_files": [{"content": "# Copyright 2015-2017 Capital One Services, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom c7n.actions import Action, BaseAction\nfrom c7n.exceptions import PolicyValidationError\nfrom c7n.filters.kms import KmsRelatedFilter\nfrom c7n.manager import resources\nfrom c7n.filters.vpc import SecurityGroupFilter, SubnetFilter\nfrom c7n.query import QueryResourceManager, ChildResourceManager, TypeInfo\nfrom c7n.tags import universal_augment\nfrom c7n.utils import local_session, type_schema, get_retry\nfrom .aws import shape_validate\n\n\[email protected]('efs')\nclass ElasticFileSystem(QueryResourceManager):\n\n class resource_type(TypeInfo):\n service = 'efs'\n enum_spec = ('describe_file_systems', 'FileSystems', None)\n id = 'FileSystemId'\n name = 'Name'\n date = 'CreationTime'\n dimension = 'FileSystemId'\n arn_type = 'file-system'\n permission_prefix = arn_service = 'elasticfilesystem'\n filter_name = 'FileSystemId'\n filter_type = 'scalar'\n universal_taggable = True\n\n augment = universal_augment\n\n\[email protected]('efs-mount-target')\nclass ElasticFileSystemMountTarget(ChildResourceManager):\n\n class resource_type(TypeInfo):\n service = 'efs'\n parent_spec = ('efs', 'FileSystemId', None)\n enum_spec = ('describe_mount_targets', 'MountTargets', None)\n permission_prefix = 'elasticfilesystem'\n name = id = 'MountTargetId'\n filter_name = 'MountTargetId'\n filter_type = 'scalar'\n arn = False\n\n\[email protected]_registry.register('subnet')\nclass Subnet(SubnetFilter):\n\n RelatedIdsExpression = \"SubnetId\"\n\n\[email protected]_registry.register('security-group')\nclass SecurityGroup(SecurityGroupFilter):\n\n efs_group_cache = None\n\n RelatedIdsExpression = \"\"\n\n def get_related_ids(self, resources):\n\n if self.efs_group_cache:\n group_ids = set()\n for r in 
resources:\n group_ids.update(\n self.efs_group_cache.get(r['MountTargetId'], ()))\n return list(group_ids)\n\n client = local_session(self.manager.session_factory).client('efs')\n groups = {}\n group_ids = set()\n retry = get_retry(('Throttled',), 12)\n\n for r in resources:\n groups[r['MountTargetId']] = retry(\n client.describe_mount_target_security_groups,\n MountTargetId=r['MountTargetId'])['SecurityGroups']\n group_ids.update(groups[r['MountTargetId']])\n\n self.efs_group_cache = groups\n return list(group_ids)\n\n\[email protected]_registry.register('kms-key')\nclass KmsFilter(KmsRelatedFilter):\n \"\"\"\n Filter a resource by its associcated kms key and optionally the aliasname\n of the kms key by using 'c7n:AliasName'\n\n :example:\n\n .. code-block:: yaml\n\n policies:\n - name: efs-kms-key-filters\n resource: efs\n filters:\n - type: kms-key\n key: c7n:AliasName\n value: \"^(alias/aws/)\"\n op: regex\n \"\"\"\n RelatedIdsExpression = 'KmsKeyId'\n\n\[email protected]_registry.register('delete')\nclass Delete(Action):\n\n schema = type_schema('delete')\n permissions = ('elasticfilesystem:DescribeMountTargets',\n 'elasticfilesystem:DeleteMountTarget',\n 'elasticfilesystem:DeleteFileSystem')\n\n def process(self, resources):\n client = local_session(self.manager.session_factory).client('efs')\n self.unmount_filesystems(resources)\n retry = get_retry(('FileSystemInUse',), 12)\n for r in resources:\n retry(client.delete_file_system, FileSystemId=r['FileSystemId'])\n\n def unmount_filesystems(self, resources):\n client = local_session(self.manager.session_factory).client('efs')\n for r in resources:\n if not r['NumberOfMountTargets']:\n continue\n for t in client.describe_mount_targets(\n FileSystemId=r['FileSystemId'])['MountTargets']:\n client.delete_mount_target(MountTargetId=t['MountTargetId'])\n\n\[email protected]_registry.register('configure-lifecycle-policy')\nclass ConfigureLifecycle(BaseAction):\n \"\"\"Enable/disable lifecycle policy for efs.\n\n :example:\n\n .. code-block:: yaml\n\n policies:\n - name: efs-apply-lifecycle\n resource: efs\n actions:\n - type: configure-lifecycle-policy\n state: enable\n rules:\n - 'TransitionToIA': 'AFTER_7_DAYS'\n\n \"\"\"\n schema = type_schema(\n 'configure-lifecycle-policy',\n state={'enum': ['enable', 'disable']},\n rules={\n 'type': 'array',\n 'items': {'type': 'object'}},\n required=['state'])\n\n permissions = ('elasticfilesystem:PutLifecycleConfiguration',)\n shape = 'PutLifecycleConfigurationRequest'\n\n def validate(self):\n if self.data.get('state') == 'enable' and 'rules' not in self.data:\n raise PolicyValidationError(\n 'rules are required to enable lifecycle configuration %s' % (self.manager.data))\n if self.data.get('state') == 'disable' and 'rules' in self.data:\n raise PolicyValidationError(\n 'rules not required to disable lifecycle configuration %s' % (self.manager.data))\n if self.data.get('rules'):\n attrs = {}\n attrs['LifecyclePolicies'] = self.data['rules']\n attrs['FileSystemId'] = 'PolicyValidator'\n return shape_validate(attrs, self.shape, 'efs')\n\n def process(self, resources):\n client = local_session(self.manager.session_factory).client('efs')\n op_map = {'enable': self.data.get('rules'), 'disable': []}\n for r in resources:\n try:\n client.put_lifecycle_configuration(\n FileSystemId=r['FileSystemId'],\n LifecyclePolicies=op_map.get(self.data.get('state')))\n except client.exceptions.FileSystemNotFound:\n continue\n", "path": "c7n/resources/efs.py"}]} | 1,662 | 695 |
gh_patches_debug_28943 | rasdani/github-patches | git_diff | comic__grand-challenge.org-1020 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Replace links in the challenge copying command
People might use full URL links in the page HTML; use a regex to replace the links using the new challenge short name.
--- END ISSUE ---
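A minimal sketch of the kind of substitution the issue asks for is shown below; the helper name, the domain handling, and the link format are assumptions for illustration (the real command would take the old and new short names from the source and destination challenges).

```python
# Sketch only: rewrite absolute links pointing at the old challenge subdomain.
import re

def replace_challenge_links(html: str, domain: str, old_name: str, new_name: str) -> str:
    pattern = rf'href="https?://{re.escape(old_name)}\.{re.escape(domain)}([^"]*)"'
    return re.sub(pattern, rf'href="https://{new_name}.{domain}\1"', html)
```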
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `app/grandchallenge/challenges/management/commands/copy_challenge.py`
Content:
```
1 from django.core.management import BaseCommand, CommandError
2
3 from grandchallenge.challenges.models import Challenge
4 from grandchallenge.pages.models import Page
5
6
7 class Command(BaseCommand):
8 help = "Creates a copy of a challenge"
9
10 challenge_fields = [
11 "creator",
12 "description",
13 "educational",
14 "disclaimer",
15 "require_participant_review",
16 "use_registration_page",
17 "registration_page_text",
18 "use_evaluation",
19 "logo",
20 "banner",
21 ]
22
23 challenge_m2m_fields = [
24 "task_types",
25 "modalities",
26 "structures",
27 ]
28
29 config_fields = [
30 "use_teams",
31 "score_title",
32 "score_jsonpath",
33 "score_error_jsonpath",
34 "score_default_sort",
35 "score_decimal_places",
36 "extra_results_columns",
37 "scoring_method_choice",
38 "result_display_choice",
39 "allow_submission_comments",
40 "display_submission_comments",
41 "supplementary_file_choice",
42 "supplementary_file_label",
43 "supplementary_file_help_text",
44 "show_supplementary_file_link",
45 "publication_url_choice",
46 "show_publication_url",
47 "daily_submission_limit",
48 "submission_page_html",
49 "auto_publish_new_results",
50 "display_all_metrics",
51 "submission_join_key",
52 ]
53
54 page_fields = [
55 "title",
56 "permission_lvl",
57 "order",
58 "display_title",
59 "hidden",
60 "html",
61 ]
62
63 def add_arguments(self, parser):
64 parser.add_argument("source", type=str)
65 parser.add_argument("dest", type=str)
66
67 def handle(self, *args, **options):
68 src_name = options.pop("source")
69 dest_name = options.pop("dest")
70
71 if src_name.lower() == dest_name.lower():
72 raise CommandError("Source and dest names must be different")
73
74 src_challenge = Challenge.objects.get(short_name__iexact=src_name)
75 dest_challenge = self._create_new_challenge(
76 src_challenge=src_challenge, dest_name=dest_name
77 )
78
79 self._copy_m2m_fields(
80 src_challenge=src_challenge, dest_challenge=dest_challenge
81 )
82 self._copy_evaluation_config(
83 src_challenge=src_challenge, dest_challenge=dest_challenge
84 )
85 self._copy_pages(
86 src_challenge=src_challenge, dest_challenge=dest_challenge
87 )
88 self._copy_admins(
89 src_challenge=src_challenge, dest_challenge=dest_challenge
90 )
91
92 def _create_new_challenge(self, *, src_challenge, dest_name):
93 new_challenge = Challenge(
94 short_name=dest_name,
95 **{f: getattr(src_challenge, f) for f in self.challenge_fields},
96 )
97 new_challenge.full_clean()
98 new_challenge.save()
99 return new_challenge
100
101 def _copy_m2m_fields(self, *, src_challenge, dest_challenge):
102 for f in self.challenge_m2m_fields:
103 src_m2m = getattr(src_challenge, f)
104 dest_m2m = getattr(dest_challenge, f)
105 dest_m2m.set(src_m2m.all())
106
107 def _copy_evaluation_config(self, *, src_challenge, dest_challenge):
108 src_config = src_challenge.evaluation_config
109 dest_config = dest_challenge.evaluation_config
110
111 for attr in self.config_fields:
112 setattr(dest_config, attr, getattr(src_config, attr))
113
114 dest_config.save()
115
116 def _copy_pages(self, *, src_challenge, dest_challenge):
117 src_pages = src_challenge.page_set.all()
118
119 for src_page in src_pages:
120 Page.objects.create(
121 challenge=dest_challenge,
122 **{f: getattr(src_page, f) for f in self.page_fields},
123 )
124
125 def _copy_admins(self, *, src_challenge, dest_challenge):
126 for u in src_challenge.get_admins():
127 dest_challenge.add_admin(u)
128
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/app/grandchallenge/challenges/management/commands/copy_challenge.py b/app/grandchallenge/challenges/management/commands/copy_challenge.py
--- a/app/grandchallenge/challenges/management/commands/copy_challenge.py
+++ b/app/grandchallenge/challenges/management/commands/copy_challenge.py
@@ -1,3 +1,6 @@
+import re
+
+from django.contrib.sites.models import Site
from django.core.management import BaseCommand, CommandError
from grandchallenge.challenges.models import Challenge
@@ -57,7 +60,6 @@
"order",
"display_title",
"hidden",
- "html",
]
def add_arguments(self, parser):
@@ -113,12 +115,24 @@
dest_config.save()
+ def _substitute_urls(self, html, domain, old, new):
+ quote_replace = r"href='([^']*)'"
+ regex = fr'href="[^/]*//{old}.{domain}([^""]*)"'
+ html = re.sub(quote_replace, r'href="\1"', html)
+ return re.sub(regex, fr'href="https://{new}.{domain}\1"', html,)
+
def _copy_pages(self, *, src_challenge, dest_challenge):
src_pages = src_challenge.page_set.all()
+ site = Site.objects.get_current()
+ domain = site.domain
+ old = src_challenge.short_name
+ new = dest_challenge.short_name
+
for src_page in src_pages:
Page.objects.create(
challenge=dest_challenge,
+ html=self._substitute_urls(src_page.html, domain, old, new),
**{f: getattr(src_page, f) for f in self.page_fields},
)
| {"golden_diff": "diff --git a/app/grandchallenge/challenges/management/commands/copy_challenge.py b/app/grandchallenge/challenges/management/commands/copy_challenge.py\n--- a/app/grandchallenge/challenges/management/commands/copy_challenge.py\n+++ b/app/grandchallenge/challenges/management/commands/copy_challenge.py\n@@ -1,3 +1,6 @@\n+import re\n+\n+from django.contrib.sites.models import Site\n from django.core.management import BaseCommand, CommandError\n \n from grandchallenge.challenges.models import Challenge\n@@ -57,7 +60,6 @@\n \"order\",\n \"display_title\",\n \"hidden\",\n- \"html\",\n ]\n \n def add_arguments(self, parser):\n@@ -113,12 +115,24 @@\n \n dest_config.save()\n \n+ def _substitute_urls(self, html, domain, old, new):\n+ quote_replace = r\"href='([^']*)'\"\n+ regex = fr'href=\"[^/]*//{old}.{domain}([^\"\"]*)\"'\n+ html = re.sub(quote_replace, r'href=\"\\1\"', html)\n+ return re.sub(regex, fr'href=\"https://{new}.{domain}\\1\"', html,)\n+\n def _copy_pages(self, *, src_challenge, dest_challenge):\n src_pages = src_challenge.page_set.all()\n \n+ site = Site.objects.get_current()\n+ domain = site.domain\n+ old = src_challenge.short_name\n+ new = dest_challenge.short_name\n+\n for src_page in src_pages:\n Page.objects.create(\n challenge=dest_challenge,\n+ html=self._substitute_urls(src_page.html, domain, old, new),\n **{f: getattr(src_page, f) for f in self.page_fields},\n )\n", "issue": "Replace links in the challenge copying command\nPeople might use full url links in the page HTML, use a regex to replace the links using the new challenge short name.\n", "before_files": [{"content": "from django.core.management import BaseCommand, CommandError\n\nfrom grandchallenge.challenges.models import Challenge\nfrom grandchallenge.pages.models import Page\n\n\nclass Command(BaseCommand):\n help = \"Creates a copy of a challenge\"\n\n challenge_fields = [\n \"creator\",\n \"description\",\n \"educational\",\n \"disclaimer\",\n \"require_participant_review\",\n \"use_registration_page\",\n \"registration_page_text\",\n \"use_evaluation\",\n \"logo\",\n \"banner\",\n ]\n\n challenge_m2m_fields = [\n \"task_types\",\n \"modalities\",\n \"structures\",\n ]\n\n config_fields = [\n \"use_teams\",\n \"score_title\",\n \"score_jsonpath\",\n \"score_error_jsonpath\",\n \"score_default_sort\",\n \"score_decimal_places\",\n \"extra_results_columns\",\n \"scoring_method_choice\",\n \"result_display_choice\",\n \"allow_submission_comments\",\n \"display_submission_comments\",\n \"supplementary_file_choice\",\n \"supplementary_file_label\",\n \"supplementary_file_help_text\",\n \"show_supplementary_file_link\",\n \"publication_url_choice\",\n \"show_publication_url\",\n \"daily_submission_limit\",\n \"submission_page_html\",\n \"auto_publish_new_results\",\n \"display_all_metrics\",\n \"submission_join_key\",\n ]\n\n page_fields = [\n \"title\",\n \"permission_lvl\",\n \"order\",\n \"display_title\",\n \"hidden\",\n \"html\",\n ]\n\n def add_arguments(self, parser):\n parser.add_argument(\"source\", type=str)\n parser.add_argument(\"dest\", type=str)\n\n def handle(self, *args, **options):\n src_name = options.pop(\"source\")\n dest_name = options.pop(\"dest\")\n\n if src_name.lower() == dest_name.lower():\n raise CommandError(\"Source and dest names must be different\")\n\n src_challenge = Challenge.objects.get(short_name__iexact=src_name)\n dest_challenge = self._create_new_challenge(\n src_challenge=src_challenge, dest_name=dest_name\n )\n\n self._copy_m2m_fields(\n 
src_challenge=src_challenge, dest_challenge=dest_challenge\n )\n self._copy_evaluation_config(\n src_challenge=src_challenge, dest_challenge=dest_challenge\n )\n self._copy_pages(\n src_challenge=src_challenge, dest_challenge=dest_challenge\n )\n self._copy_admins(\n src_challenge=src_challenge, dest_challenge=dest_challenge\n )\n\n def _create_new_challenge(self, *, src_challenge, dest_name):\n new_challenge = Challenge(\n short_name=dest_name,\n **{f: getattr(src_challenge, f) for f in self.challenge_fields},\n )\n new_challenge.full_clean()\n new_challenge.save()\n return new_challenge\n\n def _copy_m2m_fields(self, *, src_challenge, dest_challenge):\n for f in self.challenge_m2m_fields:\n src_m2m = getattr(src_challenge, f)\n dest_m2m = getattr(dest_challenge, f)\n dest_m2m.set(src_m2m.all())\n\n def _copy_evaluation_config(self, *, src_challenge, dest_challenge):\n src_config = src_challenge.evaluation_config\n dest_config = dest_challenge.evaluation_config\n\n for attr in self.config_fields:\n setattr(dest_config, attr, getattr(src_config, attr))\n\n dest_config.save()\n\n def _copy_pages(self, *, src_challenge, dest_challenge):\n src_pages = src_challenge.page_set.all()\n\n for src_page in src_pages:\n Page.objects.create(\n challenge=dest_challenge,\n **{f: getattr(src_page, f) for f in self.page_fields},\n )\n\n def _copy_admins(self, *, src_challenge, dest_challenge):\n for u in src_challenge.get_admins():\n dest_challenge.add_admin(u)\n", "path": "app/grandchallenge/challenges/management/commands/copy_challenge.py"}], "after_files": [{"content": "import re\n\nfrom django.contrib.sites.models import Site\nfrom django.core.management import BaseCommand, CommandError\n\nfrom grandchallenge.challenges.models import Challenge\nfrom grandchallenge.pages.models import Page\n\n\nclass Command(BaseCommand):\n help = \"Creates a copy of a challenge\"\n\n challenge_fields = [\n \"creator\",\n \"description\",\n \"educational\",\n \"disclaimer\",\n \"require_participant_review\",\n \"use_registration_page\",\n \"registration_page_text\",\n \"use_evaluation\",\n \"logo\",\n \"banner\",\n ]\n\n challenge_m2m_fields = [\n \"task_types\",\n \"modalities\",\n \"structures\",\n ]\n\n config_fields = [\n \"use_teams\",\n \"score_title\",\n \"score_jsonpath\",\n \"score_error_jsonpath\",\n \"score_default_sort\",\n \"score_decimal_places\",\n \"extra_results_columns\",\n \"scoring_method_choice\",\n \"result_display_choice\",\n \"allow_submission_comments\",\n \"display_submission_comments\",\n \"supplementary_file_choice\",\n \"supplementary_file_label\",\n \"supplementary_file_help_text\",\n \"show_supplementary_file_link\",\n \"publication_url_choice\",\n \"show_publication_url\",\n \"daily_submission_limit\",\n \"submission_page_html\",\n \"auto_publish_new_results\",\n \"display_all_metrics\",\n \"submission_join_key\",\n ]\n\n page_fields = [\n \"title\",\n \"permission_lvl\",\n \"order\",\n \"display_title\",\n \"hidden\",\n ]\n\n def add_arguments(self, parser):\n parser.add_argument(\"source\", type=str)\n parser.add_argument(\"dest\", type=str)\n\n def handle(self, *args, **options):\n src_name = options.pop(\"source\")\n dest_name = options.pop(\"dest\")\n\n if src_name.lower() == dest_name.lower():\n raise CommandError(\"Source and dest names must be different\")\n\n src_challenge = Challenge.objects.get(short_name__iexact=src_name)\n dest_challenge = self._create_new_challenge(\n src_challenge=src_challenge, dest_name=dest_name\n )\n\n self._copy_m2m_fields(\n 
src_challenge=src_challenge, dest_challenge=dest_challenge\n )\n self._copy_evaluation_config(\n src_challenge=src_challenge, dest_challenge=dest_challenge\n )\n self._copy_pages(\n src_challenge=src_challenge, dest_challenge=dest_challenge\n )\n self._copy_admins(\n src_challenge=src_challenge, dest_challenge=dest_challenge\n )\n\n def _create_new_challenge(self, *, src_challenge, dest_name):\n new_challenge = Challenge(\n short_name=dest_name,\n **{f: getattr(src_challenge, f) for f in self.challenge_fields},\n )\n new_challenge.full_clean()\n new_challenge.save()\n return new_challenge\n\n def _copy_m2m_fields(self, *, src_challenge, dest_challenge):\n for f in self.challenge_m2m_fields:\n src_m2m = getattr(src_challenge, f)\n dest_m2m = getattr(dest_challenge, f)\n dest_m2m.set(src_m2m.all())\n\n def _copy_evaluation_config(self, *, src_challenge, dest_challenge):\n src_config = src_challenge.evaluation_config\n dest_config = dest_challenge.evaluation_config\n\n for attr in self.config_fields:\n setattr(dest_config, attr, getattr(src_config, attr))\n\n dest_config.save()\n\n def _substitute_urls(self, html, domain, old, new):\n quote_replace = r\"href='([^']*)'\"\n regex = fr'href=\"[^/]*//{old}.{domain}([^\"\"]*)\"'\n html = re.sub(quote_replace, r'href=\"\\1\"', html)\n return re.sub(regex, fr'href=\"https://{new}.{domain}\\1\"', html,)\n\n def _copy_pages(self, *, src_challenge, dest_challenge):\n src_pages = src_challenge.page_set.all()\n\n site = Site.objects.get_current()\n domain = site.domain\n old = src_challenge.short_name\n new = dest_challenge.short_name\n\n for src_page in src_pages:\n Page.objects.create(\n challenge=dest_challenge,\n html=self._substitute_urls(src_page.html, domain, old, new),\n **{f: getattr(src_page, f) for f in self.page_fields},\n )\n\n def _copy_admins(self, *, src_challenge, dest_challenge):\n for u in src_challenge.get_admins():\n dest_challenge.add_admin(u)\n", "path": "app/grandchallenge/challenges/management/commands/copy_challenge.py"}]} | 1,399 | 386 |
gh_patches_debug_58008 | rasdani/github-patches | git_diff | marshmallow-code__webargs-385 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Fix compatibility with Falcon 2.0
Tests are currently failing when Falcon 2.0.0 is installed.
--- END ISSUE ---
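Since webargs declares its framework extras in `setup.py`, the most direct way to keep the suite green until Falcon 2.0 is supported is to cap the pin. A sketch of the relevant fragment (surrounding entries elided):

```python
# Sketch: cap Falcon below 2.0 until the parser is updated for the new API.
FRAMEWORKS = [
    # ... other framework pins unchanged ...
    "falcon>=1.4.0,<2.0",
]
```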
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 import sys
3 import re
4 from setuptools import setup, find_packages
5
6 INSTALL_REQUIRES = ["marshmallow>=2.15.2"]
7 if sys.version_info[0] < 3:
8 INSTALL_REQUIRES.append("simplejson>=2.1.0")
9
10 FRAMEWORKS = [
11 "Flask>=0.12.2",
12 "Django>=1.11.16",
13 "bottle>=0.12.13",
14 "tornado>=4.5.2",
15 "pyramid>=1.9.1",
16 "webapp2>=3.0.0b1",
17 "falcon>=1.4.0",
18 'aiohttp>=3.0.0; python_version >= "3.5"',
19 ]
20 EXTRAS_REQUIRE = {
21 "frameworks": FRAMEWORKS,
22 "tests": [
23 "pytest",
24 "mock",
25 "webtest==2.0.32",
26 'webtest-aiohttp==2.0.0; python_version >= "3.5"',
27 'pytest-aiohttp>=0.3.0; python_version >= "3.5"',
28 ]
29 + FRAMEWORKS,
30 "lint": [
31 'mypy==0.650; python_version >= "3.5"',
32 "flake8==3.6.0",
33 'flake8-bugbear==18.8.0; python_version >= "3.5"',
34 "pre-commit==1.13.0",
35 ],
36 }
37 EXTRAS_REQUIRE["dev"] = EXTRAS_REQUIRE["tests"] + EXTRAS_REQUIRE["lint"] + ["tox"]
38
39
40 def find_version(fname):
41 """Attempts to find the version number in the file names fname.
42 Raises RuntimeError if not found.
43 """
44 version = ""
45 with open(fname, "r") as fp:
46 reg = re.compile(r'__version__ = [\'"]([^\'"]*)[\'"]')
47 for line in fp:
48 m = reg.match(line)
49 if m:
50 version = m.group(1)
51 break
52 if not version:
53 raise RuntimeError("Cannot find version information")
54 return version
55
56
57 def read(fname):
58 with open(fname) as fp:
59 content = fp.read()
60 return content
61
62
63 setup(
64 name="webargs",
65 version=find_version("src/webargs/__init__.py"),
66 description=(
67 "Declarative parsing and validation of HTTP request objects, "
68 "with built-in support for popular web frameworks, including "
69 "Flask, Django, Bottle, Tornado, Pyramid, webapp2, Falcon, and aiohttp."
70 ),
71 long_description=read("README.rst"),
72 author="Steven Loria",
73 author_email="[email protected]",
74 url="https://github.com/marshmallow-code/webargs",
75 packages=find_packages("src"),
76 package_dir={"": "src"},
77 install_requires=INSTALL_REQUIRES,
78 extras_require=EXTRAS_REQUIRE,
79 license="MIT",
80 zip_safe=False,
81 keywords=(
82 "webargs",
83 "http",
84 "flask",
85 "django",
86 "bottle",
87 "tornado",
88 "aiohttp",
89 "webapp2",
90 "request",
91 "arguments",
92 "validation",
93 "parameters",
94 "rest",
95 "api",
96 "marshmallow",
97 ),
98 classifiers=[
99 "Development Status :: 5 - Production/Stable",
100 "Intended Audience :: Developers",
101 "License :: OSI Approved :: MIT License",
102 "Natural Language :: English",
103 "Programming Language :: Python :: 2",
104 "Programming Language :: Python :: 2.7",
105 "Programming Language :: Python :: 3",
106 "Programming Language :: Python :: 3.5",
107 "Programming Language :: Python :: 3.6",
108 "Programming Language :: Python :: 3.7",
109 "Topic :: Internet :: WWW/HTTP :: Dynamic Content",
110 "Topic :: Internet :: WWW/HTTP :: WSGI :: Application",
111 ],
112 test_suite="tests",
113 project_urls={
114 "Changelog": "https://webargs.readthedocs.io/en/latest/changelog.html",
115 "Issues": "https://github.com/marshmallow-code/webargs/issues",
116 "Funding": "https://opencollective.com/marshmallow",
117 "Tidelift": "https://tidelift.com/subscription/pkg/pypi-webargs?utm_source=pypi-marshmallow&utm_medium=pypi", # noqa
118 },
119 )
120
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -14,7 +14,7 @@
"tornado>=4.5.2",
"pyramid>=1.9.1",
"webapp2>=3.0.0b1",
- "falcon>=1.4.0",
+ "falcon>=1.4.0,<2.0",
'aiohttp>=3.0.0; python_version >= "3.5"',
]
EXTRAS_REQUIRE = {
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -14,7 +14,7 @@\n \"tornado>=4.5.2\",\n \"pyramid>=1.9.1\",\n \"webapp2>=3.0.0b1\",\n- \"falcon>=1.4.0\",\n+ \"falcon>=1.4.0,<2.0\",\n 'aiohttp>=3.0.0; python_version >= \"3.5\"',\n ]\n EXTRAS_REQUIRE = {\n", "issue": "Fix compatibility with Falcon 2.0\nTests are currently failing when Falcon 2.0.0 is installed.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport sys\nimport re\nfrom setuptools import setup, find_packages\n\nINSTALL_REQUIRES = [\"marshmallow>=2.15.2\"]\nif sys.version_info[0] < 3:\n INSTALL_REQUIRES.append(\"simplejson>=2.1.0\")\n\nFRAMEWORKS = [\n \"Flask>=0.12.2\",\n \"Django>=1.11.16\",\n \"bottle>=0.12.13\",\n \"tornado>=4.5.2\",\n \"pyramid>=1.9.1\",\n \"webapp2>=3.0.0b1\",\n \"falcon>=1.4.0\",\n 'aiohttp>=3.0.0; python_version >= \"3.5\"',\n]\nEXTRAS_REQUIRE = {\n \"frameworks\": FRAMEWORKS,\n \"tests\": [\n \"pytest\",\n \"mock\",\n \"webtest==2.0.32\",\n 'webtest-aiohttp==2.0.0; python_version >= \"3.5\"',\n 'pytest-aiohttp>=0.3.0; python_version >= \"3.5\"',\n ]\n + FRAMEWORKS,\n \"lint\": [\n 'mypy==0.650; python_version >= \"3.5\"',\n \"flake8==3.6.0\",\n 'flake8-bugbear==18.8.0; python_version >= \"3.5\"',\n \"pre-commit==1.13.0\",\n ],\n}\nEXTRAS_REQUIRE[\"dev\"] = EXTRAS_REQUIRE[\"tests\"] + EXTRAS_REQUIRE[\"lint\"] + [\"tox\"]\n\n\ndef find_version(fname):\n \"\"\"Attempts to find the version number in the file names fname.\n Raises RuntimeError if not found.\n \"\"\"\n version = \"\"\n with open(fname, \"r\") as fp:\n reg = re.compile(r'__version__ = [\\'\"]([^\\'\"]*)[\\'\"]')\n for line in fp:\n m = reg.match(line)\n if m:\n version = m.group(1)\n break\n if not version:\n raise RuntimeError(\"Cannot find version information\")\n return version\n\n\ndef read(fname):\n with open(fname) as fp:\n content = fp.read()\n return content\n\n\nsetup(\n name=\"webargs\",\n version=find_version(\"src/webargs/__init__.py\"),\n description=(\n \"Declarative parsing and validation of HTTP request objects, \"\n \"with built-in support for popular web frameworks, including \"\n \"Flask, Django, Bottle, Tornado, Pyramid, webapp2, Falcon, and aiohttp.\"\n ),\n long_description=read(\"README.rst\"),\n author=\"Steven Loria\",\n author_email=\"[email protected]\",\n url=\"https://github.com/marshmallow-code/webargs\",\n packages=find_packages(\"src\"),\n package_dir={\"\": \"src\"},\n install_requires=INSTALL_REQUIRES,\n extras_require=EXTRAS_REQUIRE,\n license=\"MIT\",\n zip_safe=False,\n keywords=(\n \"webargs\",\n \"http\",\n \"flask\",\n \"django\",\n \"bottle\",\n \"tornado\",\n \"aiohttp\",\n \"webapp2\",\n \"request\",\n \"arguments\",\n \"validation\",\n \"parameters\",\n \"rest\",\n \"api\",\n \"marshmallow\",\n ),\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Natural Language :: English\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Topic :: Internet :: WWW/HTTP :: Dynamic Content\",\n \"Topic :: Internet :: WWW/HTTP :: WSGI :: Application\",\n ],\n test_suite=\"tests\",\n project_urls={\n \"Changelog\": \"https://webargs.readthedocs.io/en/latest/changelog.html\",\n \"Issues\": \"https://github.com/marshmallow-code/webargs/issues\",\n 
\"Funding\": \"https://opencollective.com/marshmallow\",\n \"Tidelift\": \"https://tidelift.com/subscription/pkg/pypi-webargs?utm_source=pypi-marshmallow&utm_medium=pypi\", # noqa\n },\n)\n", "path": "setup.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nimport sys\nimport re\nfrom setuptools import setup, find_packages\n\nINSTALL_REQUIRES = [\"marshmallow>=2.15.2\"]\nif sys.version_info[0] < 3:\n INSTALL_REQUIRES.append(\"simplejson>=2.1.0\")\n\nFRAMEWORKS = [\n \"Flask>=0.12.2\",\n \"Django>=1.11.16\",\n \"bottle>=0.12.13\",\n \"tornado>=4.5.2\",\n \"pyramid>=1.9.1\",\n \"webapp2>=3.0.0b1\",\n \"falcon>=1.4.0,<2.0\",\n 'aiohttp>=3.0.0; python_version >= \"3.5\"',\n]\nEXTRAS_REQUIRE = {\n \"frameworks\": FRAMEWORKS,\n \"tests\": [\n \"pytest\",\n \"mock\",\n \"webtest==2.0.32\",\n 'webtest-aiohttp==2.0.0; python_version >= \"3.5\"',\n 'pytest-aiohttp>=0.3.0; python_version >= \"3.5\"',\n ]\n + FRAMEWORKS,\n \"lint\": [\n 'mypy==0.650; python_version >= \"3.5\"',\n \"flake8==3.6.0\",\n 'flake8-bugbear==18.8.0; python_version >= \"3.5\"',\n \"pre-commit==1.13.0\",\n ],\n}\nEXTRAS_REQUIRE[\"dev\"] = EXTRAS_REQUIRE[\"tests\"] + EXTRAS_REQUIRE[\"lint\"] + [\"tox\"]\n\n\ndef find_version(fname):\n \"\"\"Attempts to find the version number in the file names fname.\n Raises RuntimeError if not found.\n \"\"\"\n version = \"\"\n with open(fname, \"r\") as fp:\n reg = re.compile(r'__version__ = [\\'\"]([^\\'\"]*)[\\'\"]')\n for line in fp:\n m = reg.match(line)\n if m:\n version = m.group(1)\n break\n if not version:\n raise RuntimeError(\"Cannot find version information\")\n return version\n\n\ndef read(fname):\n with open(fname) as fp:\n content = fp.read()\n return content\n\n\nsetup(\n name=\"webargs\",\n version=find_version(\"src/webargs/__init__.py\"),\n description=(\n \"Declarative parsing and validation of HTTP request objects, \"\n \"with built-in support for popular web frameworks, including \"\n \"Flask, Django, Bottle, Tornado, Pyramid, webapp2, Falcon, and aiohttp.\"\n ),\n long_description=read(\"README.rst\"),\n author=\"Steven Loria\",\n author_email=\"[email protected]\",\n url=\"https://github.com/marshmallow-code/webargs\",\n packages=find_packages(\"src\"),\n package_dir={\"\": \"src\"},\n install_requires=INSTALL_REQUIRES,\n extras_require=EXTRAS_REQUIRE,\n license=\"MIT\",\n zip_safe=False,\n keywords=(\n \"webargs\",\n \"http\",\n \"flask\",\n \"django\",\n \"bottle\",\n \"tornado\",\n \"aiohttp\",\n \"webapp2\",\n \"request\",\n \"arguments\",\n \"validation\",\n \"parameters\",\n \"rest\",\n \"api\",\n \"marshmallow\",\n ),\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Natural Language :: English\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Topic :: Internet :: WWW/HTTP :: Dynamic Content\",\n \"Topic :: Internet :: WWW/HTTP :: WSGI :: Application\",\n ],\n test_suite=\"tests\",\n project_urls={\n \"Changelog\": \"https://webargs.readthedocs.io/en/latest/changelog.html\",\n \"Issues\": \"https://github.com/marshmallow-code/webargs/issues\",\n \"Funding\": \"https://opencollective.com/marshmallow\",\n \"Tidelift\": \"https://tidelift.com/subscription/pkg/pypi-webargs?utm_source=pypi-marshmallow&utm_medium=pypi\", # noqa\n 
},\n)\n", "path": "setup.py"}]} | 1,518 | 123 |
gh_patches_debug_41585 | rasdani/github-patches | git_diff | napari__napari-5309 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add cadenced runs for benchmarking slicing
## Description
The goal of this task is to ensure that any existing benchmarking tools are running as expected, and that the output of those tasks can be easily used to detect performance changes in napari.
## Motivation
Slicing is a core part of napari, and it's critical that any changes to the related code do not substantially degrade the performance of slicing. Many of the existing ASV benchmarks measure the time and memory used during layer initialization and slicing, which could be affected by this project.
## Output
The key output of this task is to have some way of easily detecting when the performance of napari changes at some regular interval (e.g. daily, weekly) based on the existing ASV benchmarks. This could be as simple as using `asv publish` after running `asv continuous` in the existing GitHub workflow to generate static HTML that can be downloaded and rendered locally. Alternatively, the report could be entirely text based as long as it's easy to find any changes in performance.
We should also have some way to trigger a full benchmark run for a PR related to async slicing that also generates a similar report, but that could be done in a follow-up PR. This task can also be broken down as needed.
## Notes
need to add workflow runs for benchmarking
Jaime can support
Relevant previous PRs
- https://github.com/napari/napari/pull/4554
- https://github.com/napari/napari/pull/4656
<hr>
### KCP update:
Issues with the `Benchmark` workflow have been resolved in https://github.com/napari/napari/pull/5083
Unfortunately I introduced a regression which I'm fixing in https://github.com/napari/napari/pull/5246
--- END ISSUE ---
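As a rough illustration of the reporting flow mentioned in the issue (`asv continuous` followed by `asv publish`), a scheduled job could drive ASV through a small helper like the one below; the helper itself, the comparison branch, and the regression factor are assumptions, not part of the issue.

```python
# Sketch of a cadenced benchmark run; branch name and factor are assumptions.
import subprocess

def run_benchmark_report(base: str = "main", factor: str = "1.1") -> None:
    # Benchmark HEAD against the base branch and flag significant regressions.
    subprocess.run(["asv", "continuous", "--factor", factor, base, "HEAD"], check=True)
    # Render all collected results as static HTML for download from the CI run.
    subprocess.run(["asv", "publish"], check=True)

if __name__ == "__main__":
    run_benchmark_report()
```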
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `napari/benchmarks/benchmark_qt_slicing.py`
Content:
```
1 # See "Writing benchmarks" in the asv docs for more information.
2 # https://asv.readthedocs.io/en/latest/writing_benchmarks.html
3 # or the napari documentation on benchmarking
4 # https://github.com/napari/napari/blob/main/docs/BENCHMARKS.md
5
6 import time
7
8 import numpy as np
9 import zarr
10 from qtpy.QtWidgets import QApplication
11
12 import napari
13 from napari.layers import Image
14
15 SAMPLE_PARAMS = {
16 'skin_data': {
17 # napari-bio-sample-data
18 'shape': (1280, 960, 3),
19 'chunk_shape': (512, 512, 3),
20 'dtype': 'uint8',
21 },
22 'jrc_hela-2 (scale 3)': {
23 # s3://janelia-cosem-datasets/jrc_hela-2/jrc_hela-2.n5
24 'shape': (796, 200, 1500),
25 'dtype': 'uint16',
26 'chunk_shape': (64, 64, 64),
27 },
28 }
29
30
31 def get_image_params():
32 # chunksizes = [(64,64,64), (256,256,256), (512,512,512)]
33 latencies = [0.05 * i for i in range(0, 3)]
34 datanames = SAMPLE_PARAMS.keys()
35 params = (latencies, datanames)
36
37 return params
38
39
40 class SlowMemoryStore(zarr.storage.MemoryStore):
41 def __init__(self, load_delay, *args, **kwargs):
42 self.load_delay = load_delay
43 super().__init__(*args, **kwargs)
44
45 def __getitem__(self, item: str):
46 time.sleep(self.load_delay)
47 return super().__getitem__(item)
48
49
50 class AsyncImage2DSuite:
51 params = get_image_params()
52
53 def setup(self, latency, dataname):
54 shape = SAMPLE_PARAMS[dataname]['shape']
55 chunk_shape = SAMPLE_PARAMS[dataname]['chunk_shape']
56 dtype = SAMPLE_PARAMS[dataname]['dtype']
57
58 store = SlowMemoryStore(load_delay=latency)
59 self.data = zarr.zeros(
60 shape,
61 chunks=chunk_shape,
62 dtype=dtype,
63 store=store,
64 )
65
66 self.layer = Image(self.data)
67
68 def time_create_layer(self, *args):
69 """Time to create an image layer."""
70 Image(self.data)
71
72 def time_set_view_slice(self, *args):
73 """Time to set view slice."""
74 self.layer._set_view_slice()
75
76 def time_refresh(self, *args):
77 """Time to refresh view."""
78 self.layer.refresh()
79
80
81 class QtViewerAsyncImage2DSuite:
82 params = get_image_params()
83
84 def setup(self, latency, dataname):
85 shape = SAMPLE_PARAMS[dataname]['shape']
86 chunk_shape = SAMPLE_PARAMS[dataname]['chunk_shape']
87 dtype = SAMPLE_PARAMS[dataname]['dtype']
88
89 if len(shape) == 3 and shape[2] == 3:
90 # Skip 2D RGB tests -- scrolling does not apply
91 self.viewer = None
92 raise NotImplementedError
93
94 store = SlowMemoryStore(load_delay=latency)
95 _ = QApplication.instance() or QApplication([])
96 self.data = zarr.zeros(
97 shape,
98 chunks=chunk_shape,
99 dtype=dtype,
100 store=store,
101 )
102
103 self.viewer = napari.Viewer()
104 self.viewer.add_image(self.data)
105
106 def time_z_scroll(self, *args):
107 layers_to_scroll = 4
108 for z in range(layers_to_scroll):
109 z = z * (self.data.shape[2] // layers_to_scroll)
110 self.viewer.dims.set_current_step(0, z)
111
112 def teardown(self, *args):
113 if self.viewer is not None:
114 self.viewer.window.close()
115
116
117 class QtViewerAsyncPointsSuite:
118 n_points = [2**i for i in range(12, 18)]
119 params = n_points
120
121 def setup(self, n_points):
122 _ = QApplication.instance() or QApplication([])
123
124 np.random.seed(0)
125 self.viewer = napari.Viewer()
126 # Fake image layer to set bounds. Is this really needed?
127 self.empty_image = np.zeros((512, 512, 512), dtype="uint8")
128 self.viewer.add_image(self.empty_image)
129 self.point_data = np.random.randint(512, size=(n_points, 3))
130 self.viewer.add_points(self.point_data)
131
132 def time_z_scroll(self, *args):
133 for z in range(self.empty_image.shape[0]):
134 self.viewer.dims.set_current_step(0, z)
135
136 def teardown(self, *args):
137 self.viewer.window.close()
138
139
140 class QtViewerAsyncPointsAndImage2DSuite:
141 n_points = [2**i for i in range(12, 18, 2)]
142 chunksize = [256, 512, 1024]
143 latency = [0.05 * i for i in range(0, 3)]
144 params = (n_points, latency, chunksize)
145
146 def setup(self, n_points, latency, chunksize):
147 store = SlowMemoryStore(load_delay=latency)
148 _ = QApplication.instance() or QApplication([])
149
150 np.random.seed(0)
151
152 self.image_data = zarr.zeros(
153 (64, 2048, 2048),
154 chunks=(1, chunksize, chunksize),
155 dtype='uint8',
156 store=store,
157 )
158
159 self.viewer = napari.Viewer()
160 self.viewer.add_image(self.image_data)
161 self.point_data = np.random.randint(512, size=(n_points, 3))
162 self.viewer.add_points(self.point_data)
163
164 def time_z_scroll(self, *args):
165 for z in range(self.image_data.shape[0]):
166 self.viewer.dims.set_current_step(0, z)
167
168 def teardown(self, *args):
169 self.viewer.window.close()
170
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/napari/benchmarks/benchmark_qt_slicing.py b/napari/benchmarks/benchmark_qt_slicing.py
--- a/napari/benchmarks/benchmark_qt_slicing.py
+++ b/napari/benchmarks/benchmark_qt_slicing.py
@@ -48,7 +48,12 @@
class AsyncImage2DSuite:
+ """TODO: these benchmarks are skipped. Remove the NotImplementedError in
+ setup to enable.
+ """
+
params = get_image_params()
+ timeout = 300
def setup(self, latency, dataname):
shape = SAMPLE_PARAMS[dataname]['shape']
@@ -64,6 +69,7 @@
)
self.layer = Image(self.data)
+ raise NotImplementedError
def time_create_layer(self, *args):
"""Time to create an image layer."""
@@ -79,7 +85,12 @@
class QtViewerAsyncImage2DSuite:
+ """TODO: these benchmarks are skipped. Remove the NotImplementedError in
+ setup to enable.
+ """
+
params = get_image_params()
+ timeout = 300
def setup(self, latency, dataname):
shape = SAMPLE_PARAMS[dataname]['shape']
@@ -102,6 +113,7 @@
self.viewer = napari.Viewer()
self.viewer.add_image(self.data)
+ raise NotImplementedError
def time_z_scroll(self, *args):
layers_to_scroll = 4
@@ -115,6 +127,10 @@
class QtViewerAsyncPointsSuite:
+ """TODO: these benchmarks are skipped. Remove the NotImplementedError in
+ setup to enable.
+ """
+
n_points = [2**i for i in range(12, 18)]
params = n_points
@@ -128,6 +144,7 @@
self.viewer.add_image(self.empty_image)
self.point_data = np.random.randint(512, size=(n_points, 3))
self.viewer.add_points(self.point_data)
+ raise NotImplementedError
def time_z_scroll(self, *args):
for z in range(self.empty_image.shape[0]):
@@ -138,10 +155,15 @@
class QtViewerAsyncPointsAndImage2DSuite:
+ """TODO: these benchmarks are skipped. Remove the NotImplementedError in
+ setup to enable.
+ """
+
n_points = [2**i for i in range(12, 18, 2)]
chunksize = [256, 512, 1024]
latency = [0.05 * i for i in range(0, 3)]
params = (n_points, latency, chunksize)
+ timeout = 600
def setup(self, n_points, latency, chunksize):
store = SlowMemoryStore(load_delay=latency)
@@ -160,6 +182,7 @@
self.viewer.add_image(self.image_data)
self.point_data = np.random.randint(512, size=(n_points, 3))
self.viewer.add_points(self.point_data)
+ raise NotImplementedError
def time_z_scroll(self, *args):
for z in range(self.image_data.shape[0]):
| {"golden_diff": "diff --git a/napari/benchmarks/benchmark_qt_slicing.py b/napari/benchmarks/benchmark_qt_slicing.py\n--- a/napari/benchmarks/benchmark_qt_slicing.py\n+++ b/napari/benchmarks/benchmark_qt_slicing.py\n@@ -48,7 +48,12 @@\n \n \n class AsyncImage2DSuite:\n+ \"\"\"TODO: these benchmarks are skipped. Remove the NotImplementedError in\n+ setup to enable.\n+ \"\"\"\n+\n params = get_image_params()\n+ timeout = 300\n \n def setup(self, latency, dataname):\n shape = SAMPLE_PARAMS[dataname]['shape']\n@@ -64,6 +69,7 @@\n )\n \n self.layer = Image(self.data)\n+ raise NotImplementedError\n \n def time_create_layer(self, *args):\n \"\"\"Time to create an image layer.\"\"\"\n@@ -79,7 +85,12 @@\n \n \n class QtViewerAsyncImage2DSuite:\n+ \"\"\"TODO: these benchmarks are skipped. Remove the NotImplementedError in\n+ setup to enable.\n+ \"\"\"\n+\n params = get_image_params()\n+ timeout = 300\n \n def setup(self, latency, dataname):\n shape = SAMPLE_PARAMS[dataname]['shape']\n@@ -102,6 +113,7 @@\n \n self.viewer = napari.Viewer()\n self.viewer.add_image(self.data)\n+ raise NotImplementedError\n \n def time_z_scroll(self, *args):\n layers_to_scroll = 4\n@@ -115,6 +127,10 @@\n \n \n class QtViewerAsyncPointsSuite:\n+ \"\"\"TODO: these benchmarks are skipped. Remove the NotImplementedError in\n+ setup to enable.\n+ \"\"\"\n+\n n_points = [2**i for i in range(12, 18)]\n params = n_points\n \n@@ -128,6 +144,7 @@\n self.viewer.add_image(self.empty_image)\n self.point_data = np.random.randint(512, size=(n_points, 3))\n self.viewer.add_points(self.point_data)\n+ raise NotImplementedError\n \n def time_z_scroll(self, *args):\n for z in range(self.empty_image.shape[0]):\n@@ -138,10 +155,15 @@\n \n \n class QtViewerAsyncPointsAndImage2DSuite:\n+ \"\"\"TODO: these benchmarks are skipped. Remove the NotImplementedError in\n+ setup to enable.\n+ \"\"\"\n+\n n_points = [2**i for i in range(12, 18, 2)]\n chunksize = [256, 512, 1024]\n latency = [0.05 * i for i in range(0, 3)]\n params = (n_points, latency, chunksize)\n+ timeout = 600\n \n def setup(self, n_points, latency, chunksize):\n store = SlowMemoryStore(load_delay=latency)\n@@ -160,6 +182,7 @@\n self.viewer.add_image(self.image_data)\n self.point_data = np.random.randint(512, size=(n_points, 3))\n self.viewer.add_points(self.point_data)\n+ raise NotImplementedError\n \n def time_z_scroll(self, *args):\n for z in range(self.image_data.shape[0]):\n", "issue": "Add cadenced runs for benchmarking slicing\n## Description\n\nThe goal of this task is to ensure that any existing benchmarking tools are running as expected. And that the output of those tasks can be easily used to detect performance changes in napari.\n\n## Motivation\n\nSlicing is a core part of napari and it's critical that any changes to the related code does not substantially negatively impact performance of slicing. Many of the existing ASV benchmarks measure the time and memory used during layer initialization and slicing, which could be affect by this project.\n\n## Output\n\nThe key output of this task is to have some way of easily detecting when the performance of napari changes at some regular interval (e.g. daily, weekly) based on the existing ASV benchmarks. This could be as simple as using `asv publish` after running `asv continuous` in the existing GitHub workflow to generate static HTML that can be downloaded and rendered locally. 
Alternatively, the report could be entirely text based as long as it's easy to find any changes in performance.\n\nWe should also have some way to trigger a full benchmark run for a PR related to async slicing that also generates a similar report, but that could be done in a follow-up PR. This task can also be broken down as needed.\n\n## Notes\nneed to add workflow runs for benchmarking\n\nJaime can support\n\nRelevant previous PRs\n- https://github.com/napari/napari/pull/4554\n- https://github.com/napari/napari/pull/4656\n\n\n<hr>\n\n### KCP update:\n\nIssues with the `Benchmark` workflow have been resolved in https://github.com/napari/napari/pull/5083\n\nUnfortunately I introduced a regression which I'm fixing in https://github.com/napari/napari/pull/5246\n", "before_files": [{"content": "# See \"Writing benchmarks\" in the asv docs for more information.\n# https://asv.readthedocs.io/en/latest/writing_benchmarks.html\n# or the napari documentation on benchmarking\n# https://github.com/napari/napari/blob/main/docs/BENCHMARKS.md\n\nimport time\n\nimport numpy as np\nimport zarr\nfrom qtpy.QtWidgets import QApplication\n\nimport napari\nfrom napari.layers import Image\n\nSAMPLE_PARAMS = {\n 'skin_data': {\n # napari-bio-sample-data\n 'shape': (1280, 960, 3),\n 'chunk_shape': (512, 512, 3),\n 'dtype': 'uint8',\n },\n 'jrc_hela-2 (scale 3)': {\n # s3://janelia-cosem-datasets/jrc_hela-2/jrc_hela-2.n5\n 'shape': (796, 200, 1500),\n 'dtype': 'uint16',\n 'chunk_shape': (64, 64, 64),\n },\n}\n\n\ndef get_image_params():\n # chunksizes = [(64,64,64), (256,256,256), (512,512,512)]\n latencies = [0.05 * i for i in range(0, 3)]\n datanames = SAMPLE_PARAMS.keys()\n params = (latencies, datanames)\n\n return params\n\n\nclass SlowMemoryStore(zarr.storage.MemoryStore):\n def __init__(self, load_delay, *args, **kwargs):\n self.load_delay = load_delay\n super().__init__(*args, **kwargs)\n\n def __getitem__(self, item: str):\n time.sleep(self.load_delay)\n return super().__getitem__(item)\n\n\nclass AsyncImage2DSuite:\n params = get_image_params()\n\n def setup(self, latency, dataname):\n shape = SAMPLE_PARAMS[dataname]['shape']\n chunk_shape = SAMPLE_PARAMS[dataname]['chunk_shape']\n dtype = SAMPLE_PARAMS[dataname]['dtype']\n\n store = SlowMemoryStore(load_delay=latency)\n self.data = zarr.zeros(\n shape,\n chunks=chunk_shape,\n dtype=dtype,\n store=store,\n )\n\n self.layer = Image(self.data)\n\n def time_create_layer(self, *args):\n \"\"\"Time to create an image layer.\"\"\"\n Image(self.data)\n\n def time_set_view_slice(self, *args):\n \"\"\"Time to set view slice.\"\"\"\n self.layer._set_view_slice()\n\n def time_refresh(self, *args):\n \"\"\"Time to refresh view.\"\"\"\n self.layer.refresh()\n\n\nclass QtViewerAsyncImage2DSuite:\n params = get_image_params()\n\n def setup(self, latency, dataname):\n shape = SAMPLE_PARAMS[dataname]['shape']\n chunk_shape = SAMPLE_PARAMS[dataname]['chunk_shape']\n dtype = SAMPLE_PARAMS[dataname]['dtype']\n\n if len(shape) == 3 and shape[2] == 3:\n # Skip 2D RGB tests -- scrolling does not apply\n self.viewer = None\n raise NotImplementedError\n\n store = SlowMemoryStore(load_delay=latency)\n _ = QApplication.instance() or QApplication([])\n self.data = zarr.zeros(\n shape,\n chunks=chunk_shape,\n dtype=dtype,\n store=store,\n )\n\n self.viewer = napari.Viewer()\n self.viewer.add_image(self.data)\n\n def time_z_scroll(self, *args):\n layers_to_scroll = 4\n for z in range(layers_to_scroll):\n z = z * (self.data.shape[2] // layers_to_scroll)\n 
self.viewer.dims.set_current_step(0, z)\n\n def teardown(self, *args):\n if self.viewer is not None:\n self.viewer.window.close()\n\n\nclass QtViewerAsyncPointsSuite:\n n_points = [2**i for i in range(12, 18)]\n params = n_points\n\n def setup(self, n_points):\n _ = QApplication.instance() or QApplication([])\n\n np.random.seed(0)\n self.viewer = napari.Viewer()\n # Fake image layer to set bounds. Is this really needed?\n self.empty_image = np.zeros((512, 512, 512), dtype=\"uint8\")\n self.viewer.add_image(self.empty_image)\n self.point_data = np.random.randint(512, size=(n_points, 3))\n self.viewer.add_points(self.point_data)\n\n def time_z_scroll(self, *args):\n for z in range(self.empty_image.shape[0]):\n self.viewer.dims.set_current_step(0, z)\n\n def teardown(self, *args):\n self.viewer.window.close()\n\n\nclass QtViewerAsyncPointsAndImage2DSuite:\n n_points = [2**i for i in range(12, 18, 2)]\n chunksize = [256, 512, 1024]\n latency = [0.05 * i for i in range(0, 3)]\n params = (n_points, latency, chunksize)\n\n def setup(self, n_points, latency, chunksize):\n store = SlowMemoryStore(load_delay=latency)\n _ = QApplication.instance() or QApplication([])\n\n np.random.seed(0)\n\n self.image_data = zarr.zeros(\n (64, 2048, 2048),\n chunks=(1, chunksize, chunksize),\n dtype='uint8',\n store=store,\n )\n\n self.viewer = napari.Viewer()\n self.viewer.add_image(self.image_data)\n self.point_data = np.random.randint(512, size=(n_points, 3))\n self.viewer.add_points(self.point_data)\n\n def time_z_scroll(self, *args):\n for z in range(self.image_data.shape[0]):\n self.viewer.dims.set_current_step(0, z)\n\n def teardown(self, *args):\n self.viewer.window.close()\n", "path": "napari/benchmarks/benchmark_qt_slicing.py"}], "after_files": [{"content": "# See \"Writing benchmarks\" in the asv docs for more information.\n# https://asv.readthedocs.io/en/latest/writing_benchmarks.html\n# or the napari documentation on benchmarking\n# https://github.com/napari/napari/blob/main/docs/BENCHMARKS.md\n\nimport time\n\nimport numpy as np\nimport zarr\nfrom qtpy.QtWidgets import QApplication\n\nimport napari\nfrom napari.layers import Image\n\nSAMPLE_PARAMS = {\n 'skin_data': {\n # napari-bio-sample-data\n 'shape': (1280, 960, 3),\n 'chunk_shape': (512, 512, 3),\n 'dtype': 'uint8',\n },\n 'jrc_hela-2 (scale 3)': {\n # s3://janelia-cosem-datasets/jrc_hela-2/jrc_hela-2.n5\n 'shape': (796, 200, 1500),\n 'dtype': 'uint16',\n 'chunk_shape': (64, 64, 64),\n },\n}\n\n\ndef get_image_params():\n # chunksizes = [(64,64,64), (256,256,256), (512,512,512)]\n latencies = [0.05 * i for i in range(0, 3)]\n datanames = SAMPLE_PARAMS.keys()\n params = (latencies, datanames)\n\n return params\n\n\nclass SlowMemoryStore(zarr.storage.MemoryStore):\n def __init__(self, load_delay, *args, **kwargs):\n self.load_delay = load_delay\n super().__init__(*args, **kwargs)\n\n def __getitem__(self, item: str):\n time.sleep(self.load_delay)\n return super().__getitem__(item)\n\n\nclass AsyncImage2DSuite:\n \"\"\"TODO: these benchmarks are skipped. 
Remove the NotImplementedError in\n setup to enable.\n \"\"\"\n\n params = get_image_params()\n timeout = 300\n\n def setup(self, latency, dataname):\n shape = SAMPLE_PARAMS[dataname]['shape']\n chunk_shape = SAMPLE_PARAMS[dataname]['chunk_shape']\n dtype = SAMPLE_PARAMS[dataname]['dtype']\n\n store = SlowMemoryStore(load_delay=latency)\n self.data = zarr.zeros(\n shape,\n chunks=chunk_shape,\n dtype=dtype,\n store=store,\n )\n\n self.layer = Image(self.data)\n raise NotImplementedError\n\n def time_create_layer(self, *args):\n \"\"\"Time to create an image layer.\"\"\"\n Image(self.data)\n\n def time_set_view_slice(self, *args):\n \"\"\"Time to set view slice.\"\"\"\n self.layer._set_view_slice()\n\n def time_refresh(self, *args):\n \"\"\"Time to refresh view.\"\"\"\n self.layer.refresh()\n\n\nclass QtViewerAsyncImage2DSuite:\n \"\"\"TODO: these benchmarks are skipped. Remove the NotImplementedError in\n setup to enable.\n \"\"\"\n\n params = get_image_params()\n timeout = 300\n\n def setup(self, latency, dataname):\n shape = SAMPLE_PARAMS[dataname]['shape']\n chunk_shape = SAMPLE_PARAMS[dataname]['chunk_shape']\n dtype = SAMPLE_PARAMS[dataname]['dtype']\n\n if len(shape) == 3 and shape[2] == 3:\n # Skip 2D RGB tests -- scrolling does not apply\n self.viewer = None\n raise NotImplementedError\n\n store = SlowMemoryStore(load_delay=latency)\n _ = QApplication.instance() or QApplication([])\n self.data = zarr.zeros(\n shape,\n chunks=chunk_shape,\n dtype=dtype,\n store=store,\n )\n\n self.viewer = napari.Viewer()\n self.viewer.add_image(self.data)\n raise NotImplementedError\n\n def time_z_scroll(self, *args):\n layers_to_scroll = 4\n for z in range(layers_to_scroll):\n z = z * (self.data.shape[2] // layers_to_scroll)\n self.viewer.dims.set_current_step(0, z)\n\n def teardown(self, *args):\n if self.viewer is not None:\n self.viewer.window.close()\n\n\nclass QtViewerAsyncPointsSuite:\n \"\"\"TODO: these benchmarks are skipped. Remove the NotImplementedError in\n setup to enable.\n \"\"\"\n\n n_points = [2**i for i in range(12, 18)]\n params = n_points\n\n def setup(self, n_points):\n _ = QApplication.instance() or QApplication([])\n\n np.random.seed(0)\n self.viewer = napari.Viewer()\n # Fake image layer to set bounds. Is this really needed?\n self.empty_image = np.zeros((512, 512, 512), dtype=\"uint8\")\n self.viewer.add_image(self.empty_image)\n self.point_data = np.random.randint(512, size=(n_points, 3))\n self.viewer.add_points(self.point_data)\n raise NotImplementedError\n\n def time_z_scroll(self, *args):\n for z in range(self.empty_image.shape[0]):\n self.viewer.dims.set_current_step(0, z)\n\n def teardown(self, *args):\n self.viewer.window.close()\n\n\nclass QtViewerAsyncPointsAndImage2DSuite:\n \"\"\"TODO: these benchmarks are skipped. 
Remove the NotImplementedError in\n setup to enable.\n \"\"\"\n\n n_points = [2**i for i in range(12, 18, 2)]\n chunksize = [256, 512, 1024]\n latency = [0.05 * i for i in range(0, 3)]\n params = (n_points, latency, chunksize)\n timeout = 600\n\n def setup(self, n_points, latency, chunksize):\n store = SlowMemoryStore(load_delay=latency)\n _ = QApplication.instance() or QApplication([])\n\n np.random.seed(0)\n\n self.image_data = zarr.zeros(\n (64, 2048, 2048),\n chunks=(1, chunksize, chunksize),\n dtype='uint8',\n store=store,\n )\n\n self.viewer = napari.Viewer()\n self.viewer.add_image(self.image_data)\n self.point_data = np.random.randint(512, size=(n_points, 3))\n self.viewer.add_points(self.point_data)\n raise NotImplementedError\n\n def time_z_scroll(self, *args):\n for z in range(self.image_data.shape[0]):\n self.viewer.dims.set_current_step(0, z)\n\n def teardown(self, *args):\n self.viewer.window.close()\n", "path": "napari/benchmarks/benchmark_qt_slicing.py"}]} | 2,400 | 730 |
gh_patches_debug_19737 | rasdani/github-patches | git_diff | biopython__biopython-3247 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Should we remove/replace Bio.Alphabet?
Filing this meta-issue, since we don't seem to have a single place discussing this on GitHub.
**My (likely biased) summary from those discussions is no one likes the current alphabet system, and most people ignore it.**
Biopython has a complicated, hard-to-use legacy alphabet system in ``Bio.Alphabet`` which is used as a typing system (e.g. you can't reverse-complement a protein) and can store some useful information such as the expected letters, whether the sequence is gapped and its gap character (although only one), and a stop codon symbol (although only one).
The objects in ``Bio.Alphabet`` cover three-letter alphabets as well as the more commonly used one-letter alphabets, although the ``Seq`` object effectively assumes the latter only. Three-letter alphabets can be used with the array-based ``MutableSeq`` object, but this is very fragile and many things break - thus #1681.
Note we do not (currently) validate the expected letters when making a sequence object with an alphabet with an explicit set of expected letters - #1040.
Discussion on #1674 (hiding alphabets) has meant we now hide the alphabet in the ``Seq`` object representation if it is the default alphabet. Discussion there, and on #1681, and on the mailing list suggests going further and *removing* ``Bio.Alphabet`` entirely.
Note that removal is not as simple as it might sound - it will require some delicate modifications, for example several of the parsers in ``Bio.SeqIO`` use the alphabet to store the sequence type (important metadata in more than one file format).
I personally do like the typing system aspect of the alphabet system, but think we only need a much simpler DNA/RNA/nucleotide/protein/other system (more like an enum). I started looking at that on this branch which aimed to break as little existing code as possible: https://github.com/peterjc/biopython/tree/alpha_lite
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `Bio/Alphabet/__init__.py`
Content:
```
1 # Copyright 2000-2002 by Andrew Dalke.
2 # Revisions copyright 2007-2010 by Peter Cock.
3 # All rights reserved.
4 #
5 # This file is part of the Biopython distribution and governed by your
6 # choice of the "Biopython License Agreement" or the "BSD 3-Clause License".
7 # Please see the LICENSE file that should have been included as part of this
8 # package.
9 """Alphabets were previously used to declare sequence type and letters (OBSOLETE).
10
11 The design of Bio.Aphabet included a number of historic design choices
12 which, with the benefit of hindsight, were regretable. Bio.Alphabet was
13 therefore removed from Biopython in release 1.78. Instead, the molecule type is
14 included as an annotation on SeqRecords where appropriate.
15
16 Please see
17 https://github.com/biopython/biopython/issues/3156
18 for examples showing how to transition from Bio.Alphabet to molecule type
19 annotations.
20 """
21
22 raise ImportError(
23 "Bio.Alphabet has been removed from Biopython. In many cases, the alphabet can simply be ignored and removed from scripts. In a few cases, you may need to specify the ``molecule_type`` as an annotation on a SeqRecord for your script to work correctly. Please see https://github.com/biopython/biopython/issues/3156 for more information."
24 )
25
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/Bio/Alphabet/__init__.py b/Bio/Alphabet/__init__.py
--- a/Bio/Alphabet/__init__.py
+++ b/Bio/Alphabet/__init__.py
@@ -13,12 +13,10 @@
therefore removed from Biopython in release 1.78. Instead, the molecule type is
included as an annotation on SeqRecords where appropriate.
-Please see
-https://github.com/biopython/biopython/issues/3156
-for examples showing how to transition from Bio.Alphabet to molecule type
-annotations.
+Please see https://biopython.org/wiki/Alphabet for examples showing how to
+transition from Bio.Alphabet to molecule type annotations.
"""
raise ImportError(
- "Bio.Alphabet has been removed from Biopython. In many cases, the alphabet can simply be ignored and removed from scripts. In a few cases, you may need to specify the ``molecule_type`` as an annotation on a SeqRecord for your script to work correctly. Please see https://github.com/biopython/biopython/issues/3156 for more information."
+ "Bio.Alphabet has been removed from Biopython. In many cases, the alphabet can simply be ignored and removed from scripts. In a few cases, you may need to specify the ``molecule_type`` as an annotation on a SeqRecord for your script to work correctly. Please see https://biopython.org/wiki/Alphabet for more information."
)
| {"golden_diff": "diff --git a/Bio/Alphabet/__init__.py b/Bio/Alphabet/__init__.py\n--- a/Bio/Alphabet/__init__.py\n+++ b/Bio/Alphabet/__init__.py\n@@ -13,12 +13,10 @@\n therefore removed from Biopython in release 1.78. Instead, the molecule type is\n included as an annotation on SeqRecords where appropriate.\n \n-Please see\n-https://github.com/biopython/biopython/issues/3156\n-for examples showing how to transition from Bio.Alphabet to molecule type\n-annotations.\n+Please see https://biopython.org/wiki/Alphabet for examples showing how to\n+transition from Bio.Alphabet to molecule type annotations.\n \"\"\"\n \n raise ImportError(\n- \"Bio.Alphabet has been removed from Biopython. In many cases, the alphabet can simply be ignored and removed from scripts. In a few cases, you may need to specify the ``molecule_type`` as an annotation on a SeqRecord for your script to work correctly. Please see https://github.com/biopython/biopython/issues/3156 for more information.\"\n+ \"Bio.Alphabet has been removed from Biopython. In many cases, the alphabet can simply be ignored and removed from scripts. In a few cases, you may need to specify the ``molecule_type`` as an annotation on a SeqRecord for your script to work correctly. Please see https://biopython.org/wiki/Alphabet for more information.\"\n )\n", "issue": "Should we remove/replace Bio.Alphabet?\nFiling this meta-issue, since we don't seem to have a single place discussing this on GitHub.\r\n\r\n**My (likely biased) summary from those discussions is no one likes the current alphabet system, and most people ignore it.**\r\n\r\nBiopython has a complicated hard to use legacy alphabet system in ``Bio.Alphabet`` which is used as a typing system (e.g. can't do reverse-complement on a protein), can store some useful information like the expected letters, if gapped and the gap character (although only one), and a stop codon symbol (although only one).\r\n\r\nThe objects in ``Bio.Alphabet`` cover three-letter alphabets as well as the more commonly used one-letter alphabets, although the ``Seq`` object effectively assumes the later only. Three-letter alphabets can be used with the array-based ``MutableSeq`` object, but it is very fragile and many things break - thus #1681.\r\n\r\nNote we do not (currently) validate the expected letters when making a sequence object with an alphabet with an explicit set of expected letters - #1040.\r\n\r\nDiscussion on #1674 (hiding alphabets) has meant we now hide the alphabet in the ``Seq`` object representation if it is the default alphabet. Discussion there, and on #1681, and on the mailing list suggests going further and *removing* ``Bio.Alphabet`` entirely.\r\n\r\nNote that removal is not as simple as it might sound - it will require some delicate modifications, for example several of the parsers in ``Bio.SeqIO`` use the alphabet to store the sequence type (important metadata in more than one file format).\r\n\r\nI personally do like the typing system aspect of the alphabet system, but think we only need a much simpler DNA/RNA/nucleotide/protein/other system (more like an enum). 
I started looking at that on this branch which aimed to break as little existing code as possible: https://github.com/peterjc/biopython/tree/alpha_lite\r\n\n", "before_files": [{"content": "# Copyright 2000-2002 by Andrew Dalke.\n# Revisions copyright 2007-2010 by Peter Cock.\n# All rights reserved.\n#\n# This file is part of the Biopython distribution and governed by your\n# choice of the \"Biopython License Agreement\" or the \"BSD 3-Clause License\".\n# Please see the LICENSE file that should have been included as part of this\n# package.\n\"\"\"Alphabets were previously used to declare sequence type and letters (OBSOLETE).\n\nThe design of Bio.Aphabet included a number of historic design choices\nwhich, with the benefit of hindsight, were regretable. Bio.Alphabet was\ntherefore removed from Biopython in release 1.78. Instead, the molecule type is\nincluded as an annotation on SeqRecords where appropriate.\n\nPlease see\nhttps://github.com/biopython/biopython/issues/3156\nfor examples showing how to transition from Bio.Alphabet to molecule type\nannotations.\n\"\"\"\n\nraise ImportError(\n \"Bio.Alphabet has been removed from Biopython. In many cases, the alphabet can simply be ignored and removed from scripts. In a few cases, you may need to specify the ``molecule_type`` as an annotation on a SeqRecord for your script to work correctly. Please see https://github.com/biopython/biopython/issues/3156 for more information.\"\n)\n", "path": "Bio/Alphabet/__init__.py"}], "after_files": [{"content": "# Copyright 2000-2002 by Andrew Dalke.\n# Revisions copyright 2007-2010 by Peter Cock.\n# All rights reserved.\n#\n# This file is part of the Biopython distribution and governed by your\n# choice of the \"Biopython License Agreement\" or the \"BSD 3-Clause License\".\n# Please see the LICENSE file that should have been included as part of this\n# package.\n\"\"\"Alphabets were previously used to declare sequence type and letters (OBSOLETE).\n\nThe design of Bio.Aphabet included a number of historic design choices\nwhich, with the benefit of hindsight, were regretable. Bio.Alphabet was\ntherefore removed from Biopython in release 1.78. Instead, the molecule type is\nincluded as an annotation on SeqRecords where appropriate.\n\nPlease see https://biopython.org/wiki/Alphabet for examples showing how to\ntransition from Bio.Alphabet to molecule type annotations.\n\"\"\"\n\nraise ImportError(\n \"Bio.Alphabet has been removed from Biopython. In many cases, the alphabet can simply be ignored and removed from scripts. In a few cases, you may need to specify the ``molecule_type`` as an annotation on a SeqRecord for your script to work correctly. Please see https://biopython.org/wiki/Alphabet for more information.\"\n)\n", "path": "Bio/Alphabet/__init__.py"}]} | 1,039 | 329 |
gh_patches_debug_711 | rasdani/github-patches | git_diff | dmlc__gluon-nlp-184 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
API doc examples are currently not easy to copy/paste
Users may want to use a snippet from an example directly, so making the notebooks copy-friendly is important.
Currently the code blocks have the Python shell prefix ">>>" in them; see http://gluon-nlp.mxnet.io/api/notes/data_api.html
--- END ISSUE ---
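For context, the usual cure is a small piece of JavaScript that strips the prompts and adds a copy button to each code block; the sketch below shows how that is wired into `docs/conf.py`. It assumes a `copybutton.js` file exists under the already-configured `_static` directory (the JavaScript itself is not shown), and the actual one-line change appears in the diff further down this record.

```python
# Sketch of the conf.py hook-up; assumes docs/_static/copybutton.js exists and
# html_static_path already includes '_static' (as it does in this conf.py).
def setup(app):
    # ...existing hooks (builder-inited, recommonmark_config, AutoStructify,
    # google_analytics.js) stay exactly as they are...
    app.add_javascript('copybutton.js')  # hide ">>> " prompts, add a copy button
```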
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docs/conf.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 #
3 # documentation build configuration file, created by
4 # sphinx-quickstart on Thu Jul 23 19:40:08 2015.
5 #
6 # This file is execfile()d with the current directory set to its
7 # containing dir.
8 #
9 # Note that not all possible configuration values are present in this
10 # autogenerated file.
11 #
12 # All configuration values have a default; values that are commented out
13 # serve to show the default.
14 import sys
15 import os, subprocess
16 import shlex
17 import recommonmark
18 import sphinx_gallery
19 from recommonmark.parser import CommonMarkParser
20 from recommonmark.transform import AutoStructify
21
22 # If extensions (or modules to document with autodoc) are in another directory,
23 # add these directories to sys.path here. If the directory is relative to the
24 # documentation root, use os.path.abspath to make it absolute, like shown here.
25 curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
26 sys.path.insert(0, os.path.join(curr_path, '..'))
27
28 # -- General configuration ------------------------------------------------
29
30 # Version information.
31 import gluonnlp as nlp
32 version = nlp.__version__
33 release = nlp.__version__
34
35 # General information about the project.
36 project = u'gluonnlp'
37 author = u'%s developers' % project
38 copyright = u'2018, %s' % author
39 github_doc_root = 'http://gluon-nlp.mxnet.io/{}/'.format(str(version))
40
41 # add markdown parser
42 CommonMarkParser.github_doc_root = github_doc_root
43 source_parsers = {
44 '.md': CommonMarkParser
45 }
46
47 # Add any Sphinx extension module names here, as strings. They can be
48 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones
49 extensions = [
50 'sphinx.ext.autodoc',
51 'sphinx.ext.autosummary',
52 'sphinx.ext.intersphinx',
53 'sphinx.ext.viewcode',
54 'sphinx.ext.napoleon',
55 'sphinx.ext.mathjax',
56 'sphinx_gallery.gen_gallery',
57 'nbsphinx',
58 ]
59
60 # Add any paths that contain templates here, relative to this directory.
61 templates_path = ['_templates']
62
63 nbsphinx_kernel_name = 'python3'
64 nbsphinx_allow_errors = True
65 nbsphinx_timeout = 1200
66 html_sourcelink_suffix = ''
67
68 nbsphinx_prolog = """
69 {% set paths = env.docname.split('/') %}
70
71 .. only:: html
72
73 :download:`[Download] <{{ "../%s.zip"|format(paths[1]) }}>`
74 """
75
76 # The suffix(es) of source filenames.
77 # You can specify multiple suffix as a list of string:
78 # source_suffix = ['.rst', '.md']
79 source_suffix = ['.rst', '.ipynb', '.md']
80
81 # The encoding of source files.
82 #source_encoding = 'utf-8-sig'
83
84 # generate autosummary even if no references
85 autosummary_generate = True
86
87 # The master toctree document.
88 master_doc = 'index'
89
90 # The language for content autogenerated by Sphinx. Refer to documentation
91 # for a list of supported languages.
92 #
93 # This is also used if you do content translation via gettext catalogs.
94 # Usually you set "language" from the command line for these cases.
95 language = None
96
97 # There are two options for replacing |today|: either, you set today to some
98 # non-false value, then it is used:
99 #today = ''
100 # Else, today_fmt is used as the format for a strftime call.
101 #today_fmt = '%B %d, %Y'
102
103 # The name of an image file (relative to this directory) to place at the top
104 # of the sidebar.
105 html_logo = '_static/gluon_white.png'
106
107 # The name of an image file (relative to this directory) to use as a favicon of
108 # the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
109 # pixels large.
110 html_favicon = '_static/gluon_s2.png'
111
112 # List of patterns, relative to source directory, that match files and
113 # directories to ignore when looking for source files.
114 exclude_patterns = ['_build', '**.ipynb_checkpoints']
115
116 # The reST default role (used for this markup: `text`) to use for all
117 # documents.
118 #default_role = None
119
120 # If true, '()' will be appended to :func: etc. cross-reference text.
121 #add_function_parentheses = True
122
123 # If true, the current module name will be prepended to all description
124 # unit titles (such as .. function::).
125 #add_module_names = True
126
127 # If true, sectionauthor and moduleauthor directives will be shown in the
128 # output. They are ignored by default.
129 #show_authors = False
130
131 # The name of the Pygments (syntax highlighting) style to use.
132 pygments_style = 'sphinx'
133
134 # A list of ignored prefixes for module index sorting.
135 #modindex_common_prefix = []
136
137 # If true, keep warnings as "system message" paragraphs in the built documents.
138 #keep_warnings = False
139
140 # If true, `todo` and `todoList` produce output, else they produce nothing.
141 todo_include_todos = False
142
143 # -- Options for HTML output ----------------------------------------------
144
145 # The theme is set by the make target
146 html_theme = os.environ.get('NNVM_THEME', 'rtd')
147
148 on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
149 # only import rtd theme and set it if want to build docs locally
150 if not on_rtd and html_theme == 'rtd':
151 import sphinx_rtd_theme
152 html_theme = 'sphinx_rtd_theme'
153 html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
154
155 # Add any paths that contain custom static files (such as style sheets) here,
156 # relative to this directory. They are copied after the builtin static files,
157 # so a file named "default.css" will overwrite the builtin "default.css".
158 html_static_path = ['_static']
159
160 # Output file base name for HTML help builder.
161 htmlhelp_basename = project + 'doc'
162
163 # -- Options for LaTeX output ---------------------------------------------
164 latex_elements = {
165 }
166
167 # Grouping the document tree into LaTeX files. List of tuples
168 # (source start file, target name, title,
169 # author, documentclass [howto, manual, or own class]).
170 latex_documents = [
171 (master_doc, '%s.tex' % project, project,
172 author, 'manual'),
173 ]
174
175 # hook for doxygen
176 def run_doxygen(folder):
177 """Run the doxygen make command in the designated folder."""
178 try:
179 #retcode = subprocess.call("cd %s; make doc" % folder, shell=True)
180 retcode = subprocess.call("rm -rf _build/html/doxygen", shell=True)
181 retcode = subprocess.call("mkdir -p _build/html", shell=True)
182 retcode = subprocess.call("cp -rf doxygen/html _build/html/doxygen", shell=True)
183 if retcode < 0:
184 sys.stderr.write("doxygen terminated by signal %s" % (-retcode))
185 except OSError as e:
186 sys.stderr.write("doxygen execution failed: %s" % e)
187
188 intersphinx_mapping = {
189 'python': ('https://docs.python.org/{.major}'.format(sys.version_info), None),
190 'mxnet': ('https://mxnet.apache.org/', None),
191 'numpy': ('http://docs.scipy.org/doc/numpy/', None),
192 'scipy': ('http://docs.scipy.org/doc/scipy/reference', None),
193 'matplotlib': ('http://matplotlib.org/', None),
194 'nltk': ('http://www.nltk.org/', None),
195 }
196
197
198 from sphinx_gallery.sorting import ExplicitOrder
199
200 examples_dirs = []
201 gallery_dirs = []
202
203 subsection_order = ExplicitOrder([])
204
205 def generate_doxygen_xml(app):
206 """Run the doxygen make commands if we're on the ReadTheDocs server"""
207 run_doxygen('..')
208
209 def setup(app):
210 # Add hook for building doxygen xml when needed
211 # no c++ API for now
212 app.connect("builder-inited", generate_doxygen_xml)
213 app.add_config_value('recommonmark_config', {
214 'url_resolver': lambda url: github_doc_root + url,
215 'auto_doc_ref': True
216 }, True)
217 app.add_transform(AutoStructify)
218 app.add_javascript('google_analytics.js')
219
220
221 sphinx_gallery_conf = {
222 'backreferences_dir': 'gen_modules/backreferences',
223 'doc_module': ('gluonnlp', 'mxnet', 'numpy'),
224 'reference_url': {
225 'gluonnlp': None,
226 'numpy': 'http://docs.scipy.org/doc/numpy-1.9.1'},
227 'examples_dirs': examples_dirs,
228 'gallery_dirs': gallery_dirs,
229 'subsection_order': subsection_order,
230 'find_mayavi_figures': False,
231 'filename_pattern': '.py',
232 'expected_failing_examples': []
233 }
234
235 # Napoleon settings
236 napoleon_use_ivar = True
237
238
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -216,6 +216,7 @@
}, True)
app.add_transform(AutoStructify)
app.add_javascript('google_analytics.js')
+ app.add_javascript('copybutton.js')
sphinx_gallery_conf = {
| {"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -216,6 +216,7 @@\n }, True)\n app.add_transform(AutoStructify)\n app.add_javascript('google_analytics.js')\n+ app.add_javascript('copybutton.js')\n \n \n sphinx_gallery_conf = {\n", "issue": "API doc examples are currently not easy to copy/paste\nusers may want to use a snippet from example directly, so making the notebooks copy-friendly is important\r\n\r\ncurrently the code blocks have python shell prefix \">>>\" in them. see http://gluon-nlp.mxnet.io/api/notes/data_api.html\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# documentation build configuration file, created by\n# sphinx-quickstart on Thu Jul 23 19:40:08 2015.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\nimport sys\nimport os, subprocess\nimport shlex\nimport recommonmark\nimport sphinx_gallery\nfrom recommonmark.parser import CommonMarkParser\nfrom recommonmark.transform import AutoStructify\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\ncurr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))\nsys.path.insert(0, os.path.join(curr_path, '..'))\n\n# -- General configuration ------------------------------------------------\n\n# Version information.\nimport gluonnlp as nlp\nversion = nlp.__version__\nrelease = nlp.__version__\n\n# General information about the project.\nproject = u'gluonnlp'\nauthor = u'%s developers' % project\ncopyright = u'2018, %s' % author\ngithub_doc_root = 'http://gluon-nlp.mxnet.io/{}/'.format(str(version))\n\n# add markdown parser\nCommonMarkParser.github_doc_root = github_doc_root\nsource_parsers = {\n '.md': CommonMarkParser\n}\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.autosummary',\n 'sphinx.ext.intersphinx',\n 'sphinx.ext.viewcode',\n 'sphinx.ext.napoleon',\n 'sphinx.ext.mathjax',\n 'sphinx_gallery.gen_gallery',\n 'nbsphinx',\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\nnbsphinx_kernel_name = 'python3'\nnbsphinx_allow_errors = True\nnbsphinx_timeout = 1200\nhtml_sourcelink_suffix = ''\n\nnbsphinx_prolog = \"\"\"\n{% set paths = env.docname.split('/') %}\n\n.. only:: html\n\n :download:`[Download] <{{ \"../%s.zip\"|format(paths[1]) }}>`\n\"\"\"\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n# source_suffix = ['.rst', '.md']\nsource_suffix = ['.rst', '.ipynb', '.md']\n\n# The encoding of source files.\n#source_encoding = 'utf-8-sig'\n\n# generate autosummary even if no references\nautosummary_generate = True\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# The language for content autogenerated by Sphinx. 
Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n#today = ''\n# Else, today_fmt is used as the format for a strftime call.\n#today_fmt = '%B %d, %Y'\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\nhtml_logo = '_static/gluon_white.png'\n\n# The name of an image file (relative to this directory) to use as a favicon of\n# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\nhtml_favicon = '_static/gluon_s2.png'\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns = ['_build', '**.ipynb_checkpoints']\n\n# The reST default role (used for this markup: `text`) to use for all\n# documents.\n#default_role = None\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\n#add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\n#add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. They are ignored by default.\n#show_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# A list of ignored prefixes for module index sorting.\n#modindex_common_prefix = []\n\n# If true, keep warnings as \"system message\" paragraphs in the built documents.\n#keep_warnings = False\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = False\n\n# -- Options for HTML output ----------------------------------------------\n\n# The theme is set by the make target\nhtml_theme = os.environ.get('NNVM_THEME', 'rtd')\n\non_rtd = os.environ.get('READTHEDOCS', None) == 'True'\n# only import rtd theme and set it if want to build docs locally\nif not on_rtd and html_theme == 'rtd':\n import sphinx_rtd_theme\n html_theme = 'sphinx_rtd_theme'\n html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = project + 'doc'\n\n# -- Options for LaTeX output ---------------------------------------------\nlatex_elements = {\n}\n\n# Grouping the document tree into LaTeX files. 
List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (master_doc, '%s.tex' % project, project,\n author, 'manual'),\n]\n\n# hook for doxygen\ndef run_doxygen(folder):\n \"\"\"Run the doxygen make command in the designated folder.\"\"\"\n try:\n #retcode = subprocess.call(\"cd %s; make doc\" % folder, shell=True)\n retcode = subprocess.call(\"rm -rf _build/html/doxygen\", shell=True)\n retcode = subprocess.call(\"mkdir -p _build/html\", shell=True)\n retcode = subprocess.call(\"cp -rf doxygen/html _build/html/doxygen\", shell=True)\n if retcode < 0:\n sys.stderr.write(\"doxygen terminated by signal %s\" % (-retcode))\n except OSError as e:\n sys.stderr.write(\"doxygen execution failed: %s\" % e)\n\nintersphinx_mapping = {\n 'python': ('https://docs.python.org/{.major}'.format(sys.version_info), None),\n 'mxnet': ('https://mxnet.apache.org/', None),\n 'numpy': ('http://docs.scipy.org/doc/numpy/', None),\n 'scipy': ('http://docs.scipy.org/doc/scipy/reference', None),\n 'matplotlib': ('http://matplotlib.org/', None),\n 'nltk': ('http://www.nltk.org/', None),\n}\n\n\nfrom sphinx_gallery.sorting import ExplicitOrder\n\nexamples_dirs = []\ngallery_dirs = []\n\nsubsection_order = ExplicitOrder([])\n\ndef generate_doxygen_xml(app):\n \"\"\"Run the doxygen make commands if we're on the ReadTheDocs server\"\"\"\n run_doxygen('..')\n\ndef setup(app):\n # Add hook for building doxygen xml when needed\n # no c++ API for now\n app.connect(\"builder-inited\", generate_doxygen_xml)\n app.add_config_value('recommonmark_config', {\n 'url_resolver': lambda url: github_doc_root + url,\n 'auto_doc_ref': True\n }, True)\n app.add_transform(AutoStructify)\n app.add_javascript('google_analytics.js')\n\n\nsphinx_gallery_conf = {\n 'backreferences_dir': 'gen_modules/backreferences',\n 'doc_module': ('gluonnlp', 'mxnet', 'numpy'),\n'reference_url': {\n 'gluonnlp': None,\n 'numpy': 'http://docs.scipy.org/doc/numpy-1.9.1'},\n 'examples_dirs': examples_dirs,\n 'gallery_dirs': gallery_dirs,\n 'subsection_order': subsection_order,\n 'find_mayavi_figures': False,\n 'filename_pattern': '.py',\n 'expected_failing_examples': []\n}\n\n# Napoleon settings\nnapoleon_use_ivar = True\n\n", "path": "docs/conf.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# documentation build configuration file, created by\n# sphinx-quickstart on Thu Jul 23 19:40:08 2015.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\nimport sys\nimport os, subprocess\nimport shlex\nimport recommonmark\nimport sphinx_gallery\nfrom recommonmark.parser import CommonMarkParser\nfrom recommonmark.transform import AutoStructify\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. 
If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\ncurr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))\nsys.path.insert(0, os.path.join(curr_path, '..'))\n\n# -- General configuration ------------------------------------------------\n\n# Version information.\nimport gluonnlp as nlp\nversion = nlp.__version__\nrelease = nlp.__version__\n\n# General information about the project.\nproject = u'gluonnlp'\nauthor = u'%s developers' % project\ncopyright = u'2018, %s' % author\ngithub_doc_root = 'http://gluon-nlp.mxnet.io/{}/'.format(str(version))\n\n# add markdown parser\nCommonMarkParser.github_doc_root = github_doc_root\nsource_parsers = {\n '.md': CommonMarkParser\n}\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.autosummary',\n 'sphinx.ext.intersphinx',\n 'sphinx.ext.viewcode',\n 'sphinx.ext.napoleon',\n 'sphinx.ext.mathjax',\n 'sphinx_gallery.gen_gallery',\n 'nbsphinx',\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\nnbsphinx_kernel_name = 'python3'\nnbsphinx_allow_errors = True\nnbsphinx_timeout = 1200\nhtml_sourcelink_suffix = ''\n\nnbsphinx_prolog = \"\"\"\n{% set paths = env.docname.split('/') %}\n\n.. only:: html\n\n :download:`[Download] <{{ \"../%s.zip\"|format(paths[1]) }}>`\n\"\"\"\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n# source_suffix = ['.rst', '.md']\nsource_suffix = ['.rst', '.ipynb', '.md']\n\n# The encoding of source files.\n#source_encoding = 'utf-8-sig'\n\n# generate autosummary even if no references\nautosummary_generate = True\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n#today = ''\n# Else, today_fmt is used as the format for a strftime call.\n#today_fmt = '%B %d, %Y'\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\nhtml_logo = '_static/gluon_white.png'\n\n# The name of an image file (relative to this directory) to use as a favicon of\n# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\nhtml_favicon = '_static/gluon_s2.png'\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns = ['_build', '**.ipynb_checkpoints']\n\n# The reST default role (used for this markup: `text`) to use for all\n# documents.\n#default_role = None\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\n#add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\n#add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. 
They are ignored by default.\n#show_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# A list of ignored prefixes for module index sorting.\n#modindex_common_prefix = []\n\n# If true, keep warnings as \"system message\" paragraphs in the built documents.\n#keep_warnings = False\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = False\n\n# -- Options for HTML output ----------------------------------------------\n\n# The theme is set by the make target\nhtml_theme = os.environ.get('NNVM_THEME', 'rtd')\n\non_rtd = os.environ.get('READTHEDOCS', None) == 'True'\n# only import rtd theme and set it if want to build docs locally\nif not on_rtd and html_theme == 'rtd':\n import sphinx_rtd_theme\n html_theme = 'sphinx_rtd_theme'\n html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = project + 'doc'\n\n# -- Options for LaTeX output ---------------------------------------------\nlatex_elements = {\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (master_doc, '%s.tex' % project, project,\n author, 'manual'),\n]\n\n# hook for doxygen\ndef run_doxygen(folder):\n \"\"\"Run the doxygen make command in the designated folder.\"\"\"\n try:\n #retcode = subprocess.call(\"cd %s; make doc\" % folder, shell=True)\n retcode = subprocess.call(\"rm -rf _build/html/doxygen\", shell=True)\n retcode = subprocess.call(\"mkdir -p _build/html\", shell=True)\n retcode = subprocess.call(\"cp -rf doxygen/html _build/html/doxygen\", shell=True)\n if retcode < 0:\n sys.stderr.write(\"doxygen terminated by signal %s\" % (-retcode))\n except OSError as e:\n sys.stderr.write(\"doxygen execution failed: %s\" % e)\n\nintersphinx_mapping = {\n 'python': ('https://docs.python.org/{.major}'.format(sys.version_info), None),\n 'mxnet': ('https://mxnet.apache.org/', None),\n 'numpy': ('http://docs.scipy.org/doc/numpy/', None),\n 'scipy': ('http://docs.scipy.org/doc/scipy/reference', None),\n 'matplotlib': ('http://matplotlib.org/', None),\n 'nltk': ('http://www.nltk.org/', None),\n}\n\n\nfrom sphinx_gallery.sorting import ExplicitOrder\n\nexamples_dirs = []\ngallery_dirs = []\n\nsubsection_order = ExplicitOrder([])\n\ndef generate_doxygen_xml(app):\n \"\"\"Run the doxygen make commands if we're on the ReadTheDocs server\"\"\"\n run_doxygen('..')\n\ndef setup(app):\n # Add hook for building doxygen xml when needed\n # no c++ API for now\n app.connect(\"builder-inited\", generate_doxygen_xml)\n app.add_config_value('recommonmark_config', {\n 'url_resolver': lambda url: github_doc_root + url,\n 'auto_doc_ref': True\n }, True)\n app.add_transform(AutoStructify)\n app.add_javascript('google_analytics.js')\n app.add_javascript('copybutton.js')\n\n\nsphinx_gallery_conf = {\n 'backreferences_dir': 'gen_modules/backreferences',\n 'doc_module': ('gluonnlp', 'mxnet', 'numpy'),\n'reference_url': {\n 'gluonnlp': None,\n 'numpy': 'http://docs.scipy.org/doc/numpy-1.9.1'},\n 'examples_dirs': examples_dirs,\n 'gallery_dirs': gallery_dirs,\n 'subsection_order': 
subsection_order,\n 'find_mayavi_figures': False,\n 'filename_pattern': '.py',\n 'expected_failing_examples': []\n}\n\n# Napoleon settings\nnapoleon_use_ivar = True\n\n", "path": "docs/conf.py"}]} | 2,921 | 80 |
gh_patches_debug_33949 | rasdani/github-patches | git_diff | xonsh__xonsh-522 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Feature-request: Static configuration file as a command line option
I suggest adding a command-line option, --config_file, that allows the user to specify a file location for a static configuration file (http://xonsh.org/xonshconfig.html).

This would allow portable usage of xonsh, e.g. installing Python and xonsh on a USB drive and bringing the shell along on a stick.
--- END ISSUE ---
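
For illustration only, an option of this kind can be wired up with a small argparse type that validates the path before the shell starts. The sketch below mirrors the shape of the patch shown later in this record; the option name `--config-path` and the `path_argument` helper are taken from that patch rather than being an established xonsh API at the time the issue was filed.

```
import os
from argparse import ArgumentParser, ArgumentTypeError


def path_argument(s):
    """Return the absolute path only if it points at an existing file."""
    s = os.path.abspath(os.path.expanduser(s))
    if not os.path.isfile(s):
        raise ArgumentTypeError('"%s" must be a valid path to a file' % s)
    return s


parser = ArgumentParser(description='xonsh')
parser.add_argument('--config-path',
                    help='specify a custom static configuration file',
                    dest='config_path',
                    type=path_argument)

# e.g. `xonsh --config-path /media/usb/xonshconfig.json` would expose the
# validated absolute path as args.config_path for premain() to pick up.
args = parser.parse_args(['--config-path', __file__])  # any existing file works here
print(args.config_path)
```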
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `xonsh/main.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 """The main xonsh script."""
3 import os
4 import sys
5 import shlex
6 import signal
7 import builtins
8 import subprocess
9 from argparse import ArgumentParser, Namespace
10 from contextlib import contextmanager
11
12 from xonsh import __version__
13 from xonsh.shell import Shell
14 from xonsh.pretty import pprint
15 from xonsh.jobs import ignore_sigtstp
16
17 parser = ArgumentParser(description='xonsh')
18 parser.add_argument('-V', '--version',
19 action='version',
20 version='/'.join(('xonsh', __version__)),
21 help='show version information and exit')
22 parser.add_argument('-c',
23 help="Run a single command and exit",
24 dest='command',
25 required=False,
26 default=None)
27 parser.add_argument('-i',
28 help='force running in interactive mode',
29 dest='force_interactive',
30 action='store_true',
31 default=False)
32 parser.add_argument('-l',
33 help='run as a login shell',
34 dest='login',
35 action='store_true',
36 default=False)
37 parser.add_argument('--no-rc',
38 help="Do not load the .xonshrc file",
39 dest='norc',
40 action='store_true',
41 default=False)
42 parser.add_argument('-D',
43 dest='defines',
44 help='define an environment variable, in the form of '
45 '-DNAME=VAL. May be used many times.',
46 metavar='ITEM',
47 nargs='*',
48 default=None)
49 parser.add_argument('--shell-type',
50 help='What kind of shell should be used. '
51 'Possible options: readline, prompt_toolkit. '
52 'Warning! If set this overrides $SHELL_TYPE variable.',
53 dest='shell_type',
54 choices=('readline', 'prompt_toolkit'),
55 default=None)
56 parser.add_argument('file',
57 metavar='script-file',
58 help='If present, execute the script in script-file'
59 ' and exit',
60 nargs='?',
61 default=None)
62 parser.add_argument('args',
63 metavar='args',
64 help='Additional arguments to the script specified'
65 ' by script-file',
66 nargs='*',
67 default=[])
68
69
70 def _pprint_displayhook(value):
71 if value is not None:
72 builtins._ = value
73 pprint(value)
74
75
76 def premain(argv=None):
77 """Setup for main xonsh entry point, returns parsed arguments."""
78 args = parser.parse_args(argv)
79 shell_kwargs = {'shell_type': args.shell_type}
80 if args.norc:
81 shell_kwargs['ctx'] = {}
82 setattr(sys, 'displayhook', _pprint_displayhook)
83 shell = builtins.__xonsh_shell__ = Shell(**shell_kwargs)
84 from xonsh import imphooks
85 env = builtins.__xonsh_env__
86 if args.defines is not None:
87 env.update([x.split('=', 1) for x in args.defines])
88 if args.login:
89 env['XONSH_LOGIN'] = True
90 env['XONSH_INTERACTIVE'] = False
91 return args
92
93
94 def main(argv=None):
95 """Main entry point for xonsh cli."""
96 args = premain(argv)
97 env = builtins.__xonsh_env__
98 shell = builtins.__xonsh_shell__
99 if args.command is not None:
100 # run a single command and exit
101 shell.default(args.command)
102 elif args.file is not None:
103 # run a script contained in a file
104 if os.path.isfile(args.file):
105 with open(args.file) as f:
106 code = f.read()
107 code = code if code.endswith('\n') else code + '\n'
108 env['ARGS'] = [args.file] + args.args
109 code = shell.execer.compile(code, mode='exec', glbs=shell.ctx)
110 shell.execer.exec(code, mode='exec', glbs=shell.ctx)
111 else:
112 print('xonsh: {0}: No such file or directory.'.format(args.file))
113 elif not sys.stdin.isatty() and not args.force_interactive:
114 # run a script given on stdin
115 code = sys.stdin.read()
116 code = code if code.endswith('\n') else code + '\n'
117 code = shell.execer.compile(code, mode='exec', glbs=shell.ctx)
118 shell.execer.exec(code, mode='exec', glbs=shell.ctx)
119 else:
120 # otherwise, enter the shell
121 env['XONSH_INTERACTIVE'] = True
122 ignore_sigtstp()
123 shell.cmdloop()
124 postmain(args)
125
126
127 def postmain(args=None):
128 """Teardown for main xonsh entry point, accepts parsed arguments."""
129 del builtins.__xonsh_shell__
130
131
132 @contextmanager
133 def main_context(argv=None):
134 """Generator that runs pre- and post-main() functions. This has two iterations.
135 The first yields the shell. The second returns None but cleans
136 up the shell.
137 """
138 args = premain(argv)
139 yield builtins.__xonsh_shell__
140 postmain(args)
141
142
143
144 if __name__ == '__main__':
145 main()
146
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/xonsh/main.py b/xonsh/main.py
--- a/xonsh/main.py
+++ b/xonsh/main.py
@@ -2,11 +2,8 @@
"""The main xonsh script."""
import os
import sys
-import shlex
-import signal
import builtins
-import subprocess
-from argparse import ArgumentParser, Namespace
+from argparse import ArgumentParser, ArgumentTypeError
from contextlib import contextmanager
from xonsh import __version__
@@ -14,6 +11,18 @@
from xonsh.pretty import pprint
from xonsh.jobs import ignore_sigtstp
+def path_argument(s):
+ """Return a path only if the path is actually legal
+
+ This is very similar to argparse.FileType, except that it doesn't return
+ an open file handle, but rather simply validates the path."""
+
+ s = os.path.abspath(os.path.expanduser(s))
+ if not os.path.isfile(s):
+ raise ArgumentTypeError('"%s" must be a valid path to a file' % s)
+ return s
+
+
parser = ArgumentParser(description='xonsh')
parser.add_argument('-V', '--version',
action='version',
@@ -34,6 +43,10 @@
dest='login',
action='store_true',
default=False)
+parser.add_argument('--config-path',
+ help='specify a custom static configuration file',
+ dest='config_path',
+ type=path_argument)
parser.add_argument('--no-rc',
help="Do not load the .xonshrc file",
dest='norc',
@@ -79,6 +92,8 @@
shell_kwargs = {'shell_type': args.shell_type}
if args.norc:
shell_kwargs['ctx'] = {}
+ if args.config_path:
+ shell_kwargs['ctx']= {'XONSHCONFIG': args.config_path}
setattr(sys, 'displayhook', _pprint_displayhook)
shell = builtins.__xonsh_shell__ = Shell(**shell_kwargs)
from xonsh import imphooks
| {"golden_diff": "diff --git a/xonsh/main.py b/xonsh/main.py\n--- a/xonsh/main.py\n+++ b/xonsh/main.py\n@@ -2,11 +2,8 @@\n \"\"\"The main xonsh script.\"\"\"\n import os\n import sys\n-import shlex\n-import signal\n import builtins\n-import subprocess\n-from argparse import ArgumentParser, Namespace\n+from argparse import ArgumentParser, ArgumentTypeError\n from contextlib import contextmanager\n \n from xonsh import __version__\n@@ -14,6 +11,18 @@\n from xonsh.pretty import pprint\n from xonsh.jobs import ignore_sigtstp\n \n+def path_argument(s):\n+ \"\"\"Return a path only if the path is actually legal\n+\n+ This is very similar to argparse.FileType, except that it doesn't return\n+ an open file handle, but rather simply validates the path.\"\"\"\n+\n+ s = os.path.abspath(os.path.expanduser(s))\n+ if not os.path.isfile(s):\n+ raise ArgumentTypeError('\"%s\" must be a valid path to a file' % s)\n+ return s\n+\n+\n parser = ArgumentParser(description='xonsh')\n parser.add_argument('-V', '--version',\n action='version',\n@@ -34,6 +43,10 @@\n dest='login',\n action='store_true',\n default=False)\n+parser.add_argument('--config-path',\n+ help='specify a custom static configuration file',\n+ dest='config_path',\n+ type=path_argument)\n parser.add_argument('--no-rc',\n help=\"Do not load the .xonshrc file\",\n dest='norc',\n@@ -79,6 +92,8 @@\n shell_kwargs = {'shell_type': args.shell_type}\n if args.norc:\n shell_kwargs['ctx'] = {}\n+ if args.config_path:\n+ shell_kwargs['ctx']= {'XONSHCONFIG': args.config_path}\n setattr(sys, 'displayhook', _pprint_displayhook)\n shell = builtins.__xonsh_shell__ = Shell(**shell_kwargs)\n from xonsh import imphooks\n", "issue": "Feature-request: Static configuration file as a command line option\nI suggest that an option to the commandline --config_file is added that allows the user to specify a file location for a static configuration file. (http://xonsh.org/xonshconfig.html)\n\nThis would allow portable usage of xonsh, e.g. install python and xonsh on a usb and bring the shell on a stick. \n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"The main xonsh script.\"\"\"\nimport os\nimport sys\nimport shlex\nimport signal\nimport builtins\nimport subprocess\nfrom argparse import ArgumentParser, Namespace\nfrom contextlib import contextmanager\n\nfrom xonsh import __version__\nfrom xonsh.shell import Shell\nfrom xonsh.pretty import pprint\nfrom xonsh.jobs import ignore_sigtstp\n\nparser = ArgumentParser(description='xonsh')\nparser.add_argument('-V', '--version',\n action='version',\n version='/'.join(('xonsh', __version__)),\n help='show version information and exit')\nparser.add_argument('-c',\n help=\"Run a single command and exit\",\n dest='command',\n required=False,\n default=None)\nparser.add_argument('-i',\n help='force running in interactive mode',\n dest='force_interactive',\n action='store_true',\n default=False)\nparser.add_argument('-l',\n help='run as a login shell',\n dest='login',\n action='store_true',\n default=False)\nparser.add_argument('--no-rc',\n help=\"Do not load the .xonshrc file\",\n dest='norc',\n action='store_true',\n default=False)\nparser.add_argument('-D',\n dest='defines',\n help='define an environment variable, in the form of '\n '-DNAME=VAL. May be used many times.',\n metavar='ITEM',\n nargs='*',\n default=None)\nparser.add_argument('--shell-type',\n help='What kind of shell should be used. '\n 'Possible options: readline, prompt_toolkit. '\n 'Warning! 
If set this overrides $SHELL_TYPE variable.',\n dest='shell_type',\n choices=('readline', 'prompt_toolkit'),\n default=None)\nparser.add_argument('file',\n metavar='script-file',\n help='If present, execute the script in script-file'\n ' and exit',\n nargs='?',\n default=None)\nparser.add_argument('args',\n metavar='args',\n help='Additional arguments to the script specified'\n ' by script-file',\n nargs='*',\n default=[])\n\n\ndef _pprint_displayhook(value):\n if value is not None:\n builtins._ = value\n pprint(value)\n\n\ndef premain(argv=None):\n \"\"\"Setup for main xonsh entry point, returns parsed arguments.\"\"\"\n args = parser.parse_args(argv)\n shell_kwargs = {'shell_type': args.shell_type}\n if args.norc:\n shell_kwargs['ctx'] = {}\n setattr(sys, 'displayhook', _pprint_displayhook)\n shell = builtins.__xonsh_shell__ = Shell(**shell_kwargs)\n from xonsh import imphooks\n env = builtins.__xonsh_env__\n if args.defines is not None:\n env.update([x.split('=', 1) for x in args.defines])\n if args.login:\n env['XONSH_LOGIN'] = True\n env['XONSH_INTERACTIVE'] = False\n return args\n\n\ndef main(argv=None):\n \"\"\"Main entry point for xonsh cli.\"\"\"\n args = premain(argv)\n env = builtins.__xonsh_env__\n shell = builtins.__xonsh_shell__\n if args.command is not None:\n # run a single command and exit\n shell.default(args.command)\n elif args.file is not None:\n # run a script contained in a file\n if os.path.isfile(args.file):\n with open(args.file) as f:\n code = f.read()\n code = code if code.endswith('\\n') else code + '\\n'\n env['ARGS'] = [args.file] + args.args\n code = shell.execer.compile(code, mode='exec', glbs=shell.ctx)\n shell.execer.exec(code, mode='exec', glbs=shell.ctx)\n else:\n print('xonsh: {0}: No such file or directory.'.format(args.file))\n elif not sys.stdin.isatty() and not args.force_interactive:\n # run a script given on stdin\n code = sys.stdin.read()\n code = code if code.endswith('\\n') else code + '\\n'\n code = shell.execer.compile(code, mode='exec', glbs=shell.ctx)\n shell.execer.exec(code, mode='exec', glbs=shell.ctx)\n else:\n # otherwise, enter the shell\n env['XONSH_INTERACTIVE'] = True\n ignore_sigtstp()\n shell.cmdloop()\n postmain(args)\n\n\ndef postmain(args=None):\n \"\"\"Teardown for main xonsh entry point, accepts parsed arguments.\"\"\"\n del builtins.__xonsh_shell__\n\n\n@contextmanager\ndef main_context(argv=None):\n \"\"\"Generator that runs pre- and post-main() functions. This has two iterations.\n The first yields the shell. 
The second returns None but cleans\n up the shell.\n \"\"\"\n args = premain(argv)\n yield builtins.__xonsh_shell__\n postmain(args)\n\n\n\nif __name__ == '__main__':\n main()\n", "path": "xonsh/main.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"The main xonsh script.\"\"\"\nimport os\nimport sys\nimport builtins\nfrom argparse import ArgumentParser, ArgumentTypeError\nfrom contextlib import contextmanager\n\nfrom xonsh import __version__\nfrom xonsh.shell import Shell\nfrom xonsh.pretty import pprint\nfrom xonsh.jobs import ignore_sigtstp\n\ndef path_argument(s):\n \"\"\"Return a path only if the path is actually legal\n\n This is very similar to argparse.FileType, except that it doesn't return\n an open file handle, but rather simply validates the path.\"\"\"\n\n s = os.path.abspath(os.path.expanduser(s))\n if not os.path.isfile(s):\n raise ArgumentTypeError('\"%s\" must be a valid path to a file' % s)\n return s\n\n\nparser = ArgumentParser(description='xonsh')\nparser.add_argument('-V', '--version',\n action='version',\n version='/'.join(('xonsh', __version__)),\n help='show version information and exit')\nparser.add_argument('-c',\n help=\"Run a single command and exit\",\n dest='command',\n required=False,\n default=None)\nparser.add_argument('-i',\n help='force running in interactive mode',\n dest='force_interactive',\n action='store_true',\n default=False)\nparser.add_argument('-l',\n help='run as a login shell',\n dest='login',\n action='store_true',\n default=False)\nparser.add_argument('--config-path',\n help='specify a custom static configuration file',\n dest='config_path',\n type=path_argument)\nparser.add_argument('--no-rc',\n help=\"Do not load the .xonshrc file\",\n dest='norc',\n action='store_true',\n default=False)\nparser.add_argument('-D',\n dest='defines',\n help='define an environment variable, in the form of '\n '-DNAME=VAL. May be used many times.',\n metavar='ITEM',\n nargs='*',\n default=None)\nparser.add_argument('--shell-type',\n help='What kind of shell should be used. '\n 'Possible options: readline, prompt_toolkit. '\n 'Warning! 
If set this overrides $SHELL_TYPE variable.',\n dest='shell_type',\n choices=('readline', 'prompt_toolkit'),\n default=None)\nparser.add_argument('file',\n metavar='script-file',\n help='If present, execute the script in script-file'\n ' and exit',\n nargs='?',\n default=None)\nparser.add_argument('args',\n metavar='args',\n help='Additional arguments to the script specified'\n ' by script-file',\n nargs='*',\n default=[])\n\n\ndef _pprint_displayhook(value):\n if value is not None:\n builtins._ = value\n pprint(value)\n\n\ndef premain(argv=None):\n \"\"\"Setup for main xonsh entry point, returns parsed arguments.\"\"\"\n args = parser.parse_args(argv)\n shell_kwargs = {'shell_type': args.shell_type}\n if args.norc:\n shell_kwargs['ctx'] = {}\n if args.config_path:\n shell_kwargs['ctx']= {'XONSHCONFIG': args.config_path}\n setattr(sys, 'displayhook', _pprint_displayhook)\n shell = builtins.__xonsh_shell__ = Shell(**shell_kwargs)\n from xonsh import imphooks\n env = builtins.__xonsh_env__\n if args.defines is not None:\n env.update([x.split('=', 1) for x in args.defines])\n if args.login:\n env['XONSH_LOGIN'] = True\n env['XONSH_INTERACTIVE'] = False\n return args\n\n\ndef main(argv=None):\n \"\"\"Main entry point for xonsh cli.\"\"\"\n args = premain(argv)\n env = builtins.__xonsh_env__\n shell = builtins.__xonsh_shell__\n if args.command is not None:\n # run a single command and exit\n shell.default(args.command)\n elif args.file is not None:\n # run a script contained in a file\n if os.path.isfile(args.file):\n with open(args.file) as f:\n code = f.read()\n code = code if code.endswith('\\n') else code + '\\n'\n env['ARGS'] = [args.file] + args.args\n code = shell.execer.compile(code, mode='exec', glbs=shell.ctx)\n shell.execer.exec(code, mode='exec', glbs=shell.ctx)\n else:\n print('xonsh: {0}: No such file or directory.'.format(args.file))\n elif not sys.stdin.isatty() and not args.force_interactive:\n # run a script given on stdin\n code = sys.stdin.read()\n code = code if code.endswith('\\n') else code + '\\n'\n code = shell.execer.compile(code, mode='exec', glbs=shell.ctx)\n shell.execer.exec(code, mode='exec', glbs=shell.ctx)\n else:\n # otherwise, enter the shell\n env['XONSH_INTERACTIVE'] = True\n ignore_sigtstp()\n shell.cmdloop()\n postmain(args)\n\n\ndef postmain(args=None):\n \"\"\"Teardown for main xonsh entry point, accepts parsed arguments.\"\"\"\n del builtins.__xonsh_shell__\n\n\n@contextmanager\ndef main_context(argv=None):\n \"\"\"Generator that runs pre- and post-main() functions. This has two iterations.\n The first yields the shell. The second returns None but cleans\n up the shell.\n \"\"\"\n args = premain(argv)\n yield builtins.__xonsh_shell__\n postmain(args)\n\n\n\nif __name__ == '__main__':\n main()\n", "path": "xonsh/main.py"}]} | 1,756 | 453 |
gh_patches_debug_29947 | rasdani/github-patches | git_diff | fossasia__open-event-server-7152 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
My Sessions: Server is returning duplicated data
**Describe the bug**
My Sessions is showing sessions multiple times 
**Expected behaviour**
Server should return each session only once
**Screenshots**
[Video: duplicated sessions](http://www.youtube.com/watch?v=EMXLyCehYCM "multiple session")
Responses for reference:
https://github.com/fossasia/open-event-frontend/pull/3811#issuecomment-575883439
https://github.com/fossasia/open-event-frontend/pull/3811#issuecomment-575882180
--- END ISSUE ---
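
The duplication described above is the usual symptom of joining a session query across a many-to-many speakers relationship: the join yields one row per matching speaker, and adding DISTINCT (which the patch later in this record does via `get_distinct_sort_fields`) collapses those rows. Below is a minimal, self-contained SQLAlchemy sketch of that effect; the `TalkSession`/`Speaker` models are invented for the illustration (they are not the project's actual models) and the import paths assume SQLAlchemy 1.4+.

```
from sqlalchemy import Column, ForeignKey, Integer, String, Table, create_engine
from sqlalchemy.orm import Session as DBSession, declarative_base, relationship

Base = declarative_base()

link = Table('session_speaker_link', Base.metadata,
             Column('session_id', ForeignKey('talk_session.id')),
             Column('speaker_id', ForeignKey('speaker.id')))


class TalkSession(Base):
    __tablename__ = 'talk_session'
    id = Column(Integer, primary_key=True)
    title = Column(String)
    speakers = relationship('Speaker', secondary=link)


class Speaker(Base):
    __tablename__ = 'speaker'
    id = Column(Integer, primary_key=True)
    user_id = Column(Integer)


engine = create_engine('sqlite://')
Base.metadata.create_all(engine)

with DBSession(engine) as db:
    db.add(TalkSession(title='Keynote',
                       speakers=[Speaker(user_id=1), Speaker(user_id=1)]))
    db.commit()

    query = (db.query(TalkSession)
             .join(TalkSession.speakers)
             .filter(Speaker.user_id == 1))
    print(query.count())             # 2 -> one row per matching speaker
    print(query.distinct().count())  # 1 -> DISTINCT collapses the duplicates
```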
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `app/api/sessions.py`
Content:
```
1 from flask_jwt_extended import current_user
2 from flask_rest_jsonapi import ResourceDetail, ResourceList, ResourceRelationship
3
4 from app.api.bootstrap import api
5 from app.api.events import Event
6 from app.api.helpers.custom_forms import validate_custom_form_constraints_request
7 from app.api.helpers.db import get_count, safe_query, safe_query_kwargs, save_to_db
8 from app.api.helpers.errors import ForbiddenError
9 from app.api.helpers.files import make_frontend_url
10 from app.api.helpers.mail import send_email_new_session, send_email_session_accept_reject
11 from app.api.helpers.notification import (
12 send_notif_new_session_organizer,
13 send_notif_session_accept_reject,
14 )
15 from app.api.helpers.permission_manager import has_access
16 from app.api.helpers.query import event_query
17 from app.api.helpers.speaker import can_edit_after_cfs_ends
18 from app.api.helpers.utilities import require_relationship
19 from app.api.schema.sessions import SessionSchema
20 from app.models import db
21 from app.models.microlocation import Microlocation
22 from app.models.session import Session
23 from app.models.session_speaker_link import SessionsSpeakersLink
24 from app.models.session_type import SessionType
25 from app.models.speaker import Speaker
26 from app.models.track import Track
27 from app.models.user import User
28 from app.settings import get_settings
29
30
31 class SessionListPost(ResourceList):
32 """
33 List Sessions
34 """
35
36 def before_post(self, args, kwargs, data):
37 """
38 before post method to check for required relationship and proper permission
39 :param args:
40 :param kwargs:
41 :param data:
42 :return:
43 """
44 require_relationship(['event', 'track'], data)
45 data['creator_id'] = current_user.id
46 if (
47 get_count(
48 db.session.query(Event).filter_by(
49 id=int(data['event']), is_sessions_speakers_enabled=False
50 )
51 )
52 > 0
53 ):
54 raise ForbiddenError({'pointer': ''}, "Sessions are disabled for this Event")
55
56 data['complex_field_values'] = validate_custom_form_constraints_request(
57 'session', self.schema, Session(event_id=data['event']), data
58 )
59
60 def after_create_object(self, session, data, view_kwargs):
61 """
62 method to send email for creation of new session
63 mails session link to the concerned user
64 :param session:
65 :param data:
66 :param view_kwargs:
67 :return:
68 """
69 if session.event.get_owner():
70 event_name = session.event.name
71 owner = session.event.get_owner()
72 owner_email = owner.email
73 event = session.event
74 link = make_frontend_url(
75 "/events/{}/sessions/{}".format(event.identifier, session.id)
76 )
77 send_email_new_session(owner_email, event_name, link)
78 send_notif_new_session_organizer(owner, event_name, link, session.id)
79
80 for speaker in session.speakers:
81 session_speaker_link = SessionsSpeakersLink(
82 session_state=session.state,
83 session_id=session.id,
84 event_id=session.event.id,
85 speaker_id=speaker.id,
86 )
87 save_to_db(session_speaker_link, "Session Speaker Link Saved")
88
89 decorators = (api.has_permission('create_event'),)
90 schema = SessionSchema
91 data_layer = {
92 'session': db.session,
93 'model': Session,
94 'methods': {'after_create_object': after_create_object},
95 }
96
97
98 class SessionList(ResourceList):
99 """
100 List Sessions
101 """
102
103 def query(self, view_kwargs):
104 """
105 query method for SessionList class
106 :param view_kwargs:
107 :return:
108 """
109 query_ = self.session.query(Session)
110 if view_kwargs.get('track_id') is not None:
111 track = safe_query_kwargs(Track, view_kwargs, 'track_id')
112 query_ = query_.join(Track).filter(Track.id == track.id)
113 if view_kwargs.get('session_type_id') is not None:
114 session_type = safe_query_kwargs(SessionType, view_kwargs, 'session_type_id')
115 query_ = query_.join(SessionType).filter(SessionType.id == session_type.id)
116 if view_kwargs.get('microlocation_id') is not None:
117 microlocation = safe_query_kwargs(
118 Microlocation, view_kwargs, 'microlocation_id',
119 )
120 query_ = query_.join(Microlocation).filter(
121 Microlocation.id == microlocation.id
122 )
123 if view_kwargs.get('user_id') is not None:
124 user = safe_query_kwargs(User, view_kwargs, 'user_id')
125 query_ = (
126 query_.join(User)
127 .join(Speaker)
128 .filter(
129 (
130 User.id == user.id
131 or Session.speakers.any(Speaker.user_id == user.id)
132 )
133 )
134 )
135 query_ = event_query(query_, view_kwargs)
136 if view_kwargs.get('speaker_id'):
137 speaker = safe_query_kwargs(Speaker, view_kwargs, 'speaker_id')
138 # session-speaker :: many-to-many relationship
139 query_ = Session.query.filter(Session.speakers.any(id=speaker.id))
140
141 return query_
142
143 view_kwargs = True
144 methods = ['GET']
145 schema = SessionSchema
146 data_layer = {'session': db.session, 'model': Session, 'methods': {'query': query}}
147
148
149 class SessionDetail(ResourceDetail):
150 """
151 Session detail by id
152 """
153
154 def before_get_object(self, view_kwargs):
155 """
156 before get method to get the resource id for fetching details
157 :param view_kwargs:
158 :return:
159 """
160 if view_kwargs.get('event_identifier'):
161 event = safe_query(
162 Event, 'identifier', view_kwargs['event_identifier'], 'identifier'
163 )
164 view_kwargs['event_id'] = event.id
165
166 def before_update_object(self, session, data, view_kwargs):
167 """
168 before update method to verify if session is locked before updating session object
169 :param event:
170 :param data:
171 :param view_kwargs:
172 :return:
173 """
174 if session.is_locked:
175 if not (
176 has_access('is_admin')
177 or has_access('is_organizer', event_id=session.event_id)
178 ):
179 raise ForbiddenError(
180 {'source': '/data/attributes/is-locked'},
181 "You don't have enough permissions to change this property",
182 )
183
184 if session.is_locked and data.get('is_locked') != session.is_locked:
185 raise ForbiddenError(
186 {'source': '/data/attributes/is-locked'},
187 "Locked sessions cannot be edited",
188 )
189
190 if not can_edit_after_cfs_ends(session.event_id):
191 raise ForbiddenError(
192 {'source': ''}, "Cannot edit session after the call for speaker is ended"
193 )
194
195 data['complex_field_values'] = validate_custom_form_constraints_request(
196 'session', self.resource.schema, session, data
197 )
198
199 def after_update_object(self, session, data, view_kwargs):
200 """ Send email if session accepted or rejected """
201
202 if (
203 'state' in data
204 and data.get('send_email', None)
205 and (session.state == 'accepted' or session.state == 'rejected')
206 ):
207
208 event = session.event
209 # Email for speaker
210 speakers = session.speakers
211 for speaker in speakers:
212 frontend_url = get_settings()['frontend_url']
213 link = "{}/events/{}/sessions/{}".format(
214 frontend_url, event.identifier, session.id
215 )
216 if not speaker.is_email_overridden:
217 send_email_session_accept_reject(speaker.email, session, link)
218 send_notif_session_accept_reject(
219 speaker, session.title, session.state, link, session.id
220 )
221
222 # Email for owner
223 if session.event.get_owner():
224 owner = session.event.get_owner()
225 owner_email = owner.email
226 frontend_url = get_settings()['frontend_url']
227 link = "{}/events/{}/sessions/{}".format(
228 frontend_url, event.identifier, session.id
229 )
230 send_email_session_accept_reject(owner_email, session, link)
231 send_notif_session_accept_reject(
232 owner, session.title, session.state, link, session.id
233 )
234 if 'state' in data:
235 entry_count = SessionsSpeakersLink.query.filter_by(session_id=session.id)
236 if entry_count.count() == 0:
237 is_patch_request = False
238 else:
239 is_patch_request = True
240
241 if is_patch_request:
242 for focus_session in entry_count:
243 focus_session.session_state = session.state
244 db.session.commit()
245 else:
246 current_session = Session.query.filter_by(id=session.id).first()
247 for speaker in current_session.speakers:
248 session_speaker_link = SessionsSpeakersLink(
249 session_state=session.state,
250 session_id=session.id,
251 event_id=session.event.id,
252 speaker_id=speaker.id,
253 )
254 save_to_db(session_speaker_link, "Session Speaker Link Saved")
255
256 decorators = (api.has_permission('is_speaker_for_session', methods="PATCH,DELETE"),)
257 schema = SessionSchema
258 data_layer = {
259 'session': db.session,
260 'model': Session,
261 'methods': {
262 'before_update_object': before_update_object,
263 'before_get_object': before_get_object,
264 'after_update_object': after_update_object,
265 },
266 }
267
268
269 class SessionRelationshipRequired(ResourceRelationship):
270 """
271 Session Relationship
272 """
273
274 schema = SessionSchema
275 decorators = (api.has_permission('is_speaker_for_session', methods="PATCH,DELETE"),)
276 methods = ['GET', 'PATCH']
277 data_layer = {'session': db.session, 'model': Session}
278
279
280 class SessionRelationshipOptional(ResourceRelationship):
281 """
282 Session Relationship
283 """
284
285 schema = SessionSchema
286 decorators = (api.has_permission('is_speaker_for_session', methods="PATCH,DELETE"),)
287 data_layer = {'session': db.session, 'model': Session}
288
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/app/api/sessions.py b/app/api/sessions.py
--- a/app/api/sessions.py
+++ b/app/api/sessions.py
@@ -1,5 +1,7 @@
+from flask import request
from flask_jwt_extended import current_user
from flask_rest_jsonapi import ResourceDetail, ResourceList, ResourceRelationship
+from flask_rest_jsonapi.querystring import QueryStringManager as QSManager
from app.api.bootstrap import api
from app.api.events import Event
@@ -95,6 +97,28 @@
}
+def get_distinct_sort_fields(schema, model, sort=True):
+ """Due to the poor code of flask-rest-jsonapi, distinct query needed
+ in sessions API to remove duplicate sessions can't be sorted on
+ returning subquery, thus we need to add all sort fields in distinct
+ group and repeat it in sort group as well"""
+ fields = []
+ qs = QSManager(request.args, schema)
+ for sort_opt in qs.sorting:
+ field = sort_opt['field']
+ if not hasattr(model, field):
+ continue
+ field = getattr(model, field)
+ if sort:
+ field = getattr(field, sort_opt['order'])()
+ fields.append(field)
+ field = Session.id
+ if sort:
+ field = field.desc()
+ fields.append(field)
+ return fields
+
+
class SessionList(ResourceList):
"""
List Sessions
@@ -131,6 +155,8 @@
or Session.speakers.any(Speaker.user_id == user.id)
)
)
+ .distinct(*get_distinct_sort_fields(SessionSchema, Session, sort=False))
+ .order_by(*get_distinct_sort_fields(SessionSchema, Session))
)
query_ = event_query(query_, view_kwargs)
if view_kwargs.get('speaker_id'):
| {"golden_diff": "diff --git a/app/api/sessions.py b/app/api/sessions.py\n--- a/app/api/sessions.py\n+++ b/app/api/sessions.py\n@@ -1,5 +1,7 @@\n+from flask import request\n from flask_jwt_extended import current_user\n from flask_rest_jsonapi import ResourceDetail, ResourceList, ResourceRelationship\n+from flask_rest_jsonapi.querystring import QueryStringManager as QSManager\n \n from app.api.bootstrap import api\n from app.api.events import Event\n@@ -95,6 +97,28 @@\n }\n \n \n+def get_distinct_sort_fields(schema, model, sort=True):\n+ \"\"\"Due to the poor code of flask-rest-jsonapi, distinct query needed\n+ in sessions API to remove duplicate sessions can't be sorted on\n+ returning subquery, thus we need to add all sort fields in distinct\n+ group and repeat it in sort group as well\"\"\"\n+ fields = []\n+ qs = QSManager(request.args, schema)\n+ for sort_opt in qs.sorting:\n+ field = sort_opt['field']\n+ if not hasattr(model, field):\n+ continue\n+ field = getattr(model, field)\n+ if sort:\n+ field = getattr(field, sort_opt['order'])()\n+ fields.append(field)\n+ field = Session.id\n+ if sort:\n+ field = field.desc()\n+ fields.append(field)\n+ return fields\n+\n+\n class SessionList(ResourceList):\n \"\"\"\n List Sessions\n@@ -131,6 +155,8 @@\n or Session.speakers.any(Speaker.user_id == user.id)\n )\n )\n+ .distinct(*get_distinct_sort_fields(SessionSchema, Session, sort=False))\n+ .order_by(*get_distinct_sort_fields(SessionSchema, Session))\n )\n query_ = event_query(query_, view_kwargs)\n if view_kwargs.get('speaker_id'):\n", "issue": "My Sessions: Server is returning duplicated data\n**Describe the bug**\r\nMy Session is showing session's multiple times \r\n\r\n\r\n**Expected behaviour**\r\nServer should return session only once\r\n\r\n**Screenshots**\r\n[](http://www.youtube.com/watch?v=EMXLyCehYCM \"multiple session\")\r\n\r\nResponses for Reference :\r\nhttps://github.com/fossasia/open-event-frontend/pull/3811#issuecomment-575883439\r\nhttps://github.com/fossasia/open-event-frontend/pull/3811#issuecomment-575882180\n", "before_files": [{"content": "from flask_jwt_extended import current_user\nfrom flask_rest_jsonapi import ResourceDetail, ResourceList, ResourceRelationship\n\nfrom app.api.bootstrap import api\nfrom app.api.events import Event\nfrom app.api.helpers.custom_forms import validate_custom_form_constraints_request\nfrom app.api.helpers.db import get_count, safe_query, safe_query_kwargs, save_to_db\nfrom app.api.helpers.errors import ForbiddenError\nfrom app.api.helpers.files import make_frontend_url\nfrom app.api.helpers.mail import send_email_new_session, send_email_session_accept_reject\nfrom app.api.helpers.notification import (\n send_notif_new_session_organizer,\n send_notif_session_accept_reject,\n)\nfrom app.api.helpers.permission_manager import has_access\nfrom app.api.helpers.query import event_query\nfrom app.api.helpers.speaker import can_edit_after_cfs_ends\nfrom app.api.helpers.utilities import require_relationship\nfrom app.api.schema.sessions import SessionSchema\nfrom app.models import db\nfrom app.models.microlocation import Microlocation\nfrom app.models.session import Session\nfrom app.models.session_speaker_link import SessionsSpeakersLink\nfrom app.models.session_type import SessionType\nfrom app.models.speaker import Speaker\nfrom app.models.track import Track\nfrom app.models.user import User\nfrom app.settings import get_settings\n\n\nclass SessionListPost(ResourceList):\n \"\"\"\n List Sessions\n \"\"\"\n\n def before_post(self, args, kwargs, 
data):\n \"\"\"\n before post method to check for required relationship and proper permission\n :param args:\n :param kwargs:\n :param data:\n :return:\n \"\"\"\n require_relationship(['event', 'track'], data)\n data['creator_id'] = current_user.id\n if (\n get_count(\n db.session.query(Event).filter_by(\n id=int(data['event']), is_sessions_speakers_enabled=False\n )\n )\n > 0\n ):\n raise ForbiddenError({'pointer': ''}, \"Sessions are disabled for this Event\")\n\n data['complex_field_values'] = validate_custom_form_constraints_request(\n 'session', self.schema, Session(event_id=data['event']), data\n )\n\n def after_create_object(self, session, data, view_kwargs):\n \"\"\"\n method to send email for creation of new session\n mails session link to the concerned user\n :param session:\n :param data:\n :param view_kwargs:\n :return:\n \"\"\"\n if session.event.get_owner():\n event_name = session.event.name\n owner = session.event.get_owner()\n owner_email = owner.email\n event = session.event\n link = make_frontend_url(\n \"/events/{}/sessions/{}\".format(event.identifier, session.id)\n )\n send_email_new_session(owner_email, event_name, link)\n send_notif_new_session_organizer(owner, event_name, link, session.id)\n\n for speaker in session.speakers:\n session_speaker_link = SessionsSpeakersLink(\n session_state=session.state,\n session_id=session.id,\n event_id=session.event.id,\n speaker_id=speaker.id,\n )\n save_to_db(session_speaker_link, \"Session Speaker Link Saved\")\n\n decorators = (api.has_permission('create_event'),)\n schema = SessionSchema\n data_layer = {\n 'session': db.session,\n 'model': Session,\n 'methods': {'after_create_object': after_create_object},\n }\n\n\nclass SessionList(ResourceList):\n \"\"\"\n List Sessions\n \"\"\"\n\n def query(self, view_kwargs):\n \"\"\"\n query method for SessionList class\n :param view_kwargs:\n :return:\n \"\"\"\n query_ = self.session.query(Session)\n if view_kwargs.get('track_id') is not None:\n track = safe_query_kwargs(Track, view_kwargs, 'track_id')\n query_ = query_.join(Track).filter(Track.id == track.id)\n if view_kwargs.get('session_type_id') is not None:\n session_type = safe_query_kwargs(SessionType, view_kwargs, 'session_type_id')\n query_ = query_.join(SessionType).filter(SessionType.id == session_type.id)\n if view_kwargs.get('microlocation_id') is not None:\n microlocation = safe_query_kwargs(\n Microlocation, view_kwargs, 'microlocation_id',\n )\n query_ = query_.join(Microlocation).filter(\n Microlocation.id == microlocation.id\n )\n if view_kwargs.get('user_id') is not None:\n user = safe_query_kwargs(User, view_kwargs, 'user_id')\n query_ = (\n query_.join(User)\n .join(Speaker)\n .filter(\n (\n User.id == user.id\n or Session.speakers.any(Speaker.user_id == user.id)\n )\n )\n )\n query_ = event_query(query_, view_kwargs)\n if view_kwargs.get('speaker_id'):\n speaker = safe_query_kwargs(Speaker, view_kwargs, 'speaker_id')\n # session-speaker :: many-to-many relationship\n query_ = Session.query.filter(Session.speakers.any(id=speaker.id))\n\n return query_\n\n view_kwargs = True\n methods = ['GET']\n schema = SessionSchema\n data_layer = {'session': db.session, 'model': Session, 'methods': {'query': query}}\n\n\nclass SessionDetail(ResourceDetail):\n \"\"\"\n Session detail by id\n \"\"\"\n\n def before_get_object(self, view_kwargs):\n \"\"\"\n before get method to get the resource id for fetching details\n :param view_kwargs:\n :return:\n \"\"\"\n if view_kwargs.get('event_identifier'):\n event = safe_query(\n Event, 
'identifier', view_kwargs['event_identifier'], 'identifier'\n )\n view_kwargs['event_id'] = event.id\n\n def before_update_object(self, session, data, view_kwargs):\n \"\"\"\n before update method to verify if session is locked before updating session object\n :param event:\n :param data:\n :param view_kwargs:\n :return:\n \"\"\"\n if session.is_locked:\n if not (\n has_access('is_admin')\n or has_access('is_organizer', event_id=session.event_id)\n ):\n raise ForbiddenError(\n {'source': '/data/attributes/is-locked'},\n \"You don't have enough permissions to change this property\",\n )\n\n if session.is_locked and data.get('is_locked') != session.is_locked:\n raise ForbiddenError(\n {'source': '/data/attributes/is-locked'},\n \"Locked sessions cannot be edited\",\n )\n\n if not can_edit_after_cfs_ends(session.event_id):\n raise ForbiddenError(\n {'source': ''}, \"Cannot edit session after the call for speaker is ended\"\n )\n\n data['complex_field_values'] = validate_custom_form_constraints_request(\n 'session', self.resource.schema, session, data\n )\n\n def after_update_object(self, session, data, view_kwargs):\n \"\"\" Send email if session accepted or rejected \"\"\"\n\n if (\n 'state' in data\n and data.get('send_email', None)\n and (session.state == 'accepted' or session.state == 'rejected')\n ):\n\n event = session.event\n # Email for speaker\n speakers = session.speakers\n for speaker in speakers:\n frontend_url = get_settings()['frontend_url']\n link = \"{}/events/{}/sessions/{}\".format(\n frontend_url, event.identifier, session.id\n )\n if not speaker.is_email_overridden:\n send_email_session_accept_reject(speaker.email, session, link)\n send_notif_session_accept_reject(\n speaker, session.title, session.state, link, session.id\n )\n\n # Email for owner\n if session.event.get_owner():\n owner = session.event.get_owner()\n owner_email = owner.email\n frontend_url = get_settings()['frontend_url']\n link = \"{}/events/{}/sessions/{}\".format(\n frontend_url, event.identifier, session.id\n )\n send_email_session_accept_reject(owner_email, session, link)\n send_notif_session_accept_reject(\n owner, session.title, session.state, link, session.id\n )\n if 'state' in data:\n entry_count = SessionsSpeakersLink.query.filter_by(session_id=session.id)\n if entry_count.count() == 0:\n is_patch_request = False\n else:\n is_patch_request = True\n\n if is_patch_request:\n for focus_session in entry_count:\n focus_session.session_state = session.state\n db.session.commit()\n else:\n current_session = Session.query.filter_by(id=session.id).first()\n for speaker in current_session.speakers:\n session_speaker_link = SessionsSpeakersLink(\n session_state=session.state,\n session_id=session.id,\n event_id=session.event.id,\n speaker_id=speaker.id,\n )\n save_to_db(session_speaker_link, \"Session Speaker Link Saved\")\n\n decorators = (api.has_permission('is_speaker_for_session', methods=\"PATCH,DELETE\"),)\n schema = SessionSchema\n data_layer = {\n 'session': db.session,\n 'model': Session,\n 'methods': {\n 'before_update_object': before_update_object,\n 'before_get_object': before_get_object,\n 'after_update_object': after_update_object,\n },\n }\n\n\nclass SessionRelationshipRequired(ResourceRelationship):\n \"\"\"\n Session Relationship\n \"\"\"\n\n schema = SessionSchema\n decorators = (api.has_permission('is_speaker_for_session', methods=\"PATCH,DELETE\"),)\n methods = ['GET', 'PATCH']\n data_layer = {'session': db.session, 'model': Session}\n\n\nclass 
SessionRelationshipOptional(ResourceRelationship):\n \"\"\"\n Session Relationship\n \"\"\"\n\n schema = SessionSchema\n decorators = (api.has_permission('is_speaker_for_session', methods=\"PATCH,DELETE\"),)\n data_layer = {'session': db.session, 'model': Session}\n", "path": "app/api/sessions.py"}], "after_files": [{"content": "from flask import request\nfrom flask_jwt_extended import current_user\nfrom flask_rest_jsonapi import ResourceDetail, ResourceList, ResourceRelationship\nfrom flask_rest_jsonapi.querystring import QueryStringManager as QSManager\n\nfrom app.api.bootstrap import api\nfrom app.api.events import Event\nfrom app.api.helpers.custom_forms import validate_custom_form_constraints_request\nfrom app.api.helpers.db import get_count, safe_query, safe_query_kwargs, save_to_db\nfrom app.api.helpers.errors import ForbiddenError\nfrom app.api.helpers.files import make_frontend_url\nfrom app.api.helpers.mail import send_email_new_session, send_email_session_accept_reject\nfrom app.api.helpers.notification import (\n send_notif_new_session_organizer,\n send_notif_session_accept_reject,\n)\nfrom app.api.helpers.permission_manager import has_access\nfrom app.api.helpers.query import event_query\nfrom app.api.helpers.speaker import can_edit_after_cfs_ends\nfrom app.api.helpers.utilities import require_relationship\nfrom app.api.schema.sessions import SessionSchema\nfrom app.models import db\nfrom app.models.microlocation import Microlocation\nfrom app.models.session import Session\nfrom app.models.session_speaker_link import SessionsSpeakersLink\nfrom app.models.session_type import SessionType\nfrom app.models.speaker import Speaker\nfrom app.models.track import Track\nfrom app.models.user import User\nfrom app.settings import get_settings\n\n\nclass SessionListPost(ResourceList):\n \"\"\"\n List Sessions\n \"\"\"\n\n def before_post(self, args, kwargs, data):\n \"\"\"\n before post method to check for required relationship and proper permission\n :param args:\n :param kwargs:\n :param data:\n :return:\n \"\"\"\n require_relationship(['event', 'track'], data)\n data['creator_id'] = current_user.id\n if (\n get_count(\n db.session.query(Event).filter_by(\n id=int(data['event']), is_sessions_speakers_enabled=False\n )\n )\n > 0\n ):\n raise ForbiddenError({'pointer': ''}, \"Sessions are disabled for this Event\")\n\n data['complex_field_values'] = validate_custom_form_constraints_request(\n 'session', self.schema, Session(event_id=data['event']), data\n )\n\n def after_create_object(self, session, data, view_kwargs):\n \"\"\"\n method to send email for creation of new session\n mails session link to the concerned user\n :param session:\n :param data:\n :param view_kwargs:\n :return:\n \"\"\"\n if session.event.get_owner():\n event_name = session.event.name\n owner = session.event.get_owner()\n owner_email = owner.email\n event = session.event\n link = make_frontend_url(\n \"/events/{}/sessions/{}\".format(event.identifier, session.id)\n )\n send_email_new_session(owner_email, event_name, link)\n send_notif_new_session_organizer(owner, event_name, link, session.id)\n\n for speaker in session.speakers:\n session_speaker_link = SessionsSpeakersLink(\n session_state=session.state,\n session_id=session.id,\n event_id=session.event.id,\n speaker_id=speaker.id,\n )\n save_to_db(session_speaker_link, \"Session Speaker Link Saved\")\n\n decorators = (api.has_permission('create_event'),)\n schema = SessionSchema\n data_layer = {\n 'session': db.session,\n 'model': Session,\n 'methods': 
{'after_create_object': after_create_object},\n }\n\n\ndef get_distinct_sort_fields(schema, model, sort=True):\n \"\"\"Due to the poor code of flask-rest-jsonapi, distinct query needed\n in sessions API to remove duplicate sessions can't be sorted on\n returning subquery, thus we need to add all sort fields in distinct\n group and repeat it in sort group as well\"\"\"\n fields = []\n qs = QSManager(request.args, schema)\n for sort_opt in qs.sorting:\n field = sort_opt['field']\n if not hasattr(model, field):\n continue\n field = getattr(model, field)\n if sort:\n field = getattr(field, sort_opt['order'])()\n fields.append(field)\n field = Session.id\n if sort:\n field = field.desc()\n fields.append(field)\n return fields\n\n\nclass SessionList(ResourceList):\n \"\"\"\n List Sessions\n \"\"\"\n\n def query(self, view_kwargs):\n \"\"\"\n query method for SessionList class\n :param view_kwargs:\n :return:\n \"\"\"\n query_ = self.session.query(Session)\n if view_kwargs.get('track_id') is not None:\n track = safe_query_kwargs(Track, view_kwargs, 'track_id')\n query_ = query_.join(Track).filter(Track.id == track.id)\n if view_kwargs.get('session_type_id') is not None:\n session_type = safe_query_kwargs(SessionType, view_kwargs, 'session_type_id')\n query_ = query_.join(SessionType).filter(SessionType.id == session_type.id)\n if view_kwargs.get('microlocation_id') is not None:\n microlocation = safe_query_kwargs(\n Microlocation, view_kwargs, 'microlocation_id',\n )\n query_ = query_.join(Microlocation).filter(\n Microlocation.id == microlocation.id\n )\n if view_kwargs.get('user_id') is not None:\n user = safe_query_kwargs(User, view_kwargs, 'user_id')\n query_ = (\n query_.join(User)\n .join(Speaker)\n .filter(\n (\n User.id == user.id\n or Session.speakers.any(Speaker.user_id == user.id)\n )\n )\n .distinct(*get_distinct_sort_fields(SessionSchema, Session, sort=False))\n .order_by(*get_distinct_sort_fields(SessionSchema, Session))\n )\n query_ = event_query(query_, view_kwargs)\n if view_kwargs.get('speaker_id'):\n speaker = safe_query_kwargs(Speaker, view_kwargs, 'speaker_id')\n # session-speaker :: many-to-many relationship\n query_ = Session.query.filter(Session.speakers.any(id=speaker.id))\n\n return query_\n\n view_kwargs = True\n methods = ['GET']\n schema = SessionSchema\n data_layer = {'session': db.session, 'model': Session, 'methods': {'query': query}}\n\n\nclass SessionDetail(ResourceDetail):\n \"\"\"\n Session detail by id\n \"\"\"\n\n def before_get_object(self, view_kwargs):\n \"\"\"\n before get method to get the resource id for fetching details\n :param view_kwargs:\n :return:\n \"\"\"\n if view_kwargs.get('event_identifier'):\n event = safe_query(\n Event, 'identifier', view_kwargs['event_identifier'], 'identifier'\n )\n view_kwargs['event_id'] = event.id\n\n def before_update_object(self, session, data, view_kwargs):\n \"\"\"\n before update method to verify if session is locked before updating session object\n :param event:\n :param data:\n :param view_kwargs:\n :return:\n \"\"\"\n if session.is_locked:\n if not (\n has_access('is_admin')\n or has_access('is_organizer', event_id=session.event_id)\n ):\n raise ForbiddenError(\n {'source': '/data/attributes/is-locked'},\n \"You don't have enough permissions to change this property\",\n )\n\n if session.is_locked and data.get('is_locked') != session.is_locked:\n raise ForbiddenError(\n {'source': '/data/attributes/is-locked'},\n \"Locked sessions cannot be edited\",\n )\n\n if not can_edit_after_cfs_ends(session.event_id):\n 
raise ForbiddenError(\n {'source': ''}, \"Cannot edit session after the call for speaker is ended\"\n )\n\n data['complex_field_values'] = validate_custom_form_constraints_request(\n 'session', self.resource.schema, session, data\n )\n\n def after_update_object(self, session, data, view_kwargs):\n \"\"\" Send email if session accepted or rejected \"\"\"\n\n if (\n 'state' in data\n and data.get('send_email', None)\n and (session.state == 'accepted' or session.state == 'rejected')\n ):\n\n event = session.event\n # Email for speaker\n speakers = session.speakers\n for speaker in speakers:\n frontend_url = get_settings()['frontend_url']\n link = \"{}/events/{}/sessions/{}\".format(\n frontend_url, event.identifier, session.id\n )\n if not speaker.is_email_overridden:\n send_email_session_accept_reject(speaker.email, session, link)\n send_notif_session_accept_reject(\n speaker, session.title, session.state, link, session.id\n )\n\n # Email for owner\n if session.event.get_owner():\n owner = session.event.get_owner()\n owner_email = owner.email\n frontend_url = get_settings()['frontend_url']\n link = \"{}/events/{}/sessions/{}\".format(\n frontend_url, event.identifier, session.id\n )\n send_email_session_accept_reject(owner_email, session, link)\n send_notif_session_accept_reject(\n owner, session.title, session.state, link, session.id\n )\n if 'state' in data:\n entry_count = SessionsSpeakersLink.query.filter_by(session_id=session.id)\n if entry_count.count() == 0:\n is_patch_request = False\n else:\n is_patch_request = True\n\n if is_patch_request:\n for focus_session in entry_count:\n focus_session.session_state = session.state\n db.session.commit()\n else:\n current_session = Session.query.filter_by(id=session.id).first()\n for speaker in current_session.speakers:\n session_speaker_link = SessionsSpeakersLink(\n session_state=session.state,\n session_id=session.id,\n event_id=session.event.id,\n speaker_id=speaker.id,\n )\n save_to_db(session_speaker_link, \"Session Speaker Link Saved\")\n\n decorators = (api.has_permission('is_speaker_for_session', methods=\"PATCH,DELETE\"),)\n schema = SessionSchema\n data_layer = {\n 'session': db.session,\n 'model': Session,\n 'methods': {\n 'before_update_object': before_update_object,\n 'before_get_object': before_get_object,\n 'after_update_object': after_update_object,\n },\n }\n\n\nclass SessionRelationshipRequired(ResourceRelationship):\n \"\"\"\n Session Relationship\n \"\"\"\n\n schema = SessionSchema\n decorators = (api.has_permission('is_speaker_for_session', methods=\"PATCH,DELETE\"),)\n methods = ['GET', 'PATCH']\n data_layer = {'session': db.session, 'model': Session}\n\n\nclass SessionRelationshipOptional(ResourceRelationship):\n \"\"\"\n Session Relationship\n \"\"\"\n\n schema = SessionSchema\n decorators = (api.has_permission('is_speaker_for_session', methods=\"PATCH,DELETE\"),)\n data_layer = {'session': db.session, 'model': Session}\n", "path": "app/api/sessions.py"}]} | 3,259 | 407 |
gh_patches_debug_33692 | rasdani/github-patches | git_diff | openshift__openshift-ansible-2092 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] iptables insert
The following code doesn't appear to handle the situation where the last rule in the INPUT chain is a DROP; we've witnessed the installer appending the etcd 8443 rule after the DROP.
"os_firewall_manage_iptables.py" line 155 of 273
```
# Naively assume that if the last row is a REJECT rule, then
# we can add insert our rule right before it, otherwise we
# assume that we can just append the rule.
if (last_rule_num and last_rule_target
and last_rule_target == 'REJECT'):
# insert rule
cmd = self.cmd + ['-I', self.jump_rule_chain,
str(last_rule_num)]
else:
# append rule
cmd = self.cmd + ['-A', self.jump_rule_chain]
```
Suggested Fix
```
# Naively assume that if the last row is a REJECT rule, then
# we can add insert our rule right before it, otherwise we
# assume that we can just append the rule.
if (last_rule_num and last_rule_target
            and (last_rule_target == 'REJECT' or last_rule_target == 'DROP')):
# insert rule
cmd = self.cmd + ['-I', self.jump_rule_chain,
str(last_rule_num)]
else:
# append rule
cmd = self.cmd + ['-A', self.jump_rule_chain]
cmd += ['-j', self.chain]
```
--- END ISSUE ---
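
As a runnable rendering of the reporter's suggestion (the snippet above originally used C-style `||`, which is not valid Python), the branch choice can also be expressed with a membership test. The helper below is illustrative only and is not part of the module; it just shows the intended command construction in isolation.

```
def build_jump_rule_cmd(base_cmd, jump_rule_chain, chain,
                        last_rule_num, last_rule_target):
    """Insert before a terminating REJECT or DROP rule, otherwise append."""
    if last_rule_num and last_rule_target in ('REJECT', 'DROP'):
        cmd = base_cmd + ['-I', jump_rule_chain, str(last_rule_num)]
    else:
        cmd = base_cmd + ['-A', jump_rule_chain]
    return cmd + ['-j', chain]


# A chain whose final rule is DROP now gets the jump rule inserted ahead of it
# rather than appended after it (the behaviour reported above).
print(build_jump_rule_cmd(['/usr/sbin/iptables'], 'INPUT', 'OS_FIREWALL_ALLOW',
                          last_rule_num=7, last_rule_target='DROP'))
# ['/usr/sbin/iptables', '-I', 'INPUT', '7', '-j', 'OS_FIREWALL_ALLOW']
```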
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `roles/os_firewall/library/os_firewall_manage_iptables.py`
Content:
```
1 #!/usr/bin/python
2 # -*- coding: utf-8 -*-
3 # vim: expandtab:tabstop=4:shiftwidth=4
4 # pylint: disable=fixme, missing-docstring
5 from subprocess import call, check_output
6
7 DOCUMENTATION = '''
8 ---
9 module: os_firewall_manage_iptables
10 short_description: This module manages iptables rules for a given chain
11 author: Jason DeTiberus
12 requirements: [ ]
13 '''
14 EXAMPLES = '''
15 '''
16
17
18 class IpTablesError(Exception):
19 def __init__(self, msg, cmd, exit_code, output):
20 super(IpTablesError, self).__init__(msg)
21 self.msg = msg
22 self.cmd = cmd
23 self.exit_code = exit_code
24 self.output = output
25
26
27 class IpTablesAddRuleError(IpTablesError):
28 pass
29
30
31 class IpTablesRemoveRuleError(IpTablesError):
32 pass
33
34
35 class IpTablesSaveError(IpTablesError):
36 pass
37
38
39 class IpTablesCreateChainError(IpTablesError):
40 def __init__(self, chain, msg, cmd, exit_code, output): # pylint: disable=too-many-arguments, line-too-long
41 super(IpTablesCreateChainError, self).__init__(msg, cmd, exit_code,
42 output)
43 self.chain = chain
44
45
46 class IpTablesCreateJumpRuleError(IpTablesError):
47 def __init__(self, chain, msg, cmd, exit_code, output): # pylint: disable=too-many-arguments, line-too-long
48 super(IpTablesCreateJumpRuleError, self).__init__(msg, cmd, exit_code,
49 output)
50 self.chain = chain
51
52
53 # TODO: impliment rollbacks for any events that where successful and an
54 # exception was thrown later. for example, when the chain is created
55 # successfully, but the add/remove rule fails.
56 class IpTablesManager(object): # pylint: disable=too-many-instance-attributes
57 def __init__(self, module):
58 self.module = module
59 self.ip_version = module.params['ip_version']
60 self.check_mode = module.check_mode
61 self.chain = module.params['chain']
62 self.create_jump_rule = module.params['create_jump_rule']
63 self.jump_rule_chain = module.params['jump_rule_chain']
64 self.cmd = self.gen_cmd()
65 self.save_cmd = self.gen_save_cmd()
66 self.output = []
67 self.changed = False
68
69 def save(self):
70 try:
71 self.output.append(check_output(self.save_cmd,
72 stderr=subprocess.STDOUT))
73 except subprocess.CalledProcessError as ex:
74 raise IpTablesSaveError(
75 msg="Failed to save iptables rules",
76 cmd=ex.cmd, exit_code=ex.returncode, output=ex.output)
77
78 def verify_chain(self):
79 if not self.chain_exists():
80 self.create_chain()
81 if self.create_jump_rule and not self.jump_rule_exists():
82 self.create_jump()
83
84 def add_rule(self, port, proto):
85 rule = self.gen_rule(port, proto)
86 if not self.rule_exists(rule):
87 self.verify_chain()
88
89 if self.check_mode:
90 self.changed = True
91 self.output.append("Create rule for %s %s" % (proto, port))
92 else:
93 cmd = self.cmd + ['-A'] + rule
94 try:
95 self.output.append(check_output(cmd))
96 self.changed = True
97 self.save()
98 except subprocess.CalledProcessError as ex:
99 raise IpTablesCreateChainError(
100 chain=self.chain,
101 msg="Failed to create rule for "
102 "%s %s" % (proto, port),
103 cmd=ex.cmd, exit_code=ex.returncode,
104 output=ex.output)
105
106 def remove_rule(self, port, proto):
107 rule = self.gen_rule(port, proto)
108 if self.rule_exists(rule):
109 if self.check_mode:
110 self.changed = True
111 self.output.append("Remove rule for %s %s" % (proto, port))
112 else:
113 cmd = self.cmd + ['-D'] + rule
114 try:
115 self.output.append(check_output(cmd))
116 self.changed = True
117 self.save()
118 except subprocess.CalledProcessError as ex:
119 raise IpTablesRemoveRuleError(
120 chain=self.chain,
121 msg="Failed to remove rule for %s %s" % (proto, port),
122 cmd=ex.cmd, exit_code=ex.returncode, output=ex.output)
123
124 def rule_exists(self, rule):
125 check_cmd = self.cmd + ['-C'] + rule
126 return True if call(check_cmd) == 0 else False
127
128 def gen_rule(self, port, proto):
129 return [self.chain, '-p', proto, '-m', 'state', '--state', 'NEW',
130 '-m', proto, '--dport', str(port), '-j', 'ACCEPT']
131
132 def create_jump(self):
133 if self.check_mode:
134 self.changed = True
135 self.output.append("Create jump rule for chain %s" % self.chain)
136 else:
137 try:
138 cmd = self.cmd + ['-L', self.jump_rule_chain, '--line-numbers']
139 output = check_output(cmd, stderr=subprocess.STDOUT)
140
141 # break the input rules into rows and columns
142 input_rules = [s.split() for s in output.split('\n')]
143
144 # Find the last numbered rule
145 last_rule_num = None
146 last_rule_target = None
147 for rule in input_rules[:-1]:
148 if rule:
149 try:
150 last_rule_num = int(rule[0])
151 except ValueError:
152 continue
153 last_rule_target = rule[1]
154
155 # Naively assume that if the last row is a REJECT rule, then
156 # we can add insert our rule right before it, otherwise we
157 # assume that we can just append the rule.
158 if (last_rule_num and last_rule_target
159 and last_rule_target == 'REJECT'):
160 # insert rule
161 cmd = self.cmd + ['-I', self.jump_rule_chain,
162 str(last_rule_num)]
163 else:
164 # append rule
165 cmd = self.cmd + ['-A', self.jump_rule_chain]
166 cmd += ['-j', self.chain]
167 output = check_output(cmd, stderr=subprocess.STDOUT)
168 self.changed = True
169 self.output.append(output)
170 self.save()
171 except subprocess.CalledProcessError as ex:
172 if '--line-numbers' in ex.cmd:
173 raise IpTablesCreateJumpRuleError(
174 chain=self.chain,
175 msg=("Failed to query existing " +
176 self.jump_rule_chain +
177 " rules to determine jump rule location"),
178 cmd=ex.cmd, exit_code=ex.returncode,
179 output=ex.output)
180 else:
181 raise IpTablesCreateJumpRuleError(
182 chain=self.chain,
183 msg=("Failed to create jump rule for chain " +
184 self.chain),
185 cmd=ex.cmd, exit_code=ex.returncode,
186 output=ex.output)
187
188 def create_chain(self):
189 if self.check_mode:
190 self.changed = True
191 self.output.append("Create chain %s" % self.chain)
192 else:
193 try:
194 cmd = self.cmd + ['-N', self.chain]
195 self.output.append(check_output(cmd,
196 stderr=subprocess.STDOUT))
197 self.changed = True
198 self.output.append("Successfully created chain %s" %
199 self.chain)
200 self.save()
201 except subprocess.CalledProcessError as ex:
202 raise IpTablesCreateChainError(
203 chain=self.chain,
204 msg="Failed to create chain: %s" % self.chain,
205 cmd=ex.cmd, exit_code=ex.returncode, output=ex.output
206 )
207
208 def jump_rule_exists(self):
209 cmd = self.cmd + ['-C', self.jump_rule_chain, '-j', self.chain]
210 return True if call(cmd) == 0 else False
211
212 def chain_exists(self):
213 cmd = self.cmd + ['-L', self.chain]
214 return True if call(cmd) == 0 else False
215
216 def gen_cmd(self):
217 cmd = 'iptables' if self.ip_version == 'ipv4' else 'ip6tables'
218 return ["/usr/sbin/%s" % cmd]
219
220 def gen_save_cmd(self): # pylint: disable=no-self-use
221 return ['/usr/libexec/iptables/iptables.init', 'save']
222
223
224 def main():
225 module = AnsibleModule(
226 argument_spec=dict(
227 name=dict(required=True),
228 action=dict(required=True, choices=['add', 'remove',
229 'verify_chain']),
230 chain=dict(required=False, default='OS_FIREWALL_ALLOW'),
231 create_jump_rule=dict(required=False, type='bool', default=True),
232 jump_rule_chain=dict(required=False, default='INPUT'),
233 protocol=dict(required=False, choices=['tcp', 'udp']),
234 port=dict(required=False, type='int'),
235 ip_version=dict(required=False, default='ipv4',
236 choices=['ipv4', 'ipv6']),
237 ),
238 supports_check_mode=True
239 )
240
241 action = module.params['action']
242 protocol = module.params['protocol']
243 port = module.params['port']
244
245 if action in ['add', 'remove']:
246 if not protocol:
247 error = "protocol is required when action is %s" % action
248 module.fail_json(msg=error)
249 if not port:
250 error = "port is required when action is %s" % action
251 module.fail_json(msg=error)
252
253 iptables_manager = IpTablesManager(module)
254
255 try:
256 if action == 'add':
257 iptables_manager.add_rule(port, protocol)
258 elif action == 'remove':
259 iptables_manager.remove_rule(port, protocol)
260 elif action == 'verify_chain':
261 iptables_manager.verify_chain()
262 except IpTablesError as ex:
263 module.fail_json(msg=ex.msg)
264
265 return module.exit_json(changed=iptables_manager.changed,
266 output=iptables_manager.output)
267
268
269 # pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import
270 # import module snippets
271 from ansible.module_utils.basic import *
272 if __name__ == '__main__':
273 main()
274
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/roles/os_firewall/library/os_firewall_manage_iptables.py b/roles/os_firewall/library/os_firewall_manage_iptables.py
--- a/roles/os_firewall/library/os_firewall_manage_iptables.py
+++ b/roles/os_firewall/library/os_firewall_manage_iptables.py
@@ -37,14 +37,14 @@
class IpTablesCreateChainError(IpTablesError):
- def __init__(self, chain, msg, cmd, exit_code, output): # pylint: disable=too-many-arguments, line-too-long
+ def __init__(self, chain, msg, cmd, exit_code, output): # pylint: disable=too-many-arguments, line-too-long, redefined-outer-name
super(IpTablesCreateChainError, self).__init__(msg, cmd, exit_code,
output)
self.chain = chain
class IpTablesCreateJumpRuleError(IpTablesError):
- def __init__(self, chain, msg, cmd, exit_code, output): # pylint: disable=too-many-arguments, line-too-long
+ def __init__(self, chain, msg, cmd, exit_code, output): # pylint: disable=too-many-arguments, line-too-long, redefined-outer-name
super(IpTablesCreateJumpRuleError, self).__init__(msg, cmd, exit_code,
output)
self.chain = chain
@@ -152,11 +152,11 @@
continue
last_rule_target = rule[1]
- # Naively assume that if the last row is a REJECT rule, then
- # we can add insert our rule right before it, otherwise we
+ # Naively assume that if the last row is a REJECT or DROP rule,
+ # then we can insert our rule right before it, otherwise we
# assume that we can just append the rule.
if (last_rule_num and last_rule_target
- and last_rule_target == 'REJECT'):
+ and last_rule_target in ['REJECT', 'DROP']):
# insert rule
cmd = self.cmd + ['-I', self.jump_rule_chain,
str(last_rule_num)]
| {"golden_diff": "diff --git a/roles/os_firewall/library/os_firewall_manage_iptables.py b/roles/os_firewall/library/os_firewall_manage_iptables.py\n--- a/roles/os_firewall/library/os_firewall_manage_iptables.py\n+++ b/roles/os_firewall/library/os_firewall_manage_iptables.py\n@@ -37,14 +37,14 @@\n \n \n class IpTablesCreateChainError(IpTablesError):\n- def __init__(self, chain, msg, cmd, exit_code, output): # pylint: disable=too-many-arguments, line-too-long\n+ def __init__(self, chain, msg, cmd, exit_code, output): # pylint: disable=too-many-arguments, line-too-long, redefined-outer-name\n super(IpTablesCreateChainError, self).__init__(msg, cmd, exit_code,\n output)\n self.chain = chain\n \n \n class IpTablesCreateJumpRuleError(IpTablesError):\n- def __init__(self, chain, msg, cmd, exit_code, output): # pylint: disable=too-many-arguments, line-too-long\n+ def __init__(self, chain, msg, cmd, exit_code, output): # pylint: disable=too-many-arguments, line-too-long, redefined-outer-name\n super(IpTablesCreateJumpRuleError, self).__init__(msg, cmd, exit_code,\n output)\n self.chain = chain\n@@ -152,11 +152,11 @@\n continue\n last_rule_target = rule[1]\n \n- # Naively assume that if the last row is a REJECT rule, then\n- # we can add insert our rule right before it, otherwise we\n+ # Naively assume that if the last row is a REJECT or DROP rule,\n+ # then we can insert our rule right before it, otherwise we\n # assume that we can just append the rule.\n if (last_rule_num and last_rule_target\n- and last_rule_target == 'REJECT'):\n+ and last_rule_target in ['REJECT', 'DROP']):\n # insert rule\n cmd = self.cmd + ['-I', self.jump_rule_chain,\n str(last_rule_num)]\n", "issue": "[BUG] iptable insert \nThe following doesn't appear to handle a situation where the last rule in the INPUT chain is a DROP and we've witnessed behavior where the installer is appending the etcd 8443 rule after the DROP. 
\n\n\"os_firewall_manage_iptables.py\" line 155 of 273\n\n```\n # Naively assume that if the last row is a REJECT rule, then\n # we can add insert our rule right before it, otherwise we\n # assume that we can just append the rule.\n if (last_rule_num and last_rule_target\n and last_rule_target == 'REJECT'):\n # insert rule\n cmd = self.cmd + ['-I', self.jump_rule_chain,\n str(last_rule_num)]\n else:\n # append rule\n cmd = self.cmd + ['-A', self.jump_rule_chain]\n```\n\nSuggested Fix\n\n```\n # Naively assume that if the last row is a REJECT rule, then\n # we can add insert our rule right before it, otherwise we\n # assume that we can just append the rule.\n if (last_rule_num and last_rule_target\n and ( last_rule_target == 'REJECT' || last_rule_target == 'DROP')):\n # insert rule\n cmd = self.cmd + ['-I', self.jump_rule_chain,\n str(last_rule_num)]\n else:\n # append rule\n cmd = self.cmd + ['-A', self.jump_rule_chain]\n cmd += ['-j', self.chain]\n```\n\n", "before_files": [{"content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n# vim: expandtab:tabstop=4:shiftwidth=4\n# pylint: disable=fixme, missing-docstring\nfrom subprocess import call, check_output\n\nDOCUMENTATION = '''\n---\nmodule: os_firewall_manage_iptables\nshort_description: This module manages iptables rules for a given chain\nauthor: Jason DeTiberus\nrequirements: [ ]\n'''\nEXAMPLES = '''\n'''\n\n\nclass IpTablesError(Exception):\n def __init__(self, msg, cmd, exit_code, output):\n super(IpTablesError, self).__init__(msg)\n self.msg = msg\n self.cmd = cmd\n self.exit_code = exit_code\n self.output = output\n\n\nclass IpTablesAddRuleError(IpTablesError):\n pass\n\n\nclass IpTablesRemoveRuleError(IpTablesError):\n pass\n\n\nclass IpTablesSaveError(IpTablesError):\n pass\n\n\nclass IpTablesCreateChainError(IpTablesError):\n def __init__(self, chain, msg, cmd, exit_code, output): # pylint: disable=too-many-arguments, line-too-long\n super(IpTablesCreateChainError, self).__init__(msg, cmd, exit_code,\n output)\n self.chain = chain\n\n\nclass IpTablesCreateJumpRuleError(IpTablesError):\n def __init__(self, chain, msg, cmd, exit_code, output): # pylint: disable=too-many-arguments, line-too-long\n super(IpTablesCreateJumpRuleError, self).__init__(msg, cmd, exit_code,\n output)\n self.chain = chain\n\n\n# TODO: impliment rollbacks for any events that where successful and an\n# exception was thrown later. 
for example, when the chain is created\n# successfully, but the add/remove rule fails.\nclass IpTablesManager(object): # pylint: disable=too-many-instance-attributes\n def __init__(self, module):\n self.module = module\n self.ip_version = module.params['ip_version']\n self.check_mode = module.check_mode\n self.chain = module.params['chain']\n self.create_jump_rule = module.params['create_jump_rule']\n self.jump_rule_chain = module.params['jump_rule_chain']\n self.cmd = self.gen_cmd()\n self.save_cmd = self.gen_save_cmd()\n self.output = []\n self.changed = False\n\n def save(self):\n try:\n self.output.append(check_output(self.save_cmd,\n stderr=subprocess.STDOUT))\n except subprocess.CalledProcessError as ex:\n raise IpTablesSaveError(\n msg=\"Failed to save iptables rules\",\n cmd=ex.cmd, exit_code=ex.returncode, output=ex.output)\n\n def verify_chain(self):\n if not self.chain_exists():\n self.create_chain()\n if self.create_jump_rule and not self.jump_rule_exists():\n self.create_jump()\n\n def add_rule(self, port, proto):\n rule = self.gen_rule(port, proto)\n if not self.rule_exists(rule):\n self.verify_chain()\n\n if self.check_mode:\n self.changed = True\n self.output.append(\"Create rule for %s %s\" % (proto, port))\n else:\n cmd = self.cmd + ['-A'] + rule\n try:\n self.output.append(check_output(cmd))\n self.changed = True\n self.save()\n except subprocess.CalledProcessError as ex:\n raise IpTablesCreateChainError(\n chain=self.chain,\n msg=\"Failed to create rule for \"\n \"%s %s\" % (proto, port),\n cmd=ex.cmd, exit_code=ex.returncode,\n output=ex.output)\n\n def remove_rule(self, port, proto):\n rule = self.gen_rule(port, proto)\n if self.rule_exists(rule):\n if self.check_mode:\n self.changed = True\n self.output.append(\"Remove rule for %s %s\" % (proto, port))\n else:\n cmd = self.cmd + ['-D'] + rule\n try:\n self.output.append(check_output(cmd))\n self.changed = True\n self.save()\n except subprocess.CalledProcessError as ex:\n raise IpTablesRemoveRuleError(\n chain=self.chain,\n msg=\"Failed to remove rule for %s %s\" % (proto, port),\n cmd=ex.cmd, exit_code=ex.returncode, output=ex.output)\n\n def rule_exists(self, rule):\n check_cmd = self.cmd + ['-C'] + rule\n return True if call(check_cmd) == 0 else False\n\n def gen_rule(self, port, proto):\n return [self.chain, '-p', proto, '-m', 'state', '--state', 'NEW',\n '-m', proto, '--dport', str(port), '-j', 'ACCEPT']\n\n def create_jump(self):\n if self.check_mode:\n self.changed = True\n self.output.append(\"Create jump rule for chain %s\" % self.chain)\n else:\n try:\n cmd = self.cmd + ['-L', self.jump_rule_chain, '--line-numbers']\n output = check_output(cmd, stderr=subprocess.STDOUT)\n\n # break the input rules into rows and columns\n input_rules = [s.split() for s in output.split('\\n')]\n\n # Find the last numbered rule\n last_rule_num = None\n last_rule_target = None\n for rule in input_rules[:-1]:\n if rule:\n try:\n last_rule_num = int(rule[0])\n except ValueError:\n continue\n last_rule_target = rule[1]\n\n # Naively assume that if the last row is a REJECT rule, then\n # we can add insert our rule right before it, otherwise we\n # assume that we can just append the rule.\n if (last_rule_num and last_rule_target\n and last_rule_target == 'REJECT'):\n # insert rule\n cmd = self.cmd + ['-I', self.jump_rule_chain,\n str(last_rule_num)]\n else:\n # append rule\n cmd = self.cmd + ['-A', self.jump_rule_chain]\n cmd += ['-j', self.chain]\n output = check_output(cmd, stderr=subprocess.STDOUT)\n self.changed = True\n 
self.output.append(output)\n self.save()\n except subprocess.CalledProcessError as ex:\n if '--line-numbers' in ex.cmd:\n raise IpTablesCreateJumpRuleError(\n chain=self.chain,\n msg=(\"Failed to query existing \" +\n self.jump_rule_chain +\n \" rules to determine jump rule location\"),\n cmd=ex.cmd, exit_code=ex.returncode,\n output=ex.output)\n else:\n raise IpTablesCreateJumpRuleError(\n chain=self.chain,\n msg=(\"Failed to create jump rule for chain \" +\n self.chain),\n cmd=ex.cmd, exit_code=ex.returncode,\n output=ex.output)\n\n def create_chain(self):\n if self.check_mode:\n self.changed = True\n self.output.append(\"Create chain %s\" % self.chain)\n else:\n try:\n cmd = self.cmd + ['-N', self.chain]\n self.output.append(check_output(cmd,\n stderr=subprocess.STDOUT))\n self.changed = True\n self.output.append(\"Successfully created chain %s\" %\n self.chain)\n self.save()\n except subprocess.CalledProcessError as ex:\n raise IpTablesCreateChainError(\n chain=self.chain,\n msg=\"Failed to create chain: %s\" % self.chain,\n cmd=ex.cmd, exit_code=ex.returncode, output=ex.output\n )\n\n def jump_rule_exists(self):\n cmd = self.cmd + ['-C', self.jump_rule_chain, '-j', self.chain]\n return True if call(cmd) == 0 else False\n\n def chain_exists(self):\n cmd = self.cmd + ['-L', self.chain]\n return True if call(cmd) == 0 else False\n\n def gen_cmd(self):\n cmd = 'iptables' if self.ip_version == 'ipv4' else 'ip6tables'\n return [\"/usr/sbin/%s\" % cmd]\n\n def gen_save_cmd(self): # pylint: disable=no-self-use\n return ['/usr/libexec/iptables/iptables.init', 'save']\n\n\ndef main():\n module = AnsibleModule(\n argument_spec=dict(\n name=dict(required=True),\n action=dict(required=True, choices=['add', 'remove',\n 'verify_chain']),\n chain=dict(required=False, default='OS_FIREWALL_ALLOW'),\n create_jump_rule=dict(required=False, type='bool', default=True),\n jump_rule_chain=dict(required=False, default='INPUT'),\n protocol=dict(required=False, choices=['tcp', 'udp']),\n port=dict(required=False, type='int'),\n ip_version=dict(required=False, default='ipv4',\n choices=['ipv4', 'ipv6']),\n ),\n supports_check_mode=True\n )\n\n action = module.params['action']\n protocol = module.params['protocol']\n port = module.params['port']\n\n if action in ['add', 'remove']:\n if not protocol:\n error = \"protocol is required when action is %s\" % action\n module.fail_json(msg=error)\n if not port:\n error = \"port is required when action is %s\" % action\n module.fail_json(msg=error)\n\n iptables_manager = IpTablesManager(module)\n\n try:\n if action == 'add':\n iptables_manager.add_rule(port, protocol)\n elif action == 'remove':\n iptables_manager.remove_rule(port, protocol)\n elif action == 'verify_chain':\n iptables_manager.verify_chain()\n except IpTablesError as ex:\n module.fail_json(msg=ex.msg)\n\n return module.exit_json(changed=iptables_manager.changed,\n output=iptables_manager.output)\n\n\n# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import\n# import module snippets\nfrom ansible.module_utils.basic import *\nif __name__ == '__main__':\n main()\n", "path": "roles/os_firewall/library/os_firewall_manage_iptables.py"}], "after_files": [{"content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n# vim: expandtab:tabstop=4:shiftwidth=4\n# pylint: disable=fixme, missing-docstring\nfrom subprocess import call, check_output\n\nDOCUMENTATION = '''\n---\nmodule: os_firewall_manage_iptables\nshort_description: This module manages iptables rules for a given chain\nauthor: Jason 
DeTiberus\nrequirements: [ ]\n'''\nEXAMPLES = '''\n'''\n\n\nclass IpTablesError(Exception):\n def __init__(self, msg, cmd, exit_code, output):\n super(IpTablesError, self).__init__(msg)\n self.msg = msg\n self.cmd = cmd\n self.exit_code = exit_code\n self.output = output\n\n\nclass IpTablesAddRuleError(IpTablesError):\n pass\n\n\nclass IpTablesRemoveRuleError(IpTablesError):\n pass\n\n\nclass IpTablesSaveError(IpTablesError):\n pass\n\n\nclass IpTablesCreateChainError(IpTablesError):\n def __init__(self, chain, msg, cmd, exit_code, output): # pylint: disable=too-many-arguments, line-too-long, redefined-outer-name\n super(IpTablesCreateChainError, self).__init__(msg, cmd, exit_code,\n output)\n self.chain = chain\n\n\nclass IpTablesCreateJumpRuleError(IpTablesError):\n def __init__(self, chain, msg, cmd, exit_code, output): # pylint: disable=too-many-arguments, line-too-long, redefined-outer-name\n super(IpTablesCreateJumpRuleError, self).__init__(msg, cmd, exit_code,\n output)\n self.chain = chain\n\n\n# TODO: impliment rollbacks for any events that where successful and an\n# exception was thrown later. for example, when the chain is created\n# successfully, but the add/remove rule fails.\nclass IpTablesManager(object): # pylint: disable=too-many-instance-attributes\n def __init__(self, module):\n self.module = module\n self.ip_version = module.params['ip_version']\n self.check_mode = module.check_mode\n self.chain = module.params['chain']\n self.create_jump_rule = module.params['create_jump_rule']\n self.jump_rule_chain = module.params['jump_rule_chain']\n self.cmd = self.gen_cmd()\n self.save_cmd = self.gen_save_cmd()\n self.output = []\n self.changed = False\n\n def save(self):\n try:\n self.output.append(check_output(self.save_cmd,\n stderr=subprocess.STDOUT))\n except subprocess.CalledProcessError as ex:\n raise IpTablesSaveError(\n msg=\"Failed to save iptables rules\",\n cmd=ex.cmd, exit_code=ex.returncode, output=ex.output)\n\n def verify_chain(self):\n if not self.chain_exists():\n self.create_chain()\n if self.create_jump_rule and not self.jump_rule_exists():\n self.create_jump()\n\n def add_rule(self, port, proto):\n rule = self.gen_rule(port, proto)\n if not self.rule_exists(rule):\n self.verify_chain()\n\n if self.check_mode:\n self.changed = True\n self.output.append(\"Create rule for %s %s\" % (proto, port))\n else:\n cmd = self.cmd + ['-A'] + rule\n try:\n self.output.append(check_output(cmd))\n self.changed = True\n self.save()\n except subprocess.CalledProcessError as ex:\n raise IpTablesCreateChainError(\n chain=self.chain,\n msg=\"Failed to create rule for \"\n \"%s %s\" % (proto, port),\n cmd=ex.cmd, exit_code=ex.returncode,\n output=ex.output)\n\n def remove_rule(self, port, proto):\n rule = self.gen_rule(port, proto)\n if self.rule_exists(rule):\n if self.check_mode:\n self.changed = True\n self.output.append(\"Remove rule for %s %s\" % (proto, port))\n else:\n cmd = self.cmd + ['-D'] + rule\n try:\n self.output.append(check_output(cmd))\n self.changed = True\n self.save()\n except subprocess.CalledProcessError as ex:\n raise IpTablesRemoveRuleError(\n chain=self.chain,\n msg=\"Failed to remove rule for %s %s\" % (proto, port),\n cmd=ex.cmd, exit_code=ex.returncode, output=ex.output)\n\n def rule_exists(self, rule):\n check_cmd = self.cmd + ['-C'] + rule\n return True if call(check_cmd) == 0 else False\n\n def gen_rule(self, port, proto):\n return [self.chain, '-p', proto, '-m', 'state', '--state', 'NEW',\n '-m', proto, '--dport', str(port), '-j', 'ACCEPT']\n\n def 
create_jump(self):\n if self.check_mode:\n self.changed = True\n self.output.append(\"Create jump rule for chain %s\" % self.chain)\n else:\n try:\n cmd = self.cmd + ['-L', self.jump_rule_chain, '--line-numbers']\n output = check_output(cmd, stderr=subprocess.STDOUT)\n\n # break the input rules into rows and columns\n input_rules = [s.split() for s in output.split('\\n')]\n\n # Find the last numbered rule\n last_rule_num = None\n last_rule_target = None\n for rule in input_rules[:-1]:\n if rule:\n try:\n last_rule_num = int(rule[0])\n except ValueError:\n continue\n last_rule_target = rule[1]\n\n # Naively assume that if the last row is a REJECT or DROP rule,\n # then we can insert our rule right before it, otherwise we\n # assume that we can just append the rule.\n if (last_rule_num and last_rule_target\n and last_rule_target in ['REJECT', 'DROP']):\n # insert rule\n cmd = self.cmd + ['-I', self.jump_rule_chain,\n str(last_rule_num)]\n else:\n # append rule\n cmd = self.cmd + ['-A', self.jump_rule_chain]\n cmd += ['-j', self.chain]\n output = check_output(cmd, stderr=subprocess.STDOUT)\n self.changed = True\n self.output.append(output)\n self.save()\n except subprocess.CalledProcessError as ex:\n if '--line-numbers' in ex.cmd:\n raise IpTablesCreateJumpRuleError(\n chain=self.chain,\n msg=(\"Failed to query existing \" +\n self.jump_rule_chain +\n \" rules to determine jump rule location\"),\n cmd=ex.cmd, exit_code=ex.returncode,\n output=ex.output)\n else:\n raise IpTablesCreateJumpRuleError(\n chain=self.chain,\n msg=(\"Failed to create jump rule for chain \" +\n self.chain),\n cmd=ex.cmd, exit_code=ex.returncode,\n output=ex.output)\n\n def create_chain(self):\n if self.check_mode:\n self.changed = True\n self.output.append(\"Create chain %s\" % self.chain)\n else:\n try:\n cmd = self.cmd + ['-N', self.chain]\n self.output.append(check_output(cmd,\n stderr=subprocess.STDOUT))\n self.changed = True\n self.output.append(\"Successfully created chain %s\" %\n self.chain)\n self.save()\n except subprocess.CalledProcessError as ex:\n raise IpTablesCreateChainError(\n chain=self.chain,\n msg=\"Failed to create chain: %s\" % self.chain,\n cmd=ex.cmd, exit_code=ex.returncode, output=ex.output\n )\n\n def jump_rule_exists(self):\n cmd = self.cmd + ['-C', self.jump_rule_chain, '-j', self.chain]\n return True if call(cmd) == 0 else False\n\n def chain_exists(self):\n cmd = self.cmd + ['-L', self.chain]\n return True if call(cmd) == 0 else False\n\n def gen_cmd(self):\n cmd = 'iptables' if self.ip_version == 'ipv4' else 'ip6tables'\n return [\"/usr/sbin/%s\" % cmd]\n\n def gen_save_cmd(self): # pylint: disable=no-self-use\n return ['/usr/libexec/iptables/iptables.init', 'save']\n\n\ndef main():\n module = AnsibleModule(\n argument_spec=dict(\n name=dict(required=True),\n action=dict(required=True, choices=['add', 'remove',\n 'verify_chain']),\n chain=dict(required=False, default='OS_FIREWALL_ALLOW'),\n create_jump_rule=dict(required=False, type='bool', default=True),\n jump_rule_chain=dict(required=False, default='INPUT'),\n protocol=dict(required=False, choices=['tcp', 'udp']),\n port=dict(required=False, type='int'),\n ip_version=dict(required=False, default='ipv4',\n choices=['ipv4', 'ipv6']),\n ),\n supports_check_mode=True\n )\n\n action = module.params['action']\n protocol = module.params['protocol']\n port = module.params['port']\n\n if action in ['add', 'remove']:\n if not protocol:\n error = \"protocol is required when action is %s\" % action\n module.fail_json(msg=error)\n if not port:\n 
error = \"port is required when action is %s\" % action\n module.fail_json(msg=error)\n\n iptables_manager = IpTablesManager(module)\n\n try:\n if action == 'add':\n iptables_manager.add_rule(port, protocol)\n elif action == 'remove':\n iptables_manager.remove_rule(port, protocol)\n elif action == 'verify_chain':\n iptables_manager.verify_chain()\n except IpTablesError as ex:\n module.fail_json(msg=ex.msg)\n\n return module.exit_json(changed=iptables_manager.changed,\n output=iptables_manager.output)\n\n\n# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import\n# import module snippets\nfrom ansible.module_utils.basic import *\nif __name__ == '__main__':\n main()\n", "path": "roles/os_firewall/library/os_firewall_manage_iptables.py"}]} | 3,457 | 481 |
gh_patches_debug_23517 | rasdani/github-patches | git_diff | hydroshare__hydroshare-2550 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Can't add user via admin panel
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `hs_core/admin.py`
Content:
```
1 from mezzanine.pages.admin import PageAdmin
2 from django.contrib.gis import admin
3 from django.contrib.contenttypes.admin import GenericTabularInline
4 from .models import *
5
6 class InlineResourceFiles(GenericTabularInline):
7 model = ResourceFile
8
9 class GenericResourceAdmin(PageAdmin):
10 inlines = PageAdmin.inlines + [InlineResourceFiles]
11
12 admin.site.register(GenericResource, GenericResourceAdmin)
13
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/hs_core/admin.py b/hs_core/admin.py
--- a/hs_core/admin.py
+++ b/hs_core/admin.py
@@ -1,12 +1,35 @@
-from mezzanine.pages.admin import PageAdmin
+from django import forms
+from django.contrib.auth.admin import UserAdmin
+from django.contrib.auth.forms import UserCreationForm
+from django.contrib.auth.models import User
from django.contrib.gis import admin
from django.contrib.contenttypes.admin import GenericTabularInline
+from django.utils.translation import ugettext_lazy as _
+
+from mezzanine.pages.admin import PageAdmin
+
from .models import *
+
+class UserCreationFormExtended(UserCreationForm):
+ def __init__(self, *args, **kwargs):
+ super(UserCreationFormExtended, self).__init__(*args, **kwargs)
+ self.fields['email'] = forms.EmailField(label=_("E-mail"), max_length=75)
+
+UserAdmin.add_form = UserCreationFormExtended
+UserAdmin.add_fieldsets = (
+ (None, {
+ 'classes': ('wide',),
+ 'fields': ('email', 'username', 'password1', 'password2',)
+ }),
+)
+
class InlineResourceFiles(GenericTabularInline):
model = ResourceFile
class GenericResourceAdmin(PageAdmin):
inlines = PageAdmin.inlines + [InlineResourceFiles]
+admin.site.unregister(User)
+admin.site.register(User, UserAdmin)
admin.site.register(GenericResource, GenericResourceAdmin)
| {"golden_diff": "diff --git a/hs_core/admin.py b/hs_core/admin.py\n--- a/hs_core/admin.py\n+++ b/hs_core/admin.py\n@@ -1,12 +1,35 @@\n-from mezzanine.pages.admin import PageAdmin\n+from django import forms\n+from django.contrib.auth.admin import UserAdmin\n+from django.contrib.auth.forms import UserCreationForm\n+from django.contrib.auth.models import User\n from django.contrib.gis import admin\n from django.contrib.contenttypes.admin import GenericTabularInline\n+from django.utils.translation import ugettext_lazy as _\n+\n+from mezzanine.pages.admin import PageAdmin\n+\n from .models import *\n \n+\n+class UserCreationFormExtended(UserCreationForm):\n+ def __init__(self, *args, **kwargs):\n+ super(UserCreationFormExtended, self).__init__(*args, **kwargs)\n+ self.fields['email'] = forms.EmailField(label=_(\"E-mail\"), max_length=75)\n+\n+UserAdmin.add_form = UserCreationFormExtended\n+UserAdmin.add_fieldsets = (\n+ (None, {\n+ 'classes': ('wide',),\n+ 'fields': ('email', 'username', 'password1', 'password2',)\n+ }),\n+)\n+\n class InlineResourceFiles(GenericTabularInline):\n model = ResourceFile\n \n class GenericResourceAdmin(PageAdmin):\n inlines = PageAdmin.inlines + [InlineResourceFiles]\n \n+admin.site.unregister(User)\n+admin.site.register(User, UserAdmin)\n admin.site.register(GenericResource, GenericResourceAdmin)\n", "issue": "Can't add user via admin panel\n\n", "before_files": [{"content": "from mezzanine.pages.admin import PageAdmin\nfrom django.contrib.gis import admin\nfrom django.contrib.contenttypes.admin import GenericTabularInline\nfrom .models import *\n\nclass InlineResourceFiles(GenericTabularInline):\n model = ResourceFile\n\nclass GenericResourceAdmin(PageAdmin):\n inlines = PageAdmin.inlines + [InlineResourceFiles]\n\nadmin.site.register(GenericResource, GenericResourceAdmin)\n", "path": "hs_core/admin.py"}], "after_files": [{"content": "from django import forms\nfrom django.contrib.auth.admin import UserAdmin\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.contrib.auth.models import User\nfrom django.contrib.gis import admin\nfrom django.contrib.contenttypes.admin import GenericTabularInline\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom mezzanine.pages.admin import PageAdmin\n\nfrom .models import *\n\n\nclass UserCreationFormExtended(UserCreationForm):\n def __init__(self, *args, **kwargs):\n super(UserCreationFormExtended, self).__init__(*args, **kwargs)\n self.fields['email'] = forms.EmailField(label=_(\"E-mail\"), max_length=75)\n\nUserAdmin.add_form = UserCreationFormExtended\nUserAdmin.add_fieldsets = (\n (None, {\n 'classes': ('wide',),\n 'fields': ('email', 'username', 'password1', 'password2',)\n }),\n)\n\nclass InlineResourceFiles(GenericTabularInline):\n model = ResourceFile\n\nclass GenericResourceAdmin(PageAdmin):\n inlines = PageAdmin.inlines + [InlineResourceFiles]\n\nadmin.site.unregister(User)\nadmin.site.register(User, UserAdmin)\nadmin.site.register(GenericResource, GenericResourceAdmin)\n", "path": "hs_core/admin.py"}]} | 369 | 328 |
gh_patches_debug_36251 | rasdani/github-patches | git_diff | electricitymaps__electricitymaps-contrib-2194 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Data for region Canaries Islands
Hello, I had a look at https://www.electricitymap.org/ , it is quite interesting and a very good job! But there is a mistake for the Canary Islands: you indicate generation from gas, but the LNG terminals have not been built and commissioned yet! The mistake probably comes from a misunderstanding of the data published by Red Electrica de España: the amounts they report as "turbina de gas" and "ciclo combinado" refer to production from open-cycle and combined-cycle gas turbines, but these plants are still operated on liquid fuel, awaiting natural gas availability!
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `parsers/ES_CN.py`
Content:
```
1 #!/usr/bin/env python3
2
3 import logging
4
5 # The arrow library is used to handle datetimes
6 from arrow import get
7 # The request library is used to fetch content through HTTP
8 from requests import Session
9 from ree import (ElHierro, GranCanaria, Gomera, LanzaroteFuerteventura,
10 LaPalma, Tenerife)
11 from .lib.exceptions import ParserException
12 from .lib.validation import validate
13
14
15 # Minimum valid zone demand. This is used to eliminate some cases
16 # where generation for one or more modes is obviously missing.
17 FLOORS = {
18 'ES-CN-FVLZ': 50,
19 'ES-CN-GC': 150,
20 'ES-CN-IG': 3,
21 'ES-CN-LP': 10,
22 'ES-CN-TE': 150,
23 'ES-CN-HI': 2
24 }
25
26 # TODO: Remove verify SSL config when working without it.
27 def fetch_island_data(zone_key, session):
28 if zone_key == 'ES-CN-FVLZ':
29 lanzarote_fuerteventura_data = LanzaroteFuerteventura(session, verify=False).get_all()
30 if not lanzarote_fuerteventura_data:
31 raise ParserException(zone_key, "LanzaroteFuerteventura not response")
32 else:
33 return lanzarote_fuerteventura_data
34 elif zone_key == 'ES-CN-GC':
35 gran_canaria_data = GranCanaria(session, verify=False).get_all()
36 if not gran_canaria_data:
37 raise ParserException(zone_key, "GranCanaria not response")
38 else:
39 return gran_canaria_data
40 elif zone_key == 'ES-CN-IG':
41 gomera_data = Gomera(session, verify=False).get_all()
42 if not gomera_data:
43 raise ParserException(zone_key, "Gomera not response")
44 else:
45 return gomera_data
46 elif zone_key == 'ES-CN-LP':
47 la_palma_data = LaPalma(session, verify=False).get_all()
48 if not la_palma_data:
49 raise ParserException(zone_key, "LaPalma not response")
50 else:
51 return la_palma_data
52 elif zone_key == 'ES-CN-TE':
53 tenerife_data = Tenerife(session, verify=False).get_all()
54 if not tenerife_data:
55 raise ParserException(zone_key, "Tenerife not response")
56 else:
57 return tenerife_data
58 elif zone_key == 'ES-CN-HI':
59 el_hierro_data = ElHierro(session, verify=False).get_all()
60 if not el_hierro_data:
61 raise ParserException(zone_key, "ElHierro not response")
62 else:
63 return el_hierro_data
64 else:
65 raise ParserException(zone_key, 'Can\'t read this country code {0}'.format(zone_key))
66
67
68 def fetch_consumption(zone_key='ES-CN', session=None, target_datetime=None, logger=None):
69 if target_datetime:
70 raise NotImplementedError('This parser is not yet able to parse past dates')
71
72 ses = session or Session()
73 island_data = fetch_island_data(zone_key, ses)
74 data = []
75 for response in island_data:
76 response_data = {
77 'zoneKey': zone_key,
78 'datetime': get(response.timestamp).datetime,
79 'consumption': response.demand,
80 'source': 'demanda.ree.es'
81 }
82
83 data.append(response_data)
84
85 return data
86
87
88 def fetch_production(zone_key, session=None, target_datetime=None,
89 logger=logging.getLogger(__name__)):
90 if target_datetime:
91 raise NotImplementedError('This parser is not yet able to parse past dates')
92
93 ses = session or Session()
94 island_data = fetch_island_data(zone_key, ses)
95 data = []
96
97 if zone_key == 'ES-CN-HI':
98 for response in island_data:
99 if response.production() > 0:
100 response_data = {
101 'zoneKey': zone_key,
102 'datetime': get(response.timestamp).datetime,
103 'production': {
104 'coal': 0.0,
105 'gas': round(response.gas + response.combined, 2),
106 'solar': round(response.solar, 2),
107 'oil': round(response.vapor + response.diesel, 2),
108 'wind': round(response.wind, 2),
109 'hydro': 0.0,
110 'biomass': 0.0,
111 'nuclear': 0.0,
112 'geothermal': 0.0
113 },
114 'storage': {
115 'hydro': round(-response.hydraulic, 2),
116 'battery': 0.0
117 },
118 'source': 'demanda.ree.es',
119 }
120 response_data = validate(response_data, logger,
121 floor=FLOORS[zone_key])
122
123 if response_data:
124 # append if valid
125 data.append(response_data)
126
127 else:
128 for response in island_data:
129 if response.production() > 0:
130 response_data = {
131 'zoneKey': zone_key,
132 'datetime': get(response.timestamp).datetime,
133 'production': {
134 'coal': 0.0,
135 'gas': round(response.gas + response.combined, 2),
136 'solar': round(response.solar, 2),
137 'oil': round(response.vapor + response.diesel, 2),
138 'wind': round(response.wind, 2),
139 'hydro': round(response.hydraulic, 2),
140 'biomass': 0.0,
141 'nuclear': 0.0,
142 'geothermal': 0.0
143 },
144 'storage': {
145 'hydro': 0.0,
146 'battery': 0.0
147 },
148 'source': 'demanda.ree.es',
149 }
150 response_data = validate(response_data, logger,
151 floor=FLOORS[zone_key])
152
153 if response_data:
154 # append if valid
155 data.append(response_data)
156
157 return data
158
159
160 if __name__ == '__main__':
161 session = Session
162 print("# ES-CN-FVLZ")
163 print(fetch_consumption('ES-CN-FVLZ', session))
164 print(fetch_production('ES-CN-FVLZ', session))
165 print("# ES-CN-GC")
166 print(fetch_consumption('ES-CN-GC', session))
167 print(fetch_production('ES-CN-GC', session))
168 print("# ES-CN-IG")
169 print(fetch_consumption('ES-CN-IG', session))
170 print(fetch_production('ES-CN-IG', session))
171 print("# ES-CN-LP")
172 print(fetch_consumption('ES-CN-LP', session))
173 print(fetch_production('ES-CN-LP', session))
174 print("# ES-CN-TE")
175 print(fetch_consumption('ES-CN-TE', session))
176 print(fetch_production('ES-CN-TE', session))
177 print("# ES-CN-HI")
178 print(fetch_consumption('ES-CN-HI', session))
179 print(fetch_production('ES-CN-HI', session))
180
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/parsers/ES_CN.py b/parsers/ES_CN.py
--- a/parsers/ES_CN.py
+++ b/parsers/ES_CN.py
@@ -23,6 +23,7 @@
'ES-CN-HI': 2
}
+
# TODO: Remove verify SSL config when working without it.
def fetch_island_data(zone_key, session):
if zone_key == 'ES-CN-FVLZ':
@@ -94,6 +95,10 @@
island_data = fetch_island_data(zone_key, ses)
data = []
+ # NOTE the LNG terminals are not built yet, so power generated by "gas" or "combined" is actually using oil.
+ # Recheck this every 6 months and move to gas key if there has been a change.
+ # Last checked - 2020/02/01
+
if zone_key == 'ES-CN-HI':
for response in island_data:
if response.production() > 0:
@@ -102,9 +107,9 @@
'datetime': get(response.timestamp).datetime,
'production': {
'coal': 0.0,
- 'gas': round(response.gas + response.combined, 2),
+ 'gas': 0.0,
'solar': round(response.solar, 2),
- 'oil': round(response.vapor + response.diesel, 2),
+ 'oil': round(response.vapor + response.diesel + response.gas + response.combined, 2),
'wind': round(response.wind, 2),
'hydro': 0.0,
'biomass': 0.0,
@@ -132,9 +137,9 @@
'datetime': get(response.timestamp).datetime,
'production': {
'coal': 0.0,
- 'gas': round(response.gas + response.combined, 2),
+ 'gas': 0.0,
'solar': round(response.solar, 2),
- 'oil': round(response.vapor + response.diesel, 2),
+ 'oil': round(response.vapor + response.diesel + response.gas + response.combined, 2),
'wind': round(response.wind, 2),
'hydro': round(response.hydraulic, 2),
'biomass': 0.0,
| {"golden_diff": "diff --git a/parsers/ES_CN.py b/parsers/ES_CN.py\n--- a/parsers/ES_CN.py\n+++ b/parsers/ES_CN.py\n@@ -23,6 +23,7 @@\n 'ES-CN-HI': 2\n }\n \n+\n # TODO: Remove verify SSL config when working without it.\n def fetch_island_data(zone_key, session):\n if zone_key == 'ES-CN-FVLZ':\n@@ -94,6 +95,10 @@\n island_data = fetch_island_data(zone_key, ses)\n data = []\n \n+ # NOTE the LNG terminals are not built yet, so power generated by \"gas\" or \"combined\" is actually using oil.\n+ # Recheck this every 6 months and move to gas key if there has been a change.\n+ # Last checked - 2020/02/01\n+\n if zone_key == 'ES-CN-HI':\n for response in island_data:\n if response.production() > 0:\n@@ -102,9 +107,9 @@\n 'datetime': get(response.timestamp).datetime,\n 'production': {\n 'coal': 0.0,\n- 'gas': round(response.gas + response.combined, 2),\n+ 'gas': 0.0,\n 'solar': round(response.solar, 2),\n- 'oil': round(response.vapor + response.diesel, 2),\n+ 'oil': round(response.vapor + response.diesel + response.gas + response.combined, 2),\n 'wind': round(response.wind, 2),\n 'hydro': 0.0,\n 'biomass': 0.0,\n@@ -132,9 +137,9 @@\n 'datetime': get(response.timestamp).datetime,\n 'production': {\n 'coal': 0.0,\n- 'gas': round(response.gas + response.combined, 2),\n+ 'gas': 0.0,\n 'solar': round(response.solar, 2),\n- 'oil': round(response.vapor + response.diesel, 2),\n+ 'oil': round(response.vapor + response.diesel + response.gas + response.combined, 2),\n 'wind': round(response.wind, 2),\n 'hydro': round(response.hydraulic, 2),\n 'biomass': 0.0,\n", "issue": "Data for region Canaries Islands\nHello, I had a look at https://www.electricitymap.org/ , it is quite interesting and a very good job ! But there is a mistake for canarias islands : you indicate generation from gas, but the LNG terminals have not been built and commissioned yet ! The mistakes comes probablyfrom bad understanding of datas published by Red Electrica de Espa\u00f1a : the amount indicated by them as \"turbina de gas\" and \"ciclo combinado\" refers to amount produced by combined cycle and open cycle gas turbine, but these plants are still operated on liquidfuel, awaiting for natural gas availability ! \n", "before_files": [{"content": "#!/usr/bin/env python3\n\nimport logging\n\n# The arrow library is used to handle datetimes\nfrom arrow import get\n# The request library is used to fetch content through HTTP\nfrom requests import Session\nfrom ree import (ElHierro, GranCanaria, Gomera, LanzaroteFuerteventura,\n LaPalma, Tenerife)\nfrom .lib.exceptions import ParserException\nfrom .lib.validation import validate\n\n\n# Minimum valid zone demand. 
This is used to eliminate some cases\n# where generation for one or more modes is obviously missing.\nFLOORS = {\n 'ES-CN-FVLZ': 50,\n 'ES-CN-GC': 150,\n 'ES-CN-IG': 3,\n 'ES-CN-LP': 10,\n 'ES-CN-TE': 150,\n 'ES-CN-HI': 2\n}\n\n# TODO: Remove verify SSL config when working without it.\ndef fetch_island_data(zone_key, session):\n if zone_key == 'ES-CN-FVLZ':\n lanzarote_fuerteventura_data = LanzaroteFuerteventura(session, verify=False).get_all()\n if not lanzarote_fuerteventura_data:\n raise ParserException(zone_key, \"LanzaroteFuerteventura not response\")\n else:\n return lanzarote_fuerteventura_data\n elif zone_key == 'ES-CN-GC':\n gran_canaria_data = GranCanaria(session, verify=False).get_all()\n if not gran_canaria_data:\n raise ParserException(zone_key, \"GranCanaria not response\")\n else:\n return gran_canaria_data\n elif zone_key == 'ES-CN-IG':\n gomera_data = Gomera(session, verify=False).get_all()\n if not gomera_data:\n raise ParserException(zone_key, \"Gomera not response\")\n else:\n return gomera_data\n elif zone_key == 'ES-CN-LP':\n la_palma_data = LaPalma(session, verify=False).get_all()\n if not la_palma_data:\n raise ParserException(zone_key, \"LaPalma not response\")\n else:\n return la_palma_data\n elif zone_key == 'ES-CN-TE':\n tenerife_data = Tenerife(session, verify=False).get_all()\n if not tenerife_data:\n raise ParserException(zone_key, \"Tenerife not response\")\n else:\n return tenerife_data\n elif zone_key == 'ES-CN-HI':\n el_hierro_data = ElHierro(session, verify=False).get_all()\n if not el_hierro_data:\n raise ParserException(zone_key, \"ElHierro not response\")\n else:\n return el_hierro_data\n else:\n raise ParserException(zone_key, 'Can\\'t read this country code {0}'.format(zone_key))\n\n\ndef fetch_consumption(zone_key='ES-CN', session=None, target_datetime=None, logger=None):\n if target_datetime:\n raise NotImplementedError('This parser is not yet able to parse past dates')\n \n ses = session or Session()\n island_data = fetch_island_data(zone_key, ses)\n data = []\n for response in island_data:\n response_data = {\n 'zoneKey': zone_key,\n 'datetime': get(response.timestamp).datetime,\n 'consumption': response.demand,\n 'source': 'demanda.ree.es'\n }\n\n data.append(response_data)\n\n return data\n\n\ndef fetch_production(zone_key, session=None, target_datetime=None,\n logger=logging.getLogger(__name__)):\n if target_datetime:\n raise NotImplementedError('This parser is not yet able to parse past dates')\n \n ses = session or Session()\n island_data = fetch_island_data(zone_key, ses)\n data = []\n\n if zone_key == 'ES-CN-HI':\n for response in island_data:\n if response.production() > 0:\n response_data = {\n 'zoneKey': zone_key,\n 'datetime': get(response.timestamp).datetime,\n 'production': {\n 'coal': 0.0,\n 'gas': round(response.gas + response.combined, 2),\n 'solar': round(response.solar, 2),\n 'oil': round(response.vapor + response.diesel, 2),\n 'wind': round(response.wind, 2),\n 'hydro': 0.0,\n 'biomass': 0.0,\n 'nuclear': 0.0,\n 'geothermal': 0.0\n },\n 'storage': {\n 'hydro': round(-response.hydraulic, 2),\n 'battery': 0.0\n },\n 'source': 'demanda.ree.es',\n }\n response_data = validate(response_data, logger,\n floor=FLOORS[zone_key])\n\n if response_data:\n # append if valid\n data.append(response_data)\n\n else:\n for response in island_data:\n if response.production() > 0:\n response_data = {\n 'zoneKey': zone_key,\n 'datetime': get(response.timestamp).datetime,\n 'production': {\n 'coal': 0.0,\n 'gas': round(response.gas + 
response.combined, 2),\n 'solar': round(response.solar, 2),\n 'oil': round(response.vapor + response.diesel, 2),\n 'wind': round(response.wind, 2),\n 'hydro': round(response.hydraulic, 2),\n 'biomass': 0.0,\n 'nuclear': 0.0,\n 'geothermal': 0.0\n },\n 'storage': {\n 'hydro': 0.0,\n 'battery': 0.0\n },\n 'source': 'demanda.ree.es',\n }\n response_data = validate(response_data, logger,\n floor=FLOORS[zone_key])\n\n if response_data:\n # append if valid\n data.append(response_data)\n\n return data\n\n\nif __name__ == '__main__':\n session = Session\n print(\"# ES-CN-FVLZ\")\n print(fetch_consumption('ES-CN-FVLZ', session))\n print(fetch_production('ES-CN-FVLZ', session))\n print(\"# ES-CN-GC\")\n print(fetch_consumption('ES-CN-GC', session))\n print(fetch_production('ES-CN-GC', session))\n print(\"# ES-CN-IG\")\n print(fetch_consumption('ES-CN-IG', session))\n print(fetch_production('ES-CN-IG', session))\n print(\"# ES-CN-LP\")\n print(fetch_consumption('ES-CN-LP', session))\n print(fetch_production('ES-CN-LP', session))\n print(\"# ES-CN-TE\")\n print(fetch_consumption('ES-CN-TE', session))\n print(fetch_production('ES-CN-TE', session))\n print(\"# ES-CN-HI\")\n print(fetch_consumption('ES-CN-HI', session))\n print(fetch_production('ES-CN-HI', session))\n", "path": "parsers/ES_CN.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n\nimport logging\n\n# The arrow library is used to handle datetimes\nfrom arrow import get\n# The request library is used to fetch content through HTTP\nfrom requests import Session\nfrom ree import (ElHierro, GranCanaria, Gomera, LanzaroteFuerteventura,\n LaPalma, Tenerife)\nfrom .lib.exceptions import ParserException\nfrom .lib.validation import validate\n\n\n# Minimum valid zone demand. This is used to eliminate some cases\n# where generation for one or more modes is obviously missing.\nFLOORS = {\n 'ES-CN-FVLZ': 50,\n 'ES-CN-GC': 150,\n 'ES-CN-IG': 3,\n 'ES-CN-LP': 10,\n 'ES-CN-TE': 150,\n 'ES-CN-HI': 2\n}\n\n\n# TODO: Remove verify SSL config when working without it.\ndef fetch_island_data(zone_key, session):\n if zone_key == 'ES-CN-FVLZ':\n lanzarote_fuerteventura_data = LanzaroteFuerteventura(session, verify=False).get_all()\n if not lanzarote_fuerteventura_data:\n raise ParserException(zone_key, \"LanzaroteFuerteventura not response\")\n else:\n return lanzarote_fuerteventura_data\n elif zone_key == 'ES-CN-GC':\n gran_canaria_data = GranCanaria(session, verify=False).get_all()\n if not gran_canaria_data:\n raise ParserException(zone_key, \"GranCanaria not response\")\n else:\n return gran_canaria_data\n elif zone_key == 'ES-CN-IG':\n gomera_data = Gomera(session, verify=False).get_all()\n if not gomera_data:\n raise ParserException(zone_key, \"Gomera not response\")\n else:\n return gomera_data\n elif zone_key == 'ES-CN-LP':\n la_palma_data = LaPalma(session, verify=False).get_all()\n if not la_palma_data:\n raise ParserException(zone_key, \"LaPalma not response\")\n else:\n return la_palma_data\n elif zone_key == 'ES-CN-TE':\n tenerife_data = Tenerife(session, verify=False).get_all()\n if not tenerife_data:\n raise ParserException(zone_key, \"Tenerife not response\")\n else:\n return tenerife_data\n elif zone_key == 'ES-CN-HI':\n el_hierro_data = ElHierro(session, verify=False).get_all()\n if not el_hierro_data:\n raise ParserException(zone_key, \"ElHierro not response\")\n else:\n return el_hierro_data\n else:\n raise ParserException(zone_key, 'Can\\'t read this country code {0}'.format(zone_key))\n\n\ndef fetch_consumption(zone_key='ES-CN', 
session=None, target_datetime=None, logger=None):\n if target_datetime:\n raise NotImplementedError('This parser is not yet able to parse past dates')\n \n ses = session or Session()\n island_data = fetch_island_data(zone_key, ses)\n data = []\n for response in island_data:\n response_data = {\n 'zoneKey': zone_key,\n 'datetime': get(response.timestamp).datetime,\n 'consumption': response.demand,\n 'source': 'demanda.ree.es'\n }\n\n data.append(response_data)\n\n return data\n\n\ndef fetch_production(zone_key, session=None, target_datetime=None,\n logger=logging.getLogger(__name__)):\n if target_datetime:\n raise NotImplementedError('This parser is not yet able to parse past dates')\n \n ses = session or Session()\n island_data = fetch_island_data(zone_key, ses)\n data = []\n\n # NOTE the LNG terminals are not built yet, so power generated by \"gas\" or \"combined\" is actually using oil.\n # Recheck this every 6 months and move to gas key if there has been a change.\n # Last checked - 2020/02/01\n\n if zone_key == 'ES-CN-HI':\n for response in island_data:\n if response.production() > 0:\n response_data = {\n 'zoneKey': zone_key,\n 'datetime': get(response.timestamp).datetime,\n 'production': {\n 'coal': 0.0,\n 'gas': 0.0,\n 'solar': round(response.solar, 2),\n 'oil': round(response.vapor + response.diesel + response.gas + response.combined, 2),\n 'wind': round(response.wind, 2),\n 'hydro': 0.0,\n 'biomass': 0.0,\n 'nuclear': 0.0,\n 'geothermal': 0.0\n },\n 'storage': {\n 'hydro': round(-response.hydraulic, 2),\n 'battery': 0.0\n },\n 'source': 'demanda.ree.es',\n }\n response_data = validate(response_data, logger,\n floor=FLOORS[zone_key])\n\n if response_data:\n # append if valid\n data.append(response_data)\n\n else:\n for response in island_data:\n if response.production() > 0:\n response_data = {\n 'zoneKey': zone_key,\n 'datetime': get(response.timestamp).datetime,\n 'production': {\n 'coal': 0.0,\n 'gas': 0.0,\n 'solar': round(response.solar, 2),\n 'oil': round(response.vapor + response.diesel + response.gas + response.combined, 2),\n 'wind': round(response.wind, 2),\n 'hydro': round(response.hydraulic, 2),\n 'biomass': 0.0,\n 'nuclear': 0.0,\n 'geothermal': 0.0\n },\n 'storage': {\n 'hydro': 0.0,\n 'battery': 0.0\n },\n 'source': 'demanda.ree.es',\n }\n response_data = validate(response_data, logger,\n floor=FLOORS[zone_key])\n\n if response_data:\n # append if valid\n data.append(response_data)\n\n return data\n\n\nif __name__ == '__main__':\n session = Session\n print(\"# ES-CN-FVLZ\")\n print(fetch_consumption('ES-CN-FVLZ', session))\n print(fetch_production('ES-CN-FVLZ', session))\n print(\"# ES-CN-GC\")\n print(fetch_consumption('ES-CN-GC', session))\n print(fetch_production('ES-CN-GC', session))\n print(\"# ES-CN-IG\")\n print(fetch_consumption('ES-CN-IG', session))\n print(fetch_production('ES-CN-IG', session))\n print(\"# ES-CN-LP\")\n print(fetch_consumption('ES-CN-LP', session))\n print(fetch_production('ES-CN-LP', session))\n print(\"# ES-CN-TE\")\n print(fetch_consumption('ES-CN-TE', session))\n print(fetch_production('ES-CN-TE', session))\n print(\"# ES-CN-HI\")\n print(fetch_consumption('ES-CN-HI', session))\n print(fetch_production('ES-CN-HI', session))\n", "path": "parsers/ES_CN.py"}]} | 2,347 | 531 |
gh_patches_debug_1531 | rasdani/github-patches | git_diff | conda__conda-5496 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
An unexpected error has occurred.
An unexpected error has occurred.
Please consider posting the following information to the
conda GitHub issue tracker at:
https://github.com/conda/conda/issues
Current conda install:
platform : win-64
conda version : 4.3.21
conda is private : False
conda-env version : 4.3.21
conda-build version : not installed
python version : 3.6.1.final.0
requests version : 2.14.2
root environment : C:\ProgramData\Anaconda3 (read only)
default environment : C:\ProgramData\Anaconda3
envs directories : C:\Users\eric\AppData\Local\conda\conda\envs
C:\ProgramData\Anaconda3\envs
C:\Users\eric\.conda\envs
package cache : C:\ProgramData\Anaconda3\pkgs
C:\Users\eric\AppData\Local\conda\conda\pkgs
channel URLs : https://conda.anaconda.org/anaconda-fusion/win-64
https://conda.anaconda.org/anaconda-fusion/noarch
https://repo.continuum.io/pkgs/free/win-64
https://repo.continuum.io/pkgs/free/noarch
https://repo.continuum.io/pkgs/r/win-64
https://repo.continuum.io/pkgs/r/noarch
https://repo.continuum.io/pkgs/pro/win-64
https://repo.continuum.io/pkgs/pro/noarch
https://repo.continuum.io/pkgs/msys2/win-64
https://repo.continuum.io/pkgs/msys2/noarch
config file : C:\Users\eric\.condarc
netrc file : None
offline mode : False
user-agent : conda/4.3.21 requests/2.14.2 CPython/3.6.1 Windows/10 Windows/10.0.10240
administrator : False
`$ C:\ProgramData\Anaconda3\Scripts\conda-script.py ..checkenv cmd.exe C:\ProgramData\Anaconda3`
Traceback (most recent call last):
File "C:\ProgramData\Anaconda3\lib\site-packages\conda\cli\main.py", line 167, in main
import conda.cli.activate as activate
File "C:\ProgramData\Anaconda3\lib\site-packages\conda\cli\activate.py", line 12, in <module>
from ..utils import shells
File "C:\ProgramData\Anaconda3\lib\site-packages\conda\utils.py", line 13, in <module>
from .gateways.disk.read import compute_md5sum
File "C:\ProgramData\Anaconda3\lib\site-packages\conda\gateways\disk\read.py", line 22, in <module>
from ...models.channel import Channel
File "C:\ProgramData\Anaconda3\lib\site-packages\conda\models\channel.py", line 9, in <module>
from ..base.context import context
File "C:\ProgramData\Anaconda3\lib\site-packages\conda\base\context.py", line 18, in <module>
from .._vendor.auxlib.path import expand
File "C:\ProgramData\Anaconda3\lib\site-packages\conda\_vendor\auxlib\path.py", line 8, in <module>
import pkg_resources
File "<frozen importlib._bootstrap>", line 961, in _find_and_load
File "<frozen importlib._bootstrap>", line 950, in _find_and_load_unlocked
File "<frozen importlib._bootstrap>", line 646, in _load_unlocked
File "<frozen importlib._bootstrap>", line 616, in _load_backward_compatible
File "C:\ProgramData\Anaconda3\lib\site-packages\setuptools-27.2.0-py3.6.egg\pkg_resources\__init__.py", line 2985, in <module>
@_call_aside
File "C:\ProgramData\Anaconda3\lib\site-packages\setuptools-27.2.0-py3.6.egg\pkg_resources\__init__.py", line 2971, in _call_aside
f(*args, **kwargs)
File "C:\ProgramData\Anaconda3\lib\site-packages\setuptools-27.2.0-py3.6.egg\pkg_resources\__init__.py", line 3013, in _initialize_master_working_set
dist.activate(replace=False)
File "C:\ProgramData\Anaconda3\lib\site-packages\setuptools-27.2.0-py3.6.egg\pkg_resources\__init__.py", line 2544, in activate
declare_namespace(pkg)
File "C:\ProgramData\Anaconda3\lib\site-packages\setuptools-27.2.0-py3.6.egg\pkg_resources\__init__.py", line 2118, in declare_namespace
_handle_ns(packageName, path_item)
File "C:\ProgramData\Anaconda3\lib\site-packages\setuptools-27.2.0-py3.6.egg\pkg_resources\__init__.py", line 2058, in _handle_ns
_rebuild_mod_path(path, packageName, module)
File "C:\ProgramData\Anaconda3\lib\site-packages\setuptools-27.2.0-py3.6.egg\pkg_resources\__init__.py", line 2087, in _rebuild_mod_path
orig_path.sort(key=position_in_sys_path)
AttributeError: '_NamespacePath' object has no attribute 'sort'
C:\Users\eric>
--- END ISSUE ---
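For context before reading the files below: the traceback ends inside `pkg_resources`' own import-time initialization (`AttributeError: '_NamespacePath' object has no attribute 'sort'`), so the failure is not an `ImportError` at all, and a guard that only catches `ImportError` still lets the crash through. A minimal sketch of the more tolerant guard pattern is shown here; it is illustrative only, and the `resource_exists` helper is an assumption added for the example rather than conda code.
```python
# Minimal sketch (not conda's actual module): pkg_resources can blow up at import
# time with exceptions other than ImportError, e.g. the AttributeError above, so
# the optional dependency must be guarded with a broader except clause.
try:
    import pkg_resources
except Exception:  # intentionally broad: any import-time failure disables the fallback
    pkg_resources = None


def resource_exists(package_name, file_path):
    # Only consult pkg_resources when it imported cleanly.
    return pkg_resources is not None and pkg_resources.resource_exists(package_name, file_path)
```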
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `conda/_vendor/auxlib/path.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 from __future__ import print_function, division, absolute_import
3 from distutils.sysconfig import get_python_lib
4 from logging import getLogger
5 from os import chdir, getcwd
6 from os.path import abspath, dirname, exists, expanduser, expandvars, isdir, isfile, join, sep
7 try:
8 import pkg_resources
9 except ImportError:
10 pkg_resources = None
11 import sys
12
13 log = getLogger(__name__)
14
15
16 ROOT_PATH = abspath(sep)
17
18
19 def site_packages_paths():
20 if hasattr(sys, 'real_prefix'):
21 # in a virtualenv
22 log.debug('searching virtualenv')
23 return tuple(p for p in sys.path if p.endswith('site-packages'))
24 else:
25 # not in a virtualenv
26 log.debug('searching outside virtualenv') # pragma: no cover
27 return tuple(get_python_lib(), ) # pragma: no cover
28
29
30 class PackageFile(object):
31
32 def __init__(self, file_path, package_name):
33 self.file_path = file_path
34 self.package_name = package_name
35
36 def __enter__(self):
37 self.file_handle = open_package_file(self.file_path, self.package_name)
38 return self.file_handle
39
40 def __exit__(self, *args):
41 self.file_handle.close()
42
43
44 class ChangePath(object):
45
46 def __init__(self, path):
47 self.dirpath = dirname(path) if isfile(path) else path
48 if not isdir(self.dirpath):
49 raise IOError('File or directory not found: {0}'.format(path))
50
51 def __enter__(self):
52 self.cwd = getcwd()
53 chdir(self.dirpath)
54 return self
55
56 def __exit__(self, *args):
57 chdir(self.cwd)
58
59
60 def open_package_file(file_path, package_name):
61 file_path = expand(file_path)
62
63 # look for file at relative path
64 if exists(file_path):
65 log.info("found real file {0}".format(file_path))
66 return open(file_path)
67
68 # look for file in package resources
69 if (package_name and pkg_resources is not None and
70 pkg_resources.resource_exists(package_name, file_path)):
71 log.info("found package resource file {0} for package {1}".format(file_path, package_name))
72 return pkg_resources.resource_stream(package_name, file_path)
73
74 # look for file in site-packages
75 package_path = find_file_in_site_packages(file_path, package_name)
76 if package_path:
77 return open(package_path) # pragma: no cover
78
79 msg = "file for module [{0}] cannot be found at path {1}".format(package_name, file_path)
80 log.error(msg)
81 raise IOError(msg)
82
83
84 def find_file_in_site_packages(file_path, package_name):
85 package_path = package_name.replace('.', '/')
86 for site_packages_path in site_packages_paths():
87 test_path = join(site_packages_path, package_path, file_path)
88 if exists(test_path):
89 log.info("found site-package file {0} for package {1}".format(file_path, package_name))
90 return test_path
91 else:
92 log.error("No file found at {0}.".format(test_path))
93 return None
94
95
96 def expand(path):
97 return abspath(expanduser(expandvars(path)))
98
99
100 def absdirname(path):
101 return abspath(expanduser(dirname(path)))
102
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/conda/_vendor/auxlib/path.py b/conda/_vendor/auxlib/path.py
--- a/conda/_vendor/auxlib/path.py
+++ b/conda/_vendor/auxlib/path.py
@@ -6,7 +6,7 @@
from os.path import abspath, dirname, exists, expanduser, expandvars, isdir, isfile, join, sep
try:
import pkg_resources
-except ImportError:
+except Exception:
pkg_resources = None
import sys
| {"golden_diff": "diff --git a/conda/_vendor/auxlib/path.py b/conda/_vendor/auxlib/path.py\n--- a/conda/_vendor/auxlib/path.py\n+++ b/conda/_vendor/auxlib/path.py\n@@ -6,7 +6,7 @@\n from os.path import abspath, dirname, exists, expanduser, expandvars, isdir, isfile, join, sep\n try:\n import pkg_resources\n-except ImportError:\n+except Exception:\n pkg_resources = None\n import sys\n", "issue": "An unexpected error has occurred.\nAn unexpected error has occurred.\r\nPlease consider posting the following information to the\r\nconda GitHub issue tracker at:\r\n\r\n https://github.com/conda/conda/issues\r\n\r\nCurrent conda install:\r\n\r\n platform : win-64\r\n conda version : 4.3.21\r\n conda is private : False\r\n conda-env version : 4.3.21\r\n conda-build version : not installed\r\n python version : 3.6.1.final.0\r\n requests version : 2.14.2\r\n root environment : C:\\ProgramData\\Anaconda3 (read only)\r\n default environment : C:\\ProgramData\\Anaconda3\r\n envs directories : C:\\Users\\eric\\AppData\\Local\\conda\\conda\\envs\r\n C:\\ProgramData\\Anaconda3\\envs\r\n C:\\Users\\eric\\.conda\\envs\r\n package cache : C:\\ProgramData\\Anaconda3\\pkgs\r\n C:\\Users\\eric\\AppData\\Local\\conda\\conda\\pkgs\r\n channel URLs : https://conda.anaconda.org/anaconda-fusion/win-64\r\n https://conda.anaconda.org/anaconda-fusion/noarch\r\n https://repo.continuum.io/pkgs/free/win-64\r\n https://repo.continuum.io/pkgs/free/noarch\r\n https://repo.continuum.io/pkgs/r/win-64\r\n https://repo.continuum.io/pkgs/r/noarch\r\n https://repo.continuum.io/pkgs/pro/win-64\r\n https://repo.continuum.io/pkgs/pro/noarch\r\n https://repo.continuum.io/pkgs/msys2/win-64\r\n https://repo.continuum.io/pkgs/msys2/noarch\r\n config file : C:\\Users\\eric\\.condarc\r\n netrc file : None\r\n offline mode : False\r\n user-agent : conda/4.3.21 requests/2.14.2 CPython/3.6.1 Windows/10 Windows/10.0.10240\r\n administrator : False\r\n\r\n`$ C:\\ProgramData\\Anaconda3\\Scripts\\conda-script.py ..checkenv cmd.exe C:\\ProgramData\\Anaconda3`\r\n\r\n\r\n\r\n\r\n Traceback (most recent call last):\r\n File \"C:\\ProgramData\\Anaconda3\\lib\\site-packages\\conda\\cli\\main.py\", line 167, in main\r\n import conda.cli.activate as activate\r\n File \"C:\\ProgramData\\Anaconda3\\lib\\site-packages\\conda\\cli\\activate.py\", line 12, in <module>\r\n from ..utils import shells\r\n File \"C:\\ProgramData\\Anaconda3\\lib\\site-packages\\conda\\utils.py\", line 13, in <module>\r\n from .gateways.disk.read import compute_md5sum\r\n File \"C:\\ProgramData\\Anaconda3\\lib\\site-packages\\conda\\gateways\\disk\\read.py\", line 22, in <module>\r\n from ...models.channel import Channel\r\n File \"C:\\ProgramData\\Anaconda3\\lib\\site-packages\\conda\\models\\channel.py\", line 9, in <module>\r\n from ..base.context import context\r\n File \"C:\\ProgramData\\Anaconda3\\lib\\site-packages\\conda\\base\\context.py\", line 18, in <module>\r\n from .._vendor.auxlib.path import expand\r\n File \"C:\\ProgramData\\Anaconda3\\lib\\site-packages\\conda\\_vendor\\auxlib\\path.py\", line 8, in <module>\r\n import pkg_resources\r\n File \"<frozen importlib._bootstrap>\", line 961, in _find_and_load\r\n File \"<frozen importlib._bootstrap>\", line 950, in _find_and_load_unlocked\r\n File \"<frozen importlib._bootstrap>\", line 646, in _load_unlocked\r\n File \"<frozen importlib._bootstrap>\", line 616, in _load_backward_compatible\r\n File \"C:\\ProgramData\\Anaconda3\\lib\\site-packages\\setuptools-27.2.0-py3.6.egg\\pkg_resources\\__init__.py\", line 2985, 
in <module>\r\n @_call_aside\r\n File \"C:\\ProgramData\\Anaconda3\\lib\\site-packages\\setuptools-27.2.0-py3.6.egg\\pkg_resources\\__init__.py\", line 2971, in _call_aside\r\n f(*args, **kwargs)\r\n File \"C:\\ProgramData\\Anaconda3\\lib\\site-packages\\setuptools-27.2.0-py3.6.egg\\pkg_resources\\__init__.py\", line 3013, in _initialize_master_working_set\r\n dist.activate(replace=False)\r\n File \"C:\\ProgramData\\Anaconda3\\lib\\site-packages\\setuptools-27.2.0-py3.6.egg\\pkg_resources\\__init__.py\", line 2544, in activate\r\n declare_namespace(pkg)\r\n File \"C:\\ProgramData\\Anaconda3\\lib\\site-packages\\setuptools-27.2.0-py3.6.egg\\pkg_resources\\__init__.py\", line 2118, in declare_namespace\r\n _handle_ns(packageName, path_item)\r\n File \"C:\\ProgramData\\Anaconda3\\lib\\site-packages\\setuptools-27.2.0-py3.6.egg\\pkg_resources\\__init__.py\", line 2058, in _handle_ns\r\n _rebuild_mod_path(path, packageName, module)\r\n File \"C:\\ProgramData\\Anaconda3\\lib\\site-packages\\setuptools-27.2.0-py3.6.egg\\pkg_resources\\__init__.py\", line 2087, in _rebuild_mod_path\r\n orig_path.sort(key=position_in_sys_path)\r\n AttributeError: '_NamespacePath' object has no attribute 'sort'\r\n\r\n\r\nC:\\Users\\eric>\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nfrom __future__ import print_function, division, absolute_import\nfrom distutils.sysconfig import get_python_lib\nfrom logging import getLogger\nfrom os import chdir, getcwd\nfrom os.path import abspath, dirname, exists, expanduser, expandvars, isdir, isfile, join, sep\ntry:\n import pkg_resources\nexcept ImportError:\n pkg_resources = None\nimport sys\n\nlog = getLogger(__name__)\n\n\nROOT_PATH = abspath(sep)\n\n\ndef site_packages_paths():\n if hasattr(sys, 'real_prefix'):\n # in a virtualenv\n log.debug('searching virtualenv')\n return tuple(p for p in sys.path if p.endswith('site-packages'))\n else:\n # not in a virtualenv\n log.debug('searching outside virtualenv') # pragma: no cover\n return tuple(get_python_lib(), ) # pragma: no cover\n\n\nclass PackageFile(object):\n\n def __init__(self, file_path, package_name):\n self.file_path = file_path\n self.package_name = package_name\n\n def __enter__(self):\n self.file_handle = open_package_file(self.file_path, self.package_name)\n return self.file_handle\n\n def __exit__(self, *args):\n self.file_handle.close()\n\n\nclass ChangePath(object):\n\n def __init__(self, path):\n self.dirpath = dirname(path) if isfile(path) else path\n if not isdir(self.dirpath):\n raise IOError('File or directory not found: {0}'.format(path))\n\n def __enter__(self):\n self.cwd = getcwd()\n chdir(self.dirpath)\n return self\n\n def __exit__(self, *args):\n chdir(self.cwd)\n\n\ndef open_package_file(file_path, package_name):\n file_path = expand(file_path)\n\n # look for file at relative path\n if exists(file_path):\n log.info(\"found real file {0}\".format(file_path))\n return open(file_path)\n\n # look for file in package resources\n if (package_name and pkg_resources is not None and\n pkg_resources.resource_exists(package_name, file_path)):\n log.info(\"found package resource file {0} for package {1}\".format(file_path, package_name))\n return pkg_resources.resource_stream(package_name, file_path)\n\n # look for file in site-packages\n package_path = find_file_in_site_packages(file_path, package_name)\n if package_path:\n return open(package_path) # pragma: no cover\n\n msg = \"file for module [{0}] cannot be found at path {1}\".format(package_name, file_path)\n log.error(msg)\n raise 
IOError(msg)\n\n\ndef find_file_in_site_packages(file_path, package_name):\n package_path = package_name.replace('.', '/')\n for site_packages_path in site_packages_paths():\n test_path = join(site_packages_path, package_path, file_path)\n if exists(test_path):\n log.info(\"found site-package file {0} for package {1}\".format(file_path, package_name))\n return test_path\n else:\n log.error(\"No file found at {0}.\".format(test_path))\n return None\n\n\ndef expand(path):\n return abspath(expanduser(expandvars(path)))\n\n\ndef absdirname(path):\n return abspath(expanduser(dirname(path)))\n", "path": "conda/_vendor/auxlib/path.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nfrom __future__ import print_function, division, absolute_import\nfrom distutils.sysconfig import get_python_lib\nfrom logging import getLogger\nfrom os import chdir, getcwd\nfrom os.path import abspath, dirname, exists, expanduser, expandvars, isdir, isfile, join, sep\ntry:\n import pkg_resources\nexcept Exception:\n pkg_resources = None\nimport sys\n\nlog = getLogger(__name__)\n\n\nROOT_PATH = abspath(sep)\n\n\ndef site_packages_paths():\n if hasattr(sys, 'real_prefix'):\n # in a virtualenv\n log.debug('searching virtualenv')\n return tuple(p for p in sys.path if p.endswith('site-packages'))\n else:\n # not in a virtualenv\n log.debug('searching outside virtualenv') # pragma: no cover\n return tuple(get_python_lib(), ) # pragma: no cover\n\n\nclass PackageFile(object):\n\n def __init__(self, file_path, package_name):\n self.file_path = file_path\n self.package_name = package_name\n\n def __enter__(self):\n self.file_handle = open_package_file(self.file_path, self.package_name)\n return self.file_handle\n\n def __exit__(self, *args):\n self.file_handle.close()\n\n\nclass ChangePath(object):\n\n def __init__(self, path):\n self.dirpath = dirname(path) if isfile(path) else path\n if not isdir(self.dirpath):\n raise IOError('File or directory not found: {0}'.format(path))\n\n def __enter__(self):\n self.cwd = getcwd()\n chdir(self.dirpath)\n return self\n\n def __exit__(self, *args):\n chdir(self.cwd)\n\n\ndef open_package_file(file_path, package_name):\n file_path = expand(file_path)\n\n # look for file at relative path\n if exists(file_path):\n log.info(\"found real file {0}\".format(file_path))\n return open(file_path)\n\n # look for file in package resources\n if (package_name and pkg_resources is not None and\n pkg_resources.resource_exists(package_name, file_path)):\n log.info(\"found package resource file {0} for package {1}\".format(file_path, package_name))\n return pkg_resources.resource_stream(package_name, file_path)\n\n # look for file in site-packages\n package_path = find_file_in_site_packages(file_path, package_name)\n if package_path:\n return open(package_path) # pragma: no cover\n\n msg = \"file for module [{0}] cannot be found at path {1}\".format(package_name, file_path)\n log.error(msg)\n raise IOError(msg)\n\n\ndef find_file_in_site_packages(file_path, package_name):\n package_path = package_name.replace('.', '/')\n for site_packages_path in site_packages_paths():\n test_path = join(site_packages_path, package_path, file_path)\n if exists(test_path):\n log.info(\"found site-package file {0} for package {1}\".format(file_path, package_name))\n return test_path\n else:\n log.error(\"No file found at {0}.\".format(test_path))\n return None\n\n\ndef expand(path):\n return abspath(expanduser(expandvars(path)))\n\n\ndef absdirname(path):\n return abspath(expanduser(dirname(path)))\n", "path": 
"conda/_vendor/auxlib/path.py"}]} | 2,548 | 109 |
gh_patches_debug_11294 | rasdani/github-patches | git_diff | jupyterhub__zero-to-jupyterhub-k8s-398 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Allow multiple extraConfig
Binder relies on `hub.extraConfig` to configure the Spawner. If we want to customize a given Binder *deployment* via `hub.extraConfig`, we have to copy/paste the hub.extraConfig from the binderhub chart and add to it.
It would be handy if extraConfig were something that could be added to without having to copy the previous value. Are dicts the only structure that can do this?
--- END ISSUE ---
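As background for the files below: the single hard-coded `hub.extra-config.py` file is what forces the copy/paste, so one natural direction is to let several named snippets coexist and load them in a deterministic order. The sketch below is illustrative only; the `hub.extra-config.*.py` naming and the mount path are assumptions for the example, and `load_subconfig` is the helper already available in the namespace of a traitlets-loaded JupyterHub config file.
```python
# Illustrative sketch: load every mounted extra-config snippet instead of one file,
# so each values file can contribute its own named piece of configuration.
import glob
import os

CONFIG_DIR = '/etc/jupyterhub/config'

# Sorted to make execution order deterministic, e.g. 00-binderhub.py before 10-site.py.
for extra_config in sorted(glob.glob(os.path.join(CONFIG_DIR, 'hub.extra-config.*.py'))):
    load_subconfig(extra_config)  # provided in the config file's execution namespace
```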
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `images/hub/jupyterhub_config.py`
Content:
```
1 import os
2 import sys
3 import yaml
4 from tornado.httpclient import AsyncHTTPClient
5
6 def get_config(key, default=None):
7 """
8 Find a config item of a given name & return it
9
10 Parses everything as YAML, so lists and dicts are available too
11 """
12 path = os.path.join('/etc/jupyterhub/config', key)
13 try:
14 with open(path) as f:
15 data = yaml.safe_load(f)
16 return data
17 except FileNotFoundError:
18 return default
19
20 def get_secret(key, default=None):
21 """Get a secret from /etc/jupyterhub/secret"""
22 path = os.path.join('/etc/jupyterhub/secret', key)
23 try:
24 with open(path) as f:
25 return f.read().strip()
26 except FileNotFoundError:
27 return default
28
29
30 # Configure JupyterHub to use the curl backend for making HTTP requests,
31 # rather than the pure-python implementations. The default one starts
32 # being too slow to make a large number of requests to the proxy API
33 # at the rate required.
34 AsyncHTTPClient.configure("tornado.curl_httpclient.CurlAsyncHTTPClient")
35
36 c.JupyterHub.spawner_class = 'kubespawner.KubeSpawner'
37
38 # Connect to a proxy running in a different pod
39 c.ConfigurableHTTPProxy.api_url = 'http://{}:{}'.format(os.environ['PROXY_API_SERVICE_HOST'], int(os.environ['PROXY_API_SERVICE_PORT']))
40 c.ConfigurableHTTPProxy.should_start = False
41
42 # Do not shut down user pods when hub is restarted
43 c.JupyterHub.cleanup_servers = False
44
45 # Check that the proxy has routes appropriately setup
46 # This isn't the best named setting :D
47 c.JupyterHub.last_activity_interval = 60
48
49 # Max number of servers that can be spawning at any one time
50 c.JupyterHub.concurrent_spawn_limit = get_config('hub.concurrent-spawn-limit')
51
52 active_server_limit = get_config('hub.active-server-limit', None)
53
54 if active_server_limit is not None:
55 c.JupyterHub.active_server_limit = int(active_server_limit)
56
57 c.JupyterHub.ip = os.environ['PROXY_PUBLIC_SERVICE_HOST']
58 c.JupyterHub.port = int(os.environ['PROXY_PUBLIC_SERVICE_PORT'])
59
60 # the hub should listen on all interfaces, so the proxy can access it
61 c.JupyterHub.hub_ip = '0.0.0.0'
62
63 c.KubeSpawner.namespace = os.environ.get('POD_NAMESPACE', 'default')
64
65 c.KubeSpawner.start_timeout = get_config('singleuser.start-timeout')
66
67 # Use env var for this, since we want hub to restart when this changes
68 c.KubeSpawner.singleuser_image_spec = os.environ['SINGLEUSER_IMAGE']
69
70 c.KubeSpawner.singleuser_extra_labels = get_config('singleuser.extra-labels', {})
71
72 c.KubeSpawner.singleuser_uid = get_config('singleuser.uid')
73 c.KubeSpawner.singleuser_fs_gid = get_config('singleuser.fs-gid')
74
75 c.KubeSpawner.singleuser_node_selector = get_config('singleuser.node-selector')
76 # Configure dynamically provisioning pvc
77 storage_type = get_config('singleuser.storage.type')
78 if storage_type == 'dynamic':
79 c.KubeSpawner.pvc_name_template = 'claim-{username}{servername}'
80 c.KubeSpawner.user_storage_pvc_ensure = True
81 storage_class = get_config('singleuser.storage.dynamic.storage-class', None)
82 if storage_class:
83 c.KubeSpawner.user_storage_class = storage_class
84 c.KubeSpawner.user_storage_access_modes = ['ReadWriteOnce']
85 c.KubeSpawner.user_storage_capacity = get_config('singleuser.storage.capacity')
86
87 # Add volumes to singleuser pods
88 c.KubeSpawner.volumes = [
89 {
90 'name': 'volume-{username}{servername}',
91 'persistentVolumeClaim': {
92 'claimName': 'claim-{username}{servername}'
93 }
94 }
95 ]
96 c.KubeSpawner.volume_mounts = [
97 {
98 'mountPath': get_config('singleuser.storage.home_mount_path'),
99 'name': 'volume-{username}{servername}'
100 }
101 ]
102 elif storage_type == 'static':
103 pvc_claim_name = get_config('singleuser.storage.static.pvc-name')
104 c.KubeSpawner.volumes = [{
105 'name': 'home',
106 'persistentVolumeClaim': {
107 'claimName': pvc_claim_name
108 }
109 }]
110
111 c.KubeSpawner.volume_mounts = [{
112 'mountPath': get_config('singleuser.storage.home_mount_path'),
113 'name': 'home',
114 'subPath': get_config('singleuser.storage.static.sub-path')
115 }]
116
117 c.KubeSpawner.volumes.extend(get_config('singleuser.storage.extra-volumes', []))
118 c.KubeSpawner.volume_mounts.extend(get_config('singleuser.storage.extra-volume-mounts', []))
119
120 lifecycle_hooks = get_config('singleuser.lifecycle-hooks')
121 if lifecycle_hooks:
122 c.KubeSpawner.singleuser_lifecycle_hooks = lifecycle_hooks
123
124 init_containers = get_config('singleuser.init-containers')
125 if init_containers:
126 c.KubeSpawner.singleuser_init_containers = init_containers
127
128 # Gives spawned containers access to the API of the hub
129 c.KubeSpawner.hub_connect_ip = os.environ['HUB_SERVICE_HOST']
130 c.KubeSpawner.hub_connect_port = int(os.environ['HUB_SERVICE_PORT'])
131
132 c.JupyterHub.hub_connect_ip = os.environ['HUB_SERVICE_HOST']
133 c.JupyterHub.hub_connect_port = int(os.environ['HUB_SERVICE_PORT'])
134
135 c.KubeSpawner.mem_limit = get_config('singleuser.memory.limit')
136 c.KubeSpawner.mem_guarantee = get_config('singleuser.memory.guarantee')
137 c.KubeSpawner.cpu_limit = get_config('singleuser.cpu.limit')
138 c.KubeSpawner.cpu_guarantee = get_config('singleuser.cpu.guarantee')
139
140 # Allow switching authenticators easily
141 auth_type = get_config('auth.type')
142 email_domain = 'local'
143
144 if auth_type == 'google':
145 c.JupyterHub.authenticator_class = 'oauthenticator.GoogleOAuthenticator'
146 c.GoogleOAuthenticator.client_id = get_config('auth.google.client-id')
147 c.GoogleOAuthenticator.client_secret = get_config('auth.google.client-secret')
148 c.GoogleOAuthenticator.oauth_callback_url = get_config('auth.google.callback-url')
149 c.GoogleOAuthenticator.hosted_domain = get_config('auth.google.hosted-domain')
150 c.GoogleOAuthenticator.login_service = get_config('auth.google.login-service')
151 email_domain = get_config('auth.google.hosted-domain')
152 elif auth_type == 'github':
153 c.JupyterHub.authenticator_class = 'oauthenticator.GitHubOAuthenticator'
154 c.GitHubOAuthenticator.oauth_callback_url = get_config('auth.github.callback-url')
155 c.GitHubOAuthenticator.client_id = get_config('auth.github.client-id')
156 c.GitHubOAuthenticator.client_secret = get_config('auth.github.client-secret')
157 elif auth_type == 'cilogon':
158 c.JupyterHub.authenticator_class = 'oauthenticator.CILogonOAuthenticator'
159 c.CILogonOAuthenticator.oauth_callback_url = get_config('auth.cilogon.callback-url')
160 c.CILogonOAuthenticator.client_id = get_config('auth.cilogon.client-id')
161 c.CILogonOAuthenticator.client_secret = get_config('auth.cilogon.client-secret')
162 elif auth_type == 'gitlab':
163 c.JupyterHub.authenticator_class = 'oauthenticator.gitlab.GitLabOAuthenticator'
164 c.GitLabOAuthenticator.oauth_callback_url = get_config('auth.gitlab.callback-url')
165 c.GitLabOAuthenticator.client_id = get_config('auth.gitlab.client-id')
166 c.GitLabOAuthenticator.client_secret = get_config('auth.gitlab.client-secret')
167 elif auth_type == 'mediawiki':
168 c.JupyterHub.authenticator_class = 'oauthenticator.mediawiki.MWOAuthenticator'
169 c.MWOAuthenticator.client_id = get_config('auth.mediawiki.client-id')
170 c.MWOAuthenticator.client_secret = get_config('auth.mediawiki.client-secret')
171 c.MWOAuthenticator.index_url = get_config('auth.mediawiki.index-url')
172 elif auth_type == 'globus':
173 c.JupyterHub.authenticator_class = 'oauthenticator.globus.GlobusOAuthenticator'
174 c.GlobusOAuthenticator.oauth_callback_url = get_config('auth.globus.callback-url')
175 c.GlobusOAuthenticator.client_id = get_config('auth.globus.client-id')
176 c.GlobusOAuthenticator.client_secret = get_config('auth.globus.client-secret')
177 c.GlobusOAuthenticator.identity_provider = get_config('auth.globus.identity-provider', '')
178 elif auth_type == 'hmac':
179 c.JupyterHub.authenticator_class = 'hmacauthenticator.HMACAuthenticator'
180 c.HMACAuthenticator.secret_key = bytes.fromhex(get_config('auth.hmac.secret-key'))
181 elif auth_type == 'dummy':
182 c.JupyterHub.authenticator_class = 'dummyauthenticator.DummyAuthenticator'
183 c.DummyAuthenticator.password = get_config('auth.dummy.password', None)
184 elif auth_type == 'tmp':
185 c.JupyterHub.authenticator_class = 'tmpauthenticator.TmpAuthenticator'
186 elif auth_type == 'lti':
187 c.JupyterHub.authenticator_class = 'ltiauthenticator.LTIAuthenticator'
188 c.LTIAuthenticator.consumers = get_config('auth.lti.consumers')
189 elif auth_type == 'custom':
190 # full_class_name looks like "myauthenticator.MyAuthenticator".
191 # To create a docker image with this class availabe, you can just have the
192 # following Dockerifle:
193 # FROM jupyterhub/k8s-hub:v0.4
194 # RUN pip3 install myauthenticator
195 full_class_name = get_config('auth.custom.class-name')
196 c.JupyterHub.authenticator_class = full_class_name
197 auth_class_name = full_class_name.rsplit('.', 1)[-1]
198 auth_config = c[auth_class_name]
199 auth_config.update(get_config('auth.custom.config') or {})
200 else:
201 raise ValueError("Unhandled auth type: %r" % auth_type)
202
203 c.Authenticator.enable_auth_state = get_config('auth.state.enabled', False)
204
205 def generate_user_email(spawner):
206 """
207 Used as the EMAIL environment variable
208 """
209 return '{username}@{domain}'.format(
210 username=spawner.user.name, domain=email_domain
211 )
212
213 def generate_user_name(spawner):
214 """
215 Used as GIT_AUTHOR_NAME and GIT_COMMITTER_NAME environment variables
216 """
217 return spawner.user.name
218
219 c.KubeSpawner.environment = {
220 'EMAIL': generate_user_email,
221 # git requires these committer attributes
222 'GIT_AUTHOR_NAME': generate_user_name,
223 'GIT_COMMITTER_NAME': generate_user_name
224 }
225
226 c.KubeSpawner.environment.update(get_config('singleuser.extra-env', {}))
227
228 # Enable admins to access user servers
229 c.JupyterHub.admin_access = get_config('auth.admin.access')
230 c.Authenticator.admin_users = get_config('auth.admin.users', [])
231 c.Authenticator.whitelist = get_config('auth.whitelist.users', [])
232
233 c.JupyterHub.base_url = get_config('hub.base_url')
234
235 c.JupyterHub.services = []
236
237 if get_config('cull.enabled', False):
238 cull_timeout = get_config('cull.timeout')
239 cull_every = get_config('cull.every')
240 cull_cmd = [
241 '/usr/local/bin/cull_idle_servers.py',
242 '--timeout=%s' % cull_timeout,
243 '--cull-every=%s' % cull_every,
244 '--url=http://127.0.0.1:8081' + c.JupyterHub.base_url + 'hub/api'
245 ]
246 if get_config('cull.users'):
247 cull_cmd.append('--cull-users')
248 c.JupyterHub.services.append({
249 'name': 'cull-idle',
250 'admin': True,
251 'command': cull_cmd,
252 })
253
254 for name, service in get_config('hub.services', {}).items():
255 api_token = get_secret('services.token.%s' % name)
256 # jupyterhub.services is a list of dicts, but
257 # in the helm chart it is a dict of dicts for easier merged-config
258 service.setdefault('name', name)
259 if api_token:
260 service['api_token'] = api_token
261 c.JupyterHub.services.append(service)
262
263
264 c.JupyterHub.db_url = get_config('hub.db_url')
265
266 cmd = get_config('singleuser.cmd', None)
267 if cmd:
268 c.Spawner.cmd = cmd
269
270
271 scheduler_strategy = get_config('singleuser.scheduler-strategy', 'spread')
272
273 if scheduler_strategy == 'pack':
274 # FIXME: Support setting affinity directly in KubeSpawner
275 c.KubeSpawner.singleuser_extra_pod_config = {
276 'affinity': {
277 'podAffinity': {
278 'preferredDuringSchedulingIgnoredDuringExecution': [{
279 'weight': 100,
280 'podAffinityTerm': {
281 'labelSelector': {
282 'matchExpressions': [{
283 'key': 'component',
284 'operator': 'In',
285 'values': ['singleuser-server']
286 }]
287 },
288 'topologyKey': 'kubernetes.io/hostname'
289 }
290 }],
291 }
292 }
293 }
294 else:
295 # Set default to {} so subconfigs can easily update it
296 c.KubeSpawner.singleuser_extra_pod_config = {}
297
298 extra_config_path = '/etc/jupyterhub/config/hub.extra-config.py'
299 if os.path.exists(extra_config_path):
300 load_subconfig(extra_config_path)
301
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/images/hub/jupyterhub_config.py b/images/hub/jupyterhub_config.py
--- a/images/hub/jupyterhub_config.py
+++ b/images/hub/jupyterhub_config.py
@@ -1,4 +1,5 @@
import os
+import glob
import sys
import yaml
from tornado.httpclient import AsyncHTTPClient
@@ -295,6 +296,6 @@
# Set default to {} so subconfigs can easily update it
c.KubeSpawner.singleuser_extra_pod_config = {}
-extra_config_path = '/etc/jupyterhub/config/hub.extra-config.py'
-if os.path.exists(extra_config_path):
- load_subconfig(extra_config_path)
+extra_configs = sorted(glob.glob('/etc/jupyterhub/config/hub.extra-config.*.py'))
+for ec in extra_configs:
+ load_subconfig(ec)
| {"golden_diff": "diff --git a/images/hub/jupyterhub_config.py b/images/hub/jupyterhub_config.py\n--- a/images/hub/jupyterhub_config.py\n+++ b/images/hub/jupyterhub_config.py\n@@ -1,4 +1,5 @@\n import os\n+import glob\n import sys\n import yaml\n from tornado.httpclient import AsyncHTTPClient\n@@ -295,6 +296,6 @@\n # Set default to {} so subconfigs can easily update it\n c.KubeSpawner.singleuser_extra_pod_config = {}\n \n-extra_config_path = '/etc/jupyterhub/config/hub.extra-config.py'\n-if os.path.exists(extra_config_path):\n- load_subconfig(extra_config_path)\n+extra_configs = sorted(glob.glob('/etc/jupyterhub/config/hub.extra-config.*.py'))\n+for ec in extra_configs:\n+ load_subconfig(ec)\n", "issue": "Allow multiple extraConfig\nBinder relies on `hub.extraConfig` to configure the Spawner. If we want to customize a given Binder *deployment* via `hub.extraConfig`, we have to copy/paste the hub.extraConfig from the binderhub chart and add to it.\r\n\r\nIt would be handy if extraConfig were something that could be added to without having to copy the previous value. Are dicts the only structure that can do this?\n", "before_files": [{"content": "import os\nimport sys\nimport yaml\nfrom tornado.httpclient import AsyncHTTPClient\n\ndef get_config(key, default=None):\n \"\"\"\n Find a config item of a given name & return it\n\n Parses everything as YAML, so lists and dicts are available too\n \"\"\"\n path = os.path.join('/etc/jupyterhub/config', key)\n try:\n with open(path) as f:\n data = yaml.safe_load(f)\n return data\n except FileNotFoundError:\n return default\n\ndef get_secret(key, default=None):\n \"\"\"Get a secret from /etc/jupyterhub/secret\"\"\"\n path = os.path.join('/etc/jupyterhub/secret', key)\n try:\n with open(path) as f:\n return f.read().strip()\n except FileNotFoundError:\n return default\n\n\n# Configure JupyterHub to use the curl backend for making HTTP requests,\n# rather than the pure-python implementations. 
The default one starts\n# being too slow to make a large number of requests to the proxy API\n# at the rate required.\nAsyncHTTPClient.configure(\"tornado.curl_httpclient.CurlAsyncHTTPClient\")\n\nc.JupyterHub.spawner_class = 'kubespawner.KubeSpawner'\n\n# Connect to a proxy running in a different pod\nc.ConfigurableHTTPProxy.api_url = 'http://{}:{}'.format(os.environ['PROXY_API_SERVICE_HOST'], int(os.environ['PROXY_API_SERVICE_PORT']))\nc.ConfigurableHTTPProxy.should_start = False\n\n# Do not shut down user pods when hub is restarted\nc.JupyterHub.cleanup_servers = False\n\n# Check that the proxy has routes appropriately setup\n# This isn't the best named setting :D\nc.JupyterHub.last_activity_interval = 60\n\n# Max number of servers that can be spawning at any one time\nc.JupyterHub.concurrent_spawn_limit = get_config('hub.concurrent-spawn-limit')\n\nactive_server_limit = get_config('hub.active-server-limit', None)\n\nif active_server_limit is not None:\n c.JupyterHub.active_server_limit = int(active_server_limit)\n\nc.JupyterHub.ip = os.environ['PROXY_PUBLIC_SERVICE_HOST']\nc.JupyterHub.port = int(os.environ['PROXY_PUBLIC_SERVICE_PORT'])\n\n# the hub should listen on all interfaces, so the proxy can access it\nc.JupyterHub.hub_ip = '0.0.0.0'\n\nc.KubeSpawner.namespace = os.environ.get('POD_NAMESPACE', 'default')\n\nc.KubeSpawner.start_timeout = get_config('singleuser.start-timeout')\n\n# Use env var for this, since we want hub to restart when this changes\nc.KubeSpawner.singleuser_image_spec = os.environ['SINGLEUSER_IMAGE']\n\nc.KubeSpawner.singleuser_extra_labels = get_config('singleuser.extra-labels', {})\n\nc.KubeSpawner.singleuser_uid = get_config('singleuser.uid')\nc.KubeSpawner.singleuser_fs_gid = get_config('singleuser.fs-gid')\n\nc.KubeSpawner.singleuser_node_selector = get_config('singleuser.node-selector')\n# Configure dynamically provisioning pvc\nstorage_type = get_config('singleuser.storage.type')\nif storage_type == 'dynamic':\n c.KubeSpawner.pvc_name_template = 'claim-{username}{servername}'\n c.KubeSpawner.user_storage_pvc_ensure = True\n storage_class = get_config('singleuser.storage.dynamic.storage-class', None)\n if storage_class:\n c.KubeSpawner.user_storage_class = storage_class\n c.KubeSpawner.user_storage_access_modes = ['ReadWriteOnce']\n c.KubeSpawner.user_storage_capacity = get_config('singleuser.storage.capacity')\n\n # Add volumes to singleuser pods\n c.KubeSpawner.volumes = [\n {\n 'name': 'volume-{username}{servername}',\n 'persistentVolumeClaim': {\n 'claimName': 'claim-{username}{servername}'\n }\n }\n ]\n c.KubeSpawner.volume_mounts = [\n {\n 'mountPath': get_config('singleuser.storage.home_mount_path'),\n 'name': 'volume-{username}{servername}'\n }\n ]\nelif storage_type == 'static':\n pvc_claim_name = get_config('singleuser.storage.static.pvc-name')\n c.KubeSpawner.volumes = [{\n 'name': 'home',\n 'persistentVolumeClaim': {\n 'claimName': pvc_claim_name\n }\n }]\n\n c.KubeSpawner.volume_mounts = [{\n 'mountPath': get_config('singleuser.storage.home_mount_path'),\n 'name': 'home',\n 'subPath': get_config('singleuser.storage.static.sub-path')\n }]\n\nc.KubeSpawner.volumes.extend(get_config('singleuser.storage.extra-volumes', []))\nc.KubeSpawner.volume_mounts.extend(get_config('singleuser.storage.extra-volume-mounts', []))\n\nlifecycle_hooks = get_config('singleuser.lifecycle-hooks')\nif lifecycle_hooks:\n c.KubeSpawner.singleuser_lifecycle_hooks = lifecycle_hooks\n\ninit_containers = get_config('singleuser.init-containers')\nif init_containers:\n 
c.KubeSpawner.singleuser_init_containers = init_containers\n\n# Gives spawned containers access to the API of the hub\nc.KubeSpawner.hub_connect_ip = os.environ['HUB_SERVICE_HOST']\nc.KubeSpawner.hub_connect_port = int(os.environ['HUB_SERVICE_PORT'])\n\nc.JupyterHub.hub_connect_ip = os.environ['HUB_SERVICE_HOST']\nc.JupyterHub.hub_connect_port = int(os.environ['HUB_SERVICE_PORT'])\n\nc.KubeSpawner.mem_limit = get_config('singleuser.memory.limit')\nc.KubeSpawner.mem_guarantee = get_config('singleuser.memory.guarantee')\nc.KubeSpawner.cpu_limit = get_config('singleuser.cpu.limit')\nc.KubeSpawner.cpu_guarantee = get_config('singleuser.cpu.guarantee')\n\n# Allow switching authenticators easily\nauth_type = get_config('auth.type')\nemail_domain = 'local'\n\nif auth_type == 'google':\n c.JupyterHub.authenticator_class = 'oauthenticator.GoogleOAuthenticator'\n c.GoogleOAuthenticator.client_id = get_config('auth.google.client-id')\n c.GoogleOAuthenticator.client_secret = get_config('auth.google.client-secret')\n c.GoogleOAuthenticator.oauth_callback_url = get_config('auth.google.callback-url')\n c.GoogleOAuthenticator.hosted_domain = get_config('auth.google.hosted-domain')\n c.GoogleOAuthenticator.login_service = get_config('auth.google.login-service')\n email_domain = get_config('auth.google.hosted-domain')\nelif auth_type == 'github':\n c.JupyterHub.authenticator_class = 'oauthenticator.GitHubOAuthenticator'\n c.GitHubOAuthenticator.oauth_callback_url = get_config('auth.github.callback-url')\n c.GitHubOAuthenticator.client_id = get_config('auth.github.client-id')\n c.GitHubOAuthenticator.client_secret = get_config('auth.github.client-secret')\nelif auth_type == 'cilogon':\n c.JupyterHub.authenticator_class = 'oauthenticator.CILogonOAuthenticator'\n c.CILogonOAuthenticator.oauth_callback_url = get_config('auth.cilogon.callback-url')\n c.CILogonOAuthenticator.client_id = get_config('auth.cilogon.client-id')\n c.CILogonOAuthenticator.client_secret = get_config('auth.cilogon.client-secret')\nelif auth_type == 'gitlab':\n c.JupyterHub.authenticator_class = 'oauthenticator.gitlab.GitLabOAuthenticator'\n c.GitLabOAuthenticator.oauth_callback_url = get_config('auth.gitlab.callback-url')\n c.GitLabOAuthenticator.client_id = get_config('auth.gitlab.client-id')\n c.GitLabOAuthenticator.client_secret = get_config('auth.gitlab.client-secret')\nelif auth_type == 'mediawiki':\n c.JupyterHub.authenticator_class = 'oauthenticator.mediawiki.MWOAuthenticator'\n c.MWOAuthenticator.client_id = get_config('auth.mediawiki.client-id')\n c.MWOAuthenticator.client_secret = get_config('auth.mediawiki.client-secret')\n c.MWOAuthenticator.index_url = get_config('auth.mediawiki.index-url')\nelif auth_type == 'globus':\n c.JupyterHub.authenticator_class = 'oauthenticator.globus.GlobusOAuthenticator'\n c.GlobusOAuthenticator.oauth_callback_url = get_config('auth.globus.callback-url')\n c.GlobusOAuthenticator.client_id = get_config('auth.globus.client-id')\n c.GlobusOAuthenticator.client_secret = get_config('auth.globus.client-secret')\n c.GlobusOAuthenticator.identity_provider = get_config('auth.globus.identity-provider', '')\nelif auth_type == 'hmac':\n c.JupyterHub.authenticator_class = 'hmacauthenticator.HMACAuthenticator'\n c.HMACAuthenticator.secret_key = bytes.fromhex(get_config('auth.hmac.secret-key'))\nelif auth_type == 'dummy':\n c.JupyterHub.authenticator_class = 'dummyauthenticator.DummyAuthenticator'\n c.DummyAuthenticator.password = get_config('auth.dummy.password', None)\nelif auth_type == 'tmp':\n 
c.JupyterHub.authenticator_class = 'tmpauthenticator.TmpAuthenticator'\nelif auth_type == 'lti':\n c.JupyterHub.authenticator_class = 'ltiauthenticator.LTIAuthenticator'\n c.LTIAuthenticator.consumers = get_config('auth.lti.consumers')\nelif auth_type == 'custom':\n # full_class_name looks like \"myauthenticator.MyAuthenticator\".\n # To create a docker image with this class availabe, you can just have the\n # following Dockerifle:\n # FROM jupyterhub/k8s-hub:v0.4\n # RUN pip3 install myauthenticator\n full_class_name = get_config('auth.custom.class-name')\n c.JupyterHub.authenticator_class = full_class_name\n auth_class_name = full_class_name.rsplit('.', 1)[-1]\n auth_config = c[auth_class_name]\n auth_config.update(get_config('auth.custom.config') or {})\nelse:\n raise ValueError(\"Unhandled auth type: %r\" % auth_type)\n\nc.Authenticator.enable_auth_state = get_config('auth.state.enabled', False)\n\ndef generate_user_email(spawner):\n \"\"\"\n Used as the EMAIL environment variable\n \"\"\"\n return '{username}@{domain}'.format(\n username=spawner.user.name, domain=email_domain\n )\n\ndef generate_user_name(spawner):\n \"\"\"\n Used as GIT_AUTHOR_NAME and GIT_COMMITTER_NAME environment variables\n \"\"\"\n return spawner.user.name\n\nc.KubeSpawner.environment = {\n 'EMAIL': generate_user_email,\n # git requires these committer attributes\n 'GIT_AUTHOR_NAME': generate_user_name,\n 'GIT_COMMITTER_NAME': generate_user_name\n}\n\nc.KubeSpawner.environment.update(get_config('singleuser.extra-env', {}))\n\n# Enable admins to access user servers\nc.JupyterHub.admin_access = get_config('auth.admin.access')\nc.Authenticator.admin_users = get_config('auth.admin.users', [])\nc.Authenticator.whitelist = get_config('auth.whitelist.users', [])\n\nc.JupyterHub.base_url = get_config('hub.base_url')\n\nc.JupyterHub.services = []\n\nif get_config('cull.enabled', False):\n cull_timeout = get_config('cull.timeout')\n cull_every = get_config('cull.every')\n cull_cmd = [\n '/usr/local/bin/cull_idle_servers.py',\n '--timeout=%s' % cull_timeout,\n '--cull-every=%s' % cull_every,\n '--url=http://127.0.0.1:8081' + c.JupyterHub.base_url + 'hub/api'\n ]\n if get_config('cull.users'):\n cull_cmd.append('--cull-users')\n c.JupyterHub.services.append({\n 'name': 'cull-idle',\n 'admin': True,\n 'command': cull_cmd,\n })\n\nfor name, service in get_config('hub.services', {}).items():\n api_token = get_secret('services.token.%s' % name)\n # jupyterhub.services is a list of dicts, but\n # in the helm chart it is a dict of dicts for easier merged-config\n service.setdefault('name', name)\n if api_token:\n service['api_token'] = api_token\n c.JupyterHub.services.append(service)\n\n\nc.JupyterHub.db_url = get_config('hub.db_url')\n\ncmd = get_config('singleuser.cmd', None)\nif cmd:\n c.Spawner.cmd = cmd\n\n\nscheduler_strategy = get_config('singleuser.scheduler-strategy', 'spread')\n\nif scheduler_strategy == 'pack':\n # FIXME: Support setting affinity directly in KubeSpawner\n c.KubeSpawner.singleuser_extra_pod_config = {\n 'affinity': {\n 'podAffinity': {\n 'preferredDuringSchedulingIgnoredDuringExecution': [{\n 'weight': 100,\n 'podAffinityTerm': {\n 'labelSelector': {\n 'matchExpressions': [{\n 'key': 'component',\n 'operator': 'In',\n 'values': ['singleuser-server']\n }]\n },\n 'topologyKey': 'kubernetes.io/hostname'\n }\n }],\n }\n }\n }\nelse:\n # Set default to {} so subconfigs can easily update it\n c.KubeSpawner.singleuser_extra_pod_config = {}\n\nextra_config_path = 
'/etc/jupyterhub/config/hub.extra-config.py'\nif os.path.exists(extra_config_path):\n load_subconfig(extra_config_path)\n", "path": "images/hub/jupyterhub_config.py"}], "after_files": [{"content": "import os\nimport glob\nimport sys\nimport yaml\nfrom tornado.httpclient import AsyncHTTPClient\n\ndef get_config(key, default=None):\n \"\"\"\n Find a config item of a given name & return it\n\n Parses everything as YAML, so lists and dicts are available too\n \"\"\"\n path = os.path.join('/etc/jupyterhub/config', key)\n try:\n with open(path) as f:\n data = yaml.safe_load(f)\n return data\n except FileNotFoundError:\n return default\n\ndef get_secret(key, default=None):\n \"\"\"Get a secret from /etc/jupyterhub/secret\"\"\"\n path = os.path.join('/etc/jupyterhub/secret', key)\n try:\n with open(path) as f:\n return f.read().strip()\n except FileNotFoundError:\n return default\n\n\n# Configure JupyterHub to use the curl backend for making HTTP requests,\n# rather than the pure-python implementations. The default one starts\n# being too slow to make a large number of requests to the proxy API\n# at the rate required.\nAsyncHTTPClient.configure(\"tornado.curl_httpclient.CurlAsyncHTTPClient\")\n\nc.JupyterHub.spawner_class = 'kubespawner.KubeSpawner'\n\n# Connect to a proxy running in a different pod\nc.ConfigurableHTTPProxy.api_url = 'http://{}:{}'.format(os.environ['PROXY_API_SERVICE_HOST'], int(os.environ['PROXY_API_SERVICE_PORT']))\nc.ConfigurableHTTPProxy.should_start = False\n\n# Do not shut down user pods when hub is restarted\nc.JupyterHub.cleanup_servers = False\n\n# Check that the proxy has routes appropriately setup\n# This isn't the best named setting :D\nc.JupyterHub.last_activity_interval = 60\n\n# Max number of servers that can be spawning at any one time\nc.JupyterHub.concurrent_spawn_limit = get_config('hub.concurrent-spawn-limit')\n\nactive_server_limit = get_config('hub.active-server-limit', None)\n\nif active_server_limit is not None:\n c.JupyterHub.active_server_limit = int(active_server_limit)\n\nc.JupyterHub.ip = os.environ['PROXY_PUBLIC_SERVICE_HOST']\nc.JupyterHub.port = int(os.environ['PROXY_PUBLIC_SERVICE_PORT'])\n\n# the hub should listen on all interfaces, so the proxy can access it\nc.JupyterHub.hub_ip = '0.0.0.0'\n\nc.KubeSpawner.namespace = os.environ.get('POD_NAMESPACE', 'default')\n\nc.KubeSpawner.start_timeout = get_config('singleuser.start-timeout')\n\n# Use env var for this, since we want hub to restart when this changes\nc.KubeSpawner.singleuser_image_spec = os.environ['SINGLEUSER_IMAGE']\n\nc.KubeSpawner.singleuser_extra_labels = get_config('singleuser.extra-labels', {})\n\nc.KubeSpawner.singleuser_uid = get_config('singleuser.uid')\nc.KubeSpawner.singleuser_fs_gid = get_config('singleuser.fs-gid')\n\nc.KubeSpawner.singleuser_node_selector = get_config('singleuser.node-selector')\n# Configure dynamically provisioning pvc\nstorage_type = get_config('singleuser.storage.type')\nif storage_type == 'dynamic':\n c.KubeSpawner.pvc_name_template = 'claim-{username}{servername}'\n c.KubeSpawner.user_storage_pvc_ensure = True\n storage_class = get_config('singleuser.storage.dynamic.storage-class', None)\n if storage_class:\n c.KubeSpawner.user_storage_class = storage_class\n c.KubeSpawner.user_storage_access_modes = ['ReadWriteOnce']\n c.KubeSpawner.user_storage_capacity = get_config('singleuser.storage.capacity')\n\n # Add volumes to singleuser pods\n c.KubeSpawner.volumes = [\n {\n 'name': 'volume-{username}{servername}',\n 'persistentVolumeClaim': {\n 'claimName': 
'claim-{username}{servername}'\n }\n }\n ]\n c.KubeSpawner.volume_mounts = [\n {\n 'mountPath': get_config('singleuser.storage.home_mount_path'),\n 'name': 'volume-{username}{servername}'\n }\n ]\nelif storage_type == 'static':\n pvc_claim_name = get_config('singleuser.storage.static.pvc-name')\n c.KubeSpawner.volumes = [{\n 'name': 'home',\n 'persistentVolumeClaim': {\n 'claimName': pvc_claim_name\n }\n }]\n\n c.KubeSpawner.volume_mounts = [{\n 'mountPath': get_config('singleuser.storage.home_mount_path'),\n 'name': 'home',\n 'subPath': get_config('singleuser.storage.static.sub-path')\n }]\n\nc.KubeSpawner.volumes.extend(get_config('singleuser.storage.extra-volumes', []))\nc.KubeSpawner.volume_mounts.extend(get_config('singleuser.storage.extra-volume-mounts', []))\n\nlifecycle_hooks = get_config('singleuser.lifecycle-hooks')\nif lifecycle_hooks:\n c.KubeSpawner.singleuser_lifecycle_hooks = lifecycle_hooks\n\ninit_containers = get_config('singleuser.init-containers')\nif init_containers:\n c.KubeSpawner.singleuser_init_containers = init_containers\n\n# Gives spawned containers access to the API of the hub\nc.KubeSpawner.hub_connect_ip = os.environ['HUB_SERVICE_HOST']\nc.KubeSpawner.hub_connect_port = int(os.environ['HUB_SERVICE_PORT'])\n\nc.JupyterHub.hub_connect_ip = os.environ['HUB_SERVICE_HOST']\nc.JupyterHub.hub_connect_port = int(os.environ['HUB_SERVICE_PORT'])\n\nc.KubeSpawner.mem_limit = get_config('singleuser.memory.limit')\nc.KubeSpawner.mem_guarantee = get_config('singleuser.memory.guarantee')\nc.KubeSpawner.cpu_limit = get_config('singleuser.cpu.limit')\nc.KubeSpawner.cpu_guarantee = get_config('singleuser.cpu.guarantee')\n\n# Allow switching authenticators easily\nauth_type = get_config('auth.type')\nemail_domain = 'local'\n\nif auth_type == 'google':\n c.JupyterHub.authenticator_class = 'oauthenticator.GoogleOAuthenticator'\n c.GoogleOAuthenticator.client_id = get_config('auth.google.client-id')\n c.GoogleOAuthenticator.client_secret = get_config('auth.google.client-secret')\n c.GoogleOAuthenticator.oauth_callback_url = get_config('auth.google.callback-url')\n c.GoogleOAuthenticator.hosted_domain = get_config('auth.google.hosted-domain')\n c.GoogleOAuthenticator.login_service = get_config('auth.google.login-service')\n email_domain = get_config('auth.google.hosted-domain')\nelif auth_type == 'github':\n c.JupyterHub.authenticator_class = 'oauthenticator.GitHubOAuthenticator'\n c.GitHubOAuthenticator.oauth_callback_url = get_config('auth.github.callback-url')\n c.GitHubOAuthenticator.client_id = get_config('auth.github.client-id')\n c.GitHubOAuthenticator.client_secret = get_config('auth.github.client-secret')\nelif auth_type == 'cilogon':\n c.JupyterHub.authenticator_class = 'oauthenticator.CILogonOAuthenticator'\n c.CILogonOAuthenticator.oauth_callback_url = get_config('auth.cilogon.callback-url')\n c.CILogonOAuthenticator.client_id = get_config('auth.cilogon.client-id')\n c.CILogonOAuthenticator.client_secret = get_config('auth.cilogon.client-secret')\nelif auth_type == 'gitlab':\n c.JupyterHub.authenticator_class = 'oauthenticator.gitlab.GitLabOAuthenticator'\n c.GitLabOAuthenticator.oauth_callback_url = get_config('auth.gitlab.callback-url')\n c.GitLabOAuthenticator.client_id = get_config('auth.gitlab.client-id')\n c.GitLabOAuthenticator.client_secret = get_config('auth.gitlab.client-secret')\nelif auth_type == 'mediawiki':\n c.JupyterHub.authenticator_class = 'oauthenticator.mediawiki.MWOAuthenticator'\n c.MWOAuthenticator.client_id = 
get_config('auth.mediawiki.client-id')\n c.MWOAuthenticator.client_secret = get_config('auth.mediawiki.client-secret')\n c.MWOAuthenticator.index_url = get_config('auth.mediawiki.index-url')\nelif auth_type == 'globus':\n c.JupyterHub.authenticator_class = 'oauthenticator.globus.GlobusOAuthenticator'\n c.GlobusOAuthenticator.oauth_callback_url = get_config('auth.globus.callback-url')\n c.GlobusOAuthenticator.client_id = get_config('auth.globus.client-id')\n c.GlobusOAuthenticator.client_secret = get_config('auth.globus.client-secret')\n c.GlobusOAuthenticator.identity_provider = get_config('auth.globus.identity-provider', '')\nelif auth_type == 'hmac':\n c.JupyterHub.authenticator_class = 'hmacauthenticator.HMACAuthenticator'\n c.HMACAuthenticator.secret_key = bytes.fromhex(get_config('auth.hmac.secret-key'))\nelif auth_type == 'dummy':\n c.JupyterHub.authenticator_class = 'dummyauthenticator.DummyAuthenticator'\n c.DummyAuthenticator.password = get_config('auth.dummy.password', None)\nelif auth_type == 'tmp':\n c.JupyterHub.authenticator_class = 'tmpauthenticator.TmpAuthenticator'\nelif auth_type == 'lti':\n c.JupyterHub.authenticator_class = 'ltiauthenticator.LTIAuthenticator'\n c.LTIAuthenticator.consumers = get_config('auth.lti.consumers')\nelif auth_type == 'custom':\n # full_class_name looks like \"myauthenticator.MyAuthenticator\".\n # To create a docker image with this class availabe, you can just have the\n # following Dockerifle:\n # FROM jupyterhub/k8s-hub:v0.4\n # RUN pip3 install myauthenticator\n full_class_name = get_config('auth.custom.class-name')\n c.JupyterHub.authenticator_class = full_class_name\n auth_class_name = full_class_name.rsplit('.', 1)[-1]\n auth_config = c[auth_class_name]\n auth_config.update(get_config('auth.custom.config') or {})\nelse:\n raise ValueError(\"Unhandled auth type: %r\" % auth_type)\n\nc.Authenticator.enable_auth_state = get_config('auth.state.enabled', False)\n\ndef generate_user_email(spawner):\n \"\"\"\n Used as the EMAIL environment variable\n \"\"\"\n return '{username}@{domain}'.format(\n username=spawner.user.name, domain=email_domain\n )\n\ndef generate_user_name(spawner):\n \"\"\"\n Used as GIT_AUTHOR_NAME and GIT_COMMITTER_NAME environment variables\n \"\"\"\n return spawner.user.name\n\nc.KubeSpawner.environment = {\n 'EMAIL': generate_user_email,\n # git requires these committer attributes\n 'GIT_AUTHOR_NAME': generate_user_name,\n 'GIT_COMMITTER_NAME': generate_user_name\n}\n\nc.KubeSpawner.environment.update(get_config('singleuser.extra-env', {}))\n\n# Enable admins to access user servers\nc.JupyterHub.admin_access = get_config('auth.admin.access')\nc.Authenticator.admin_users = get_config('auth.admin.users', [])\nc.Authenticator.whitelist = get_config('auth.whitelist.users', [])\n\nc.JupyterHub.base_url = get_config('hub.base_url')\n\nc.JupyterHub.services = []\n\nif get_config('cull.enabled', False):\n cull_timeout = get_config('cull.timeout')\n cull_every = get_config('cull.every')\n cull_cmd = [\n '/usr/local/bin/cull_idle_servers.py',\n '--timeout=%s' % cull_timeout,\n '--cull-every=%s' % cull_every,\n '--url=http://127.0.0.1:8081' + c.JupyterHub.base_url + 'hub/api'\n ]\n if get_config('cull.users'):\n cull_cmd.append('--cull-users')\n c.JupyterHub.services.append({\n 'name': 'cull-idle',\n 'admin': True,\n 'command': cull_cmd,\n })\n\nfor name, service in get_config('hub.services', {}).items():\n api_token = get_secret('services.token.%s' % name)\n # jupyterhub.services is a list of dicts, but\n # in the helm chart it is 
a dict of dicts for easier merged-config\n service.setdefault('name', name)\n if api_token:\n service['api_token'] = api_token\n c.JupyterHub.services.append(service)\n\n\nc.JupyterHub.db_url = get_config('hub.db_url')\n\ncmd = get_config('singleuser.cmd', None)\nif cmd:\n c.Spawner.cmd = cmd\n\n\nscheduler_strategy = get_config('singleuser.scheduler-strategy', 'spread')\n\nif scheduler_strategy == 'pack':\n # FIXME: Support setting affinity directly in KubeSpawner\n c.KubeSpawner.singleuser_extra_pod_config = {\n 'affinity': {\n 'podAffinity': {\n 'preferredDuringSchedulingIgnoredDuringExecution': [{\n 'weight': 100,\n 'podAffinityTerm': {\n 'labelSelector': {\n 'matchExpressions': [{\n 'key': 'component',\n 'operator': 'In',\n 'values': ['singleuser-server']\n }]\n },\n 'topologyKey': 'kubernetes.io/hostname'\n }\n }],\n }\n }\n }\nelse:\n # Set default to {} so subconfigs can easily update it\n c.KubeSpawner.singleuser_extra_pod_config = {}\n\nextra_configs = sorted(glob.glob('/etc/jupyterhub/config/hub.extra-config.*.py'))\nfor ec in extra_configs:\n load_subconfig(ec)\n", "path": "images/hub/jupyterhub_config.py"}]} | 4,069 | 187 |
gh_patches_debug_14239 | rasdani/github-patches | git_diff | RUCAIBox__RecBole-692 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[🐛BUG] In case_study.py, full_sort_topk raises an error when only a single user id is given
Code
```python3
import torch
import pandas as pd
from recbole.model.general_recommender.bpr import BPR
from recbole.config import Config
from recbole.data import create_dataset, data_preparation
from recbole.utils.case_study import full_sort_topk
param_dict = {
'use_gpu': False
}
# Load the BPR model
bpr_model_path = "D:\\source\\recbole-0.2.0\\app\\ex\\saved\\BPR-Jan-18-2021_14-03-52.pth"
bpr_config = Config(model='BPR',
dataset='ml-100k',
config_dict=param_dict)
dataset = create_dataset(bpr_config)
train_data, valid_data, test_data = data_preparation(bpr_config, dataset)
bpr_model = BPR(bpr_config, train_data)
checkpoint = torch.load(bpr_model_path)
bpr_model.load_state_dict(checkpoint['state_dict'])
bpr_model.eval()
uid_series = dataset.token2id(dataset.uid_field, ['200'])  # map the raw user id from the original dataset to the internal index id used during training
full_sort_topk(uid_series, bpr_model, test_data, 10)
```
Error message
Traceback (most recent call last):
File "D:/source/recbole-0.2.0/app/ex/bpr_predict_ml100k.py", line 33, in <module>
full_sort_topk(uid_series, bpr_model, test_data, 10)
File "D:\source\recbole-0.2.0\recbole\utils\case_study.py", line 87, in full_sort_topk
scores = full_sort_scores(uid_series, model, test_data)
File "D:\Anaconda3\envs\pytorch\lib\site-packages\torch\autograd\grad_mode.py", line 26, in decorate_context
return func(*args, **kwargs)
File "D:\source\recbole-0.2.0\recbole\utils\case_study.py", line 45, in full_sort_scores
history_row = torch.cat([torch.full_like(hist_iid, i) for i, hist_iid in enumerate(history_item)])
RuntimeError: zero-dimensional tensor (at position 0) cannot be concatenated
--- END ISSUE ---
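For reference while reading the files below: the `RuntimeError` is PyTorch refusing to concatenate 0-dimensional tensors, which is what `torch.full_like(hist_iid, i)` produces when the per-user history arrives as a single tensor and `enumerate` iterates over its scalar elements. A minimal, self-contained reproduction and one defensive shape-handling pattern follow; this is an illustration of the failure mode, not RecBole's actual patch.
```python
import torch

# Reproduce the error: iterating a 1-d tensor yields 0-d scalars, and
# torch.cat refuses zero-dimensional inputs.
hist = torch.tensor([3, 7, 9])                    # history item ids of one user
rows = [torch.full_like(h, 0) for h in hist]      # each h is a 0-d tensor here
try:
    torch.cat(rows)
except RuntimeError as err:
    print(err)  # zero-dimensional tensor (at position 0) cannot be concatenated

# Defensive pattern (assumed for illustration): always treat the history as a
# sequence of 1-d tensors, one per requested user, even for a single user id.
history_item = [hist]
history_row = torch.cat([torch.full_like(h, i) for i, h in enumerate(history_item)])
history_col = torch.cat(list(history_item))
print(history_row, history_col)                   # tensor([0, 0, 0]) tensor([3, 7, 9])
```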
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `recbole/utils/case_study.py`
Content:
```
1 # @Time : 2020/12/25
2 # @Author : Yushuo Chen
3 # @Email : [email protected]
4
5 # UPDATE
6 # @Time : 2020/12/25
7 # @Author : Yushuo Chen
8 # @email : [email protected]
9
10 """
11 recbole.utils.case_study
12 #####################################
13 """
14
15 import numpy as np
16 import torch
17
18 from recbole.data.dataloader.general_dataloader import GeneralFullDataLoader
19 from recbole.data.dataloader.sequential_dataloader import SequentialFullDataLoader
20
21
22 @torch.no_grad()
23 def full_sort_scores(uid_series, model, test_data):
24 """Calculate the scores of all items for each user in uid_series.
25
26 Note:
27 The score of [pad] and history items will be set into -inf.
28
29 Args:
30 uid_series (numpy.ndarray): User id series
31 model (AbstractRecommender): Model to predict
32 test_data (AbstractDataLoader): The test_data of model
33
34 Returns:
35 torch.Tensor: the scores of all items for each user in uid_series.
36 """
37 uid_field = test_data.dataset.uid_field
38 dataset = test_data.dataset
39 model.eval()
40
41 if isinstance(test_data, GeneralFullDataLoader):
42 index = np.isin(test_data.user_df[uid_field].numpy(), uid_series)
43 input_interaction = test_data.user_df[index]
44 history_item = test_data.uid2history_item[input_interaction[uid_field]]
45 history_row = torch.cat([torch.full_like(hist_iid, i) for i, hist_iid in enumerate(history_item)])
46 history_col = torch.cat(list(history_item))
47 history_index = history_row, history_col
48 elif isinstance(test_data, SequentialFullDataLoader):
49 index = np.isin(test_data.uid_list, uid_series)
50 input_interaction = test_data.augmentation(
51 test_data.item_list_index[index], test_data.target_index[index], test_data.item_list_length[index]
52 )
53 history_index = None
54 else:
55 raise NotImplementedError
56
57 # Get scores of all items
58 try:
59 scores = model.full_sort_predict(input_interaction)
60 except NotImplementedError:
61 input_interaction = input_interaction.repeat(dataset.item_num)
62 input_interaction.update(test_data.get_item_feature().repeat(len(uid_series)))
63 scores = model.predict(input_interaction)
64
65 scores = scores.view(-1, dataset.item_num)
66 scores[:, 0] = -np.inf # set scores of [pad] to -inf
67 if history_index is not None:
68 scores[history_index] = -np.inf # set scores of history items to -inf
69
70 return scores
71
72
73 def full_sort_topk(uid_series, model, test_data, k):
74 """Calculate the top-k items' scores and ids for each user in uid_series.
75
76 Args:
77 uid_series (numpy.ndarray): User id series
78 model (AbstractRecommender): Model to predict
79 test_data (AbstractDataLoader): The test_data of model
80 k (int): The top-k items.
81
82 Returns:
83 tuple:
84 - topk_scores (torch.Tensor): The scores of topk items.
85 - topk_index (torch.Tensor): The index of topk items, which is also the internal ids of items.
86 """
87 scores = full_sort_scores(uid_series, model, test_data)
88 return torch.topk(scores, k)
89
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/recbole/utils/case_study.py b/recbole/utils/case_study.py
--- a/recbole/utils/case_study.py
+++ b/recbole/utils/case_study.py
@@ -41,7 +41,7 @@
if isinstance(test_data, GeneralFullDataLoader):
index = np.isin(test_data.user_df[uid_field].numpy(), uid_series)
input_interaction = test_data.user_df[index]
- history_item = test_data.uid2history_item[input_interaction[uid_field]]
+ history_item = test_data.uid2history_item[input_interaction[uid_field].numpy()]
history_row = torch.cat([torch.full_like(hist_iid, i) for i, hist_iid in enumerate(history_item)])
history_col = torch.cat(list(history_item))
history_index = history_row, history_col
| {"golden_diff": "diff --git a/recbole/utils/case_study.py b/recbole/utils/case_study.py\n--- a/recbole/utils/case_study.py\n+++ b/recbole/utils/case_study.py\n@@ -41,7 +41,7 @@\n if isinstance(test_data, GeneralFullDataLoader):\n index = np.isin(test_data.user_df[uid_field].numpy(), uid_series)\n input_interaction = test_data.user_df[index]\n- history_item = test_data.uid2history_item[input_interaction[uid_field]]\n+ history_item = test_data.uid2history_item[input_interaction[uid_field].numpy()]\n history_row = torch.cat([torch.full_like(hist_iid, i) for i, hist_iid in enumerate(history_item)])\n history_col = torch.cat(list(history_item))\n history_index = history_row, history_col\n", "issue": "[\ud83d\udc1bBUG] case_study.py \u4e2d\uff0c \u8f93\u5165\u7684\u7528\u6237id\u53ea\u6709\u4e00\u4e2a\u65f6\uff0c full_sort_topk \u62a5\u9519\n\u4ee3\u7801\r\n```python3\r\nimport torch\r\nimport pandas as pd\r\n\r\nfrom recbole.model.general_recommender.bpr import BPR\r\nfrom recbole.config import Config\r\nfrom recbole.data import create_dataset, data_preparation\r\nfrom recbole.utils.case_study import full_sort_topk\r\n\r\nparam_dict = {\r\n 'use_gpu': False\r\n}\r\n\r\n# \u52a0\u8f7d BPR \u6a21\u578b\r\nbpr_model_path = \"D:\\\\source\\\\recbole-0.2.0\\\\app\\\\ex\\\\saved\\\\BPR-Jan-18-2021_14-03-52.pth\"\r\nbpr_config = Config(model='BPR',\r\n dataset='ml-100k',\r\n config_dict=param_dict)\r\ndataset = create_dataset(bpr_config)\r\ntrain_data, valid_data, test_data = data_preparation(bpr_config, dataset)\r\n\r\nbpr_model = BPR(bpr_config, train_data)\r\ncheckpoint = torch.load(bpr_model_path)\r\nbpr_model.load_state_dict(checkpoint['state_dict'])\r\nbpr_model.eval()\r\n\r\nuid_series = dataset.token2id(dataset.uid_field, ['200']) # \u539f\u59cb\u6570\u636e\u96c6\u4e2d\u7684\u7528\u6237id\uff0c\u53d8\u6362\u4e3a\u8bad\u7ec3\u5185\u90e8\u4f7f\u7528\u7684\u7d22\u5f15id\r\n\r\nfull_sort_topk(uid_series, bpr_model, test_data, 10)\r\n```\r\n\r\n\u62a5\u9519\u4fe1\u606f\r\nTraceback (most recent call last):\r\n File \"D:/source/recbole-0.2.0/app/ex/bpr_predict_ml100k.py\", line 33, in <module>\r\n full_sort_topk(uid_series, bpr_model, test_data, 10)\r\n File \"D:\\source\\recbole-0.2.0\\recbole\\utils\\case_study.py\", line 87, in full_sort_topk\r\n scores = full_sort_scores(uid_series, model, test_data)\r\n File \"D:\\Anaconda3\\envs\\pytorch\\lib\\site-packages\\torch\\autograd\\grad_mode.py\", line 26, in decorate_context\r\n return func(*args, **kwargs)\r\n File \"D:\\source\\recbole-0.2.0\\recbole\\utils\\case_study.py\", line 45, in full_sort_scores\r\n history_row = torch.cat([torch.full_like(hist_iid, i) for i, hist_iid in enumerate(history_item)])\r\nRuntimeError: zero-dimensional tensor (at position 0) cannot be concatenated\n", "before_files": [{"content": "# @Time : 2020/12/25\n# @Author : Yushuo Chen\n# @Email : [email protected]\n\n# UPDATE\n# @Time : 2020/12/25\n# @Author : Yushuo Chen\n# @email : [email protected]\n\n\"\"\"\nrecbole.utils.case_study\n#####################################\n\"\"\"\n\nimport numpy as np\nimport torch\n\nfrom recbole.data.dataloader.general_dataloader import GeneralFullDataLoader\nfrom recbole.data.dataloader.sequential_dataloader import SequentialFullDataLoader\n\n\[email protected]_grad()\ndef full_sort_scores(uid_series, model, test_data):\n \"\"\"Calculate the scores of all items for each user in uid_series.\n\n Note:\n The score of [pad] and history items will be set into -inf.\n\n Args:\n uid_series (numpy.ndarray): User id series\n model 
(AbstractRecommender): Model to predict\n test_data (AbstractDataLoader): The test_data of model\n\n Returns:\n torch.Tensor: the scores of all items for each user in uid_series.\n \"\"\"\n uid_field = test_data.dataset.uid_field\n dataset = test_data.dataset\n model.eval()\n\n if isinstance(test_data, GeneralFullDataLoader):\n index = np.isin(test_data.user_df[uid_field].numpy(), uid_series)\n input_interaction = test_data.user_df[index]\n history_item = test_data.uid2history_item[input_interaction[uid_field]]\n history_row = torch.cat([torch.full_like(hist_iid, i) for i, hist_iid in enumerate(history_item)])\n history_col = torch.cat(list(history_item))\n history_index = history_row, history_col\n elif isinstance(test_data, SequentialFullDataLoader):\n index = np.isin(test_data.uid_list, uid_series)\n input_interaction = test_data.augmentation(\n test_data.item_list_index[index], test_data.target_index[index], test_data.item_list_length[index]\n )\n history_index = None\n else:\n raise NotImplementedError\n\n # Get scores of all items\n try:\n scores = model.full_sort_predict(input_interaction)\n except NotImplementedError:\n input_interaction = input_interaction.repeat(dataset.item_num)\n input_interaction.update(test_data.get_item_feature().repeat(len(uid_series)))\n scores = model.predict(input_interaction)\n\n scores = scores.view(-1, dataset.item_num)\n scores[:, 0] = -np.inf # set scores of [pad] to -inf\n if history_index is not None:\n scores[history_index] = -np.inf # set scores of history items to -inf\n\n return scores\n\n\ndef full_sort_topk(uid_series, model, test_data, k):\n \"\"\"Calculate the top-k items' scores and ids for each user in uid_series.\n\n Args:\n uid_series (numpy.ndarray): User id series\n model (AbstractRecommender): Model to predict\n test_data (AbstractDataLoader): The test_data of model\n k (int): The top-k items.\n\n Returns:\n tuple:\n - topk_scores (torch.Tensor): The scores of topk items.\n - topk_index (torch.Tensor): The index of topk items, which is also the internal ids of items.\n \"\"\"\n scores = full_sort_scores(uid_series, model, test_data)\n return torch.topk(scores, k)\n", "path": "recbole/utils/case_study.py"}], "after_files": [{"content": "# @Time : 2020/12/25\n# @Author : Yushuo Chen\n# @Email : [email protected]\n\n# UPDATE\n# @Time : 2020/12/25\n# @Author : Yushuo Chen\n# @email : [email protected]\n\n\"\"\"\nrecbole.utils.case_study\n#####################################\n\"\"\"\n\nimport numpy as np\nimport torch\n\nfrom recbole.data.dataloader.general_dataloader import GeneralFullDataLoader\nfrom recbole.data.dataloader.sequential_dataloader import SequentialFullDataLoader\n\n\[email protected]_grad()\ndef full_sort_scores(uid_series, model, test_data):\n \"\"\"Calculate the scores of all items for each user in uid_series.\n\n Note:\n The score of [pad] and history items will be set into -inf.\n\n Args:\n uid_series (numpy.ndarray): User id series\n model (AbstractRecommender): Model to predict\n test_data (AbstractDataLoader): The test_data of model\n\n Returns:\n torch.Tensor: the scores of all items for each user in uid_series.\n \"\"\"\n uid_field = test_data.dataset.uid_field\n dataset = test_data.dataset\n model.eval()\n\n if isinstance(test_data, GeneralFullDataLoader):\n index = np.isin(test_data.user_df[uid_field].numpy(), uid_series)\n input_interaction = test_data.user_df[index]\n history_item = test_data.uid2history_item[input_interaction[uid_field].numpy()]\n history_row = torch.cat([torch.full_like(hist_iid, i) for 
i, hist_iid in enumerate(history_item)])\n history_col = torch.cat(list(history_item))\n history_index = history_row, history_col\n elif isinstance(test_data, SequentialFullDataLoader):\n index = np.isin(test_data.uid_list, uid_series)\n input_interaction = test_data.augmentation(\n test_data.item_list_index[index], test_data.target_index[index], test_data.item_list_length[index]\n )\n history_index = None\n else:\n raise NotImplementedError\n\n # Get scores of all items\n try:\n scores = model.full_sort_predict(input_interaction)\n except NotImplementedError:\n input_interaction = input_interaction.repeat(dataset.item_num)\n input_interaction.update(test_data.get_item_feature().repeat(len(uid_series)))\n scores = model.predict(input_interaction)\n\n scores = scores.view(-1, dataset.item_num)\n scores[:, 0] = -np.inf # set scores of [pad] to -inf\n if history_index is not None:\n scores[history_index] = -np.inf # set scores of history items to -inf\n\n return scores\n\n\ndef full_sort_topk(uid_series, model, test_data, k):\n \"\"\"Calculate the top-k items' scores and ids for each user in uid_series.\n\n Args:\n uid_series (numpy.ndarray): User id series\n model (AbstractRecommender): Model to predict\n test_data (AbstractDataLoader): The test_data of model\n k (int): The top-k items.\n\n Returns:\n tuple:\n - topk_scores (torch.Tensor): The scores of topk items.\n - topk_index (torch.Tensor): The index of topk items, which is also the internal ids of items.\n \"\"\"\n scores = full_sort_scores(uid_series, model, test_data)\n return torch.topk(scores, k)\n", "path": "recbole/utils/case_study.py"}]} | 1,710 | 175 |
gh_patches_debug_21908 | rasdani/github-patches | git_diff | Lightning-AI__pytorch-lightning-1453 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Propose to save new model before deleting previous ones in ModelCheckpointing
## 🚀 Feature
<!-- A clear and concise description of the feature proposal -->
In an edge case, the trainer deleted the previous model and was then killed by a system error before the new model was successfully saved. Thus all the models were lost.
I understand specifying save_top_k > 1 helps, and saving before deleting leads to larger disk consumption. But it might be good to provide an option for this?
### Motivation
<!-- Please outline the motivation for the proposal. Is your feature request related to a problem? e.g., I'm always frustrated when [...]. If this is related to another GitHub issue, please link here too -->
### Pitch
<!-- A clear and concise description of what you want to happen. -->
in the worst case, you have two but never none...
### Alternatives
<!-- A clear and concise description of any alternative solutions or features you've considered, if any. -->
### Additional context
<!-- Add any other context or screenshots about the feature request here. -->
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pytorch_lightning/callbacks/model_checkpoint.py`
Content:
```
1 """
2 Model Checkpointing
3 ===================
4
5 Automatically save model checkpoints during training.
6
7 """
8
9 import os
10 import re
11
12 import numpy as np
13
14 from pytorch_lightning import _logger as log
15 from pytorch_lightning.callbacks.base import Callback
16 from pytorch_lightning.utilities import rank_zero_warn
17
18
19 class ModelCheckpoint(Callback):
20 r"""
21 Save the model after every epoch.
22
23 Args:
24 filepath: path to save the model file.
25 Can contain named formatting options to be auto-filled.
26
27 Example::
28
29 # custom path
30 # saves a file like: my/path/epoch_0.ckpt
31 >>> checkpoint_callback = ModelCheckpoint('my/path/')
32
33 # save any arbitrary metrics like `val_loss`, etc. in name
34 # saves a file like: my/path/epoch=2-val_loss=0.2_other_metric=0.3.ckpt
35 >>> checkpoint_callback = ModelCheckpoint(
36 ... filepath='my/path/{epoch}-{val_loss:.2f}-{other_metric:.2f}'
37 ... )
38
39 monitor: quantity to monitor.
40 verbose: verbosity mode. Default: ``False``.
41 save_top_k: if `save_top_k == k`,
42 the best k models according to
43 the quantity monitored will be saved.
44 if ``save_top_k == 0``, no models are saved.
45 if ``save_top_k == -1``, all models are saved.
46 Please note that the monitors are checked every `period` epochs.
47 if ``save_top_k >= 2`` and the callback is called multiple
48 times inside an epoch, the name of the saved file will be
49 appended with a version count starting with `v0`.
50 mode: one of {auto, min, max}.
51 If ``save_top_k != 0``, the decision
52 to overwrite the current save file is made
53 based on either the maximization or the
54 minimization of the monitored quantity. For `val_acc`,
55 this should be `max`, for `val_loss` this should
56 be `min`, etc. In `auto` mode, the direction is
57 automatically inferred from the name of the monitored quantity.
58 save_weights_only: if ``True``, then only the model's weights will be
59 saved (``model.save_weights(filepath)``), else the full model
60 is saved (``model.save(filepath)``).
61 period: Interval (number of epochs) between checkpoints.
62
63 Example::
64
65 >>> from pytorch_lightning import Trainer
66 >>> from pytorch_lightning.callbacks import ModelCheckpoint
67
68 # saves checkpoints to 'my/path/' whenever 'val_loss' has a new min
69 >>> checkpoint_callback = ModelCheckpoint(filepath='my/path/')
70 >>> trainer = Trainer(checkpoint_callback=checkpoint_callback)
71
72 # save epoch and val_loss in name
73 # saves a file like: my/path/sample-mnist_epoch=02_val_loss=0.32.ckpt
74 >>> checkpoint_callback = ModelCheckpoint(
75 ... filepath='my/path/sample-mnist_{epoch:02d}-{val_loss:.2f}'
76 ... )
77
78 """
79
80 def __init__(self, filepath: str, monitor: str = 'val_loss', verbose: bool = False,
81 save_top_k: int = 1, save_weights_only: bool = False,
82 mode: str = 'auto', period: int = 1, prefix: str = ''):
83 super().__init__()
84 if save_top_k > 0 and os.path.isdir(filepath) and len(os.listdir(filepath)) > 0:
85 rank_zero_warn(
86 f"Checkpoint directory {filepath} exists and is not empty with save_top_k != 0."
87 "All files in this directory will be deleted when a checkpoint is saved!"
88 )
89
90 self.monitor = monitor
91 self.verbose = verbose
92 if os.path.isdir(filepath):
93 self.dirpath, self.filename = filepath, '{epoch}'
94 else:
95 self.dirpath, self.filename = os.path.split(filepath)
96
97 os.makedirs(self.dirpath, exist_ok=True)
98 self.save_top_k = save_top_k
99 self.save_weights_only = save_weights_only
100 self.period = period
101 self.epoch_last_check = None
102 self.prefix = prefix
103 self.best_k_models = {}
104 # {filename: monitor}
105 self.kth_best_model = ''
106 self.best = 0
107 self.save_function = None
108
109 mode_dict = {
110 'min': (np.less, np.Inf, 'min'),
111 'max': (np.greater, -np.Inf, 'max'),
112 'auto': (np.greater, -np.Inf, 'max') if 'acc' in self.monitor or self.monitor.startswith('fmeasure')
113 else (np.less, np.Inf, 'min'),
114 }
115
116 if mode not in mode_dict:
117 rank_zero_warn(f'ModelCheckpoint mode {mode} is unknown, fallback to auto mode.', RuntimeWarning)
118 mode = 'auto'
119
120 self.monitor_op, self.kth_value, self.mode = mode_dict[mode]
121
122 def _del_model(self, filepath):
123 os.remove(filepath)
124
125 def _save_model(self, filepath):
126 # make paths
127 os.makedirs(os.path.dirname(filepath), exist_ok=True)
128
129 # delegate the saving to the model
130 if self.save_function is not None:
131 self.save_function(filepath)
132 else:
133 raise ValueError(".save_function() not set")
134
135 def check_monitor_top_k(self, current):
136 less_than_k_models = len(self.best_k_models) < self.save_top_k
137 if less_than_k_models:
138 return True
139 return self.monitor_op(current, self.best_k_models[self.kth_best_model])
140
141 def format_checkpoint_name(self, epoch, metrics, ver=None):
142 """Generate a filename according to the defined template.
143
144 Example::
145
146 >>> tmpdir = os.path.dirname(__file__)
147 >>> ckpt = ModelCheckpoint(os.path.join(tmpdir, '{epoch}'))
148 >>> os.path.basename(ckpt.format_checkpoint_name(0, {}))
149 'epoch=0.ckpt'
150 >>> ckpt = ModelCheckpoint(os.path.join(tmpdir, '{epoch:03d}'))
151 >>> os.path.basename(ckpt.format_checkpoint_name(5, {}))
152 'epoch=005.ckpt'
153 >>> ckpt = ModelCheckpoint(os.path.join(tmpdir, '{epoch}-{val_loss:.2f}'))
154 >>> os.path.basename(ckpt.format_checkpoint_name(2, dict(val_loss=0.123456)))
155 'epoch=2-val_loss=0.12.ckpt'
156 >>> ckpt = ModelCheckpoint(os.path.join(tmpdir, '{missing:d}'))
157 >>> os.path.basename(ckpt.format_checkpoint_name(0, {}))
158 'missing=0.ckpt'
159 """
160 # check if user passed in keys to the string
161 groups = re.findall(r'(\{.*?)[:\}]', self.filename)
162
163 if len(groups) == 0:
164 # default name
165 filename = f'{self.prefix}_ckpt_epoch_{epoch}'
166 else:
167 metrics['epoch'] = epoch
168 filename = self.filename
169 for tmp in groups:
170 name = tmp[1:]
171 filename = filename.replace(tmp, name + '={' + name)
172 if name not in metrics:
173 metrics[name] = 0
174 filename = filename.format(**metrics)
175 str_ver = f'_v{ver}' if ver is not None else ''
176 filepath = os.path.join(self.dirpath, self.prefix + filename + str_ver + '.ckpt')
177 return filepath
178
179 def on_validation_end(self, trainer, pl_module):
180 # only run on main process
181 if trainer.proc_rank != 0:
182 return
183
184 metrics = trainer.callback_metrics
185 epoch = trainer.current_epoch
186 if self.save_top_k == 0:
187 # no models are saved
188 return
189 if self.epoch_last_check is not None and (epoch - self.epoch_last_check) < self.period:
190 # skipping in this term
191 return
192
193 self.epoch_last_check = epoch
194
195 filepath = self.format_checkpoint_name(epoch, metrics)
196 version_cnt = 0
197 while os.path.isfile(filepath):
198 filepath = self.format_checkpoint_name(epoch, metrics, ver=version_cnt)
199 # this epoch called before
200 version_cnt += 1
201
202 if self.save_top_k != -1:
203 current = metrics.get(self.monitor)
204
205 if current is None:
206 rank_zero_warn(f'Can save best model only with {self.monitor} available, skipping.', RuntimeWarning)
207 elif self.check_monitor_top_k(current):
208 self._do_check_save(filepath, current, epoch)
209 elif self.verbose > 0:
210 log.info(f'\nEpoch {epoch:05d}: {self.monitor} was not in top {self.save_top_k}')
211
212 else:
213 if self.verbose > 0:
214 log.info(f'\nEpoch {epoch:05d}: saving model to {filepath}')
215 self._save_model(filepath)
216
217 def _do_check_save(self, filepath, current, epoch):
218 # remove kth
219 if len(self.best_k_models) == self.save_top_k and self.save_top_k > 0:
220 delpath = self.kth_best_model
221 self.best_k_models.pop(self.kth_best_model)
222 self._del_model(delpath)
223
224 self.best_k_models[filepath] = current
225 if len(self.best_k_models) == self.save_top_k:
226 # monitor dict has reached k elements
227 _op = max if self.mode == 'min' else min
228 self.kth_best_model = _op(self.best_k_models,
229 key=self.best_k_models.get)
230 self.kth_value = self.best_k_models[self.kth_best_model]
231
232 _op = min if self.mode == 'min' else max
233 self.best = _op(self.best_k_models.values())
234
235 if self.verbose > 0:
236 log.info(
237 f'\nEpoch {epoch:05d}: {self.monitor} reached'
238 f' {current:0.5f} (best {self.best:0.5f}), saving model to'
239 f' {filepath} as top {self.save_top_k}')
240 self._save_model(filepath)
241
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pytorch_lightning/callbacks/model_checkpoint.py b/pytorch_lightning/callbacks/model_checkpoint.py
--- a/pytorch_lightning/callbacks/model_checkpoint.py
+++ b/pytorch_lightning/callbacks/model_checkpoint.py
@@ -216,10 +216,12 @@
def _do_check_save(self, filepath, current, epoch):
# remove kth
+
+ del_list = []
if len(self.best_k_models) == self.save_top_k and self.save_top_k > 0:
delpath = self.kth_best_model
self.best_k_models.pop(self.kth_best_model)
- self._del_model(delpath)
+ del_list.append(delpath)
self.best_k_models[filepath] = current
if len(self.best_k_models) == self.save_top_k:
@@ -238,3 +240,7 @@
f' {current:0.5f} (best {self.best:0.5f}), saving model to'
f' {filepath} as top {self.save_top_k}')
self._save_model(filepath)
+
+ for cur_path in del_list:
+ if cur_path != filepath:
+ self._del_model(cur_path)
| {"golden_diff": "diff --git a/pytorch_lightning/callbacks/model_checkpoint.py b/pytorch_lightning/callbacks/model_checkpoint.py\n--- a/pytorch_lightning/callbacks/model_checkpoint.py\n+++ b/pytorch_lightning/callbacks/model_checkpoint.py\n@@ -216,10 +216,12 @@\n \n def _do_check_save(self, filepath, current, epoch):\n # remove kth\n+\n+ del_list = []\n if len(self.best_k_models) == self.save_top_k and self.save_top_k > 0:\n delpath = self.kth_best_model\n self.best_k_models.pop(self.kth_best_model)\n- self._del_model(delpath)\n+ del_list.append(delpath)\n \n self.best_k_models[filepath] = current\n if len(self.best_k_models) == self.save_top_k:\n@@ -238,3 +240,7 @@\n f' {current:0.5f} (best {self.best:0.5f}), saving model to'\n f' {filepath} as top {self.save_top_k}')\n self._save_model(filepath)\n+\n+ for cur_path in del_list:\n+ if cur_path != filepath:\n+ self._del_model(cur_path)\n", "issue": "Propose to save new model before deleting previous ones in ModelCheckpointing\n## \ud83d\ude80 Feature\r\n<!-- A clear and concise description of the feature proposal -->\r\nIn an edge case, the trainer deleted previous model and then was killed because of system error before successfully saving new model. Thus all the models were lost.\r\nI understand specifying save_top_k > 1 helps, and saving before deleting leads to larger disk consumption. But it might be good to provide an option for this?\r\n### Motivation\r\n\r\n<!-- Please outline the motivation for the proposal. Is your feature request related to a problem? e.g., I'm always frustrated when [...]. If this is related to another GitHub issue, please link here too -->\r\n\r\n### Pitch\r\n\r\n<!-- A clear and concise description of what you want to happen. -->\r\nin the worst case, you have two but never none...\r\n\r\n### Alternatives\r\n\r\n<!-- A clear and concise description of any alternative solutions or features you've considered, if any. -->\r\n\r\n### Additional context\r\n\r\n<!-- Add any other context or screenshots about the feature request here. -->\r\n\n", "before_files": [{"content": "\"\"\"\nModel Checkpointing\n===================\n\nAutomatically save model checkpoints during training.\n\n\"\"\"\n\nimport os\nimport re\n\nimport numpy as np\n\nfrom pytorch_lightning import _logger as log\nfrom pytorch_lightning.callbacks.base import Callback\nfrom pytorch_lightning.utilities import rank_zero_warn\n\n\nclass ModelCheckpoint(Callback):\n r\"\"\"\n Save the model after every epoch.\n\n Args:\n filepath: path to save the model file.\n Can contain named formatting options to be auto-filled.\n\n Example::\n\n # custom path\n # saves a file like: my/path/epoch_0.ckpt\n >>> checkpoint_callback = ModelCheckpoint('my/path/')\n\n # save any arbitrary metrics like `val_loss`, etc. in name\n # saves a file like: my/path/epoch=2-val_loss=0.2_other_metric=0.3.ckpt\n >>> checkpoint_callback = ModelCheckpoint(\n ... filepath='my/path/{epoch}-{val_loss:.2f}-{other_metric:.2f}'\n ... )\n\n monitor: quantity to monitor.\n verbose: verbosity mode. 
Default: ``False``.\n save_top_k: if `save_top_k == k`,\n the best k models according to\n the quantity monitored will be saved.\n if ``save_top_k == 0``, no models are saved.\n if ``save_top_k == -1``, all models are saved.\n Please note that the monitors are checked every `period` epochs.\n if ``save_top_k >= 2`` and the callback is called multiple\n times inside an epoch, the name of the saved file will be\n appended with a version count starting with `v0`.\n mode: one of {auto, min, max}.\n If ``save_top_k != 0``, the decision\n to overwrite the current save file is made\n based on either the maximization or the\n minimization of the monitored quantity. For `val_acc`,\n this should be `max`, for `val_loss` this should\n be `min`, etc. In `auto` mode, the direction is\n automatically inferred from the name of the monitored quantity.\n save_weights_only: if ``True``, then only the model's weights will be\n saved (``model.save_weights(filepath)``), else the full model\n is saved (``model.save(filepath)``).\n period: Interval (number of epochs) between checkpoints.\n\n Example::\n\n >>> from pytorch_lightning import Trainer\n >>> from pytorch_lightning.callbacks import ModelCheckpoint\n\n # saves checkpoints to 'my/path/' whenever 'val_loss' has a new min\n >>> checkpoint_callback = ModelCheckpoint(filepath='my/path/')\n >>> trainer = Trainer(checkpoint_callback=checkpoint_callback)\n\n # save epoch and val_loss in name\n # saves a file like: my/path/sample-mnist_epoch=02_val_loss=0.32.ckpt\n >>> checkpoint_callback = ModelCheckpoint(\n ... filepath='my/path/sample-mnist_{epoch:02d}-{val_loss:.2f}'\n ... )\n\n \"\"\"\n\n def __init__(self, filepath: str, monitor: str = 'val_loss', verbose: bool = False,\n save_top_k: int = 1, save_weights_only: bool = False,\n mode: str = 'auto', period: int = 1, prefix: str = ''):\n super().__init__()\n if save_top_k > 0 and os.path.isdir(filepath) and len(os.listdir(filepath)) > 0:\n rank_zero_warn(\n f\"Checkpoint directory {filepath} exists and is not empty with save_top_k != 0.\"\n \"All files in this directory will be deleted when a checkpoint is saved!\"\n )\n\n self.monitor = monitor\n self.verbose = verbose\n if os.path.isdir(filepath):\n self.dirpath, self.filename = filepath, '{epoch}'\n else:\n self.dirpath, self.filename = os.path.split(filepath)\n\n os.makedirs(self.dirpath, exist_ok=True)\n self.save_top_k = save_top_k\n self.save_weights_only = save_weights_only\n self.period = period\n self.epoch_last_check = None\n self.prefix = prefix\n self.best_k_models = {}\n # {filename: monitor}\n self.kth_best_model = ''\n self.best = 0\n self.save_function = None\n\n mode_dict = {\n 'min': (np.less, np.Inf, 'min'),\n 'max': (np.greater, -np.Inf, 'max'),\n 'auto': (np.greater, -np.Inf, 'max') if 'acc' in self.monitor or self.monitor.startswith('fmeasure')\n else (np.less, np.Inf, 'min'),\n }\n\n if mode not in mode_dict:\n rank_zero_warn(f'ModelCheckpoint mode {mode} is unknown, fallback to auto mode.', RuntimeWarning)\n mode = 'auto'\n\n self.monitor_op, self.kth_value, self.mode = mode_dict[mode]\n\n def _del_model(self, filepath):\n os.remove(filepath)\n\n def _save_model(self, filepath):\n # make paths\n os.makedirs(os.path.dirname(filepath), exist_ok=True)\n\n # delegate the saving to the model\n if self.save_function is not None:\n self.save_function(filepath)\n else:\n raise ValueError(\".save_function() not set\")\n\n def check_monitor_top_k(self, current):\n less_than_k_models = len(self.best_k_models) < self.save_top_k\n if 
less_than_k_models:\n return True\n return self.monitor_op(current, self.best_k_models[self.kth_best_model])\n\n def format_checkpoint_name(self, epoch, metrics, ver=None):\n \"\"\"Generate a filename according to the defined template.\n\n Example::\n\n >>> tmpdir = os.path.dirname(__file__)\n >>> ckpt = ModelCheckpoint(os.path.join(tmpdir, '{epoch}'))\n >>> os.path.basename(ckpt.format_checkpoint_name(0, {}))\n 'epoch=0.ckpt'\n >>> ckpt = ModelCheckpoint(os.path.join(tmpdir, '{epoch:03d}'))\n >>> os.path.basename(ckpt.format_checkpoint_name(5, {}))\n 'epoch=005.ckpt'\n >>> ckpt = ModelCheckpoint(os.path.join(tmpdir, '{epoch}-{val_loss:.2f}'))\n >>> os.path.basename(ckpt.format_checkpoint_name(2, dict(val_loss=0.123456)))\n 'epoch=2-val_loss=0.12.ckpt'\n >>> ckpt = ModelCheckpoint(os.path.join(tmpdir, '{missing:d}'))\n >>> os.path.basename(ckpt.format_checkpoint_name(0, {}))\n 'missing=0.ckpt'\n \"\"\"\n # check if user passed in keys to the string\n groups = re.findall(r'(\\{.*?)[:\\}]', self.filename)\n\n if len(groups) == 0:\n # default name\n filename = f'{self.prefix}_ckpt_epoch_{epoch}'\n else:\n metrics['epoch'] = epoch\n filename = self.filename\n for tmp in groups:\n name = tmp[1:]\n filename = filename.replace(tmp, name + '={' + name)\n if name not in metrics:\n metrics[name] = 0\n filename = filename.format(**metrics)\n str_ver = f'_v{ver}' if ver is not None else ''\n filepath = os.path.join(self.dirpath, self.prefix + filename + str_ver + '.ckpt')\n return filepath\n\n def on_validation_end(self, trainer, pl_module):\n # only run on main process\n if trainer.proc_rank != 0:\n return\n\n metrics = trainer.callback_metrics\n epoch = trainer.current_epoch\n if self.save_top_k == 0:\n # no models are saved\n return\n if self.epoch_last_check is not None and (epoch - self.epoch_last_check) < self.period:\n # skipping in this term\n return\n\n self.epoch_last_check = epoch\n\n filepath = self.format_checkpoint_name(epoch, metrics)\n version_cnt = 0\n while os.path.isfile(filepath):\n filepath = self.format_checkpoint_name(epoch, metrics, ver=version_cnt)\n # this epoch called before\n version_cnt += 1\n\n if self.save_top_k != -1:\n current = metrics.get(self.monitor)\n\n if current is None:\n rank_zero_warn(f'Can save best model only with {self.monitor} available, skipping.', RuntimeWarning)\n elif self.check_monitor_top_k(current):\n self._do_check_save(filepath, current, epoch)\n elif self.verbose > 0:\n log.info(f'\\nEpoch {epoch:05d}: {self.monitor} was not in top {self.save_top_k}')\n\n else:\n if self.verbose > 0:\n log.info(f'\\nEpoch {epoch:05d}: saving model to {filepath}')\n self._save_model(filepath)\n\n def _do_check_save(self, filepath, current, epoch):\n # remove kth\n if len(self.best_k_models) == self.save_top_k and self.save_top_k > 0:\n delpath = self.kth_best_model\n self.best_k_models.pop(self.kth_best_model)\n self._del_model(delpath)\n\n self.best_k_models[filepath] = current\n if len(self.best_k_models) == self.save_top_k:\n # monitor dict has reached k elements\n _op = max if self.mode == 'min' else min\n self.kth_best_model = _op(self.best_k_models,\n key=self.best_k_models.get)\n self.kth_value = self.best_k_models[self.kth_best_model]\n\n _op = min if self.mode == 'min' else max\n self.best = _op(self.best_k_models.values())\n\n if self.verbose > 0:\n log.info(\n f'\\nEpoch {epoch:05d}: {self.monitor} reached'\n f' {current:0.5f} (best {self.best:0.5f}), saving model to'\n f' {filepath} as top {self.save_top_k}')\n self._save_model(filepath)\n", "path": 
"pytorch_lightning/callbacks/model_checkpoint.py"}], "after_files": [{"content": "\"\"\"\nModel Checkpointing\n===================\n\nAutomatically save model checkpoints during training.\n\n\"\"\"\n\nimport os\nimport re\n\nimport numpy as np\n\nfrom pytorch_lightning import _logger as log\nfrom pytorch_lightning.callbacks.base import Callback\nfrom pytorch_lightning.utilities import rank_zero_warn\n\n\nclass ModelCheckpoint(Callback):\n r\"\"\"\n Save the model after every epoch.\n\n Args:\n filepath: path to save the model file.\n Can contain named formatting options to be auto-filled.\n\n Example::\n\n # custom path\n # saves a file like: my/path/epoch_0.ckpt\n >>> checkpoint_callback = ModelCheckpoint('my/path/')\n\n # save any arbitrary metrics like `val_loss`, etc. in name\n # saves a file like: my/path/epoch=2-val_loss=0.2_other_metric=0.3.ckpt\n >>> checkpoint_callback = ModelCheckpoint(\n ... filepath='my/path/{epoch}-{val_loss:.2f}-{other_metric:.2f}'\n ... )\n\n monitor: quantity to monitor.\n verbose: verbosity mode. Default: ``False``.\n save_top_k: if `save_top_k == k`,\n the best k models according to\n the quantity monitored will be saved.\n if ``save_top_k == 0``, no models are saved.\n if ``save_top_k == -1``, all models are saved.\n Please note that the monitors are checked every `period` epochs.\n if ``save_top_k >= 2`` and the callback is called multiple\n times inside an epoch, the name of the saved file will be\n appended with a version count starting with `v0`.\n mode: one of {auto, min, max}.\n If ``save_top_k != 0``, the decision\n to overwrite the current save file is made\n based on either the maximization or the\n minimization of the monitored quantity. For `val_acc`,\n this should be `max`, for `val_loss` this should\n be `min`, etc. In `auto` mode, the direction is\n automatically inferred from the name of the monitored quantity.\n save_weights_only: if ``True``, then only the model's weights will be\n saved (``model.save_weights(filepath)``), else the full model\n is saved (``model.save(filepath)``).\n period: Interval (number of epochs) between checkpoints.\n\n Example::\n\n >>> from pytorch_lightning import Trainer\n >>> from pytorch_lightning.callbacks import ModelCheckpoint\n\n # saves checkpoints to 'my/path/' whenever 'val_loss' has a new min\n >>> checkpoint_callback = ModelCheckpoint(filepath='my/path/')\n >>> trainer = Trainer(checkpoint_callback=checkpoint_callback)\n\n # save epoch and val_loss in name\n # saves a file like: my/path/sample-mnist_epoch=02_val_loss=0.32.ckpt\n >>> checkpoint_callback = ModelCheckpoint(\n ... filepath='my/path/sample-mnist_{epoch:02d}-{val_loss:.2f}'\n ... 
)\n\n \"\"\"\n\n def __init__(self, filepath: str, monitor: str = 'val_loss', verbose: bool = False,\n save_top_k: int = 1, save_weights_only: bool = False,\n mode: str = 'auto', period: int = 1, prefix: str = ''):\n super().__init__()\n if save_top_k > 0 and os.path.isdir(filepath) and len(os.listdir(filepath)) > 0:\n rank_zero_warn(\n f\"Checkpoint directory {filepath} exists and is not empty with save_top_k != 0.\"\n \"All files in this directory will be deleted when a checkpoint is saved!\"\n )\n\n self.monitor = monitor\n self.verbose = verbose\n if os.path.isdir(filepath):\n self.dirpath, self.filename = filepath, '{epoch}'\n else:\n self.dirpath, self.filename = os.path.split(filepath)\n\n os.makedirs(self.dirpath, exist_ok=True)\n self.save_top_k = save_top_k\n self.save_weights_only = save_weights_only\n self.period = period\n self.epoch_last_check = None\n self.prefix = prefix\n self.best_k_models = {}\n # {filename: monitor}\n self.kth_best_model = ''\n self.best = 0\n self.save_function = None\n\n mode_dict = {\n 'min': (np.less, np.Inf, 'min'),\n 'max': (np.greater, -np.Inf, 'max'),\n 'auto': (np.greater, -np.Inf, 'max') if 'acc' in self.monitor or self.monitor.startswith('fmeasure')\n else (np.less, np.Inf, 'min'),\n }\n\n if mode not in mode_dict:\n rank_zero_warn(f'ModelCheckpoint mode {mode} is unknown, fallback to auto mode.', RuntimeWarning)\n mode = 'auto'\n\n self.monitor_op, self.kth_value, self.mode = mode_dict[mode]\n\n def _del_model(self, filepath):\n os.remove(filepath)\n\n def _save_model(self, filepath):\n # make paths\n os.makedirs(os.path.dirname(filepath), exist_ok=True)\n\n # delegate the saving to the model\n if self.save_function is not None:\n self.save_function(filepath)\n else:\n raise ValueError(\".save_function() not set\")\n\n def check_monitor_top_k(self, current):\n less_than_k_models = len(self.best_k_models) < self.save_top_k\n if less_than_k_models:\n return True\n return self.monitor_op(current, self.best_k_models[self.kth_best_model])\n\n def format_checkpoint_name(self, epoch, metrics, ver=None):\n \"\"\"Generate a filename according to the defined template.\n\n Example::\n\n >>> tmpdir = os.path.dirname(__file__)\n >>> ckpt = ModelCheckpoint(os.path.join(tmpdir, '{epoch}'))\n >>> os.path.basename(ckpt.format_checkpoint_name(0, {}))\n 'epoch=0.ckpt'\n >>> ckpt = ModelCheckpoint(os.path.join(tmpdir, '{epoch:03d}'))\n >>> os.path.basename(ckpt.format_checkpoint_name(5, {}))\n 'epoch=005.ckpt'\n >>> ckpt = ModelCheckpoint(os.path.join(tmpdir, '{epoch}-{val_loss:.2f}'))\n >>> os.path.basename(ckpt.format_checkpoint_name(2, dict(val_loss=0.123456)))\n 'epoch=2-val_loss=0.12.ckpt'\n >>> ckpt = ModelCheckpoint(os.path.join(tmpdir, '{missing:d}'))\n >>> os.path.basename(ckpt.format_checkpoint_name(0, {}))\n 'missing=0.ckpt'\n \"\"\"\n # check if user passed in keys to the string\n groups = re.findall(r'(\\{.*?)[:\\}]', self.filename)\n\n if len(groups) == 0:\n # default name\n filename = f'{self.prefix}_ckpt_epoch_{epoch}'\n else:\n metrics['epoch'] = epoch\n filename = self.filename\n for tmp in groups:\n name = tmp[1:]\n filename = filename.replace(tmp, name + '={' + name)\n if name not in metrics:\n metrics[name] = 0\n filename = filename.format(**metrics)\n str_ver = f'_v{ver}' if ver is not None else ''\n filepath = os.path.join(self.dirpath, self.prefix + filename + str_ver + '.ckpt')\n return filepath\n\n def on_validation_end(self, trainer, pl_module):\n # only run on main process\n if trainer.proc_rank != 0:\n return\n\n metrics = 
trainer.callback_metrics\n epoch = trainer.current_epoch\n if self.save_top_k == 0:\n # no models are saved\n return\n if self.epoch_last_check is not None and (epoch - self.epoch_last_check) < self.period:\n # skipping in this term\n return\n\n self.epoch_last_check = epoch\n\n filepath = self.format_checkpoint_name(epoch, metrics)\n version_cnt = 0\n while os.path.isfile(filepath):\n filepath = self.format_checkpoint_name(epoch, metrics, ver=version_cnt)\n # this epoch called before\n version_cnt += 1\n\n if self.save_top_k != -1:\n current = metrics.get(self.monitor)\n\n if current is None:\n rank_zero_warn(f'Can save best model only with {self.monitor} available, skipping.', RuntimeWarning)\n elif self.check_monitor_top_k(current):\n self._do_check_save(filepath, current, epoch)\n elif self.verbose > 0:\n log.info(f'\\nEpoch {epoch:05d}: {self.monitor} was not in top {self.save_top_k}')\n\n else:\n if self.verbose > 0:\n log.info(f'\\nEpoch {epoch:05d}: saving model to {filepath}')\n self._save_model(filepath)\n\n def _do_check_save(self, filepath, current, epoch):\n # remove kth\n\n del_list = []\n if len(self.best_k_models) == self.save_top_k and self.save_top_k > 0:\n delpath = self.kth_best_model\n self.best_k_models.pop(self.kth_best_model)\n del_list.append(delpath)\n\n self.best_k_models[filepath] = current\n if len(self.best_k_models) == self.save_top_k:\n # monitor dict has reached k elements\n _op = max if self.mode == 'min' else min\n self.kth_best_model = _op(self.best_k_models,\n key=self.best_k_models.get)\n self.kth_value = self.best_k_models[self.kth_best_model]\n\n _op = min if self.mode == 'min' else max\n self.best = _op(self.best_k_models.values())\n\n if self.verbose > 0:\n log.info(\n f'\\nEpoch {epoch:05d}: {self.monitor} reached'\n f' {current:0.5f} (best {self.best:0.5f}), saving model to'\n f' {filepath} as top {self.save_top_k}')\n self._save_model(filepath)\n\n for cur_path in del_list:\n if cur_path != filepath:\n self._del_model(cur_path)\n", "path": "pytorch_lightning/callbacks/model_checkpoint.py"}]} | 3,288 | 270 |
gh_patches_debug_57934 | rasdani/github-patches | git_diff | scrapy__scrapy-1905 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
response.body is duplicate
Access the [text page (not mine)](http://files.qidian.com/Author4/3615059/88542882.txt) with a browser or wget and you will find the response content is not duplicated, but scrapy's `response.body` is. I tried setting scrapy's headers to match a real browser's, but the body is still duplicated.
Just use the following sample code and you will see the issue.
```
scrapy shell "http://files.qidian.com/Author4/3615059/88542882.txt"
```
Sorry for my bad english.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scrapy/utils/gz.py`
Content:
```
1 import struct
2
3 try:
4 from cStringIO import StringIO as BytesIO
5 except ImportError:
6 from io import BytesIO
7 from gzip import GzipFile
8
9 import six
10 import re
11
12 # - Python>=3.5 GzipFile's read() has issues returning leftover
13 # uncompressed data when input is corrupted
14 # (regression or bug-fix compared to Python 3.4)
15 # - read1(), which fetches data before raising EOFError on next call
16 # works here but is only available from Python>=3.3
17 # - scrapy does not support Python 3.2
18 # - Python 2.7 GzipFile works fine with standard read() + extrabuf
19 if six.PY2:
20 def read1(gzf, size=-1):
21 return gzf.read(size)
22 else:
23 def read1(gzf, size=-1):
24 return gzf.read1(size)
25
26
27 def gunzip(data):
28 """Gunzip the given data and return as much data as possible.
29
30 This is resilient to CRC checksum errors.
31 """
32 f = GzipFile(fileobj=BytesIO(data))
33 output = b''
34 chunk = b'.'
35 while chunk:
36 try:
37 chunk = read1(f, 8196)
38 output += chunk
39 except (IOError, EOFError, struct.error):
40 # complete only if there is some data, otherwise re-raise
41 # see issue 87 about catching struct.error
42 # some pages are quite small so output is '' and f.extrabuf
43 # contains the whole page content
44 if output or getattr(f, 'extrabuf', None):
45 try:
46 output += f.extrabuf
47 finally:
48 break
49 else:
50 raise
51 return output
52
53 _is_gzipped = re.compile(br'^application/(x-)?gzip\b', re.I).search
54 _is_octetstream = re.compile(br'^(application|binary)/octet-stream\b', re.I).search
55
56 def is_gzipped(response):
57 """Return True if the response is gzipped, or False otherwise"""
58 ctype = response.headers.get('Content-Type', b'')
59 cenc = response.headers.get('Content-Encoding', b'').lower()
60 return (_is_gzipped(ctype) or
61 (_is_octetstream(ctype) and cenc in (b'gzip', b'x-gzip')))
62
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/scrapy/utils/gz.py b/scrapy/utils/gz.py
--- a/scrapy/utils/gz.py
+++ b/scrapy/utils/gz.py
@@ -43,7 +43,7 @@
# contains the whole page content
if output or getattr(f, 'extrabuf', None):
try:
- output += f.extrabuf
+ output += f.extrabuf[-f.extrasize:]
finally:
break
else:
| {"golden_diff": "diff --git a/scrapy/utils/gz.py b/scrapy/utils/gz.py\n--- a/scrapy/utils/gz.py\n+++ b/scrapy/utils/gz.py\n@@ -43,7 +43,7 @@\n # contains the whole page content\n if output or getattr(f, 'extrabuf', None):\n try:\n- output += f.extrabuf\n+ output += f.extrabuf[-f.extrasize:]\n finally:\n break\n else:\n", "issue": "response.body is duplicate\nAccess the [text page(not mine)](http://files.qidian.com/Author4/3615059/88542882.txt) by browsers or wget and you will find the response content is not duplicate, but scrapy's `response.body` is duplicate. I had tried set the scrapy's headers same as a real brower's, but it is still duplicate.\n\nJust use the follow sample code, and you will find the issue.\n\n```\nscrapy shell \"http://files.qidian.com/Author4/3615059/88542882.txt\"\n```\n\nSorry for my bad english. \n\n", "before_files": [{"content": "import struct\n\ntry:\n from cStringIO import StringIO as BytesIO\nexcept ImportError:\n from io import BytesIO\nfrom gzip import GzipFile\n\nimport six\nimport re\n\n# - Python>=3.5 GzipFile's read() has issues returning leftover\n# uncompressed data when input is corrupted\n# (regression or bug-fix compared to Python 3.4)\n# - read1(), which fetches data before raising EOFError on next call\n# works here but is only available from Python>=3.3\n# - scrapy does not support Python 3.2\n# - Python 2.7 GzipFile works fine with standard read() + extrabuf\nif six.PY2:\n def read1(gzf, size=-1):\n return gzf.read(size)\nelse:\n def read1(gzf, size=-1):\n return gzf.read1(size)\n\n\ndef gunzip(data):\n \"\"\"Gunzip the given data and return as much data as possible.\n\n This is resilient to CRC checksum errors.\n \"\"\"\n f = GzipFile(fileobj=BytesIO(data))\n output = b''\n chunk = b'.'\n while chunk:\n try:\n chunk = read1(f, 8196)\n output += chunk\n except (IOError, EOFError, struct.error):\n # complete only if there is some data, otherwise re-raise\n # see issue 87 about catching struct.error\n # some pages are quite small so output is '' and f.extrabuf\n # contains the whole page content\n if output or getattr(f, 'extrabuf', None):\n try:\n output += f.extrabuf\n finally:\n break\n else:\n raise\n return output\n\n_is_gzipped = re.compile(br'^application/(x-)?gzip\\b', re.I).search\n_is_octetstream = re.compile(br'^(application|binary)/octet-stream\\b', re.I).search\n\ndef is_gzipped(response):\n \"\"\"Return True if the response is gzipped, or False otherwise\"\"\"\n ctype = response.headers.get('Content-Type', b'')\n cenc = response.headers.get('Content-Encoding', b'').lower()\n return (_is_gzipped(ctype) or\n (_is_octetstream(ctype) and cenc in (b'gzip', b'x-gzip')))\n", "path": "scrapy/utils/gz.py"}], "after_files": [{"content": "import struct\n\ntry:\n from cStringIO import StringIO as BytesIO\nexcept ImportError:\n from io import BytesIO\nfrom gzip import GzipFile\n\nimport six\nimport re\n\n# - Python>=3.5 GzipFile's read() has issues returning leftover\n# uncompressed data when input is corrupted\n# (regression or bug-fix compared to Python 3.4)\n# - read1(), which fetches data before raising EOFError on next call\n# works here but is only available from Python>=3.3\n# - scrapy does not support Python 3.2\n# - Python 2.7 GzipFile works fine with standard read() + extrabuf\nif six.PY2:\n def read1(gzf, size=-1):\n return gzf.read(size)\nelse:\n def read1(gzf, size=-1):\n return gzf.read1(size)\n\n\ndef gunzip(data):\n \"\"\"Gunzip the given data and return as much data as possible.\n\n This is resilient to CRC checksum errors.\n 
\"\"\"\n f = GzipFile(fileobj=BytesIO(data))\n output = b''\n chunk = b'.'\n while chunk:\n try:\n chunk = read1(f, 8196)\n output += chunk\n except (IOError, EOFError, struct.error):\n # complete only if there is some data, otherwise re-raise\n # see issue 87 about catching struct.error\n # some pages are quite small so output is '' and f.extrabuf\n # contains the whole page content\n if output or getattr(f, 'extrabuf', None):\n try:\n output += f.extrabuf[-f.extrasize:]\n finally:\n break\n else:\n raise\n return output\n\n_is_gzipped = re.compile(br'^application/(x-)?gzip\\b', re.I).search\n_is_octetstream = re.compile(br'^(application|binary)/octet-stream\\b', re.I).search\n\ndef is_gzipped(response):\n \"\"\"Return True if the response is gzipped, or False otherwise\"\"\"\n ctype = response.headers.get('Content-Type', b'')\n cenc = response.headers.get('Content-Encoding', b'').lower()\n return (_is_gzipped(ctype) or\n (_is_octetstream(ctype) and cenc in (b'gzip', b'x-gzip')))\n", "path": "scrapy/utils/gz.py"}]} | 1,044 | 107 |
gh_patches_debug_31615 | rasdani/github-patches | git_diff | DDMAL__CantusDB-1313 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Users assigned to Bower sources should be able to edit sequences in those sources
Currently, only project managers can edit sequences. Debra requests that editors should be able to be assigned to Bower sources and edit the sequences contained therein.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `django/cantusdb_project/main_app/views/sequence.py`
Content:
```
1 from django.views.generic import DetailView, ListView, UpdateView
2 from main_app.models import Sequence
3 from django.db.models import Q
4 from main_app.forms import SequenceEditForm
5 from django.contrib.auth.mixins import LoginRequiredMixin
6 from django.contrib import messages
7 from django.contrib.auth.mixins import UserPassesTestMixin
8 from django.core.exceptions import PermissionDenied
9 from main_app.permissions import (
10 user_can_view_sequence,
11 user_can_edit_sequences,
12 )
13
14
15 class SequenceDetailView(DetailView):
16 """
17 Displays a single Sequence object. Accessed with ``sequences/<int:pk>``
18 """
19
20 model = Sequence
21 context_object_name = "sequence"
22 template_name = "sequence_detail.html"
23
24 def get_context_data(self, **kwargs):
25 sequence = self.get_object()
26 user = self.request.user
27
28 # if the sequence's source isn't published,
29 # only logged-in users should be able to view the sequence's detail page
30 if not user_can_view_sequence(user, sequence):
31 raise PermissionDenied()
32
33 context = super().get_context_data(**kwargs)
34 context["concordances"] = (
35 Sequence.objects.filter(cantus_id=sequence.cantus_id)
36 .select_related("source")
37 .order_by("siglum")
38 )
39 context["user_can_edit_sequence"] = user_can_edit_sequences(user)
40 return context
41
42
43 class SequenceListView(ListView):
44 """
45 Displays a list of Sequence objects. Accessed with ``sequences/``
46 """
47
48 paginate_by = 100
49 context_object_name = "sequences"
50 template_name = "sequence_list.html"
51
52 def get_queryset(self):
53 queryset = Sequence.objects.select_related("source")
54 display_unpublished = self.request.user.is_authenticated
55 if display_unpublished:
56 q_obj_filter = Q()
57 else:
58 q_obj_filter = Q(source__published=True)
59
60 if self.request.GET.get("incipit"):
61 incipit = self.request.GET.get("incipit")
62 q_obj_filter &= Q(incipit__icontains=incipit)
63 if self.request.GET.get("siglum"):
64 siglum = self.request.GET.get("siglum")
65 q_obj_filter &= Q(siglum__icontains=siglum)
66 if self.request.GET.get("cantus_id"):
67 cantus_id = self.request.GET.get("cantus_id")
68 q_obj_filter &= Q(cantus_id__icontains=cantus_id)
69
70 return queryset.filter(q_obj_filter).order_by("siglum", "s_sequence")
71
72
73 class SequenceEditView(LoginRequiredMixin, UserPassesTestMixin, UpdateView):
74 template_name = "sequence_edit.html"
75 model = Sequence
76 form_class = SequenceEditForm
77 pk_url_kwarg = "sequence_id"
78
79 def form_valid(self, form):
80 form.instance.last_updated_by = self.request.user
81 messages.success(
82 self.request,
83 "Sequence updated successfully!",
84 )
85 return super().form_valid(form)
86
87 def test_func(self):
88 user = self.request.user
89 return user_can_edit_sequences(user)
90
```
Path: `django/cantusdb_project/main_app/permissions.py`
Content:
```
1 from django.db.models import Q
2 from typing import Optional
3 from main_app.models import (
4 Source,
5 Chant,
6 Sequence,
7 )
8 from users.models import User
9
10
11 def user_can_edit_chants_in_source(user: User, source: Optional[Source]) -> bool:
12 """
13 Checks if the user can edit Chants in a given Source.
14 Used in ChantDetail, ChantList, ChantCreate, ChantDelete, ChantEdit,
15 ChantEditSyllabification, and SourceDetail views.
16 """
17 if user.is_anonymous or (source is None):
18 return False
19
20 source_id = source.id
21 user_is_assigned_to_source: bool = user.sources_user_can_edit.filter(
22 id=source_id
23 ).exists()
24
25 user_is_project_manager: bool = user.groups.filter(name="project manager").exists()
26 user_is_editor: bool = user.groups.filter(name="editor").exists()
27 user_is_contributor: bool = user.groups.filter(name="contributor").exists()
28
29 return (
30 (user_is_project_manager)
31 or (user_is_editor and user_is_assigned_to_source)
32 or (user_is_editor and source.created_by == user)
33 or (user_is_contributor and user_is_assigned_to_source)
34 or (user_is_contributor and source.created_by == user)
35 )
36
37
38 def user_can_proofread_chant(user: User, chant: Chant) -> bool:
39 """
40 Checks if the user can access the proofreading page of a given Source.
41 Used in SourceEditChantsView.
42 """
43 source_id = chant.source.id
44 if user.is_anonymous:
45 return False
46 user_is_assigned_to_source: bool = user.sources_user_can_edit.filter(
47 id=source_id
48 ).exists()
49
50 user_is_project_manager: bool = user.groups.filter(name="project manager").exists()
51 user_is_editor: bool = user.groups.filter(name="editor").exists()
52
53 return (user_is_project_manager) or (user_is_editor and user_is_assigned_to_source)
54
55
56 def user_can_view_source(user: User, source: Source) -> bool:
57 """
58 Checks if the user can view an unpublished Source on the site.
59 Used in ChantDetail, SequenceDetail, and SourceDetail views.
60 """
61 user_is_authenticated: bool = user.is_authenticated
62 return (source.published) or (user_is_authenticated)
63
64
65 def user_can_view_chant(user: User, chant: Chant) -> bool:
66 """
67 Checks if the user can view a Chant belonging to an unpublished Source on the site.
68 Used in ChantDetail, SequenceDetail, and SourceDetail views.
69 """
70 source = chant.source
71 user_is_authenticated: bool = user.is_authenticated
72 return (source is not None) and ((source.published) or (user_is_authenticated))
73
74
75 def user_can_view_sequence(user: User, sequence: Sequence) -> bool:
76 """
77 Checks if the user can view a Sequence belonging to an unpublished Source on the site.
78 Used in ChantDetail, SequenceDetail, and SourceDetail views.
79 """
80 source = sequence.source
81 user_is_authenticated: bool = user.is_authenticated
82 return (source is not None) and ((source.published) or (user_is_authenticated))
83
84
85 def user_can_edit_sequences(user: User) -> bool:
86 """
87 Checks if the user has permission to edit a Sequence object.
88 Used in SequenceDetail and SequenceEdit views.
89 """
90 return user.groups.filter(name="project manager").exists()
91
92
93 def user_can_create_sources(user: User) -> bool:
94 """
95 Checks if the user has permission to create a Source object.
96 Used in SourceCreateView.
97 """
98
99 return user.groups.filter(
100 Q(name="project manager") | Q(name="editor") | Q(name="contributor")
101 ).exists()
102
103
104 def user_can_edit_source(user: User, source: Source) -> bool:
105 """
106 Checks if the user has permission to edit a Source object.
107 Used in SourceDetail, SourceEdit, and SourceDelete views.
108 """
109 if user.is_anonymous:
110 return False
111 source_id = source.id
112 assigned_to_source = user.sources_user_can_edit.filter(id=source_id)
113
114 is_project_manager: bool = user.groups.filter(name="project manager").exists()
115 is_editor: bool = user.groups.filter(name="editor").exists()
116 is_contributor: bool = user.groups.filter(name="contributor").exists()
117
118 return (
119 (is_project_manager)
120 or (is_editor and assigned_to_source)
121 or (is_editor and source.created_by == user)
122 or (is_contributor and source.created_by == user)
123 )
124
125
126 def user_can_view_user_detail(viewing_user: User, user: User) -> bool:
127 """
128 Checks if the user can view the user detail pages of regular users in the database or just indexers.
129 Used in UserDetailView.
130 """
131 return viewing_user.is_authenticated or user.is_indexer
132
133
134 def user_can_manage_source_editors(user: User) -> bool:
135 """
136 Checks if the user has permission to change the editors assigned to a Source.
137 Used in SourceDetailView.
138 """
139 return user.is_staff or user.groups.filter(name="project manager").exists()
140
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/django/cantusdb_project/main_app/permissions.py b/django/cantusdb_project/main_app/permissions.py
--- a/django/cantusdb_project/main_app/permissions.py
+++ b/django/cantusdb_project/main_app/permissions.py
@@ -82,12 +82,31 @@
return (source is not None) and ((source.published) or (user_is_authenticated))
-def user_can_edit_sequences(user: User) -> bool:
+def user_can_edit_sequences(user: User, sequence: Sequence) -> bool:
"""
Checks if the user has permission to edit a Sequence object.
Used in SequenceDetail and SequenceEdit views.
"""
- return user.groups.filter(name="project manager").exists()
+ source = sequence.source
+ if user.is_anonymous or (source is None):
+ return False
+
+ source_id = source.id
+ user_is_assigned_to_source: bool = user.sources_user_can_edit.filter(
+ id=source_id
+ ).exists()
+
+ user_is_project_manager: bool = user.groups.filter(name="project manager").exists()
+ user_is_editor: bool = user.groups.filter(name="editor").exists()
+ user_is_contributor: bool = user.groups.filter(name="contributor").exists()
+
+ return (
+ (user_is_project_manager)
+ or (user_is_editor and user_is_assigned_to_source)
+ or (user_is_editor and source.created_by == user)
+ or (user_is_contributor and user_is_assigned_to_source)
+ or (user_is_contributor and source.created_by == user)
+ )
def user_can_create_sources(user: User) -> bool:
diff --git a/django/cantusdb_project/main_app/views/sequence.py b/django/cantusdb_project/main_app/views/sequence.py
--- a/django/cantusdb_project/main_app/views/sequence.py
+++ b/django/cantusdb_project/main_app/views/sequence.py
@@ -36,7 +36,7 @@
.select_related("source")
.order_by("siglum")
)
- context["user_can_edit_sequence"] = user_can_edit_sequences(user)
+ context["user_can_edit_sequence"] = user_can_edit_sequences(user, sequence)
return context
@@ -86,4 +86,5 @@
def test_func(self):
user = self.request.user
- return user_can_edit_sequences(user)
+ sequence = self.get_object()
+ return user_can_edit_sequences(user, sequence)
| {"golden_diff": "diff --git a/django/cantusdb_project/main_app/permissions.py b/django/cantusdb_project/main_app/permissions.py\n--- a/django/cantusdb_project/main_app/permissions.py\n+++ b/django/cantusdb_project/main_app/permissions.py\n@@ -82,12 +82,31 @@\n return (source is not None) and ((source.published) or (user_is_authenticated))\n \n \n-def user_can_edit_sequences(user: User) -> bool:\n+def user_can_edit_sequences(user: User, sequence: Sequence) -> bool:\n \"\"\"\n Checks if the user has permission to edit a Sequence object.\n Used in SequenceDetail and SequenceEdit views.\n \"\"\"\n- return user.groups.filter(name=\"project manager\").exists()\n+ source = sequence.source\n+ if user.is_anonymous or (source is None):\n+ return False\n+\n+ source_id = source.id\n+ user_is_assigned_to_source: bool = user.sources_user_can_edit.filter(\n+ id=source_id\n+ ).exists()\n+\n+ user_is_project_manager: bool = user.groups.filter(name=\"project manager\").exists()\n+ user_is_editor: bool = user.groups.filter(name=\"editor\").exists()\n+ user_is_contributor: bool = user.groups.filter(name=\"contributor\").exists()\n+\n+ return (\n+ (user_is_project_manager)\n+ or (user_is_editor and user_is_assigned_to_source)\n+ or (user_is_editor and source.created_by == user)\n+ or (user_is_contributor and user_is_assigned_to_source)\n+ or (user_is_contributor and source.created_by == user)\n+ )\n \n \n def user_can_create_sources(user: User) -> bool:\ndiff --git a/django/cantusdb_project/main_app/views/sequence.py b/django/cantusdb_project/main_app/views/sequence.py\n--- a/django/cantusdb_project/main_app/views/sequence.py\n+++ b/django/cantusdb_project/main_app/views/sequence.py\n@@ -36,7 +36,7 @@\n .select_related(\"source\")\n .order_by(\"siglum\")\n )\n- context[\"user_can_edit_sequence\"] = user_can_edit_sequences(user)\n+ context[\"user_can_edit_sequence\"] = user_can_edit_sequences(user, sequence)\n return context\n \n \n@@ -86,4 +86,5 @@\n \n def test_func(self):\n user = self.request.user\n- return user_can_edit_sequences(user)\n+ sequence = self.get_object()\n+ return user_can_edit_sequences(user, sequence)\n", "issue": "Users assigned to Bower sources should be able to edit sequences in those sources\nCurrently, only project managers can edit sequences. Debra requests that editors should be able to be assigned to Bower sources and edit the sequences contained therein.\n", "before_files": [{"content": "from django.views.generic import DetailView, ListView, UpdateView\nfrom main_app.models import Sequence\nfrom django.db.models import Q\nfrom main_app.forms import SequenceEditForm\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.contrib import messages\nfrom django.contrib.auth.mixins import UserPassesTestMixin\nfrom django.core.exceptions import PermissionDenied\nfrom main_app.permissions import (\n user_can_view_sequence,\n user_can_edit_sequences,\n)\n\n\nclass SequenceDetailView(DetailView):\n \"\"\"\n Displays a single Sequence object. 
Accessed with ``sequences/<int:pk>``\n \"\"\"\n\n model = Sequence\n context_object_name = \"sequence\"\n template_name = \"sequence_detail.html\"\n\n def get_context_data(self, **kwargs):\n sequence = self.get_object()\n user = self.request.user\n\n # if the sequence's source isn't published,\n # only logged-in users should be able to view the sequence's detail page\n if not user_can_view_sequence(user, sequence):\n raise PermissionDenied()\n\n context = super().get_context_data(**kwargs)\n context[\"concordances\"] = (\n Sequence.objects.filter(cantus_id=sequence.cantus_id)\n .select_related(\"source\")\n .order_by(\"siglum\")\n )\n context[\"user_can_edit_sequence\"] = user_can_edit_sequences(user)\n return context\n\n\nclass SequenceListView(ListView):\n \"\"\"\n Displays a list of Sequence objects. Accessed with ``sequences/``\n \"\"\"\n\n paginate_by = 100\n context_object_name = \"sequences\"\n template_name = \"sequence_list.html\"\n\n def get_queryset(self):\n queryset = Sequence.objects.select_related(\"source\")\n display_unpublished = self.request.user.is_authenticated\n if display_unpublished:\n q_obj_filter = Q()\n else:\n q_obj_filter = Q(source__published=True)\n\n if self.request.GET.get(\"incipit\"):\n incipit = self.request.GET.get(\"incipit\")\n q_obj_filter &= Q(incipit__icontains=incipit)\n if self.request.GET.get(\"siglum\"):\n siglum = self.request.GET.get(\"siglum\")\n q_obj_filter &= Q(siglum__icontains=siglum)\n if self.request.GET.get(\"cantus_id\"):\n cantus_id = self.request.GET.get(\"cantus_id\")\n q_obj_filter &= Q(cantus_id__icontains=cantus_id)\n\n return queryset.filter(q_obj_filter).order_by(\"siglum\", \"s_sequence\")\n\n\nclass SequenceEditView(LoginRequiredMixin, UserPassesTestMixin, UpdateView):\n template_name = \"sequence_edit.html\"\n model = Sequence\n form_class = SequenceEditForm\n pk_url_kwarg = \"sequence_id\"\n\n def form_valid(self, form):\n form.instance.last_updated_by = self.request.user\n messages.success(\n self.request,\n \"Sequence updated successfully!\",\n )\n return super().form_valid(form)\n\n def test_func(self):\n user = self.request.user\n return user_can_edit_sequences(user)\n", "path": "django/cantusdb_project/main_app/views/sequence.py"}, {"content": "from django.db.models import Q\nfrom typing import Optional\nfrom main_app.models import (\n Source,\n Chant,\n Sequence,\n)\nfrom users.models import User\n\n\ndef user_can_edit_chants_in_source(user: User, source: Optional[Source]) -> bool:\n \"\"\"\n Checks if the user can edit Chants in a given Source.\n Used in ChantDetail, ChantList, ChantCreate, ChantDelete, ChantEdit,\n ChantEditSyllabification, and SourceDetail views.\n \"\"\"\n if user.is_anonymous or (source is None):\n return False\n\n source_id = source.id\n user_is_assigned_to_source: bool = user.sources_user_can_edit.filter(\n id=source_id\n ).exists()\n\n user_is_project_manager: bool = user.groups.filter(name=\"project manager\").exists()\n user_is_editor: bool = user.groups.filter(name=\"editor\").exists()\n user_is_contributor: bool = user.groups.filter(name=\"contributor\").exists()\n\n return (\n (user_is_project_manager)\n or (user_is_editor and user_is_assigned_to_source)\n or (user_is_editor and source.created_by == user)\n or (user_is_contributor and user_is_assigned_to_source)\n or (user_is_contributor and source.created_by == user)\n )\n\n\ndef user_can_proofread_chant(user: User, chant: Chant) -> bool:\n \"\"\"\n Checks if the user can access the proofreading page of a given Source.\n Used in 
SourceEditChantsView.\n \"\"\"\n source_id = chant.source.id\n if user.is_anonymous:\n return False\n user_is_assigned_to_source: bool = user.sources_user_can_edit.filter(\n id=source_id\n ).exists()\n\n user_is_project_manager: bool = user.groups.filter(name=\"project manager\").exists()\n user_is_editor: bool = user.groups.filter(name=\"editor\").exists()\n\n return (user_is_project_manager) or (user_is_editor and user_is_assigned_to_source)\n\n\ndef user_can_view_source(user: User, source: Source) -> bool:\n \"\"\"\n Checks if the user can view an unpublished Source on the site.\n Used in ChantDetail, SequenceDetail, and SourceDetail views.\n \"\"\"\n user_is_authenticated: bool = user.is_authenticated\n return (source.published) or (user_is_authenticated)\n\n\ndef user_can_view_chant(user: User, chant: Chant) -> bool:\n \"\"\"\n Checks if the user can view a Chant belonging to an unpublished Source on the site.\n Used in ChantDetail, SequenceDetail, and SourceDetail views.\n \"\"\"\n source = chant.source\n user_is_authenticated: bool = user.is_authenticated\n return (source is not None) and ((source.published) or (user_is_authenticated))\n\n\ndef user_can_view_sequence(user: User, sequence: Sequence) -> bool:\n \"\"\"\n Checks if the user can view a Sequence belonging to an unpublished Source on the site.\n Used in ChantDetail, SequenceDetail, and SourceDetail views.\n \"\"\"\n source = sequence.source\n user_is_authenticated: bool = user.is_authenticated\n return (source is not None) and ((source.published) or (user_is_authenticated))\n\n\ndef user_can_edit_sequences(user: User) -> bool:\n \"\"\"\n Checks if the user has permission to edit a Sequence object.\n Used in SequenceDetail and SequenceEdit views.\n \"\"\"\n return user.groups.filter(name=\"project manager\").exists()\n\n\ndef user_can_create_sources(user: User) -> bool:\n \"\"\"\n Checks if the user has permission to create a Source object.\n Used in SourceCreateView.\n \"\"\"\n\n return user.groups.filter(\n Q(name=\"project manager\") | Q(name=\"editor\") | Q(name=\"contributor\")\n ).exists()\n\n\ndef user_can_edit_source(user: User, source: Source) -> bool:\n \"\"\"\n Checks if the user has permission to edit a Source object.\n Used in SourceDetail, SourceEdit, and SourceDelete views.\n \"\"\"\n if user.is_anonymous:\n return False\n source_id = source.id\n assigned_to_source = user.sources_user_can_edit.filter(id=source_id)\n\n is_project_manager: bool = user.groups.filter(name=\"project manager\").exists()\n is_editor: bool = user.groups.filter(name=\"editor\").exists()\n is_contributor: bool = user.groups.filter(name=\"contributor\").exists()\n\n return (\n (is_project_manager)\n or (is_editor and assigned_to_source)\n or (is_editor and source.created_by == user)\n or (is_contributor and source.created_by == user)\n )\n\n\ndef user_can_view_user_detail(viewing_user: User, user: User) -> bool:\n \"\"\"\n Checks if the user can view the user detail pages of regular users in the database or just indexers.\n Used in UserDetailView.\n \"\"\"\n return viewing_user.is_authenticated or user.is_indexer\n\n\ndef user_can_manage_source_editors(user: User) -> bool:\n \"\"\"\n Checks if the user has permission to change the editors assigned to a Source.\n Used in SourceDetailView.\n \"\"\"\n return user.is_staff or user.groups.filter(name=\"project manager\").exists()\n", "path": "django/cantusdb_project/main_app/permissions.py"}], "after_files": [{"content": "from django.views.generic import DetailView, ListView, 
UpdateView\nfrom main_app.models import Sequence\nfrom django.db.models import Q\nfrom main_app.forms import SequenceEditForm\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.contrib import messages\nfrom django.contrib.auth.mixins import UserPassesTestMixin\nfrom django.core.exceptions import PermissionDenied\nfrom main_app.permissions import (\n user_can_view_sequence,\n user_can_edit_sequences,\n)\n\n\nclass SequenceDetailView(DetailView):\n \"\"\"\n Displays a single Sequence object. Accessed with ``sequences/<int:pk>``\n \"\"\"\n\n model = Sequence\n context_object_name = \"sequence\"\n template_name = \"sequence_detail.html\"\n\n def get_context_data(self, **kwargs):\n sequence = self.get_object()\n user = self.request.user\n\n # if the sequence's source isn't published,\n # only logged-in users should be able to view the sequence's detail page\n if not user_can_view_sequence(user, sequence):\n raise PermissionDenied()\n\n context = super().get_context_data(**kwargs)\n context[\"concordances\"] = (\n Sequence.objects.filter(cantus_id=sequence.cantus_id)\n .select_related(\"source\")\n .order_by(\"siglum\")\n )\n context[\"user_can_edit_sequence\"] = user_can_edit_sequences(user, sequence)\n return context\n\n\nclass SequenceListView(ListView):\n \"\"\"\n Displays a list of Sequence objects. Accessed with ``sequences/``\n \"\"\"\n\n paginate_by = 100\n context_object_name = \"sequences\"\n template_name = \"sequence_list.html\"\n\n def get_queryset(self):\n queryset = Sequence.objects.select_related(\"source\")\n display_unpublished = self.request.user.is_authenticated\n if display_unpublished:\n q_obj_filter = Q()\n else:\n q_obj_filter = Q(source__published=True)\n\n if self.request.GET.get(\"incipit\"):\n incipit = self.request.GET.get(\"incipit\")\n q_obj_filter &= Q(incipit__icontains=incipit)\n if self.request.GET.get(\"siglum\"):\n siglum = self.request.GET.get(\"siglum\")\n q_obj_filter &= Q(siglum__icontains=siglum)\n if self.request.GET.get(\"cantus_id\"):\n cantus_id = self.request.GET.get(\"cantus_id\")\n q_obj_filter &= Q(cantus_id__icontains=cantus_id)\n\n return queryset.filter(q_obj_filter).order_by(\"siglum\", \"s_sequence\")\n\n\nclass SequenceEditView(LoginRequiredMixin, UserPassesTestMixin, UpdateView):\n template_name = \"sequence_edit.html\"\n model = Sequence\n form_class = SequenceEditForm\n pk_url_kwarg = \"sequence_id\"\n\n def form_valid(self, form):\n form.instance.last_updated_by = self.request.user\n messages.success(\n self.request,\n \"Sequence updated successfully!\",\n )\n return super().form_valid(form)\n\n def test_func(self):\n user = self.request.user\n sequence = self.get_object()\n return user_can_edit_sequences(user, sequence)\n", "path": "django/cantusdb_project/main_app/views/sequence.py"}, {"content": "from django.db.models import Q\nfrom typing import Optional\nfrom main_app.models import (\n Source,\n Chant,\n Sequence,\n)\nfrom users.models import User\n\n\ndef user_can_edit_chants_in_source(user: User, source: Optional[Source]) -> bool:\n \"\"\"\n Checks if the user can edit Chants in a given Source.\n Used in ChantDetail, ChantList, ChantCreate, ChantDelete, ChantEdit,\n ChantEditSyllabification, and SourceDetail views.\n \"\"\"\n if user.is_anonymous or (source is None):\n return False\n\n source_id = source.id\n user_is_assigned_to_source: bool = user.sources_user_can_edit.filter(\n id=source_id\n ).exists()\n\n user_is_project_manager: bool = user.groups.filter(name=\"project manager\").exists()\n 
user_is_editor: bool = user.groups.filter(name=\"editor\").exists()\n user_is_contributor: bool = user.groups.filter(name=\"contributor\").exists()\n\n return (\n (user_is_project_manager)\n or (user_is_editor and user_is_assigned_to_source)\n or (user_is_editor and source.created_by == user)\n or (user_is_contributor and user_is_assigned_to_source)\n or (user_is_contributor and source.created_by == user)\n )\n\n\ndef user_can_proofread_chant(user: User, chant: Chant) -> bool:\n \"\"\"\n Checks if the user can access the proofreading page of a given Source.\n Used in SourceEditChantsView.\n \"\"\"\n source_id = chant.source.id\n if user.is_anonymous:\n return False\n user_is_assigned_to_source: bool = user.sources_user_can_edit.filter(\n id=source_id\n ).exists()\n\n user_is_project_manager: bool = user.groups.filter(name=\"project manager\").exists()\n user_is_editor: bool = user.groups.filter(name=\"editor\").exists()\n\n return (user_is_project_manager) or (user_is_editor and user_is_assigned_to_source)\n\n\ndef user_can_view_source(user: User, source: Source) -> bool:\n \"\"\"\n Checks if the user can view an unpublished Source on the site.\n Used in ChantDetail, SequenceDetail, and SourceDetail views.\n \"\"\"\n user_is_authenticated: bool = user.is_authenticated\n return (source.published) or (user_is_authenticated)\n\n\ndef user_can_view_chant(user: User, chant: Chant) -> bool:\n \"\"\"\n Checks if the user can view a Chant belonging to an unpublished Source on the site.\n Used in ChantDetail, SequenceDetail, and SourceDetail views.\n \"\"\"\n source = chant.source\n user_is_authenticated: bool = user.is_authenticated\n return (source is not None) and ((source.published) or (user_is_authenticated))\n\n\ndef user_can_view_sequence(user: User, sequence: Sequence) -> bool:\n \"\"\"\n Checks if the user can view a Sequence belonging to an unpublished Source on the site.\n Used in ChantDetail, SequenceDetail, and SourceDetail views.\n \"\"\"\n source = sequence.source\n user_is_authenticated: bool = user.is_authenticated\n return (source is not None) and ((source.published) or (user_is_authenticated))\n\n\ndef user_can_edit_sequences(user: User, sequence: Sequence) -> bool:\n \"\"\"\n Checks if the user has permission to edit a Sequence object.\n Used in SequenceDetail and SequenceEdit views.\n \"\"\"\n source = sequence.source\n if user.is_anonymous or (source is None):\n return False\n\n source_id = source.id\n user_is_assigned_to_source: bool = user.sources_user_can_edit.filter(\n id=source_id\n ).exists()\n\n user_is_project_manager: bool = user.groups.filter(name=\"project manager\").exists()\n user_is_editor: bool = user.groups.filter(name=\"editor\").exists()\n user_is_contributor: bool = user.groups.filter(name=\"contributor\").exists()\n\n return (\n (user_is_project_manager)\n or (user_is_editor and user_is_assigned_to_source)\n or (user_is_editor and source.created_by == user)\n or (user_is_contributor and user_is_assigned_to_source)\n or (user_is_contributor and source.created_by == user)\n )\n\n\ndef user_can_create_sources(user: User) -> bool:\n \"\"\"\n Checks if the user has permission to create a Source object.\n Used in SourceCreateView.\n \"\"\"\n\n return user.groups.filter(\n Q(name=\"project manager\") | Q(name=\"editor\") | Q(name=\"contributor\")\n ).exists()\n\n\ndef user_can_edit_source(user: User, source: Source) -> bool:\n \"\"\"\n Checks if the user has permission to edit a Source object.\n Used in SourceDetail, SourceEdit, and SourceDelete views.\n \"\"\"\n 
if user.is_anonymous:\n return False\n source_id = source.id\n assigned_to_source = user.sources_user_can_edit.filter(id=source_id)\n\n is_project_manager: bool = user.groups.filter(name=\"project manager\").exists()\n is_editor: bool = user.groups.filter(name=\"editor\").exists()\n is_contributor: bool = user.groups.filter(name=\"contributor\").exists()\n\n return (\n (is_project_manager)\n or (is_editor and assigned_to_source)\n or (is_editor and source.created_by == user)\n or (is_contributor and source.created_by == user)\n )\n\n\ndef user_can_view_user_detail(viewing_user: User, user: User) -> bool:\n \"\"\"\n Checks if the user can view the user detail pages of regular users in the database or just indexers.\n Used in UserDetailView.\n \"\"\"\n return viewing_user.is_authenticated or user.is_indexer\n\n\ndef user_can_manage_source_editors(user: User) -> bool:\n \"\"\"\n Checks if the user has permission to change the editors assigned to a Source.\n Used in SourceDetailView.\n \"\"\"\n return user.is_staff or user.groups.filter(name=\"project manager\").exists()\n", "path": "django/cantusdb_project/main_app/permissions.py"}]} | 2,589 | 556 |
gh_patches_debug_26561 | rasdani/github-patches | git_diff | OpenEnergyPlatform__oeplatform-611 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Fix OEO page and add OEO pages to navigation bar
- [x] Fix linebreak in link (Dipl.-Ing.)
- [x] Add Daniel to list
- [x] Add pages to navigation bar
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ontology/views.py`
Content:
```
1 from django.shortcuts import render, HttpResponse, redirect, Http404
2 from django.views import View
3 from rdflib import Graph, RDFS
4 from oeplatform.settings import ONTOLOGY_FOLDER
5 from collections import OrderedDict
6
7 import os
8 import re
9
10
11 def collect_modules(path):
12 modules = dict()
13 for file in os.listdir(path):
14 if not os.path.isdir(os.path.join(path,file)):
15 match = re.match("^(?P<filename>.*)\.(?P<extension>\w+)$", file)
16 filename, extension = match.groups()
17 if filename not in modules:
18 modules[filename] = dict(extensions=[], comment="No description found")
19 if extension == "owl":
20 g = Graph()
21 g.parse(os.path.join(path, file))
22 root = dict(g.namespaces())['']
23 comments = g.objects(root, RDFS.comment)
24 try:
25 modules[filename]["comment"] = next(comments)
26 except StopIteration:
27 modules[filename]["comment"] = "No description found"
28 modules[filename]["extensions"].append(extension)
29 return modules
30
31 class OntologyOverview(View):
32 def get(self, request, ontology, module_or_id=None, version=None, imports=False):
33 versions = os.listdir(f"{ONTOLOGY_FOLDER}/{ontology}")
34 if not version:
35 version = max((d for d in versions), key=lambda d:[int(x) for x in d.split(".")])
36
37 if "text/html" in request.headers.get("accept","").split(","):
38 main_module = collect_modules(f"{ONTOLOGY_FOLDER}/{ontology}/{version}")
39 main_module_name = list(main_module.keys())[0]
40 main_module = main_module[main_module_name]
41 main_module["name"] = main_module_name
42 submodules = collect_modules(f"{ONTOLOGY_FOLDER}/{ontology}/{version}/modules")
43 # Collect all file names
44
45 imports = collect_modules(f"{ONTOLOGY_FOLDER}/{ontology}/{version}/imports")
46
47 return render(request, "ontology/oeo.html", dict(
48 ontology=ontology,
49 version=version,
50 main_module=main_module,
51 submodules=submodules.items(),
52 imports=imports.items()
53 ))
54 else:
55 module_name = None
56 if module_or_id:
57 if imports:
58 submodules = collect_modules(f"{ONTOLOGY_FOLDER}/{ontology}/{version}/imports")
59 else:
60 submodules = collect_modules(f"{ONTOLOGY_FOLDER}/{ontology}/{version}/modules")
61 # If module_or_id is the name of a valid submodule, use this module
62 if module_or_id in submodules:
63 module_name = module_or_id
64 if imports:
65 return redirect(f"/ontology/{ontology}/releases/{version}/imports/{module_name}.owl")
66 else:
67 return redirect(f"/ontology/{ontology}/releases/{version}/{module_name}.owl")
68 # If no module was requested or the requested id was not a module, serve main ontology
69 if module_name is None:
70 main_module = collect_modules(f"{ONTOLOGY_FOLDER}/{ontology}/{version}")
71 module_name = list(main_module.keys())[0]
72 return redirect(f"/ontology/{ontology}/releases/{version}/{module_name}.owl")
73
74
75 class OntologyStatics(View):
76 def get(self, request, ontology, file, version=None, extension=None, imports=False):
77 """
78 Returns the requested file `{file}.{extension}` of version `version`
79 of ontology `ontology`
80
81 :param version: default: highest version in folder
82 :param extension: default: `.owl`
83 :return:
84 """
85
86 if not extension:
87 extension = "owl"
88 if not version:
89 version = max((d for d in os.listdir(f"{ONTOLOGY_FOLDER}/{ontology}")), key=lambda d:[int(x) for x in d.split(".")])
90 if imports:
91 file_path = f"{ONTOLOGY_FOLDER}/{ontology}/{version}/imports/{file}.{extension}"
92 else:
93 file_path = f"{ONTOLOGY_FOLDER}/{ontology}/{version}/{file}.{extension}"
94 if os.path.exists(file_path):
95 with open(file_path, "br") as f:
96 response = HttpResponse(f, content_type="application/rdf+xml; charset=utf-8")
97 response["Content-Disposition"] = f'attachment; filename="{file}.{extension}"'
98 return response
99 else:
100 file_path = f"{ONTOLOGY_FOLDER}/{ontology}/{version}/modules/{file}.{extension}"
101 if not os.path.exists(file_path):
102 raise Http404
103 with open(file_path, "br") as f:
104 response = HttpResponse(f, content_type="application/rdf+xml; charset=utf-8")
105 response["Content-Disposition"] = f'attachment; filename="{file}.{extension}"'
106 return response
```
Path: `ontology/urls.py`
Content:
```
1 from django.conf.urls import url
2 from django.conf.urls.static import static
3 from django.views.generic import TemplateView, RedirectView
4 from os import walk
5 from ontology import views
6 from oeplatform import settings
7
8 urlpatterns = [
9 url(r"^$", TemplateView.as_view(template_name="ontology/about.html")),
10 url(r"^ontology/oeo-steering-committee$",
11 TemplateView.as_view(template_name="ontology/oeo-steering-committee.html"),
12 name="oeo-s-c"),
13 url(r"^(?P<ontology>[\w_-]+)\/releases(\/v?(?P<version>[\d\.]+))?\/imports\/(?P<file>[\w_-]+)(.(?P<extension>[\w_-]+))?$",
14 views.OntologyStatics.as_view(), {"imports": True}),
15
16 url(r"^(?P<ontology>[\w_-]+)\/releases(\/v?(?P<version>[\d\.]+))?\/(?P<file>[\w_-]+)(.(?P<extension>[\w_-]+))?$",
17 views.OntologyStatics.as_view()),
18
19 url(r"^(?P<ontology>[\w_-]+)\/imports\/(?P<module_or_id>[\w\d_-]+)",
20 views.OntologyOverview.as_view(), {"imports": True}),
21
22 url(r"^(?P<ontology>[\w_-]+)(/(?P<module_or_id>[\w\d_-]+))?",
23 views.OntologyOverview.as_view()),
24
25 ]
26
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ontology/urls.py b/ontology/urls.py
--- a/ontology/urls.py
+++ b/ontology/urls.py
@@ -7,9 +7,11 @@
urlpatterns = [
url(r"^$", TemplateView.as_view(template_name="ontology/about.html")),
- url(r"^ontology/oeo-steering-committee$",
+ url(r"^ontology/$", TemplateView.as_view(template_name="ontology/about.html")),
+ url(r"^oeo-steering-committee/$",
TemplateView.as_view(template_name="ontology/oeo-steering-committee.html"),
name="oeo-s-c"),
+ url(r"^ontology/oeo-steering-committee/$", TemplateView.as_view(template_name="ontology/oeo-steering-committee.html")),
url(r"^(?P<ontology>[\w_-]+)\/releases(\/v?(?P<version>[\d\.]+))?\/imports\/(?P<file>[\w_-]+)(.(?P<extension>[\w_-]+))?$",
views.OntologyStatics.as_view(), {"imports": True}),
diff --git a/ontology/views.py b/ontology/views.py
--- a/ontology/views.py
+++ b/ontology/views.py
@@ -30,6 +30,8 @@
class OntologyOverview(View):
def get(self, request, ontology, module_or_id=None, version=None, imports=False):
+ if not os.path.exists(f"{ONTOLOGY_FOLDER}/{ontology}"):
+ raise Http404
versions = os.listdir(f"{ONTOLOGY_FOLDER}/{ontology}")
if not version:
version = max((d for d in versions), key=lambda d:[int(x) for x in d.split(".")])
| {"golden_diff": "diff --git a/ontology/urls.py b/ontology/urls.py\n--- a/ontology/urls.py\n+++ b/ontology/urls.py\n@@ -7,9 +7,11 @@\n \n urlpatterns = [\n url(r\"^$\", TemplateView.as_view(template_name=\"ontology/about.html\")),\n- url(r\"^ontology/oeo-steering-committee$\",\n+ url(r\"^ontology/$\", TemplateView.as_view(template_name=\"ontology/about.html\")),\n+ url(r\"^oeo-steering-committee/$\",\n TemplateView.as_view(template_name=\"ontology/oeo-steering-committee.html\"),\n name=\"oeo-s-c\"),\n+ url(r\"^ontology/oeo-steering-committee/$\", TemplateView.as_view(template_name=\"ontology/oeo-steering-committee.html\")),\n url(r\"^(?P<ontology>[\\w_-]+)\\/releases(\\/v?(?P<version>[\\d\\.]+))?\\/imports\\/(?P<file>[\\w_-]+)(.(?P<extension>[\\w_-]+))?$\",\n views.OntologyStatics.as_view(), {\"imports\": True}),\n \ndiff --git a/ontology/views.py b/ontology/views.py\n--- a/ontology/views.py\n+++ b/ontology/views.py\n@@ -30,6 +30,8 @@\n \n class OntologyOverview(View):\n def get(self, request, ontology, module_or_id=None, version=None, imports=False):\n+ if not os.path.exists(f\"{ONTOLOGY_FOLDER}/{ontology}\"):\n+ raise Http404\n versions = os.listdir(f\"{ONTOLOGY_FOLDER}/{ontology}\")\n if not version:\n version = max((d for d in versions), key=lambda d:[int(x) for x in d.split(\".\")])\n", "issue": "Fix OEO page and add OEO pages to navigation bar\n- [x] Fix linebreak in link (Dipl.-Ing.)\r\n- [x] Add Daniel to list\r\n- [x] Add pages to navigation bar\n", "before_files": [{"content": "from django.shortcuts import render, HttpResponse, redirect, Http404\nfrom django.views import View\nfrom rdflib import Graph, RDFS\nfrom oeplatform.settings import ONTOLOGY_FOLDER\nfrom collections import OrderedDict\n\nimport os\nimport re\n\n\ndef collect_modules(path):\n modules = dict()\n for file in os.listdir(path):\n if not os.path.isdir(os.path.join(path,file)):\n match = re.match(\"^(?P<filename>.*)\\.(?P<extension>\\w+)$\", file)\n filename, extension = match.groups()\n if filename not in modules:\n modules[filename] = dict(extensions=[], comment=\"No description found\")\n if extension == \"owl\":\n g = Graph()\n g.parse(os.path.join(path, file))\n root = dict(g.namespaces())['']\n comments = g.objects(root, RDFS.comment)\n try:\n modules[filename][\"comment\"] = next(comments)\n except StopIteration:\n modules[filename][\"comment\"] = \"No description found\"\n modules[filename][\"extensions\"].append(extension)\n return modules\n\nclass OntologyOverview(View):\n def get(self, request, ontology, module_or_id=None, version=None, imports=False):\n versions = os.listdir(f\"{ONTOLOGY_FOLDER}/{ontology}\")\n if not version:\n version = max((d for d in versions), key=lambda d:[int(x) for x in d.split(\".\")])\n\n if \"text/html\" in request.headers.get(\"accept\",\"\").split(\",\"):\n main_module = collect_modules(f\"{ONTOLOGY_FOLDER}/{ontology}/{version}\")\n main_module_name = list(main_module.keys())[0]\n main_module = main_module[main_module_name]\n main_module[\"name\"] = main_module_name\n submodules = collect_modules(f\"{ONTOLOGY_FOLDER}/{ontology}/{version}/modules\")\n # Collect all file names\n\n imports = collect_modules(f\"{ONTOLOGY_FOLDER}/{ontology}/{version}/imports\")\n\n return render(request, \"ontology/oeo.html\", dict(\n ontology=ontology,\n version=version,\n main_module=main_module,\n submodules=submodules.items(),\n imports=imports.items()\n ))\n else:\n module_name = None\n if module_or_id:\n if imports:\n submodules = 
collect_modules(f\"{ONTOLOGY_FOLDER}/{ontology}/{version}/imports\")\n else:\n submodules = collect_modules(f\"{ONTOLOGY_FOLDER}/{ontology}/{version}/modules\")\n # If module_or_id is the name of a valid submodule, use this module\n if module_or_id in submodules:\n module_name = module_or_id\n if imports:\n return redirect(f\"/ontology/{ontology}/releases/{version}/imports/{module_name}.owl\")\n else:\n return redirect(f\"/ontology/{ontology}/releases/{version}/{module_name}.owl\")\n # If no module was requested or the requested id was not a module, serve main ontology\n if module_name is None:\n main_module = collect_modules(f\"{ONTOLOGY_FOLDER}/{ontology}/{version}\")\n module_name = list(main_module.keys())[0]\n return redirect(f\"/ontology/{ontology}/releases/{version}/{module_name}.owl\")\n\n\nclass OntologyStatics(View):\n def get(self, request, ontology, file, version=None, extension=None, imports=False):\n \"\"\"\n Returns the requested file `{file}.{extension}` of version `version`\n of ontology `ontology`\n\n :param version: default: highest version in folder\n :param extension: default: `.owl`\n :return:\n \"\"\"\n\n if not extension:\n extension = \"owl\"\n if not version:\n version = max((d for d in os.listdir(f\"{ONTOLOGY_FOLDER}/{ontology}\")), key=lambda d:[int(x) for x in d.split(\".\")])\n if imports:\n file_path = f\"{ONTOLOGY_FOLDER}/{ontology}/{version}/imports/{file}.{extension}\"\n else:\n file_path = f\"{ONTOLOGY_FOLDER}/{ontology}/{version}/{file}.{extension}\"\n if os.path.exists(file_path):\n with open(file_path, \"br\") as f:\n response = HttpResponse(f, content_type=\"application/rdf+xml; charset=utf-8\")\n response[\"Content-Disposition\"] = f'attachment; filename=\"{file}.{extension}\"'\n return response\n else:\n file_path = f\"{ONTOLOGY_FOLDER}/{ontology}/{version}/modules/{file}.{extension}\"\n if not os.path.exists(file_path):\n raise Http404\n with open(file_path, \"br\") as f:\n response = HttpResponse(f, content_type=\"application/rdf+xml; charset=utf-8\")\n response[\"Content-Disposition\"] = f'attachment; filename=\"{file}.{extension}\"'\n return response", "path": "ontology/views.py"}, {"content": "from django.conf.urls import url\nfrom django.conf.urls.static import static\nfrom django.views.generic import TemplateView, RedirectView\nfrom os import walk\nfrom ontology import views\nfrom oeplatform import settings\n\nurlpatterns = [\n url(r\"^$\", TemplateView.as_view(template_name=\"ontology/about.html\")),\n url(r\"^ontology/oeo-steering-committee$\",\n TemplateView.as_view(template_name=\"ontology/oeo-steering-committee.html\"),\n name=\"oeo-s-c\"),\n url(r\"^(?P<ontology>[\\w_-]+)\\/releases(\\/v?(?P<version>[\\d\\.]+))?\\/imports\\/(?P<file>[\\w_-]+)(.(?P<extension>[\\w_-]+))?$\",\n views.OntologyStatics.as_view(), {\"imports\": True}),\n\n url(r\"^(?P<ontology>[\\w_-]+)\\/releases(\\/v?(?P<version>[\\d\\.]+))?\\/(?P<file>[\\w_-]+)(.(?P<extension>[\\w_-]+))?$\",\n views.OntologyStatics.as_view()),\n\n url(r\"^(?P<ontology>[\\w_-]+)\\/imports\\/(?P<module_or_id>[\\w\\d_-]+)\",\n views.OntologyOverview.as_view(), {\"imports\": True}),\n\n url(r\"^(?P<ontology>[\\w_-]+)(/(?P<module_or_id>[\\w\\d_-]+))?\",\n views.OntologyOverview.as_view()),\n\n]\n", "path": "ontology/urls.py"}], "after_files": [{"content": "from django.shortcuts import render, HttpResponse, redirect, Http404\nfrom django.views import View\nfrom rdflib import Graph, RDFS\nfrom oeplatform.settings import ONTOLOGY_FOLDER\nfrom collections import OrderedDict\n\nimport os\nimport 
re\n\n\ndef collect_modules(path):\n modules = dict()\n for file in os.listdir(path):\n if not os.path.isdir(os.path.join(path,file)):\n match = re.match(\"^(?P<filename>.*)\\.(?P<extension>\\w+)$\", file)\n filename, extension = match.groups()\n if filename not in modules:\n modules[filename] = dict(extensions=[], comment=\"No description found\")\n if extension == \"owl\":\n g = Graph()\n g.parse(os.path.join(path, file))\n root = dict(g.namespaces())['']\n comments = g.objects(root, RDFS.comment)\n try:\n modules[filename][\"comment\"] = next(comments)\n except StopIteration:\n modules[filename][\"comment\"] = \"No description found\"\n modules[filename][\"extensions\"].append(extension)\n return modules\n\nclass OntologyOverview(View):\n def get(self, request, ontology, module_or_id=None, version=None, imports=False):\n if not os.path.exists(f\"{ONTOLOGY_FOLDER}/{ontology}\"):\n raise Http404\n versions = os.listdir(f\"{ONTOLOGY_FOLDER}/{ontology}\")\n if not version:\n version = max((d for d in versions), key=lambda d:[int(x) for x in d.split(\".\")])\n\n if \"text/html\" in request.headers.get(\"accept\",\"\").split(\",\"):\n main_module = collect_modules(f\"{ONTOLOGY_FOLDER}/{ontology}/{version}\")\n main_module_name = list(main_module.keys())[0]\n main_module = main_module[main_module_name]\n main_module[\"name\"] = main_module_name\n submodules = collect_modules(f\"{ONTOLOGY_FOLDER}/{ontology}/{version}/modules\")\n # Collect all file names\n\n imports = collect_modules(f\"{ONTOLOGY_FOLDER}/{ontology}/{version}/imports\")\n\n return render(request, \"ontology/oeo.html\", dict(\n ontology=ontology,\n version=version,\n main_module=main_module,\n submodules=submodules.items(),\n imports=imports.items()\n ))\n else:\n module_name = None\n if module_or_id:\n if imports:\n submodules = collect_modules(f\"{ONTOLOGY_FOLDER}/{ontology}/{version}/imports\")\n else:\n submodules = collect_modules(f\"{ONTOLOGY_FOLDER}/{ontology}/{version}/modules\")\n # If module_or_id is the name of a valid submodule, use this module\n if module_or_id in submodules:\n module_name = module_or_id\n if imports:\n return redirect(f\"/ontology/{ontology}/releases/{version}/imports/{module_name}.owl\")\n else:\n return redirect(f\"/ontology/{ontology}/releases/{version}/{module_name}.owl\")\n # If no module was requested or the requested id was not a module, serve main ontology\n if module_name is None:\n main_module = collect_modules(f\"{ONTOLOGY_FOLDER}/{ontology}/{version}\")\n module_name = list(main_module.keys())[0]\n return redirect(f\"/ontology/{ontology}/releases/{version}/{module_name}.owl\")\n\n\nclass OntologyStatics(View):\n def get(self, request, ontology, file, version=None, extension=None, imports=False):\n \"\"\"\n Returns the requested file `{file}.{extension}` of version `version`\n of ontology `ontology`\n\n :param version: default: highest version in folder\n :param extension: default: `.owl`\n :return:\n \"\"\"\n\n if not extension:\n extension = \"owl\"\n if not version:\n version = max((d for d in os.listdir(f\"{ONTOLOGY_FOLDER}/{ontology}\")), key=lambda d:[int(x) for x in d.split(\".\")])\n if imports:\n file_path = f\"{ONTOLOGY_FOLDER}/{ontology}/{version}/imports/{file}.{extension}\"\n else:\n file_path = f\"{ONTOLOGY_FOLDER}/{ontology}/{version}/{file}.{extension}\"\n if os.path.exists(file_path):\n with open(file_path, \"br\") as f:\n response = HttpResponse(f, content_type=\"application/rdf+xml; charset=utf-8\")\n response[\"Content-Disposition\"] = f'attachment; 
filename=\"{file}.{extension}\"'\n return response\n else:\n file_path = f\"{ONTOLOGY_FOLDER}/{ontology}/{version}/modules/{file}.{extension}\"\n if not os.path.exists(file_path):\n raise Http404\n with open(file_path, \"br\") as f:\n response = HttpResponse(f, content_type=\"application/rdf+xml; charset=utf-8\")\n response[\"Content-Disposition\"] = f'attachment; filename=\"{file}.{extension}\"'\n return response", "path": "ontology/views.py"}, {"content": "from django.conf.urls import url\nfrom django.conf.urls.static import static\nfrom django.views.generic import TemplateView, RedirectView\nfrom os import walk\nfrom ontology import views\nfrom oeplatform import settings\n\nurlpatterns = [\n url(r\"^$\", TemplateView.as_view(template_name=\"ontology/about.html\")),\n url(r\"^ontology/$\", TemplateView.as_view(template_name=\"ontology/about.html\")),\n url(r\"^oeo-steering-committee/$\",\n TemplateView.as_view(template_name=\"ontology/oeo-steering-committee.html\"),\n name=\"oeo-s-c\"),\n url(r\"^ontology/oeo-steering-committee/$\", TemplateView.as_view(template_name=\"ontology/oeo-steering-committee.html\")),\n url(r\"^(?P<ontology>[\\w_-]+)\\/releases(\\/v?(?P<version>[\\d\\.]+))?\\/imports\\/(?P<file>[\\w_-]+)(.(?P<extension>[\\w_-]+))?$\",\n views.OntologyStatics.as_view(), {\"imports\": True}),\n\n url(r\"^(?P<ontology>[\\w_-]+)\\/releases(\\/v?(?P<version>[\\d\\.]+))?\\/(?P<file>[\\w_-]+)(.(?P<extension>[\\w_-]+))?$\",\n views.OntologyStatics.as_view()),\n\n url(r\"^(?P<ontology>[\\w_-]+)\\/imports\\/(?P<module_or_id>[\\w\\d_-]+)\",\n views.OntologyOverview.as_view(), {\"imports\": True}),\n\n url(r\"^(?P<ontology>[\\w_-]+)(/(?P<module_or_id>[\\w\\d_-]+))?\",\n views.OntologyOverview.as_view()),\n\n]\n", "path": "ontology/urls.py"}]} | 1,912 | 377 |
gh_patches_debug_36747 | rasdani/github-patches | git_diff | CiviWiki__OpenCiviWiki-1037 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Move forms from `api/forms.py` to the `accounts/forms.py`
## Idea summary
There are several account/profile forms defined in [`api/forms.py`](https://github.com/CiviWiki/OpenCiviWiki/blob/develop/project/api/forms.py). Those forms should be moved to [`accounts/forms.py`](https://github.com/CiviWiki/OpenCiviWiki/blob/develop/project/accounts/forms.py) or deleted if they are duplicate code.
**Update:** the code in `api/forms.py` is actually redundant, so may simply be deleted.
## Task
The steps to complete this task are:
- [x] [fork this repository](https://docs.github.com/en/get-started/quickstart/fork-a-repo) and clone it to your local computer
- [x] set up a local development environment as [outlined in our Contributing Guide](https://github.com/CiviWiki/OpenCiviWiki/blob/develop/CONTRIBUTING.md#development)
- [x] delete the file `api/forms.py`
- [x] commit your changes
- [x] push your code to GitHub
- [x] [open a pull request](https://docs.github.com/en/github/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/creating-a-pull-request) against the `main` branch in this repository
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `project/api/forms.py`
Content:
```
1 from django import forms
2 from django.core.files.images import get_image_dimensions
3 from django.contrib.auth import get_user_model
4 from accounts.models import Profile
5
6
7 class UpdatePassword(forms.ModelForm):
8 """
9 Form for updating User Password
10 """
11
12 class Meta:
13 model = get_user_model()
14 fields = ["password", "verify"]
15
16 password = forms.CharField(
17 label="Password",
18 widget=forms.PasswordInput(
19 attrs={
20 "class": "form-control",
21 "placeholder": "Password",
22 "required": "True",
23 }
24 ),
25 )
26 verify = forms.CharField(
27 label="Password Verify",
28 widget=forms.PasswordInput(
29 attrs={
30 "class": "form-control",
31 "placeholder": "Password Verify",
32 "required": "True",
33 }
34 ),
35 help_text="Please retype your password.",
36 )
37
38 def clean(self):
39 """
40 Verifies that the passwords match
41 """
42 clean_data = super(UpdatePassword, self).clean()
43 if "password" in clean_data and "verify" in clean_data:
44 if clean_data["password"] != clean_data["verify"]:
45 raise forms.ValidationError("Passwords don't match.")
46 else:
47 raise forms.ValidationError("Both password fields need to be filled out.")
48 return clean_data
49
50
51 class UpdateProfileImage(forms.ModelForm):
52 """
53 Form for updating profile image
54 """
55
56 class Meta:
57 model = Profile
58 fields = ["profile_image"]
59
60 profile_image = forms.ImageField()
61
62 def clean_profile_image(self):
63 """
64 This function is used to make sure that profile images follow Civiwiki standards.
65
66 Requirements:
67 - Height cannot exceed 960px
68 - Width cannot exceed 1280px
69 - Image must be (jpg, jpeg, pjeg, png)
70 - File size cannot exceed 2MB
71 """
72 profile_image = self.cleaned_data["profile_image"]
73
74 try:
75 w, h = get_image_dimensions(profile_image)
76
77 # validate dimensions
78 max_height = 960
79 max_width = 1280
80 if w > max_width or h > max_height:
81 raise forms.ValidationError(
82 u"Please use an image that is {w} x {h} pixels or smaller.".format(
83 w=max_width, h=max_height
84 )
85 )
86
87 # validate content type
88 main, sub = profile_image.content_type.split("/")
89 if not (main == "image" and sub in ["jpg", "jpeg", "pjpeg", "png"]):
90 raise forms.ValidationError(u"Please use a JPEG or PNG image.")
91
92 # validate file size
93 if len(profile_image) > (2000 * 1024):
94 raise forms.ValidationError(
95 "Profile image file size may not exceed 2MB."
96 )
97
98 except AttributeError:
99 pass
100
101 return profile_image
102
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/project/api/forms.py b/project/api/forms.py
deleted file mode 100644
--- a/project/api/forms.py
+++ /dev/null
@@ -1,101 +0,0 @@
-from django import forms
-from django.core.files.images import get_image_dimensions
-from django.contrib.auth import get_user_model
-from accounts.models import Profile
-
-
-class UpdatePassword(forms.ModelForm):
- """
- Form for updating User Password
- """
-
- class Meta:
- model = get_user_model()
- fields = ["password", "verify"]
-
- password = forms.CharField(
- label="Password",
- widget=forms.PasswordInput(
- attrs={
- "class": "form-control",
- "placeholder": "Password",
- "required": "True",
- }
- ),
- )
- verify = forms.CharField(
- label="Password Verify",
- widget=forms.PasswordInput(
- attrs={
- "class": "form-control",
- "placeholder": "Password Verify",
- "required": "True",
- }
- ),
- help_text="Please retype your password.",
- )
-
- def clean(self):
- """
- Verifies that the passwords match
- """
- clean_data = super(UpdatePassword, self).clean()
- if "password" in clean_data and "verify" in clean_data:
- if clean_data["password"] != clean_data["verify"]:
- raise forms.ValidationError("Passwords don't match.")
- else:
- raise forms.ValidationError("Both password fields need to be filled out.")
- return clean_data
-
-
-class UpdateProfileImage(forms.ModelForm):
- """
- Form for updating profile image
- """
-
- class Meta:
- model = Profile
- fields = ["profile_image"]
-
- profile_image = forms.ImageField()
-
- def clean_profile_image(self):
- """
- This function is used to make sure that profile images follow Civiwiki standards.
-
- Requirements:
- - Height cannot exceed 960px
- - Width cannot exceed 1280px
- - Image must be (jpg, jpeg, pjeg, png)
- - File size cannot exceed 2MB
- """
- profile_image = self.cleaned_data["profile_image"]
-
- try:
- w, h = get_image_dimensions(profile_image)
-
- # validate dimensions
- max_height = 960
- max_width = 1280
- if w > max_width or h > max_height:
- raise forms.ValidationError(
- u"Please use an image that is {w} x {h} pixels or smaller.".format(
- w=max_width, h=max_height
- )
- )
-
- # validate content type
- main, sub = profile_image.content_type.split("/")
- if not (main == "image" and sub in ["jpg", "jpeg", "pjpeg", "png"]):
- raise forms.ValidationError(u"Please use a JPEG or PNG image.")
-
- # validate file size
- if len(profile_image) > (2000 * 1024):
- raise forms.ValidationError(
- "Profile image file size may not exceed 2MB."
- )
-
- except AttributeError:
- pass
-
- return profile_image
| {"golden_diff": "diff --git a/project/api/forms.py b/project/api/forms.py\ndeleted file mode 100644\n--- a/project/api/forms.py\n+++ /dev/null\n@@ -1,101 +0,0 @@\n-from django import forms\n-from django.core.files.images import get_image_dimensions\n-from django.contrib.auth import get_user_model\n-from accounts.models import Profile\n-\n-\n-class UpdatePassword(forms.ModelForm):\n- \"\"\"\n- Form for updating User Password\n- \"\"\"\n-\n- class Meta:\n- model = get_user_model()\n- fields = [\"password\", \"verify\"]\n-\n- password = forms.CharField(\n- label=\"Password\",\n- widget=forms.PasswordInput(\n- attrs={\n- \"class\": \"form-control\",\n- \"placeholder\": \"Password\",\n- \"required\": \"True\",\n- }\n- ),\n- )\n- verify = forms.CharField(\n- label=\"Password Verify\",\n- widget=forms.PasswordInput(\n- attrs={\n- \"class\": \"form-control\",\n- \"placeholder\": \"Password Verify\",\n- \"required\": \"True\",\n- }\n- ),\n- help_text=\"Please retype your password.\",\n- )\n-\n- def clean(self):\n- \"\"\"\n- Verifies that the passwords match\n- \"\"\"\n- clean_data = super(UpdatePassword, self).clean()\n- if \"password\" in clean_data and \"verify\" in clean_data:\n- if clean_data[\"password\"] != clean_data[\"verify\"]:\n- raise forms.ValidationError(\"Passwords don't match.\")\n- else:\n- raise forms.ValidationError(\"Both password fields need to be filled out.\")\n- return clean_data\n-\n-\n-class UpdateProfileImage(forms.ModelForm):\n- \"\"\"\n- Form for updating profile image\n- \"\"\"\n-\n- class Meta:\n- model = Profile\n- fields = [\"profile_image\"]\n-\n- profile_image = forms.ImageField()\n-\n- def clean_profile_image(self):\n- \"\"\"\n- This function is used to make sure that profile images follow Civiwiki standards.\n-\n- Requirements:\n- - Height cannot exceed 960px\n- - Width cannot exceed 1280px\n- - Image must be (jpg, jpeg, pjeg, png)\n- - File size cannot exceed 2MB\n- \"\"\"\n- profile_image = self.cleaned_data[\"profile_image\"]\n-\n- try:\n- w, h = get_image_dimensions(profile_image)\n-\n- # validate dimensions\n- max_height = 960\n- max_width = 1280\n- if w > max_width or h > max_height:\n- raise forms.ValidationError(\n- u\"Please use an image that is {w} x {h} pixels or smaller.\".format(\n- w=max_width, h=max_height\n- )\n- )\n-\n- # validate content type\n- main, sub = profile_image.content_type.split(\"/\")\n- if not (main == \"image\" and sub in [\"jpg\", \"jpeg\", \"pjpeg\", \"png\"]):\n- raise forms.ValidationError(u\"Please use a JPEG or PNG image.\")\n-\n- # validate file size\n- if len(profile_image) > (2000 * 1024):\n- raise forms.ValidationError(\n- \"Profile image file size may not exceed 2MB.\"\n- )\n-\n- except AttributeError:\n- pass\n-\n- return profile_image\n", "issue": "Move forms from `api/forms.py` to the `accounts/forms.py`\n## Idea summary\r\n\r\nThere are several account/profile forms defined in [`api/forms.py`](https://github.com/CiviWiki/OpenCiviWiki/blob/develop/project/api/forms.py). 
Those forms should be moved to [`accounts/forms.py`](https://github.com/CiviWiki/OpenCiviWiki/blob/develop/project/accounts/forms.py) or deleted if they are duplicate code.\r\n\r\n**Update:** the code in `api/forms.py` is actually redundant, so may simply be deleted.\r\n\r\n## Task\r\n\r\nThe steps to complete this task are:\r\n\r\n- [x] [fork this repository](https://docs.github.com/en/get-started/quickstart/fork-a-repo) and clone it to your local computer\r\n- [x] set up a local development environment as [outlined in our Contributing Guide](https://github.com/CiviWiki/OpenCiviWiki/blob/develop/CONTRIBUTING.md#development)\r\n- [x] delete the file `api/forms.py`\r\n- [x] commit your changes\r\n- [x] push your code to GitHub\r\n- [x] [open a pull request](https://docs.github.com/en/github/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/creating-a-pull-request) against the `main` branch in this repository\n", "before_files": [{"content": "from django import forms\nfrom django.core.files.images import get_image_dimensions\nfrom django.contrib.auth import get_user_model\nfrom accounts.models import Profile\n\n\nclass UpdatePassword(forms.ModelForm):\n \"\"\"\n Form for updating User Password\n \"\"\"\n\n class Meta:\n model = get_user_model()\n fields = [\"password\", \"verify\"]\n\n password = forms.CharField(\n label=\"Password\",\n widget=forms.PasswordInput(\n attrs={\n \"class\": \"form-control\",\n \"placeholder\": \"Password\",\n \"required\": \"True\",\n }\n ),\n )\n verify = forms.CharField(\n label=\"Password Verify\",\n widget=forms.PasswordInput(\n attrs={\n \"class\": \"form-control\",\n \"placeholder\": \"Password Verify\",\n \"required\": \"True\",\n }\n ),\n help_text=\"Please retype your password.\",\n )\n\n def clean(self):\n \"\"\"\n Verifies that the passwords match\n \"\"\"\n clean_data = super(UpdatePassword, self).clean()\n if \"password\" in clean_data and \"verify\" in clean_data:\n if clean_data[\"password\"] != clean_data[\"verify\"]:\n raise forms.ValidationError(\"Passwords don't match.\")\n else:\n raise forms.ValidationError(\"Both password fields need to be filled out.\")\n return clean_data\n\n\nclass UpdateProfileImage(forms.ModelForm):\n \"\"\"\n Form for updating profile image\n \"\"\"\n\n class Meta:\n model = Profile\n fields = [\"profile_image\"]\n\n profile_image = forms.ImageField()\n\n def clean_profile_image(self):\n \"\"\"\n This function is used to make sure that profile images follow Civiwiki standards.\n\n Requirements:\n - Height cannot exceed 960px\n - Width cannot exceed 1280px\n - Image must be (jpg, jpeg, pjeg, png)\n - File size cannot exceed 2MB\n \"\"\"\n profile_image = self.cleaned_data[\"profile_image\"]\n\n try:\n w, h = get_image_dimensions(profile_image)\n\n # validate dimensions\n max_height = 960\n max_width = 1280\n if w > max_width or h > max_height:\n raise forms.ValidationError(\n u\"Please use an image that is {w} x {h} pixels or smaller.\".format(\n w=max_width, h=max_height\n )\n )\n\n # validate content type\n main, sub = profile_image.content_type.split(\"/\")\n if not (main == \"image\" and sub in [\"jpg\", \"jpeg\", \"pjpeg\", \"png\"]):\n raise forms.ValidationError(u\"Please use a JPEG or PNG image.\")\n\n # validate file size\n if len(profile_image) > (2000 * 1024):\n raise forms.ValidationError(\n \"Profile image file size may not exceed 2MB.\"\n )\n\n except AttributeError:\n pass\n\n return profile_image\n", "path": "project/api/forms.py"}], "after_files": [{"content": null, 
"path": "project/api/forms.py"}]} | 1,357 | 745 |
gh_patches_debug_31243 | rasdani/github-patches | git_diff | quantumlib__Cirq-4330 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`cirq.X**1` should be `_PauliX` instead of `XPowGate`
**Description of the issue**
As @ybc1991 pointed out - the type of `cirq.X**1` should be `_PauliX` instead of `XPowGate`. Context: https://github.com/quantumlib/Cirq/pull/4165/files#r646844399.
Discussed on Cirq Cynque on June 9th 2021:
- We should just fix this
- That would make the output type value-dependent. That is true, but it should be fine in this direction, because `_PauliX` is a subclass of `XPowGate`.
**How to reproduce the issue**
```
print(type(cirq.X), type(cirq.X**1))
# Prints:
# <class 'cirq.ops.pauli_gates._PauliX'> <class 'cirq.ops.common_gates.XPowGate'>
```
--- END ISSUE ---
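For intuition, here is a standalone sketch of the value-dependent `__pow__` the discussion points at, using stand-in classes rather than the repository's actual implementation; returning the Pauli singleton for an exponent of 1 is type-safe precisely because `_PauliX` subclasses `XPowGate`.

```python
# Stand-in classes that mimic the cirq subclass relationship.
class XPowGate:
    def __init__(self, exponent=1.0):
        self.exponent = exponent


class _PauliX(XPowGate):
    """Pauli X; a subclass of XPowGate, so it can be returned wherever an XPowGate is expected."""

    def __init__(self):
        super().__init__(exponent=1.0)

    def __pow__(self, exponent):
        # Value-dependent return type: stay a Pauli only when the result is still a Pauli.
        return _PauliX() if exponent == 1 else XPowGate(exponent=exponent)


X = _PauliX()
print(type(X).__name__, type(X ** 1).__name__, type(X ** 0.5).__name__)
# _PauliX _PauliX XPowGate
```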
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cirq-core/cirq/ops/pauli_gates.py`
Content:
```
1 # Copyright 2018 The Cirq Developers
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # https://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 import abc
15 from typing import Any, cast, Tuple, TYPE_CHECKING, Union, Dict
16
17 from cirq._doc import document
18 from cirq.ops import common_gates, raw_types, identity
19 from cirq.type_workarounds import NotImplementedType
20
21
22 if TYPE_CHECKING:
23 import cirq
24 from cirq.ops.pauli_string import SingleQubitPauliStringGateOperation
25 from cirq.value.product_state import (
26 _XEigenState,
27 _YEigenState,
28 _ZEigenState,
29 ) # coverage: ignore
30
31
32 class Pauli(raw_types.Gate, metaclass=abc.ABCMeta):
33 """Represents the Pauli gates.
34
35 This is an abstract class with no public subclasses. The only instances
36 of private subclasses are the X, Y, or Z Pauli gates defined below.
37 """
38
39 _XYZ = None # type: Tuple[Pauli, Pauli, Pauli]
40
41 @staticmethod
42 def by_index(index: int) -> 'Pauli':
43 return Pauli._XYZ[index % 3]
44
45 @staticmethod
46 def by_relative_index(p: 'Pauli', relative_index: int) -> 'Pauli':
47 return Pauli._XYZ[(p._index + relative_index) % 3]
48
49 def __init__(self, index: int, name: str) -> None:
50 self._index = index
51 self._name = name
52
53 def num_qubits(self):
54 return 1
55
56 def _commutes_(self, other: Any, atol: float) -> Union[bool, NotImplementedType, None]:
57 if not isinstance(other, Pauli):
58 return NotImplemented
59 return self is other
60
61 def third(self, second: 'Pauli') -> 'Pauli':
62 return Pauli._XYZ[(-self._index - second._index) % 3]
63
64 def relative_index(self, second: 'Pauli') -> int:
65 """Relative index of self w.r.t. second in the (X, Y, Z) cycle."""
66 return (self._index - second._index + 1) % 3 - 1
67
68 def phased_pauli_product(
69 self, other: Union['cirq.Pauli', 'identity.IdentityGate']
70 ) -> Tuple[complex, Union['cirq.Pauli', 'identity.IdentityGate']]:
71 if self == other:
72 return 1, identity.I
73 if other is identity.I:
74 return 1, self
75 return 1j ** cast(Pauli, other).relative_index(self), self.third(cast(Pauli, other))
76
77 def __gt__(self, other):
78 if not isinstance(other, Pauli):
79 return NotImplemented
80 return (self._index - other._index) % 3 == 1
81
82 def __lt__(self, other):
83 if not isinstance(other, Pauli):
84 return NotImplemented
85 return (other._index - self._index) % 3 == 1
86
87 def on(self, *qubits: 'cirq.Qid') -> 'SingleQubitPauliStringGateOperation':
88 """Returns an application of this gate to the given qubits.
89
90 Args:
91 *qubits: The collection of qubits to potentially apply the gate to.
92 """
93 if len(qubits) != 1:
94 raise ValueError(f'Expected a single qubit, got <{qubits!r}>.')
95 from cirq.ops.pauli_string import SingleQubitPauliStringGateOperation
96
97 return SingleQubitPauliStringGateOperation(self, qubits[0])
98
99 @property
100 def _canonical_exponent(self):
101 """Overrides EigenGate._canonical_exponent in subclasses."""
102 return 1
103
104
105 class _PauliX(Pauli, common_gates.XPowGate):
106 def __init__(self):
107 Pauli.__init__(self, index=0, name='X')
108 common_gates.XPowGate.__init__(self, exponent=1.0)
109
110 def __pow__(self: '_PauliX', exponent: 'cirq.TParamVal') -> common_gates.XPowGate:
111 return common_gates.XPowGate(exponent=exponent)
112
113 def _with_exponent(self: '_PauliX', exponent: 'cirq.TParamVal') -> common_gates.XPowGate:
114 return self.__pow__(exponent)
115
116 @classmethod
117 def _from_json_dict_(cls, exponent, global_shift, **kwargs):
118 assert global_shift == 0
119 assert exponent == 1
120 return Pauli._XYZ[0]
121
122 @property
123 def basis(self: '_PauliX') -> Dict[int, '_XEigenState']:
124 from cirq.value.product_state import _XEigenState
125
126 return {
127 +1: _XEigenState(+1),
128 -1: _XEigenState(-1),
129 }
130
131
132 class _PauliY(Pauli, common_gates.YPowGate):
133 def __init__(self):
134 Pauli.__init__(self, index=1, name='Y')
135 common_gates.YPowGate.__init__(self, exponent=1.0)
136
137 def __pow__(self: '_PauliY', exponent: 'cirq.TParamVal') -> common_gates.YPowGate:
138 return common_gates.YPowGate(exponent=exponent)
139
140 def _with_exponent(self: '_PauliY', exponent: 'cirq.TParamVal') -> common_gates.YPowGate:
141 return self.__pow__(exponent)
142
143 @classmethod
144 def _from_json_dict_(cls, exponent, global_shift, **kwargs):
145 assert global_shift == 0
146 assert exponent == 1
147 return Pauli._XYZ[1]
148
149 @property
150 def basis(self: '_PauliY') -> Dict[int, '_YEigenState']:
151 from cirq.value.product_state import _YEigenState
152
153 return {
154 +1: _YEigenState(+1),
155 -1: _YEigenState(-1),
156 }
157
158
159 class _PauliZ(Pauli, common_gates.ZPowGate):
160 def __init__(self):
161 Pauli.__init__(self, index=2, name='Z')
162 common_gates.ZPowGate.__init__(self, exponent=1.0)
163
164 def __pow__(self: '_PauliZ', exponent: 'cirq.TParamVal') -> common_gates.ZPowGate:
165 return common_gates.ZPowGate(exponent=exponent)
166
167 def _with_exponent(self: '_PauliZ', exponent: 'cirq.TParamVal') -> common_gates.ZPowGate:
168 return self.__pow__(exponent)
169
170 @classmethod
171 def _from_json_dict_(cls, exponent, global_shift, **kwargs):
172 assert global_shift == 0
173 assert exponent == 1
174 return Pauli._XYZ[2]
175
176 @property
177 def basis(self: '_PauliZ') -> Dict[int, '_ZEigenState']:
178 from cirq.value.product_state import _ZEigenState
179
180 return {
181 +1: _ZEigenState(+1),
182 -1: _ZEigenState(-1),
183 }
184
185
186 X = _PauliX()
187 document(
188 X,
189 """The Pauli X gate.
190
191 Matrix:
192
193 [[0, 1],
194 [1, 0]]
195 """,
196 )
197
198 Y = _PauliY()
199 document(
200 Y,
201 """The Pauli Y gate.
202
203 Matrix:
204
205 [[0, -i],
206 [i, 0]]
207 """,
208 )
209
210 Z = _PauliZ()
211 document(
212 Z,
213 """The Pauli Z gate.
214
215 Matrix:
216
217 [[1, 0],
218 [0, -1]]
219 """,
220 )
221
222 Pauli._XYZ = (X, Y, Z)
223
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/cirq-core/cirq/ops/pauli_gates.py b/cirq-core/cirq/ops/pauli_gates.py
--- a/cirq-core/cirq/ops/pauli_gates.py
+++ b/cirq-core/cirq/ops/pauli_gates.py
@@ -108,7 +108,7 @@
common_gates.XPowGate.__init__(self, exponent=1.0)
def __pow__(self: '_PauliX', exponent: 'cirq.TParamVal') -> common_gates.XPowGate:
- return common_gates.XPowGate(exponent=exponent)
+ return common_gates.XPowGate(exponent=exponent) if exponent != 1 else _PauliX()
def _with_exponent(self: '_PauliX', exponent: 'cirq.TParamVal') -> common_gates.XPowGate:
return self.__pow__(exponent)
@@ -135,7 +135,7 @@
common_gates.YPowGate.__init__(self, exponent=1.0)
def __pow__(self: '_PauliY', exponent: 'cirq.TParamVal') -> common_gates.YPowGate:
- return common_gates.YPowGate(exponent=exponent)
+ return common_gates.YPowGate(exponent=exponent) if exponent != 1 else _PauliY()
def _with_exponent(self: '_PauliY', exponent: 'cirq.TParamVal') -> common_gates.YPowGate:
return self.__pow__(exponent)
@@ -162,7 +162,7 @@
common_gates.ZPowGate.__init__(self, exponent=1.0)
def __pow__(self: '_PauliZ', exponent: 'cirq.TParamVal') -> common_gates.ZPowGate:
- return common_gates.ZPowGate(exponent=exponent)
+ return common_gates.ZPowGate(exponent=exponent) if exponent != 1 else _PauliZ()
def _with_exponent(self: '_PauliZ', exponent: 'cirq.TParamVal') -> common_gates.ZPowGate:
return self.__pow__(exponent)
| {"golden_diff": "diff --git a/cirq-core/cirq/ops/pauli_gates.py b/cirq-core/cirq/ops/pauli_gates.py\n--- a/cirq-core/cirq/ops/pauli_gates.py\n+++ b/cirq-core/cirq/ops/pauli_gates.py\n@@ -108,7 +108,7 @@\n common_gates.XPowGate.__init__(self, exponent=1.0)\n \n def __pow__(self: '_PauliX', exponent: 'cirq.TParamVal') -> common_gates.XPowGate:\n- return common_gates.XPowGate(exponent=exponent)\n+ return common_gates.XPowGate(exponent=exponent) if exponent != 1 else _PauliX()\n \n def _with_exponent(self: '_PauliX', exponent: 'cirq.TParamVal') -> common_gates.XPowGate:\n return self.__pow__(exponent)\n@@ -135,7 +135,7 @@\n common_gates.YPowGate.__init__(self, exponent=1.0)\n \n def __pow__(self: '_PauliY', exponent: 'cirq.TParamVal') -> common_gates.YPowGate:\n- return common_gates.YPowGate(exponent=exponent)\n+ return common_gates.YPowGate(exponent=exponent) if exponent != 1 else _PauliY()\n \n def _with_exponent(self: '_PauliY', exponent: 'cirq.TParamVal') -> common_gates.YPowGate:\n return self.__pow__(exponent)\n@@ -162,7 +162,7 @@\n common_gates.ZPowGate.__init__(self, exponent=1.0)\n \n def __pow__(self: '_PauliZ', exponent: 'cirq.TParamVal') -> common_gates.ZPowGate:\n- return common_gates.ZPowGate(exponent=exponent)\n+ return common_gates.ZPowGate(exponent=exponent) if exponent != 1 else _PauliZ()\n \n def _with_exponent(self: '_PauliZ', exponent: 'cirq.TParamVal') -> common_gates.ZPowGate:\n return self.__pow__(exponent)\n", "issue": "`cirq.X**1` should be `_PauliX` instead of `XPowGate`\n**Description of the issue**\r\n\r\nAs @ybc1991 pointed out - the type of `cirq.X**1` should be `_PauliX` instead of `XPowGate`. Context: https://github.com/quantumlib/Cirq/pull/4165/files#r646844399. \r\n\r\nDiscussed on Cirq Cynque on June 9th 2021:\r\n- We should just fix this \r\n- That would make the output type value-dependent. -- that is true, but it should be fine this direction, because _PauliX is a subclass of the XPowGate\r\n\r\n\r\n**How to reproduce the issue**\r\n\r\n```\r\nprint(type(cirq.X), type(cirq.X**1))\r\n# Prints:\r\n# <class 'cirq.ops.pauli_gates._PauliX'> <class 'cirq.ops.common_gates.XPowGate'>\r\n```\r\n\n", "before_files": [{"content": "# Copyright 2018 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport abc\nfrom typing import Any, cast, Tuple, TYPE_CHECKING, Union, Dict\n\nfrom cirq._doc import document\nfrom cirq.ops import common_gates, raw_types, identity\nfrom cirq.type_workarounds import NotImplementedType\n\n\nif TYPE_CHECKING:\n import cirq\n from cirq.ops.pauli_string import SingleQubitPauliStringGateOperation\n from cirq.value.product_state import (\n _XEigenState,\n _YEigenState,\n _ZEigenState,\n ) # coverage: ignore\n\n\nclass Pauli(raw_types.Gate, metaclass=abc.ABCMeta):\n \"\"\"Represents the Pauli gates.\n\n This is an abstract class with no public subclasses. 
The only instances\n of private subclasses are the X, Y, or Z Pauli gates defined below.\n \"\"\"\n\n _XYZ = None # type: Tuple[Pauli, Pauli, Pauli]\n\n @staticmethod\n def by_index(index: int) -> 'Pauli':\n return Pauli._XYZ[index % 3]\n\n @staticmethod\n def by_relative_index(p: 'Pauli', relative_index: int) -> 'Pauli':\n return Pauli._XYZ[(p._index + relative_index) % 3]\n\n def __init__(self, index: int, name: str) -> None:\n self._index = index\n self._name = name\n\n def num_qubits(self):\n return 1\n\n def _commutes_(self, other: Any, atol: float) -> Union[bool, NotImplementedType, None]:\n if not isinstance(other, Pauli):\n return NotImplemented\n return self is other\n\n def third(self, second: 'Pauli') -> 'Pauli':\n return Pauli._XYZ[(-self._index - second._index) % 3]\n\n def relative_index(self, second: 'Pauli') -> int:\n \"\"\"Relative index of self w.r.t. second in the (X, Y, Z) cycle.\"\"\"\n return (self._index - second._index + 1) % 3 - 1\n\n def phased_pauli_product(\n self, other: Union['cirq.Pauli', 'identity.IdentityGate']\n ) -> Tuple[complex, Union['cirq.Pauli', 'identity.IdentityGate']]:\n if self == other:\n return 1, identity.I\n if other is identity.I:\n return 1, self\n return 1j ** cast(Pauli, other).relative_index(self), self.third(cast(Pauli, other))\n\n def __gt__(self, other):\n if not isinstance(other, Pauli):\n return NotImplemented\n return (self._index - other._index) % 3 == 1\n\n def __lt__(self, other):\n if not isinstance(other, Pauli):\n return NotImplemented\n return (other._index - self._index) % 3 == 1\n\n def on(self, *qubits: 'cirq.Qid') -> 'SingleQubitPauliStringGateOperation':\n \"\"\"Returns an application of this gate to the given qubits.\n\n Args:\n *qubits: The collection of qubits to potentially apply the gate to.\n \"\"\"\n if len(qubits) != 1:\n raise ValueError(f'Expected a single qubit, got <{qubits!r}>.')\n from cirq.ops.pauli_string import SingleQubitPauliStringGateOperation\n\n return SingleQubitPauliStringGateOperation(self, qubits[0])\n\n @property\n def _canonical_exponent(self):\n \"\"\"Overrides EigenGate._canonical_exponent in subclasses.\"\"\"\n return 1\n\n\nclass _PauliX(Pauli, common_gates.XPowGate):\n def __init__(self):\n Pauli.__init__(self, index=0, name='X')\n common_gates.XPowGate.__init__(self, exponent=1.0)\n\n def __pow__(self: '_PauliX', exponent: 'cirq.TParamVal') -> common_gates.XPowGate:\n return common_gates.XPowGate(exponent=exponent)\n\n def _with_exponent(self: '_PauliX', exponent: 'cirq.TParamVal') -> common_gates.XPowGate:\n return self.__pow__(exponent)\n\n @classmethod\n def _from_json_dict_(cls, exponent, global_shift, **kwargs):\n assert global_shift == 0\n assert exponent == 1\n return Pauli._XYZ[0]\n\n @property\n def basis(self: '_PauliX') -> Dict[int, '_XEigenState']:\n from cirq.value.product_state import _XEigenState\n\n return {\n +1: _XEigenState(+1),\n -1: _XEigenState(-1),\n }\n\n\nclass _PauliY(Pauli, common_gates.YPowGate):\n def __init__(self):\n Pauli.__init__(self, index=1, name='Y')\n common_gates.YPowGate.__init__(self, exponent=1.0)\n\n def __pow__(self: '_PauliY', exponent: 'cirq.TParamVal') -> common_gates.YPowGate:\n return common_gates.YPowGate(exponent=exponent)\n\n def _with_exponent(self: '_PauliY', exponent: 'cirq.TParamVal') -> common_gates.YPowGate:\n return self.__pow__(exponent)\n\n @classmethod\n def _from_json_dict_(cls, exponent, global_shift, **kwargs):\n assert global_shift == 0\n assert exponent == 1\n return Pauli._XYZ[1]\n\n @property\n def basis(self: 
'_PauliY') -> Dict[int, '_YEigenState']:\n from cirq.value.product_state import _YEigenState\n\n return {\n +1: _YEigenState(+1),\n -1: _YEigenState(-1),\n }\n\n\nclass _PauliZ(Pauli, common_gates.ZPowGate):\n def __init__(self):\n Pauli.__init__(self, index=2, name='Z')\n common_gates.ZPowGate.__init__(self, exponent=1.0)\n\n def __pow__(self: '_PauliZ', exponent: 'cirq.TParamVal') -> common_gates.ZPowGate:\n return common_gates.ZPowGate(exponent=exponent)\n\n def _with_exponent(self: '_PauliZ', exponent: 'cirq.TParamVal') -> common_gates.ZPowGate:\n return self.__pow__(exponent)\n\n @classmethod\n def _from_json_dict_(cls, exponent, global_shift, **kwargs):\n assert global_shift == 0\n assert exponent == 1\n return Pauli._XYZ[2]\n\n @property\n def basis(self: '_PauliZ') -> Dict[int, '_ZEigenState']:\n from cirq.value.product_state import _ZEigenState\n\n return {\n +1: _ZEigenState(+1),\n -1: _ZEigenState(-1),\n }\n\n\nX = _PauliX()\ndocument(\n X,\n \"\"\"The Pauli X gate.\n\n Matrix:\n\n [[0, 1],\n [1, 0]]\n \"\"\",\n)\n\nY = _PauliY()\ndocument(\n Y,\n \"\"\"The Pauli Y gate.\n\n Matrix:\n\n [[0, -i],\n [i, 0]]\n \"\"\",\n)\n\nZ = _PauliZ()\ndocument(\n Z,\n \"\"\"The Pauli Z gate.\n\n Matrix:\n\n [[1, 0],\n [0, -1]]\n \"\"\",\n)\n\nPauli._XYZ = (X, Y, Z)\n", "path": "cirq-core/cirq/ops/pauli_gates.py"}], "after_files": [{"content": "# Copyright 2018 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport abc\nfrom typing import Any, cast, Tuple, TYPE_CHECKING, Union, Dict\n\nfrom cirq._doc import document\nfrom cirq.ops import common_gates, raw_types, identity\nfrom cirq.type_workarounds import NotImplementedType\n\n\nif TYPE_CHECKING:\n import cirq\n from cirq.ops.pauli_string import SingleQubitPauliStringGateOperation\n from cirq.value.product_state import (\n _XEigenState,\n _YEigenState,\n _ZEigenState,\n ) # coverage: ignore\n\n\nclass Pauli(raw_types.Gate, metaclass=abc.ABCMeta):\n \"\"\"Represents the Pauli gates.\n\n This is an abstract class with no public subclasses. The only instances\n of private subclasses are the X, Y, or Z Pauli gates defined below.\n \"\"\"\n\n _XYZ = None # type: Tuple[Pauli, Pauli, Pauli]\n\n @staticmethod\n def by_index(index: int) -> 'Pauli':\n return Pauli._XYZ[index % 3]\n\n @staticmethod\n def by_relative_index(p: 'Pauli', relative_index: int) -> 'Pauli':\n return Pauli._XYZ[(p._index + relative_index) % 3]\n\n def __init__(self, index: int, name: str) -> None:\n self._index = index\n self._name = name\n\n def num_qubits(self):\n return 1\n\n def _commutes_(self, other: Any, atol: float) -> Union[bool, NotImplementedType, None]:\n if not isinstance(other, Pauli):\n return NotImplemented\n return self is other\n\n def third(self, second: 'Pauli') -> 'Pauli':\n return Pauli._XYZ[(-self._index - second._index) % 3]\n\n def relative_index(self, second: 'Pauli') -> int:\n \"\"\"Relative index of self w.r.t. 
second in the (X, Y, Z) cycle.\"\"\"\n return (self._index - second._index + 1) % 3 - 1\n\n def phased_pauli_product(\n self, other: Union['cirq.Pauli', 'identity.IdentityGate']\n ) -> Tuple[complex, Union['cirq.Pauli', 'identity.IdentityGate']]:\n if self == other:\n return 1, identity.I\n if other is identity.I:\n return 1, self\n return 1j ** cast(Pauli, other).relative_index(self), self.third(cast(Pauli, other))\n\n def __gt__(self, other):\n if not isinstance(other, Pauli):\n return NotImplemented\n return (self._index - other._index) % 3 == 1\n\n def __lt__(self, other):\n if not isinstance(other, Pauli):\n return NotImplemented\n return (other._index - self._index) % 3 == 1\n\n def on(self, *qubits: 'cirq.Qid') -> 'SingleQubitPauliStringGateOperation':\n \"\"\"Returns an application of this gate to the given qubits.\n\n Args:\n *qubits: The collection of qubits to potentially apply the gate to.\n \"\"\"\n if len(qubits) != 1:\n raise ValueError(f'Expected a single qubit, got <{qubits!r}>.')\n from cirq.ops.pauli_string import SingleQubitPauliStringGateOperation\n\n return SingleQubitPauliStringGateOperation(self, qubits[0])\n\n @property\n def _canonical_exponent(self):\n \"\"\"Overrides EigenGate._canonical_exponent in subclasses.\"\"\"\n return 1\n\n\nclass _PauliX(Pauli, common_gates.XPowGate):\n def __init__(self):\n Pauli.__init__(self, index=0, name='X')\n common_gates.XPowGate.__init__(self, exponent=1.0)\n\n def __pow__(self: '_PauliX', exponent: 'cirq.TParamVal') -> common_gates.XPowGate:\n return common_gates.XPowGate(exponent=exponent) if exponent != 1 else _PauliX()\n\n def _with_exponent(self: '_PauliX', exponent: 'cirq.TParamVal') -> common_gates.XPowGate:\n return self.__pow__(exponent)\n\n @classmethod\n def _from_json_dict_(cls, exponent, global_shift, **kwargs):\n assert global_shift == 0\n assert exponent == 1\n return Pauli._XYZ[0]\n\n @property\n def basis(self: '_PauliX') -> Dict[int, '_XEigenState']:\n from cirq.value.product_state import _XEigenState\n\n return {\n +1: _XEigenState(+1),\n -1: _XEigenState(-1),\n }\n\n\nclass _PauliY(Pauli, common_gates.YPowGate):\n def __init__(self):\n Pauli.__init__(self, index=1, name='Y')\n common_gates.YPowGate.__init__(self, exponent=1.0)\n\n def __pow__(self: '_PauliY', exponent: 'cirq.TParamVal') -> common_gates.YPowGate:\n return common_gates.YPowGate(exponent=exponent) if exponent != 1 else _PauliY()\n\n def _with_exponent(self: '_PauliY', exponent: 'cirq.TParamVal') -> common_gates.YPowGate:\n return self.__pow__(exponent)\n\n @classmethod\n def _from_json_dict_(cls, exponent, global_shift, **kwargs):\n assert global_shift == 0\n assert exponent == 1\n return Pauli._XYZ[1]\n\n @property\n def basis(self: '_PauliY') -> Dict[int, '_YEigenState']:\n from cirq.value.product_state import _YEigenState\n\n return {\n +1: _YEigenState(+1),\n -1: _YEigenState(-1),\n }\n\n\nclass _PauliZ(Pauli, common_gates.ZPowGate):\n def __init__(self):\n Pauli.__init__(self, index=2, name='Z')\n common_gates.ZPowGate.__init__(self, exponent=1.0)\n\n def __pow__(self: '_PauliZ', exponent: 'cirq.TParamVal') -> common_gates.ZPowGate:\n return common_gates.ZPowGate(exponent=exponent) if exponent != 1 else _PauliZ()\n\n def _with_exponent(self: '_PauliZ', exponent: 'cirq.TParamVal') -> common_gates.ZPowGate:\n return self.__pow__(exponent)\n\n @classmethod\n def _from_json_dict_(cls, exponent, global_shift, **kwargs):\n assert global_shift == 0\n assert exponent == 1\n return Pauli._XYZ[2]\n\n @property\n def basis(self: '_PauliZ') -> 
Dict[int, '_ZEigenState']:\n from cirq.value.product_state import _ZEigenState\n\n return {\n +1: _ZEigenState(+1),\n -1: _ZEigenState(-1),\n }\n\n\nX = _PauliX()\ndocument(\n X,\n \"\"\"The Pauli X gate.\n\n Matrix:\n\n [[0, 1],\n [1, 0]]\n \"\"\",\n)\n\nY = _PauliY()\ndocument(\n Y,\n \"\"\"The Pauli Y gate.\n\n Matrix:\n\n [[0, -i],\n [i, 0]]\n \"\"\",\n)\n\nZ = _PauliZ()\ndocument(\n Z,\n \"\"\"The Pauli Z gate.\n\n Matrix:\n\n [[1, 0],\n [0, -1]]\n \"\"\",\n)\n\nPauli._XYZ = (X, Y, Z)\n", "path": "cirq-core/cirq/ops/pauli_gates.py"}]} | 2,898 | 493 |
gh_patches_debug_1240 | rasdani/github-patches | git_diff | mindsdb__lightwood-603 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
:wrench: Add default logging level environment variable
## Task
Add a `LIGHTWOOD_LOG` environment variable that controls the default logging level for lightwood. It should be possible to set values for it so that `DEBUG`, `INFO`, `WARNING`, `ERROR` and `CRITICAL` are all possible options. The logger lightwood uses is declared and exported [here](https://github.com/mindsdb/lightwood/blob/stable/lightwood/helpers/log.py).
## Steps :male_detective: :female_detective:
- Fork the Lightwood repository, checkout the `staging` branch and from it create a new one.
- Implement the necessary changes.
- Check that only the appropriate logs are getting through. For this, you can run any of the integration tests, like [`test_boston_housing`](https://github.com/mindsdb/lightwood/blob/stable/tests/integration/basic/test_boston_housing.py), and analyze the output.
- Make the PR and address any comments that reviewers might make.
## Additional rewards :1st_place_medal:
Each documentation PR brings :one: point for entry into the draw for a :computer: Deep Learning Laptop powered by the NVIDIA RTX 3080 Max-Q GPU or other swag :shirt: :bear: . For more info check out https://mindsdb.com/hacktoberfest/
--- END ISSUE ---
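As a rough illustration of how such an environment variable is usually consumed (the variable name follows the ticket; the fallback value and logger name are assumptions about the eventual implementation), the level can be read from the environment before it is applied to the logger:

```python
import logging
import os

# Sketch of an environment-controlled log level.
# "DEBUG" is an assumed fallback for when LIGHTWOOD_LOG is unset.
log_level = os.environ.get("LIGHTWOOD_LOG", "DEBUG")

logging.basicConfig()
log = logging.getLogger("lightwood-example")
log.setLevel(log_level)  # logging accepts level names such as "INFO" or "ERROR"

log.debug("shown only when the level is DEBUG")
log.warning("shown unless the level is raised above WARNING")
```

A run such as `LIGHTWOOD_LOG=WARNING python -m pytest tests/integration/basic/test_boston_housing.py` should then hide the DEBUG and INFO output mentioned in the verification step.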
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lightwood/helpers/log.py`
Content:
```
1 import logging
2 import os
3
4
5 def initialize_log():
6 pid = os.getpid()
7 logging.basicConfig()
8 log = logging.getLogger(f'lightwood-{pid}')
9 log.setLevel(logging.DEBUG)
10 return log
11
12
13 log = initialize_log()
14
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/lightwood/helpers/log.py b/lightwood/helpers/log.py
--- a/lightwood/helpers/log.py
+++ b/lightwood/helpers/log.py
@@ -6,7 +6,8 @@
pid = os.getpid()
logging.basicConfig()
log = logging.getLogger(f'lightwood-{pid}')
- log.setLevel(logging.DEBUG)
+ log_level = os.environ.get('LIGHTWOOD_LOG', 'DEBUG')
+ log.setLevel(log_level)
return log
| {"golden_diff": "diff --git a/lightwood/helpers/log.py b/lightwood/helpers/log.py\n--- a/lightwood/helpers/log.py\n+++ b/lightwood/helpers/log.py\n@@ -6,7 +6,8 @@\n pid = os.getpid()\n logging.basicConfig()\n log = logging.getLogger(f'lightwood-{pid}')\n- log.setLevel(logging.DEBUG)\n+ log_level = os.environ.get('LIGHTWOOD_LOG', 'DEBUG')\n+ log.setLevel(log_level)\n return log\n", "issue": ":wrench: Add default logging level environment variable\n## Task\r\n\r\nAdd a `LIGHTWOOD_LOG` environment variable that controls the default logging level for lightwood. It should be possible to set values for it so that `DEBUG`, `INFO`, `WARNING`, `ERROR` and `CRITICAL` are all possible options. The logger lightwood uses is declared and exported [here](https://github.com/mindsdb/lightwood/blob/stable/lightwood/helpers/log.py).\r\n\r\n## Steps :male_detective: :female_detective: \r\n\r\n- Fork the Lightwood repository, checkout the `staging` branch and from it create a new one.\r\n- Implement the necessary changes.\r\n- Check that only the appropriate logs are getting through. For this, you can run any of the integration tests, like [`test_boston_housing`](https://github.com/mindsdb/lightwood/blob/stable/tests/integration/basic/test_boston_housing.py), and analyze the output.\r\n- Make the PR and address any comments that reviewers might make.\r\n\r\n## Additional rewards :1st_place_medal: \r\n\r\nEach documentation PR brings :one: point for entry into the draw for a :computer: Deep Learning Laptop powered by the NVIDIA RTX 3080 Max-Q GPU or other swag :shirt: :bear: . For more info check out https://mindsdb.com/hacktoberfest/\n", "before_files": [{"content": "import logging\nimport os\n\n\ndef initialize_log():\n pid = os.getpid()\n logging.basicConfig()\n log = logging.getLogger(f'lightwood-{pid}')\n log.setLevel(logging.DEBUG)\n return log\n\n\nlog = initialize_log()\n", "path": "lightwood/helpers/log.py"}], "after_files": [{"content": "import logging\nimport os\n\n\ndef initialize_log():\n pid = os.getpid()\n logging.basicConfig()\n log = logging.getLogger(f'lightwood-{pid}')\n log_level = os.environ.get('LIGHTWOOD_LOG', 'DEBUG')\n log.setLevel(log_level)\n return log\n\n\nlog = initialize_log()\n", "path": "lightwood/helpers/log.py"}]} | 615 | 99 |
gh_patches_debug_2089 | rasdani/github-patches | git_diff | OpenMined__PySyft-4708 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add Windows to CI
## Description
Add Windows to the CI tests as a separate step for, say, Python 3.8 and torch==1.6.0 initially, just to get things working. Then, if it works, expand to all versions to see any potential issues.
## Definition of Done
This ticket is done when we know what does and doesn't run on Windows in CI from the current "fast" tests and the new "slow" tests. Post a screenshot and link to CI here when it's running.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/syft/lib/torch/__init__.py`
Content:
```
1 # stdlib
2 from typing import Dict
3 from typing import Union
4
5 # third party
6 from packaging import version
7 import torch
8
9 # syft relative
10 from . import parameter # noqa: 401
11 from . import uppercase_tensor # noqa: 401
12 from ...ast.globals import Globals
13 from .allowlist import allowlist
14
15 TORCH_VERSION = version.parse(torch.__version__)
16
17
18 def get_return_type(support_dict: Union[str, Dict[str, str]]) -> str:
19 if isinstance(support_dict, str):
20 return support_dict
21 else:
22 return support_dict["return_type"]
23
24
25 def version_supported(support_dict: Union[str, Dict[str, str]]) -> bool:
26 if isinstance(support_dict, str):
27 return True
28 else:
29 return TORCH_VERSION >= version.parse(support_dict["min_version"])
30
31
32 def create_torch_ast() -> Globals:
33 ast = Globals()
34
35 # most methods work in all versions and have a single return type
36 # for the more complicated ones we pass a dict with keys like return_type and
37 # min_version
38 for method, return_type_name_or_dict in allowlist.items():
39 if version_supported(support_dict=return_type_name_or_dict):
40 return_type = get_return_type(support_dict=return_type_name_or_dict)
41 if return_type == "unknown":
42 # this allows us to import them for testing
43 continue
44 ast.add_path(
45 path=method, framework_reference=torch, return_type_name=return_type
46 )
47 # add all the torch.nn.Parameter hooks
48 if method.startswith("torch.Tensor."):
49 method = method.replace("torch.Tensor.", "torch.nn.Parameter.")
50 return_type = return_type.replace("torch.Tensor", "torch.nn.Parameter")
51 ast.add_path(
52 path=method, framework_reference=torch, return_type_name=return_type
53 )
54 else:
55 print(f"Skipping torch.{method} not supported in {TORCH_VERSION}")
56
57 for klass in ast.classes:
58 klass.create_pointer_class()
59 klass.create_send_method()
60 klass.create_serialization_methods()
61 klass.create_storable_object_attr_convenience_methods()
62 return ast
63
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/syft/lib/torch/__init__.py b/src/syft/lib/torch/__init__.py
--- a/src/syft/lib/torch/__init__.py
+++ b/src/syft/lib/torch/__init__.py
@@ -12,7 +12,7 @@
from ...ast.globals import Globals
from .allowlist import allowlist
-TORCH_VERSION = version.parse(torch.__version__)
+TORCH_VERSION = version.parse(torch.__version__.split("+")[0])
def get_return_type(support_dict: Union[str, Dict[str, str]]) -> str:
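
The connection between the one-line patch above and the Windows CI goal is that Windows wheels of torch report versions with a local suffix such as `1.6.0+cpu` or `1.6.0+cu101`; the patch strips that suffix before handing the string to `packaging.version`, so the allowlist's `min_version` comparisons are made against the plain release number. The exact comparison that misbehaved is not spelled out in the issue, but the effect of the split is easy to see in isolation:

```python
from packaging import version

raw = "1.6.0+cpu"           # a typical torch.__version__ on a Windows CPU wheel
plain = raw.split("+")[0]   # "1.6.0", what the patched code parses

print(version.parse(raw))    # 1.6.0+cpu  (carries a "local" version segment)
print(version.parse(plain))  # 1.6.0
print(version.parse(plain) >= version.parse("1.3.0"))  # True
```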
| {"golden_diff": "diff --git a/src/syft/lib/torch/__init__.py b/src/syft/lib/torch/__init__.py\n--- a/src/syft/lib/torch/__init__.py\n+++ b/src/syft/lib/torch/__init__.py\n@@ -12,7 +12,7 @@\n from ...ast.globals import Globals\n from .allowlist import allowlist\n \n-TORCH_VERSION = version.parse(torch.__version__)\n+TORCH_VERSION = version.parse(torch.__version__.split(\"+\")[0])\n \n \n def get_return_type(support_dict: Union[str, Dict[str, str]]) -> str:\n", "issue": "Add Windows to CI\n## Description\r\nAdd windows to the CI tests as a separate step for say python 3.8 and torch==1.6.0 initially just to get things working. Then if it works expand to all versions to see any potential issues.\r\n\r\n## Definition of Done\r\nThis ticket is done when we know what does and doesn't run on Windows in CI from the current \"fast\" tests and the new \"slow\" tests. Post a screenshot and link to CI here when it's running.\n", "before_files": [{"content": "# stdlib\nfrom typing import Dict\nfrom typing import Union\n\n# third party\nfrom packaging import version\nimport torch\n\n# syft relative\nfrom . import parameter # noqa: 401\nfrom . import uppercase_tensor # noqa: 401\nfrom ...ast.globals import Globals\nfrom .allowlist import allowlist\n\nTORCH_VERSION = version.parse(torch.__version__)\n\n\ndef get_return_type(support_dict: Union[str, Dict[str, str]]) -> str:\n if isinstance(support_dict, str):\n return support_dict\n else:\n return support_dict[\"return_type\"]\n\n\ndef version_supported(support_dict: Union[str, Dict[str, str]]) -> bool:\n if isinstance(support_dict, str):\n return True\n else:\n return TORCH_VERSION >= version.parse(support_dict[\"min_version\"])\n\n\ndef create_torch_ast() -> Globals:\n ast = Globals()\n\n # most methods work in all versions and have a single return type\n # for the more complicated ones we pass a dict with keys like return_type and\n # min_version\n for method, return_type_name_or_dict in allowlist.items():\n if version_supported(support_dict=return_type_name_or_dict):\n return_type = get_return_type(support_dict=return_type_name_or_dict)\n if return_type == \"unknown\":\n # this allows us to import them for testing\n continue\n ast.add_path(\n path=method, framework_reference=torch, return_type_name=return_type\n )\n # add all the torch.nn.Parameter hooks\n if method.startswith(\"torch.Tensor.\"):\n method = method.replace(\"torch.Tensor.\", \"torch.nn.Parameter.\")\n return_type = return_type.replace(\"torch.Tensor\", \"torch.nn.Parameter\")\n ast.add_path(\n path=method, framework_reference=torch, return_type_name=return_type\n )\n else:\n print(f\"Skipping torch.{method} not supported in {TORCH_VERSION}\")\n\n for klass in ast.classes:\n klass.create_pointer_class()\n klass.create_send_method()\n klass.create_serialization_methods()\n klass.create_storable_object_attr_convenience_methods()\n return ast\n", "path": "src/syft/lib/torch/__init__.py"}], "after_files": [{"content": "# stdlib\nfrom typing import Dict\nfrom typing import Union\n\n# third party\nfrom packaging import version\nimport torch\n\n# syft relative\nfrom . import parameter # noqa: 401\nfrom . 
import uppercase_tensor # noqa: 401\nfrom ...ast.globals import Globals\nfrom .allowlist import allowlist\n\nTORCH_VERSION = version.parse(torch.__version__.split(\"+\")[0])\n\n\ndef get_return_type(support_dict: Union[str, Dict[str, str]]) -> str:\n if isinstance(support_dict, str):\n return support_dict\n else:\n return support_dict[\"return_type\"]\n\n\ndef version_supported(support_dict: Union[str, Dict[str, str]]) -> bool:\n if isinstance(support_dict, str):\n return True\n else:\n return TORCH_VERSION >= version.parse(support_dict[\"min_version\"])\n\n\ndef create_torch_ast() -> Globals:\n ast = Globals()\n\n # most methods work in all versions and have a single return type\n # for the more complicated ones we pass a dict with keys like return_type and\n # min_version\n for method, return_type_name_or_dict in allowlist.items():\n if version_supported(support_dict=return_type_name_or_dict):\n return_type = get_return_type(support_dict=return_type_name_or_dict)\n if return_type == \"unknown\":\n # this allows us to import them for testing\n continue\n ast.add_path(\n path=method, framework_reference=torch, return_type_name=return_type\n )\n # add all the torch.nn.Parameter hooks\n if method.startswith(\"torch.Tensor.\"):\n method = method.replace(\"torch.Tensor.\", \"torch.nn.Parameter.\")\n return_type = return_type.replace(\"torch.Tensor\", \"torch.nn.Parameter\")\n ast.add_path(\n path=method, framework_reference=torch, return_type_name=return_type\n )\n else:\n print(f\"Skipping torch.{method} not supported in {TORCH_VERSION}\")\n\n for klass in ast.classes:\n klass.create_pointer_class()\n klass.create_send_method()\n klass.create_serialization_methods()\n klass.create_storable_object_attr_convenience_methods()\n return ast\n", "path": "src/syft/lib/torch/__init__.py"}]} | 949 | 131 |
gh_patches_debug_11012 | rasdani/github-patches | git_diff | pytorch__vision-3545 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Places365 dataset redundant path.join under _check_integrity
## 🐛 Bug
There is a redundant `path.join` call at line 165 under `_check_integrity` in torchvision/datasets/places365.py.

`path.join` with `self.root` is already applied at lines 116 and 131, under `load_categories` and `load_file_list` respectively.
cc @pmeier
--- END ISSUE ---
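To make the redundancy concrete: by the time `_check_integrity` is called, `file` has already been joined with `self.root`, so joining the root in again is at best a no-op (for an absolute root, because `os.path.join` discards earlier components when a later one is absolute) and at worst duplicates the prefix (for a relative root). A small POSIX-path illustration, independent of torchvision:

```python
import os.path as path

root = "/datasets/places365"
file = path.join(root, "categories_places365.txt")   # what load_categories builds
print(path.join(root, file))
# /datasets/places365/categories_places365.txt  (unchanged: `file` is absolute)

root = "places365"
file = path.join(root, "categories_places365.txt")
print(path.join(root, file))
# places365/places365/categories_places365.txt  (prefix duplicated for a relative root)
```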
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `torchvision/datasets/places365.py`
Content:
```
1 import os
2 from os import path
3 from typing import Any, Callable, Dict, List, Optional, Tuple
4 from urllib.parse import urljoin
5
6 from .folder import default_loader
7 from .utils import verify_str_arg, check_integrity, download_and_extract_archive
8 from .vision import VisionDataset
9
10
11 class Places365(VisionDataset):
12 r"""`Places365 <http://places2.csail.mit.edu/index.html>`_ classification dataset.
13
14 Args:
15 root (string): Root directory of the Places365 dataset.
16 split (string, optional): The dataset split. Can be one of ``train-standard`` (default), ``train-challendge``,
17 ``val``.
18 small (bool, optional): If ``True``, uses the small images, i. e. resized to 256 x 256 pixels, instead of the
19 high resolution ones.
20 download (bool, optional): If ``True``, downloads the dataset components and places them in ``root``. Already
21 downloaded archives are not downloaded again.
22 transform (callable, optional): A function/transform that takes in an PIL image
23 and returns a transformed version. E.g, ``transforms.RandomCrop``
24 target_transform (callable, optional): A function/transform that takes in the
25 target and transforms it.
26 loader (callable, optional): A function to load an image given its path.
27
28 Attributes:
29 classes (list): List of the class names.
30 class_to_idx (dict): Dict with items (class_name, class_index).
31 imgs (list): List of (image path, class_index) tuples
32 targets (list): The class_index value for each image in the dataset
33
34 Raises:
35 RuntimeError: If ``download is False`` and the meta files, i. e. the devkit, are not present or corrupted.
36 RuntimeError: If ``download is True`` and the image archive is already extracted.
37 """
38 _SPLITS = ("train-standard", "train-challenge", "val")
39 _BASE_URL = "http://data.csail.mit.edu/places/places365/"
40 # {variant: (archive, md5)}
41 _DEVKIT_META = {
42 "standard": ("filelist_places365-standard.tar", "35a0585fee1fa656440f3ab298f8479c"),
43 "challenge": ("filelist_places365-challenge.tar", "70a8307e459c3de41690a7c76c931734"),
44 }
45 # (file, md5)
46 _CATEGORIES_META = ("categories_places365.txt", "06c963b85866bd0649f97cb43dd16673")
47 # {split: (file, md5)}
48 _FILE_LIST_META = {
49 "train-standard": ("places365_train_standard.txt", "30f37515461640559006b8329efbed1a"),
50 "train-challenge": ("places365_train_challenge.txt", "b2931dc997b8c33c27e7329c073a6b57"),
51 "val": ("places365_val.txt", "e9f2fd57bfd9d07630173f4e8708e4b1"),
52 }
53 # {(split, small): (file, md5)}
54 _IMAGES_META = {
55 ("train-standard", False): ("train_large_places365standard.tar", "67e186b496a84c929568076ed01a8aa1"),
56 ("train-challenge", False): ("train_large_places365challenge.tar", "605f18e68e510c82b958664ea134545f"),
57 ("val", False): ("val_large.tar", "9b71c4993ad89d2d8bcbdc4aef38042f"),
58 ("train-standard", True): ("train_256_places365standard.tar", "53ca1c756c3d1e7809517cc47c5561c5"),
59 ("train-challenge", True): ("train_256_places365challenge.tar", "741915038a5e3471ec7332404dfb64ef"),
60 ("val", True): ("val_256.tar", "e27b17d8d44f4af9a78502beb927f808"),
61 }
62
63 def __init__(
64 self,
65 root: str,
66 split: str = "train-standard",
67 small: bool = False,
68 download: bool = False,
69 transform: Optional[Callable] = None,
70 target_transform: Optional[Callable] = None,
71 loader: Callable[[str], Any] = default_loader,
72 ) -> None:
73 super().__init__(root, transform=transform, target_transform=target_transform)
74
75 self.split = self._verify_split(split)
76 self.small = small
77 self.loader = loader
78
79 self.classes, self.class_to_idx = self.load_categories(download)
80 self.imgs, self.targets = self.load_file_list(download)
81
82 if download:
83 self.download_images()
84
85 def __getitem__(self, index: int) -> Tuple[Any, Any]:
86 file, target = self.imgs[index]
87 image = self.loader(file)
88
89 if self.transforms is not None:
90 image, target = self.transforms(image, target)
91
92 return image, target
93
94 def __len__(self) -> int:
95 return len(self.imgs)
96
97 @property
98 def variant(self) -> str:
99 return "challenge" if "challenge" in self.split else "standard"
100
101 @property
102 def images_dir(self) -> str:
103 size = "256" if self.small else "large"
104 if self.split.startswith("train"):
105 dir = f"data_{size}_{self.variant}"
106 else:
107 dir = f"{self.split}_{size}"
108 return path.join(self.root, dir)
109
110 def load_categories(self, download: bool = True) -> Tuple[List[str], Dict[str, int]]:
111 def process(line: str) -> Tuple[str, int]:
112 cls, idx = line.split()
113 return cls, int(idx)
114
115 file, md5 = self._CATEGORIES_META
116 file = path.join(self.root, file)
117 if not self._check_integrity(file, md5, download):
118 self.download_devkit()
119
120 with open(file, "r") as fh:
121 class_to_idx = dict(process(line) for line in fh)
122
123 return sorted(class_to_idx.keys()), class_to_idx
124
125 def load_file_list(self, download: bool = True) -> Tuple[List[Tuple[str, int]], List[int]]:
126 def process(line: str, sep="/") -> Tuple[str, int]:
127 image, idx = line.split()
128 return path.join(self.images_dir, image.lstrip(sep).replace(sep, os.sep)), int(idx)
129
130 file, md5 = self._FILE_LIST_META[self.split]
131 file = path.join(self.root, file)
132 if not self._check_integrity(file, md5, download):
133 self.download_devkit()
134
135 with open(file, "r") as fh:
136 images = [process(line) for line in fh]
137
138 _, targets = zip(*images)
139 return images, list(targets)
140
141 def download_devkit(self) -> None:
142 file, md5 = self._DEVKIT_META[self.variant]
143 download_and_extract_archive(urljoin(self._BASE_URL, file), self.root, md5=md5)
144
145 def download_images(self) -> None:
146 if path.exists(self.images_dir):
147 raise RuntimeError(
148 f"The directory {self.images_dir} already exists. If you want to re-download or re-extract the images, "
149 f"delete the directory."
150 )
151
152 file, md5 = self._IMAGES_META[(self.split, self.small)]
153 download_and_extract_archive(urljoin(self._BASE_URL, file), self.root, md5=md5)
154
155 if self.split.startswith("train"):
156 os.rename(self.images_dir.rsplit("_", 1)[0], self.images_dir)
157
158 def extra_repr(self) -> str:
159 return "\n".join(("Split: {split}", "Small: {small}")).format(**self.__dict__)
160
161 def _verify_split(self, split: str) -> str:
162 return verify_str_arg(split, "split", self._SPLITS)
163
164 def _check_integrity(self, file: str, md5: str, download: bool) -> bool:
165 integrity = check_integrity(path.join(self.root, file), md5=md5)
166 if not integrity and not download:
167 raise RuntimeError(
168 f"The file {file} does not exist or is corrupted. You can set download=True to download it."
169 )
170 return integrity
171
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/torchvision/datasets/places365.py b/torchvision/datasets/places365.py
--- a/torchvision/datasets/places365.py
+++ b/torchvision/datasets/places365.py
@@ -162,7 +162,7 @@
return verify_str_arg(split, "split", self._SPLITS)
def _check_integrity(self, file: str, md5: str, download: bool) -> bool:
- integrity = check_integrity(path.join(self.root, file), md5=md5)
+ integrity = check_integrity(file, md5=md5)
if not integrity and not download:
raise RuntimeError(
f"The file {file} does not exist or is corrupted. You can set download=True to download it."
| {"golden_diff": "diff --git a/torchvision/datasets/places365.py b/torchvision/datasets/places365.py\n--- a/torchvision/datasets/places365.py\n+++ b/torchvision/datasets/places365.py\n@@ -162,7 +162,7 @@\n return verify_str_arg(split, \"split\", self._SPLITS)\n \n def _check_integrity(self, file: str, md5: str, download: bool) -> bool:\n- integrity = check_integrity(path.join(self.root, file), md5=md5)\n+ integrity = check_integrity(file, md5=md5)\n if not integrity and not download:\n raise RuntimeError(\n f\"The file {file} does not exist or is corrupted. You can set download=True to download it.\"\n", "issue": "Places365 dataset redundant path.join under _check_integrity\n## \ud83d\udc1b Bug\r\n\r\nRedundant path.join at line 165 under _check_integrity in torchvision/datasets/places365.py\r\n\r\npath.join is already called at line 116 and line 131 under load_categories and load_file_list\n\ncc @pmeier\n", "before_files": [{"content": "import os\nfrom os import path\nfrom typing import Any, Callable, Dict, List, Optional, Tuple\nfrom urllib.parse import urljoin\n\nfrom .folder import default_loader\nfrom .utils import verify_str_arg, check_integrity, download_and_extract_archive\nfrom .vision import VisionDataset\n\n\nclass Places365(VisionDataset):\n r\"\"\"`Places365 <http://places2.csail.mit.edu/index.html>`_ classification dataset.\n\n Args:\n root (string): Root directory of the Places365 dataset.\n split (string, optional): The dataset split. Can be one of ``train-standard`` (default), ``train-challendge``,\n ``val``.\n small (bool, optional): If ``True``, uses the small images, i. e. resized to 256 x 256 pixels, instead of the\n high resolution ones.\n download (bool, optional): If ``True``, downloads the dataset components and places them in ``root``. Already\n downloaded archives are not downloaded again.\n transform (callable, optional): A function/transform that takes in an PIL image\n and returns a transformed version. E.g, ``transforms.RandomCrop``\n target_transform (callable, optional): A function/transform that takes in the\n target and transforms it.\n loader (callable, optional): A function to load an image given its path.\n\n Attributes:\n classes (list): List of the class names.\n class_to_idx (dict): Dict with items (class_name, class_index).\n imgs (list): List of (image path, class_index) tuples\n targets (list): The class_index value for each image in the dataset\n\n Raises:\n RuntimeError: If ``download is False`` and the meta files, i. e. 
the devkit, are not present or corrupted.\n RuntimeError: If ``download is True`` and the image archive is already extracted.\n \"\"\"\n _SPLITS = (\"train-standard\", \"train-challenge\", \"val\")\n _BASE_URL = \"http://data.csail.mit.edu/places/places365/\"\n # {variant: (archive, md5)}\n _DEVKIT_META = {\n \"standard\": (\"filelist_places365-standard.tar\", \"35a0585fee1fa656440f3ab298f8479c\"),\n \"challenge\": (\"filelist_places365-challenge.tar\", \"70a8307e459c3de41690a7c76c931734\"),\n }\n # (file, md5)\n _CATEGORIES_META = (\"categories_places365.txt\", \"06c963b85866bd0649f97cb43dd16673\")\n # {split: (file, md5)}\n _FILE_LIST_META = {\n \"train-standard\": (\"places365_train_standard.txt\", \"30f37515461640559006b8329efbed1a\"),\n \"train-challenge\": (\"places365_train_challenge.txt\", \"b2931dc997b8c33c27e7329c073a6b57\"),\n \"val\": (\"places365_val.txt\", \"e9f2fd57bfd9d07630173f4e8708e4b1\"),\n }\n # {(split, small): (file, md5)}\n _IMAGES_META = {\n (\"train-standard\", False): (\"train_large_places365standard.tar\", \"67e186b496a84c929568076ed01a8aa1\"),\n (\"train-challenge\", False): (\"train_large_places365challenge.tar\", \"605f18e68e510c82b958664ea134545f\"),\n (\"val\", False): (\"val_large.tar\", \"9b71c4993ad89d2d8bcbdc4aef38042f\"),\n (\"train-standard\", True): (\"train_256_places365standard.tar\", \"53ca1c756c3d1e7809517cc47c5561c5\"),\n (\"train-challenge\", True): (\"train_256_places365challenge.tar\", \"741915038a5e3471ec7332404dfb64ef\"),\n (\"val\", True): (\"val_256.tar\", \"e27b17d8d44f4af9a78502beb927f808\"),\n }\n\n def __init__(\n self,\n root: str,\n split: str = \"train-standard\",\n small: bool = False,\n download: bool = False,\n transform: Optional[Callable] = None,\n target_transform: Optional[Callable] = None,\n loader: Callable[[str], Any] = default_loader,\n ) -> None:\n super().__init__(root, transform=transform, target_transform=target_transform)\n\n self.split = self._verify_split(split)\n self.small = small\n self.loader = loader\n\n self.classes, self.class_to_idx = self.load_categories(download)\n self.imgs, self.targets = self.load_file_list(download)\n\n if download:\n self.download_images()\n\n def __getitem__(self, index: int) -> Tuple[Any, Any]:\n file, target = self.imgs[index]\n image = self.loader(file)\n\n if self.transforms is not None:\n image, target = self.transforms(image, target)\n\n return image, target\n\n def __len__(self) -> int:\n return len(self.imgs)\n\n @property\n def variant(self) -> str:\n return \"challenge\" if \"challenge\" in self.split else \"standard\"\n\n @property\n def images_dir(self) -> str:\n size = \"256\" if self.small else \"large\"\n if self.split.startswith(\"train\"):\n dir = f\"data_{size}_{self.variant}\"\n else:\n dir = f\"{self.split}_{size}\"\n return path.join(self.root, dir)\n\n def load_categories(self, download: bool = True) -> Tuple[List[str], Dict[str, int]]:\n def process(line: str) -> Tuple[str, int]:\n cls, idx = line.split()\n return cls, int(idx)\n\n file, md5 = self._CATEGORIES_META\n file = path.join(self.root, file)\n if not self._check_integrity(file, md5, download):\n self.download_devkit()\n\n with open(file, \"r\") as fh:\n class_to_idx = dict(process(line) for line in fh)\n\n return sorted(class_to_idx.keys()), class_to_idx\n\n def load_file_list(self, download: bool = True) -> Tuple[List[Tuple[str, int]], List[int]]:\n def process(line: str, sep=\"/\") -> Tuple[str, int]:\n image, idx = line.split()\n return path.join(self.images_dir, image.lstrip(sep).replace(sep, 
os.sep)), int(idx)\n\n file, md5 = self._FILE_LIST_META[self.split]\n file = path.join(self.root, file)\n if not self._check_integrity(file, md5, download):\n self.download_devkit()\n\n with open(file, \"r\") as fh:\n images = [process(line) for line in fh]\n\n _, targets = zip(*images)\n return images, list(targets)\n\n def download_devkit(self) -> None:\n file, md5 = self._DEVKIT_META[self.variant]\n download_and_extract_archive(urljoin(self._BASE_URL, file), self.root, md5=md5)\n\n def download_images(self) -> None:\n if path.exists(self.images_dir):\n raise RuntimeError(\n f\"The directory {self.images_dir} already exists. If you want to re-download or re-extract the images, \"\n f\"delete the directory.\"\n )\n\n file, md5 = self._IMAGES_META[(self.split, self.small)]\n download_and_extract_archive(urljoin(self._BASE_URL, file), self.root, md5=md5)\n\n if self.split.startswith(\"train\"):\n os.rename(self.images_dir.rsplit(\"_\", 1)[0], self.images_dir)\n\n def extra_repr(self) -> str:\n return \"\\n\".join((\"Split: {split}\", \"Small: {small}\")).format(**self.__dict__)\n\n def _verify_split(self, split: str) -> str:\n return verify_str_arg(split, \"split\", self._SPLITS)\n\n def _check_integrity(self, file: str, md5: str, download: bool) -> bool:\n integrity = check_integrity(path.join(self.root, file), md5=md5)\n if not integrity and not download:\n raise RuntimeError(\n f\"The file {file} does not exist or is corrupted. You can set download=True to download it.\"\n )\n return integrity\n", "path": "torchvision/datasets/places365.py"}], "after_files": [{"content": "import os\nfrom os import path\nfrom typing import Any, Callable, Dict, List, Optional, Tuple\nfrom urllib.parse import urljoin\n\nfrom .folder import default_loader\nfrom .utils import verify_str_arg, check_integrity, download_and_extract_archive\nfrom .vision import VisionDataset\n\n\nclass Places365(VisionDataset):\n r\"\"\"`Places365 <http://places2.csail.mit.edu/index.html>`_ classification dataset.\n\n Args:\n root (string): Root directory of the Places365 dataset.\n split (string, optional): The dataset split. Can be one of ``train-standard`` (default), ``train-challendge``,\n ``val``.\n small (bool, optional): If ``True``, uses the small images, i. e. resized to 256 x 256 pixels, instead of the\n high resolution ones.\n download (bool, optional): If ``True``, downloads the dataset components and places them in ``root``. Already\n downloaded archives are not downloaded again.\n transform (callable, optional): A function/transform that takes in an PIL image\n and returns a transformed version. E.g, ``transforms.RandomCrop``\n target_transform (callable, optional): A function/transform that takes in the\n target and transforms it.\n loader (callable, optional): A function to load an image given its path.\n\n Attributes:\n classes (list): List of the class names.\n class_to_idx (dict): Dict with items (class_name, class_index).\n imgs (list): List of (image path, class_index) tuples\n targets (list): The class_index value for each image in the dataset\n\n Raises:\n RuntimeError: If ``download is False`` and the meta files, i. e. 
the devkit, are not present or corrupted.\n RuntimeError: If ``download is True`` and the image archive is already extracted.\n \"\"\"\n _SPLITS = (\"train-standard\", \"train-challenge\", \"val\")\n _BASE_URL = \"http://data.csail.mit.edu/places/places365/\"\n # {variant: (archive, md5)}\n _DEVKIT_META = {\n \"standard\": (\"filelist_places365-standard.tar\", \"35a0585fee1fa656440f3ab298f8479c\"),\n \"challenge\": (\"filelist_places365-challenge.tar\", \"70a8307e459c3de41690a7c76c931734\"),\n }\n # (file, md5)\n _CATEGORIES_META = (\"categories_places365.txt\", \"06c963b85866bd0649f97cb43dd16673\")\n # {split: (file, md5)}\n _FILE_LIST_META = {\n \"train-standard\": (\"places365_train_standard.txt\", \"30f37515461640559006b8329efbed1a\"),\n \"train-challenge\": (\"places365_train_challenge.txt\", \"b2931dc997b8c33c27e7329c073a6b57\"),\n \"val\": (\"places365_val.txt\", \"e9f2fd57bfd9d07630173f4e8708e4b1\"),\n }\n # {(split, small): (file, md5)}\n _IMAGES_META = {\n (\"train-standard\", False): (\"train_large_places365standard.tar\", \"67e186b496a84c929568076ed01a8aa1\"),\n (\"train-challenge\", False): (\"train_large_places365challenge.tar\", \"605f18e68e510c82b958664ea134545f\"),\n (\"val\", False): (\"val_large.tar\", \"9b71c4993ad89d2d8bcbdc4aef38042f\"),\n (\"train-standard\", True): (\"train_256_places365standard.tar\", \"53ca1c756c3d1e7809517cc47c5561c5\"),\n (\"train-challenge\", True): (\"train_256_places365challenge.tar\", \"741915038a5e3471ec7332404dfb64ef\"),\n (\"val\", True): (\"val_256.tar\", \"e27b17d8d44f4af9a78502beb927f808\"),\n }\n\n def __init__(\n self,\n root: str,\n split: str = \"train-standard\",\n small: bool = False,\n download: bool = False,\n transform: Optional[Callable] = None,\n target_transform: Optional[Callable] = None,\n loader: Callable[[str], Any] = default_loader,\n ) -> None:\n super().__init__(root, transform=transform, target_transform=target_transform)\n\n self.split = self._verify_split(split)\n self.small = small\n self.loader = loader\n\n self.classes, self.class_to_idx = self.load_categories(download)\n self.imgs, self.targets = self.load_file_list(download)\n\n if download:\n self.download_images()\n\n def __getitem__(self, index: int) -> Tuple[Any, Any]:\n file, target = self.imgs[index]\n image = self.loader(file)\n\n if self.transforms is not None:\n image, target = self.transforms(image, target)\n\n return image, target\n\n def __len__(self) -> int:\n return len(self.imgs)\n\n @property\n def variant(self) -> str:\n return \"challenge\" if \"challenge\" in self.split else \"standard\"\n\n @property\n def images_dir(self) -> str:\n size = \"256\" if self.small else \"large\"\n if self.split.startswith(\"train\"):\n dir = f\"data_{size}_{self.variant}\"\n else:\n dir = f\"{self.split}_{size}\"\n return path.join(self.root, dir)\n\n def load_categories(self, download: bool = True) -> Tuple[List[str], Dict[str, int]]:\n def process(line: str) -> Tuple[str, int]:\n cls, idx = line.split()\n return cls, int(idx)\n\n file, md5 = self._CATEGORIES_META\n file = path.join(self.root, file)\n if not self._check_integrity(file, md5, download):\n self.download_devkit()\n\n with open(file, \"r\") as fh:\n class_to_idx = dict(process(line) for line in fh)\n\n return sorted(class_to_idx.keys()), class_to_idx\n\n def load_file_list(self, download: bool = True) -> Tuple[List[Tuple[str, int]], List[int]]:\n def process(line: str, sep=\"/\") -> Tuple[str, int]:\n image, idx = line.split()\n return path.join(self.images_dir, image.lstrip(sep).replace(sep, 
os.sep)), int(idx)\n\n file, md5 = self._FILE_LIST_META[self.split]\n file = path.join(self.root, file)\n if not self._check_integrity(file, md5, download):\n self.download_devkit()\n\n with open(file, \"r\") as fh:\n images = [process(line) for line in fh]\n\n _, targets = zip(*images)\n return images, list(targets)\n\n def download_devkit(self) -> None:\n file, md5 = self._DEVKIT_META[self.variant]\n download_and_extract_archive(urljoin(self._BASE_URL, file), self.root, md5=md5)\n\n def download_images(self) -> None:\n if path.exists(self.images_dir):\n raise RuntimeError(\n f\"The directory {self.images_dir} already exists. If you want to re-download or re-extract the images, \"\n f\"delete the directory.\"\n )\n\n file, md5 = self._IMAGES_META[(self.split, self.small)]\n download_and_extract_archive(urljoin(self._BASE_URL, file), self.root, md5=md5)\n\n if self.split.startswith(\"train\"):\n os.rename(self.images_dir.rsplit(\"_\", 1)[0], self.images_dir)\n\n def extra_repr(self) -> str:\n return \"\\n\".join((\"Split: {split}\", \"Small: {small}\")).format(**self.__dict__)\n\n def _verify_split(self, split: str) -> str:\n return verify_str_arg(split, \"split\", self._SPLITS)\n\n def _check_integrity(self, file: str, md5: str, download: bool) -> bool:\n integrity = check_integrity(file, md5=md5)\n if not integrity and not download:\n raise RuntimeError(\n f\"The file {file} does not exist or is corrupted. You can set download=True to download it.\"\n )\n return integrity\n", "path": "torchvision/datasets/places365.py"}]} | 2,787 | 181 |
gh_patches_debug_18381 | rasdani/github-patches | git_diff | svthalia__concrexit-3155 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ThaliaPay batch push throttled by Moneybird
Whenever we process a ThaliaPay batch, we are throttled by Moneybird on the number of API requests we make.
This is most likely due to saving the bank accounts, which are pushed to Moneybird when the post_save signal is triggered.
This shouldn't be triggered when the last_used field of the bank account is updated, but it is.
Potential fix:
Make sure that update_fields is passed (correctly) with the post_save signal for bank accounts.
--- END ISSUE ---
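For context on the suggested fix: Django forwards the `update_fields` argument of `save()` to the `post_save` signal, so a receiver can skip the external push when only bookkeeping fields changed. Below is a minimal sketch of that pattern; the receiver name and the Moneybird call are assumptions for illustration, not concrexit's actual synchronisation code.

```python
# Illustrative sketch only: the receiver below is an assumption for this write-up,
# not concrexit's actual Moneybird synchronisation code.
from django.db.models.signals import post_save
from django.dispatch import receiver

from payments.models import BankAccount  # app label as used by website/payments


@receiver(post_save, sender=BankAccount)
def push_bank_account_to_moneybird(sender, instance, update_fields=None, **kwargs):
    # save(update_fields=["last_used"]) arrives here as frozenset({"last_used"}),
    # so a pure bookkeeping update can skip the external API call entirely.
    if update_fields and set(update_fields) <= {"last_used"}:
        return
    ...  # push `instance` to the bookkeeping API here
```

With a guard like this on the receiving side, a `bank_account.save(update_fields=["last_used"])` call made while processing a batch never reaches the Moneybird API, which is what the throttling report asks for.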
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `website/payments/services.py`
Content:
```
1 """The services defined by the payments package."""
2 import datetime
3 from typing import Union
4
5 from django.conf import settings
6 from django.core import mail
7 from django.db.models import Model, Q, QuerySet, Sum
8 from django.urls import reverse
9 from django.utils import timezone
10 from django.utils.translation import gettext_lazy as _
11
12 from members.models import Member
13 from utils.snippets import send_email
14
15 from .exceptions import PaymentError
16 from .models import BankAccount, Payment, PaymentUser
17 from .payables import Payable, payables
18 from .signals import processed_batch
19
20
21 def create_payment(
22 model_payable: Union[Model, Payable],
23 processed_by: Member,
24 pay_type: Union[Payment.CASH, Payment.CARD, Payment.WIRE, Payment.TPAY],
25 ) -> Payment:
26 """Create a new payment from a payable object.
27
28 :param model_payable: Payable or Model object
29 :param processed_by: PaymentUser that processed this payment
30 :param pay_type: Payment type
31 :return: Payment object
32 """
33 if pay_type not in (Payment.CASH, Payment.CARD, Payment.WIRE, Payment.TPAY):
34 raise PaymentError("Invalid payment type")
35
36 if isinstance(model_payable, Payable):
37 payable = model_payable
38 else:
39 payable = payables.get_payable(model_payable)
40
41 payer = (
42 PaymentUser.objects.get(pk=payable.payment_payer.pk)
43 if payable.payment_payer
44 else None
45 )
46
47 if not (
48 (payer and payer == processed_by and pay_type == Payment.TPAY)
49 or (payable.can_manage_payment(processed_by) and pay_type != Payment.TPAY)
50 ):
51 raise PaymentError(
52 _("User processing payment does not have the right permissions")
53 )
54
55 if payable.payment_amount == 0:
56 raise PaymentError(_("Payment amount 0 is not accepted"))
57
58 if pay_type == Payment.TPAY and not payer.tpay_enabled:
59 raise PaymentError(_("This user does not have Thalia Pay enabled"))
60
61 if not payable.paying_allowed:
62 raise PaymentError(_("Payment restricted"))
63
64 if payable.payment is not None:
65 payable.payment.amount = payable.payment_amount
66 payable.payment.notes = payable.payment_notes
67 payable.payment.topic = payable.payment_topic
68 payable.payment.paid_by = payer
69 payable.payment.processed_by = processed_by
70 payable.payment.type = pay_type
71 payable.payment.save()
72 else:
73 payable.payment = Payment.objects.create(
74 processed_by=processed_by,
75 amount=payable.payment_amount,
76 notes=payable.payment_notes,
77 topic=payable.payment_topic,
78 paid_by=payer,
79 type=pay_type,
80 )
81 return payable.payment
82
83
84 def delete_payment(model: Model, member: Member = None, ignore_change_window=False):
85 """Remove a payment from a payable object.
86
87 :param model: Payable or Model object
88 :param member: member deleting the payment
89 :param ignore_change_window: ignore the payment change window
90 :return:
91 """
92 payable = payables.get_payable(model)
93
94 if member and not payable.can_manage_payment(member):
95 raise PaymentError(
96 _("User deleting payment does not have the right permissions.")
97 )
98
99 payment = payable.payment
100 if (
101 payment.created_at
102 < timezone.now() - timezone.timedelta(seconds=settings.PAYMENT_CHANGE_WINDOW)
103 and not ignore_change_window
104 ):
105 raise PaymentError(_("This payment cannot be deleted anymore."))
106 if payment.batch and payment.batch.processed:
107 raise PaymentError(
108 _("This payment has already been processed and hence cannot be deleted.")
109 )
110
111 payable.payment = None
112 payable.model.save()
113 payment.delete()
114
115
116 def update_last_used(queryset: QuerySet, date: datetime.date = None) -> int:
117 """Update the last used field of a BankAccount queryset.
118
119 :param queryset: Queryset of BankAccounts
120 :param date: date to set last_used to
121 :return: number of affected rows
122 """
123 if not date:
124 date = timezone.now().date()
125
126 result = queryset.filter(
127 (Q(valid_from__gte=timezone.now()) & Q(valid_until__lt=timezone.now()))
128 | Q(valid_until=None)
129 ).update(last_used=date)
130 return result
131
132
133 def revoke_old_mandates() -> int:
134 """Revoke all mandates that have not been used for 36 months or more.
135
136 :return: number of affected rows
137 """
138 return BankAccount.objects.filter(
139 last_used__lte=(timezone.now() - timezone.timedelta(days=36 * 30))
140 ).update(valid_until=timezone.now().date())
141
142
143 def process_batch(batch):
144 """Process a Thalia Pay batch.
145
146 :param batch: the batch to be processed
147 :return:
148 """
149 batch.processed = True
150
151 payments = batch.payments_set.select_related("paid_by")
152 for payment in payments:
153 bank_account = payment.paid_by.bank_accounts.last()
154 if not bank_account: # pragma: no cover
155 # This should not happen, cannot haver, does not happen (right... ;p), but if it does, we don't want to crash, but just remove the payment from the batch (make it unprocessed)
156 payment.batch = None
157 payment.save()
158 else:
159 bank_account.last_used = batch.withdrawal_date
160 bank_account.save()
161
162 batch.save()
163 processed_batch.send(sender=None, instance=batch)
164
165 send_tpay_batch_processing_emails(batch)
166
167
168 def derive_next_mandate_no(member) -> str:
169 accounts = (
170 BankAccount.objects.filter(owner=PaymentUser.objects.get(pk=member.pk))
171 .exclude(mandate_no=None)
172 .filter(mandate_no__regex=BankAccount.MANDATE_NO_DEFAULT_REGEX)
173 )
174 new_mandate_no = 1 + max(
175 (int(account.mandate_no.split("-")[1]) for account in accounts), default=0
176 )
177 return f"{member.pk}-{new_mandate_no}"
178
179
180 def send_tpay_batch_processing_emails(batch):
181 """Send withdrawal notice emails to all members in a batch."""
182 member_payments = batch.payments_set.values("paid_by").annotate(total=Sum("amount"))
183 with mail.get_connection() as connection:
184 for member_row in member_payments:
185 member = PaymentUser.objects.get(pk=member_row["paid_by"])
186 total_amount = member_row["total"]
187
188 send_email(
189 to=[member.email],
190 subject="Thalia Pay withdrawal notice",
191 txt_template="payments/email/tpay_withdrawal_notice_mail.txt",
192 html_template="payments/email/tpay_withdrawal_notice_mail.html",
193 connection=connection,
194 context={
195 "name": member.get_full_name(),
196 "batch": batch,
197 "bank_account": member.bank_accounts.filter(
198 mandate_no__isnull=False
199 ).last(),
200 "creditor_id": settings.SEPA_CREDITOR_ID,
201 "payments": batch.payments_set.filter(paid_by=member),
202 "total_amount": total_amount,
203 "payments_url": (
204 settings.BASE_URL
205 + reverse(
206 "payments:payment-list",
207 )
208 ),
209 },
210 )
211 return len(member_payments)
212
213
214 def execute_data_minimisation(dry_run=False):
215 """Anonymizes payments older than 7 years."""
216 # Sometimes years are 366 days of course, but better delete 1 or 2 days early than late
217 payment_deletion_period = timezone.now().date() - timezone.timedelta(days=365 * 7)
218 bankaccount_deletion_period = timezone.now() - datetime.timedelta(days=31 * 13)
219
220 queryset_payments = Payment.objects.filter(
221 created_at__lte=payment_deletion_period
222 ).exclude(paid_by__isnull=True)
223
224 # Delete bank accounts that are not valid anymore, and have not been used in the last 13 months
225 # (13 months is the required time we need to keep the mandates for)
226 queryset_bankaccounts = BankAccount.objects.all()
227 queryset_bankaccounts = queryset_bankaccounts.filter(
228 valid_until__lt=timezone.now()
229 ) # We must always keep valid bank accounts. so we only select the ones that are not valid anymore (valid_until < now)
230 queryset_bankaccounts = queryset_bankaccounts.exclude( # Also keep bank accounts that
231 Q(
232 owner__paid_payment_set__type=Payment.TPAY
233 ), # are used for Thalia Pay payments, AND
234 Q(
235 owner__paid_payment_set__batch__isnull=True
236 ) # have a payment that is in no batch, OR
237 | Q(
238 owner__paid_payment_set__batch__processed=False
239 ) # have an unprocessed batch, OR
240 | Q(
241 owner__paid_payment_set__batch__processing_date__gt=bankaccount_deletion_period # or have a processed batch that is not older than 13 months
242 ),
243 )
244
245 if not dry_run:
246 queryset_payments.update(paid_by=None, processed_by=None)
247 queryset_bankaccounts.delete()
248 return queryset_payments
249
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/website/payments/services.py b/website/payments/services.py
--- a/website/payments/services.py
+++ b/website/payments/services.py
@@ -152,12 +152,13 @@
for payment in payments:
bank_account = payment.paid_by.bank_accounts.last()
if not bank_account: # pragma: no cover
- # This should not happen, cannot haver, does not happen (right... ;p), but if it does, we don't want to crash, but just remove the payment from the batch (make it unprocessed)
+ # This should not happen, cannot happen, does not happen (right... ;p)
+ # but if it does, we don't want to crash, but just remove the payment from the batch (make it unprocessed)
payment.batch = None
payment.save()
else:
bank_account.last_used = batch.withdrawal_date
- bank_account.save()
+ bank_account.save(update_fields=["last_used"])
batch.save()
processed_batch.send(sender=None, instance=batch)
| {"golden_diff": "diff --git a/website/payments/services.py b/website/payments/services.py\n--- a/website/payments/services.py\n+++ b/website/payments/services.py\n@@ -152,12 +152,13 @@\n for payment in payments:\n bank_account = payment.paid_by.bank_accounts.last()\n if not bank_account: # pragma: no cover\n- # This should not happen, cannot haver, does not happen (right... ;p), but if it does, we don't want to crash, but just remove the payment from the batch (make it unprocessed)\n+ # This should not happen, cannot happen, does not happen (right... ;p)\n+ # but if it does, we don't want to crash, but just remove the payment from the batch (make it unprocessed)\n payment.batch = None\n payment.save()\n else:\n bank_account.last_used = batch.withdrawal_date\n- bank_account.save()\n+ bank_account.save(update_fields=[\"last_used\"])\n \n batch.save()\n processed_batch.send(sender=None, instance=batch)\n", "issue": "ThaliaPay batch push throttled by Moneybird\nWhenever we process a ThaliaPay batch, we are throttled by Moneybird on the amount of API requests we make.\r\nThis is most likely due to saving the bank accounts which are pushed to moneybird when the post_save signal is triggered.\r\nThis shouldn't be triggered when the last_used field of the bank account is updated, but it is.\r\n\r\nPotential fix:\r\nMake sure that updated_fields is passed (correctly) with the post_save signal for bank accounts.\n", "before_files": [{"content": "\"\"\"The services defined by the payments package.\"\"\"\nimport datetime\nfrom typing import Union\n\nfrom django.conf import settings\nfrom django.core import mail\nfrom django.db.models import Model, Q, QuerySet, Sum\nfrom django.urls import reverse\nfrom django.utils import timezone\nfrom django.utils.translation import gettext_lazy as _\n\nfrom members.models import Member\nfrom utils.snippets import send_email\n\nfrom .exceptions import PaymentError\nfrom .models import BankAccount, Payment, PaymentUser\nfrom .payables import Payable, payables\nfrom .signals import processed_batch\n\n\ndef create_payment(\n model_payable: Union[Model, Payable],\n processed_by: Member,\n pay_type: Union[Payment.CASH, Payment.CARD, Payment.WIRE, Payment.TPAY],\n) -> Payment:\n \"\"\"Create a new payment from a payable object.\n\n :param model_payable: Payable or Model object\n :param processed_by: PaymentUser that processed this payment\n :param pay_type: Payment type\n :return: Payment object\n \"\"\"\n if pay_type not in (Payment.CASH, Payment.CARD, Payment.WIRE, Payment.TPAY):\n raise PaymentError(\"Invalid payment type\")\n\n if isinstance(model_payable, Payable):\n payable = model_payable\n else:\n payable = payables.get_payable(model_payable)\n\n payer = (\n PaymentUser.objects.get(pk=payable.payment_payer.pk)\n if payable.payment_payer\n else None\n )\n\n if not (\n (payer and payer == processed_by and pay_type == Payment.TPAY)\n or (payable.can_manage_payment(processed_by) and pay_type != Payment.TPAY)\n ):\n raise PaymentError(\n _(\"User processing payment does not have the right permissions\")\n )\n\n if payable.payment_amount == 0:\n raise PaymentError(_(\"Payment amount 0 is not accepted\"))\n\n if pay_type == Payment.TPAY and not payer.tpay_enabled:\n raise PaymentError(_(\"This user does not have Thalia Pay enabled\"))\n\n if not payable.paying_allowed:\n raise PaymentError(_(\"Payment restricted\"))\n\n if payable.payment is not None:\n payable.payment.amount = payable.payment_amount\n payable.payment.notes = payable.payment_notes\n 
payable.payment.topic = payable.payment_topic\n payable.payment.paid_by = payer\n payable.payment.processed_by = processed_by\n payable.payment.type = pay_type\n payable.payment.save()\n else:\n payable.payment = Payment.objects.create(\n processed_by=processed_by,\n amount=payable.payment_amount,\n notes=payable.payment_notes,\n topic=payable.payment_topic,\n paid_by=payer,\n type=pay_type,\n )\n return payable.payment\n\n\ndef delete_payment(model: Model, member: Member = None, ignore_change_window=False):\n \"\"\"Remove a payment from a payable object.\n\n :param model: Payable or Model object\n :param member: member deleting the payment\n :param ignore_change_window: ignore the payment change window\n :return:\n \"\"\"\n payable = payables.get_payable(model)\n\n if member and not payable.can_manage_payment(member):\n raise PaymentError(\n _(\"User deleting payment does not have the right permissions.\")\n )\n\n payment = payable.payment\n if (\n payment.created_at\n < timezone.now() - timezone.timedelta(seconds=settings.PAYMENT_CHANGE_WINDOW)\n and not ignore_change_window\n ):\n raise PaymentError(_(\"This payment cannot be deleted anymore.\"))\n if payment.batch and payment.batch.processed:\n raise PaymentError(\n _(\"This payment has already been processed and hence cannot be deleted.\")\n )\n\n payable.payment = None\n payable.model.save()\n payment.delete()\n\n\ndef update_last_used(queryset: QuerySet, date: datetime.date = None) -> int:\n \"\"\"Update the last used field of a BankAccount queryset.\n\n :param queryset: Queryset of BankAccounts\n :param date: date to set last_used to\n :return: number of affected rows\n \"\"\"\n if not date:\n date = timezone.now().date()\n\n result = queryset.filter(\n (Q(valid_from__gte=timezone.now()) & Q(valid_until__lt=timezone.now()))\n | Q(valid_until=None)\n ).update(last_used=date)\n return result\n\n\ndef revoke_old_mandates() -> int:\n \"\"\"Revoke all mandates that have not been used for 36 months or more.\n\n :return: number of affected rows\n \"\"\"\n return BankAccount.objects.filter(\n last_used__lte=(timezone.now() - timezone.timedelta(days=36 * 30))\n ).update(valid_until=timezone.now().date())\n\n\ndef process_batch(batch):\n \"\"\"Process a Thalia Pay batch.\n\n :param batch: the batch to be processed\n :return:\n \"\"\"\n batch.processed = True\n\n payments = batch.payments_set.select_related(\"paid_by\")\n for payment in payments:\n bank_account = payment.paid_by.bank_accounts.last()\n if not bank_account: # pragma: no cover\n # This should not happen, cannot haver, does not happen (right... 
;p), but if it does, we don't want to crash, but just remove the payment from the batch (make it unprocessed)\n payment.batch = None\n payment.save()\n else:\n bank_account.last_used = batch.withdrawal_date\n bank_account.save()\n\n batch.save()\n processed_batch.send(sender=None, instance=batch)\n\n send_tpay_batch_processing_emails(batch)\n\n\ndef derive_next_mandate_no(member) -> str:\n accounts = (\n BankAccount.objects.filter(owner=PaymentUser.objects.get(pk=member.pk))\n .exclude(mandate_no=None)\n .filter(mandate_no__regex=BankAccount.MANDATE_NO_DEFAULT_REGEX)\n )\n new_mandate_no = 1 + max(\n (int(account.mandate_no.split(\"-\")[1]) for account in accounts), default=0\n )\n return f\"{member.pk}-{new_mandate_no}\"\n\n\ndef send_tpay_batch_processing_emails(batch):\n \"\"\"Send withdrawal notice emails to all members in a batch.\"\"\"\n member_payments = batch.payments_set.values(\"paid_by\").annotate(total=Sum(\"amount\"))\n with mail.get_connection() as connection:\n for member_row in member_payments:\n member = PaymentUser.objects.get(pk=member_row[\"paid_by\"])\n total_amount = member_row[\"total\"]\n\n send_email(\n to=[member.email],\n subject=\"Thalia Pay withdrawal notice\",\n txt_template=\"payments/email/tpay_withdrawal_notice_mail.txt\",\n html_template=\"payments/email/tpay_withdrawal_notice_mail.html\",\n connection=connection,\n context={\n \"name\": member.get_full_name(),\n \"batch\": batch,\n \"bank_account\": member.bank_accounts.filter(\n mandate_no__isnull=False\n ).last(),\n \"creditor_id\": settings.SEPA_CREDITOR_ID,\n \"payments\": batch.payments_set.filter(paid_by=member),\n \"total_amount\": total_amount,\n \"payments_url\": (\n settings.BASE_URL\n + reverse(\n \"payments:payment-list\",\n )\n ),\n },\n )\n return len(member_payments)\n\n\ndef execute_data_minimisation(dry_run=False):\n \"\"\"Anonymizes payments older than 7 years.\"\"\"\n # Sometimes years are 366 days of course, but better delete 1 or 2 days early than late\n payment_deletion_period = timezone.now().date() - timezone.timedelta(days=365 * 7)\n bankaccount_deletion_period = timezone.now() - datetime.timedelta(days=31 * 13)\n\n queryset_payments = Payment.objects.filter(\n created_at__lte=payment_deletion_period\n ).exclude(paid_by__isnull=True)\n\n # Delete bank accounts that are not valid anymore, and have not been used in the last 13 months\n # (13 months is the required time we need to keep the mandates for)\n queryset_bankaccounts = BankAccount.objects.all()\n queryset_bankaccounts = queryset_bankaccounts.filter(\n valid_until__lt=timezone.now()\n ) # We must always keep valid bank accounts. 
so we only select the ones that are not valid anymore (valid_until < now)\n queryset_bankaccounts = queryset_bankaccounts.exclude( # Also keep bank accounts that\n Q(\n owner__paid_payment_set__type=Payment.TPAY\n ), # are used for Thalia Pay payments, AND\n Q(\n owner__paid_payment_set__batch__isnull=True\n ) # have a payment that is in no batch, OR\n | Q(\n owner__paid_payment_set__batch__processed=False\n ) # have an unprocessed batch, OR\n | Q(\n owner__paid_payment_set__batch__processing_date__gt=bankaccount_deletion_period # or have a processed batch that is not older than 13 months\n ),\n )\n\n if not dry_run:\n queryset_payments.update(paid_by=None, processed_by=None)\n queryset_bankaccounts.delete()\n return queryset_payments\n", "path": "website/payments/services.py"}], "after_files": [{"content": "\"\"\"The services defined by the payments package.\"\"\"\nimport datetime\nfrom typing import Union\n\nfrom django.conf import settings\nfrom django.core import mail\nfrom django.db.models import Model, Q, QuerySet, Sum\nfrom django.urls import reverse\nfrom django.utils import timezone\nfrom django.utils.translation import gettext_lazy as _\n\nfrom members.models import Member\nfrom utils.snippets import send_email\n\nfrom .exceptions import PaymentError\nfrom .models import BankAccount, Payment, PaymentUser\nfrom .payables import Payable, payables\nfrom .signals import processed_batch\n\n\ndef create_payment(\n model_payable: Union[Model, Payable],\n processed_by: Member,\n pay_type: Union[Payment.CASH, Payment.CARD, Payment.WIRE, Payment.TPAY],\n) -> Payment:\n \"\"\"Create a new payment from a payable object.\n\n :param model_payable: Payable or Model object\n :param processed_by: PaymentUser that processed this payment\n :param pay_type: Payment type\n :return: Payment object\n \"\"\"\n if pay_type not in (Payment.CASH, Payment.CARD, Payment.WIRE, Payment.TPAY):\n raise PaymentError(\"Invalid payment type\")\n\n if isinstance(model_payable, Payable):\n payable = model_payable\n else:\n payable = payables.get_payable(model_payable)\n\n payer = (\n PaymentUser.objects.get(pk=payable.payment_payer.pk)\n if payable.payment_payer\n else None\n )\n\n if not (\n (payer and payer == processed_by and pay_type == Payment.TPAY)\n or (payable.can_manage_payment(processed_by) and pay_type != Payment.TPAY)\n ):\n raise PaymentError(\n _(\"User processing payment does not have the right permissions\")\n )\n\n if payable.payment_amount == 0:\n raise PaymentError(_(\"Payment amount 0 is not accepted\"))\n\n if pay_type == Payment.TPAY and not payer.tpay_enabled:\n raise PaymentError(_(\"This user does not have Thalia Pay enabled\"))\n\n if not payable.paying_allowed:\n raise PaymentError(_(\"Payment restricted\"))\n\n if payable.payment is not None:\n payable.payment.amount = payable.payment_amount\n payable.payment.notes = payable.payment_notes\n payable.payment.topic = payable.payment_topic\n payable.payment.paid_by = payer\n payable.payment.processed_by = processed_by\n payable.payment.type = pay_type\n payable.payment.save()\n else:\n payable.payment = Payment.objects.create(\n processed_by=processed_by,\n amount=payable.payment_amount,\n notes=payable.payment_notes,\n topic=payable.payment_topic,\n paid_by=payer,\n type=pay_type,\n )\n return payable.payment\n\n\ndef delete_payment(model: Model, member: Member = None, ignore_change_window=False):\n \"\"\"Remove a payment from a payable object.\n\n :param model: Payable or Model object\n :param member: member deleting the payment\n :param 
ignore_change_window: ignore the payment change window\n :return:\n \"\"\"\n payable = payables.get_payable(model)\n\n if member and not payable.can_manage_payment(member):\n raise PaymentError(\n _(\"User deleting payment does not have the right permissions.\")\n )\n\n payment = payable.payment\n if (\n payment.created_at\n < timezone.now() - timezone.timedelta(seconds=settings.PAYMENT_CHANGE_WINDOW)\n and not ignore_change_window\n ):\n raise PaymentError(_(\"This payment cannot be deleted anymore.\"))\n if payment.batch and payment.batch.processed:\n raise PaymentError(\n _(\"This payment has already been processed and hence cannot be deleted.\")\n )\n\n payable.payment = None\n payable.model.save()\n payment.delete()\n\n\ndef update_last_used(queryset: QuerySet, date: datetime.date = None) -> int:\n \"\"\"Update the last used field of a BankAccount queryset.\n\n :param queryset: Queryset of BankAccounts\n :param date: date to set last_used to\n :return: number of affected rows\n \"\"\"\n if not date:\n date = timezone.now().date()\n\n result = queryset.filter(\n (Q(valid_from__gte=timezone.now()) & Q(valid_until__lt=timezone.now()))\n | Q(valid_until=None)\n ).update(last_used=date)\n return result\n\n\ndef revoke_old_mandates() -> int:\n \"\"\"Revoke all mandates that have not been used for 36 months or more.\n\n :return: number of affected rows\n \"\"\"\n return BankAccount.objects.filter(\n last_used__lte=(timezone.now() - timezone.timedelta(days=36 * 30))\n ).update(valid_until=timezone.now().date())\n\n\ndef process_batch(batch):\n \"\"\"Process a Thalia Pay batch.\n\n :param batch: the batch to be processed\n :return:\n \"\"\"\n batch.processed = True\n\n payments = batch.payments_set.select_related(\"paid_by\")\n for payment in payments:\n bank_account = payment.paid_by.bank_accounts.last()\n if not bank_account: # pragma: no cover\n # This should not happen, cannot happen, does not happen (right... 
;p)\n # but if it does, we don't want to crash, but just remove the payment from the batch (make it unprocessed)\n payment.batch = None\n payment.save()\n else:\n bank_account.last_used = batch.withdrawal_date\n bank_account.save(update_fields=[\"last_used\"])\n\n batch.save()\n processed_batch.send(sender=None, instance=batch)\n\n send_tpay_batch_processing_emails(batch)\n\n\ndef derive_next_mandate_no(member) -> str:\n accounts = (\n BankAccount.objects.filter(owner=PaymentUser.objects.get(pk=member.pk))\n .exclude(mandate_no=None)\n .filter(mandate_no__regex=BankAccount.MANDATE_NO_DEFAULT_REGEX)\n )\n new_mandate_no = 1 + max(\n (int(account.mandate_no.split(\"-\")[1]) for account in accounts), default=0\n )\n return f\"{member.pk}-{new_mandate_no}\"\n\n\ndef send_tpay_batch_processing_emails(batch):\n \"\"\"Send withdrawal notice emails to all members in a batch.\"\"\"\n member_payments = batch.payments_set.values(\"paid_by\").annotate(total=Sum(\"amount\"))\n with mail.get_connection() as connection:\n for member_row in member_payments:\n member = PaymentUser.objects.get(pk=member_row[\"paid_by\"])\n total_amount = member_row[\"total\"]\n\n send_email(\n to=[member.email],\n subject=\"Thalia Pay withdrawal notice\",\n txt_template=\"payments/email/tpay_withdrawal_notice_mail.txt\",\n html_template=\"payments/email/tpay_withdrawal_notice_mail.html\",\n connection=connection,\n context={\n \"name\": member.get_full_name(),\n \"batch\": batch,\n \"bank_account\": member.bank_accounts.filter(\n mandate_no__isnull=False\n ).last(),\n \"creditor_id\": settings.SEPA_CREDITOR_ID,\n \"payments\": batch.payments_set.filter(paid_by=member),\n \"total_amount\": total_amount,\n \"payments_url\": (\n settings.BASE_URL\n + reverse(\n \"payments:payment-list\",\n )\n ),\n },\n )\n return len(member_payments)\n\n\ndef execute_data_minimisation(dry_run=False):\n \"\"\"Anonymizes payments older than 7 years.\"\"\"\n # Sometimes years are 366 days of course, but better delete 1 or 2 days early than late\n payment_deletion_period = timezone.now().date() - timezone.timedelta(days=365 * 7)\n bankaccount_deletion_period = timezone.now() - datetime.timedelta(days=31 * 13)\n\n queryset_payments = Payment.objects.filter(\n created_at__lte=payment_deletion_period\n ).exclude(paid_by__isnull=True)\n\n # Delete bank accounts that are not valid anymore, and have not been used in the last 13 months\n # (13 months is the required time we need to keep the mandates for)\n queryset_bankaccounts = BankAccount.objects.all()\n queryset_bankaccounts = queryset_bankaccounts.filter(\n valid_until__lt=timezone.now()\n ) # We must always keep valid bank accounts. so we only select the ones that are not valid anymore (valid_until < now)\n queryset_bankaccounts = queryset_bankaccounts.exclude( # Also keep bank accounts that\n Q(\n owner__paid_payment_set__type=Payment.TPAY\n ), # are used for Thalia Pay payments, AND\n Q(\n owner__paid_payment_set__batch__isnull=True\n ) # have a payment that is in no batch, OR\n | Q(\n owner__paid_payment_set__batch__processed=False\n ) # have an unprocessed batch, OR\n | Q(\n owner__paid_payment_set__batch__processing_date__gt=bankaccount_deletion_period # or have a processed batch that is not older than 13 months\n ),\n )\n\n if not dry_run:\n queryset_payments.update(paid_by=None, processed_by=None)\n queryset_bankaccounts.delete()\n return queryset_payments\n", "path": "website/payments/services.py"}]} | 2,973 | 236 |
gh_patches_debug_37129 | rasdani/github-patches | git_diff | streamlink__streamlink-2912 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[bug] CDNBG - multiple issues
## Bug Report
1. BITelevision should be removed from the plugin and/or wiki/info pages, as it no longer exists.
2. Inlife.bg shouldn't be listed as supported under CDNbg.
3. Tvbulgare.bg should be listed as supported in inlife.bg's place (latter shares the former's stream)
4. Mu-vi.tv gives an error.
5. CDNBG should cover VTK - the national military channel
6. Kanal3's livestream is not found.
7. CDNBG should cover Cherno More - the regional channel for Varna, Bulgaria.
### Reproduction steps / Explicit stream URLs to test
1. https://bitelevision.com/ is not a thing anymore.
2. Inlife.bg can't be opened and shouldn't be listed - it is a 'media partner' that restreams https://tvbulgare.bg/, which could be put in as a replacement for it.
3. https://tvbulgare.bg/ - No playable streams found.
4. http://mu-vi.tv/LiveStreams/pages/Live.aspx - Error: Unable to open URL.
5. https://www.armymedia.bg/
6. https://kanal3.bg/live
7. https://www.chernomore.bg/
--- END ISSUE ---
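A quick way to triage the list above is to ask the plugin's own matcher which of the reported URLs it currently claims, using the `can_handle_url` classmethod shown in the plugin file below. This is only a diagnostic sketch, assuming a local streamlink checkout on the Python path.

```python
# Triage sketch (not part of any patch): check which of the URLs from this report
# the plugin's matcher currently claims.
from streamlink.plugins.cdnbg import CDNBG

reported_urls = [
    "https://tvbulgare.bg/",
    "http://mu-vi.tv/LiveStreams/pages/Live.aspx",
    "https://www.armymedia.bg/",
    "https://kanal3.bg/live",
    "https://www.chernomore.bg/",
]
for url in reported_urls:
    print(f"{url} -> {CDNBG.can_handle_url(url)}")
```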
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/streamlink/plugins/cdnbg.py`
Content:
```
1 import logging
2 import re
3
4 from streamlink.compat import urlparse
5 from streamlink.plugin import Plugin
6 from streamlink.plugin.api import useragents
7 from streamlink.plugin.api import validate
8 from streamlink.stream import HLSStream
9 from streamlink.utils import update_scheme
10
11 log = logging.getLogger(__name__)
12
13
14 class CDNBG(Plugin):
15 url_re = re.compile(r"""
16 https?://(?:www\.)?(?:
17 tv\.bnt\.bg/\w+(?:/\w+)?|
18 bitelevision\.com/live|
19 nova\.bg/live|
20 kanal3\.bg/live|
21 bgonair\.bg/tvonline|
22 inlife\.bg|
23 mmtvmusic\.com/live|
24 mu-vi\.tv/LiveStreams/pages/Live\.aspx|
25 videochanel\.bstv\.bg|
26 live\.bstv\.bg|
27 bloombergtv.bg/video
28 )/?
29 """, re.VERBOSE)
30     iframe_re = re.compile(r"iframe .*?src=\"((?:https?(?::|&#58;))?//(?:\w+\.)?cdn.bg/live[^\"]+)\"", re.DOTALL)
31 sdata_re = re.compile(r"sdata\.src.*?=.*?(?P<q>[\"'])(?P<url>http.*?)(?P=q)")
32 hls_file_re = re.compile(r"(src|file): (?P<q>[\"'])(?P<url>(https?:)?//.+?m3u8.*?)(?P=q)")
33 hls_src_re = re.compile(r"video src=(?P<url>http[^ ]+m3u8[^ ]*)")
34
35 stream_schema = validate.Schema(
36 validate.any(
37 validate.all(validate.transform(sdata_re.search), validate.get("url")),
38 validate.all(validate.transform(hls_file_re.search), validate.get("url")),
39 validate.all(validate.transform(hls_src_re.search), validate.get("url")),
40 )
41 )
42
43 @classmethod
44 def can_handle_url(cls, url):
45 return cls.url_re.match(url) is not None
46
47 def find_iframe(self, res):
48 p = urlparse(self.url)
49 for url in self.iframe_re.findall(res.text):
50 if "googletagmanager" not in url:
51                 url = url.replace("&#58;", ":")
52 if url.startswith("//"):
53 return "{0}:{1}".format(p.scheme, url)
54 else:
55 return url
56
57 def _get_streams(self):
58 self.session.http.headers.update({"User-Agent": useragents.CHROME})
59 res = self.session.http.get(self.url)
60 iframe_url = self.find_iframe(res)
61
62 if iframe_url:
63 log.debug("Found iframe: {0}", iframe_url)
64 res = self.session.http.get(iframe_url, headers={"Referer": self.url})
65 stream_url = update_scheme(self.url, self.stream_schema.validate(res.text))
66 log.warning("SSL Verification disabled.")
67 return HLSStream.parse_variant_playlist(self.session,
68 stream_url,
69 verify=False)
70
71
72 __plugin__ = CDNBG
73
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/streamlink/plugins/cdnbg.py b/src/streamlink/plugins/cdnbg.py
--- a/src/streamlink/plugins/cdnbg.py
+++ b/src/streamlink/plugins/cdnbg.py
@@ -15,16 +15,15 @@
url_re = re.compile(r"""
https?://(?:www\.)?(?:
tv\.bnt\.bg/\w+(?:/\w+)?|
- bitelevision\.com/live|
nova\.bg/live|
- kanal3\.bg/live|
bgonair\.bg/tvonline|
- inlife\.bg|
mmtvmusic\.com/live|
mu-vi\.tv/LiveStreams/pages/Live\.aspx|
- videochanel\.bstv\.bg|
live\.bstv\.bg|
- bloombergtv.bg/video
+ bloombergtv.bg/video|
+ armymedia.bg|
+ chernomore.bg|
+ i.cdn.bg/live/
)/?
""", re.VERBOSE)
     iframe_re = re.compile(r"iframe .*?src=\"((?:https?(?::|&#58;))?//(?:\w+\.)?cdn.bg/live[^\"]+)\"", re.DOTALL)
@@ -44,23 +43,26 @@
def can_handle_url(cls, url):
return cls.url_re.match(url) is not None
- def find_iframe(self, res):
- p = urlparse(self.url)
- for url in self.iframe_re.findall(res.text):
- if "googletagmanager" not in url:
-                url = url.replace("&#58;", ":")
- if url.startswith("//"):
- return "{0}:{1}".format(p.scheme, url)
+ def find_iframe(self, url):
+ self.session.http.headers.update({"User-Agent": useragents.CHROME})
+ res = self.session.http.get(self.url)
+ p = urlparse(url)
+ for iframe_url in self.iframe_re.findall(res.text):
+ if "googletagmanager" not in iframe_url:
+ log.debug("Found iframe: {0}", iframe_url)
+                iframe_url = iframe_url.replace("&#58;", ":")
+ if iframe_url.startswith("//"):
+ return "{0}:{1}".format(p.scheme, iframe_url)
else:
- return url
+ return iframe_url
def _get_streams(self):
- self.session.http.headers.update({"User-Agent": useragents.CHROME})
- res = self.session.http.get(self.url)
- iframe_url = self.find_iframe(res)
+ if "i.cdn.bg/live/" in self.url:
+ iframe_url = self.url
+ else:
+ iframe_url = self.find_iframe(self.url)
if iframe_url:
- log.debug("Found iframe: {0}", iframe_url)
res = self.session.http.get(iframe_url, headers={"Referer": self.url})
stream_url = update_scheme(self.url, self.stream_schema.validate(res.text))
log.warning("SSL Verification disabled.")
| {"golden_diff": "diff --git a/src/streamlink/plugins/cdnbg.py b/src/streamlink/plugins/cdnbg.py\n--- a/src/streamlink/plugins/cdnbg.py\n+++ b/src/streamlink/plugins/cdnbg.py\n@@ -15,16 +15,15 @@\n url_re = re.compile(r\"\"\"\n https?://(?:www\\.)?(?:\n tv\\.bnt\\.bg/\\w+(?:/\\w+)?|\n- bitelevision\\.com/live|\n nova\\.bg/live|\n- kanal3\\.bg/live|\n bgonair\\.bg/tvonline|\n- inlife\\.bg|\n mmtvmusic\\.com/live|\n mu-vi\\.tv/LiveStreams/pages/Live\\.aspx|\n- videochanel\\.bstv\\.bg|\n live\\.bstv\\.bg|\n- bloombergtv.bg/video\n+ bloombergtv.bg/video|\n+ armymedia.bg|\n+ chernomore.bg|\n+ i.cdn.bg/live/\n )/?\n \"\"\", re.VERBOSE)\n iframe_re = re.compile(r\"iframe .*?src=\\\"((?:https?(?::|:))?//(?:\\w+\\.)?cdn.bg/live[^\\\"]+)\\\"\", re.DOTALL)\n@@ -44,23 +43,26 @@\n def can_handle_url(cls, url):\n return cls.url_re.match(url) is not None\n \n- def find_iframe(self, res):\n- p = urlparse(self.url)\n- for url in self.iframe_re.findall(res.text):\n- if \"googletagmanager\" not in url:\n- url = url.replace(\":\", \":\")\n- if url.startswith(\"//\"):\n- return \"{0}:{1}\".format(p.scheme, url)\n+ def find_iframe(self, url):\n+ self.session.http.headers.update({\"User-Agent\": useragents.CHROME})\n+ res = self.session.http.get(self.url)\n+ p = urlparse(url)\n+ for iframe_url in self.iframe_re.findall(res.text):\n+ if \"googletagmanager\" not in iframe_url:\n+ log.debug(\"Found iframe: {0}\", iframe_url)\n+ iframe_url = iframe_url.replace(\":\", \":\")\n+ if iframe_url.startswith(\"//\"):\n+ return \"{0}:{1}\".format(p.scheme, iframe_url)\n else:\n- return url\n+ return iframe_url\n \n def _get_streams(self):\n- self.session.http.headers.update({\"User-Agent\": useragents.CHROME})\n- res = self.session.http.get(self.url)\n- iframe_url = self.find_iframe(res)\n+ if \"i.cdn.bg/live/\" in self.url:\n+ iframe_url = self.url\n+ else:\n+ iframe_url = self.find_iframe(self.url)\n \n if iframe_url:\n- log.debug(\"Found iframe: {0}\", iframe_url)\n res = self.session.http.get(iframe_url, headers={\"Referer\": self.url})\n stream_url = update_scheme(self.url, self.stream_schema.validate(res.text))\n log.warning(\"SSL Verification disabled.\")\n", "issue": "[bug] CDNBG - multiple issues\n## Bug Report\r\n1. BITelevision should be removed from the plugin and/or wiki/info pages, as it no longer exists.\r\n2. Inlife.bg shouldn't be listed as supported under CDNbg.\r\n3. Tvbulgare.bg should be listed as supported in inlife.bg's place (latter shares the former's stream)\r\n4. Mu-vi.tv gives an error.\r\n5. CDNBG should cover VTK - the national military channel\r\n6. Kanal3's livestream is not found.\r\n7. CDNBG should cover Cherno More - the regional channel for Varna, Bulgaria.\r\n\r\n### Reproduction steps / Explicit stream URLs to test\r\n1. https://bitelevision.com/ is not a thing anymore.\r\n2. Inlife.bg can't be opened and shouldn't be listed - it is a 'media partner' that restreams https://tvbulgare.bg/, which could be put in as a replacement for it.\r\n3. https://tvbulgare.bg/ - No playable streams found.\r\n4. http://mu-vi.tv/LiveStreams/pages/Live.aspx - Error: Unable to open URL.\r\n5. https://www.armymedia.bg/\r\n6. https://kanal3.bg/live\r\n7. 
https://www.chernomore.bg/\n", "before_files": [{"content": "import logging\nimport re\n\nfrom streamlink.compat import urlparse\nfrom streamlink.plugin import Plugin\nfrom streamlink.plugin.api import useragents\nfrom streamlink.plugin.api import validate\nfrom streamlink.stream import HLSStream\nfrom streamlink.utils import update_scheme\n\nlog = logging.getLogger(__name__)\n\n\nclass CDNBG(Plugin):\n url_re = re.compile(r\"\"\"\n https?://(?:www\\.)?(?:\n tv\\.bnt\\.bg/\\w+(?:/\\w+)?|\n bitelevision\\.com/live|\n nova\\.bg/live|\n kanal3\\.bg/live|\n bgonair\\.bg/tvonline|\n inlife\\.bg|\n mmtvmusic\\.com/live|\n mu-vi\\.tv/LiveStreams/pages/Live\\.aspx|\n videochanel\\.bstv\\.bg|\n live\\.bstv\\.bg|\n bloombergtv.bg/video\n )/?\n \"\"\", re.VERBOSE)\n iframe_re = re.compile(r\"iframe .*?src=\\\"((?:https?(?::|:))?//(?:\\w+\\.)?cdn.bg/live[^\\\"]+)\\\"\", re.DOTALL)\n sdata_re = re.compile(r\"sdata\\.src.*?=.*?(?P<q>[\\\"'])(?P<url>http.*?)(?P=q)\")\n hls_file_re = re.compile(r\"(src|file): (?P<q>[\\\"'])(?P<url>(https?:)?//.+?m3u8.*?)(?P=q)\")\n hls_src_re = re.compile(r\"video src=(?P<url>http[^ ]+m3u8[^ ]*)\")\n\n stream_schema = validate.Schema(\n validate.any(\n validate.all(validate.transform(sdata_re.search), validate.get(\"url\")),\n validate.all(validate.transform(hls_file_re.search), validate.get(\"url\")),\n validate.all(validate.transform(hls_src_re.search), validate.get(\"url\")),\n )\n )\n\n @classmethod\n def can_handle_url(cls, url):\n return cls.url_re.match(url) is not None\n\n def find_iframe(self, res):\n p = urlparse(self.url)\n for url in self.iframe_re.findall(res.text):\n if \"googletagmanager\" not in url:\n url = url.replace(\":\", \":\")\n if url.startswith(\"//\"):\n return \"{0}:{1}\".format(p.scheme, url)\n else:\n return url\n\n def _get_streams(self):\n self.session.http.headers.update({\"User-Agent\": useragents.CHROME})\n res = self.session.http.get(self.url)\n iframe_url = self.find_iframe(res)\n\n if iframe_url:\n log.debug(\"Found iframe: {0}\", iframe_url)\n res = self.session.http.get(iframe_url, headers={\"Referer\": self.url})\n stream_url = update_scheme(self.url, self.stream_schema.validate(res.text))\n log.warning(\"SSL Verification disabled.\")\n return HLSStream.parse_variant_playlist(self.session,\n stream_url,\n verify=False)\n\n\n__plugin__ = CDNBG\n", "path": "src/streamlink/plugins/cdnbg.py"}], "after_files": [{"content": "import logging\nimport re\n\nfrom streamlink.compat import urlparse\nfrom streamlink.plugin import Plugin\nfrom streamlink.plugin.api import useragents\nfrom streamlink.plugin.api import validate\nfrom streamlink.stream import HLSStream\nfrom streamlink.utils import update_scheme\n\nlog = logging.getLogger(__name__)\n\n\nclass CDNBG(Plugin):\n url_re = re.compile(r\"\"\"\n https?://(?:www\\.)?(?:\n tv\\.bnt\\.bg/\\w+(?:/\\w+)?|\n nova\\.bg/live|\n bgonair\\.bg/tvonline|\n mmtvmusic\\.com/live|\n mu-vi\\.tv/LiveStreams/pages/Live\\.aspx|\n live\\.bstv\\.bg|\n bloombergtv.bg/video|\n armymedia.bg|\n chernomore.bg|\n i.cdn.bg/live/\n )/?\n \"\"\", re.VERBOSE)\n iframe_re = re.compile(r\"iframe .*?src=\\\"((?:https?(?::|:))?//(?:\\w+\\.)?cdn.bg/live[^\\\"]+)\\\"\", re.DOTALL)\n sdata_re = re.compile(r\"sdata\\.src.*?=.*?(?P<q>[\\\"'])(?P<url>http.*?)(?P=q)\")\n hls_file_re = re.compile(r\"(src|file): (?P<q>[\\\"'])(?P<url>(https?:)?//.+?m3u8.*?)(?P=q)\")\n hls_src_re = re.compile(r\"video src=(?P<url>http[^ ]+m3u8[^ ]*)\")\n\n stream_schema = validate.Schema(\n validate.any(\n 
validate.all(validate.transform(sdata_re.search), validate.get(\"url\")),\n validate.all(validate.transform(hls_file_re.search), validate.get(\"url\")),\n validate.all(validate.transform(hls_src_re.search), validate.get(\"url\")),\n )\n )\n\n @classmethod\n def can_handle_url(cls, url):\n return cls.url_re.match(url) is not None\n\n def find_iframe(self, url):\n self.session.http.headers.update({\"User-Agent\": useragents.CHROME})\n res = self.session.http.get(self.url)\n p = urlparse(url)\n for iframe_url in self.iframe_re.findall(res.text):\n if \"googletagmanager\" not in iframe_url:\n log.debug(\"Found iframe: {0}\", iframe_url)\n iframe_url = iframe_url.replace(\":\", \":\")\n if iframe_url.startswith(\"//\"):\n return \"{0}:{1}\".format(p.scheme, iframe_url)\n else:\n return iframe_url\n\n def _get_streams(self):\n if \"i.cdn.bg/live/\" in self.url:\n iframe_url = self.url\n else:\n iframe_url = self.find_iframe(self.url)\n\n if iframe_url:\n res = self.session.http.get(iframe_url, headers={\"Referer\": self.url})\n stream_url = update_scheme(self.url, self.stream_schema.validate(res.text))\n log.warning(\"SSL Verification disabled.\")\n return HLSStream.parse_variant_playlist(self.session,\n stream_url,\n verify=False)\n\n\n__plugin__ = CDNBG\n", "path": "src/streamlink/plugins/cdnbg.py"}]} | 1,335 | 670 |
gh_patches_debug_26388 | rasdani/github-patches | git_diff | sunpy__sunpy-3592 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add map.sample_at_coords()
Currently to sample the data in a map at a series of coordinates, one has to do (e.g. see [here](http://docs.sunpy.org/en/stable/generated/gallery/plotting/great_arc_example.html)):
```python
pixels = np.asarray(np.rint(m.world_to_pixel(great_arc.coordinates())), dtype=int)
x = pixels[0, :]
y = pixels[1, :]
intensity_along_arc = m.data[y, x]
```
It would be nice if there was a method `sample_at_coords(coords)` that automatically did this under the hood.
--- END ISSUE ---
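For what it is worth, the helper can be written in a couple of lines on top of the map's WCS. The sketch below uses astropy's `world_to_array_index`, which does the world-to-pixel conversion and integer rounding in one step (nearest-neighbour sampling, much like the manual rounding above); `m` and `great_arc` are the objects from that snippet.

```python
# Sketch of the requested convenience: nearest-neighbour sampling through the map's WCS.
def sample_at_coords(smap, coordinates):
    return smap.data[smap.wcs.world_to_array_index(coordinates)]

intensity_along_arc = sample_at_coords(m, great_arc.coordinates())
```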
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sunpy/map/maputils.py`
Content:
```
1 """
2 This submodule provides utility functions to act on `sunpy.map.GenericMap` instances.
3 """
4 from itertools import chain, product
5
6 import numpy as np
7
8 import astropy.units as u
9 from astropy.coordinates import SkyCoord
10
11 from sunpy.coordinates import Helioprojective
12
13 __all__ = ['all_pixel_indices_from_map', 'all_coordinates_from_map',
14 'map_edges', 'solar_angular_radius', 'contains_full_disk',
15 'is_all_off_disk', 'is_all_on_disk', 'contains_limb',
16 'coordinate_is_on_solar_disk', 'on_disk_bounding_coordinates']
17
18
19 def all_pixel_indices_from_map(smap):
20 """
21 Returns pixel pair indices of every pixel in a map.
22
23 Parameters
24 ----------
25 smap : `~sunpy.map.GenericMap`
26 A SunPy map.
27
28 Returns
29 -------
30 `~numpy.array`
31 A `numpy.array` with the all the pixel indices built from the
32 dimensions of the map.
33 """
34 return np.meshgrid(*[np.arange(v.value) for v in smap.dimensions]) * u.pix
35
36
37 def all_coordinates_from_map(smap):
38 """
39 Returns the coordinates of every pixel in a map.
40
41 Parameters
42 ----------
43 smap : `~sunpy.map.GenericMap`
44 A SunPy map.
45
46 Returns
47 -------
48 `~astropy.coordinates.SkyCoord`
49 An two-dimensional array of sky coordinates in the coordinate
50 system "coordinate_system".
51 """
52 x, y = all_pixel_indices_from_map(smap)
53 return smap.pixel_to_world(x, y)
54
55
56 def map_edges(smap):
57 """
58 Returns the pixel locations of the edges of an input map.
59
60 Parameters
61 ----------
62 smap : `~sunpy.map.GenericMap`
63 A SunPy map.
64
65 Returns
66 -------
67 top, bottom, left_hand_side, right_hand_side : `~astropy.units.Quantity`
68 Returns the pixel locations at the edge of the map;
69 the zeroth, first, second and third tuple values
70 return the top, bottom, left hand side and right
71 hand side pixel locations respectively of the input map.
72 """
73 # Calculate all the edge pixels
74 nx, ny = smap.dimensions.x.value, smap.dimensions.y.value
75 top = list(product(np.arange(nx), [ny - 1])) * u.pix
76 bottom = list(product(np.arange(nx), [0])) * u.pix
77 left_hand_side = list(product([0], np.arange(ny))) * u.pix
78 right_hand_side = list(product([nx - 1], np.arange(ny))) * u.pix
79 return top, bottom, left_hand_side, right_hand_side
80
81
82 @u.quantity_input
83 def solar_angular_radius(coordinates):
84 """
85 Calculates the solar angular radius as seen by the observer.
86
87 The tangent of the angular size of the Sun is equal to the radius
88 of the Sun divided by the distance between the observer and the
89 center of the Sun.
90
91 Parameters
92 ----------
93 coordinates : `~astropy.coordinates.SkyCoord`, `~sunpy.coordinates.frames.Helioprojective`
94 The input coordinate. The coordinate frame must be
95 `~sunpy.coordinates.Helioprojective`.
96
97 Returns
98 -------
99 angle : `~astropy.units.Quantity`
100 The solar angular radius.
101 """
102 return np.arctan(coordinates.rsun / coordinates.observer.radius)
103
104
105 def contains_full_disk(smap):
106 """
107 Checks if a map contains the full disk of the Sun.
108
109 A map contains the full disk of the Sun if the following two
110 conditions are met: (1) all the coordinates at the edge of the map are
111 more than solar angular radius from the center of the Sun and, (2) the
112 map is not all off disk. If both these conditions are met, the
113 function returns `True`. Otherwise, the function returns `False`.
114
115 Parameters
116 ----------
117 smap : `~sunpy.map.GenericMap`
118 A map in helioprojective Cartesian coordinates.
119
120 Returns
121 -------
122 `~bool`
123 Returns `False` if any of the coordinates at the edge of the map
124 are less than one solar radius away from the center of the Sun.
125
126 Notes
127 -----
128 This function checks if the image coordinates include the solar disk.
129 Therefore this function would return `True` for a coronagraph image
130 such as from LASCO/C3 or STEREO/SECCHI COR1 since the solar disk is
131 within the field of the view of the instrument (although no emission
132 from the disk itself is present in the data.)
133 """
134 # Calculate all the edge pixels
135 edges = map_edges(smap)
136 edge_pixels = list(chain.from_iterable([edges[0], edges[1], edges[2], edges[3]]))
137 x = [p[0] for p in edge_pixels] * u.pix
138 y = [p[1] for p in edge_pixels] * u.pix
139
140 # Calculate the edge of the world
141 edge_of_world = smap.pixel_to_world(x, y)
142
143 # Calculate the distance of the edge of the world in solar radii
144 coordinate_angles = np.sqrt(edge_of_world.Tx ** 2 + edge_of_world.Ty ** 2)
145
146 # Test if all the edge pixels are more than one solar radius distant
147 # and that the whole map is not all off disk.
148 return np.all(coordinate_angles > solar_angular_radius(edge_of_world)) and ~is_all_off_disk(smap)
149
150
151 @u.quantity_input
152 def coordinate_is_on_solar_disk(coordinates):
153 """
154 Checks if the helioprojective Cartesian coordinates are on the solar disk.
155
156 The check is performed by comparing the coordinate's angular distance
157 to the angular size of the solar radius. The solar disk is assumed to be
158 a circle i.e., solar oblateness and other effects that cause the solar disk to
159 be non-circular are not taken in to account.
160
161 Parameters
162 ----------
163 coordinates : `~astropy.coordinates.SkyCoord`, `~sunpy.coordinates.frames.Helioprojective`
164 The input coordinate. The coordinate frame must be
165 `~sunpy.coordinates.Helioprojective`.
166
167 Returns
168 -------
169 `~bool`
170 Returns `True` if the coordinate is on disk, `False` otherwise.
171 """
172
173 if not isinstance(coordinates.frame, Helioprojective):
174 raise ValueError('The input coordinate(s) must be in the Helioprojective Cartesian frame.')
175 # Calculate the angle of every pixel from the center of the Sun and compare it the angular
176 # radius of the Sun.
177 return np.sqrt(coordinates.Tx ** 2 + coordinates.Ty ** 2) < solar_angular_radius(coordinates)
178
179
180 def is_all_off_disk(smap):
181 """
182 Checks if none of the coordinates in the `~sunpy.map.GenericMap` are on the solar disk.
183
184 The check is performed by calculating the angle of every pixel from
185 the center of the Sun. If they are all greater than the angular
186 radius of the Sun, then the function returns `True`. Otherwise, the function
187 returns `False`.
188
189 Parameters
190 ----------
191 smap : `~sunpy.map.GenericMap`
192 A map in helioprojective Cartesian coordinates.
193
194 Returns
195 -------
196 `~bool`
197 Returns `True` if all map pixels have an angular radius greater than
198 the angular radius of the Sun.
199
200 Notes
201 -----
202 For coronagraph images such as those from LASCO C2 and C3 the full disk is
203 within the field of view of the instrument, but the solar disk itself is not imaged.
204 For such images this function will return `False`.
205 """
206 return np.all(~coordinate_is_on_solar_disk(all_coordinates_from_map(smap)))
207
208
209 def is_all_on_disk(smap):
210 """
211 Checks if all of the coordinates in the `~sunpy.map.GenericMap` are on the solar disk.
212
213 The check is performed by calculating the angle of every pixel from
214 the center of the Sun. If they are all less than the angular
215 radius of the Sun, then the function returns `True`. Otherwise, the function
216 returns `False`.
217
218 Parameters
219 ----------
220 smap : `~sunpy.map.GenericMap`
221 A map in helioprojective Cartesian coordinates.
222
223 Returns
224 -------
225 `~bool`
226 Returns `True` if all map coordinates have an angular radius less than
227 the angular radius of the Sun.
228 """
229 return np.all(coordinate_is_on_solar_disk(all_coordinates_from_map(smap)))
230
231
232 def contains_limb(smap):
233 """
234 Checks if a map contains any part of the solar limb or equivalently whether
235 the map contains both on-disk and off-disk pixels.
236
237 The check is performed by calculating the angular distance of every pixel from
238 the center of the Sun. If at least one pixel is on disk (less than the solar
239 angular radius) and at least one pixel is off disk (greater than the solar
240 angular distance), the function returns `True`. Otherwise, the function
241 returns `False`.
242
243 Parameters
244 ----------
245 smap : `~sunpy.map.GenericMap`
246 A map in helioprojective Cartesian coordinates.
247
248 Returns
249 -------
250 `~bool`
251 Returns `True` If at least one coordinate of the map is on disk and at
252 least one coordinate of the map is off disk.
253
254 Notes
255 -----
256 For coronagraph images such as those from LASCO C2 and C3 the full disk is
257 within the field of view of the instrument, but the solar disk itself is not imaged.
258 For such images this function will return `True`.
259 """
260 on_disk = coordinate_is_on_solar_disk(all_coordinates_from_map(smap))
261 return np.logical_and(np.any(on_disk), np.any(~on_disk))
262
263
264 def on_disk_bounding_coordinates(smap):
265 """
266 Returns the the bottom left and top right coordinates of the smallest
267 rectangular region that contains all the on disk coordinates of the input map.
268
269 Parameters
270 ----------
271 smap : `~sunpy.map.GenericMap`
272 A map in helioprojective Cartesian coordinates.
273
274 Returns
275 -------
276 `~astropy.coordinates.SkyCoord`
277 A `~astropy.coordinates.SkyCoord` of length 2 such that the
278 first entry is the bottom left coordinate and the second entry is the
279 top right coordinate of the smallest rectangular region that contains
280 all the on-disk pixels in the input map.
281 """
282 # Check that the input map is not all off disk.
283 if is_all_off_disk(smap):
284 raise ValueError("The entire map is off disk.")
285
286 # Get all the coordinates from the input map
287 coordinates = all_coordinates_from_map(smap)
288
289 # Find which coordinates are on the disk
290 on_disk = coordinate_is_on_solar_disk(coordinates)
291 on_disk_coordinates = coordinates[on_disk]
292
293 # The bottom left and top right coordinates that contain
294 # the on disk coordinates.
295 tx = on_disk_coordinates.Tx.value
296 ty = on_disk_coordinates.Ty.value
297 return SkyCoord([np.nanmin(tx), np.nanmax(tx)] * u.arcsec,
298 [np.nanmin(ty), np.nanmax(ty)] * u.arcsec,
299 frame=Helioprojective, observer=smap.observer_coordinate)
300
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/sunpy/map/maputils.py b/sunpy/map/maputils.py
--- a/sunpy/map/maputils.py
+++ b/sunpy/map/maputils.py
@@ -11,9 +11,10 @@
from sunpy.coordinates import Helioprojective
__all__ = ['all_pixel_indices_from_map', 'all_coordinates_from_map',
- 'map_edges', 'solar_angular_radius', 'contains_full_disk',
- 'is_all_off_disk', 'is_all_on_disk', 'contains_limb',
- 'coordinate_is_on_solar_disk', 'on_disk_bounding_coordinates']
+ 'map_edges', 'solar_angular_radius', 'sample_at_coords',
+ 'contains_full_disk', 'is_all_off_disk', 'is_all_on_disk',
+ 'contains_limb', 'coordinate_is_on_solar_disk',
+ 'on_disk_bounding_coordinates']
def all_pixel_indices_from_map(smap):
@@ -102,6 +103,27 @@
return np.arctan(coordinates.rsun / coordinates.observer.radius)
+def sample_at_coords(smap, coordinates):
+ """
+ Samples the data in a map at given series of coordinates.
+ Uses nearest-neighbor interpolation of coordinates in map, as
+ it effectively uses array indexing.
+
+ Parameters
+ ----------
+ smap : `~sunpy.map.GenericMap`
+ A SunPy map.
+ coordinates : `~astropy.coordinates.SkyCoord`
+ Input coordinates.
+ Returns
+ -------
+ `numpy.array`
+ A `numpy.array` corresponding to the data obtained from the map,
+ at the input coordinates.
+ """
+ return smap.data[smap.wcs.world_to_array_index(coordinates)]
+
+
def contains_full_disk(smap):
"""
Checks if a map contains the full disk of the Sun.
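For a sense of how the `sample_at_coords` helper added in the diff above would be exercised, here is a minimal sketch; the bundled sample AIA image and the arcsecond offsets are illustrative assumptions, not part of the patch, and the lookup line simply repeats the helper's own body.

```python
import astropy.units as u
from astropy.coordinates import SkyCoord
import sunpy.map
import sunpy.data.sample  # assumes the sunpy sample data has been downloaded

# Load a full-disk AIA image and pick two illustrative points on it.
smap = sunpy.map.Map(sunpy.data.sample.AIA_171_IMAGE)
coords = SkyCoord([100, -200] * u.arcsec, [50, 300] * u.arcsec,
                  frame=smap.coordinate_frame)

# Nearest-pixel lookup via the WCS, equivalent to sample_at_coords(smap, coords).
values = smap.data[smap.wcs.world_to_array_index(coords)]
print(values)
```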
| {"golden_diff": "diff --git a/sunpy/map/maputils.py b/sunpy/map/maputils.py\n--- a/sunpy/map/maputils.py\n+++ b/sunpy/map/maputils.py\n@@ -11,9 +11,10 @@\n from sunpy.coordinates import Helioprojective\n \n __all__ = ['all_pixel_indices_from_map', 'all_coordinates_from_map',\n- 'map_edges', 'solar_angular_radius', 'contains_full_disk',\n- 'is_all_off_disk', 'is_all_on_disk', 'contains_limb',\n- 'coordinate_is_on_solar_disk', 'on_disk_bounding_coordinates']\n+ 'map_edges', 'solar_angular_radius', 'sample_at_coords',\n+ 'contains_full_disk', 'is_all_off_disk', 'is_all_on_disk',\n+ 'contains_limb', 'coordinate_is_on_solar_disk',\n+ 'on_disk_bounding_coordinates']\n \n \n def all_pixel_indices_from_map(smap):\n@@ -102,6 +103,27 @@\n return np.arctan(coordinates.rsun / coordinates.observer.radius)\n \n \n+def sample_at_coords(smap, coordinates):\n+ \"\"\"\n+ Samples the data in a map at given series of coordinates.\n+ Uses nearest-neighbor interpolation of coordinates in map, as\n+ it effectively uses array indexing.\n+\n+ Parameters\n+ ----------\n+ smap : `~sunpy.map.GenericMap`\n+ A SunPy map.\n+ coordinates : `~astropy.coordinates.SkyCoord`\n+ Input coordinates.\n+ Returns\n+ -------\n+ `numpy.array`\n+ A `numpy.array` corresponding to the data obtained from the map,\n+ at the input coordinates.\n+ \"\"\"\n+ return smap.data[smap.wcs.world_to_array_index(coordinates)]\n+\n+\n def contains_full_disk(smap):\n \"\"\"\n Checks if a map contains the full disk of the Sun.\n", "issue": "Add map.sample_at_coords()\nCurrently to sample the data in a map at a series of coordinates, one has to do (e.g. see [here](http://docs.sunpy.org/en/stable/generated/gallery/plotting/great_arc_example.html)):\r\n```python\r\npixels = np.asarray(np.rint(m.world_to_pixel(great_arc.coordinates())), dtype=int)\r\nx = pixels[0, :]\r\ny = pixels[1, :]\r\nintensity_along_arc = m.data[y, x]\r\n```\r\n\r\nIt would be nice if there was a method `sample_at_coords(coords)` that automatically did this under the hood.\n", "before_files": [{"content": "\"\"\"\nThis submodule provides utility functions to act on `sunpy.map.GenericMap` instances.\n\"\"\"\nfrom itertools import chain, product\n\nimport numpy as np\n\nimport astropy.units as u\nfrom astropy.coordinates import SkyCoord\n\nfrom sunpy.coordinates import Helioprojective\n\n__all__ = ['all_pixel_indices_from_map', 'all_coordinates_from_map',\n 'map_edges', 'solar_angular_radius', 'contains_full_disk',\n 'is_all_off_disk', 'is_all_on_disk', 'contains_limb',\n 'coordinate_is_on_solar_disk', 'on_disk_bounding_coordinates']\n\n\ndef all_pixel_indices_from_map(smap):\n \"\"\"\n Returns pixel pair indices of every pixel in a map.\n\n Parameters\n ----------\n smap : `~sunpy.map.GenericMap`\n A SunPy map.\n\n Returns\n -------\n `~numpy.array`\n A `numpy.array` with the all the pixel indices built from the\n dimensions of the map.\n \"\"\"\n return np.meshgrid(*[np.arange(v.value) for v in smap.dimensions]) * u.pix\n\n\ndef all_coordinates_from_map(smap):\n \"\"\"\n Returns the coordinates of every pixel in a map.\n\n Parameters\n ----------\n smap : `~sunpy.map.GenericMap`\n A SunPy map.\n\n Returns\n -------\n `~astropy.coordinates.SkyCoord`\n An two-dimensional array of sky coordinates in the coordinate\n system \"coordinate_system\".\n \"\"\"\n x, y = all_pixel_indices_from_map(smap)\n return smap.pixel_to_world(x, y)\n\n\ndef map_edges(smap):\n \"\"\"\n Returns the pixel locations of the edges of an input map.\n\n Parameters\n ----------\n smap : 
`~sunpy.map.GenericMap`\n A SunPy map.\n\n Returns\n -------\n top, bottom, left_hand_side, right_hand_side : `~astropy.units.Quantity`\n Returns the pixel locations at the edge of the map;\n the zeroth, first, second and third tuple values\n return the top, bottom, left hand side and right\n hand side pixel locations respectively of the input map.\n \"\"\"\n # Calculate all the edge pixels\n nx, ny = smap.dimensions.x.value, smap.dimensions.y.value\n top = list(product(np.arange(nx), [ny - 1])) * u.pix\n bottom = list(product(np.arange(nx), [0])) * u.pix\n left_hand_side = list(product([0], np.arange(ny))) * u.pix\n right_hand_side = list(product([nx - 1], np.arange(ny))) * u.pix\n return top, bottom, left_hand_side, right_hand_side\n\n\[email protected]_input\ndef solar_angular_radius(coordinates):\n \"\"\"\n Calculates the solar angular radius as seen by the observer.\n\n The tangent of the angular size of the Sun is equal to the radius\n of the Sun divided by the distance between the observer and the\n center of the Sun.\n\n Parameters\n ----------\n coordinates : `~astropy.coordinates.SkyCoord`, `~sunpy.coordinates.frames.Helioprojective`\n The input coordinate. The coordinate frame must be\n `~sunpy.coordinates.Helioprojective`.\n\n Returns\n -------\n angle : `~astropy.units.Quantity`\n The solar angular radius.\n \"\"\"\n return np.arctan(coordinates.rsun / coordinates.observer.radius)\n\n\ndef contains_full_disk(smap):\n \"\"\"\n Checks if a map contains the full disk of the Sun.\n\n A map contains the full disk of the Sun if the following two\n conditions are met: (1) all the coordinates at the edge of the map are\n more than solar angular radius from the center of the Sun and, (2) the\n map is not all off disk. If both these conditions are met, the\n function returns `True`. Otherwise, the function returns `False`.\n\n Parameters\n ----------\n smap : `~sunpy.map.GenericMap`\n A map in helioprojective Cartesian coordinates.\n\n Returns\n -------\n `~bool`\n Returns `False` if any of the coordinates at the edge of the map\n are less than one solar radius away from the center of the Sun.\n\n Notes\n -----\n This function checks if the image coordinates include the solar disk.\n Therefore this function would return `True` for a coronagraph image\n such as from LASCO/C3 or STEREO/SECCHI COR1 since the solar disk is\n within the field of the view of the instrument (although no emission\n from the disk itself is present in the data.)\n \"\"\"\n # Calculate all the edge pixels\n edges = map_edges(smap)\n edge_pixels = list(chain.from_iterable([edges[0], edges[1], edges[2], edges[3]]))\n x = [p[0] for p in edge_pixels] * u.pix\n y = [p[1] for p in edge_pixels] * u.pix\n\n # Calculate the edge of the world\n edge_of_world = smap.pixel_to_world(x, y)\n\n # Calculate the distance of the edge of the world in solar radii\n coordinate_angles = np.sqrt(edge_of_world.Tx ** 2 + edge_of_world.Ty ** 2)\n\n # Test if all the edge pixels are more than one solar radius distant\n # and that the whole map is not all off disk.\n return np.all(coordinate_angles > solar_angular_radius(edge_of_world)) and ~is_all_off_disk(smap)\n\n\[email protected]_input\ndef coordinate_is_on_solar_disk(coordinates):\n \"\"\"\n Checks if the helioprojective Cartesian coordinates are on the solar disk.\n\n The check is performed by comparing the coordinate's angular distance\n to the angular size of the solar radius. 
The solar disk is assumed to be\n a circle i.e., solar oblateness and other effects that cause the solar disk to\n be non-circular are not taken in to account.\n\n Parameters\n ----------\n coordinates : `~astropy.coordinates.SkyCoord`, `~sunpy.coordinates.frames.Helioprojective`\n The input coordinate. The coordinate frame must be\n `~sunpy.coordinates.Helioprojective`.\n\n Returns\n -------\n `~bool`\n Returns `True` if the coordinate is on disk, `False` otherwise.\n \"\"\"\n\n if not isinstance(coordinates.frame, Helioprojective):\n raise ValueError('The input coordinate(s) must be in the Helioprojective Cartesian frame.')\n # Calculate the angle of every pixel from the center of the Sun and compare it the angular\n # radius of the Sun.\n return np.sqrt(coordinates.Tx ** 2 + coordinates.Ty ** 2) < solar_angular_radius(coordinates)\n\n\ndef is_all_off_disk(smap):\n \"\"\"\n Checks if none of the coordinates in the `~sunpy.map.GenericMap` are on the solar disk.\n\n The check is performed by calculating the angle of every pixel from\n the center of the Sun. If they are all greater than the angular\n radius of the Sun, then the function returns `True`. Otherwise, the function\n returns `False`.\n\n Parameters\n ----------\n smap : `~sunpy.map.GenericMap`\n A map in helioprojective Cartesian coordinates.\n\n Returns\n -------\n `~bool`\n Returns `True` if all map pixels have an angular radius greater than\n the angular radius of the Sun.\n\n Notes\n -----\n For coronagraph images such as those from LASCO C2 and C3 the full disk is\n within the field of view of the instrument, but the solar disk itself is not imaged.\n For such images this function will return `False`.\n \"\"\"\n return np.all(~coordinate_is_on_solar_disk(all_coordinates_from_map(smap)))\n\n\ndef is_all_on_disk(smap):\n \"\"\"\n Checks if all of the coordinates in the `~sunpy.map.GenericMap` are on the solar disk.\n\n The check is performed by calculating the angle of every pixel from\n the center of the Sun. If they are all less than the angular\n radius of the Sun, then the function returns `True`. Otherwise, the function\n returns `False`.\n\n Parameters\n ----------\n smap : `~sunpy.map.GenericMap`\n A map in helioprojective Cartesian coordinates.\n\n Returns\n -------\n `~bool`\n Returns `True` if all map coordinates have an angular radius less than\n the angular radius of the Sun.\n \"\"\"\n return np.all(coordinate_is_on_solar_disk(all_coordinates_from_map(smap)))\n\n\ndef contains_limb(smap):\n \"\"\"\n Checks if a map contains any part of the solar limb or equivalently whether\n the map contains both on-disk and off-disk pixels.\n\n The check is performed by calculating the angular distance of every pixel from\n the center of the Sun. If at least one pixel is on disk (less than the solar\n angular radius) and at least one pixel is off disk (greater than the solar\n angular distance), the function returns `True`. 
Otherwise, the function\n returns `False`.\n\n Parameters\n ----------\n smap : `~sunpy.map.GenericMap`\n A map in helioprojective Cartesian coordinates.\n\n Returns\n -------\n `~bool`\n Returns `True` If at least one coordinate of the map is on disk and at\n least one coordinate of the map is off disk.\n\n Notes\n -----\n For coronagraph images such as those from LASCO C2 and C3 the full disk is\n within the field of view of the instrument, but the solar disk itself is not imaged.\n For such images this function will return `True`.\n \"\"\"\n on_disk = coordinate_is_on_solar_disk(all_coordinates_from_map(smap))\n return np.logical_and(np.any(on_disk), np.any(~on_disk))\n\n\ndef on_disk_bounding_coordinates(smap):\n \"\"\"\n Returns the the bottom left and top right coordinates of the smallest\n rectangular region that contains all the on disk coordinates of the input map.\n\n Parameters\n ----------\n smap : `~sunpy.map.GenericMap`\n A map in helioprojective Cartesian coordinates.\n\n Returns\n -------\n `~astropy.coordinates.SkyCoord`\n A `~astropy.coordinates.SkyCoord` of length 2 such that the\n first entry is the bottom left coordinate and the second entry is the\n top right coordinate of the smallest rectangular region that contains\n all the on-disk pixels in the input map.\n \"\"\"\n # Check that the input map is not all off disk.\n if is_all_off_disk(smap):\n raise ValueError(\"The entire map is off disk.\")\n\n # Get all the coordinates from the input map\n coordinates = all_coordinates_from_map(smap)\n\n # Find which coordinates are on the disk\n on_disk = coordinate_is_on_solar_disk(coordinates)\n on_disk_coordinates = coordinates[on_disk]\n\n # The bottom left and top right coordinates that contain\n # the on disk coordinates.\n tx = on_disk_coordinates.Tx.value\n ty = on_disk_coordinates.Ty.value\n return SkyCoord([np.nanmin(tx), np.nanmax(tx)] * u.arcsec,\n [np.nanmin(ty), np.nanmax(ty)] * u.arcsec,\n frame=Helioprojective, observer=smap.observer_coordinate)\n", "path": "sunpy/map/maputils.py"}], "after_files": [{"content": "\"\"\"\nThis submodule provides utility functions to act on `sunpy.map.GenericMap` instances.\n\"\"\"\nfrom itertools import chain, product\n\nimport numpy as np\n\nimport astropy.units as u\nfrom astropy.coordinates import SkyCoord\n\nfrom sunpy.coordinates import Helioprojective\n\n__all__ = ['all_pixel_indices_from_map', 'all_coordinates_from_map',\n 'map_edges', 'solar_angular_radius', 'sample_at_coords',\n 'contains_full_disk', 'is_all_off_disk', 'is_all_on_disk',\n 'contains_limb', 'coordinate_is_on_solar_disk',\n 'on_disk_bounding_coordinates']\n\n\ndef all_pixel_indices_from_map(smap):\n \"\"\"\n Returns pixel pair indices of every pixel in a map.\n\n Parameters\n ----------\n smap : `~sunpy.map.GenericMap`\n A SunPy map.\n\n Returns\n -------\n `~numpy.array`\n A `numpy.array` with the all the pixel indices built from the\n dimensions of the map.\n \"\"\"\n return np.meshgrid(*[np.arange(v.value) for v in smap.dimensions]) * u.pix\n\n\ndef all_coordinates_from_map(smap):\n \"\"\"\n Returns the coordinates of every pixel in a map.\n\n Parameters\n ----------\n smap : `~sunpy.map.GenericMap`\n A SunPy map.\n\n Returns\n -------\n `~astropy.coordinates.SkyCoord`\n An two-dimensional array of sky coordinates in the coordinate\n system \"coordinate_system\".\n \"\"\"\n x, y = all_pixel_indices_from_map(smap)\n return smap.pixel_to_world(x, y)\n\n\ndef map_edges(smap):\n \"\"\"\n Returns the pixel locations of the edges of an input map.\n\n 
Parameters\n ----------\n smap : `~sunpy.map.GenericMap`\n A SunPy map.\n\n Returns\n -------\n top, bottom, left_hand_side, right_hand_side : `~astropy.units.Quantity`\n Returns the pixel locations at the edge of the map;\n the zeroth, first, second and third tuple values\n return the top, bottom, left hand side and right\n hand side pixel locations respectively of the input map.\n \"\"\"\n # Calculate all the edge pixels\n nx, ny = smap.dimensions.x.value, smap.dimensions.y.value\n top = list(product(np.arange(nx), [ny - 1])) * u.pix\n bottom = list(product(np.arange(nx), [0])) * u.pix\n left_hand_side = list(product([0], np.arange(ny))) * u.pix\n right_hand_side = list(product([nx - 1], np.arange(ny))) * u.pix\n return top, bottom, left_hand_side, right_hand_side\n\n\[email protected]_input\ndef solar_angular_radius(coordinates):\n \"\"\"\n Calculates the solar angular radius as seen by the observer.\n\n The tangent of the angular size of the Sun is equal to the radius\n of the Sun divided by the distance between the observer and the\n center of the Sun.\n\n Parameters\n ----------\n coordinates : `~astropy.coordinates.SkyCoord`, `~sunpy.coordinates.frames.Helioprojective`\n The input coordinate. The coordinate frame must be\n `~sunpy.coordinates.Helioprojective`.\n\n Returns\n -------\n angle : `~astropy.units.Quantity`\n The solar angular radius.\n \"\"\"\n return np.arctan(coordinates.rsun / coordinates.observer.radius)\n\n\ndef sample_at_coords(smap, coordinates):\n \"\"\"\n Samples the data in a map at given series of coordinates.\n Uses nearest-neighbor interpolation of coordinates in map, as\n it effectively uses array indexing.\n\n Parameters\n ----------\n smap : `~sunpy.map.GenericMap`\n A SunPy map.\n coordinates : `~astropy.coordinates.SkyCoord`\n Input coordinates.\n Returns\n -------\n `numpy.array`\n A `numpy.array` corresponding to the data obtained from the map,\n at the input coordinates.\n \"\"\"\n return smap.data[smap.wcs.world_to_array_index(coordinates)]\n\n\ndef contains_full_disk(smap):\n \"\"\"\n Checks if a map contains the full disk of the Sun.\n\n A map contains the full disk of the Sun if the following two\n conditions are met: (1) all the coordinates at the edge of the map are\n more than solar angular radius from the center of the Sun and, (2) the\n map is not all off disk. If both these conditions are met, the\n function returns `True`. 
Otherwise, the function returns `False`.\n\n Parameters\n ----------\n smap : `~sunpy.map.GenericMap`\n A map in helioprojective Cartesian coordinates.\n\n Returns\n -------\n `~bool`\n Returns `False` if any of the coordinates at the edge of the map\n are less than one solar radius away from the center of the Sun.\n\n Notes\n -----\n This function checks if the image coordinates include the solar disk.\n Therefore this function would return `True` for a coronagraph image\n such as from LASCO/C3 or STEREO/SECCHI COR1 since the solar disk is\n within the field of the view of the instrument (although no emission\n from the disk itself is present in the data.)\n \"\"\"\n # Calculate all the edge pixels\n edges = map_edges(smap)\n edge_pixels = list(chain.from_iterable([edges[0], edges[1], edges[2], edges[3]]))\n x = [p[0] for p in edge_pixels] * u.pix\n y = [p[1] for p in edge_pixels] * u.pix\n\n # Calculate the edge of the world\n edge_of_world = smap.pixel_to_world(x, y)\n\n # Calculate the distance of the edge of the world in solar radii\n coordinate_angles = np.sqrt(edge_of_world.Tx ** 2 + edge_of_world.Ty ** 2)\n\n # Test if all the edge pixels are more than one solar radius distant\n # and that the whole map is not all off disk.\n return np.all(coordinate_angles > solar_angular_radius(edge_of_world)) and ~is_all_off_disk(smap)\n\n\[email protected]_input\ndef coordinate_is_on_solar_disk(coordinates):\n \"\"\"\n Checks if the helioprojective Cartesian coordinates are on the solar disk.\n\n The check is performed by comparing the coordinate's angular distance\n to the angular size of the solar radius. The solar disk is assumed to be\n a circle i.e., solar oblateness and other effects that cause the solar disk to\n be non-circular are not taken in to account.\n\n Parameters\n ----------\n coordinates : `~astropy.coordinates.SkyCoord`, `~sunpy.coordinates.frames.Helioprojective`\n The input coordinate. The coordinate frame must be\n `~sunpy.coordinates.Helioprojective`.\n\n Returns\n -------\n `~bool`\n Returns `True` if the coordinate is on disk, `False` otherwise.\n \"\"\"\n\n if not isinstance(coordinates.frame, Helioprojective):\n raise ValueError('The input coordinate(s) must be in the Helioprojective Cartesian frame.')\n # Calculate the angle of every pixel from the center of the Sun and compare it the angular\n # radius of the Sun.\n return np.sqrt(coordinates.Tx ** 2 + coordinates.Ty ** 2) < solar_angular_radius(coordinates)\n\n\ndef is_all_off_disk(smap):\n \"\"\"\n Checks if none of the coordinates in the `~sunpy.map.GenericMap` are on the solar disk.\n\n The check is performed by calculating the angle of every pixel from\n the center of the Sun. If they are all greater than the angular\n radius of the Sun, then the function returns `True`. 
Otherwise, the function\n returns `False`.\n\n Parameters\n ----------\n smap : `~sunpy.map.GenericMap`\n A map in helioprojective Cartesian coordinates.\n\n Returns\n -------\n `~bool`\n Returns `True` if all map pixels have an angular radius greater than\n the angular radius of the Sun.\n\n Notes\n -----\n For coronagraph images such as those from LASCO C2 and C3 the full disk is\n within the field of view of the instrument, but the solar disk itself is not imaged.\n For such images this function will return `False`.\n \"\"\"\n return np.all(~coordinate_is_on_solar_disk(all_coordinates_from_map(smap)))\n\n\ndef is_all_on_disk(smap):\n \"\"\"\n Checks if all of the coordinates in the `~sunpy.map.GenericMap` are on the solar disk.\n\n The check is performed by calculating the angle of every pixel from\n the center of the Sun. If they are all less than the angular\n radius of the Sun, then the function returns `True`. Otherwise, the function\n returns `False`.\n\n Parameters\n ----------\n smap : `~sunpy.map.GenericMap`\n A map in helioprojective Cartesian coordinates.\n\n Returns\n -------\n `~bool`\n Returns `True` if all map coordinates have an angular radius less than\n the angular radius of the Sun.\n \"\"\"\n return np.all(coordinate_is_on_solar_disk(all_coordinates_from_map(smap)))\n\n\ndef contains_limb(smap):\n \"\"\"\n Checks if a map contains any part of the solar limb or equivalently whether\n the map contains both on-disk and off-disk pixels.\n\n The check is performed by calculating the angular distance of every pixel from\n the center of the Sun. If at least one pixel is on disk (less than the solar\n angular radius) and at least one pixel is off disk (greater than the solar\n angular distance), the function returns `True`. Otherwise, the function\n returns `False`.\n\n Parameters\n ----------\n smap : `~sunpy.map.GenericMap`\n A map in helioprojective Cartesian coordinates.\n\n Returns\n -------\n `~bool`\n Returns `True` If at least one coordinate of the map is on disk and at\n least one coordinate of the map is off disk.\n\n Notes\n -----\n For coronagraph images such as those from LASCO C2 and C3 the full disk is\n within the field of view of the instrument, but the solar disk itself is not imaged.\n For such images this function will return `True`.\n \"\"\"\n on_disk = coordinate_is_on_solar_disk(all_coordinates_from_map(smap))\n return np.logical_and(np.any(on_disk), np.any(~on_disk))\n\n\ndef on_disk_bounding_coordinates(smap):\n \"\"\"\n Returns the the bottom left and top right coordinates of the smallest\n rectangular region that contains all the on disk coordinates of the input map.\n\n Parameters\n ----------\n smap : `~sunpy.map.GenericMap`\n A map in helioprojective Cartesian coordinates.\n\n Returns\n -------\n `~astropy.coordinates.SkyCoord`\n A `~astropy.coordinates.SkyCoord` of length 2 such that the\n first entry is the bottom left coordinate and the second entry is the\n top right coordinate of the smallest rectangular region that contains\n all the on-disk pixels in the input map.\n \"\"\"\n # Check that the input map is not all off disk.\n if is_all_off_disk(smap):\n raise ValueError(\"The entire map is off disk.\")\n\n # Get all the coordinates from the input map\n coordinates = all_coordinates_from_map(smap)\n\n # Find which coordinates are on the disk\n on_disk = coordinate_is_on_solar_disk(coordinates)\n on_disk_coordinates = coordinates[on_disk]\n\n # The bottom left and top right coordinates that contain\n # the on disk coordinates.\n tx = 
on_disk_coordinates.Tx.value\n ty = on_disk_coordinates.Ty.value\n return SkyCoord([np.nanmin(tx), np.nanmax(tx)] * u.arcsec,\n [np.nanmin(ty), np.nanmax(ty)] * u.arcsec,\n frame=Helioprojective, observer=smap.observer_coordinate)\n", "path": "sunpy/map/maputils.py"}]} | 3,658 | 410 |
gh_patches_debug_9547 | rasdani/github-patches | git_diff | opensearch-project__opensearch-build-537 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] BWC test suite does not run on Jenkins
**Describe the bug**
BWC test job fails to run on Jenkins due to a misconfigured directory used to kick off tests. 
```
Running ./bundle-workflow/src/run_bwc_test.py --s3-bucket <artifact_s3_bucket> --opensearch-version 1.1.0 --build-id 163 --architecture x64 --test-run-id 6 ...
2021-09-17 21:50:08 INFO Switching to temporary work_dir: /tmp/tmp70l5b7_e
Traceback (most recent call last):
File "/var/jenkins/workspace/bwc-test/bundle-workflow/src/run_bwc_test.py", line 31, in <module>
sys.exit(main())
File "/var/jenkins/workspace/bwc-test/bundle-workflow/src/run_bwc_test.py", line 26, in main
args.s3_bucket, args.build_id, args.opensearch_version, args.architecture, cur_dir)
File "/var/jenkins/workspace/bwc-test/bundle-workflow/src/manifests/bundle_manifest.py", line 85, in from_s3
S3Bucket(bucket_name).download_file(manifest_s3_path, work_dir)
File "/var/jenkins/workspace/bwc-test/bundle-workflow/src/aws/s3_bucket.py", line 99, in download_file
local_dir = Path(dest)
File "/usr/lib64/python3.7/pathlib.py", line 1027, in __new__
self = cls._from_parts(args, init=False)
File "/usr/lib64/python3.7/pathlib.py", line 674, in _from_parts
drv, root, parts = self._parse_args(args)
File "/usr/lib64/python3.7/pathlib.py", line 658, in _parse_args
a = os.fspath(a)
TypeError: expected str, bytes or os.PathLike object, not NoneType
```
**To Reproduce**
Run `/bundle-workflow/src/test.sh bwc-test --s3-bucket <artifact_s3_bucket> --opensearch-version 1.1.0 --build-id 163 --architecture x64 --test-run-id 6`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bundle-workflow/src/manifests/bundle_manifest.py`
Content:
```
1 # SPDX-License-Identifier: Apache-2.0
2 #
3 # The OpenSearch Contributors require contributions made to
4 # this file be licensed under the Apache-2.0 license or a
5 # compatible open source license.
6
7 import os
8
9 from aws.s3_bucket import S3Bucket
10 from manifests.manifest import Manifest
11
12
13 class BundleManifest(Manifest):
14 """
15 A BundleManifest is an immutable view of the outputs from a assemble step
16 The manifest contains information about the bundle that was built (in the `assemble` section),
17 and the components that made up the bundle in the `components` section.
18
19 The format for schema version 1.0 is:
20 schema-version: "1.0"
21 build:
22 name: string
23 version: string
24 architecture: x64 or arm64
25 location: /relative/path/to/tarball
26 components:
27 - name: string
28 repository: URL of git repository
29 ref: git ref that was built (sha, branch, or tag)
30 commit_id: The actual git commit ID that was built (i.e. the resolved "ref")
31 location: /relative/path/to/artifact
32 """
33
34 SCHEMA = {
35 "build": {
36 "required": True,
37 "type": "dict",
38 "schema": {
39 "architecture": {"required": True, "type": "string"},
40 "id": {"required": True, "type": "string"},
41 "location": {"required": True, "type": "string"},
42 "name": {"required": True, "type": "string"},
43 "version": {"required": True, "type": "string"},
44 },
45 },
46 "schema-version": {"required": True, "type": "string", "allowed": ["1.0"]},
47 "components": {
48 "required": True,
49 "type": "list",
50 "schema": {
51 "type": "dict",
52 "schema": {
53 "commit_id": {"required": True, "type": "string"},
54 "location": {"required": True, "type": "string"},
55 "name": {"required": True, "type": "string"},
56 "ref": {"required": True, "type": "string"},
57 "repository": {"required": True, "type": "string"},
58 },
59 },
60 },
61 }
62
63 def __init__(self, data):
64 super().__init__(data)
65 self.build = self.Build(data["build"])
66 self.components = list(
67 map(lambda entry: self.Component(entry), data["components"])
68 )
69
70 def __to_dict__(self):
71 return {
72 "schema-version": "1.0",
73 "build": self.build.__to_dict__(),
74 "components": list(
75 map(lambda component: component.__to_dict__(), self.components)
76 ),
77 }
78
79 @staticmethod
80 def from_s3(bucket_name, build_id, opensearch_version, architecture, work_dir=None):
81 work_dir = work_dir if not None else str(os.getcwd())
82 manifest_s3_path = BundleManifest.get_bundle_manifest_relative_location(
83 build_id, opensearch_version, architecture
84 )
85 S3Bucket(bucket_name).download_file(manifest_s3_path, work_dir)
86 with open("manifest.yml", "r") as file:
87 bundle_manifest = BundleManifest.from_file(file)
88 os.remove(os.path.realpath(os.path.join(work_dir, "manifest.yml")))
89 return bundle_manifest
90
91 @staticmethod
92 def get_tarball_relative_location(build_id, opensearch_version, architecture):
93 return f"bundles/{opensearch_version}/{build_id}/{architecture}/opensearch-{opensearch_version}-linux-{architecture}.tar.gz"
94
95 @staticmethod
96 def get_tarball_name(opensearch_version, architecture):
97 return f"opensearch-{opensearch_version}-linux-{architecture}.tar.gz"
98
99 @staticmethod
100 def get_bundle_manifest_relative_location(
101 build_id, opensearch_version, architecture
102 ):
103 return f"bundles/{opensearch_version}/{build_id}/{architecture}/manifest.yml"
104
105 class Build:
106 def __init__(self, data):
107 self.name = data["name"]
108 self.version = data["version"]
109 self.architecture = data["architecture"]
110 self.location = data["location"]
111 self.id = data["id"]
112
113 def __to_dict__(self):
114 return {
115 "name": self.name,
116 "version": self.version,
117 "architecture": self.architecture,
118 "location": self.location,
119 "id": self.id,
120 }
121
122 class Component:
123 def __init__(self, data):
124 self.name = data["name"]
125 self.repository = data["repository"]
126 self.ref = data["ref"]
127 self.commit_id = data["commit_id"]
128 self.location = data["location"]
129
130 def __to_dict__(self):
131 return {
132 "name": self.name,
133 "repository": self.repository,
134 "ref": self.ref,
135 "commit_id": self.commit_id,
136 "location": self.location,
137 }
138
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/bundle-workflow/src/manifests/bundle_manifest.py b/bundle-workflow/src/manifests/bundle_manifest.py
--- a/bundle-workflow/src/manifests/bundle_manifest.py
+++ b/bundle-workflow/src/manifests/bundle_manifest.py
@@ -83,8 +83,7 @@
build_id, opensearch_version, architecture
)
S3Bucket(bucket_name).download_file(manifest_s3_path, work_dir)
- with open("manifest.yml", "r") as file:
- bundle_manifest = BundleManifest.from_file(file)
+ bundle_manifest = BundleManifest.from_path(os.path.join(work_dir, 'manifest.yml'))
os.remove(os.path.realpath(os.path.join(work_dir, "manifest.yml")))
return bundle_manifest
| {"golden_diff": "diff --git a/bundle-workflow/src/manifests/bundle_manifest.py b/bundle-workflow/src/manifests/bundle_manifest.py\n--- a/bundle-workflow/src/manifests/bundle_manifest.py\n+++ b/bundle-workflow/src/manifests/bundle_manifest.py\n@@ -83,8 +83,7 @@\n build_id, opensearch_version, architecture\n )\n S3Bucket(bucket_name).download_file(manifest_s3_path, work_dir)\n- with open(\"manifest.yml\", \"r\") as file:\n- bundle_manifest = BundleManifest.from_file(file)\n+ bundle_manifest = BundleManifest.from_path(os.path.join(work_dir, 'manifest.yml'))\n os.remove(os.path.realpath(os.path.join(work_dir, \"manifest.yml\")))\n return bundle_manifest\n", "issue": "[BUG] BWC test suite does not run on Jenkins\n**Describe the bug**\r\nBWC test job fails to run Jenkins due to misconfigured directory to kick off tests. \r\n\r\n```\r\nRunning ./bundle-workflow/src/run_bwc_test.py --s3-bucket <artifact_s3_bucket> --opensearch-version 1.1.0 --build-id 163 --architecture x64 --test-run-id 6 ...\r\n2021-09-17 21:50:08 INFO Switching to temporary work_dir: /tmp/tmp70l5b7_e\r\nTraceback (most recent call last):\r\n File \"/var/jenkins/workspace/bwc-test/bundle-workflow/src/run_bwc_test.py\", line 31, in <module>\r\n sys.exit(main())\r\n File \"/var/jenkins/workspace/bwc-test/bundle-workflow/src/run_bwc_test.py\", line 26, in main\r\n args.s3_bucket, args.build_id, args.opensearch_version, args.architecture, cur_dir)\r\n File \"/var/jenkins/workspace/bwc-test/bundle-workflow/src/manifests/bundle_manifest.py\", line 85, in from_s3\r\n S3Bucket(bucket_name).download_file(manifest_s3_path, work_dir)\r\n File \"/var/jenkins/workspace/bwc-test/bundle-workflow/src/aws/s3_bucket.py\", line 99, in download_file\r\n local_dir = Path(dest)\r\n File \"/usr/lib64/python3.7/pathlib.py\", line 1027, in __new__\r\n self = cls._from_parts(args, init=False)\r\n File \"/usr/lib64/python3.7/pathlib.py\", line 674, in _from_parts\r\n drv, root, parts = self._parse_args(args)\r\n File \"/usr/lib64/python3.7/pathlib.py\", line 658, in _parse_args\r\n a = os.fspath(a)\r\nTypeError: expected str, bytes or os.PathLike object, not NoneType\r\n```\r\n\r\n**To Reproduce**\r\nRun `/bundle-workflow/src/test.sh bwc-test --s3-bucket <artifact_s3_bucket> --opensearch-version 1.1.0 --build-id 163 --architecture x64 --test-run-id 6`\r\n\n", "before_files": [{"content": "# SPDX-License-Identifier: Apache-2.0\n#\n# The OpenSearch Contributors require contributions made to\n# this file be licensed under the Apache-2.0 license or a\n# compatible open source license.\n\nimport os\n\nfrom aws.s3_bucket import S3Bucket\nfrom manifests.manifest import Manifest\n\n\nclass BundleManifest(Manifest):\n \"\"\"\n A BundleManifest is an immutable view of the outputs from a assemble step\n The manifest contains information about the bundle that was built (in the `assemble` section),\n and the components that made up the bundle in the `components` section.\n\n The format for schema version 1.0 is:\n schema-version: \"1.0\"\n build:\n name: string\n version: string\n architecture: x64 or arm64\n location: /relative/path/to/tarball\n components:\n - name: string\n repository: URL of git repository\n ref: git ref that was built (sha, branch, or tag)\n commit_id: The actual git commit ID that was built (i.e. 
the resolved \"ref\")\n location: /relative/path/to/artifact\n \"\"\"\n\n SCHEMA = {\n \"build\": {\n \"required\": True,\n \"type\": \"dict\",\n \"schema\": {\n \"architecture\": {\"required\": True, \"type\": \"string\"},\n \"id\": {\"required\": True, \"type\": \"string\"},\n \"location\": {\"required\": True, \"type\": \"string\"},\n \"name\": {\"required\": True, \"type\": \"string\"},\n \"version\": {\"required\": True, \"type\": \"string\"},\n },\n },\n \"schema-version\": {\"required\": True, \"type\": \"string\", \"allowed\": [\"1.0\"]},\n \"components\": {\n \"required\": True,\n \"type\": \"list\",\n \"schema\": {\n \"type\": \"dict\",\n \"schema\": {\n \"commit_id\": {\"required\": True, \"type\": \"string\"},\n \"location\": {\"required\": True, \"type\": \"string\"},\n \"name\": {\"required\": True, \"type\": \"string\"},\n \"ref\": {\"required\": True, \"type\": \"string\"},\n \"repository\": {\"required\": True, \"type\": \"string\"},\n },\n },\n },\n }\n\n def __init__(self, data):\n super().__init__(data)\n self.build = self.Build(data[\"build\"])\n self.components = list(\n map(lambda entry: self.Component(entry), data[\"components\"])\n )\n\n def __to_dict__(self):\n return {\n \"schema-version\": \"1.0\",\n \"build\": self.build.__to_dict__(),\n \"components\": list(\n map(lambda component: component.__to_dict__(), self.components)\n ),\n }\n\n @staticmethod\n def from_s3(bucket_name, build_id, opensearch_version, architecture, work_dir=None):\n work_dir = work_dir if not None else str(os.getcwd())\n manifest_s3_path = BundleManifest.get_bundle_manifest_relative_location(\n build_id, opensearch_version, architecture\n )\n S3Bucket(bucket_name).download_file(manifest_s3_path, work_dir)\n with open(\"manifest.yml\", \"r\") as file:\n bundle_manifest = BundleManifest.from_file(file)\n os.remove(os.path.realpath(os.path.join(work_dir, \"manifest.yml\")))\n return bundle_manifest\n\n @staticmethod\n def get_tarball_relative_location(build_id, opensearch_version, architecture):\n return f\"bundles/{opensearch_version}/{build_id}/{architecture}/opensearch-{opensearch_version}-linux-{architecture}.tar.gz\"\n\n @staticmethod\n def get_tarball_name(opensearch_version, architecture):\n return f\"opensearch-{opensearch_version}-linux-{architecture}.tar.gz\"\n\n @staticmethod\n def get_bundle_manifest_relative_location(\n build_id, opensearch_version, architecture\n ):\n return f\"bundles/{opensearch_version}/{build_id}/{architecture}/manifest.yml\"\n\n class Build:\n def __init__(self, data):\n self.name = data[\"name\"]\n self.version = data[\"version\"]\n self.architecture = data[\"architecture\"]\n self.location = data[\"location\"]\n self.id = data[\"id\"]\n\n def __to_dict__(self):\n return {\n \"name\": self.name,\n \"version\": self.version,\n \"architecture\": self.architecture,\n \"location\": self.location,\n \"id\": self.id,\n }\n\n class Component:\n def __init__(self, data):\n self.name = data[\"name\"]\n self.repository = data[\"repository\"]\n self.ref = data[\"ref\"]\n self.commit_id = data[\"commit_id\"]\n self.location = data[\"location\"]\n\n def __to_dict__(self):\n return {\n \"name\": self.name,\n \"repository\": self.repository,\n \"ref\": self.ref,\n \"commit_id\": self.commit_id,\n \"location\": self.location,\n }\n", "path": "bundle-workflow/src/manifests/bundle_manifest.py"}], "after_files": [{"content": "# SPDX-License-Identifier: Apache-2.0\n#\n# The OpenSearch Contributors require contributions made to\n# this file be licensed under the Apache-2.0 
license or a\n# compatible open source license.\n\nimport os\n\nfrom aws.s3_bucket import S3Bucket\nfrom manifests.manifest import Manifest\n\n\nclass BundleManifest(Manifest):\n \"\"\"\n A BundleManifest is an immutable view of the outputs from a assemble step\n The manifest contains information about the bundle that was built (in the `assemble` section),\n and the components that made up the bundle in the `components` section.\n\n The format for schema version 1.0 is:\n schema-version: \"1.0\"\n build:\n name: string\n version: string\n architecture: x64 or arm64\n location: /relative/path/to/tarball\n components:\n - name: string\n repository: URL of git repository\n ref: git ref that was built (sha, branch, or tag)\n commit_id: The actual git commit ID that was built (i.e. the resolved \"ref\")\n location: /relative/path/to/artifact\n \"\"\"\n\n SCHEMA = {\n \"build\": {\n \"required\": True,\n \"type\": \"dict\",\n \"schema\": {\n \"architecture\": {\"required\": True, \"type\": \"string\"},\n \"id\": {\"required\": True, \"type\": \"string\"},\n \"location\": {\"required\": True, \"type\": \"string\"},\n \"name\": {\"required\": True, \"type\": \"string\"},\n \"version\": {\"required\": True, \"type\": \"string\"},\n },\n },\n \"schema-version\": {\"required\": True, \"type\": \"string\", \"allowed\": [\"1.0\"]},\n \"components\": {\n \"required\": True,\n \"type\": \"list\",\n \"schema\": {\n \"type\": \"dict\",\n \"schema\": {\n \"commit_id\": {\"required\": True, \"type\": \"string\"},\n \"location\": {\"required\": True, \"type\": \"string\"},\n \"name\": {\"required\": True, \"type\": \"string\"},\n \"ref\": {\"required\": True, \"type\": \"string\"},\n \"repository\": {\"required\": True, \"type\": \"string\"},\n },\n },\n },\n }\n\n def __init__(self, data):\n super().__init__(data)\n self.build = self.Build(data[\"build\"])\n self.components = list(\n map(lambda entry: self.Component(entry), data[\"components\"])\n )\n\n def __to_dict__(self):\n return {\n \"schema-version\": \"1.0\",\n \"build\": self.build.__to_dict__(),\n \"components\": list(\n map(lambda component: component.__to_dict__(), self.components)\n ),\n }\n\n @staticmethod\n def from_s3(bucket_name, build_id, opensearch_version, architecture, work_dir=None):\n work_dir = work_dir if not None else str(os.getcwd())\n manifest_s3_path = BundleManifest.get_bundle_manifest_relative_location(\n build_id, opensearch_version, architecture\n )\n S3Bucket(bucket_name).download_file(manifest_s3_path, work_dir)\n bundle_manifest = BundleManifest.from_path(os.path.join(work_dir, 'manifest.yml'))\n os.remove(os.path.realpath(os.path.join(work_dir, \"manifest.yml\")))\n return bundle_manifest\n\n @staticmethod\n def get_tarball_relative_location(build_id, opensearch_version, architecture):\n return f\"bundles/{opensearch_version}/{build_id}/{architecture}/opensearch-{opensearch_version}-linux-{architecture}.tar.gz\"\n\n @staticmethod\n def get_tarball_name(opensearch_version, architecture):\n return f\"opensearch-{opensearch_version}-linux-{architecture}.tar.gz\"\n\n @staticmethod\n def get_bundle_manifest_relative_location(\n build_id, opensearch_version, architecture\n ):\n return f\"bundles/{opensearch_version}/{build_id}/{architecture}/manifest.yml\"\n\n class Build:\n def __init__(self, data):\n self.name = data[\"name\"]\n self.version = data[\"version\"]\n self.architecture = data[\"architecture\"]\n self.location = data[\"location\"]\n self.id = data[\"id\"]\n\n def __to_dict__(self):\n return {\n \"name\": 
self.name,\n \"version\": self.version,\n \"architecture\": self.architecture,\n \"location\": self.location,\n \"id\": self.id,\n }\n\n class Component:\n def __init__(self, data):\n self.name = data[\"name\"]\n self.repository = data[\"repository\"]\n self.ref = data[\"ref\"]\n self.commit_id = data[\"commit_id\"]\n self.location = data[\"location\"]\n\n def __to_dict__(self):\n return {\n \"name\": self.name,\n \"repository\": self.repository,\n \"ref\": self.ref,\n \"commit_id\": self.commit_id,\n \"location\": self.location,\n }\n", "path": "bundle-workflow/src/manifests/bundle_manifest.py"}]} | 2,158 | 168 |
gh_patches_debug_43843 | rasdani/github-patches | git_diff | StackStorm__st2-3710 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
st2 cli commands in "usage" must be in alphabetical order
```
usage: st2 [-h] [--version] [--url BASE_URL] [--auth-url AUTH_URL]
[--api-url API_URL] [--stream-url STREAM_URL]
[--api-version API_VERSION] [--cacert CACERT]
[--config-file CONFIG_FILE] [--print-config] [--skip-config]
[--debug]
{action,action-alias,auth,apikey,execution,key,login,pack,policy,policy-type,rule,run,runner,sensor,trace,trigger,trigger-instance,webhook,whoami,timer,rule-enforcement,role,role-assignment}
```
Backstory: someone didn't see `timer` just because the commands are out of order.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `st2client/st2client/shell.py`
Content:
```
1 #!/usr/bin/env python
2
3 # Licensed to the StackStorm, Inc ('StackStorm') under one or more
4 # contributor license agreements. See the NOTICE file distributed with
5 # this work for additional information regarding copyright ownership.
6 # The ASF licenses this file to You under the Apache License, Version 2.0
7 # (the "License"); you may not use this file except in compliance with
8 # the License. You may obtain a copy of the License at
9 #
10 # http://www.apache.org/licenses/LICENSE-2.0
11 #
12 # Unless required by applicable law or agreed to in writing, software
13 # distributed under the License is distributed on an "AS IS" BASIS,
14 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 # See the License for the specific language governing permissions and
16 # limitations under the License.
17
18 """
19 Command-line interface to StackStorm.
20 """
21
22 from __future__ import print_function
23 from __future__ import absolute_import
24
25 import sys
26 import argcomplete
27 import argparse
28 import logging
29
30 import six
31
32 from st2client import __version__
33 from st2client import models
34 from st2client.base import BaseCLIApp
35 from st2client.commands import auth
36 from st2client.commands import action
37 from st2client.commands import action_alias
38 from st2client.commands import keyvalue
39 from st2client.commands import pack
40 from st2client.commands import policy
41 from st2client.commands import resource
42 from st2client.commands import sensor
43 from st2client.commands import trace
44 from st2client.commands import trigger
45 from st2client.commands import triggerinstance
46 from st2client.commands import timer
47 from st2client.commands import webhook
48 from st2client.commands import rule
49 from st2client.commands import rule_enforcement
50 from st2client.commands import rbac
51 from st2client.config import set_config
52 from st2client.exceptions.operations import OperationFailureException
53 from st2client.utils.logging import LogLevelFilter, set_log_level_for_all_loggers
54 from st2client.commands.auth import TokenCreateCommand
55 from st2client.commands.auth import LoginCommand
56 from st2client.commands.auth import WhoamiCommand
57
58
59 __all__ = [
60 'Shell'
61 ]
62
63 LOGGER = logging.getLogger(__name__)
64
65 CLI_DESCRIPTION = 'CLI for StackStorm event-driven automation platform. https://stackstorm.com'
66
67
68 class Shell(BaseCLIApp):
69 LOG = LOGGER
70
71 SKIP_AUTH_CLASSES = [
72 TokenCreateCommand.__name__,
73 LoginCommand.__name__,
74 WhoamiCommand.__name__
75 ]
76
77 def __init__(self):
78 # Set up of endpoints is delayed until program is run.
79 self.client = None
80
81 # Set up the main parser.
82 self.parser = argparse.ArgumentParser(description=CLI_DESCRIPTION)
83
84 # Set up general program options.
85 self.parser.add_argument(
86 '--version',
87 action='version',
88 version='%(prog)s {version}'.format(version=__version__))
89
90 self.parser.add_argument(
91 '--url',
92 action='store',
93 dest='base_url',
94 default=None,
95 help='Base URL for the API servers. Assumes all servers use the '
96 'same base URL and default ports are used. Get ST2_BASE_URL '
97 'from the environment variables by default.'
98 )
99
100 self.parser.add_argument(
101 '--auth-url',
102 action='store',
103 dest='auth_url',
104 default=None,
105 help='URL for the authentication service. Get ST2_AUTH_URL '
106 'from the environment variables by default.'
107 )
108
109 self.parser.add_argument(
110 '--api-url',
111 action='store',
112 dest='api_url',
113 default=None,
114 help='URL for the API server. Get ST2_API_URL '
115 'from the environment variables by default.'
116 )
117
118 self.parser.add_argument(
119 '--stream-url',
120 action='store',
121 dest='stream_url',
122 default=None,
123 help='URL for the stream endpoint. Get ST2_STREAM_URL'
124 'from the environment variables by default.'
125 )
126
127 self.parser.add_argument(
128 '--api-version',
129 action='store',
130 dest='api_version',
131 default=None,
132 help='API version to use. Get ST2_API_VERSION '
133 'from the environment variables by default.'
134 )
135
136 self.parser.add_argument(
137 '--cacert',
138 action='store',
139 dest='cacert',
140 default=None,
141 help='Path to the CA cert bundle for the SSL endpoints. '
142 'Get ST2_CACERT from the environment variables by default. '
143 'If this is not provided, then SSL cert will not be verified.'
144 )
145
146 self.parser.add_argument(
147 '--config-file',
148 action='store',
149 dest='config_file',
150 default=None,
151 help='Path to the CLI config file'
152 )
153
154 self.parser.add_argument(
155 '--print-config',
156 action='store_true',
157 dest='print_config',
158 default=False,
159 help='Parse the config file and print the values'
160 )
161
162 self.parser.add_argument(
163 '--skip-config',
164 action='store_true',
165 dest='skip_config',
166 default=False,
167 help='Don\'t parse and use the CLI config file'
168 )
169
170 self.parser.add_argument(
171 '--debug',
172 action='store_true',
173 dest='debug',
174 default=False,
175 help='Enable debug mode'
176 )
177
178 # Set up list of commands and subcommands.
179 self.subparsers = self.parser.add_subparsers()
180 self.commands = dict()
181
182 self.commands['action'] = action.ActionBranch(
183 'An activity that happens as a response to the external event.',
184 self, self.subparsers)
185
186 self.commands['action-alias'] = action_alias.ActionAliasBranch(
187 'Action aliases.',
188 self, self.subparsers)
189
190 self.commands['auth'] = auth.TokenCreateCommand(
191 models.Token, self, self.subparsers, name='auth')
192
193 self.commands['api-key'] = auth.ApiKeyBranch(
194 'API Keys.',
195 self, self.subparsers)
196
197 self.commands['execution'] = action.ActionExecutionBranch(
198 'An invocation of an action.',
199 self, self.subparsers)
200
201 self.commands['key'] = keyvalue.KeyValuePairBranch(
202 'Key value pair is used to store commonly used configuration '
203 'for reuse in sensors, actions, and rules.',
204 self, self.subparsers)
205
206 self.commands['login'] = auth.LoginCommand(
207 models.Token, self, self.subparsers, name='login')
208
209 self.commands['pack'] = pack.PackBranch(
210 'A group of related integration resources: '
211 'actions, rules, and sensors.',
212 self, self.subparsers)
213
214 self.commands['policy'] = policy.PolicyBranch(
215 'Policy that is enforced on a resource.',
216 self, self.subparsers)
217
218 self.commands['policy-type'] = policy.PolicyTypeBranch(
219 'Type of policy that can be applied to resources.',
220 self, self.subparsers)
221
222 self.commands['rule'] = rule.RuleBranch(
223 'A specification to invoke an "action" on a "trigger" selectively '
224 'based on some criteria.',
225 self, self.subparsers)
226
227 self.commands['run'] = action.ActionRunCommand(
228 models.Action, self, self.subparsers, name='run', add_help=False)
229
230 self.commands['runner'] = resource.ResourceBranch(
231 models.RunnerType,
232 'Runner is a type of handler for a specific class of actions.',
233 self, self.subparsers, read_only=True, has_disable=True)
234
235 self.commands['sensor'] = sensor.SensorBranch(
236 'An adapter which allows you to integrate StackStorm with external system.',
237 self, self.subparsers)
238
239 self.commands['trace'] = trace.TraceBranch(
240 'A group of executions, rules and triggerinstances that are related.',
241 self, self.subparsers)
242
243 self.commands['trigger'] = trigger.TriggerTypeBranch(
244 'An external event that is mapped to a st2 input. It is the '
245 'st2 invocation point.',
246 self, self.subparsers)
247
248 self.commands['trigger-instance'] = triggerinstance.TriggerInstanceBranch(
249 'Actual instances of triggers received by st2.',
250 self, self.subparsers)
251
252 self.commands['webhook'] = webhook.WebhookBranch(
253 'Webhooks.',
254 self, self.subparsers)
255
256 self.commands['whoami'] = auth.WhoamiCommand(
257 models.Token, self, self.subparsers, name='whoami')
258
259 self.commands['timer'] = timer.TimerBranch(
260 'Timers.',
261 self, self.subparsers)
262
263 self.commands['rule-enforcement'] = rule_enforcement.RuleEnforcementBranch(
264 'Models that represent enforcement of rules.',
265 self, self.subparsers)
266
267 # RBAC
268 self.commands['role'] = rbac.RoleBranch(
269 'RBAC roles.',
270 self, self.subparsers)
271 self.commands['role-assignment'] = rbac.RoleAssignmentBranch(
272 'RBAC role assignments.',
273 self, self.subparsers)
274
275 def run(self, argv):
276 debug = False
277
278 # Provide autocomplete for shell
279 argcomplete.autocomplete(self.parser)
280
281 if '--print-config' in argv:
282 # Hack because --print-config requires no command to be specified
283 argv = argv + ['action', 'list']
284
285 # Parse command line arguments.
286 args = self.parser.parse_args(args=argv)
287
288 print_config = args.print_config
289 if print_config:
290 self._print_config(args=args)
291 return 3
292
293 # Parse config and store it in the config module
294 config = self._parse_config_file(args=args)
295 set_config(config=config)
296
297 # Setup client and run the command
298 try:
299 debug = getattr(args, 'debug', False)
300 if debug:
301 set_log_level_for_all_loggers(level=logging.DEBUG)
302
303 # Set up client.
304 self.client = self.get_client(args=args, debug=debug)
305
306 # Execute command.
307 args.func(args)
308
309 return 0
310 except OperationFailureException as e:
311 if debug:
312 self._print_debug_info(args=args)
313 return 2
314 except Exception as e:
315 # We allow exception to define custom exit codes
316 exit_code = getattr(e, 'exit_code', 1)
317
318 print('ERROR: %s\n' % e)
319 if debug:
320 self._print_debug_info(args=args)
321
322 return exit_code
323
324 def _print_config(self, args):
325 config = self._parse_config_file(args=args)
326
327 for section, options in six.iteritems(config):
328 print('[%s]' % (section))
329
330 for name, value in six.iteritems(options):
331 print('%s = %s' % (name, value))
332
333
334 def setup_logging(argv):
335 debug = '--debug' in argv
336
337 root = LOGGER
338 root.setLevel(logging.WARNING)
339
340 handler = logging.StreamHandler(sys.stderr)
341 handler.setLevel(logging.WARNING)
342 formatter = logging.Formatter('%(asctime)s %(levelname)s - %(message)s')
343 handler.setFormatter(formatter)
344
345 if not debug:
346 handler.addFilter(LogLevelFilter(log_levels=[logging.ERROR]))
347
348 root.addHandler(handler)
349
350
351 def main(argv=sys.argv[1:]):
352 setup_logging(argv)
353 return Shell().run(argv)
354
355
356 if __name__ == '__main__':
357 sys.exit(main(sys.argv[1:]))
358
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/st2client/st2client/shell.py b/st2client/st2client/shell.py
--- a/st2client/st2client/shell.py
+++ b/st2client/st2client/shell.py
@@ -63,6 +63,15 @@
LOGGER = logging.getLogger(__name__)
CLI_DESCRIPTION = 'CLI for StackStorm event-driven automation platform. https://stackstorm.com'
+USAGE_STRING = """
+Usage: %(prog)s [options] <command> <sub command> [options]
+
+For example:
+
+ %(prog)s action list --pack=st2
+ %(prog)s run core.local cmd=date
+ %(prog)s --debug run core.local cmd=date
+""".strip()
class Shell(BaseCLIApp):
@@ -177,7 +186,10 @@
# Set up list of commands and subcommands.
self.subparsers = self.parser.add_subparsers()
- self.commands = dict()
+ self.commands = {}
+
+ self.commands['run'] = action.ActionRunCommand(
+ models.Action, self, self.subparsers, name='run', add_help=False)
self.commands['action'] = action.ActionBranch(
'An activity that happens as a response to the external event.',
@@ -190,6 +202,12 @@
self.commands['auth'] = auth.TokenCreateCommand(
models.Token, self, self.subparsers, name='auth')
+ self.commands['login'] = auth.LoginCommand(
+ models.Token, self, self.subparsers, name='login')
+
+ self.commands['whoami'] = auth.WhoamiCommand(
+ models.Token, self, self.subparsers, name='whoami')
+
self.commands['api-key'] = auth.ApiKeyBranch(
'API Keys.',
self, self.subparsers)
@@ -203,9 +221,6 @@
'for reuse in sensors, actions, and rules.',
self, self.subparsers)
- self.commands['login'] = auth.LoginCommand(
- models.Token, self, self.subparsers, name='login')
-
self.commands['pack'] = pack.PackBranch(
'A group of related integration resources: '
'actions, rules, and sensors.',
@@ -224,8 +239,13 @@
'based on some criteria.',
self, self.subparsers)
- self.commands['run'] = action.ActionRunCommand(
- models.Action, self, self.subparsers, name='run', add_help=False)
+ self.commands['webhook'] = webhook.WebhookBranch(
+ 'Webhooks.',
+ self, self.subparsers)
+
+ self.commands['timer'] = timer.TimerBranch(
+ 'Timers.',
+ self, self.subparsers)
self.commands['runner'] = resource.ResourceBranch(
models.RunnerType,
@@ -249,17 +269,6 @@
'Actual instances of triggers received by st2.',
self, self.subparsers)
- self.commands['webhook'] = webhook.WebhookBranch(
- 'Webhooks.',
- self, self.subparsers)
-
- self.commands['whoami'] = auth.WhoamiCommand(
- models.Token, self, self.subparsers, name='whoami')
-
- self.commands['timer'] = timer.TimerBranch(
- 'Timers.',
- self, self.subparsers)
-
self.commands['rule-enforcement'] = rule_enforcement.RuleEnforcementBranch(
'Models that represent enforcement of rules.',
self, self.subparsers)
@@ -275,6 +284,17 @@
def run(self, argv):
debug = False
+ parser = self.parser
+
+ if len(argv) == 0:
+ # Print a more user-friendly help string if no arguments are provided
+ # Note: We only set usage variable for the main parser. If we passed "usage" argument
+ # to the main ArgumentParser class above, this would also set a custom usage string for
+ # sub-parsers which we don't want.
+ parser.usage = USAGE_STRING
+ sys.stderr.write(parser.format_help())
+ return 2
+
# Provide autocomplete for shell
argcomplete.autocomplete(self.parser)
| {"golden_diff": "diff --git a/st2client/st2client/shell.py b/st2client/st2client/shell.py\n--- a/st2client/st2client/shell.py\n+++ b/st2client/st2client/shell.py\n@@ -63,6 +63,15 @@\n LOGGER = logging.getLogger(__name__)\n \n CLI_DESCRIPTION = 'CLI for StackStorm event-driven automation platform. https://stackstorm.com'\n+USAGE_STRING = \"\"\"\n+Usage: %(prog)s [options] <command> <sub command> [options]\n+\n+For example:\n+\n+ %(prog)s action list --pack=st2\n+ %(prog)s run core.local cmd=date\n+ %(prog)s --debug run core.local cmd=date\n+\"\"\".strip()\n \n \n class Shell(BaseCLIApp):\n@@ -177,7 +186,10 @@\n \n # Set up list of commands and subcommands.\n self.subparsers = self.parser.add_subparsers()\n- self.commands = dict()\n+ self.commands = {}\n+\n+ self.commands['run'] = action.ActionRunCommand(\n+ models.Action, self, self.subparsers, name='run', add_help=False)\n \n self.commands['action'] = action.ActionBranch(\n 'An activity that happens as a response to the external event.',\n@@ -190,6 +202,12 @@\n self.commands['auth'] = auth.TokenCreateCommand(\n models.Token, self, self.subparsers, name='auth')\n \n+ self.commands['login'] = auth.LoginCommand(\n+ models.Token, self, self.subparsers, name='login')\n+\n+ self.commands['whoami'] = auth.WhoamiCommand(\n+ models.Token, self, self.subparsers, name='whoami')\n+\n self.commands['api-key'] = auth.ApiKeyBranch(\n 'API Keys.',\n self, self.subparsers)\n@@ -203,9 +221,6 @@\n 'for reuse in sensors, actions, and rules.',\n self, self.subparsers)\n \n- self.commands['login'] = auth.LoginCommand(\n- models.Token, self, self.subparsers, name='login')\n-\n self.commands['pack'] = pack.PackBranch(\n 'A group of related integration resources: '\n 'actions, rules, and sensors.',\n@@ -224,8 +239,13 @@\n 'based on some criteria.',\n self, self.subparsers)\n \n- self.commands['run'] = action.ActionRunCommand(\n- models.Action, self, self.subparsers, name='run', add_help=False)\n+ self.commands['webhook'] = webhook.WebhookBranch(\n+ 'Webhooks.',\n+ self, self.subparsers)\n+\n+ self.commands['timer'] = timer.TimerBranch(\n+ 'Timers.',\n+ self, self.subparsers)\n \n self.commands['runner'] = resource.ResourceBranch(\n models.RunnerType,\n@@ -249,17 +269,6 @@\n 'Actual instances of triggers received by st2.',\n self, self.subparsers)\n \n- self.commands['webhook'] = webhook.WebhookBranch(\n- 'Webhooks.',\n- self, self.subparsers)\n-\n- self.commands['whoami'] = auth.WhoamiCommand(\n- models.Token, self, self.subparsers, name='whoami')\n-\n- self.commands['timer'] = timer.TimerBranch(\n- 'Timers.',\n- self, self.subparsers)\n-\n self.commands['rule-enforcement'] = rule_enforcement.RuleEnforcementBranch(\n 'Models that represent enforcement of rules.',\n self, self.subparsers)\n@@ -275,6 +284,17 @@\n def run(self, argv):\n debug = False\n \n+ parser = self.parser\n+\n+ if len(argv) == 0:\n+ # Print a more user-friendly help string if no arguments are provided\n+ # Note: We only set usage variable for the main parser. 
If we passed \"usage\" argument\n+ # to the main ArgumentParser class above, this would also set a custom usage string for\n+ # sub-parsers which we don't want.\n+ parser.usage = USAGE_STRING\n+ sys.stderr.write(parser.format_help())\n+ return 2\n+\n # Provide autocomplete for shell\n argcomplete.autocomplete(self.parser)\n", "issue": "st2 cli commands in \"usage\" must be in alphabetical order\n\r\n```\r\nusage: st2 [-h] [--version] [--url BASE_URL] [--auth-url AUTH_URL]\r\n [--api-url API_URL] [--stream-url STREAM_URL]\r\n [--api-version API_VERSION] [--cacert CACERT]\r\n [--config-file CONFIG_FILE] [--print-config] [--skip-config]\r\n [--debug]\r\n {action,action-alias,auth,apikey,execution,key,login,pack,policy,policy-type,rule,run,runner,sensor,trace,trigger,trigger-instance,webhook,whoami,timer,rule-enforcement,role,role-assignment}\r\n```\r\n\r\nBackstory: someone didn't see `timer` just because they're out of the order. \n", "before_files": [{"content": "#!/usr/bin/env python\n\n# Licensed to the StackStorm, Inc ('StackStorm') under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nCommand-line interface to StackStorm.\n\"\"\"\n\nfrom __future__ import print_function\nfrom __future__ import absolute_import\n\nimport sys\nimport argcomplete\nimport argparse\nimport logging\n\nimport six\n\nfrom st2client import __version__\nfrom st2client import models\nfrom st2client.base import BaseCLIApp\nfrom st2client.commands import auth\nfrom st2client.commands import action\nfrom st2client.commands import action_alias\nfrom st2client.commands import keyvalue\nfrom st2client.commands import pack\nfrom st2client.commands import policy\nfrom st2client.commands import resource\nfrom st2client.commands import sensor\nfrom st2client.commands import trace\nfrom st2client.commands import trigger\nfrom st2client.commands import triggerinstance\nfrom st2client.commands import timer\nfrom st2client.commands import webhook\nfrom st2client.commands import rule\nfrom st2client.commands import rule_enforcement\nfrom st2client.commands import rbac\nfrom st2client.config import set_config\nfrom st2client.exceptions.operations import OperationFailureException\nfrom st2client.utils.logging import LogLevelFilter, set_log_level_for_all_loggers\nfrom st2client.commands.auth import TokenCreateCommand\nfrom st2client.commands.auth import LoginCommand\nfrom st2client.commands.auth import WhoamiCommand\n\n\n__all__ = [\n 'Shell'\n]\n\nLOGGER = logging.getLogger(__name__)\n\nCLI_DESCRIPTION = 'CLI for StackStorm event-driven automation platform. 
https://stackstorm.com'\n\n\nclass Shell(BaseCLIApp):\n LOG = LOGGER\n\n SKIP_AUTH_CLASSES = [\n TokenCreateCommand.__name__,\n LoginCommand.__name__,\n WhoamiCommand.__name__\n ]\n\n def __init__(self):\n # Set up of endpoints is delayed until program is run.\n self.client = None\n\n # Set up the main parser.\n self.parser = argparse.ArgumentParser(description=CLI_DESCRIPTION)\n\n # Set up general program options.\n self.parser.add_argument(\n '--version',\n action='version',\n version='%(prog)s {version}'.format(version=__version__))\n\n self.parser.add_argument(\n '--url',\n action='store',\n dest='base_url',\n default=None,\n help='Base URL for the API servers. Assumes all servers use the '\n 'same base URL and default ports are used. Get ST2_BASE_URL '\n 'from the environment variables by default.'\n )\n\n self.parser.add_argument(\n '--auth-url',\n action='store',\n dest='auth_url',\n default=None,\n help='URL for the authentication service. Get ST2_AUTH_URL '\n 'from the environment variables by default.'\n )\n\n self.parser.add_argument(\n '--api-url',\n action='store',\n dest='api_url',\n default=None,\n help='URL for the API server. Get ST2_API_URL '\n 'from the environment variables by default.'\n )\n\n self.parser.add_argument(\n '--stream-url',\n action='store',\n dest='stream_url',\n default=None,\n help='URL for the stream endpoint. Get ST2_STREAM_URL'\n 'from the environment variables by default.'\n )\n\n self.parser.add_argument(\n '--api-version',\n action='store',\n dest='api_version',\n default=None,\n help='API version to use. Get ST2_API_VERSION '\n 'from the environment variables by default.'\n )\n\n self.parser.add_argument(\n '--cacert',\n action='store',\n dest='cacert',\n default=None,\n help='Path to the CA cert bundle for the SSL endpoints. '\n 'Get ST2_CACERT from the environment variables by default. 
'\n 'If this is not provided, then SSL cert will not be verified.'\n )\n\n self.parser.add_argument(\n '--config-file',\n action='store',\n dest='config_file',\n default=None,\n help='Path to the CLI config file'\n )\n\n self.parser.add_argument(\n '--print-config',\n action='store_true',\n dest='print_config',\n default=False,\n help='Parse the config file and print the values'\n )\n\n self.parser.add_argument(\n '--skip-config',\n action='store_true',\n dest='skip_config',\n default=False,\n help='Don\\'t parse and use the CLI config file'\n )\n\n self.parser.add_argument(\n '--debug',\n action='store_true',\n dest='debug',\n default=False,\n help='Enable debug mode'\n )\n\n # Set up list of commands and subcommands.\n self.subparsers = self.parser.add_subparsers()\n self.commands = dict()\n\n self.commands['action'] = action.ActionBranch(\n 'An activity that happens as a response to the external event.',\n self, self.subparsers)\n\n self.commands['action-alias'] = action_alias.ActionAliasBranch(\n 'Action aliases.',\n self, self.subparsers)\n\n self.commands['auth'] = auth.TokenCreateCommand(\n models.Token, self, self.subparsers, name='auth')\n\n self.commands['api-key'] = auth.ApiKeyBranch(\n 'API Keys.',\n self, self.subparsers)\n\n self.commands['execution'] = action.ActionExecutionBranch(\n 'An invocation of an action.',\n self, self.subparsers)\n\n self.commands['key'] = keyvalue.KeyValuePairBranch(\n 'Key value pair is used to store commonly used configuration '\n 'for reuse in sensors, actions, and rules.',\n self, self.subparsers)\n\n self.commands['login'] = auth.LoginCommand(\n models.Token, self, self.subparsers, name='login')\n\n self.commands['pack'] = pack.PackBranch(\n 'A group of related integration resources: '\n 'actions, rules, and sensors.',\n self, self.subparsers)\n\n self.commands['policy'] = policy.PolicyBranch(\n 'Policy that is enforced on a resource.',\n self, self.subparsers)\n\n self.commands['policy-type'] = policy.PolicyTypeBranch(\n 'Type of policy that can be applied to resources.',\n self, self.subparsers)\n\n self.commands['rule'] = rule.RuleBranch(\n 'A specification to invoke an \"action\" on a \"trigger\" selectively '\n 'based on some criteria.',\n self, self.subparsers)\n\n self.commands['run'] = action.ActionRunCommand(\n models.Action, self, self.subparsers, name='run', add_help=False)\n\n self.commands['runner'] = resource.ResourceBranch(\n models.RunnerType,\n 'Runner is a type of handler for a specific class of actions.',\n self, self.subparsers, read_only=True, has_disable=True)\n\n self.commands['sensor'] = sensor.SensorBranch(\n 'An adapter which allows you to integrate StackStorm with external system.',\n self, self.subparsers)\n\n self.commands['trace'] = trace.TraceBranch(\n 'A group of executions, rules and triggerinstances that are related.',\n self, self.subparsers)\n\n self.commands['trigger'] = trigger.TriggerTypeBranch(\n 'An external event that is mapped to a st2 input. 
It is the '\n 'st2 invocation point.',\n self, self.subparsers)\n\n self.commands['trigger-instance'] = triggerinstance.TriggerInstanceBranch(\n 'Actual instances of triggers received by st2.',\n self, self.subparsers)\n\n self.commands['webhook'] = webhook.WebhookBranch(\n 'Webhooks.',\n self, self.subparsers)\n\n self.commands['whoami'] = auth.WhoamiCommand(\n models.Token, self, self.subparsers, name='whoami')\n\n self.commands['timer'] = timer.TimerBranch(\n 'Timers.',\n self, self.subparsers)\n\n self.commands['rule-enforcement'] = rule_enforcement.RuleEnforcementBranch(\n 'Models that represent enforcement of rules.',\n self, self.subparsers)\n\n # RBAC\n self.commands['role'] = rbac.RoleBranch(\n 'RBAC roles.',\n self, self.subparsers)\n self.commands['role-assignment'] = rbac.RoleAssignmentBranch(\n 'RBAC role assignments.',\n self, self.subparsers)\n\n def run(self, argv):\n debug = False\n\n # Provide autocomplete for shell\n argcomplete.autocomplete(self.parser)\n\n if '--print-config' in argv:\n # Hack because --print-config requires no command to be specified\n argv = argv + ['action', 'list']\n\n # Parse command line arguments.\n args = self.parser.parse_args(args=argv)\n\n print_config = args.print_config\n if print_config:\n self._print_config(args=args)\n return 3\n\n # Parse config and store it in the config module\n config = self._parse_config_file(args=args)\n set_config(config=config)\n\n # Setup client and run the command\n try:\n debug = getattr(args, 'debug', False)\n if debug:\n set_log_level_for_all_loggers(level=logging.DEBUG)\n\n # Set up client.\n self.client = self.get_client(args=args, debug=debug)\n\n # Execute command.\n args.func(args)\n\n return 0\n except OperationFailureException as e:\n if debug:\n self._print_debug_info(args=args)\n return 2\n except Exception as e:\n # We allow exception to define custom exit codes\n exit_code = getattr(e, 'exit_code', 1)\n\n print('ERROR: %s\\n' % e)\n if debug:\n self._print_debug_info(args=args)\n\n return exit_code\n\n def _print_config(self, args):\n config = self._parse_config_file(args=args)\n\n for section, options in six.iteritems(config):\n print('[%s]' % (section))\n\n for name, value in six.iteritems(options):\n print('%s = %s' % (name, value))\n\n\ndef setup_logging(argv):\n debug = '--debug' in argv\n\n root = LOGGER\n root.setLevel(logging.WARNING)\n\n handler = logging.StreamHandler(sys.stderr)\n handler.setLevel(logging.WARNING)\n formatter = logging.Formatter('%(asctime)s %(levelname)s - %(message)s')\n handler.setFormatter(formatter)\n\n if not debug:\n handler.addFilter(LogLevelFilter(log_levels=[logging.ERROR]))\n\n root.addHandler(handler)\n\n\ndef main(argv=sys.argv[1:]):\n setup_logging(argv)\n return Shell().run(argv)\n\n\nif __name__ == '__main__':\n sys.exit(main(sys.argv[1:]))\n", "path": "st2client/st2client/shell.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\n# Licensed to the StackStorm, Inc ('StackStorm') under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. 
You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nCommand-line interface to StackStorm.\n\"\"\"\n\nfrom __future__ import print_function\nfrom __future__ import absolute_import\n\nimport sys\nimport argcomplete\nimport argparse\nimport logging\n\nimport six\n\nfrom st2client import __version__\nfrom st2client import models\nfrom st2client.base import BaseCLIApp\nfrom st2client.commands import auth\nfrom st2client.commands import action\nfrom st2client.commands import action_alias\nfrom st2client.commands import keyvalue\nfrom st2client.commands import pack\nfrom st2client.commands import policy\nfrom st2client.commands import resource\nfrom st2client.commands import sensor\nfrom st2client.commands import trace\nfrom st2client.commands import trigger\nfrom st2client.commands import triggerinstance\nfrom st2client.commands import timer\nfrom st2client.commands import webhook\nfrom st2client.commands import rule\nfrom st2client.commands import rule_enforcement\nfrom st2client.commands import rbac\nfrom st2client.config import set_config\nfrom st2client.exceptions.operations import OperationFailureException\nfrom st2client.utils.logging import LogLevelFilter, set_log_level_for_all_loggers\nfrom st2client.commands.auth import TokenCreateCommand\nfrom st2client.commands.auth import LoginCommand\nfrom st2client.commands.auth import WhoamiCommand\n\n\n__all__ = [\n 'Shell'\n]\n\nLOGGER = logging.getLogger(__name__)\n\nCLI_DESCRIPTION = 'CLI for StackStorm event-driven automation platform. https://stackstorm.com'\nUSAGE_STRING = \"\"\"\nUsage: %(prog)s [options] <command> <sub command> [options]\n\nFor example:\n\n %(prog)s action list --pack=st2\n %(prog)s run core.local cmd=date\n %(prog)s --debug run core.local cmd=date\n\"\"\".strip()\n\n\nclass Shell(BaseCLIApp):\n LOG = LOGGER\n\n SKIP_AUTH_CLASSES = [\n TokenCreateCommand.__name__,\n LoginCommand.__name__,\n WhoamiCommand.__name__\n ]\n\n def __init__(self):\n # Set up of endpoints is delayed until program is run.\n self.client = None\n\n # Set up the main parser.\n self.parser = argparse.ArgumentParser(description=CLI_DESCRIPTION)\n\n # Set up general program options.\n self.parser.add_argument(\n '--version',\n action='version',\n version='%(prog)s {version}'.format(version=__version__))\n\n self.parser.add_argument(\n '--url',\n action='store',\n dest='base_url',\n default=None,\n help='Base URL for the API servers. Assumes all servers use the '\n 'same base URL and default ports are used. Get ST2_BASE_URL '\n 'from the environment variables by default.'\n )\n\n self.parser.add_argument(\n '--auth-url',\n action='store',\n dest='auth_url',\n default=None,\n help='URL for the authentication service. Get ST2_AUTH_URL '\n 'from the environment variables by default.'\n )\n\n self.parser.add_argument(\n '--api-url',\n action='store',\n dest='api_url',\n default=None,\n help='URL for the API server. Get ST2_API_URL '\n 'from the environment variables by default.'\n )\n\n self.parser.add_argument(\n '--stream-url',\n action='store',\n dest='stream_url',\n default=None,\n help='URL for the stream endpoint. 
Get ST2_STREAM_URL'\n 'from the environment variables by default.'\n )\n\n self.parser.add_argument(\n '--api-version',\n action='store',\n dest='api_version',\n default=None,\n help='API version to use. Get ST2_API_VERSION '\n 'from the environment variables by default.'\n )\n\n self.parser.add_argument(\n '--cacert',\n action='store',\n dest='cacert',\n default=None,\n help='Path to the CA cert bundle for the SSL endpoints. '\n 'Get ST2_CACERT from the environment variables by default. '\n 'If this is not provided, then SSL cert will not be verified.'\n )\n\n self.parser.add_argument(\n '--config-file',\n action='store',\n dest='config_file',\n default=None,\n help='Path to the CLI config file'\n )\n\n self.parser.add_argument(\n '--print-config',\n action='store_true',\n dest='print_config',\n default=False,\n help='Parse the config file and print the values'\n )\n\n self.parser.add_argument(\n '--skip-config',\n action='store_true',\n dest='skip_config',\n default=False,\n help='Don\\'t parse and use the CLI config file'\n )\n\n self.parser.add_argument(\n '--debug',\n action='store_true',\n dest='debug',\n default=False,\n help='Enable debug mode'\n )\n\n # Set up list of commands and subcommands.\n self.subparsers = self.parser.add_subparsers()\n self.commands = {}\n\n self.commands['run'] = action.ActionRunCommand(\n models.Action, self, self.subparsers, name='run', add_help=False)\n\n self.commands['action'] = action.ActionBranch(\n 'An activity that happens as a response to the external event.',\n self, self.subparsers)\n\n self.commands['action-alias'] = action_alias.ActionAliasBranch(\n 'Action aliases.',\n self, self.subparsers)\n\n self.commands['auth'] = auth.TokenCreateCommand(\n models.Token, self, self.subparsers, name='auth')\n\n self.commands['login'] = auth.LoginCommand(\n models.Token, self, self.subparsers, name='login')\n\n self.commands['whoami'] = auth.WhoamiCommand(\n models.Token, self, self.subparsers, name='whoami')\n\n self.commands['api-key'] = auth.ApiKeyBranch(\n 'API Keys.',\n self, self.subparsers)\n\n self.commands['execution'] = action.ActionExecutionBranch(\n 'An invocation of an action.',\n self, self.subparsers)\n\n self.commands['key'] = keyvalue.KeyValuePairBranch(\n 'Key value pair is used to store commonly used configuration '\n 'for reuse in sensors, actions, and rules.',\n self, self.subparsers)\n\n self.commands['pack'] = pack.PackBranch(\n 'A group of related integration resources: '\n 'actions, rules, and sensors.',\n self, self.subparsers)\n\n self.commands['policy'] = policy.PolicyBranch(\n 'Policy that is enforced on a resource.',\n self, self.subparsers)\n\n self.commands['policy-type'] = policy.PolicyTypeBranch(\n 'Type of policy that can be applied to resources.',\n self, self.subparsers)\n\n self.commands['rule'] = rule.RuleBranch(\n 'A specification to invoke an \"action\" on a \"trigger\" selectively '\n 'based on some criteria.',\n self, self.subparsers)\n\n self.commands['webhook'] = webhook.WebhookBranch(\n 'Webhooks.',\n self, self.subparsers)\n\n self.commands['timer'] = timer.TimerBranch(\n 'Timers.',\n self, self.subparsers)\n\n self.commands['runner'] = resource.ResourceBranch(\n models.RunnerType,\n 'Runner is a type of handler for a specific class of actions.',\n self, self.subparsers, read_only=True, has_disable=True)\n\n self.commands['sensor'] = sensor.SensorBranch(\n 'An adapter which allows you to integrate StackStorm with external system.',\n self, self.subparsers)\n\n self.commands['trace'] = trace.TraceBranch(\n 'A 
group of executions, rules and triggerinstances that are related.',\n self, self.subparsers)\n\n self.commands['trigger'] = trigger.TriggerTypeBranch(\n 'An external event that is mapped to a st2 input. It is the '\n 'st2 invocation point.',\n self, self.subparsers)\n\n self.commands['trigger-instance'] = triggerinstance.TriggerInstanceBranch(\n 'Actual instances of triggers received by st2.',\n self, self.subparsers)\n\n self.commands['rule-enforcement'] = rule_enforcement.RuleEnforcementBranch(\n 'Models that represent enforcement of rules.',\n self, self.subparsers)\n\n # RBAC\n self.commands['role'] = rbac.RoleBranch(\n 'RBAC roles.',\n self, self.subparsers)\n self.commands['role-assignment'] = rbac.RoleAssignmentBranch(\n 'RBAC role assignments.',\n self, self.subparsers)\n\n def run(self, argv):\n debug = False\n\n parser = self.parser\n\n if len(argv) == 0:\n # Print a more user-friendly help string if no arguments are provided\n # Note: We only set usage variable for the main parser. If we passed \"usage\" argument\n # to the main ArgumentParser class above, this would also set a custom usage string for\n # sub-parsers which we don't want.\n parser.usage = USAGE_STRING\n sys.stderr.write(parser.format_help())\n return 2\n\n # Provide autocomplete for shell\n argcomplete.autocomplete(self.parser)\n\n if '--print-config' in argv:\n # Hack because --print-config requires no command to be specified\n argv = argv + ['action', 'list']\n\n # Parse command line arguments.\n args = self.parser.parse_args(args=argv)\n\n print_config = args.print_config\n if print_config:\n self._print_config(args=args)\n return 3\n\n # Parse config and store it in the config module\n config = self._parse_config_file(args=args)\n set_config(config=config)\n\n # Setup client and run the command\n try:\n debug = getattr(args, 'debug', False)\n if debug:\n set_log_level_for_all_loggers(level=logging.DEBUG)\n\n # Set up client.\n self.client = self.get_client(args=args, debug=debug)\n\n # Execute command.\n args.func(args)\n\n return 0\n except OperationFailureException as e:\n if debug:\n self._print_debug_info(args=args)\n return 2\n except Exception as e:\n # We allow exception to define custom exit codes\n exit_code = getattr(e, 'exit_code', 1)\n\n print('ERROR: %s\\n' % e)\n if debug:\n self._print_debug_info(args=args)\n\n return exit_code\n\n def _print_config(self, args):\n config = self._parse_config_file(args=args)\n\n for section, options in six.iteritems(config):\n print('[%s]' % (section))\n\n for name, value in six.iteritems(options):\n print('%s = %s' % (name, value))\n\n\ndef setup_logging(argv):\n debug = '--debug' in argv\n\n root = LOGGER\n root.setLevel(logging.WARNING)\n\n handler = logging.StreamHandler(sys.stderr)\n handler.setLevel(logging.WARNING)\n formatter = logging.Formatter('%(asctime)s %(levelname)s - %(message)s')\n handler.setFormatter(formatter)\n\n if not debug:\n handler.addFilter(LogLevelFilter(log_levels=[logging.ERROR]))\n\n root.addHandler(handler)\n\n\ndef main(argv=sys.argv[1:]):\n setup_logging(argv)\n return Shell().run(argv)\n\n\nif __name__ == '__main__':\n sys.exit(main(sys.argv[1:]))\n", "path": "st2client/st2client/shell.py"}]} | 3,827 | 942 |
gh_patches_debug_23260 | rasdani/github-patches | git_diff | beeware__toga-1637 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
False positive "Multiple candidiate toga backends"
**Describe the bug**
When using a development version of *toga*, a `RuntimeError` of multiple candidate toga backends is raised with the same backend. Stacktrace example:
```
Traceback (most recent call last):
File "C:\Users\sagi\AppData\Local\Programs\Python\Python310\lib\runpy.py", line 196, in _run_module_as_main
return _run_code(code, main_globals, None,
File "C:\Users\sagi\AppData\Local\Programs\Python\Python310\lib\runpy.py", line 86, in _run_code
exec(code, run_globals)
File "C:\Users\sagi\PycharmProjects\toga\examples\canvas\canvas\__main__.py", line 4, in <module>
main().main_loop()
File "C:\Users\sagi\PycharmProjects\toga\examples\canvas\canvas\app.py", line 565, in main
return ExampleCanvasApp('Canvas', 'org.beeware.widgets.canvas')
File "C:\Users\sagi\PycharmProjects\toga\src\core\src\toga\app.py", line 308, in __init__
self.factory = get_platform_factory()
File "C:\Users\sagi\PycharmProjects\toga\src\core\src\toga\platform.py", line 106, in get_platform_factory
raise RuntimeError(
RuntimeError: Multiple candidiate toga backends found: ('toga_winforms' (windows), 'toga_winforms' (windows)). Uninstall the backends you don't require, or use TOGA_BACKEND to specify a backend.
```
**To Reproduce**
Steps to reproduce the behavior:
1. Open a new virtual environment
2. Go to src\core
3. Run `pip install -e .`
4. Go to src\winforms
5. Run `pip install -e .`
6. Go to example\canvas (or any other example application)
7. Run `python -m canvas`
**Expected behavior**
`RuntimeError` should not be raised in that case.
**Environment:**
- Operating System: Windows
- Python version: 3.10.6
- Software versions:
- Briefcase: latest
- Toga: latest
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/core/src/toga/platform.py`
Content:
```
1 import importlib
2 import os
3 import sys
4 import warnings
5 from functools import lru_cache
6
7 try:
8 # Usually, the pattern is "import module; if it doesn't exist,
9 # import the shim". However, we need the 3.10 API for entry_points,
10 # as the 3.8 didn't support the `groups` argument to entry_points.
11 # Therefore, we try to import the compatibility shim first; and fall
12 # back to the stdlib module if the shim isn't there.
13 from importlib_metadata import entry_points
14 except ImportError:
15 from importlib.metadata import entry_points
16
17
18 # Map python sys.platform with toga platforms names
19 _TOGA_PLATFORMS = {
20 'android': 'android',
21 'darwin': 'macOS',
22 'ios': 'iOS',
23 'linux': 'linux',
24 'tvos': 'tvOS',
25 'watchos': 'watchOS',
26 'wearos': 'wearOS',
27 'emscripten': 'web',
28 'win32': 'windows',
29 }
30
31
32 try:
33 current_platform = os.environ['TOGA_PLATFORM']
34 except KeyError:
35 # Rely on `sys.getandroidapilevel`, which only exists on Android; see
36 # https://github.com/beeware/Python-Android-support/issues/8
37 if hasattr(sys, 'getandroidapilevel'):
38 current_platform = 'android'
39 else:
40 current_platform = _TOGA_PLATFORMS.get(sys.platform)
41
42
43 @lru_cache(maxsize=1)
44 def get_platform_factory(factory=None):
45 """ This function figures out what the current host platform is and
46 imports the adequate factory. The factory is the interface to all platform
47 specific implementations.
48
49 If the TOGA_BACKEND environment variable is set, the factory will be loaded
50 from that module.
51
52 Returns: The suitable factory for the current host platform.
53
54 Raises:
55 RuntimeError: If no supported host platform can be identified.
56 """
57
58 ######################################################################
59 # 2022-09: Backwards compatibility
60 ######################################################################
61 # factory no longer used
62 if factory:
63 warnings.warn("The factory argument is no longer used.", DeprecationWarning)
64 ######################################################################
65 # End backwards compatibility.
66 ######################################################################
67
68 toga_backends = entry_points(group='toga.backends')
69 if not toga_backends:
70 raise RuntimeError("No toga backend could be loaded.")
71
72 backend_value = os.environ.get('TOGA_BACKEND')
73 if backend_value:
74 try:
75 factory = importlib.import_module(f'{backend_value}.factory')
76 except ModuleNotFoundError:
77 toga_backends_values = ', '.join([f'{backend.value!r}' for backend in toga_backends])
78 raise RuntimeError(
79 f"The backend specified by TOGA_BACKEND ({backend_value!r}) "
80 f"could not be loaded. It should be one of: {toga_backends_values}."
81 )
82 else:
83 if len(toga_backends) == 1:
84 backend = list(toga_backends)[0]
85 else:
86 # multiple backends are installed: choose the one that matches the host platform
87 matching_backends = [
88 backend
89 for backend in toga_backends
90 if backend.name == current_platform
91 ]
92 if len(matching_backends) == 0:
93 toga_backends_string = ', '.join([
94 f'{backend.value!r} ({backend.name})'
95 for backend in toga_backends
96 ])
97 raise RuntimeError(
98 f"Multiple Toga backends are installed ({toga_backends_string}), "
99 f"but none of them match your current platform ({current_platform!r})."
100 )
101 if len(matching_backends) > 1:
102 toga_backends_string = ', '.join([
103 f'{backend.value!r} ({backend.name})'
104 for backend in matching_backends
105 ])
106 raise RuntimeError(
107 f"Multiple candidiate toga backends found: ({toga_backends_string}). "
108 "Uninstall the backends you don't require, or use "
109 "TOGA_BACKEND to specify a backend."
110 )
111 backend = matching_backends[0]
112 factory = importlib.import_module(f'{backend.value}.factory')
113 return factory
114
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/core/src/toga/platform.py b/src/core/src/toga/platform.py
--- a/src/core/src/toga/platform.py
+++ b/src/core/src/toga/platform.py
@@ -67,7 +67,7 @@
toga_backends = entry_points(group='toga.backends')
if not toga_backends:
- raise RuntimeError("No toga backend could be loaded.")
+ raise RuntimeError("No Toga backend could be loaded.")
backend_value = os.environ.get('TOGA_BACKEND')
if backend_value:
@@ -80,8 +80,14 @@
f"could not be loaded. It should be one of: {toga_backends_values}."
)
else:
+ # As of Setuptools 65.5, entry points are returned duplicated if the
+ # package is installed editable. Use a set to ensure that each entry point
+ # is only returned once.
+ # See https://github.com/pypa/setuptools/issues/3649
+ toga_backends = sorted(set(toga_backends))
+
if len(toga_backends) == 1:
- backend = list(toga_backends)[0]
+ backend = toga_backends[0]
else:
# multiple backends are installed: choose the one that matches the host platform
matching_backends = [
| {"golden_diff": "diff --git a/src/core/src/toga/platform.py b/src/core/src/toga/platform.py\n--- a/src/core/src/toga/platform.py\n+++ b/src/core/src/toga/platform.py\n@@ -67,7 +67,7 @@\n \n toga_backends = entry_points(group='toga.backends')\n if not toga_backends:\n- raise RuntimeError(\"No toga backend could be loaded.\")\n+ raise RuntimeError(\"No Toga backend could be loaded.\")\n \n backend_value = os.environ.get('TOGA_BACKEND')\n if backend_value:\n@@ -80,8 +80,14 @@\n f\"could not be loaded. It should be one of: {toga_backends_values}.\"\n )\n else:\n+ # As of Setuptools 65.5, entry points are returned duplicated if the\n+ # package is installed editable. Use a set to ensure that each entry point\n+ # is only returned once.\n+ # See https://github.com/pypa/setuptools/issues/3649\n+ toga_backends = sorted(set(toga_backends))\n+\n if len(toga_backends) == 1:\n- backend = list(toga_backends)[0]\n+ backend = toga_backends[0]\n else:\n # multiple backends are installed: choose the one that matches the host platform\n matching_backends = [\n", "issue": "False positive \"Multiple candidiate toga backends\"\n**Describe the bug**\r\nWhen using a development version of *toga*, a `RuntimeError` of multiple candidate toga backends is raised with the same backend. Stacktrace example:\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"C:\\Users\\sagi\\AppData\\Local\\Programs\\Python\\Python310\\lib\\runpy.py\", line 196, in _run_module_as_main\r\n return _run_code(code, main_globals, None,\r\n File \"C:\\Users\\sagi\\AppData\\Local\\Programs\\Python\\Python310\\lib\\runpy.py\", line 86, in _run_code\r\n exec(code, run_globals)\r\n File \"C:\\Users\\sagi\\PycharmProjects\\toga\\examples\\canvas\\canvas\\__main__.py\", line 4, in <module>\r\n main().main_loop()\r\n File \"C:\\Users\\sagi\\PycharmProjects\\toga\\examples\\canvas\\canvas\\app.py\", line 565, in main\r\n return ExampleCanvasApp('Canvas', 'org.beeware.widgets.canvas')\r\n File \"C:\\Users\\sagi\\PycharmProjects\\toga\\src\\core\\src\\toga\\app.py\", line 308, in __init__\r\n self.factory = get_platform_factory()\r\n File \"C:\\Users\\sagi\\PycharmProjects\\toga\\src\\core\\src\\toga\\platform.py\", line 106, in get_platform_factory\r\n raise RuntimeError(\r\nRuntimeError: Multiple candidiate toga backends found: ('toga_winforms' (windows), 'toga_winforms' (windows)). Uninstall the backends you don't require, or use TOGA_BACKEND to specify a backend.\r\n```\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Open a new virtual environment\r\n2. Go to src\\core\r\n3. Run `pip install -e .`\r\n4. Go to src\\winforms\r\n5. Run `pip install -e .`\r\n6. Go to example\\canvas (or any other example application)\r\n7. Run `python -m canvas`\r\n\r\n**Expected behavior**\r\n`RuntimeError` should not be raised in that case.\r\n\r\n**Environment:**\r\n - Operating System: Windows\r\n - Python version: 3.10.6\r\n - Software versions:\r\n - Briefcase: latest\r\n - Toga: latest\r\n\n", "before_files": [{"content": "import importlib\nimport os\nimport sys\nimport warnings\nfrom functools import lru_cache\n\ntry:\n # Usually, the pattern is \"import module; if it doesn't exist,\n # import the shim\". 
However, we need the 3.10 API for entry_points,\n # as the 3.8 didn't support the `groups` argument to entry_points.\n # Therefore, we try to import the compatibility shim first; and fall\n # back to the stdlib module if the shim isn't there.\n from importlib_metadata import entry_points\nexcept ImportError:\n from importlib.metadata import entry_points\n\n\n# Map python sys.platform with toga platforms names\n_TOGA_PLATFORMS = {\n 'android': 'android',\n 'darwin': 'macOS',\n 'ios': 'iOS',\n 'linux': 'linux',\n 'tvos': 'tvOS',\n 'watchos': 'watchOS',\n 'wearos': 'wearOS',\n 'emscripten': 'web',\n 'win32': 'windows',\n}\n\n\ntry:\n current_platform = os.environ['TOGA_PLATFORM']\nexcept KeyError:\n # Rely on `sys.getandroidapilevel`, which only exists on Android; see\n # https://github.com/beeware/Python-Android-support/issues/8\n if hasattr(sys, 'getandroidapilevel'):\n current_platform = 'android'\n else:\n current_platform = _TOGA_PLATFORMS.get(sys.platform)\n\n\n@lru_cache(maxsize=1)\ndef get_platform_factory(factory=None):\n \"\"\" This function figures out what the current host platform is and\n imports the adequate factory. The factory is the interface to all platform\n specific implementations.\n\n If the TOGA_BACKEND environment variable is set, the factory will be loaded\n from that module.\n\n Returns: The suitable factory for the current host platform.\n\n Raises:\n RuntimeError: If no supported host platform can be identified.\n \"\"\"\n\n ######################################################################\n # 2022-09: Backwards compatibility\n ######################################################################\n # factory no longer used\n if factory:\n warnings.warn(\"The factory argument is no longer used.\", DeprecationWarning)\n ######################################################################\n # End backwards compatibility.\n ######################################################################\n\n toga_backends = entry_points(group='toga.backends')\n if not toga_backends:\n raise RuntimeError(\"No toga backend could be loaded.\")\n\n backend_value = os.environ.get('TOGA_BACKEND')\n if backend_value:\n try:\n factory = importlib.import_module(f'{backend_value}.factory')\n except ModuleNotFoundError:\n toga_backends_values = ', '.join([f'{backend.value!r}' for backend in toga_backends])\n raise RuntimeError(\n f\"The backend specified by TOGA_BACKEND ({backend_value!r}) \"\n f\"could not be loaded. It should be one of: {toga_backends_values}.\"\n )\n else:\n if len(toga_backends) == 1:\n backend = list(toga_backends)[0]\n else:\n # multiple backends are installed: choose the one that matches the host platform\n matching_backends = [\n backend\n for backend in toga_backends\n if backend.name == current_platform\n ]\n if len(matching_backends) == 0:\n toga_backends_string = ', '.join([\n f'{backend.value!r} ({backend.name})'\n for backend in toga_backends\n ])\n raise RuntimeError(\n f\"Multiple Toga backends are installed ({toga_backends_string}), \"\n f\"but none of them match your current platform ({current_platform!r}).\"\n )\n if len(matching_backends) > 1:\n toga_backends_string = ', '.join([\n f'{backend.value!r} ({backend.name})'\n for backend in matching_backends\n ])\n raise RuntimeError(\n f\"Multiple candidiate toga backends found: ({toga_backends_string}). 
\"\n \"Uninstall the backends you don't require, or use \"\n \"TOGA_BACKEND to specify a backend.\"\n )\n backend = matching_backends[0]\n factory = importlib.import_module(f'{backend.value}.factory')\n return factory\n", "path": "src/core/src/toga/platform.py"}], "after_files": [{"content": "import importlib\nimport os\nimport sys\nimport warnings\nfrom functools import lru_cache\n\ntry:\n # Usually, the pattern is \"import module; if it doesn't exist,\n # import the shim\". However, we need the 3.10 API for entry_points,\n # as the 3.8 didn't support the `groups` argument to entry_points.\n # Therefore, we try to import the compatibility shim first; and fall\n # back to the stdlib module if the shim isn't there.\n from importlib_metadata import entry_points\nexcept ImportError:\n from importlib.metadata import entry_points\n\n\n# Map python sys.platform with toga platforms names\n_TOGA_PLATFORMS = {\n 'android': 'android',\n 'darwin': 'macOS',\n 'ios': 'iOS',\n 'linux': 'linux',\n 'tvos': 'tvOS',\n 'watchos': 'watchOS',\n 'wearos': 'wearOS',\n 'emscripten': 'web',\n 'win32': 'windows',\n}\n\n\ntry:\n current_platform = os.environ['TOGA_PLATFORM']\nexcept KeyError:\n # Rely on `sys.getandroidapilevel`, which only exists on Android; see\n # https://github.com/beeware/Python-Android-support/issues/8\n if hasattr(sys, 'getandroidapilevel'):\n current_platform = 'android'\n else:\n current_platform = _TOGA_PLATFORMS.get(sys.platform)\n\n\n@lru_cache(maxsize=1)\ndef get_platform_factory(factory=None):\n \"\"\" This function figures out what the current host platform is and\n imports the adequate factory. The factory is the interface to all platform\n specific implementations.\n\n If the TOGA_BACKEND environment variable is set, the factory will be loaded\n from that module.\n\n Returns: The suitable factory for the current host platform.\n\n Raises:\n RuntimeError: If no supported host platform can be identified.\n \"\"\"\n\n ######################################################################\n # 2022-09: Backwards compatibility\n ######################################################################\n # factory no longer used\n if factory:\n warnings.warn(\"The factory argument is no longer used.\", DeprecationWarning)\n ######################################################################\n # End backwards compatibility.\n ######################################################################\n\n toga_backends = entry_points(group='toga.backends')\n if not toga_backends:\n raise RuntimeError(\"No Toga backend could be loaded.\")\n\n backend_value = os.environ.get('TOGA_BACKEND')\n if backend_value:\n try:\n factory = importlib.import_module(f'{backend_value}.factory')\n except ModuleNotFoundError:\n toga_backends_values = ', '.join([f'{backend.value!r}' for backend in toga_backends])\n raise RuntimeError(\n f\"The backend specified by TOGA_BACKEND ({backend_value!r}) \"\n f\"could not be loaded. It should be one of: {toga_backends_values}.\"\n )\n else:\n # As of Setuptools 65.5, entry points are returned duplicated if the\n # package is installed editable. 
Use a set to ensure that each entry point\n # is only returned once.\n # See https://github.com/pypa/setuptools/issues/3649\n toga_backends = sorted(set(toga_backends))\n\n if len(toga_backends) == 1:\n backend = toga_backends[0]\n else:\n # multiple backends are installed: choose the one that matches the host platform\n matching_backends = [\n backend\n for backend in toga_backends\n if backend.name == current_platform\n ]\n if len(matching_backends) == 0:\n toga_backends_string = ', '.join([\n f'{backend.value!r} ({backend.name})'\n for backend in toga_backends\n ])\n raise RuntimeError(\n f\"Multiple Toga backends are installed ({toga_backends_string}), \"\n f\"but none of them match your current platform ({current_platform!r}).\"\n )\n if len(matching_backends) > 1:\n toga_backends_string = ', '.join([\n f'{backend.value!r} ({backend.name})'\n for backend in matching_backends\n ])\n raise RuntimeError(\n f\"Multiple candidiate toga backends found: ({toga_backends_string}). \"\n \"Uninstall the backends you don't require, or use \"\n \"TOGA_BACKEND to specify a backend.\"\n )\n backend = matching_backends[0]\n factory = importlib.import_module(f'{backend.value}.factory')\n return factory\n", "path": "src/core/src/toga/platform.py"}]} | 1,929 | 302 |
gh_patches_debug_9053 | rasdani/github-patches | git_diff | encode__httpx-234 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Bug in the 0.7.0 packaging
I've tried to upgrade to 0.7.0 and it exploded with a
```
$ poetry update
Updating dependencies
Resolving dependencies... (1.0s)
Package operations: 0 installs, 1 update, 0 removals
- Updating httpx (0.6.8 -> 0.7.0)
[EnvCommandError]
Command ['/Users/pablo/Library/Caches/pypoetry/virtualenvs/drop-eventsng-1aNj3rOl-py3.7/bin/python', '-m', 'pip', 'install', '--no-deps', '-U', 'httpx==0.7.0'] errored with the following return code 1, and output:
Collecting httpx==0.7.0
Using cached https://files.pythonhosted.org/packages/12/b3/fdd6e528a3385e2149ad42cc4e9b54e326d532e3e79a86e7cfdaea45723e/httpx-0.7.0.tar.gz
ERROR: Command errored out with exit status 1:
command: /Users/pablo/Library/Caches/pypoetry/virtualenvs/drop-eventsng-1aNj3rOl-py3.7/bin/python -c 'import sys, setuptools, tokenize; sys.argv[0] = '"'"'/private/var/folders/x4/txc7_0pn6zlfb30cs_0sh5mm0000gn/T/pip-install-jq6aut9d/httpx/setup.py'"'"'; __file__='"'"'/private/var/folders/x4/txc7_0pn6zlfb30cs_0sh5mm0000gn/T/pip-install-jq6aut9d/httpx/setup.py'"'"';f=getattr(tokenize, '"'"'open'"'"', open)(__file__);code=f.read().replace('"'"'\r\n'"'"', '"'"'\n'"'"');f.close();exec(compile(code, __file__, '"'"'exec'"'"'))' egg_info --egg-base pip-egg-info
cwd: /private/var/folders/x4/txc7_0pn6zlfb30cs_0sh5mm0000gn/T/pip-install-jq6aut9d/httpx/
Complete output (7 lines):
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "/private/var/folders/x4/txc7_0pn6zlfb30cs_0sh5mm0000gn/T/pip-install-jq6aut9d/httpx/setup.py", line 45, in <module>
long_description=get_long_description(),
File "/private/var/folders/x4/txc7_0pn6zlfb30cs_0sh5mm0000gn/T/pip-install-jq6aut9d/httpx/setup.py", line 26, in get_long_description
with open("CHANGELOG.md", encoding="utf8") as f:
FileNotFoundError: [Errno 2] No such file or directory: 'CHANGELOG.md'
----------------------------------------
ERROR: Command errored out with exit status 1: python setup.py egg_info Check the logs for full command output.
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3
4 import re
5 from pathlib import Path
6
7 from setuptools import setup
8
9
10 def get_version(package):
11 """
12 Return package version as listed in `__version__` in `init.py`.
13 """
14 version = Path(package, "__version__.py").read_text()
15 return re.search("__version__ = ['\"]([^'\"]+)['\"]", version).group(1)
16
17
18 def get_long_description():
19 """
20 Return the README.
21 """
22 long_description = ""
23 with open("README.md", encoding="utf8") as f:
24 long_description += f.read()
25 long_description += "\n\n"
26 with open("CHANGELOG.md", encoding="utf8") as f:
27 long_description += f.read()
28 return long_description
29
30
31 def get_packages(package):
32 """
33 Return root package and all sub-packages.
34 """
35 return [str(path.parent) for path in Path(package).glob("**/__init__.py")]
36
37
38 setup(
39 name="httpx",
40 python_requires=">=3.6",
41 version=get_version("httpx"),
42 url="https://github.com/encode/httpx",
43 license="BSD",
44 description="The next generation HTTP client.",
45 long_description=get_long_description(),
46 long_description_content_type="text/markdown",
47 author="Tom Christie",
48 author_email="[email protected]",
49 package_data={"httpx": ["py.typed"]},
50 packages=get_packages("httpx"),
51 install_requires=[
52 "certifi",
53 "chardet==3.*",
54 "h11==0.8.*",
55 "h2==3.*",
56 "hstspreload",
57 "idna==2.*",
58 "rfc3986==1.*",
59 ],
60 classifiers=[
61 "Development Status :: 3 - Alpha",
62 "Environment :: Web Environment",
63 "Intended Audience :: Developers",
64 "License :: OSI Approved :: BSD License",
65 "Operating System :: OS Independent",
66 "Topic :: Internet :: WWW/HTTP",
67 "Programming Language :: Python :: 3",
68 "Programming Language :: Python :: 3.6",
69 "Programming Language :: Python :: 3.7",
70 "Programming Language :: Python :: 3.8",
71 ],
72 )
73
```
Path: `httpx/__version__.py`
Content:
```
1 __title__ = "httpx"
2 __description__ = "A next generation HTTP client, for Python 3."
3 __version__ = "0.7.0"
4
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/httpx/__version__.py b/httpx/__version__.py
--- a/httpx/__version__.py
+++ b/httpx/__version__.py
@@ -1,3 +1,3 @@
__title__ = "httpx"
__description__ = "A next generation HTTP client, for Python 3."
-__version__ = "0.7.0"
+__version__ = "0.7.1"
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -48,6 +48,7 @@
author_email="[email protected]",
package_data={"httpx": ["py.typed"]},
packages=get_packages("httpx"),
+ include_package_data=True,
install_requires=[
"certifi",
"chardet==3.*",
| {"golden_diff": "diff --git a/httpx/__version__.py b/httpx/__version__.py\n--- a/httpx/__version__.py\n+++ b/httpx/__version__.py\n@@ -1,3 +1,3 @@\n __title__ = \"httpx\"\n __description__ = \"A next generation HTTP client, for Python 3.\"\n-__version__ = \"0.7.0\"\n+__version__ = \"0.7.1\"\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -48,6 +48,7 @@\n author_email=\"[email protected]\",\n package_data={\"httpx\": [\"py.typed\"]},\n packages=get_packages(\"httpx\"),\n+ include_package_data=True,\n install_requires=[\n \"certifi\",\n \"chardet==3.*\",\n", "issue": "Bug in the 0.7.0 packaging\nI've tried to upgrade to 0.7.0 and it exploded with a\r\n\r\n```\r\n$ poetry update\r\nUpdating dependencies\r\nResolving dependencies... (1.0s)\r\n\r\n\r\nPackage operations: 0 installs, 1 update, 0 removals\r\n\r\n - Updating httpx (0.6.8 -> 0.7.0)\r\n\r\n[EnvCommandError]\r\nCommand ['/Users/pablo/Library/Caches/pypoetry/virtualenvs/drop-eventsng-1aNj3rOl-py3.7/bin/python', '-m', 'pip', 'install', '--no-deps', '-U', 'httpx==0.7.0'] errored with the following return code 1, and output:\r\nCollecting httpx==0.7.0\r\n Using cached https://files.pythonhosted.org/packages/12/b3/fdd6e528a3385e2149ad42cc4e9b54e326d532e3e79a86e7cfdaea45723e/httpx-0.7.0.tar.gz\r\n ERROR: Command errored out with exit status 1:\r\n command: /Users/pablo/Library/Caches/pypoetry/virtualenvs/drop-eventsng-1aNj3rOl-py3.7/bin/python -c 'import sys, setuptools, tokenize; sys.argv[0] = '\"'\"'/private/var/folders/x4/txc7_0pn6zlfb30cs_0sh5mm0000gn/T/pip-install-jq6aut9d/httpx/setup.py'\"'\"'; __file__='\"'\"'/private/var/folders/x4/txc7_0pn6zlfb30cs_0sh5mm0000gn/T/pip-install-jq6aut9d/httpx/setup.py'\"'\"';f=getattr(tokenize, '\"'\"'open'\"'\"', open)(__file__);code=f.read().replace('\"'\"'\\r\\n'\"'\"', '\"'\"'\\n'\"'\"');f.close();exec(compile(code, __file__, '\"'\"'exec'\"'\"'))' egg_info --egg-base pip-egg-info\r\n cwd: /private/var/folders/x4/txc7_0pn6zlfb30cs_0sh5mm0000gn/T/pip-install-jq6aut9d/httpx/\r\n Complete output (7 lines):\r\n Traceback (most recent call last):\r\n File \"<string>\", line 1, in <module>\r\n File \"/private/var/folders/x4/txc7_0pn6zlfb30cs_0sh5mm0000gn/T/pip-install-jq6aut9d/httpx/setup.py\", line 45, in <module>\r\n long_description=get_long_description(),\r\n File \"/private/var/folders/x4/txc7_0pn6zlfb30cs_0sh5mm0000gn/T/pip-install-jq6aut9d/httpx/setup.py\", line 26, in get_long_description\r\n with open(\"CHANGELOG.md\", encoding=\"utf8\") as f:\r\n FileNotFoundError: [Errno 2] No such file or directory: 'CHANGELOG.md'\r\n ----------------------------------------\r\nERROR: Command errored out with exit status 1: python setup.py egg_info Check the logs for full command output.\r\n```\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport re\nfrom pathlib import Path\n\nfrom setuptools import setup\n\n\ndef get_version(package):\n \"\"\"\n Return package version as listed in `__version__` in `init.py`.\n \"\"\"\n version = Path(package, \"__version__.py\").read_text()\n return re.search(\"__version__ = ['\\\"]([^'\\\"]+)['\\\"]\", version).group(1)\n\n\ndef get_long_description():\n \"\"\"\n Return the README.\n \"\"\"\n long_description = \"\"\n with open(\"README.md\", encoding=\"utf8\") as f:\n long_description += f.read()\n long_description += \"\\n\\n\"\n with open(\"CHANGELOG.md\", encoding=\"utf8\") as f:\n long_description += f.read()\n return long_description\n\n\ndef get_packages(package):\n \"\"\"\n Return root package and all 
sub-packages.\n \"\"\"\n return [str(path.parent) for path in Path(package).glob(\"**/__init__.py\")]\n\n\nsetup(\n name=\"httpx\",\n python_requires=\">=3.6\",\n version=get_version(\"httpx\"),\n url=\"https://github.com/encode/httpx\",\n license=\"BSD\",\n description=\"The next generation HTTP client.\",\n long_description=get_long_description(),\n long_description_content_type=\"text/markdown\",\n author=\"Tom Christie\",\n author_email=\"[email protected]\",\n package_data={\"httpx\": [\"py.typed\"]},\n packages=get_packages(\"httpx\"),\n install_requires=[\n \"certifi\",\n \"chardet==3.*\",\n \"h11==0.8.*\",\n \"h2==3.*\",\n \"hstspreload\",\n \"idna==2.*\",\n \"rfc3986==1.*\",\n ],\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n \"Environment :: Web Environment\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: OS Independent\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n ],\n)\n", "path": "setup.py"}, {"content": "__title__ = \"httpx\"\n__description__ = \"A next generation HTTP client, for Python 3.\"\n__version__ = \"0.7.0\"\n", "path": "httpx/__version__.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport re\nfrom pathlib import Path\n\nfrom setuptools import setup\n\n\ndef get_version(package):\n \"\"\"\n Return package version as listed in `__version__` in `init.py`.\n \"\"\"\n version = Path(package, \"__version__.py\").read_text()\n return re.search(\"__version__ = ['\\\"]([^'\\\"]+)['\\\"]\", version).group(1)\n\n\ndef get_long_description():\n \"\"\"\n Return the README.\n \"\"\"\n long_description = \"\"\n with open(\"README.md\", encoding=\"utf8\") as f:\n long_description += f.read()\n long_description += \"\\n\\n\"\n with open(\"CHANGELOG.md\", encoding=\"utf8\") as f:\n long_description += f.read()\n return long_description\n\n\ndef get_packages(package):\n \"\"\"\n Return root package and all sub-packages.\n \"\"\"\n return [str(path.parent) for path in Path(package).glob(\"**/__init__.py\")]\n\n\nsetup(\n name=\"httpx\",\n python_requires=\">=3.6\",\n version=get_version(\"httpx\"),\n url=\"https://github.com/encode/httpx\",\n license=\"BSD\",\n description=\"The next generation HTTP client.\",\n long_description=get_long_description(),\n long_description_content_type=\"text/markdown\",\n author=\"Tom Christie\",\n author_email=\"[email protected]\",\n package_data={\"httpx\": [\"py.typed\"]},\n packages=get_packages(\"httpx\"),\n include_package_data=True,\n install_requires=[\n \"certifi\",\n \"chardet==3.*\",\n \"h11==0.8.*\",\n \"h2==3.*\",\n \"hstspreload\",\n \"idna==2.*\",\n \"rfc3986==1.*\",\n ],\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n \"Environment :: Web Environment\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: OS Independent\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n ],\n)\n", "path": "setup.py"}, {"content": "__title__ = \"httpx\"\n__description__ = \"A next generation HTTP client, for Python 3.\"\n__version__ = \"0.7.1\"\n", "path": "httpx/__version__.py"}]} | 1,688 | 183 |
gh_patches_debug_19085 | rasdani/github-patches | git_diff | liqd__a4-meinberlin-811 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Use <time> element for dates
This way screen readers (and other ATs) know that it should be read as a date.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `meinberlin/apps/contrib/templatetags/contrib_tags.py`
Content:
```
1 from django import template
2 from django.template.loader import render_to_string
3
4 register = template.Library()
5
6
7 @register.assignment_tag
8 def include_template_string(template, **kwargs):
9 rendered_template = render_to_string(template, kwargs)
10 return str(rendered_template)
11
12
13 @register.assignment_tag
14 def combined_url_parameter(request_query_dict, **kwargs):
15 combined_query_dict = request_query_dict.copy()
16 for key in kwargs:
17 combined_query_dict.setlist(key, [kwargs[key]])
18 encoded_parameter = '?' + combined_query_dict.urlencode()
19 return encoded_parameter
20
21
22 @register.assignment_tag
23 def filter_has_perm(perm, user, objects):
24 """Filter a list of objects based on user permissions."""
25 if not hasattr(user, 'has_perm'):
26 # If the swapped user model does not support permissions, all objects
27 # will be returned. This is taken from rules.templatetags.has_perm.
28 return objects
29 else:
30 return [obj for obj in objects if user.has_perm(perm, obj)]
31
32
33 @register.filter
34 def percentage(value, max_value):
35 return round(value / max_value * 100)
36
37
38 @register.assignment_tag
39 def project_tile_image(project):
40 return project.tile_image or project.image or None
41
42
43 @register.assignment_tag
44 def project_tile_image_copyright(project):
45 if project.tile_image:
46 return project.tile_image_copyright
47 elif project.image:
48 return project.image_copyright
49 else:
50 return None
51
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/meinberlin/apps/contrib/templatetags/contrib_tags.py b/meinberlin/apps/contrib/templatetags/contrib_tags.py
--- a/meinberlin/apps/contrib/templatetags/contrib_tags.py
+++ b/meinberlin/apps/contrib/templatetags/contrib_tags.py
@@ -1,5 +1,8 @@
from django import template
+from django.forms.utils import flatatt
+from django.template import defaultfilters
from django.template.loader import render_to_string
+from django.utils.safestring import mark_safe
register = template.Library()
@@ -48,3 +51,18 @@
return project.image_copyright
else:
return None
+
+
[email protected]_tag()
+def html_date(value, displayfmt=None, datetimefmt='c', **kwargs):
+ """Format a date and wrap it in a html <time> element.
+
+ Additional html attributes may be provided as kwargs (e.g. 'class').
+ """
+ displaydate = defaultfilters.date(value, displayfmt)
+ datetime = defaultfilters.date(value, datetimefmt)
+ attribs = flatatt(kwargs)
+ result = '<time %s datetime="%s">%s</time>' % (attribs,
+ datetime,
+ displaydate)
+ return mark_safe(result)
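For illustration, a dependency-free imitation of the `html_date` helper above, showing the markup shape it produces; the function name, formats and sample values are invented here, and this is not the Django implementation itself:

```python
from datetime import date


def html_date_sketch(value, displayfmt, **attrs):
    """Imitate the tag above: wrap a formatted date in a <time> element."""
    display = value.strftime(displayfmt)      # human-readable text
    machine = value.isoformat()               # ISO form for the datetime attribute
    attribs = "".join(' {}="{}"'.format(k, v) for k, v in attrs.items())
    return '<time{} datetime="{}">{}</time>'.format(attribs, machine, display)


print(html_date_sketch(date(2021, 7, 21), "%d.%m.%Y", **{"class": "deadline"}))
# <time class="deadline" datetime="2021-07-21">21.07.2021</time>
```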
| {"golden_diff": "diff --git a/meinberlin/apps/contrib/templatetags/contrib_tags.py b/meinberlin/apps/contrib/templatetags/contrib_tags.py\n--- a/meinberlin/apps/contrib/templatetags/contrib_tags.py\n+++ b/meinberlin/apps/contrib/templatetags/contrib_tags.py\n@@ -1,5 +1,8 @@\n from django import template\n+from django.forms.utils import flatatt\n+from django.template import defaultfilters\n from django.template.loader import render_to_string\n+from django.utils.safestring import mark_safe\n \n register = template.Library()\n \n@@ -48,3 +51,18 @@\n return project.image_copyright\n else:\n return None\n+\n+\[email protected]_tag()\n+def html_date(value, displayfmt=None, datetimefmt='c', **kwargs):\n+ \"\"\"Format a date and wrap it in a html <time> element.\n+\n+ Additional html attributes may be provided as kwargs (e.g. 'class').\n+ \"\"\"\n+ displaydate = defaultfilters.date(value, displayfmt)\n+ datetime = defaultfilters.date(value, datetimefmt)\n+ attribs = flatatt(kwargs)\n+ result = '<time %s datetime=\"%s\">%s</time>' % (attribs,\n+ datetime,\n+ displaydate)\n+ return mark_safe(result)\n", "issue": "Use <time> element for dates\nThis way screen readers (and other ATs) know that it should be read as a date.\n", "before_files": [{"content": "from django import template\nfrom django.template.loader import render_to_string\n\nregister = template.Library()\n\n\[email protected]_tag\ndef include_template_string(template, **kwargs):\n rendered_template = render_to_string(template, kwargs)\n return str(rendered_template)\n\n\[email protected]_tag\ndef combined_url_parameter(request_query_dict, **kwargs):\n combined_query_dict = request_query_dict.copy()\n for key in kwargs:\n combined_query_dict.setlist(key, [kwargs[key]])\n encoded_parameter = '?' + combined_query_dict.urlencode()\n return encoded_parameter\n\n\[email protected]_tag\ndef filter_has_perm(perm, user, objects):\n \"\"\"Filter a list of objects based on user permissions.\"\"\"\n if not hasattr(user, 'has_perm'):\n # If the swapped user model does not support permissions, all objects\n # will be returned. This is taken from rules.templatetags.has_perm.\n return objects\n else:\n return [obj for obj in objects if user.has_perm(perm, obj)]\n\n\[email protected]\ndef percentage(value, max_value):\n return round(value / max_value * 100)\n\n\[email protected]_tag\ndef project_tile_image(project):\n return project.tile_image or project.image or None\n\n\[email protected]_tag\ndef project_tile_image_copyright(project):\n if project.tile_image:\n return project.tile_image_copyright\n elif project.image:\n return project.image_copyright\n else:\n return None\n", "path": "meinberlin/apps/contrib/templatetags/contrib_tags.py"}], "after_files": [{"content": "from django import template\nfrom django.forms.utils import flatatt\nfrom django.template import defaultfilters\nfrom django.template.loader import render_to_string\nfrom django.utils.safestring import mark_safe\n\nregister = template.Library()\n\n\[email protected]_tag\ndef include_template_string(template, **kwargs):\n rendered_template = render_to_string(template, kwargs)\n return str(rendered_template)\n\n\[email protected]_tag\ndef combined_url_parameter(request_query_dict, **kwargs):\n combined_query_dict = request_query_dict.copy()\n for key in kwargs:\n combined_query_dict.setlist(key, [kwargs[key]])\n encoded_parameter = '?' 
+ combined_query_dict.urlencode()\n return encoded_parameter\n\n\[email protected]_tag\ndef filter_has_perm(perm, user, objects):\n \"\"\"Filter a list of objects based on user permissions.\"\"\"\n if not hasattr(user, 'has_perm'):\n # If the swapped user model does not support permissions, all objects\n # will be returned. This is taken from rules.templatetags.has_perm.\n return objects\n else:\n return [obj for obj in objects if user.has_perm(perm, obj)]\n\n\[email protected]\ndef percentage(value, max_value):\n return round(value / max_value * 100)\n\n\[email protected]_tag\ndef project_tile_image(project):\n return project.tile_image or project.image or None\n\n\[email protected]_tag\ndef project_tile_image_copyright(project):\n if project.tile_image:\n return project.tile_image_copyright\n elif project.image:\n return project.image_copyright\n else:\n return None\n\n\[email protected]_tag()\ndef html_date(value, displayfmt=None, datetimefmt='c', **kwargs):\n \"\"\"Format a date and wrap it in a html <time> element.\n\n Additional html attributes may be provided as kwargs (e.g. 'class').\n \"\"\"\n displaydate = defaultfilters.date(value, displayfmt)\n datetime = defaultfilters.date(value, datetimefmt)\n attribs = flatatt(kwargs)\n result = '<time %s datetime=\"%s\">%s</time>' % (attribs,\n datetime,\n displaydate)\n return mark_safe(result)\n", "path": "meinberlin/apps/contrib/templatetags/contrib_tags.py"}]} | 704 | 296 |
gh_patches_debug_13218 | rasdani/github-patches | git_diff | getredash__redash-3298 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Pending invitation links broken in latest version
<!--
We use GitHub only for bug reports 🐛
Anything else should be posted to https://discuss.redash.io 👫
🚨For support, help & questions use https://discuss.redash.io/c/support
💡For feature requests & ideas use https://discuss.redash.io/c/feature-requests
**Found a security vulnerability?** Please email [email protected] to report any security vulnerabilities. We will acknowledge receipt of your vulnerability and strive to send you regular updates about our progress. If you're curious about the status of your disclosure please feel free to email us again. If you want to encrypt your disclosure email, you can use this PGP key.
-->
### Issue Summary
It looks like the change in #3261 doesn't handle the case of existing users that haven't accepted their invitations yet, so users trying to sign up using invitations created in a previous version incorrectly get the "This invitation has already been accepted" message. https://github.com/getredash/redash/pull/3229/files#diff-a0c5448134fdb627ae48d25bad76393fR40
I was able to fix this in our org with:
```sql
UPDATE users
SET details = '{"is_invitation_pending": true}'::json
WHERE password_hash IS NULL
```
Maybe this case should be handled in the migration here? https://github.com/getredash/redash/blob/master/migrations/versions/e7f8a917aa8e_add_user_details_json_column.py
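For what it's worth, a rough sketch of how that backfill could look as an Alembic data migration, mirroring the SQL above; the revision identifiers are placeholders and the table/column names come from the issue text rather than from inspecting the actual redash schema:

```python
"""Hypothetical data-fix migration; ids and ordering are placeholders."""
from alembic import op

revision = "000000000000"        # placeholder id for the sketch
down_revision = "e7f8a917aa8e"   # the details-column revision linked above
branch_labels = None
depends_on = None


def upgrade():
    # Treat every user that never set a password as still having a pending
    # invitation, mirroring the manual SQL workaround from the issue.
    op.execute(
        """
        UPDATE users
        SET details = '{"is_invitation_pending": true}'::json
        WHERE password_hash IS NULL
        """
    )


def downgrade():
    # The backfill cannot be meaningfully reversed, so downgrade is a no-op.
    pass
```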
### Steps to Reproduce
1. Send invitation from a version prior to #3229 and #3261
2. Upgrade to current master
3. Try to access the invitation link
Any other info e.g. Why do you consider this to be a bug? What did you expect to happen instead?
### Technical details:
* Redash Version:
* Browser/OS:
* How did you install Redash:
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `redash/handlers/authentication.py`
Content:
```
1 import logging
2
3 from flask import abort, flash, redirect, render_template, request, url_for
4
5 from flask_login import current_user, login_required, login_user, logout_user
6 from redash import __version__, limiter, models, settings
7 from redash.authentication import current_org, get_login_url, get_next_path
8 from redash.authentication.account import (BadSignature, SignatureExpired,
9 send_password_reset_email,
10 send_verify_email,
11 validate_token)
12 from redash.handlers import routes
13 from redash.handlers.base import json_response, org_scoped_rule
14 from redash.version_check import get_latest_version
15 from sqlalchemy.orm.exc import NoResultFound
16
17 logger = logging.getLogger(__name__)
18
19
20 def get_google_auth_url(next_path):
21 if settings.MULTI_ORG:
22 google_auth_url = url_for('google_oauth.authorize_org', next=next_path, org_slug=current_org.slug)
23 else:
24 google_auth_url = url_for('google_oauth.authorize', next=next_path)
25 return google_auth_url
26
27
28 def render_token_login_page(template, org_slug, token):
29 try:
30 user_id = validate_token(token)
31 org = current_org._get_current_object()
32 user = models.User.get_by_id_and_org(user_id, org)
33 except NoResultFound:
34 logger.exception("Bad user id in token. Token= , User id= %s, Org=%s", user_id, token, org_slug)
35 return render_template("error.html", error_message="Invalid invite link. Please ask for a new one."), 400
36 except (SignatureExpired, BadSignature):
37 logger.exception("Failed to verify invite token: %s, org=%s", token, org_slug)
38 return render_template("error.html",
39 error_message="Your invite link has expired. Please ask for a new one."), 400
40
41 if not user.is_invitation_pending:
42 return render_template("error.html",
43 error_message=("This invitation has already been accepted. "
44 "Please try resetting your password instead.")), 400
45
46 status_code = 200
47 if request.method == 'POST':
48 if 'password' not in request.form:
49 flash('Bad Request')
50 status_code = 400
51 elif not request.form['password']:
52 flash('Cannot use empty password.')
53 status_code = 400
54 elif len(request.form['password']) < 6:
55 flash('Password length is too short (<6).')
56 status_code = 400
57 else:
58 user.is_invitation_pending = False
59 user.hash_password(request.form['password'])
60 models.db.session.add(user)
61 login_user(user)
62 models.db.session.commit()
63 return redirect(url_for('redash.index', org_slug=org_slug))
64
65 google_auth_url = get_google_auth_url(url_for('redash.index', org_slug=org_slug))
66
67 return render_template(template,
68 show_google_openid=settings.GOOGLE_OAUTH_ENABLED,
69 google_auth_url=google_auth_url,
70 show_saml_login=current_org.get_setting('auth_saml_enabled'),
71 show_remote_user_login=settings.REMOTE_USER_LOGIN_ENABLED,
72 show_ldap_login=settings.LDAP_LOGIN_ENABLED,
73 org_slug=org_slug,
74 user=user), status_code
75
76
77 @routes.route(org_scoped_rule('/invite/<token>'), methods=['GET', 'POST'])
78 def invite(token, org_slug=None):
79 return render_token_login_page("invite.html", org_slug, token)
80
81
82 @routes.route(org_scoped_rule('/reset/<token>'), methods=['GET', 'POST'])
83 def reset(token, org_slug=None):
84 return render_token_login_page("reset.html", org_slug, token)
85
86
87 @routes.route(org_scoped_rule('/verify/<token>'), methods=['GET'])
88 def verify(token, org_slug=None):
89 try:
90 user_id = validate_token(token)
91 org = current_org._get_current_object()
92 user = models.User.get_by_id_and_org(user_id, org)
93 except (BadSignature, NoResultFound):
94 logger.exception("Failed to verify email verification token: %s, org=%s", token, org_slug)
95 return render_template("error.html",
96 error_message="Your verification link is invalid. Please ask for a new one."), 400
97
98 user.is_email_verified = True
99 models.db.session.add(user)
100 models.db.session.commit()
101
102 return render_template("verify.html", org_slug=org_slug)
103
104
105 @routes.route(org_scoped_rule('/forgot'), methods=['GET', 'POST'])
106 def forgot_password(org_slug=None):
107 if not current_org.get_setting('auth_password_login_enabled'):
108 abort(404)
109
110 submitted = False
111 if request.method == 'POST' and request.form['email']:
112 submitted = True
113 email = request.form['email']
114 try:
115 org = current_org._get_current_object()
116 user = models.User.get_by_email_and_org(email, org)
117 send_password_reset_email(user)
118 except NoResultFound:
119 logging.error("No user found for forgot password: %s", email)
120
121 return render_template("forgot.html", submitted=submitted)
122
123
124 @routes.route(org_scoped_rule('/verification_email'), methods=['POST'])
125 def verification_email(org_slug=None):
126 if not current_user.is_email_verified:
127 send_verify_email(current_user, current_org)
128
129 return json_response({
130 "message": "Please check your email inbox in order to verify your email address."
131 })
132
133
134 @routes.route(org_scoped_rule('/login'), methods=['GET', 'POST'])
135 @limiter.limit(settings.THROTTLE_LOGIN_PATTERN)
136 def login(org_slug=None):
137 # We intentionally use == as otherwise it won't actually use the proxy. So weird :O
138 # noinspection PyComparisonWithNone
139 if current_org == None and not settings.MULTI_ORG:
140 return redirect('/setup')
141 elif current_org == None:
142 return redirect('/')
143
144 index_url = url_for('redash.index', org_slug=org_slug)
145 unsafe_next_path = request.args.get('next', index_url)
146 next_path = get_next_path(unsafe_next_path)
147 if current_user.is_authenticated:
148 return redirect(next_path)
149
150 if request.method == 'POST':
151 try:
152 org = current_org._get_current_object()
153 user = models.User.get_by_email_and_org(request.form['email'], org)
154 if user and not user.is_disabled and user.verify_password(request.form['password']):
155 remember = ('remember' in request.form)
156 login_user(user, remember=remember)
157 return redirect(next_path)
158 else:
159 flash("Wrong email or password.")
160 except NoResultFound:
161 flash("Wrong email or password.")
162
163 google_auth_url = get_google_auth_url(next_path)
164
165 return render_template("login.html",
166 org_slug=org_slug,
167 next=next_path,
168 email=request.form.get('email', ''),
169 show_google_openid=settings.GOOGLE_OAUTH_ENABLED,
170 google_auth_url=google_auth_url,
171 show_password_login=current_org.get_setting('auth_password_login_enabled'),
172 show_saml_login=current_org.get_setting('auth_saml_enabled'),
173 show_remote_user_login=settings.REMOTE_USER_LOGIN_ENABLED,
174 show_ldap_login=settings.LDAP_LOGIN_ENABLED)
175
176
177 @routes.route(org_scoped_rule('/logout'))
178 def logout(org_slug=None):
179 logout_user()
180 return redirect(get_login_url(next=None))
181
182
183 def base_href():
184 if settings.MULTI_ORG:
185 base_href = url_for('redash.index', _external=True, org_slug=current_org.slug)
186 else:
187 base_href = url_for('redash.index', _external=True)
188
189 return base_href
190
191
192 def date_format_config():
193 date_format = current_org.get_setting('date_format')
194 date_format_list = set(["DD/MM/YY", "MM/DD/YY", "YYYY-MM-DD", settings.DATE_FORMAT])
195 return {
196 'dateFormat': date_format,
197 'dateFormatList': list(date_format_list),
198 'dateTimeFormat': "{0} HH:mm".format(date_format),
199 }
200
201
202 def client_config():
203 if not current_user.is_api_user() and current_user.is_authenticated:
204 client_config = {
205 'newVersionAvailable': bool(get_latest_version()),
206 'version': __version__
207 }
208 else:
209 client_config = {}
210
211 defaults = {
212 'allowScriptsInUserInput': settings.ALLOW_SCRIPTS_IN_USER_INPUT,
213 'showPermissionsControl': current_org.get_setting("feature_show_permissions_control"),
214 'allowCustomJSVisualizations': settings.FEATURE_ALLOW_CUSTOM_JS_VISUALIZATIONS,
215 'autoPublishNamedQueries': settings.FEATURE_AUTO_PUBLISH_NAMED_QUERIES,
216 'mailSettingsMissing': settings.MAIL_DEFAULT_SENDER is None,
217 'dashboardRefreshIntervals': settings.DASHBOARD_REFRESH_INTERVALS,
218 'queryRefreshIntervals': settings.QUERY_REFRESH_INTERVALS,
219 'googleLoginEnabled': settings.GOOGLE_OAUTH_ENABLED,
220 'pageSize': settings.PAGE_SIZE,
221 'pageSizeOptions': settings.PAGE_SIZE_OPTIONS,
222 }
223
224 client_config.update(defaults)
225 client_config.update({
226 'basePath': base_href()
227 })
228 client_config.update(date_format_config())
229
230 return client_config
231
232
233 @routes.route('/api/config', methods=['GET'])
234 def config(org_slug=None):
235 return json_response({
236 'org_slug': current_org.slug,
237 'client_config': client_config()
238 })
239
240
241 @routes.route(org_scoped_rule('/api/session'), methods=['GET'])
242 @login_required
243 def session(org_slug=None):
244 if current_user.is_api_user():
245 user = {
246 'permissions': [],
247 'apiKey': current_user.id
248 }
249 else:
250 user = {
251 'profile_image_url': current_user.profile_image_url,
252 'id': current_user.id,
253 'name': current_user.name,
254 'email': current_user.email,
255 'groups': current_user.group_ids,
256 'permissions': current_user.permissions,
257 'is_email_verified': current_user.is_email_verified
258 }
259
260 return json_response({
261 'user': user,
262 'org_slug': current_org.slug,
263 'client_config': client_config()
264 })
265
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/redash/handlers/authentication.py b/redash/handlers/authentication.py
--- a/redash/handlers/authentication.py
+++ b/redash/handlers/authentication.py
@@ -38,7 +38,7 @@
return render_template("error.html",
error_message="Your invite link has expired. Please ask for a new one."), 400
- if not user.is_invitation_pending:
+ if user.details.get('is_invitation_pending') is False:
return render_template("error.html",
error_message=("This invitation has already been accepted. "
"Please try resetting your password instead.")), 400
| {"golden_diff": "diff --git a/redash/handlers/authentication.py b/redash/handlers/authentication.py\n--- a/redash/handlers/authentication.py\n+++ b/redash/handlers/authentication.py\n@@ -38,7 +38,7 @@\n return render_template(\"error.html\",\n error_message=\"Your invite link has expired. Please ask for a new one.\"), 400\n \n- if not user.is_invitation_pending:\n+ if user.details.get('is_invitation_pending') is False:\n return render_template(\"error.html\",\n error_message=(\"This invitation has already been accepted. \"\n \"Please try resetting your password instead.\")), 400\n", "issue": "Pending invitation links broken in latest version\n\r\n<!--\r\n\r\nWe use GitHub only for bug reports \ud83d\udc1b\r\n\r\nAnything else should be posted to https://discuss.redash.io \ud83d\udc6b\r\n\r\n\ud83d\udea8For support, help & questions use https://discuss.redash.io/c/support\r\n\ud83d\udca1For feature requests & ideas use https://discuss.redash.io/c/feature-requests\r\n\r\n**Found a security vulnerability?** Please email [email protected] to report any security vulnerabilities. We will acknowledge receipt of your vulnerability and strive to send you regular updates about our progress. If you're curious about the status of your disclosure please feel free to email us again. If you want to encrypt your disclosure email, you can use this PGP key.\r\n\r\n-->\r\n\r\n### Issue Summary\r\n\r\nIt looks like the change in #3261 doesn't handle the case of existing users that haven't accepted their invitations yet, so users trying to sign up using invitations created in a previous version incorrectly get the \"This invitation has already been accepted\" message. https://github.com/getredash/redash/pull/3229/files#diff-a0c5448134fdb627ae48d25bad76393fR40\r\n\r\nI was able to fix this in our org with:\r\n\r\n```sql\r\nUPDATE users\r\nSET details = '{\"is_invitation_pending\": true}'::json\r\nWHERE password_hash IS NULL\r\n```\r\n\r\nMaybe this case should be handled in the migration here? https://github.com/getredash/redash/blob/master/migrations/versions/e7f8a917aa8e_add_user_details_json_column.py\r\n\r\n### Steps to Reproduce\r\n\r\n1. Send invitation from a version prior to #3229 and #3261 \r\n2. Upgrade to current master\r\n3. Try to access the invitation link\r\n\r\nAny other info e.g. Why do you consider this to be a bug? 
What did you expect to happen instead?\r\n\r\n### Technical details:\r\n\r\n* Redash Version:\r\n* Browser/OS:\r\n* How did you install Redash:\r\n\n", "before_files": [{"content": "import logging\n\nfrom flask import abort, flash, redirect, render_template, request, url_for\n\nfrom flask_login import current_user, login_required, login_user, logout_user\nfrom redash import __version__, limiter, models, settings\nfrom redash.authentication import current_org, get_login_url, get_next_path\nfrom redash.authentication.account import (BadSignature, SignatureExpired,\n send_password_reset_email,\n send_verify_email,\n validate_token)\nfrom redash.handlers import routes\nfrom redash.handlers.base import json_response, org_scoped_rule\nfrom redash.version_check import get_latest_version\nfrom sqlalchemy.orm.exc import NoResultFound\n\nlogger = logging.getLogger(__name__)\n\n\ndef get_google_auth_url(next_path):\n if settings.MULTI_ORG:\n google_auth_url = url_for('google_oauth.authorize_org', next=next_path, org_slug=current_org.slug)\n else:\n google_auth_url = url_for('google_oauth.authorize', next=next_path)\n return google_auth_url\n\n\ndef render_token_login_page(template, org_slug, token):\n try:\n user_id = validate_token(token)\n org = current_org._get_current_object()\n user = models.User.get_by_id_and_org(user_id, org)\n except NoResultFound:\n logger.exception(\"Bad user id in token. Token= , User id= %s, Org=%s\", user_id, token, org_slug)\n return render_template(\"error.html\", error_message=\"Invalid invite link. Please ask for a new one.\"), 400\n except (SignatureExpired, BadSignature):\n logger.exception(\"Failed to verify invite token: %s, org=%s\", token, org_slug)\n return render_template(\"error.html\",\n error_message=\"Your invite link has expired. Please ask for a new one.\"), 400\n\n if not user.is_invitation_pending:\n return render_template(\"error.html\",\n error_message=(\"This invitation has already been accepted. 
\"\n \"Please try resetting your password instead.\")), 400\n\n status_code = 200\n if request.method == 'POST':\n if 'password' not in request.form:\n flash('Bad Request')\n status_code = 400\n elif not request.form['password']:\n flash('Cannot use empty password.')\n status_code = 400\n elif len(request.form['password']) < 6:\n flash('Password length is too short (<6).')\n status_code = 400\n else:\n user.is_invitation_pending = False\n user.hash_password(request.form['password'])\n models.db.session.add(user)\n login_user(user)\n models.db.session.commit()\n return redirect(url_for('redash.index', org_slug=org_slug))\n\n google_auth_url = get_google_auth_url(url_for('redash.index', org_slug=org_slug))\n\n return render_template(template,\n show_google_openid=settings.GOOGLE_OAUTH_ENABLED,\n google_auth_url=google_auth_url,\n show_saml_login=current_org.get_setting('auth_saml_enabled'),\n show_remote_user_login=settings.REMOTE_USER_LOGIN_ENABLED,\n show_ldap_login=settings.LDAP_LOGIN_ENABLED,\n org_slug=org_slug,\n user=user), status_code\n\n\[email protected](org_scoped_rule('/invite/<token>'), methods=['GET', 'POST'])\ndef invite(token, org_slug=None):\n return render_token_login_page(\"invite.html\", org_slug, token)\n\n\[email protected](org_scoped_rule('/reset/<token>'), methods=['GET', 'POST'])\ndef reset(token, org_slug=None):\n return render_token_login_page(\"reset.html\", org_slug, token)\n\n\[email protected](org_scoped_rule('/verify/<token>'), methods=['GET'])\ndef verify(token, org_slug=None):\n try:\n user_id = validate_token(token)\n org = current_org._get_current_object()\n user = models.User.get_by_id_and_org(user_id, org)\n except (BadSignature, NoResultFound):\n logger.exception(\"Failed to verify email verification token: %s, org=%s\", token, org_slug)\n return render_template(\"error.html\",\n error_message=\"Your verification link is invalid. Please ask for a new one.\"), 400\n\n user.is_email_verified = True\n models.db.session.add(user)\n models.db.session.commit()\n\n return render_template(\"verify.html\", org_slug=org_slug)\n\n\[email protected](org_scoped_rule('/forgot'), methods=['GET', 'POST'])\ndef forgot_password(org_slug=None):\n if not current_org.get_setting('auth_password_login_enabled'):\n abort(404)\n\n submitted = False\n if request.method == 'POST' and request.form['email']:\n submitted = True\n email = request.form['email']\n try:\n org = current_org._get_current_object()\n user = models.User.get_by_email_and_org(email, org)\n send_password_reset_email(user)\n except NoResultFound:\n logging.error(\"No user found for forgot password: %s\", email)\n\n return render_template(\"forgot.html\", submitted=submitted)\n\n\[email protected](org_scoped_rule('/verification_email'), methods=['POST'])\ndef verification_email(org_slug=None):\n if not current_user.is_email_verified:\n send_verify_email(current_user, current_org)\n\n return json_response({\n \"message\": \"Please check your email inbox in order to verify your email address.\"\n })\n\n\[email protected](org_scoped_rule('/login'), methods=['GET', 'POST'])\[email protected](settings.THROTTLE_LOGIN_PATTERN)\ndef login(org_slug=None):\n # We intentionally use == as otherwise it won't actually use the proxy. 
So weird :O\n # noinspection PyComparisonWithNone\n if current_org == None and not settings.MULTI_ORG:\n return redirect('/setup')\n elif current_org == None:\n return redirect('/')\n\n index_url = url_for('redash.index', org_slug=org_slug)\n unsafe_next_path = request.args.get('next', index_url)\n next_path = get_next_path(unsafe_next_path)\n if current_user.is_authenticated:\n return redirect(next_path)\n\n if request.method == 'POST':\n try:\n org = current_org._get_current_object()\n user = models.User.get_by_email_and_org(request.form['email'], org)\n if user and not user.is_disabled and user.verify_password(request.form['password']):\n remember = ('remember' in request.form)\n login_user(user, remember=remember)\n return redirect(next_path)\n else:\n flash(\"Wrong email or password.\")\n except NoResultFound:\n flash(\"Wrong email or password.\")\n\n google_auth_url = get_google_auth_url(next_path)\n\n return render_template(\"login.html\",\n org_slug=org_slug,\n next=next_path,\n email=request.form.get('email', ''),\n show_google_openid=settings.GOOGLE_OAUTH_ENABLED,\n google_auth_url=google_auth_url,\n show_password_login=current_org.get_setting('auth_password_login_enabled'),\n show_saml_login=current_org.get_setting('auth_saml_enabled'),\n show_remote_user_login=settings.REMOTE_USER_LOGIN_ENABLED,\n show_ldap_login=settings.LDAP_LOGIN_ENABLED)\n\n\[email protected](org_scoped_rule('/logout'))\ndef logout(org_slug=None):\n logout_user()\n return redirect(get_login_url(next=None))\n\n\ndef base_href():\n if settings.MULTI_ORG:\n base_href = url_for('redash.index', _external=True, org_slug=current_org.slug)\n else:\n base_href = url_for('redash.index', _external=True)\n\n return base_href\n\n\ndef date_format_config():\n date_format = current_org.get_setting('date_format')\n date_format_list = set([\"DD/MM/YY\", \"MM/DD/YY\", \"YYYY-MM-DD\", settings.DATE_FORMAT])\n return {\n 'dateFormat': date_format,\n 'dateFormatList': list(date_format_list),\n 'dateTimeFormat': \"{0} HH:mm\".format(date_format),\n }\n\n\ndef client_config():\n if not current_user.is_api_user() and current_user.is_authenticated:\n client_config = {\n 'newVersionAvailable': bool(get_latest_version()),\n 'version': __version__\n }\n else:\n client_config = {}\n\n defaults = {\n 'allowScriptsInUserInput': settings.ALLOW_SCRIPTS_IN_USER_INPUT,\n 'showPermissionsControl': current_org.get_setting(\"feature_show_permissions_control\"),\n 'allowCustomJSVisualizations': settings.FEATURE_ALLOW_CUSTOM_JS_VISUALIZATIONS,\n 'autoPublishNamedQueries': settings.FEATURE_AUTO_PUBLISH_NAMED_QUERIES,\n 'mailSettingsMissing': settings.MAIL_DEFAULT_SENDER is None,\n 'dashboardRefreshIntervals': settings.DASHBOARD_REFRESH_INTERVALS,\n 'queryRefreshIntervals': settings.QUERY_REFRESH_INTERVALS,\n 'googleLoginEnabled': settings.GOOGLE_OAUTH_ENABLED,\n 'pageSize': settings.PAGE_SIZE,\n 'pageSizeOptions': settings.PAGE_SIZE_OPTIONS,\n }\n\n client_config.update(defaults)\n client_config.update({\n 'basePath': base_href()\n })\n client_config.update(date_format_config())\n\n return client_config\n\n\[email protected]('/api/config', methods=['GET'])\ndef config(org_slug=None):\n return json_response({\n 'org_slug': current_org.slug,\n 'client_config': client_config()\n })\n\n\[email protected](org_scoped_rule('/api/session'), methods=['GET'])\n@login_required\ndef session(org_slug=None):\n if current_user.is_api_user():\n user = {\n 'permissions': [],\n 'apiKey': current_user.id\n }\n else:\n user = {\n 'profile_image_url': 
current_user.profile_image_url,\n 'id': current_user.id,\n 'name': current_user.name,\n 'email': current_user.email,\n 'groups': current_user.group_ids,\n 'permissions': current_user.permissions,\n 'is_email_verified': current_user.is_email_verified\n }\n\n return json_response({\n 'user': user,\n 'org_slug': current_org.slug,\n 'client_config': client_config()\n })\n", "path": "redash/handlers/authentication.py"}], "after_files": [{"content": "import logging\n\nfrom flask import abort, flash, redirect, render_template, request, url_for\n\nfrom flask_login import current_user, login_required, login_user, logout_user\nfrom redash import __version__, limiter, models, settings\nfrom redash.authentication import current_org, get_login_url, get_next_path\nfrom redash.authentication.account import (BadSignature, SignatureExpired,\n send_password_reset_email,\n send_verify_email,\n validate_token)\nfrom redash.handlers import routes\nfrom redash.handlers.base import json_response, org_scoped_rule\nfrom redash.version_check import get_latest_version\nfrom sqlalchemy.orm.exc import NoResultFound\n\nlogger = logging.getLogger(__name__)\n\n\ndef get_google_auth_url(next_path):\n if settings.MULTI_ORG:\n google_auth_url = url_for('google_oauth.authorize_org', next=next_path, org_slug=current_org.slug)\n else:\n google_auth_url = url_for('google_oauth.authorize', next=next_path)\n return google_auth_url\n\n\ndef render_token_login_page(template, org_slug, token):\n try:\n user_id = validate_token(token)\n org = current_org._get_current_object()\n user = models.User.get_by_id_and_org(user_id, org)\n except NoResultFound:\n logger.exception(\"Bad user id in token. Token= , User id= %s, Org=%s\", user_id, token, org_slug)\n return render_template(\"error.html\", error_message=\"Invalid invite link. Please ask for a new one.\"), 400\n except (SignatureExpired, BadSignature):\n logger.exception(\"Failed to verify invite token: %s, org=%s\", token, org_slug)\n return render_template(\"error.html\",\n error_message=\"Your invite link has expired. Please ask for a new one.\"), 400\n\n if user.details.get('is_invitation_pending') is False:\n return render_template(\"error.html\",\n error_message=(\"This invitation has already been accepted. 
\"\n \"Please try resetting your password instead.\")), 400\n\n status_code = 200\n if request.method == 'POST':\n if 'password' not in request.form:\n flash('Bad Request')\n status_code = 400\n elif not request.form['password']:\n flash('Cannot use empty password.')\n status_code = 400\n elif len(request.form['password']) < 6:\n flash('Password length is too short (<6).')\n status_code = 400\n else:\n user.is_invitation_pending = False\n user.hash_password(request.form['password'])\n models.db.session.add(user)\n login_user(user)\n models.db.session.commit()\n return redirect(url_for('redash.index', org_slug=org_slug))\n\n google_auth_url = get_google_auth_url(url_for('redash.index', org_slug=org_slug))\n\n return render_template(template,\n show_google_openid=settings.GOOGLE_OAUTH_ENABLED,\n google_auth_url=google_auth_url,\n show_saml_login=current_org.get_setting('auth_saml_enabled'),\n show_remote_user_login=settings.REMOTE_USER_LOGIN_ENABLED,\n show_ldap_login=settings.LDAP_LOGIN_ENABLED,\n org_slug=org_slug,\n user=user), status_code\n\n\[email protected](org_scoped_rule('/invite/<token>'), methods=['GET', 'POST'])\ndef invite(token, org_slug=None):\n return render_token_login_page(\"invite.html\", org_slug, token)\n\n\[email protected](org_scoped_rule('/reset/<token>'), methods=['GET', 'POST'])\ndef reset(token, org_slug=None):\n return render_token_login_page(\"reset.html\", org_slug, token)\n\n\[email protected](org_scoped_rule('/verify/<token>'), methods=['GET'])\ndef verify(token, org_slug=None):\n try:\n user_id = validate_token(token)\n org = current_org._get_current_object()\n user = models.User.get_by_id_and_org(user_id, org)\n except (BadSignature, NoResultFound):\n logger.exception(\"Failed to verify email verification token: %s, org=%s\", token, org_slug)\n return render_template(\"error.html\",\n error_message=\"Your verification link is invalid. Please ask for a new one.\"), 400\n\n user.is_email_verified = True\n models.db.session.add(user)\n models.db.session.commit()\n\n return render_template(\"verify.html\", org_slug=org_slug)\n\n\[email protected](org_scoped_rule('/forgot'), methods=['GET', 'POST'])\ndef forgot_password(org_slug=None):\n if not current_org.get_setting('auth_password_login_enabled'):\n abort(404)\n\n submitted = False\n if request.method == 'POST' and request.form['email']:\n submitted = True\n email = request.form['email']\n try:\n org = current_org._get_current_object()\n user = models.User.get_by_email_and_org(email, org)\n send_password_reset_email(user)\n except NoResultFound:\n logging.error(\"No user found for forgot password: %s\", email)\n\n return render_template(\"forgot.html\", submitted=submitted)\n\n\[email protected](org_scoped_rule('/verification_email'), methods=['POST'])\ndef verification_email(org_slug=None):\n if not current_user.is_email_verified:\n send_verify_email(current_user, current_org)\n\n return json_response({\n \"message\": \"Please check your email inbox in order to verify your email address.\"\n })\n\n\[email protected](org_scoped_rule('/login'), methods=['GET', 'POST'])\[email protected](settings.THROTTLE_LOGIN_PATTERN)\ndef login(org_slug=None):\n # We intentionally use == as otherwise it won't actually use the proxy. 
So weird :O\n # noinspection PyComparisonWithNone\n if current_org == None and not settings.MULTI_ORG:\n return redirect('/setup')\n elif current_org == None:\n return redirect('/')\n\n index_url = url_for('redash.index', org_slug=org_slug)\n unsafe_next_path = request.args.get('next', index_url)\n next_path = get_next_path(unsafe_next_path)\n if current_user.is_authenticated:\n return redirect(next_path)\n\n if request.method == 'POST':\n try:\n org = current_org._get_current_object()\n user = models.User.get_by_email_and_org(request.form['email'], org)\n if user and not user.is_disabled and user.verify_password(request.form['password']):\n remember = ('remember' in request.form)\n login_user(user, remember=remember)\n return redirect(next_path)\n else:\n flash(\"Wrong email or password.\")\n except NoResultFound:\n flash(\"Wrong email or password.\")\n\n google_auth_url = get_google_auth_url(next_path)\n\n return render_template(\"login.html\",\n org_slug=org_slug,\n next=next_path,\n email=request.form.get('email', ''),\n show_google_openid=settings.GOOGLE_OAUTH_ENABLED,\n google_auth_url=google_auth_url,\n show_password_login=current_org.get_setting('auth_password_login_enabled'),\n show_saml_login=current_org.get_setting('auth_saml_enabled'),\n show_remote_user_login=settings.REMOTE_USER_LOGIN_ENABLED,\n show_ldap_login=settings.LDAP_LOGIN_ENABLED)\n\n\[email protected](org_scoped_rule('/logout'))\ndef logout(org_slug=None):\n logout_user()\n return redirect(get_login_url(next=None))\n\n\ndef base_href():\n if settings.MULTI_ORG:\n base_href = url_for('redash.index', _external=True, org_slug=current_org.slug)\n else:\n base_href = url_for('redash.index', _external=True)\n\n return base_href\n\n\ndef date_format_config():\n date_format = current_org.get_setting('date_format')\n date_format_list = set([\"DD/MM/YY\", \"MM/DD/YY\", \"YYYY-MM-DD\", settings.DATE_FORMAT])\n return {\n 'dateFormat': date_format,\n 'dateFormatList': list(date_format_list),\n 'dateTimeFormat': \"{0} HH:mm\".format(date_format),\n }\n\n\ndef client_config():\n if not current_user.is_api_user() and current_user.is_authenticated:\n client_config = {\n 'newVersionAvailable': bool(get_latest_version()),\n 'version': __version__\n }\n else:\n client_config = {}\n\n defaults = {\n 'allowScriptsInUserInput': settings.ALLOW_SCRIPTS_IN_USER_INPUT,\n 'showPermissionsControl': current_org.get_setting(\"feature_show_permissions_control\"),\n 'allowCustomJSVisualizations': settings.FEATURE_ALLOW_CUSTOM_JS_VISUALIZATIONS,\n 'autoPublishNamedQueries': settings.FEATURE_AUTO_PUBLISH_NAMED_QUERIES,\n 'mailSettingsMissing': settings.MAIL_DEFAULT_SENDER is None,\n 'dashboardRefreshIntervals': settings.DASHBOARD_REFRESH_INTERVALS,\n 'queryRefreshIntervals': settings.QUERY_REFRESH_INTERVALS,\n 'googleLoginEnabled': settings.GOOGLE_OAUTH_ENABLED,\n 'pageSize': settings.PAGE_SIZE,\n 'pageSizeOptions': settings.PAGE_SIZE_OPTIONS,\n }\n\n client_config.update(defaults)\n client_config.update({\n 'basePath': base_href()\n })\n client_config.update(date_format_config())\n\n return client_config\n\n\[email protected]('/api/config', methods=['GET'])\ndef config(org_slug=None):\n return json_response({\n 'org_slug': current_org.slug,\n 'client_config': client_config()\n })\n\n\[email protected](org_scoped_rule('/api/session'), methods=['GET'])\n@login_required\ndef session(org_slug=None):\n if current_user.is_api_user():\n user = {\n 'permissions': [],\n 'apiKey': current_user.id\n }\n else:\n user = {\n 'profile_image_url': 
current_user.profile_image_url,\n 'id': current_user.id,\n 'name': current_user.name,\n 'email': current_user.email,\n 'groups': current_user.group_ids,\n 'permissions': current_user.permissions,\n 'is_email_verified': current_user.is_email_verified\n }\n\n return json_response({\n 'user': user,\n 'org_slug': current_org.slug,\n 'client_config': client_config()\n })\n", "path": "redash/handlers/authentication.py"}]} | 3,543 | 142 |
gh_patches_debug_356 | rasdani/github-patches | git_diff | zigpy__zha-device-handlers-4 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Missing import for types breaking LocalDataCluster
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `zhaquirks/__init__.py`
Content:
```
1 import importlib
2 import pkgutil
3 from zigpy.quirks import CustomCluster
4 from zigpy.util import ListenableMixin
5
6 UNKNOWN = 'Unknown'
7
8
9 class Bus(ListenableMixin):
10
11 def __init__(self, *args, **kwargs):
12 super().__init__(*args, **kwargs)
13 self._listeners = {}
14
15
16 class LocalDataCluster(CustomCluster):
17
18 def __init__(self, *args, **kwargs):
19 super().__init__(*args, **kwargs)
20
21 async def read_attributes_raw(self, attributes, manufacturer=None):
22 attributes = [types.uint16_t(a) for a in attributes]
23 v = [self._attr_cache.get(attr) for attr in attributes]
24 return v
25
26 def _update_attribute(self, attrid, value):
27 super()._update_attribute(attrid, value)
28
29
30 class EventableCluster(CustomCluster):
31
32 def __init__(self, *args, **kwargs):
33 super().__init__(*args, **kwargs)
34
35 def handle_cluster_request(self, tsn, command_id, args):
36 super().handle_cluster_request(tsn, command_id, args)
37 if self.server_commands is not None and\
38 self.server_commands.get(command_id) is not None:
39 self.listener_event(
40 'zha_send_event',
41 self,
42 self.server_commands.get(command_id)[0],
43 args
44 )
45
46 def _update_attribute(self, attrid, value):
47 super()._update_attribute(attrid, value)
48 self.listener_event(
49 'zha_send_event',
50 self,
51 'attribute_updated',
52 {
53 'attribute_id': attrid,
54 'attribute_name': self.attributes.get(attrid, [UNKNOWN])[0],
55 'value': value
56 }
57 )
58
59 name = __name__
60 path = __path__
61 for importer, modname, ispkg in pkgutil.walk_packages(
62 path=path,
63 prefix=name +'.'
64 ):
65 importlib.import_module(modname)
66
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/zhaquirks/__init__.py b/zhaquirks/__init__.py
--- a/zhaquirks/__init__.py
+++ b/zhaquirks/__init__.py
@@ -2,6 +2,7 @@
import pkgutil
from zigpy.quirks import CustomCluster
from zigpy.util import ListenableMixin
+import zigpy.types as types
UNKNOWN = 'Unknown'
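Without that import, the comprehension in `read_attributes_raw` references an undefined name the first time it runs; a stripped-down reproduction of the failure mode that needs no zigpy at all:

```python
def read_attributes_raw(attributes):
    # Same shape as the method above, minus zigpy: 'types' was never imported.
    return [types.uint16_t(a) for a in attributes]


try:
    read_attributes_raw([0x0000])
except NameError as exc:
    print(exc)  # name 'types' is not defined
```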
| {"golden_diff": "diff --git a/zhaquirks/__init__.py b/zhaquirks/__init__.py\n--- a/zhaquirks/__init__.py\n+++ b/zhaquirks/__init__.py\n@@ -2,6 +2,7 @@\n import pkgutil\n from zigpy.quirks import CustomCluster\n from zigpy.util import ListenableMixin\n+import zigpy.types as types\n \n UNKNOWN = 'Unknown'\n", "issue": "Missing import for types breaking LocalDataCluster\n\n", "before_files": [{"content": "import importlib\nimport pkgutil\nfrom zigpy.quirks import CustomCluster\nfrom zigpy.util import ListenableMixin\n\nUNKNOWN = 'Unknown'\n\n\nclass Bus(ListenableMixin):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._listeners = {}\n\n\nclass LocalDataCluster(CustomCluster):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n async def read_attributes_raw(self, attributes, manufacturer=None):\n attributes = [types.uint16_t(a) for a in attributes]\n v = [self._attr_cache.get(attr) for attr in attributes]\n return v\n\n def _update_attribute(self, attrid, value):\n super()._update_attribute(attrid, value)\n\n\nclass EventableCluster(CustomCluster):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n def handle_cluster_request(self, tsn, command_id, args):\n super().handle_cluster_request(tsn, command_id, args)\n if self.server_commands is not None and\\\n self.server_commands.get(command_id) is not None:\n self.listener_event(\n 'zha_send_event',\n self,\n self.server_commands.get(command_id)[0],\n args\n )\n\n def _update_attribute(self, attrid, value):\n super()._update_attribute(attrid, value)\n self.listener_event(\n 'zha_send_event',\n self,\n 'attribute_updated',\n {\n 'attribute_id': attrid,\n 'attribute_name': self.attributes.get(attrid, [UNKNOWN])[0],\n 'value': value\n }\n )\n\nname = __name__\npath = __path__\nfor importer, modname, ispkg in pkgutil.walk_packages(\n path=path,\n prefix=name +'.'\n ):\n importlib.import_module(modname)\n", "path": "zhaquirks/__init__.py"}], "after_files": [{"content": "import importlib\nimport pkgutil\nfrom zigpy.quirks import CustomCluster\nfrom zigpy.util import ListenableMixin\nimport zigpy.types as types\n\nUNKNOWN = 'Unknown'\n\n\nclass Bus(ListenableMixin):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._listeners = {}\n\n\nclass LocalDataCluster(CustomCluster):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n async def read_attributes_raw(self, attributes, manufacturer=None):\n attributes = [types.uint16_t(a) for a in attributes]\n v = [self._attr_cache.get(attr) for attr in attributes]\n return v\n\n def _update_attribute(self, attrid, value):\n super()._update_attribute(attrid, value)\n\n\nclass EventableCluster(CustomCluster):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n def handle_cluster_request(self, tsn, command_id, args):\n super().handle_cluster_request(tsn, command_id, args)\n if self.server_commands is not None and\\\n self.server_commands.get(command_id) is not None:\n self.listener_event(\n 'zha_send_event',\n self,\n self.server_commands.get(command_id)[0],\n args\n )\n\n def _update_attribute(self, attrid, value):\n super()._update_attribute(attrid, value)\n self.listener_event(\n 'zha_send_event',\n self,\n 'attribute_updated',\n {\n 'attribute_id': attrid,\n 'attribute_name': self.attributes.get(attrid, [UNKNOWN])[0],\n 'value': value\n }\n )\n\nname = __name__\npath = __path__\nfor importer, modname, ispkg in 
pkgutil.walk_packages(\n path=path,\n prefix=name +'.'\n ):\n importlib.import_module(modname)\n", "path": "zhaquirks/__init__.py"}]} | 812 | 90 |
gh_patches_debug_597 | rasdani/github-patches | git_diff | pex-tool__pex-1610 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Release 2.1.66
On the docket:
+ [x] Support specifying foreign platforms in full detail. #1597
+ [x] Respect PEX_ROOT in PEXEnvironment.mount. #1599
+ [x] Be able to see what .pex file is run from the list of system processes #1604
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pex/version.py`
Content:
```
1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 __version__ = "2.1.65"
5
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pex/version.py b/pex/version.py
--- a/pex/version.py
+++ b/pex/version.py
@@ -1,4 +1,4 @@
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
-__version__ = "2.1.65"
+__version__ = "2.1.66"
| {"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = \"2.1.65\"\n+__version__ = \"2.1.66\"\n", "issue": "Release 2.1.66\nOn the docket:\r\n+ [x] Support specifying foreign platforms in full detail. #1597\r\n+ [x] Respect PEX_ROOT in PEXEnvironment.mount. #1599 \r\n+ [x] Be able to see what .pex file is run from the list of system processes #1604 \n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.65\"\n", "path": "pex/version.py"}], "after_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.66\"\n", "path": "pex/version.py"}]} | 388 | 96 |
gh_patches_debug_34708 | rasdani/github-patches | git_diff | biolab__orange3-text-692 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Preprocess Text: consider pos tags when filtering tokens
**Describe the bug**
If pos tags exist before filtering, the final pos tags don't match the tokens. This is because tokens are filtered independently of pos tags and only zipped together at the end.
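In other words, dropping a token has to drop its paired tag in the same pass. A minimal, dependency-free sketch of that paired filtering (the sample tokens, tags and stopword set are invented):

```python
tokens = ["human", "machine", "interface", "for", "the", "lab"]
pos_tags = ["NN", "NN", "NN", "IN", "DT", "NN"]
stopwords = {"for", "the"}

# Filter the (token, tag) pairs together so the tag list stays aligned.
kept = [(tok, tag) for tok, tag in zip(tokens, pos_tags) if tok not in stopwords]
filtered_tokens = [tok for tok, _ in kept]
filtered_tags = [tag for _, tag in kept]

print(filtered_tokens)  # ['human', 'machine', 'interface', 'lab']
print(filtered_tags)    # ['NN', 'NN', 'NN', 'NN']
```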
**To Reproduce**
Steps to reproduce the behavior:
1. Corpus
2. Preprocess Text (have POS tagger before filtering on stopwords)
3. Check the result in Corpus Viewer
**Expected behavior**
If pos tags exist, they are filtered together with tokens.
**Orange version:**
3.30.dev
**Text add-on version:**
1.6.dev
**Screenshots**
<img width="509" alt="Screen Shot 2021-07-21 at 15 13 50" src="https://user-images.githubusercontent.com/12524972/126494455-90d78da1-d0d3-4a02-be65-e176268c4c73.png">
**Operating system:**
OSX
**Additional context**
/
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `orangecontrib/text/preprocess/filter.py`
Content:
```
1 from typing import List, Callable
2 import os
3 import re
4
5 import numpy as np
6 from gensim import corpora
7 from nltk.corpus import stopwords
8
9 from Orange.data.io import detect_encoding
10 from Orange.util import wrap_callback, dummy_callback
11
12 from orangecontrib.text import Corpus
13 from orangecontrib.text.misc import wait_nltk_data
14 from orangecontrib.text.preprocess import TokenizedPreprocessor
15
16 __all__ = ['BaseTokenFilter', 'StopwordsFilter', 'LexiconFilter',
17 'RegexpFilter', 'FrequencyFilter', 'MostFrequentTokensFilter',
18 'PosTagFilter']
19
20
21 class BaseTokenFilter(TokenizedPreprocessor):
22 def __call__(self, corpus: Corpus, callback: Callable = None) -> Corpus:
23 if callback is None:
24 callback = dummy_callback
25 corpus = super().__call__(corpus, wrap_callback(callback, end=0.2))
26 return self._filter_tokens(corpus, wrap_callback(callback, start=0.2))
27
28 def _filter_tokens(self, corpus: Corpus, callback: Callable) -> Corpus:
29 callback(0, "Filtering...")
30 tokens = [self._preprocess(tokens) for tokens in corpus.tokens]
31 corpus.store_tokens(tokens)
32 return corpus
33
34 def _preprocess(self, tokens: List) -> List:
35 return list(filter(self._check, tokens))
36
37 def _check(self, token: str) -> bool:
38 raise NotImplementedError
39
40
41 class FileWordListMixin:
42 def __init__(self, path: str = None):
43 self._lexicon = self.from_file(path)
44
45 @staticmethod
46 def from_file(path):
47 if not path:
48 return set()
49
50 for encoding in ('utf-8', None, detect_encoding(path)):
51 try:
52 with open(path, encoding=encoding) as f:
53 return set(line.strip() for line in f)
54 except UnicodeDecodeError:
55 continue
56 # No encoding worked, raise
57 raise UnicodeError("Couldn't determine file encoding")
58
59
60 class StopwordsFilter(BaseTokenFilter, FileWordListMixin):
61 """ Remove tokens present in NLTK's language specific lists or a file. """
62 name = 'Stopwords'
63
64 @wait_nltk_data
65 def __init__(self, language='English', path: str = None):
66 super().__init__()
67 FileWordListMixin.__init__(self, path)
68 self.__stopwords = set(x.strip() for x in
69 stopwords.words(language.lower())) \
70 if language else []
71
72 @staticmethod
73 @wait_nltk_data
74 def supported_languages():
75 # get NLTK list of stopwords
76 stopwords_listdir = []
77 try:
78 stopwords_listdir = [file for file in
79 os.listdir(stopwords._get_root())
80 if file.islower()]
81 except LookupError: # when no NLTK data is available
82 pass
83
84 return sorted(file.capitalize() for file in stopwords_listdir)
85
86 def _check(self, token):
87 return token not in self.__stopwords and token not in self._lexicon
88
89
90 class LexiconFilter(BaseTokenFilter, FileWordListMixin):
91 """ Keep only tokens present in a file. """
92 name = 'Lexicon'
93
94 def _check(self, token):
95 return not self._lexicon or token in self._lexicon
96
97
98 class RegexpFilter(BaseTokenFilter):
99 """ Remove tokens matching this regular expressions. """
100 name = 'Regexp'
101
102 def __init__(self, pattern=r'\.|,|:|!|\?'):
103 self._pattern = pattern
104 # Compiled Regexes are NOT deepcopy-able and hence to make Corpus deepcopy-able
105 # we cannot store then (due to Corpus also storing used_preprocessor for BoW compute values).
106 # To bypass the problem regex is compiled before every __call__ and discarded right after.
107 self.regex = None
108
109 def __call__(self, corpus: Corpus, callback: Callable = None) -> Corpus:
110 self.regex = re.compile(self._pattern)
111 corpus = super().__call__(corpus, callback)
112 self.regex = None
113 return corpus
114
115 @staticmethod
116 def validate_regexp(regexp):
117 try:
118 re.compile(regexp)
119 return True
120 except re.error:
121 return False
122
123 def _check(self, token):
124 return not self.regex.match(token)
125
126
127 class FitDictionaryFilter(BaseTokenFilter):
128 def __init__(self):
129 self._lexicon = None
130 self._dictionary = None
131
132 def __call__(self, corpus: Corpus, callback: Callable = None) -> Corpus:
133 if callback is None:
134 callback = dummy_callback
135 corpus = TokenizedPreprocessor.__call__(
136 self, corpus, wrap_callback(callback, end=0.2))
137 callback(0.2, "Fitting filter...")
138 self._fit(corpus)
139 return self._filter_tokens(corpus, wrap_callback(callback, start=0.6))
140
141 def _fit(self, corpus: Corpus):
142 raise NotImplemented
143
144 def _filter_tokens(self, corpus: Corpus, callback: Callable) -> Corpus:
145 callback(0, "Filtering...")
146 tokens = [self._preprocess(tokens) for tokens in corpus.tokens]
147 corpus.store_tokens(tokens, self._dictionary)
148 return corpus
149
150 def _check(self, token):
151 assert self._lexicon is not None
152 assert self._dictionary is not None
153 return token in self._lexicon
154
155
156 class FrequencyFilter(FitDictionaryFilter):
157 """Remove tokens with document frequency outside this range;
158 use either absolute or relative frequency. """
159 name = 'Document frequency'
160
161 def __init__(self, min_df=0., max_df=1.):
162 super().__init__()
163 self._corpus_len = 0
164 self._max_df = max_df
165 self._min_df = min_df
166
167 def _fit(self, corpus: Corpus):
168 self._corpus_len = len(corpus)
169 self._dictionary = corpora.Dictionary(corpus.tokens)
170 self._dictionary.filter_extremes(self.min_df, self.max_df, None)
171 self._lexicon = set(self._dictionary.token2id.keys())
172
173 @property
174 def max_df(self):
175 if isinstance(self._max_df, int):
176 return self._max_df / self._corpus_len if self._corpus_len else 1.
177 else:
178 return self._max_df
179
180 @property
181 def min_df(self):
182 if isinstance(self._min_df, float):
183 return int(self._corpus_len * self._min_df) or 1
184 else:
185 return self._min_df
186
187
188 class MostFrequentTokensFilter(FitDictionaryFilter):
189 """Keep most frequent tokens."""
190 name = 'Most frequent tokens'
191
192 def __init__(self, keep_n=None):
193 super().__init__()
194 self._keep_n = keep_n
195
196 def _fit(self, corpus: Corpus):
197 self._dictionary = corpora.Dictionary(corpus.tokens)
198 self._dictionary.filter_extremes(0, 1, self._keep_n)
199 self._lexicon = set(self._dictionary.token2id.keys())
200
201
202 class PosTagFilter(BaseTokenFilter):
203 """Keep selected POS tags."""
204 name = 'POS tags'
205
206 def __init__(self, tags=None):
207 self._tags = set(i.strip().upper() for i in tags.split(","))
208
209 def __call__(self, corpus: Corpus, callback: Callable = None) -> Corpus:
210 if callback is None:
211 callback = dummy_callback
212 corpus = super().__call__(corpus, wrap_callback(callback, end=0.2))
213 return self._filter_tokens(corpus, wrap_callback(callback, start=0.2))
214
215 @staticmethod
216 def validate_tags(tags):
217 # should we keep a dict of existing POS tags and compare them with
218 # input?
219 return len(tags.split(",")) > 0
220
221 def _filter_tokens(self, corpus: Corpus, callback: Callable) -> Corpus:
222 if corpus.pos_tags is None:
223 return corpus
224 callback(0, "Filtering...")
225 filtered_tags = []
226 filtered_tokens = []
227 for tags, tokens in zip(corpus.pos_tags, corpus.tokens):
228 tmp_tags = []
229 tmp_tokens = []
230 for tag, token in zip(tags, tokens):
231 # should we consider partial matches, i.e. "NN" for "NNS"?
232 if tag in self._tags:
233 tmp_tags.append(tag)
234 tmp_tokens.append(token)
235 filtered_tags.append(tmp_tags)
236 filtered_tokens.append(tmp_tokens)
237 corpus.store_tokens(filtered_tokens)
238 corpus.pos_tags = filtered_tags
239 return corpus
240
241 def _check(self, token: str) -> bool:
242 pass
243
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/orangecontrib/text/preprocess/filter.py b/orangecontrib/text/preprocess/filter.py
--- a/orangecontrib/text/preprocess/filter.py
+++ b/orangecontrib/text/preprocess/filter.py
@@ -1,3 +1,4 @@
+from itertools import compress
from typing import List, Callable
import os
import re
@@ -25,14 +26,27 @@
corpus = super().__call__(corpus, wrap_callback(callback, end=0.2))
return self._filter_tokens(corpus, wrap_callback(callback, start=0.2))
- def _filter_tokens(self, corpus: Corpus, callback: Callable) -> Corpus:
+ def _filter_tokens(self, corpus: Corpus, callback: Callable,
+ dictionary=None) -> Corpus:
callback(0, "Filtering...")
- tokens = [self._preprocess(tokens) for tokens in corpus.tokens]
- corpus.store_tokens(tokens)
+ filtered_tokens = []
+ filtered_tags = []
+ for i, tokens in enumerate(corpus.tokens):
+ filter_map = self._preprocess(tokens)
+ filtered_tokens.append(list(compress(tokens, filter_map)))
+ if corpus.pos_tags is not None:
+ filtered_tags.append(list(compress(corpus.pos_tags[i],
+ filter_map)))
+ if dictionary is None:
+ corpus.store_tokens(filtered_tokens)
+ else:
+ corpus.store_tokens(filtered_tokens, dictionary)
+ if filtered_tags:
+ corpus.pos_tags = np.array(filtered_tags, dtype=object)
return corpus
def _preprocess(self, tokens: List) -> List:
- return list(filter(self._check, tokens))
+ return [self._check(token) for token in tokens]
def _check(self, token: str) -> bool:
raise NotImplementedError
@@ -141,10 +155,10 @@
def _fit(self, corpus: Corpus):
raise NotImplemented
- def _filter_tokens(self, corpus: Corpus, callback: Callable) -> Corpus:
- callback(0, "Filtering...")
- tokens = [self._preprocess(tokens) for tokens in corpus.tokens]
- corpus.store_tokens(tokens, self._dictionary)
+ def _filter_tokens(self, corpus: Corpus, callback: Callable,
+ dictionary=None) -> Corpus:
+ corpus = super()._filter_tokens(corpus, callback,
+ dictionary=self._dictionary)
return corpus
def _check(self, token):
| {"golden_diff": "diff --git a/orangecontrib/text/preprocess/filter.py b/orangecontrib/text/preprocess/filter.py\n--- a/orangecontrib/text/preprocess/filter.py\n+++ b/orangecontrib/text/preprocess/filter.py\n@@ -1,3 +1,4 @@\n+from itertools import compress\n from typing import List, Callable\n import os\n import re\n@@ -25,14 +26,27 @@\n corpus = super().__call__(corpus, wrap_callback(callback, end=0.2))\n return self._filter_tokens(corpus, wrap_callback(callback, start=0.2))\n \n- def _filter_tokens(self, corpus: Corpus, callback: Callable) -> Corpus:\n+ def _filter_tokens(self, corpus: Corpus, callback: Callable,\n+ dictionary=None) -> Corpus:\n callback(0, \"Filtering...\")\n- tokens = [self._preprocess(tokens) for tokens in corpus.tokens]\n- corpus.store_tokens(tokens)\n+ filtered_tokens = []\n+ filtered_tags = []\n+ for i, tokens in enumerate(corpus.tokens):\n+ filter_map = self._preprocess(tokens)\n+ filtered_tokens.append(list(compress(tokens, filter_map)))\n+ if corpus.pos_tags is not None:\n+ filtered_tags.append(list(compress(corpus.pos_tags[i],\n+ filter_map)))\n+ if dictionary is None:\n+ corpus.store_tokens(filtered_tokens)\n+ else:\n+ corpus.store_tokens(filtered_tokens, dictionary)\n+ if filtered_tags:\n+ corpus.pos_tags = np.array(filtered_tags, dtype=object)\n return corpus\n \n def _preprocess(self, tokens: List) -> List:\n- return list(filter(self._check, tokens))\n+ return [self._check(token) for token in tokens]\n \n def _check(self, token: str) -> bool:\n raise NotImplementedError\n@@ -141,10 +155,10 @@\n def _fit(self, corpus: Corpus):\n raise NotImplemented\n \n- def _filter_tokens(self, corpus: Corpus, callback: Callable) -> Corpus:\n- callback(0, \"Filtering...\")\n- tokens = [self._preprocess(tokens) for tokens in corpus.tokens]\n- corpus.store_tokens(tokens, self._dictionary)\n+ def _filter_tokens(self, corpus: Corpus, callback: Callable,\n+ dictionary=None) -> Corpus:\n+ corpus = super()._filter_tokens(corpus, callback,\n+ dictionary=self._dictionary)\n return corpus\n \n def _check(self, token):\n", "issue": "Preprocess Text: consider pos tags when filtering tokens\n**Describe the bug**\r\nIf pos tags exists before filtering, final pos tags don't match the tokens. This is because tokens are filtered independently from pos tags and only zipped at the end.\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Corpus\r\n2. Preprocess Text (have POS tagger before filtering on stopwords)\r\n3. 
Check the result in Corpus Viewer\r\n\r\n**Expected behavior**\r\nIf pos tags exist, they are filtered together with tokens.\r\n\r\n**Orange version:**\r\n3.30.dev\r\n\r\n**Text add-on version:**\r\n1.6.dev\r\n\r\n**Screenshots**\r\n<img width=\"509\" alt=\"Screen Shot 2021-07-21 at 15 13 50\" src=\"https://user-images.githubusercontent.com/12524972/126494455-90d78da1-d0d3-4a02-be65-e176268c4c73.png\">\r\n\r\n**Operating system:**\r\nOSX\r\n\r\n**Additional context**\r\n/\r\n\n", "before_files": [{"content": "from typing import List, Callable\nimport os\nimport re\n\nimport numpy as np\nfrom gensim import corpora\nfrom nltk.corpus import stopwords\n\nfrom Orange.data.io import detect_encoding\nfrom Orange.util import wrap_callback, dummy_callback\n\nfrom orangecontrib.text import Corpus\nfrom orangecontrib.text.misc import wait_nltk_data\nfrom orangecontrib.text.preprocess import TokenizedPreprocessor\n\n__all__ = ['BaseTokenFilter', 'StopwordsFilter', 'LexiconFilter',\n 'RegexpFilter', 'FrequencyFilter', 'MostFrequentTokensFilter',\n 'PosTagFilter']\n\n\nclass BaseTokenFilter(TokenizedPreprocessor):\n def __call__(self, corpus: Corpus, callback: Callable = None) -> Corpus:\n if callback is None:\n callback = dummy_callback\n corpus = super().__call__(corpus, wrap_callback(callback, end=0.2))\n return self._filter_tokens(corpus, wrap_callback(callback, start=0.2))\n\n def _filter_tokens(self, corpus: Corpus, callback: Callable) -> Corpus:\n callback(0, \"Filtering...\")\n tokens = [self._preprocess(tokens) for tokens in corpus.tokens]\n corpus.store_tokens(tokens)\n return corpus\n\n def _preprocess(self, tokens: List) -> List:\n return list(filter(self._check, tokens))\n\n def _check(self, token: str) -> bool:\n raise NotImplementedError\n\n\nclass FileWordListMixin:\n def __init__(self, path: str = None):\n self._lexicon = self.from_file(path)\n\n @staticmethod\n def from_file(path):\n if not path:\n return set()\n\n for encoding in ('utf-8', None, detect_encoding(path)):\n try:\n with open(path, encoding=encoding) as f:\n return set(line.strip() for line in f)\n except UnicodeDecodeError:\n continue\n # No encoding worked, raise\n raise UnicodeError(\"Couldn't determine file encoding\")\n\n\nclass StopwordsFilter(BaseTokenFilter, FileWordListMixin):\n \"\"\" Remove tokens present in NLTK's language specific lists or a file. \"\"\"\n name = 'Stopwords'\n\n @wait_nltk_data\n def __init__(self, language='English', path: str = None):\n super().__init__()\n FileWordListMixin.__init__(self, path)\n self.__stopwords = set(x.strip() for x in\n stopwords.words(language.lower())) \\\n if language else []\n\n @staticmethod\n @wait_nltk_data\n def supported_languages():\n # get NLTK list of stopwords\n stopwords_listdir = []\n try:\n stopwords_listdir = [file for file in\n os.listdir(stopwords._get_root())\n if file.islower()]\n except LookupError: # when no NLTK data is available\n pass\n\n return sorted(file.capitalize() for file in stopwords_listdir)\n\n def _check(self, token):\n return token not in self.__stopwords and token not in self._lexicon\n\n\nclass LexiconFilter(BaseTokenFilter, FileWordListMixin):\n \"\"\" Keep only tokens present in a file. \"\"\"\n name = 'Lexicon'\n\n def _check(self, token):\n return not self._lexicon or token in self._lexicon\n\n\nclass RegexpFilter(BaseTokenFilter):\n \"\"\" Remove tokens matching this regular expressions. 
\"\"\"\n name = 'Regexp'\n\n def __init__(self, pattern=r'\\.|,|:|!|\\?'):\n self._pattern = pattern\n # Compiled Regexes are NOT deepcopy-able and hence to make Corpus deepcopy-able\n # we cannot store then (due to Corpus also storing used_preprocessor for BoW compute values).\n # To bypass the problem regex is compiled before every __call__ and discarded right after.\n self.regex = None\n\n def __call__(self, corpus: Corpus, callback: Callable = None) -> Corpus:\n self.regex = re.compile(self._pattern)\n corpus = super().__call__(corpus, callback)\n self.regex = None\n return corpus\n\n @staticmethod\n def validate_regexp(regexp):\n try:\n re.compile(regexp)\n return True\n except re.error:\n return False\n\n def _check(self, token):\n return not self.regex.match(token)\n\n\nclass FitDictionaryFilter(BaseTokenFilter):\n def __init__(self):\n self._lexicon = None\n self._dictionary = None\n\n def __call__(self, corpus: Corpus, callback: Callable = None) -> Corpus:\n if callback is None:\n callback = dummy_callback\n corpus = TokenizedPreprocessor.__call__(\n self, corpus, wrap_callback(callback, end=0.2))\n callback(0.2, \"Fitting filter...\")\n self._fit(corpus)\n return self._filter_tokens(corpus, wrap_callback(callback, start=0.6))\n\n def _fit(self, corpus: Corpus):\n raise NotImplemented\n\n def _filter_tokens(self, corpus: Corpus, callback: Callable) -> Corpus:\n callback(0, \"Filtering...\")\n tokens = [self._preprocess(tokens) for tokens in corpus.tokens]\n corpus.store_tokens(tokens, self._dictionary)\n return corpus\n\n def _check(self, token):\n assert self._lexicon is not None\n assert self._dictionary is not None\n return token in self._lexicon\n\n\nclass FrequencyFilter(FitDictionaryFilter):\n \"\"\"Remove tokens with document frequency outside this range;\n use either absolute or relative frequency. 
\"\"\"\n name = 'Document frequency'\n\n def __init__(self, min_df=0., max_df=1.):\n super().__init__()\n self._corpus_len = 0\n self._max_df = max_df\n self._min_df = min_df\n\n def _fit(self, corpus: Corpus):\n self._corpus_len = len(corpus)\n self._dictionary = corpora.Dictionary(corpus.tokens)\n self._dictionary.filter_extremes(self.min_df, self.max_df, None)\n self._lexicon = set(self._dictionary.token2id.keys())\n\n @property\n def max_df(self):\n if isinstance(self._max_df, int):\n return self._max_df / self._corpus_len if self._corpus_len else 1.\n else:\n return self._max_df\n\n @property\n def min_df(self):\n if isinstance(self._min_df, float):\n return int(self._corpus_len * self._min_df) or 1\n else:\n return self._min_df\n\n\nclass MostFrequentTokensFilter(FitDictionaryFilter):\n \"\"\"Keep most frequent tokens.\"\"\"\n name = 'Most frequent tokens'\n\n def __init__(self, keep_n=None):\n super().__init__()\n self._keep_n = keep_n\n\n def _fit(self, corpus: Corpus):\n self._dictionary = corpora.Dictionary(corpus.tokens)\n self._dictionary.filter_extremes(0, 1, self._keep_n)\n self._lexicon = set(self._dictionary.token2id.keys())\n\n\nclass PosTagFilter(BaseTokenFilter):\n \"\"\"Keep selected POS tags.\"\"\"\n name = 'POS tags'\n\n def __init__(self, tags=None):\n self._tags = set(i.strip().upper() for i in tags.split(\",\"))\n\n def __call__(self, corpus: Corpus, callback: Callable = None) -> Corpus:\n if callback is None:\n callback = dummy_callback\n corpus = super().__call__(corpus, wrap_callback(callback, end=0.2))\n return self._filter_tokens(corpus, wrap_callback(callback, start=0.2))\n\n @staticmethod\n def validate_tags(tags):\n # should we keep a dict of existing POS tags and compare them with\n # input?\n return len(tags.split(\",\")) > 0\n\n def _filter_tokens(self, corpus: Corpus, callback: Callable) -> Corpus:\n if corpus.pos_tags is None:\n return corpus\n callback(0, \"Filtering...\")\n filtered_tags = []\n filtered_tokens = []\n for tags, tokens in zip(corpus.pos_tags, corpus.tokens):\n tmp_tags = []\n tmp_tokens = []\n for tag, token in zip(tags, tokens):\n # should we consider partial matches, i.e. 
\"NN\" for \"NNS\"?\n if tag in self._tags:\n tmp_tags.append(tag)\n tmp_tokens.append(token)\n filtered_tags.append(tmp_tags)\n filtered_tokens.append(tmp_tokens)\n corpus.store_tokens(filtered_tokens)\n corpus.pos_tags = filtered_tags\n return corpus\n\n def _check(self, token: str) -> bool:\n pass\n", "path": "orangecontrib/text/preprocess/filter.py"}], "after_files": [{"content": "from itertools import compress\nfrom typing import List, Callable\nimport os\nimport re\n\nimport numpy as np\nfrom gensim import corpora\nfrom nltk.corpus import stopwords\n\nfrom Orange.data.io import detect_encoding\nfrom Orange.util import wrap_callback, dummy_callback\n\nfrom orangecontrib.text import Corpus\nfrom orangecontrib.text.misc import wait_nltk_data\nfrom orangecontrib.text.preprocess import TokenizedPreprocessor\n\n__all__ = ['BaseTokenFilter', 'StopwordsFilter', 'LexiconFilter',\n 'RegexpFilter', 'FrequencyFilter', 'MostFrequentTokensFilter',\n 'PosTagFilter']\n\n\nclass BaseTokenFilter(TokenizedPreprocessor):\n def __call__(self, corpus: Corpus, callback: Callable = None) -> Corpus:\n if callback is None:\n callback = dummy_callback\n corpus = super().__call__(corpus, wrap_callback(callback, end=0.2))\n return self._filter_tokens(corpus, wrap_callback(callback, start=0.2))\n\n def _filter_tokens(self, corpus: Corpus, callback: Callable,\n dictionary=None) -> Corpus:\n callback(0, \"Filtering...\")\n filtered_tokens = []\n filtered_tags = []\n for i, tokens in enumerate(corpus.tokens):\n filter_map = self._preprocess(tokens)\n filtered_tokens.append(list(compress(tokens, filter_map)))\n if corpus.pos_tags is not None:\n filtered_tags.append(list(compress(corpus.pos_tags[i],\n filter_map)))\n if dictionary is None:\n corpus.store_tokens(filtered_tokens)\n else:\n corpus.store_tokens(filtered_tokens, dictionary)\n if filtered_tags:\n corpus.pos_tags = np.array(filtered_tags, dtype=object)\n return corpus\n\n def _preprocess(self, tokens: List) -> List:\n return [self._check(token) for token in tokens]\n\n def _check(self, token: str) -> bool:\n raise NotImplementedError\n\n\nclass FileWordListMixin:\n def __init__(self, path: str = None):\n self._lexicon = self.from_file(path)\n\n @staticmethod\n def from_file(path):\n if not path:\n return set()\n\n for encoding in ('utf-8', None, detect_encoding(path)):\n try:\n with open(path, encoding=encoding) as f:\n return set(line.strip() for line in f)\n except UnicodeDecodeError:\n continue\n # No encoding worked, raise\n raise UnicodeError(\"Couldn't determine file encoding\")\n\n\nclass StopwordsFilter(BaseTokenFilter, FileWordListMixin):\n \"\"\" Remove tokens present in NLTK's language specific lists or a file. \"\"\"\n name = 'Stopwords'\n\n @wait_nltk_data\n def __init__(self, language='English', path: str = None):\n super().__init__()\n FileWordListMixin.__init__(self, path)\n self.__stopwords = set(x.strip() for x in\n stopwords.words(language.lower())) \\\n if language else []\n\n @staticmethod\n @wait_nltk_data\n def supported_languages():\n # get NLTK list of stopwords\n stopwords_listdir = []\n try:\n stopwords_listdir = [file for file in\n os.listdir(stopwords._get_root())\n if file.islower()]\n except LookupError: # when no NLTK data is available\n pass\n\n return sorted(file.capitalize() for file in stopwords_listdir)\n\n def _check(self, token):\n return token not in self.__stopwords and token not in self._lexicon\n\n\nclass LexiconFilter(BaseTokenFilter, FileWordListMixin):\n \"\"\" Keep only tokens present in a file. 
\"\"\"\n name = 'Lexicon'\n\n def _check(self, token):\n return not self._lexicon or token in self._lexicon\n\n\nclass RegexpFilter(BaseTokenFilter):\n \"\"\" Remove tokens matching this regular expressions. \"\"\"\n name = 'Regexp'\n\n def __init__(self, pattern=r'\\.|,|:|!|\\?'):\n self._pattern = pattern\n # Compiled Regexes are NOT deepcopy-able and hence to make Corpus deepcopy-able\n # we cannot store then (due to Corpus also storing used_preprocessor for BoW compute values).\n # To bypass the problem regex is compiled before every __call__ and discarded right after.\n self.regex = None\n\n def __call__(self, corpus: Corpus, callback: Callable = None) -> Corpus:\n self.regex = re.compile(self._pattern)\n corpus = super().__call__(corpus, callback)\n self.regex = None\n return corpus\n\n @staticmethod\n def validate_regexp(regexp):\n try:\n re.compile(regexp)\n return True\n except re.error:\n return False\n\n def _check(self, token):\n return not self.regex.match(token)\n\n\nclass FitDictionaryFilter(BaseTokenFilter):\n def __init__(self):\n self._lexicon = None\n self._dictionary = None\n\n def __call__(self, corpus: Corpus, callback: Callable = None) -> Corpus:\n if callback is None:\n callback = dummy_callback\n corpus = TokenizedPreprocessor.__call__(\n self, corpus, wrap_callback(callback, end=0.2))\n callback(0.2, \"Fitting filter...\")\n self._fit(corpus)\n return self._filter_tokens(corpus, wrap_callback(callback, start=0.6))\n\n def _fit(self, corpus: Corpus):\n raise NotImplemented\n\n def _filter_tokens(self, corpus: Corpus, callback: Callable,\n dictionary=None) -> Corpus:\n corpus = super()._filter_tokens(corpus, callback,\n dictionary=self._dictionary)\n return corpus\n\n def _check(self, token):\n assert self._lexicon is not None\n assert self._dictionary is not None\n return token in self._lexicon\n\n\nclass FrequencyFilter(FitDictionaryFilter):\n \"\"\"Remove tokens with document frequency outside this range;\n use either absolute or relative frequency. 
\"\"\"\n name = 'Document frequency'\n\n def __init__(self, min_df=0., max_df=1.):\n super().__init__()\n self._corpus_len = 0\n self._max_df = max_df\n self._min_df = min_df\n\n def _fit(self, corpus: Corpus):\n self._corpus_len = len(corpus)\n self._dictionary = corpora.Dictionary(corpus.tokens)\n self._dictionary.filter_extremes(self.min_df, self.max_df, None)\n self._lexicon = set(self._dictionary.token2id.keys())\n\n @property\n def max_df(self):\n if isinstance(self._max_df, int):\n return self._max_df / self._corpus_len if self._corpus_len else 1.\n else:\n return self._max_df\n\n @property\n def min_df(self):\n if isinstance(self._min_df, float):\n return int(self._corpus_len * self._min_df) or 1\n else:\n return self._min_df\n\n\nclass MostFrequentTokensFilter(FitDictionaryFilter):\n \"\"\"Keep most frequent tokens.\"\"\"\n name = 'Most frequent tokens'\n\n def __init__(self, keep_n=None):\n super().__init__()\n self._keep_n = keep_n\n\n def _fit(self, corpus: Corpus):\n self._dictionary = corpora.Dictionary(corpus.tokens)\n self._dictionary.filter_extremes(0, 1, self._keep_n)\n self._lexicon = set(self._dictionary.token2id.keys())\n\n\nclass PosTagFilter(BaseTokenFilter):\n \"\"\"Keep selected POS tags.\"\"\"\n name = 'POS tags'\n\n def __init__(self, tags=None):\n self._tags = set(i.strip().upper() for i in tags.split(\",\"))\n\n def __call__(self, corpus: Corpus, callback: Callable = None) -> Corpus:\n if callback is None:\n callback = dummy_callback\n corpus = super().__call__(corpus, wrap_callback(callback, end=0.2))\n return self._filter_tokens(corpus, wrap_callback(callback, start=0.2))\n\n @staticmethod\n def validate_tags(tags):\n # should we keep a dict of existing POS tags and compare them with\n # input?\n return len(tags.split(\",\")) > 0\n\n def _filter_tokens(self, corpus: Corpus, callback: Callable) -> Corpus:\n if corpus.pos_tags is None:\n return corpus\n callback(0, \"Filtering...\")\n filtered_tags = []\n filtered_tokens = []\n for tags, tokens in zip(corpus.pos_tags, corpus.tokens):\n tmp_tags = []\n tmp_tokens = []\n for tag, token in zip(tags, tokens):\n # should we consider partial matches, i.e. \"NN\" for \"NNS\"?\n if tag in self._tags:\n tmp_tags.append(tag)\n tmp_tokens.append(token)\n filtered_tags.append(tmp_tags)\n filtered_tokens.append(tmp_tokens)\n corpus.store_tokens(filtered_tokens)\n corpus.pos_tags = filtered_tags\n return corpus\n\n def _check(self, token: str) -> bool:\n pass\n", "path": "orangecontrib/text/preprocess/filter.py"}]} | 3,008 | 534 |
gh_patches_debug_51119 | rasdani/github-patches | git_diff | getmoto__moto-744 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Dynamodb has_item causes scan problems
Calling `has_item` for an item that doesn't exist, and then calling `scan()` causes `AttributeError: 'dict' object has no attribute 'attrs'`
Example of behaviour that causes this:
``` python
from moto import mock_dynamodb
import boto
with mock_dynamodb():
conn = boto.connect_dynamodb()
table = conn.create_table(
name='mytable',
schema=conn.create_schema(
hash_key_name='name',
hash_key_proto_value=str,
),
read_units=1,
write_units=1
)
print(list(table.scan()))
table.has_item('something')
print(list(table.scan()))
```
`has_item` calls down to `Table.get_item` in `dynamodb/models.py`. This accesses `self.items`, which is a `defaultdict` whose default value is an empty dict. This results in an empty dict being set in `self.items`. The `scan()` call in `dynamodb/responses.py` does not handle dicts (or anything other than an Item)
Replacing the error line in `dynamodb/responses.py` with the following code appears to work:
``` python
"Items": [item.attrs for item in items if item],
```
I'm not very familiar with this library (only started using it today) but happy to submit a PR if that seems like the right thing to do.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `moto/dynamodb/responses.py`
Content:
```
1 from __future__ import unicode_literals
2 import json
3 import six
4
5 from moto.core.responses import BaseResponse
6 from moto.core.utils import camelcase_to_underscores
7 from .models import dynamodb_backend, dynamo_json_dump
8
9
10 GET_SESSION_TOKEN_RESULT = """
11 <GetSessionTokenResponse xmlns="https://sts.amazonaws.com/doc/2011-06-15/">
12 <GetSessionTokenResult>
13 <Credentials>
14 <SessionToken>
15 AQoEXAMPLEH4aoAH0gNCAPyJxz4BlCFFxWNE1OPTgk5TthT+FvwqnKwRcOIfrRh3c/L
16 To6UDdyJwOOvEVPvLXCrrrUtdnniCEXAMPLE/IvU1dYUg2RVAJBanLiHb4IgRmpRV3z
17 rkuWJOgQs8IZZaIv2BXIa2R4OlgkBN9bkUDNCJiBeb/AXlzBBko7b15fjrBs2+cTQtp
18 Z3CYWFXG8C5zqx37wnOE49mRl/+OtkIKGO7fAE
19 </SessionToken>
20 <SecretAccessKey>
21 wJalrXUtnFEMI/K7MDENG/bPxRfiCYzEXAMPLEKEY
22 </SecretAccessKey>
23 <Expiration>2011-07-11T19:55:29.611Z</Expiration>
24 <AccessKeyId>AKIAIOSFODNN7EXAMPLE</AccessKeyId>
25 </Credentials>
26 </GetSessionTokenResult>
27 <ResponseMetadata>
28 <RequestId>58c5dbae-abef-11e0-8cfe-09039844ac7d</RequestId>
29 </ResponseMetadata>
30 </GetSessionTokenResponse>"""
31
32
33 def sts_handler():
34 return GET_SESSION_TOKEN_RESULT
35
36
37 class DynamoHandler(BaseResponse):
38
39 def get_endpoint_name(self, headers):
40 """Parses request headers and extracts part od the X-Amz-Target
41 that corresponds to a method of DynamoHandler
42
43 ie: X-Amz-Target: DynamoDB_20111205.ListTables -> ListTables
44 """
45 # Headers are case-insensitive. Probably a better way to do this.
46 match = headers.get('x-amz-target') or headers.get('X-Amz-Target')
47 if match:
48 return match.split(".")[1]
49
50 def error(self, type_, status=400):
51 return status, self.response_headers, dynamo_json_dump({'__type': type_})
52
53 def call_action(self):
54 body = self.body.decode('utf-8')
55 if 'GetSessionToken' in body:
56 return 200, self.response_headers, sts_handler()
57
58 self.body = json.loads(body or '{}')
59 endpoint = self.get_endpoint_name(self.headers)
60 if endpoint:
61 endpoint = camelcase_to_underscores(endpoint)
62 response = getattr(self, endpoint)()
63 if isinstance(response, six.string_types):
64 return 200, self.response_headers, response
65
66 else:
67 status_code, new_headers, response_content = response
68 self.response_headers.update(new_headers)
69 return status_code, self.response_headers, response_content
70 else:
71 return 404, self.response_headers, ""
72
73 def list_tables(self):
74 body = self.body
75 limit = body.get('Limit')
76 if body.get("ExclusiveStartTableName"):
77 last = body.get("ExclusiveStartTableName")
78 start = list(dynamodb_backend.tables.keys()).index(last) + 1
79 else:
80 start = 0
81 all_tables = list(dynamodb_backend.tables.keys())
82 if limit:
83 tables = all_tables[start:start + limit]
84 else:
85 tables = all_tables[start:]
86 response = {"TableNames": tables}
87 if limit and len(all_tables) > start + limit:
88 response["LastEvaluatedTableName"] = tables[-1]
89 return dynamo_json_dump(response)
90
91 def create_table(self):
92 body = self.body
93 name = body['TableName']
94
95 key_schema = body['KeySchema']
96 hash_hey = key_schema['HashKeyElement']
97 hash_key_attr = hash_hey['AttributeName']
98 hash_key_type = hash_hey['AttributeType']
99
100 range_hey = key_schema.get('RangeKeyElement', {})
101 range_key_attr = range_hey.get('AttributeName')
102 range_key_type = range_hey.get('AttributeType')
103
104 throughput = body["ProvisionedThroughput"]
105 read_units = throughput["ReadCapacityUnits"]
106 write_units = throughput["WriteCapacityUnits"]
107
108 table = dynamodb_backend.create_table(
109 name,
110 hash_key_attr=hash_key_attr,
111 hash_key_type=hash_key_type,
112 range_key_attr=range_key_attr,
113 range_key_type=range_key_type,
114 read_capacity=int(read_units),
115 write_capacity=int(write_units),
116 )
117 return dynamo_json_dump(table.describe)
118
119 def delete_table(self):
120 name = self.body['TableName']
121 table = dynamodb_backend.delete_table(name)
122 if table:
123 return dynamo_json_dump(table.describe)
124 else:
125 er = 'com.amazonaws.dynamodb.v20111205#ResourceNotFoundException'
126 return self.error(er)
127
128 def update_table(self):
129 name = self.body['TableName']
130 throughput = self.body["ProvisionedThroughput"]
131 new_read_units = throughput["ReadCapacityUnits"]
132 new_write_units = throughput["WriteCapacityUnits"]
133 table = dynamodb_backend.update_table_throughput(name, new_read_units, new_write_units)
134 return dynamo_json_dump(table.describe)
135
136 def describe_table(self):
137 name = self.body['TableName']
138 try:
139 table = dynamodb_backend.tables[name]
140 except KeyError:
141 er = 'com.amazonaws.dynamodb.v20111205#ResourceNotFoundException'
142 return self.error(er)
143 return dynamo_json_dump(table.describe)
144
145 def put_item(self):
146 name = self.body['TableName']
147 item = self.body['Item']
148 result = dynamodb_backend.put_item(name, item)
149 if result:
150 item_dict = result.to_json()
151 item_dict['ConsumedCapacityUnits'] = 1
152 return dynamo_json_dump(item_dict)
153 else:
154 er = 'com.amazonaws.dynamodb.v20111205#ResourceNotFoundException'
155 return self.error(er)
156
157 def batch_write_item(self):
158 table_batches = self.body['RequestItems']
159
160 for table_name, table_requests in table_batches.items():
161 for table_request in table_requests:
162 request_type = list(table_request)[0]
163 request = list(table_request.values())[0]
164
165 if request_type == 'PutRequest':
166 item = request['Item']
167 dynamodb_backend.put_item(table_name, item)
168 elif request_type == 'DeleteRequest':
169 key = request['Key']
170 hash_key = key['HashKeyElement']
171 range_key = key.get('RangeKeyElement')
172 item = dynamodb_backend.delete_item(table_name, hash_key, range_key)
173
174 response = {
175 "Responses": {
176 "Thread": {
177 "ConsumedCapacityUnits": 1.0
178 },
179 "Reply": {
180 "ConsumedCapacityUnits": 1.0
181 }
182 },
183 "UnprocessedItems": {}
184 }
185
186 return dynamo_json_dump(response)
187
188 def get_item(self):
189 name = self.body['TableName']
190 key = self.body['Key']
191 hash_key = key['HashKeyElement']
192 range_key = key.get('RangeKeyElement')
193 attrs_to_get = self.body.get('AttributesToGet')
194 try:
195 item = dynamodb_backend.get_item(name, hash_key, range_key)
196 except ValueError:
197 er = 'com.amazon.coral.validate#ValidationException'
198 return self.error(er, status=400)
199 if item:
200 item_dict = item.describe_attrs(attrs_to_get)
201 item_dict['ConsumedCapacityUnits'] = 0.5
202 return dynamo_json_dump(item_dict)
203 else:
204 # Item not found
205 er = 'com.amazonaws.dynamodb.v20111205#ResourceNotFoundException'
206 return self.error(er, status=404)
207
208 def batch_get_item(self):
209 table_batches = self.body['RequestItems']
210
211 results = {
212 "Responses": {
213 "UnprocessedKeys": {}
214 }
215 }
216
217 for table_name, table_request in table_batches.items():
218 items = []
219 keys = table_request['Keys']
220 attributes_to_get = table_request.get('AttributesToGet')
221 for key in keys:
222 hash_key = key["HashKeyElement"]
223 range_key = key.get("RangeKeyElement")
224 item = dynamodb_backend.get_item(table_name, hash_key, range_key)
225 if item:
226 item_describe = item.describe_attrs(attributes_to_get)
227 items.append(item_describe)
228 results["Responses"][table_name] = {"Items": items, "ConsumedCapacityUnits": 1}
229 return dynamo_json_dump(results)
230
231 def query(self):
232 name = self.body['TableName']
233 hash_key = self.body['HashKeyValue']
234 range_condition = self.body.get('RangeKeyCondition')
235 if range_condition:
236 range_comparison = range_condition['ComparisonOperator']
237 range_values = range_condition['AttributeValueList']
238 else:
239 range_comparison = None
240 range_values = []
241
242 items, last_page = dynamodb_backend.query(name, hash_key, range_comparison, range_values)
243
244 if items is None:
245 er = 'com.amazonaws.dynamodb.v20111205#ResourceNotFoundException'
246 return self.error(er)
247
248 result = {
249 "Count": len(items),
250 "Items": [item.attrs for item in items],
251 "ConsumedCapacityUnits": 1,
252 }
253
254 # Implement this when we do pagination
255 # if not last_page:
256 # result["LastEvaluatedKey"] = {
257 # "HashKeyElement": items[-1].hash_key,
258 # "RangeKeyElement": items[-1].range_key,
259 # }
260 return dynamo_json_dump(result)
261
262 def scan(self):
263 name = self.body['TableName']
264
265 filters = {}
266 scan_filters = self.body.get('ScanFilter', {})
267 for attribute_name, scan_filter in scan_filters.items():
268 # Keys are attribute names. Values are tuples of (comparison, comparison_value)
269 comparison_operator = scan_filter["ComparisonOperator"]
270 comparison_values = scan_filter.get("AttributeValueList", [])
271 filters[attribute_name] = (comparison_operator, comparison_values)
272
273 items, scanned_count, last_page = dynamodb_backend.scan(name, filters)
274
275 if items is None:
276 er = 'com.amazonaws.dynamodb.v20111205#ResourceNotFoundException'
277 return self.error(er)
278
279 result = {
280 "Count": len(items),
281 "Items": [item.attrs for item in items],
282 "ConsumedCapacityUnits": 1,
283 "ScannedCount": scanned_count
284 }
285
286 # Implement this when we do pagination
287 # if not last_page:
288 # result["LastEvaluatedKey"] = {
289 # "HashKeyElement": items[-1].hash_key,
290 # "RangeKeyElement": items[-1].range_key,
291 # }
292 return dynamo_json_dump(result)
293
294 def delete_item(self):
295 name = self.body['TableName']
296 key = self.body['Key']
297 hash_key = key['HashKeyElement']
298 range_key = key.get('RangeKeyElement')
299 return_values = self.body.get('ReturnValues', '')
300 item = dynamodb_backend.delete_item(name, hash_key, range_key)
301 if item:
302 if return_values == 'ALL_OLD':
303 item_dict = item.to_json()
304 else:
305 item_dict = {'Attributes': []}
306 item_dict['ConsumedCapacityUnits'] = 0.5
307 return dynamo_json_dump(item_dict)
308 else:
309 er = 'com.amazonaws.dynamodb.v20111205#ResourceNotFoundException'
310 return self.error(er)
311
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/moto/dynamodb/responses.py b/moto/dynamodb/responses.py
--- a/moto/dynamodb/responses.py
+++ b/moto/dynamodb/responses.py
@@ -278,7 +278,7 @@
result = {
"Count": len(items),
- "Items": [item.attrs for item in items],
+ "Items": [item.attrs for item in items if item],
"ConsumedCapacityUnits": 1,
"ScannedCount": scanned_count
}
| {"golden_diff": "diff --git a/moto/dynamodb/responses.py b/moto/dynamodb/responses.py\n--- a/moto/dynamodb/responses.py\n+++ b/moto/dynamodb/responses.py\n@@ -278,7 +278,7 @@\n \n result = {\n \"Count\": len(items),\n- \"Items\": [item.attrs for item in items],\n+ \"Items\": [item.attrs for item in items if item],\n \"ConsumedCapacityUnits\": 1,\n \"ScannedCount\": scanned_count\n }\n", "issue": "Dynamodb has_item causes scan problems\nCalling `has_item` for an item that doesn't exist, and then calling `scan()` causes `AttributeError: 'dict' object has no attribute 'attrs'`\n\nExample of behaviour that causes this:\n\n``` python\nfrom moto import mock_dynamodb\nimport boto\n\n\nwith mock_dynamodb():\n conn = boto.connect_dynamodb()\n table = conn.create_table(\n name='mytable',\n schema=conn.create_schema(\n hash_key_name='name',\n hash_key_proto_value=str,\n ),\n read_units=1,\n write_units=1\n )\n print(list(table.scan()))\n table.has_item('something')\n print(list(table.scan()))\n```\n\n`has_item` calls down to `Table.get_item` in `dynamodb/models.py`. This accesses `self.items`, which is a `defaultdict` whose default value is an empty dict. This results in an empty dict being set in `self.items`. The `scan()` call in `dynamodb/responses.py` does not handle dicts (or anything other than an Item)\n\nReplacing the error line in `dynamodb/responses.py` with the following code appears to work:\n\n``` python\n\"Items\": [item.attrs for item in items if item],\n```\n\nI'm not very familiar with this library (only started using it today) but happy to submit a PR if that seems like the right thing to do.\n\n", "before_files": [{"content": "from __future__ import unicode_literals\nimport json\nimport six\n\nfrom moto.core.responses import BaseResponse\nfrom moto.core.utils import camelcase_to_underscores\nfrom .models import dynamodb_backend, dynamo_json_dump\n\n\nGET_SESSION_TOKEN_RESULT = \"\"\"\n<GetSessionTokenResponse xmlns=\"https://sts.amazonaws.com/doc/2011-06-15/\">\n <GetSessionTokenResult>\n <Credentials>\n <SessionToken>\n AQoEXAMPLEH4aoAH0gNCAPyJxz4BlCFFxWNE1OPTgk5TthT+FvwqnKwRcOIfrRh3c/L\n To6UDdyJwOOvEVPvLXCrrrUtdnniCEXAMPLE/IvU1dYUg2RVAJBanLiHb4IgRmpRV3z\n rkuWJOgQs8IZZaIv2BXIa2R4OlgkBN9bkUDNCJiBeb/AXlzBBko7b15fjrBs2+cTQtp\n Z3CYWFXG8C5zqx37wnOE49mRl/+OtkIKGO7fAE\n </SessionToken>\n <SecretAccessKey>\n wJalrXUtnFEMI/K7MDENG/bPxRfiCYzEXAMPLEKEY\n </SecretAccessKey>\n <Expiration>2011-07-11T19:55:29.611Z</Expiration>\n <AccessKeyId>AKIAIOSFODNN7EXAMPLE</AccessKeyId>\n </Credentials>\n </GetSessionTokenResult>\n <ResponseMetadata>\n <RequestId>58c5dbae-abef-11e0-8cfe-09039844ac7d</RequestId>\n </ResponseMetadata>\n</GetSessionTokenResponse>\"\"\"\n\n\ndef sts_handler():\n return GET_SESSION_TOKEN_RESULT\n\n\nclass DynamoHandler(BaseResponse):\n\n def get_endpoint_name(self, headers):\n \"\"\"Parses request headers and extracts part od the X-Amz-Target\n that corresponds to a method of DynamoHandler\n\n ie: X-Amz-Target: DynamoDB_20111205.ListTables -> ListTables\n \"\"\"\n # Headers are case-insensitive. 
Probably a better way to do this.\n match = headers.get('x-amz-target') or headers.get('X-Amz-Target')\n if match:\n return match.split(\".\")[1]\n\n def error(self, type_, status=400):\n return status, self.response_headers, dynamo_json_dump({'__type': type_})\n\n def call_action(self):\n body = self.body.decode('utf-8')\n if 'GetSessionToken' in body:\n return 200, self.response_headers, sts_handler()\n\n self.body = json.loads(body or '{}')\n endpoint = self.get_endpoint_name(self.headers)\n if endpoint:\n endpoint = camelcase_to_underscores(endpoint)\n response = getattr(self, endpoint)()\n if isinstance(response, six.string_types):\n return 200, self.response_headers, response\n\n else:\n status_code, new_headers, response_content = response\n self.response_headers.update(new_headers)\n return status_code, self.response_headers, response_content\n else:\n return 404, self.response_headers, \"\"\n\n def list_tables(self):\n body = self.body\n limit = body.get('Limit')\n if body.get(\"ExclusiveStartTableName\"):\n last = body.get(\"ExclusiveStartTableName\")\n start = list(dynamodb_backend.tables.keys()).index(last) + 1\n else:\n start = 0\n all_tables = list(dynamodb_backend.tables.keys())\n if limit:\n tables = all_tables[start:start + limit]\n else:\n tables = all_tables[start:]\n response = {\"TableNames\": tables}\n if limit and len(all_tables) > start + limit:\n response[\"LastEvaluatedTableName\"] = tables[-1]\n return dynamo_json_dump(response)\n\n def create_table(self):\n body = self.body\n name = body['TableName']\n\n key_schema = body['KeySchema']\n hash_hey = key_schema['HashKeyElement']\n hash_key_attr = hash_hey['AttributeName']\n hash_key_type = hash_hey['AttributeType']\n\n range_hey = key_schema.get('RangeKeyElement', {})\n range_key_attr = range_hey.get('AttributeName')\n range_key_type = range_hey.get('AttributeType')\n\n throughput = body[\"ProvisionedThroughput\"]\n read_units = throughput[\"ReadCapacityUnits\"]\n write_units = throughput[\"WriteCapacityUnits\"]\n\n table = dynamodb_backend.create_table(\n name,\n hash_key_attr=hash_key_attr,\n hash_key_type=hash_key_type,\n range_key_attr=range_key_attr,\n range_key_type=range_key_type,\n read_capacity=int(read_units),\n write_capacity=int(write_units),\n )\n return dynamo_json_dump(table.describe)\n\n def delete_table(self):\n name = self.body['TableName']\n table = dynamodb_backend.delete_table(name)\n if table:\n return dynamo_json_dump(table.describe)\n else:\n er = 'com.amazonaws.dynamodb.v20111205#ResourceNotFoundException'\n return self.error(er)\n\n def update_table(self):\n name = self.body['TableName']\n throughput = self.body[\"ProvisionedThroughput\"]\n new_read_units = throughput[\"ReadCapacityUnits\"]\n new_write_units = throughput[\"WriteCapacityUnits\"]\n table = dynamodb_backend.update_table_throughput(name, new_read_units, new_write_units)\n return dynamo_json_dump(table.describe)\n\n def describe_table(self):\n name = self.body['TableName']\n try:\n table = dynamodb_backend.tables[name]\n except KeyError:\n er = 'com.amazonaws.dynamodb.v20111205#ResourceNotFoundException'\n return self.error(er)\n return dynamo_json_dump(table.describe)\n\n def put_item(self):\n name = self.body['TableName']\n item = self.body['Item']\n result = dynamodb_backend.put_item(name, item)\n if result:\n item_dict = result.to_json()\n item_dict['ConsumedCapacityUnits'] = 1\n return dynamo_json_dump(item_dict)\n else:\n er = 'com.amazonaws.dynamodb.v20111205#ResourceNotFoundException'\n return self.error(er)\n\n def 
batch_write_item(self):\n table_batches = self.body['RequestItems']\n\n for table_name, table_requests in table_batches.items():\n for table_request in table_requests:\n request_type = list(table_request)[0]\n request = list(table_request.values())[0]\n\n if request_type == 'PutRequest':\n item = request['Item']\n dynamodb_backend.put_item(table_name, item)\n elif request_type == 'DeleteRequest':\n key = request['Key']\n hash_key = key['HashKeyElement']\n range_key = key.get('RangeKeyElement')\n item = dynamodb_backend.delete_item(table_name, hash_key, range_key)\n\n response = {\n \"Responses\": {\n \"Thread\": {\n \"ConsumedCapacityUnits\": 1.0\n },\n \"Reply\": {\n \"ConsumedCapacityUnits\": 1.0\n }\n },\n \"UnprocessedItems\": {}\n }\n\n return dynamo_json_dump(response)\n\n def get_item(self):\n name = self.body['TableName']\n key = self.body['Key']\n hash_key = key['HashKeyElement']\n range_key = key.get('RangeKeyElement')\n attrs_to_get = self.body.get('AttributesToGet')\n try:\n item = dynamodb_backend.get_item(name, hash_key, range_key)\n except ValueError:\n er = 'com.amazon.coral.validate#ValidationException'\n return self.error(er, status=400)\n if item:\n item_dict = item.describe_attrs(attrs_to_get)\n item_dict['ConsumedCapacityUnits'] = 0.5\n return dynamo_json_dump(item_dict)\n else:\n # Item not found\n er = 'com.amazonaws.dynamodb.v20111205#ResourceNotFoundException'\n return self.error(er, status=404)\n\n def batch_get_item(self):\n table_batches = self.body['RequestItems']\n\n results = {\n \"Responses\": {\n \"UnprocessedKeys\": {}\n }\n }\n\n for table_name, table_request in table_batches.items():\n items = []\n keys = table_request['Keys']\n attributes_to_get = table_request.get('AttributesToGet')\n for key in keys:\n hash_key = key[\"HashKeyElement\"]\n range_key = key.get(\"RangeKeyElement\")\n item = dynamodb_backend.get_item(table_name, hash_key, range_key)\n if item:\n item_describe = item.describe_attrs(attributes_to_get)\n items.append(item_describe)\n results[\"Responses\"][table_name] = {\"Items\": items, \"ConsumedCapacityUnits\": 1}\n return dynamo_json_dump(results)\n\n def query(self):\n name = self.body['TableName']\n hash_key = self.body['HashKeyValue']\n range_condition = self.body.get('RangeKeyCondition')\n if range_condition:\n range_comparison = range_condition['ComparisonOperator']\n range_values = range_condition['AttributeValueList']\n else:\n range_comparison = None\n range_values = []\n\n items, last_page = dynamodb_backend.query(name, hash_key, range_comparison, range_values)\n\n if items is None:\n er = 'com.amazonaws.dynamodb.v20111205#ResourceNotFoundException'\n return self.error(er)\n\n result = {\n \"Count\": len(items),\n \"Items\": [item.attrs for item in items],\n \"ConsumedCapacityUnits\": 1,\n }\n\n # Implement this when we do pagination\n # if not last_page:\n # result[\"LastEvaluatedKey\"] = {\n # \"HashKeyElement\": items[-1].hash_key,\n # \"RangeKeyElement\": items[-1].range_key,\n # }\n return dynamo_json_dump(result)\n\n def scan(self):\n name = self.body['TableName']\n\n filters = {}\n scan_filters = self.body.get('ScanFilter', {})\n for attribute_name, scan_filter in scan_filters.items():\n # Keys are attribute names. 
Values are tuples of (comparison, comparison_value)\n comparison_operator = scan_filter[\"ComparisonOperator\"]\n comparison_values = scan_filter.get(\"AttributeValueList\", [])\n filters[attribute_name] = (comparison_operator, comparison_values)\n\n items, scanned_count, last_page = dynamodb_backend.scan(name, filters)\n\n if items is None:\n er = 'com.amazonaws.dynamodb.v20111205#ResourceNotFoundException'\n return self.error(er)\n\n result = {\n \"Count\": len(items),\n \"Items\": [item.attrs for item in items],\n \"ConsumedCapacityUnits\": 1,\n \"ScannedCount\": scanned_count\n }\n\n # Implement this when we do pagination\n # if not last_page:\n # result[\"LastEvaluatedKey\"] = {\n # \"HashKeyElement\": items[-1].hash_key,\n # \"RangeKeyElement\": items[-1].range_key,\n # }\n return dynamo_json_dump(result)\n\n def delete_item(self):\n name = self.body['TableName']\n key = self.body['Key']\n hash_key = key['HashKeyElement']\n range_key = key.get('RangeKeyElement')\n return_values = self.body.get('ReturnValues', '')\n item = dynamodb_backend.delete_item(name, hash_key, range_key)\n if item:\n if return_values == 'ALL_OLD':\n item_dict = item.to_json()\n else:\n item_dict = {'Attributes': []}\n item_dict['ConsumedCapacityUnits'] = 0.5\n return dynamo_json_dump(item_dict)\n else:\n er = 'com.amazonaws.dynamodb.v20111205#ResourceNotFoundException'\n return self.error(er)\n", "path": "moto/dynamodb/responses.py"}], "after_files": [{"content": "from __future__ import unicode_literals\nimport json\nimport six\n\nfrom moto.core.responses import BaseResponse\nfrom moto.core.utils import camelcase_to_underscores\nfrom .models import dynamodb_backend, dynamo_json_dump\n\n\nGET_SESSION_TOKEN_RESULT = \"\"\"\n<GetSessionTokenResponse xmlns=\"https://sts.amazonaws.com/doc/2011-06-15/\">\n <GetSessionTokenResult>\n <Credentials>\n <SessionToken>\n AQoEXAMPLEH4aoAH0gNCAPyJxz4BlCFFxWNE1OPTgk5TthT+FvwqnKwRcOIfrRh3c/L\n To6UDdyJwOOvEVPvLXCrrrUtdnniCEXAMPLE/IvU1dYUg2RVAJBanLiHb4IgRmpRV3z\n rkuWJOgQs8IZZaIv2BXIa2R4OlgkBN9bkUDNCJiBeb/AXlzBBko7b15fjrBs2+cTQtp\n Z3CYWFXG8C5zqx37wnOE49mRl/+OtkIKGO7fAE\n </SessionToken>\n <SecretAccessKey>\n wJalrXUtnFEMI/K7MDENG/bPxRfiCYzEXAMPLEKEY\n </SecretAccessKey>\n <Expiration>2011-07-11T19:55:29.611Z</Expiration>\n <AccessKeyId>AKIAIOSFODNN7EXAMPLE</AccessKeyId>\n </Credentials>\n </GetSessionTokenResult>\n <ResponseMetadata>\n <RequestId>58c5dbae-abef-11e0-8cfe-09039844ac7d</RequestId>\n </ResponseMetadata>\n</GetSessionTokenResponse>\"\"\"\n\n\ndef sts_handler():\n return GET_SESSION_TOKEN_RESULT\n\n\nclass DynamoHandler(BaseResponse):\n\n def get_endpoint_name(self, headers):\n \"\"\"Parses request headers and extracts part od the X-Amz-Target\n that corresponds to a method of DynamoHandler\n\n ie: X-Amz-Target: DynamoDB_20111205.ListTables -> ListTables\n \"\"\"\n # Headers are case-insensitive. 
Probably a better way to do this.\n match = headers.get('x-amz-target') or headers.get('X-Amz-Target')\n if match:\n return match.split(\".\")[1]\n\n def error(self, type_, status=400):\n return status, self.response_headers, dynamo_json_dump({'__type': type_})\n\n def call_action(self):\n body = self.body.decode('utf-8')\n if 'GetSessionToken' in body:\n return 200, self.response_headers, sts_handler()\n\n self.body = json.loads(body or '{}')\n endpoint = self.get_endpoint_name(self.headers)\n if endpoint:\n endpoint = camelcase_to_underscores(endpoint)\n response = getattr(self, endpoint)()\n if isinstance(response, six.string_types):\n return 200, self.response_headers, response\n\n else:\n status_code, new_headers, response_content = response\n self.response_headers.update(new_headers)\n return status_code, self.response_headers, response_content\n else:\n return 404, self.response_headers, \"\"\n\n def list_tables(self):\n body = self.body\n limit = body.get('Limit')\n if body.get(\"ExclusiveStartTableName\"):\n last = body.get(\"ExclusiveStartTableName\")\n start = list(dynamodb_backend.tables.keys()).index(last) + 1\n else:\n start = 0\n all_tables = list(dynamodb_backend.tables.keys())\n if limit:\n tables = all_tables[start:start + limit]\n else:\n tables = all_tables[start:]\n response = {\"TableNames\": tables}\n if limit and len(all_tables) > start + limit:\n response[\"LastEvaluatedTableName\"] = tables[-1]\n return dynamo_json_dump(response)\n\n def create_table(self):\n body = self.body\n name = body['TableName']\n\n key_schema = body['KeySchema']\n hash_hey = key_schema['HashKeyElement']\n hash_key_attr = hash_hey['AttributeName']\n hash_key_type = hash_hey['AttributeType']\n\n range_hey = key_schema.get('RangeKeyElement', {})\n range_key_attr = range_hey.get('AttributeName')\n range_key_type = range_hey.get('AttributeType')\n\n throughput = body[\"ProvisionedThroughput\"]\n read_units = throughput[\"ReadCapacityUnits\"]\n write_units = throughput[\"WriteCapacityUnits\"]\n\n table = dynamodb_backend.create_table(\n name,\n hash_key_attr=hash_key_attr,\n hash_key_type=hash_key_type,\n range_key_attr=range_key_attr,\n range_key_type=range_key_type,\n read_capacity=int(read_units),\n write_capacity=int(write_units),\n )\n return dynamo_json_dump(table.describe)\n\n def delete_table(self):\n name = self.body['TableName']\n table = dynamodb_backend.delete_table(name)\n if table:\n return dynamo_json_dump(table.describe)\n else:\n er = 'com.amazonaws.dynamodb.v20111205#ResourceNotFoundException'\n return self.error(er)\n\n def update_table(self):\n name = self.body['TableName']\n throughput = self.body[\"ProvisionedThroughput\"]\n new_read_units = throughput[\"ReadCapacityUnits\"]\n new_write_units = throughput[\"WriteCapacityUnits\"]\n table = dynamodb_backend.update_table_throughput(name, new_read_units, new_write_units)\n return dynamo_json_dump(table.describe)\n\n def describe_table(self):\n name = self.body['TableName']\n try:\n table = dynamodb_backend.tables[name]\n except KeyError:\n er = 'com.amazonaws.dynamodb.v20111205#ResourceNotFoundException'\n return self.error(er)\n return dynamo_json_dump(table.describe)\n\n def put_item(self):\n name = self.body['TableName']\n item = self.body['Item']\n result = dynamodb_backend.put_item(name, item)\n if result:\n item_dict = result.to_json()\n item_dict['ConsumedCapacityUnits'] = 1\n return dynamo_json_dump(item_dict)\n else:\n er = 'com.amazonaws.dynamodb.v20111205#ResourceNotFoundException'\n return self.error(er)\n\n def 
batch_write_item(self):\n table_batches = self.body['RequestItems']\n\n for table_name, table_requests in table_batches.items():\n for table_request in table_requests:\n request_type = list(table_request)[0]\n request = list(table_request.values())[0]\n\n if request_type == 'PutRequest':\n item = request['Item']\n dynamodb_backend.put_item(table_name, item)\n elif request_type == 'DeleteRequest':\n key = request['Key']\n hash_key = key['HashKeyElement']\n range_key = key.get('RangeKeyElement')\n item = dynamodb_backend.delete_item(table_name, hash_key, range_key)\n\n response = {\n \"Responses\": {\n \"Thread\": {\n \"ConsumedCapacityUnits\": 1.0\n },\n \"Reply\": {\n \"ConsumedCapacityUnits\": 1.0\n }\n },\n \"UnprocessedItems\": {}\n }\n\n return dynamo_json_dump(response)\n\n def get_item(self):\n name = self.body['TableName']\n key = self.body['Key']\n hash_key = key['HashKeyElement']\n range_key = key.get('RangeKeyElement')\n attrs_to_get = self.body.get('AttributesToGet')\n try:\n item = dynamodb_backend.get_item(name, hash_key, range_key)\n except ValueError:\n er = 'com.amazon.coral.validate#ValidationException'\n return self.error(er, status=400)\n if item:\n item_dict = item.describe_attrs(attrs_to_get)\n item_dict['ConsumedCapacityUnits'] = 0.5\n return dynamo_json_dump(item_dict)\n else:\n # Item not found\n er = 'com.amazonaws.dynamodb.v20111205#ResourceNotFoundException'\n return self.error(er, status=404)\n\n def batch_get_item(self):\n table_batches = self.body['RequestItems']\n\n results = {\n \"Responses\": {\n \"UnprocessedKeys\": {}\n }\n }\n\n for table_name, table_request in table_batches.items():\n items = []\n keys = table_request['Keys']\n attributes_to_get = table_request.get('AttributesToGet')\n for key in keys:\n hash_key = key[\"HashKeyElement\"]\n range_key = key.get(\"RangeKeyElement\")\n item = dynamodb_backend.get_item(table_name, hash_key, range_key)\n if item:\n item_describe = item.describe_attrs(attributes_to_get)\n items.append(item_describe)\n results[\"Responses\"][table_name] = {\"Items\": items, \"ConsumedCapacityUnits\": 1}\n return dynamo_json_dump(results)\n\n def query(self):\n name = self.body['TableName']\n hash_key = self.body['HashKeyValue']\n range_condition = self.body.get('RangeKeyCondition')\n if range_condition:\n range_comparison = range_condition['ComparisonOperator']\n range_values = range_condition['AttributeValueList']\n else:\n range_comparison = None\n range_values = []\n\n items, last_page = dynamodb_backend.query(name, hash_key, range_comparison, range_values)\n\n if items is None:\n er = 'com.amazonaws.dynamodb.v20111205#ResourceNotFoundException'\n return self.error(er)\n\n result = {\n \"Count\": len(items),\n \"Items\": [item.attrs for item in items],\n \"ConsumedCapacityUnits\": 1,\n }\n\n # Implement this when we do pagination\n # if not last_page:\n # result[\"LastEvaluatedKey\"] = {\n # \"HashKeyElement\": items[-1].hash_key,\n # \"RangeKeyElement\": items[-1].range_key,\n # }\n return dynamo_json_dump(result)\n\n def scan(self):\n name = self.body['TableName']\n\n filters = {}\n scan_filters = self.body.get('ScanFilter', {})\n for attribute_name, scan_filter in scan_filters.items():\n # Keys are attribute names. 
Values are tuples of (comparison, comparison_value)\n comparison_operator = scan_filter[\"ComparisonOperator\"]\n comparison_values = scan_filter.get(\"AttributeValueList\", [])\n filters[attribute_name] = (comparison_operator, comparison_values)\n\n items, scanned_count, last_page = dynamodb_backend.scan(name, filters)\n\n if items is None:\n er = 'com.amazonaws.dynamodb.v20111205#ResourceNotFoundException'\n return self.error(er)\n\n result = {\n \"Count\": len(items),\n \"Items\": [item.attrs for item in items if item],\n \"ConsumedCapacityUnits\": 1,\n \"ScannedCount\": scanned_count\n }\n\n # Implement this when we do pagination\n # if not last_page:\n # result[\"LastEvaluatedKey\"] = {\n # \"HashKeyElement\": items[-1].hash_key,\n # \"RangeKeyElement\": items[-1].range_key,\n # }\n return dynamo_json_dump(result)\n\n def delete_item(self):\n name = self.body['TableName']\n key = self.body['Key']\n hash_key = key['HashKeyElement']\n range_key = key.get('RangeKeyElement')\n return_values = self.body.get('ReturnValues', '')\n item = dynamodb_backend.delete_item(name, hash_key, range_key)\n if item:\n if return_values == 'ALL_OLD':\n item_dict = item.to_json()\n else:\n item_dict = {'Attributes': []}\n item_dict['ConsumedCapacityUnits'] = 0.5\n return dynamo_json_dump(item_dict)\n else:\n er = 'com.amazonaws.dynamodb.v20111205#ResourceNotFoundException'\n return self.error(er)\n", "path": "moto/dynamodb/responses.py"}]} | 4,032 | 115 |
gh_patches_debug_26471 | rasdani/github-patches | git_diff | lmfit__lmfit-py-247 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
compatibility issue with Ipython4
hi,
I get this error experimenting with IPython 4:
```
D:\WinPythonQt5\basedir34\build\winpython-3.4.3.amd64_build0\python-3.4.3.amd64\lib\site-packages\IPython\html.py:14: ShimWarning: The `IPython.html` package has been deprecated. You should import from `notebook` instead. `IPython.html.widgets` has moved to `ipywidgets`.
"`IPython.html.widgets` has moved to `ipywidgets`.", ShimWarning)
```
``` python3
python-3.4.3.amd64\lib\site-packages\lmfit\ui\ipy_fitter.py in <module>()
21 else:
22 # as of IPython 3.x:
---> 23 from IPython.html.widgets import Dropdown
24 from IPython.html.widgets import Button
25 from IPython.html.widgets import Box
ImportError: No module named 'widgets'
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lmfit/ui/ipy_fitter.py`
Content:
```
1 import warnings
2 import numpy as np
3
4 from ..model import Model
5
6 from .basefitter import MPLFitter, _COMMON_DOC, _COMMON_EXAMPLES_DOC
7
8 # Note: If IPython is not available of the version is < 2,
9 # this module will not be imported, and a different Fitter.
10
11 import IPython
12 from IPython.display import display, clear_output
13 # Widgets were only experimental in IPython 2.x, but this does work there.
14 # Handle the change in naming from 2.x to 3.x.
15 IPY2 = IPython.release.version_info[0] == 2
16 if IPY2:
17 from IPython.html.widgets import DropdownWidget as Dropdown
18 from IPython.html.widgets import ButtonWidget as Button
19 from IPython.html.widgets import ContainerWidget as HBox
20 from IPython.html.widgets import FloatTextWidget as FloatText
21 from IPython.html.widgets import CheckboxWidget as Checkbox
22 class HBox(Box):
23 def __init__(*args, **kwargs):
24 self.add_class('hbox')
25 super(self, HBox).__init__(*args, **kwargs)
26 else:
27 # as of IPython 3.x:
28 from IPython.html.widgets import Dropdown
29 from IPython.html.widgets import Button
30 from IPython.html.widgets import HBox
31 from IPython.html.widgets import FloatText
32 from IPython.html.widgets import Checkbox
33
34
35 class ParameterWidgetGroup(object):
36 """Construct several widgets that together represent a Parameter.
37
38 This will only be used if IPython is available."""
39 def __init__(self, par):
40 self.par = par
41
42 # Define widgets.
43 self.value_text = FloatText(description=par.name,
44 min=self.par.min, max=self.par.max)
45 self.value_text.width = 100
46 self.min_text = FloatText(description='min', max=self.par.max)
47 self.min_text.width = 100
48 self.max_text = FloatText(description='max', min=self.par.min)
49 self.max_text.width = 100
50 self.min_checkbox = Checkbox(description='min')
51 self.max_checkbox = Checkbox(description='max')
52 self.vary_checkbox = Checkbox(description='vary')
53
54 # Set widget values and visibility.
55 if par.value is not None:
56 self.value_text.value = self.par.value
57 min_unset = self.par.min is None or self.par.min == -np.inf
58 max_unset = self.par.max is None or self.par.max == np.inf
59 self.min_checkbox.value = not min_unset
60 self.min_text.visible = not min_unset
61 self.min_text.value = self.par.min
62 self.max_checkbox.value = not max_unset
63 self.max_text.visible = not max_unset
64 self.max_text.value = self.par.max
65 self.vary_checkbox.value = self.par.vary
66
67 # Configure widgets to sync with par attributes.
68 self.value_text.on_trait_change(self._on_value_change, 'value')
69 self.min_text.on_trait_change(self._on_min_value_change, 'value')
70 self.max_text.on_trait_change(self._on_max_value_change, 'value')
71 self.min_checkbox.on_trait_change(self._on_min_checkbox_change,
72 'value')
73 self.max_checkbox.on_trait_change(self._on_max_checkbox_change,
74 'value')
75 self.vary_checkbox.on_trait_change(self._on_vary_change, 'value')
76
77 def _on_value_change(self, name, value):
78 self.par.value = value
79
80 def _on_min_checkbox_change(self, name, value):
81 self.min_text.visible = value
82 if value:
83 # -np.inf does not play well with a numerical text field,
84 # so set min to -1 if activated (and back to -inf if deactivated).
85 self.min_text.value = -1
86 self.par.min = self.min_text.value
87 self.value_text.min = self.min_text.value
88 else:
89 self.par.min = None
90
91 def _on_max_checkbox_change(self, name, value):
92 self.max_text.visible = value
93 if value:
94 # np.inf does not play well with a numerical text field,
95 # so set max to 1 if activated (and back to inf if deactivated).
96 self.max_text.value = 1
97 self.par.max = self.max_text.value
98 self.value_text.max = self.max_text.value
99 else:
100 self.par.max = None
101
102 def _on_min_value_change(self, name, value):
103 self.par.min = value
104 self.value_text.min = value
105 self.max_text.min = value
106
107 def _on_max_value_change(self, name, value):
108 self.par.max = value
109 self.value_text.max = value
110 self.min_text.max = value
111
112 def _on_vary_change(self, name, value):
113 self.par.vary = value
114 # self.value_text.disabled = not value
115
116 def close(self):
117 # one convenience method to close (i.e., hide and disconnect) all
118 # widgets in this group
119 self.value_text.close()
120 self.min_text.close()
121 self.max_text.close()
122 self.vary_checkbox.close()
123 self.min_checkbox.close()
124 self.max_checkbox.close()
125
126 def _repr_html_(self):
127 box = HBox()
128 box.children = [self.value_text, self.vary_checkbox,
129 self.min_checkbox, self.min_text,
130 self.max_checkbox, self.max_text]
131 display(box)
132
133 # Make it easy to set the widget attributes directly.
134 @property
135 def value(self):
136 return self.value_text.value
137
138 @value.setter
139 def value(self, value):
140 self.value_text.value = value
141
142 @property
143 def vary(self):
144 return self.vary_checkbox.value
145
146 @vary.setter
147 def vary(self, value):
148 self.vary_checkbox.value = value
149
150 @property
151 def min(self):
152 return self.min_text.value
153
154 @min.setter
155 def min(self, value):
156 self.min_text.value = value
157
158 @property
159 def max(self):
160 return self.max_text.value
161
162 @max.setter
163 def max(self, value):
164 self.max_text.value = value
165
166 @property
167 def name(self):
168 return self.par.name
169
170
171 class NotebookFitter(MPLFitter):
172 __doc__ = _COMMON_DOC + """
173 If IPython is available, it uses the IPython notebook's rich display
174 to fit data interactively in a web-based GUI. The Parameters are
175 represented in a web-based form that is kept in sync with `current_params`.
176 All subclasses to Model, including user-defined ones, are shown in a
177 drop-down menu.
178
179 Clicking the "Fit" button updates a plot, as above, and updates the
180 Parameters in the form to reflect the best fit.
181
182 Parameters
183 ----------
184 data : array-like
185 model : lmfit.Model
186 optional initial Model to use, maybe be set or changed later
187 all_models : list
188 optional list of Models to populate drop-down menu, by default
189 all built-in and user-defined subclasses of Model are used
190
191 Additional Parameters
192 ---------------------
193 axes_style : dictionary representing style keyword arguments to be
194 passed through to `Axes.set(...)`
195 data_style : dictionary representing style keyword arguments to be passed
196 through to the matplotlib `plot()` command the plots the data points
197 init_style : dictionary representing style keyword arguments to be passed
198 through to the matplotlib `plot()` command the plots the initial fit
199 line
200 best_style : dictionary representing style keyword arguments to be passed
201 through to the matplotlib `plot()` command the plots the best fit
202 line
203 **kwargs : independent variables or extra arguments, passed like `x=x`
204 """ + _COMMON_EXAMPLES_DOC
205 def __init__(self, data, model=None, all_models=None, axes_style={},
206 data_style={}, init_style={}, best_style={}, **kwargs):
207 # Dropdown menu of all subclasses of Model, incl. user-defined.
208 self.models_menu = Dropdown()
209 # Dropbox API is very different between IPy 2.x and 3.x.
210 if IPY2:
211 if all_models is None:
212 all_models = dict([(m.__name__, m) for m in Model.__subclasses__()])
213 self.models_menu.values = all_models
214 else:
215 if all_models is None:
216 all_models = [(m.__name__, m) for m in Model.__subclasses__()]
217 self.models_menu.options = all_models
218 self.models_menu.on_trait_change(self._on_model_value_change,
219 'value')
220 # Button to trigger fitting.
221 self.fit_button = Button(description='Fit')
222 self.fit_button.on_click(self._on_fit_button_click)
223
224 # Button to trigger guessing.
225 self.guess_button = Button(description='Auto-Guess')
226 self.guess_button.on_click(self._on_guess_button_click)
227
228 # Parameter widgets are not built here. They are (re-)built when
229 # the model is (re-)set.
230 super(NotebookFitter, self).__init__(data, model, axes_style,
231 data_style, init_style,
232 best_style, **kwargs)
233
234 def _repr_html_(self):
235 display(self.models_menu)
236 button_box = HBox()
237 button_box.children = [self.fit_button, self.guess_button]
238 display(button_box)
239 for pw in self.param_widgets:
240 display(pw)
241 self.plot()
242
243 def guess(self):
244 guessing_successful = super(NotebookFitter, self).guess()
245 self.guess_button.disabled = not guessing_successful
246
247 def _finalize_model(self, value):
248 first_run = not hasattr(self, 'param_widgets')
249 if not first_run:
250 # Remove all Parameter widgets, and replace them with widgets
251 # for the new model.
252 for pw in self.param_widgets:
253 pw.close()
254 self.models_menu.value = value
255 self.param_widgets = [ParameterWidgetGroup(p)
256 for _, p in self._current_params.items()]
257 if not first_run:
258 for pw in self.param_widgets:
259 display(pw)
260
261 def _finalize_params(self):
262 for pw in self.param_widgets:
263 pw.value = self._current_params[pw.name].value
264 pw.min = self._current_params[pw.name].min
265 pw.max = self._current_params[pw.name].max
266 pw.vary = self._current_params[pw.name].vary
267
268 def plot(self):
269 clear_output(wait=True)
270 super(NotebookFitter, self).plot()
271
272 def fit(self):
273 super(NotebookFitter, self).fit()
274 self.plot()
275
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/lmfit/ui/ipy_fitter.py b/lmfit/ui/ipy_fitter.py
--- a/lmfit/ui/ipy_fitter.py
+++ b/lmfit/ui/ipy_fitter.py
@@ -13,6 +13,7 @@
# Widgets were only experimental in IPython 2.x, but this does work there.
# Handle the change in naming from 2.x to 3.x.
IPY2 = IPython.release.version_info[0] == 2
+IPY3 = IPython.release.version_info[0] == 3
if IPY2:
from IPython.html.widgets import DropdownWidget as Dropdown
from IPython.html.widgets import ButtonWidget as Button
@@ -23,13 +24,20 @@
def __init__(*args, **kwargs):
self.add_class('hbox')
super(self, HBox).__init__(*args, **kwargs)
-else:
+elif IPY3:
# as of IPython 3.x:
from IPython.html.widgets import Dropdown
from IPython.html.widgets import Button
from IPython.html.widgets import HBox
from IPython.html.widgets import FloatText
from IPython.html.widgets import Checkbox
+else:
+ # as of IPython 4.x+:
+ from ipywidgets import Dropdown
+ from ipywidgets import Button
+ from ipywidgets import Box
+ from ipywidgets import FloatText
+ from ipywidgets import Checkbox
class ParameterWidgetGroup(object):
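
For context on the pattern the patch applies, here is a minimal standalone sketch of the same version-gated widget import. It is illustrative only: the `import_widgets` helper is a name introduced here (it is not part of lmfit), and the widget class names are taken from the file quoted above.

```python
# Minimal sketch (not lmfit code) of the version-gated import used in the patch.
import IPython

IPY2 = IPython.release.version_info[0] == 2
IPY3 = IPython.release.version_info[0] == 3


def import_widgets():
    """Return (Dropdown, Button, FloatText, Checkbox) for the running IPython."""
    if IPY2:
        # IPython 2.x shipped experimental widgets under *Widget names.
        from IPython.html.widgets import DropdownWidget as Dropdown
        from IPython.html.widgets import ButtonWidget as Button
        from IPython.html.widgets import FloatTextWidget as FloatText
        from IPython.html.widgets import CheckboxWidget as Checkbox
    elif IPY3:
        # IPython 3.x renamed the classes but kept the IPython.html namespace.
        from IPython.html.widgets import Dropdown, Button, FloatText, Checkbox
    else:
        # IPython 4+ moved the widgets into the separate ipywidgets package.
        from ipywidgets import Dropdown, Button, FloatText, Checkbox
    return Dropdown, Button, FloatText, Checkbox
```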
| {"golden_diff": "diff --git a/lmfit/ui/ipy_fitter.py b/lmfit/ui/ipy_fitter.py\n--- a/lmfit/ui/ipy_fitter.py\n+++ b/lmfit/ui/ipy_fitter.py\n@@ -13,6 +13,7 @@\n # Widgets were only experimental in IPython 2.x, but this does work there.\n # Handle the change in naming from 2.x to 3.x.\n IPY2 = IPython.release.version_info[0] == 2\n+IPY3 = IPython.release.version_info[0] == 3\n if IPY2:\n from IPython.html.widgets import DropdownWidget as Dropdown\n from IPython.html.widgets import ButtonWidget as Button\n@@ -23,13 +24,20 @@\n def __init__(*args, **kwargs):\n self.add_class('hbox')\n super(self, HBox).__init__(*args, **kwargs)\n-else:\n+elif IPY3:\n # as of IPython 3.x:\n from IPython.html.widgets import Dropdown\n from IPython.html.widgets import Button\n from IPython.html.widgets import HBox\n from IPython.html.widgets import FloatText\n from IPython.html.widgets import Checkbox\n+else:\n+ # as of IPython 4.x+:\n+ from ipywidgets import Dropdown\n+ from ipywidgets import Button\n+ from ipywidgets import Box\n+ from ipywidgets import FloatText\n+ from ipywidgets import Checkbox\n \n \n class ParameterWidgetGroup(object):\n", "issue": "compatibility issue with Ipython4\nhi,\n\nI get this error experementing with Ipython4:\n\n```\nD:\\WinPythonQt5\\basedir34\\build\\winpython-3.4.3.amd64_build0\\python-3.4.3.amd64\\lib\\site-packages\\IPython\\html.py:14: ShimWarning: The `IPython.html` package has been deprecated. You should import from `notebook` instead. `IPython.html.widgets` has moved to `ipywidgets`.\n \"`IPython.html.widgets` has moved to `ipywidgets`.\", ShimWarning)\n```\n\n``` python3\npython-3.4.3.amd64\\lib\\site-packages\\lmfit\\ui\\ipy_fitter.py in <module>()\n 21 else:\n 22 # as of IPython 3.x:\n---> 23 from IPython.html.widgets import Dropdown\n 24 from IPython.html.widgets import Button\n 25 from IPython.html.widgets import Box\n\nImportError: No module named 'widgets'\n```\n\n", "before_files": [{"content": "import warnings\nimport numpy as np\n\nfrom ..model import Model\n\nfrom .basefitter import MPLFitter, _COMMON_DOC, _COMMON_EXAMPLES_DOC\n\n# Note: If IPython is not available of the version is < 2,\n# this module will not be imported, and a different Fitter.\n\nimport IPython\nfrom IPython.display import display, clear_output\n# Widgets were only experimental in IPython 2.x, but this does work there.\n# Handle the change in naming from 2.x to 3.x.\nIPY2 = IPython.release.version_info[0] == 2\nif IPY2:\n from IPython.html.widgets import DropdownWidget as Dropdown\n from IPython.html.widgets import ButtonWidget as Button\n from IPython.html.widgets import ContainerWidget as HBox\n from IPython.html.widgets import FloatTextWidget as FloatText\n from IPython.html.widgets import CheckboxWidget as Checkbox\n class HBox(Box):\n def __init__(*args, **kwargs):\n self.add_class('hbox')\n super(self, HBox).__init__(*args, **kwargs)\nelse:\n # as of IPython 3.x:\n from IPython.html.widgets import Dropdown\n from IPython.html.widgets import Button\n from IPython.html.widgets import HBox\n from IPython.html.widgets import FloatText\n from IPython.html.widgets import Checkbox\n\n\nclass ParameterWidgetGroup(object):\n \"\"\"Construct several widgets that together represent a Parameter.\n\n This will only be used if IPython is available.\"\"\"\n def __init__(self, par):\n self.par = par\n\n # Define widgets.\n self.value_text = FloatText(description=par.name,\n min=self.par.min, max=self.par.max)\n self.value_text.width = 100\n self.min_text = FloatText(description='min', 
max=self.par.max)\n self.min_text.width = 100\n self.max_text = FloatText(description='max', min=self.par.min)\n self.max_text.width = 100\n self.min_checkbox = Checkbox(description='min')\n self.max_checkbox = Checkbox(description='max')\n self.vary_checkbox = Checkbox(description='vary')\n\n # Set widget values and visibility.\n if par.value is not None:\n self.value_text.value = self.par.value\n min_unset = self.par.min is None or self.par.min == -np.inf\n max_unset = self.par.max is None or self.par.max == np.inf\n self.min_checkbox.value = not min_unset\n self.min_text.visible = not min_unset\n self.min_text.value = self.par.min\n self.max_checkbox.value = not max_unset\n self.max_text.visible = not max_unset\n self.max_text.value = self.par.max\n self.vary_checkbox.value = self.par.vary\n\n # Configure widgets to sync with par attributes.\n self.value_text.on_trait_change(self._on_value_change, 'value')\n self.min_text.on_trait_change(self._on_min_value_change, 'value')\n self.max_text.on_trait_change(self._on_max_value_change, 'value')\n self.min_checkbox.on_trait_change(self._on_min_checkbox_change,\n 'value')\n self.max_checkbox.on_trait_change(self._on_max_checkbox_change,\n 'value')\n self.vary_checkbox.on_trait_change(self._on_vary_change, 'value')\n\n def _on_value_change(self, name, value):\n self.par.value = value\n\n def _on_min_checkbox_change(self, name, value):\n self.min_text.visible = value\n if value:\n # -np.inf does not play well with a numerical text field,\n # so set min to -1 if activated (and back to -inf if deactivated).\n self.min_text.value = -1\n self.par.min = self.min_text.value\n self.value_text.min = self.min_text.value\n else:\n self.par.min = None\n\n def _on_max_checkbox_change(self, name, value):\n self.max_text.visible = value\n if value:\n # np.inf does not play well with a numerical text field,\n # so set max to 1 if activated (and back to inf if deactivated).\n self.max_text.value = 1\n self.par.max = self.max_text.value\n self.value_text.max = self.max_text.value\n else:\n self.par.max = None\n\n def _on_min_value_change(self, name, value):\n self.par.min = value\n self.value_text.min = value\n self.max_text.min = value\n\n def _on_max_value_change(self, name, value):\n self.par.max = value\n self.value_text.max = value\n self.min_text.max = value\n\n def _on_vary_change(self, name, value):\n self.par.vary = value\n # self.value_text.disabled = not value\n\n def close(self):\n # one convenience method to close (i.e., hide and disconnect) all\n # widgets in this group\n self.value_text.close()\n self.min_text.close()\n self.max_text.close()\n self.vary_checkbox.close()\n self.min_checkbox.close()\n self.max_checkbox.close()\n\n def _repr_html_(self):\n box = HBox()\n box.children = [self.value_text, self.vary_checkbox,\n self.min_checkbox, self.min_text,\n self.max_checkbox, self.max_text]\n display(box)\n\n # Make it easy to set the widget attributes directly.\n @property\n def value(self):\n return self.value_text.value\n\n @value.setter\n def value(self, value):\n self.value_text.value = value\n\n @property\n def vary(self):\n return self.vary_checkbox.value\n\n @vary.setter\n def vary(self, value):\n self.vary_checkbox.value = value\n\n @property\n def min(self):\n return self.min_text.value\n\n @min.setter\n def min(self, value):\n self.min_text.value = value\n\n @property\n def max(self):\n return self.max_text.value\n\n @max.setter\n def max(self, value):\n self.max_text.value = value\n\n @property\n def name(self):\n return 
self.par.name\n\n\nclass NotebookFitter(MPLFitter):\n __doc__ = _COMMON_DOC + \"\"\"\n If IPython is available, it uses the IPython notebook's rich display\n to fit data interactively in a web-based GUI. The Parameters are\n represented in a web-based form that is kept in sync with `current_params`.\n All subclasses to Model, including user-defined ones, are shown in a\n drop-down menu.\n\n Clicking the \"Fit\" button updates a plot, as above, and updates the\n Parameters in the form to reflect the best fit.\n\n Parameters\n ----------\n data : array-like\n model : lmfit.Model\n optional initial Model to use, maybe be set or changed later\n all_models : list\n optional list of Models to populate drop-down menu, by default\n all built-in and user-defined subclasses of Model are used\n\n Additional Parameters\n ---------------------\n axes_style : dictionary representing style keyword arguments to be\n passed through to `Axes.set(...)`\n data_style : dictionary representing style keyword arguments to be passed\n through to the matplotlib `plot()` command the plots the data points\n init_style : dictionary representing style keyword arguments to be passed\n through to the matplotlib `plot()` command the plots the initial fit\n line\n best_style : dictionary representing style keyword arguments to be passed\n through to the matplotlib `plot()` command the plots the best fit\n line\n **kwargs : independent variables or extra arguments, passed like `x=x`\n \"\"\" + _COMMON_EXAMPLES_DOC\n def __init__(self, data, model=None, all_models=None, axes_style={},\n data_style={}, init_style={}, best_style={}, **kwargs):\n # Dropdown menu of all subclasses of Model, incl. user-defined.\n self.models_menu = Dropdown()\n # Dropbox API is very different between IPy 2.x and 3.x.\n if IPY2:\n if all_models is None:\n all_models = dict([(m.__name__, m) for m in Model.__subclasses__()])\n self.models_menu.values = all_models\n else:\n if all_models is None:\n all_models = [(m.__name__, m) for m in Model.__subclasses__()]\n self.models_menu.options = all_models\n self.models_menu.on_trait_change(self._on_model_value_change,\n 'value')\n # Button to trigger fitting.\n self.fit_button = Button(description='Fit')\n self.fit_button.on_click(self._on_fit_button_click)\n\n # Button to trigger guessing.\n self.guess_button = Button(description='Auto-Guess')\n self.guess_button.on_click(self._on_guess_button_click)\n\n # Parameter widgets are not built here. 
They are (re-)built when\n # the model is (re-)set.\n super(NotebookFitter, self).__init__(data, model, axes_style,\n data_style, init_style,\n best_style, **kwargs)\n\n def _repr_html_(self):\n display(self.models_menu)\n button_box = HBox()\n button_box.children = [self.fit_button, self.guess_button]\n display(button_box)\n for pw in self.param_widgets:\n display(pw)\n self.plot()\n\n def guess(self):\n guessing_successful = super(NotebookFitter, self).guess()\n self.guess_button.disabled = not guessing_successful\n\n def _finalize_model(self, value):\n first_run = not hasattr(self, 'param_widgets')\n if not first_run:\n # Remove all Parameter widgets, and replace them with widgets\n # for the new model.\n for pw in self.param_widgets:\n pw.close()\n self.models_menu.value = value\n self.param_widgets = [ParameterWidgetGroup(p)\n for _, p in self._current_params.items()]\n if not first_run:\n for pw in self.param_widgets:\n display(pw)\n\n def _finalize_params(self):\n for pw in self.param_widgets:\n pw.value = self._current_params[pw.name].value\n pw.min = self._current_params[pw.name].min\n pw.max = self._current_params[pw.name].max\n pw.vary = self._current_params[pw.name].vary\n\n def plot(self):\n clear_output(wait=True)\n super(NotebookFitter, self).plot()\n\n def fit(self):\n super(NotebookFitter, self).fit()\n self.plot()\n", "path": "lmfit/ui/ipy_fitter.py"}], "after_files": [{"content": "import warnings\nimport numpy as np\n\nfrom ..model import Model\n\nfrom .basefitter import MPLFitter, _COMMON_DOC, _COMMON_EXAMPLES_DOC\n\n# Note: If IPython is not available of the version is < 2,\n# this module will not be imported, and a different Fitter.\n\nimport IPython\nfrom IPython.display import display, clear_output\n# Widgets were only experimental in IPython 2.x, but this does work there.\n# Handle the change in naming from 2.x to 3.x.\nIPY2 = IPython.release.version_info[0] == 2\nIPY3 = IPython.release.version_info[0] == 3\nif IPY2:\n from IPython.html.widgets import DropdownWidget as Dropdown\n from IPython.html.widgets import ButtonWidget as Button\n from IPython.html.widgets import ContainerWidget as HBox\n from IPython.html.widgets import FloatTextWidget as FloatText\n from IPython.html.widgets import CheckboxWidget as Checkbox\n class HBox(Box):\n def __init__(*args, **kwargs):\n self.add_class('hbox')\n super(self, HBox).__init__(*args, **kwargs)\nelif IPY3:\n # as of IPython 3.x:\n from IPython.html.widgets import Dropdown\n from IPython.html.widgets import Button\n from IPython.html.widgets import HBox\n from IPython.html.widgets import FloatText\n from IPython.html.widgets import Checkbox\nelse:\n # as of IPython 4.x+:\n from ipywidgets import Dropdown\n from ipywidgets import Button\n from ipywidgets import Box\n from ipywidgets import FloatText\n from ipywidgets import Checkbox\n\n\nclass ParameterWidgetGroup(object):\n \"\"\"Construct several widgets that together represent a Parameter.\n\n This will only be used if IPython is available.\"\"\"\n def __init__(self, par):\n self.par = par\n\n # Define widgets.\n self.value_text = FloatText(description=par.name,\n min=self.par.min, max=self.par.max)\n self.value_text.width = 100\n self.min_text = FloatText(description='min', max=self.par.max)\n self.min_text.width = 100\n self.max_text = FloatText(description='max', min=self.par.min)\n self.max_text.width = 100\n self.min_checkbox = Checkbox(description='min')\n self.max_checkbox = Checkbox(description='max')\n self.vary_checkbox = Checkbox(description='vary')\n\n # Set 
widget values and visibility.\n if par.value is not None:\n self.value_text.value = self.par.value\n min_unset = self.par.min is None or self.par.min == -np.inf\n max_unset = self.par.max is None or self.par.max == np.inf\n self.min_checkbox.value = not min_unset\n self.min_text.visible = not min_unset\n self.min_text.value = self.par.min\n self.max_checkbox.value = not max_unset\n self.max_text.visible = not max_unset\n self.max_text.value = self.par.max\n self.vary_checkbox.value = self.par.vary\n\n # Configure widgets to sync with par attributes.\n self.value_text.on_trait_change(self._on_value_change, 'value')\n self.min_text.on_trait_change(self._on_min_value_change, 'value')\n self.max_text.on_trait_change(self._on_max_value_change, 'value')\n self.min_checkbox.on_trait_change(self._on_min_checkbox_change,\n 'value')\n self.max_checkbox.on_trait_change(self._on_max_checkbox_change,\n 'value')\n self.vary_checkbox.on_trait_change(self._on_vary_change, 'value')\n\n def _on_value_change(self, name, value):\n self.par.value = value\n\n def _on_min_checkbox_change(self, name, value):\n self.min_text.visible = value\n if value:\n # -np.inf does not play well with a numerical text field,\n # so set min to -1 if activated (and back to -inf if deactivated).\n self.min_text.value = -1\n self.par.min = self.min_text.value\n self.value_text.min = self.min_text.value\n else:\n self.par.min = None\n\n def _on_max_checkbox_change(self, name, value):\n self.max_text.visible = value\n if value:\n # np.inf does not play well with a numerical text field,\n # so set max to 1 if activated (and back to inf if deactivated).\n self.max_text.value = 1\n self.par.max = self.max_text.value\n self.value_text.max = self.max_text.value\n else:\n self.par.max = None\n\n def _on_min_value_change(self, name, value):\n self.par.min = value\n self.value_text.min = value\n self.max_text.min = value\n\n def _on_max_value_change(self, name, value):\n self.par.max = value\n self.value_text.max = value\n self.min_text.max = value\n\n def _on_vary_change(self, name, value):\n self.par.vary = value\n # self.value_text.disabled = not value\n\n def close(self):\n # one convenience method to close (i.e., hide and disconnect) all\n # widgets in this group\n self.value_text.close()\n self.min_text.close()\n self.max_text.close()\n self.vary_checkbox.close()\n self.min_checkbox.close()\n self.max_checkbox.close()\n\n def _repr_html_(self):\n box = HBox()\n box.children = [self.value_text, self.vary_checkbox,\n self.min_checkbox, self.min_text,\n self.max_checkbox, self.max_text]\n display(box)\n\n # Make it easy to set the widget attributes directly.\n @property\n def value(self):\n return self.value_text.value\n\n @value.setter\n def value(self, value):\n self.value_text.value = value\n\n @property\n def vary(self):\n return self.vary_checkbox.value\n\n @vary.setter\n def vary(self, value):\n self.vary_checkbox.value = value\n\n @property\n def min(self):\n return self.min_text.value\n\n @min.setter\n def min(self, value):\n self.min_text.value = value\n\n @property\n def max(self):\n return self.max_text.value\n\n @max.setter\n def max(self, value):\n self.max_text.value = value\n\n @property\n def name(self):\n return self.par.name\n\n\nclass NotebookFitter(MPLFitter):\n __doc__ = _COMMON_DOC + \"\"\"\n If IPython is available, it uses the IPython notebook's rich display\n to fit data interactively in a web-based GUI. 
The Parameters are\n represented in a web-based form that is kept in sync with `current_params`.\n All subclasses to Model, including user-defined ones, are shown in a\n drop-down menu.\n\n Clicking the \"Fit\" button updates a plot, as above, and updates the\n Parameters in the form to reflect the best fit.\n\n Parameters\n ----------\n data : array-like\n model : lmfit.Model\n optional initial Model to use, maybe be set or changed later\n all_models : list\n optional list of Models to populate drop-down menu, by default\n all built-in and user-defined subclasses of Model are used\n\n Additional Parameters\n ---------------------\n axes_style : dictionary representing style keyword arguments to be\n passed through to `Axes.set(...)`\n data_style : dictionary representing style keyword arguments to be passed\n through to the matplotlib `plot()` command the plots the data points\n init_style : dictionary representing style keyword arguments to be passed\n through to the matplotlib `plot()` command the plots the initial fit\n line\n best_style : dictionary representing style keyword arguments to be passed\n through to the matplotlib `plot()` command the plots the best fit\n line\n **kwargs : independent variables or extra arguments, passed like `x=x`\n \"\"\" + _COMMON_EXAMPLES_DOC\n def __init__(self, data, model=None, all_models=None, axes_style={},\n data_style={}, init_style={}, best_style={}, **kwargs):\n # Dropdown menu of all subclasses of Model, incl. user-defined.\n self.models_menu = Dropdown()\n # Dropbox API is very different between IPy 2.x and 3.x.\n if IPY2:\n if all_models is None:\n all_models = dict([(m.__name__, m) for m in Model.__subclasses__()])\n self.models_menu.values = all_models\n else:\n if all_models is None:\n all_models = [(m.__name__, m) for m in Model.__subclasses__()]\n self.models_menu.options = all_models\n self.models_menu.on_trait_change(self._on_model_value_change,\n 'value')\n # Button to trigger fitting.\n self.fit_button = Button(description='Fit')\n self.fit_button.on_click(self._on_fit_button_click)\n\n # Button to trigger guessing.\n self.guess_button = Button(description='Auto-Guess')\n self.guess_button.on_click(self._on_guess_button_click)\n\n # Parameter widgets are not built here. 
They are (re-)built when\n # the model is (re-)set.\n super(NotebookFitter, self).__init__(data, model, axes_style,\n data_style, init_style,\n best_style, **kwargs)\n\n def _repr_html_(self):\n display(self.models_menu)\n button_box = HBox()\n button_box.children = [self.fit_button, self.guess_button]\n display(button_box)\n for pw in self.param_widgets:\n display(pw)\n self.plot()\n\n def guess(self):\n guessing_successful = super(NotebookFitter, self).guess()\n self.guess_button.disabled = not guessing_successful\n\n def _finalize_model(self, value):\n first_run = not hasattr(self, 'param_widgets')\n if not first_run:\n # Remove all Parameter widgets, and replace them with widgets\n # for the new model.\n for pw in self.param_widgets:\n pw.close()\n self.models_menu.value = value\n self.param_widgets = [ParameterWidgetGroup(p)\n for _, p in self._current_params.items()]\n if not first_run:\n for pw in self.param_widgets:\n display(pw)\n\n def _finalize_params(self):\n for pw in self.param_widgets:\n pw.value = self._current_params[pw.name].value\n pw.min = self._current_params[pw.name].min\n pw.max = self._current_params[pw.name].max\n pw.vary = self._current_params[pw.name].vary\n\n def plot(self):\n clear_output(wait=True)\n super(NotebookFitter, self).plot()\n\n def fit(self):\n super(NotebookFitter, self).fit()\n self.plot()\n", "path": "lmfit/ui/ipy_fitter.py"}]} | 3,511 | 335 |
gh_patches_debug_12361 | rasdani/github-patches | git_diff | carpentries__amy-2333 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Prepare AMY staging instance for actual use
New test AMY server is running, but it lacks some features from the other server.
- [x] Run fixtures (should be accompanied by #2239)
- [x] Scaffold non-admin users for AMY database
- [ ] Add default admin user
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `amy/workshops/management/commands/create_superuser.py`
Content:
```
1 from django.core.management.base import BaseCommand, CommandError
2
3 from communityroles.models import CommunityRole, CommunityRoleConfig
4 from workshops.models import Person
5
6
7 class Command(BaseCommand):
8 args = "no arguments"
9 help = 'Create a superuser called "admin" with password "admin".'
10
11 def handle(self, *args, **options):
12 try:
13 admin = Person.objects.create_superuser(
14 username="admin",
15 personal="admin",
16 family="admin",
17 email="[email protected]",
18 password="admin",
19 )
20 print("Created admin user")
21
22 role_config = CommunityRoleConfig.objects.get(name="instructor")
23 CommunityRole.objects.create(
24 config=role_config,
25 person=admin,
26 )
27 print("Assigned Instructor community role to admin user")
28
29 except Exception as e:
30 raise CommandError("Failed to create admin: {0}".format(str(e)))
31
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/amy/workshops/management/commands/create_superuser.py b/amy/workshops/management/commands/create_superuser.py
--- a/amy/workshops/management/commands/create_superuser.py
+++ b/amy/workshops/management/commands/create_superuser.py
@@ -9,9 +9,15 @@
help = 'Create a superuser called "admin" with password "admin".'
def handle(self, *args, **options):
+ username = "admin"
+
+ if Person.objects.filter(username=username).exists():
+ print("Admin user exists, quitting.")
+ return
+
try:
admin = Person.objects.create_superuser(
- username="admin",
+ username=username,
personal="admin",
family="admin",
email="[email protected]",
| {"golden_diff": "diff --git a/amy/workshops/management/commands/create_superuser.py b/amy/workshops/management/commands/create_superuser.py\n--- a/amy/workshops/management/commands/create_superuser.py\n+++ b/amy/workshops/management/commands/create_superuser.py\n@@ -9,9 +9,15 @@\n help = 'Create a superuser called \"admin\" with password \"admin\".'\n \n def handle(self, *args, **options):\n+ username = \"admin\"\n+\n+ if Person.objects.filter(username=username).exists():\n+ print(\"Admin user exists, quitting.\")\n+ return\n+\n try:\n admin = Person.objects.create_superuser(\n- username=\"admin\",\n+ username=username,\n personal=\"admin\",\n family=\"admin\",\n email=\"[email protected]\",\n", "issue": "Prepare AMY staging instance for actual use\nNew test AMY server is running, but it lacks some features from the other server.\r\n\r\n- [x] Run fixtures (should be accompanied by #2239)\r\n- [x] Scaffold non-admin users for AMY database\r\n- [ ] Add default admin user\n", "before_files": [{"content": "from django.core.management.base import BaseCommand, CommandError\n\nfrom communityroles.models import CommunityRole, CommunityRoleConfig\nfrom workshops.models import Person\n\n\nclass Command(BaseCommand):\n args = \"no arguments\"\n help = 'Create a superuser called \"admin\" with password \"admin\".'\n\n def handle(self, *args, **options):\n try:\n admin = Person.objects.create_superuser(\n username=\"admin\",\n personal=\"admin\",\n family=\"admin\",\n email=\"[email protected]\",\n password=\"admin\",\n )\n print(\"Created admin user\")\n\n role_config = CommunityRoleConfig.objects.get(name=\"instructor\")\n CommunityRole.objects.create(\n config=role_config,\n person=admin,\n )\n print(\"Assigned Instructor community role to admin user\")\n\n except Exception as e:\n raise CommandError(\"Failed to create admin: {0}\".format(str(e)))\n", "path": "amy/workshops/management/commands/create_superuser.py"}], "after_files": [{"content": "from django.core.management.base import BaseCommand, CommandError\n\nfrom communityroles.models import CommunityRole, CommunityRoleConfig\nfrom workshops.models import Person\n\n\nclass Command(BaseCommand):\n args = \"no arguments\"\n help = 'Create a superuser called \"admin\" with password \"admin\".'\n\n def handle(self, *args, **options):\n username = \"admin\"\n\n if Person.objects.filter(username=username).exists():\n print(\"Admin user exists, quitting.\")\n return\n\n try:\n admin = Person.objects.create_superuser(\n username=username,\n personal=\"admin\",\n family=\"admin\",\n email=\"[email protected]\",\n password=\"admin\",\n )\n print(\"Created admin user\")\n\n role_config = CommunityRoleConfig.objects.get(name=\"instructor\")\n CommunityRole.objects.create(\n config=role_config,\n person=admin,\n )\n print(\"Assigned Instructor community role to admin user\")\n\n except Exception as e:\n raise CommandError(\"Failed to create admin: {0}\".format(str(e)))\n", "path": "amy/workshops/management/commands/create_superuser.py"}]} | 572 | 170 |
gh_patches_debug_22821 | rasdani/github-patches | git_diff | pyg-team__pytorch_geometric-5094 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
New attribute `self._load_hook` in the `Linear` class since 2.0.3 will raise a KeyError when executing the `load_state_dict` function
### 🐛 Describe the bug
In PyTorch, the function `load_state_dict(state_dict, strict)` accepts an empty dict (`state_dict == {}`) when `strict` is False.
However, since version 2.0.3 the `Linear` class in `torch_geometric.nn.dense.linear` has a new attribute `self._load_hook`, and when we execute `Linear(xxxx).load_state_dict({}, strict=False)`, the class runs the `self._lazy_load_hook` function shown below:
```
def _lazy_load_hook(self, state_dict, prefix, local_metadata, strict,
missing_keys, unexpected_keys, error_msgs):
weight = state_dict[prefix + 'weight']
if is_uninitialized_parameter(weight):
self.in_channels = -1
self.weight = nn.parameter.UninitializedParameter()
if not hasattr(self, '_hook'):
self._hook = self.register_forward_pre_hook(
self.initialize_parameters)
elif is_uninitialized_parameter(self.weight):
self.in_channels = weight.size(-1)
self.weight.materialize((self.out_channels, self.in_channels))
if hasattr(self, '_hook'):
self._hook.remove()
delattr(self, '_hook')
```
Since the `state_dict` is empty, the line `weight = state_dict[prefix + 'weight']` raises a `KeyError`.
### Environment
* PyG version:
* PyTorch version:
* OS:
* Python version:
* CUDA/cuDNN version:
* How you installed PyTorch and PyG (`conda`, `pip`, source):
* Any other relevant information (*e.g.*, version of `torch-scatter`):
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `torch_geometric/nn/dense/linear.py`
Content:
```
1 import copy
2 import math
3 from typing import Any, Optional
4
5 import torch
6 import torch.nn.functional as F
7 from torch import Tensor, nn
8 from torch.nn.parameter import Parameter
9
10 from torch_geometric.nn import inits
11
12
13 def is_uninitialized_parameter(x: Any) -> bool:
14 if not hasattr(nn.parameter, 'UninitializedParameter'):
15 return False
16 return isinstance(x, nn.parameter.UninitializedParameter)
17
18
19 class Linear(torch.nn.Module):
20 r"""Applies a linear tranformation to the incoming data
21
22 .. math::
23 \mathbf{x}^{\prime} = \mathbf{x} \mathbf{W}^{\top} + \mathbf{b}
24
25 similar to :class:`torch.nn.Linear`.
26 It supports lazy initialization and customizable weight and bias
27 initialization.
28
29 Args:
30 in_channels (int): Size of each input sample. Will be initialized
31 lazily in case it is given as :obj:`-1`.
32 out_channels (int): Size of each output sample.
33 bias (bool, optional): If set to :obj:`False`, the layer will not learn
34 an additive bias. (default: :obj:`True`)
35 weight_initializer (str, optional): The initializer for the weight
36 matrix (:obj:`"glorot"`, :obj:`"uniform"`, :obj:`"kaiming_uniform"`
37 or :obj:`None`).
38 If set to :obj:`None`, will match default weight initialization of
39 :class:`torch.nn.Linear`. (default: :obj:`None`)
40 bias_initializer (str, optional): The initializer for the bias vector
41 (:obj:`"zeros"` or :obj:`None`).
42 If set to :obj:`None`, will match default bias initialization of
43 :class:`torch.nn.Linear`. (default: :obj:`None`)
44
45 Shapes:
46 - **input:** features :math:`(*, F_{in})`
47 - **output:** features :math:`(*, F_{out})`
48 """
49 def __init__(self, in_channels: int, out_channels: int, bias: bool = True,
50 weight_initializer: Optional[str] = None,
51 bias_initializer: Optional[str] = None):
52 super().__init__()
53 self.in_channels = in_channels
54 self.out_channels = out_channels
55 self.weight_initializer = weight_initializer
56 self.bias_initializer = bias_initializer
57
58 if in_channels > 0:
59 self.weight = Parameter(torch.Tensor(out_channels, in_channels))
60 else:
61 self.weight = nn.parameter.UninitializedParameter()
62 self._hook = self.register_forward_pre_hook(
63 self.initialize_parameters)
64
65 if bias:
66 self.bias = Parameter(torch.Tensor(out_channels))
67 else:
68 self.register_parameter('bias', None)
69
70 self._load_hook = self._register_load_state_dict_pre_hook(
71 self._lazy_load_hook)
72
73 self.reset_parameters()
74
75 def __deepcopy__(self, memo):
76 out = Linear(self.in_channels, self.out_channels, self.bias
77 is not None, self.weight_initializer,
78 self.bias_initializer)
79 if self.in_channels > 0:
80 out.weight = copy.deepcopy(self.weight, memo)
81 if self.bias is not None:
82 out.bias = copy.deepcopy(self.bias, memo)
83 return out
84
85 def reset_parameters(self):
86 if self.in_channels <= 0:
87 pass
88 elif self.weight_initializer == 'glorot':
89 inits.glorot(self.weight)
90 elif self.weight_initializer == 'uniform':
91 bound = 1.0 / math.sqrt(self.weight.size(-1))
92 torch.nn.init.uniform_(self.weight.data, -bound, bound)
93 elif self.weight_initializer == 'kaiming_uniform':
94 inits.kaiming_uniform(self.weight, fan=self.in_channels,
95 a=math.sqrt(5))
96 elif self.weight_initializer is None:
97 inits.kaiming_uniform(self.weight, fan=self.in_channels,
98 a=math.sqrt(5))
99 else:
100 raise RuntimeError(f"Linear layer weight initializer "
101 f"'{self.weight_initializer}' is not supported")
102
103 if self.bias is None or self.in_channels <= 0:
104 pass
105 elif self.bias_initializer == 'zeros':
106 inits.zeros(self.bias)
107 elif self.bias_initializer is None:
108 inits.uniform(self.in_channels, self.bias)
109 else:
110 raise RuntimeError(f"Linear layer bias initializer "
111 f"'{self.bias_initializer}' is not supported")
112
113 def forward(self, x: Tensor) -> Tensor:
114 r"""
115 Args:
116 x (Tensor): The features.
117 """
118 return F.linear(x, self.weight, self.bias)
119
120 @torch.no_grad()
121 def initialize_parameters(self, module, input):
122 if is_uninitialized_parameter(self.weight):
123 self.in_channels = input[0].size(-1)
124 self.weight.materialize((self.out_channels, self.in_channels))
125 self.reset_parameters()
126 self._hook.remove()
127 delattr(self, '_hook')
128
129 def _save_to_state_dict(self, destination, prefix, keep_vars):
130 if is_uninitialized_parameter(self.weight):
131 destination[prefix + 'weight'] = self.weight
132 else:
133 destination[prefix + 'weight'] = self.weight.detach()
134 if self.bias is not None:
135 destination[prefix + 'bias'] = self.bias.detach()
136
137 def _lazy_load_hook(self, state_dict, prefix, local_metadata, strict,
138 missing_keys, unexpected_keys, error_msgs):
139
140 weight = state_dict[prefix + 'weight']
141 if is_uninitialized_parameter(weight):
142 self.in_channels = -1
143 self.weight = nn.parameter.UninitializedParameter()
144 if not hasattr(self, '_hook'):
145 self._hook = self.register_forward_pre_hook(
146 self.initialize_parameters)
147
148 elif is_uninitialized_parameter(self.weight):
149 self.in_channels = weight.size(-1)
150 self.weight.materialize((self.out_channels, self.in_channels))
151 if hasattr(self, '_hook'):
152 self._hook.remove()
153 delattr(self, '_hook')
154
155 def __repr__(self) -> str:
156 return (f'{self.__class__.__name__}({self.in_channels}, '
157 f'{self.out_channels}, bias={self.bias is not None})')
158
159
160 class HeteroLinear(torch.nn.Module):
161 r"""Applies separate linear tranformations to the incoming data according
162 to types
163
164 .. math::
165 \mathbf{x}^{\prime}_{\kappa} = \mathbf{x}_{\kappa}
166 \mathbf{W}^{\top}_{\kappa} + \mathbf{b}_{\kappa}
167
168 for type :math:`\kappa`.
169 It supports lazy initialization and customizable weight and bias
170 initialization.
171
172 Args:
173 in_channels (int): Size of each input sample. Will be initialized
174 lazily in case it is given as :obj:`-1`.
175 out_channels (int): Size of each output sample.
176 num_types (int): The number of types.
177 **kwargs (optional): Additional arguments of
178 :class:`torch_geometric.nn.Linear`.
179
180 Shapes:
181 - **input:**
182 features :math:`(*, F_{in})`,
183 type vector :math:`(*)`
184 - **output:** features :math:`(*, F_{out})`
185 """
186 def __init__(self, in_channels: int, out_channels: int, num_types: int,
187 **kwargs):
188 super().__init__()
189
190 self.in_channels = in_channels
191 self.out_channels = out_channels
192
193 self.lins = torch.nn.ModuleList([
194 Linear(in_channels, out_channels, **kwargs)
195 for _ in range(num_types)
196 ])
197
198 self.reset_parameters()
199
200 def reset_parameters(self):
201 for lin in self.lins:
202 lin.reset_parameters()
203
204 def forward(self, x: Tensor, type_vec: Tensor) -> Tensor:
205 r"""
206 Args:
207 x (Tensor): The input features.
208 type_vec (LongTensor): A vector that maps each entry to a type.
209 """
210 out = x.new_empty(x.size(0), self.out_channels)
211 for i, lin in enumerate(self.lins):
212 mask = type_vec == i
213 out[mask] = lin(x[mask])
214 return out
215
216 def __repr__(self) -> str:
217 return (f'{self.__class__.__name__}({self.in_channels}, '
218 f'{self.out_channels}, bias={self.lins[0].bias is not None})')
219
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/torch_geometric/nn/dense/linear.py b/torch_geometric/nn/dense/linear.py
--- a/torch_geometric/nn/dense/linear.py
+++ b/torch_geometric/nn/dense/linear.py
@@ -137,15 +137,16 @@
def _lazy_load_hook(self, state_dict, prefix, local_metadata, strict,
missing_keys, unexpected_keys, error_msgs):
- weight = state_dict[prefix + 'weight']
- if is_uninitialized_parameter(weight):
+ weight = state_dict.get(prefix + 'weight', None)
+
+ if weight is not None and is_uninitialized_parameter(weight):
self.in_channels = -1
self.weight = nn.parameter.UninitializedParameter()
if not hasattr(self, '_hook'):
self._hook = self.register_forward_pre_hook(
self.initialize_parameters)
- elif is_uninitialized_parameter(self.weight):
+ elif weight is not None and is_uninitialized_parameter(self.weight):
self.in_channels = weight.size(-1)
self.weight.materialize((self.out_channels, self.in_channels))
if hasattr(self, '_hook'):
| {"golden_diff": "diff --git a/torch_geometric/nn/dense/linear.py b/torch_geometric/nn/dense/linear.py\n--- a/torch_geometric/nn/dense/linear.py\n+++ b/torch_geometric/nn/dense/linear.py\n@@ -137,15 +137,16 @@\n def _lazy_load_hook(self, state_dict, prefix, local_metadata, strict,\n missing_keys, unexpected_keys, error_msgs):\n \n- weight = state_dict[prefix + 'weight']\n- if is_uninitialized_parameter(weight):\n+ weight = state_dict.get(prefix + 'weight', None)\n+\n+ if weight is not None and is_uninitialized_parameter(weight):\n self.in_channels = -1\n self.weight = nn.parameter.UninitializedParameter()\n if not hasattr(self, '_hook'):\n self._hook = self.register_forward_pre_hook(\n self.initialize_parameters)\n \n- elif is_uninitialized_parameter(self.weight):\n+ elif weight is not None and is_uninitialized_parameter(self.weight):\n self.in_channels = weight.size(-1)\n self.weight.materialize((self.out_channels, self.in_channels))\n if hasattr(self, '_hook'):\n", "issue": "New attribute `self._load_hook` in linear class since 2.03 will raise KeyError when executing `load_state_dict` fucntion\n### \ud83d\udc1b Describe the bug\r\n\r\nIn Pytorch, the function `load_state_dict(state_dict, strict)` allows empty dict `state_dict=={}` when `strict` is False. \r\nHowever, from version 2.03 the linear class in `torch_geometric.nn.dense.linear.py` has a new attribute `self._load_hook`, and when we execute `Linear(xxxx).load_state_dict({}, strict=False)`, the linear class will execute the `self._lazy_load_hook` function as follows\r\n```\r\n def _lazy_load_hook(self, state_dict, prefix, local_metadata, strict,\r\n missing_keys, unexpected_keys, error_msgs):\r\n\r\n weight = state_dict[prefix + 'weight']\r\n if is_uninitialized_parameter(weight):\r\n self.in_channels = -1\r\n self.weight = nn.parameter.UninitializedParameter()\r\n if not hasattr(self, '_hook'):\r\n self._hook = self.register_forward_pre_hook(\r\n self.initialize_parameters)\r\n\r\n elif is_uninitialized_parameter(self.weight):\r\n self.in_channels = weight.size(-1)\r\n self.weight.materialize((self.out_channels, self.in_channels))\r\n if hasattr(self, '_hook'):\r\n self._hook.remove()\r\n delattr(self, '_hook')\r\n```\r\nSince the `state_dict` is empty, the line `weight = state_dict[prefix + 'weight']` will report KeyError. \r\n\r\n### Environment\r\n\r\n* PyG version:\r\n* PyTorch version:\r\n* OS:\r\n* Python version:\r\n* CUDA/cuDNN version:\r\n* How you installed PyTorch and PyG (`conda`, `pip`, source):\r\n* Any other relevant information (*e.g.*, version of `torch-scatter`):\r\n\n", "before_files": [{"content": "import copy\nimport math\nfrom typing import Any, Optional\n\nimport torch\nimport torch.nn.functional as F\nfrom torch import Tensor, nn\nfrom torch.nn.parameter import Parameter\n\nfrom torch_geometric.nn import inits\n\n\ndef is_uninitialized_parameter(x: Any) -> bool:\n if not hasattr(nn.parameter, 'UninitializedParameter'):\n return False\n return isinstance(x, nn.parameter.UninitializedParameter)\n\n\nclass Linear(torch.nn.Module):\n r\"\"\"Applies a linear tranformation to the incoming data\n\n .. math::\n \\mathbf{x}^{\\prime} = \\mathbf{x} \\mathbf{W}^{\\top} + \\mathbf{b}\n\n similar to :class:`torch.nn.Linear`.\n It supports lazy initialization and customizable weight and bias\n initialization.\n\n Args:\n in_channels (int): Size of each input sample. 
Will be initialized\n lazily in case it is given as :obj:`-1`.\n out_channels (int): Size of each output sample.\n bias (bool, optional): If set to :obj:`False`, the layer will not learn\n an additive bias. (default: :obj:`True`)\n weight_initializer (str, optional): The initializer for the weight\n matrix (:obj:`\"glorot\"`, :obj:`\"uniform\"`, :obj:`\"kaiming_uniform\"`\n or :obj:`None`).\n If set to :obj:`None`, will match default weight initialization of\n :class:`torch.nn.Linear`. (default: :obj:`None`)\n bias_initializer (str, optional): The initializer for the bias vector\n (:obj:`\"zeros\"` or :obj:`None`).\n If set to :obj:`None`, will match default bias initialization of\n :class:`torch.nn.Linear`. (default: :obj:`None`)\n\n Shapes:\n - **input:** features :math:`(*, F_{in})`\n - **output:** features :math:`(*, F_{out})`\n \"\"\"\n def __init__(self, in_channels: int, out_channels: int, bias: bool = True,\n weight_initializer: Optional[str] = None,\n bias_initializer: Optional[str] = None):\n super().__init__()\n self.in_channels = in_channels\n self.out_channels = out_channels\n self.weight_initializer = weight_initializer\n self.bias_initializer = bias_initializer\n\n if in_channels > 0:\n self.weight = Parameter(torch.Tensor(out_channels, in_channels))\n else:\n self.weight = nn.parameter.UninitializedParameter()\n self._hook = self.register_forward_pre_hook(\n self.initialize_parameters)\n\n if bias:\n self.bias = Parameter(torch.Tensor(out_channels))\n else:\n self.register_parameter('bias', None)\n\n self._load_hook = self._register_load_state_dict_pre_hook(\n self._lazy_load_hook)\n\n self.reset_parameters()\n\n def __deepcopy__(self, memo):\n out = Linear(self.in_channels, self.out_channels, self.bias\n is not None, self.weight_initializer,\n self.bias_initializer)\n if self.in_channels > 0:\n out.weight = copy.deepcopy(self.weight, memo)\n if self.bias is not None:\n out.bias = copy.deepcopy(self.bias, memo)\n return out\n\n def reset_parameters(self):\n if self.in_channels <= 0:\n pass\n elif self.weight_initializer == 'glorot':\n inits.glorot(self.weight)\n elif self.weight_initializer == 'uniform':\n bound = 1.0 / math.sqrt(self.weight.size(-1))\n torch.nn.init.uniform_(self.weight.data, -bound, bound)\n elif self.weight_initializer == 'kaiming_uniform':\n inits.kaiming_uniform(self.weight, fan=self.in_channels,\n a=math.sqrt(5))\n elif self.weight_initializer is None:\n inits.kaiming_uniform(self.weight, fan=self.in_channels,\n a=math.sqrt(5))\n else:\n raise RuntimeError(f\"Linear layer weight initializer \"\n f\"'{self.weight_initializer}' is not supported\")\n\n if self.bias is None or self.in_channels <= 0:\n pass\n elif self.bias_initializer == 'zeros':\n inits.zeros(self.bias)\n elif self.bias_initializer is None:\n inits.uniform(self.in_channels, self.bias)\n else:\n raise RuntimeError(f\"Linear layer bias initializer \"\n f\"'{self.bias_initializer}' is not supported\")\n\n def forward(self, x: Tensor) -> Tensor:\n r\"\"\"\n Args:\n x (Tensor): The features.\n \"\"\"\n return F.linear(x, self.weight, self.bias)\n\n @torch.no_grad()\n def initialize_parameters(self, module, input):\n if is_uninitialized_parameter(self.weight):\n self.in_channels = input[0].size(-1)\n self.weight.materialize((self.out_channels, self.in_channels))\n self.reset_parameters()\n self._hook.remove()\n delattr(self, '_hook')\n\n def _save_to_state_dict(self, destination, prefix, keep_vars):\n if is_uninitialized_parameter(self.weight):\n destination[prefix + 'weight'] = self.weight\n 
else:\n destination[prefix + 'weight'] = self.weight.detach()\n if self.bias is not None:\n destination[prefix + 'bias'] = self.bias.detach()\n\n def _lazy_load_hook(self, state_dict, prefix, local_metadata, strict,\n missing_keys, unexpected_keys, error_msgs):\n\n weight = state_dict[prefix + 'weight']\n if is_uninitialized_parameter(weight):\n self.in_channels = -1\n self.weight = nn.parameter.UninitializedParameter()\n if not hasattr(self, '_hook'):\n self._hook = self.register_forward_pre_hook(\n self.initialize_parameters)\n\n elif is_uninitialized_parameter(self.weight):\n self.in_channels = weight.size(-1)\n self.weight.materialize((self.out_channels, self.in_channels))\n if hasattr(self, '_hook'):\n self._hook.remove()\n delattr(self, '_hook')\n\n def __repr__(self) -> str:\n return (f'{self.__class__.__name__}({self.in_channels}, '\n f'{self.out_channels}, bias={self.bias is not None})')\n\n\nclass HeteroLinear(torch.nn.Module):\n r\"\"\"Applies separate linear tranformations to the incoming data according\n to types\n\n .. math::\n \\mathbf{x}^{\\prime}_{\\kappa} = \\mathbf{x}_{\\kappa}\n \\mathbf{W}^{\\top}_{\\kappa} + \\mathbf{b}_{\\kappa}\n\n for type :math:`\\kappa`.\n It supports lazy initialization and customizable weight and bias\n initialization.\n\n Args:\n in_channels (int): Size of each input sample. Will be initialized\n lazily in case it is given as :obj:`-1`.\n out_channels (int): Size of each output sample.\n num_types (int): The number of types.\n **kwargs (optional): Additional arguments of\n :class:`torch_geometric.nn.Linear`.\n\n Shapes:\n - **input:**\n features :math:`(*, F_{in})`,\n type vector :math:`(*)`\n - **output:** features :math:`(*, F_{out})`\n \"\"\"\n def __init__(self, in_channels: int, out_channels: int, num_types: int,\n **kwargs):\n super().__init__()\n\n self.in_channels = in_channels\n self.out_channels = out_channels\n\n self.lins = torch.nn.ModuleList([\n Linear(in_channels, out_channels, **kwargs)\n for _ in range(num_types)\n ])\n\n self.reset_parameters()\n\n def reset_parameters(self):\n for lin in self.lins:\n lin.reset_parameters()\n\n def forward(self, x: Tensor, type_vec: Tensor) -> Tensor:\n r\"\"\"\n Args:\n x (Tensor): The input features.\n type_vec (LongTensor): A vector that maps each entry to a type.\n \"\"\"\n out = x.new_empty(x.size(0), self.out_channels)\n for i, lin in enumerate(self.lins):\n mask = type_vec == i\n out[mask] = lin(x[mask])\n return out\n\n def __repr__(self) -> str:\n return (f'{self.__class__.__name__}({self.in_channels}, '\n f'{self.out_channels}, bias={self.lins[0].bias is not None})')\n", "path": "torch_geometric/nn/dense/linear.py"}], "after_files": [{"content": "import copy\nimport math\nfrom typing import Any, Optional\n\nimport torch\nimport torch.nn.functional as F\nfrom torch import Tensor, nn\nfrom torch.nn.parameter import Parameter\n\nfrom torch_geometric.nn import inits\n\n\ndef is_uninitialized_parameter(x: Any) -> bool:\n if not hasattr(nn.parameter, 'UninitializedParameter'):\n return False\n return isinstance(x, nn.parameter.UninitializedParameter)\n\n\nclass Linear(torch.nn.Module):\n r\"\"\"Applies a linear tranformation to the incoming data\n\n .. math::\n \\mathbf{x}^{\\prime} = \\mathbf{x} \\mathbf{W}^{\\top} + \\mathbf{b}\n\n similar to :class:`torch.nn.Linear`.\n It supports lazy initialization and customizable weight and bias\n initialization.\n\n Args:\n in_channels (int): Size of each input sample. 
Will be initialized\n lazily in case it is given as :obj:`-1`.\n out_channels (int): Size of each output sample.\n bias (bool, optional): If set to :obj:`False`, the layer will not learn\n an additive bias. (default: :obj:`True`)\n weight_initializer (str, optional): The initializer for the weight\n matrix (:obj:`\"glorot\"`, :obj:`\"uniform\"`, :obj:`\"kaiming_uniform\"`\n or :obj:`None`).\n If set to :obj:`None`, will match default weight initialization of\n :class:`torch.nn.Linear`. (default: :obj:`None`)\n bias_initializer (str, optional): The initializer for the bias vector\n (:obj:`\"zeros\"` or :obj:`None`).\n If set to :obj:`None`, will match default bias initialization of\n :class:`torch.nn.Linear`. (default: :obj:`None`)\n\n Shapes:\n - **input:** features :math:`(*, F_{in})`\n - **output:** features :math:`(*, F_{out})`\n \"\"\"\n def __init__(self, in_channels: int, out_channels: int, bias: bool = True,\n weight_initializer: Optional[str] = None,\n bias_initializer: Optional[str] = None):\n super().__init__()\n self.in_channels = in_channels\n self.out_channels = out_channels\n self.weight_initializer = weight_initializer\n self.bias_initializer = bias_initializer\n\n if in_channels > 0:\n self.weight = Parameter(torch.Tensor(out_channels, in_channels))\n else:\n self.weight = nn.parameter.UninitializedParameter()\n self._hook = self.register_forward_pre_hook(\n self.initialize_parameters)\n\n if bias:\n self.bias = Parameter(torch.Tensor(out_channels))\n else:\n self.register_parameter('bias', None)\n\n self._load_hook = self._register_load_state_dict_pre_hook(\n self._lazy_load_hook)\n\n self.reset_parameters()\n\n def __deepcopy__(self, memo):\n out = Linear(self.in_channels, self.out_channels, self.bias\n is not None, self.weight_initializer,\n self.bias_initializer)\n if self.in_channels > 0:\n out.weight = copy.deepcopy(self.weight, memo)\n if self.bias is not None:\n out.bias = copy.deepcopy(self.bias, memo)\n return out\n\n def reset_parameters(self):\n if self.in_channels <= 0:\n pass\n elif self.weight_initializer == 'glorot':\n inits.glorot(self.weight)\n elif self.weight_initializer == 'uniform':\n bound = 1.0 / math.sqrt(self.weight.size(-1))\n torch.nn.init.uniform_(self.weight.data, -bound, bound)\n elif self.weight_initializer == 'kaiming_uniform':\n inits.kaiming_uniform(self.weight, fan=self.in_channels,\n a=math.sqrt(5))\n elif self.weight_initializer is None:\n inits.kaiming_uniform(self.weight, fan=self.in_channels,\n a=math.sqrt(5))\n else:\n raise RuntimeError(f\"Linear layer weight initializer \"\n f\"'{self.weight_initializer}' is not supported\")\n\n if self.bias is None or self.in_channels <= 0:\n pass\n elif self.bias_initializer == 'zeros':\n inits.zeros(self.bias)\n elif self.bias_initializer is None:\n inits.uniform(self.in_channels, self.bias)\n else:\n raise RuntimeError(f\"Linear layer bias initializer \"\n f\"'{self.bias_initializer}' is not supported\")\n\n def forward(self, x: Tensor) -> Tensor:\n r\"\"\"\n Args:\n x (Tensor): The features.\n \"\"\"\n return F.linear(x, self.weight, self.bias)\n\n @torch.no_grad()\n def initialize_parameters(self, module, input):\n if is_uninitialized_parameter(self.weight):\n self.in_channels = input[0].size(-1)\n self.weight.materialize((self.out_channels, self.in_channels))\n self.reset_parameters()\n self._hook.remove()\n delattr(self, '_hook')\n\n def _save_to_state_dict(self, destination, prefix, keep_vars):\n if is_uninitialized_parameter(self.weight):\n destination[prefix + 'weight'] = self.weight\n 
else:\n destination[prefix + 'weight'] = self.weight.detach()\n if self.bias is not None:\n destination[prefix + 'bias'] = self.bias.detach()\n\n def _lazy_load_hook(self, state_dict, prefix, local_metadata, strict,\n missing_keys, unexpected_keys, error_msgs):\n\n weight = state_dict.get(prefix + 'weight', None)\n\n if weight is not None and is_uninitialized_parameter(weight):\n self.in_channels = -1\n self.weight = nn.parameter.UninitializedParameter()\n if not hasattr(self, '_hook'):\n self._hook = self.register_forward_pre_hook(\n self.initialize_parameters)\n\n elif weight is not None and is_uninitialized_parameter(self.weight):\n self.in_channels = weight.size(-1)\n self.weight.materialize((self.out_channels, self.in_channels))\n if hasattr(self, '_hook'):\n self._hook.remove()\n delattr(self, '_hook')\n\n def __repr__(self) -> str:\n return (f'{self.__class__.__name__}({self.in_channels}, '\n f'{self.out_channels}, bias={self.bias is not None})')\n\n\nclass HeteroLinear(torch.nn.Module):\n r\"\"\"Applies separate linear tranformations to the incoming data according\n to types\n\n .. math::\n \\mathbf{x}^{\\prime}_{\\kappa} = \\mathbf{x}_{\\kappa}\n \\mathbf{W}^{\\top}_{\\kappa} + \\mathbf{b}_{\\kappa}\n\n for type :math:`\\kappa`.\n It supports lazy initialization and customizable weight and bias\n initialization.\n\n Args:\n in_channels (int): Size of each input sample. Will be initialized\n lazily in case it is given as :obj:`-1`.\n out_channels (int): Size of each output sample.\n num_types (int): The number of types.\n **kwargs (optional): Additional arguments of\n :class:`torch_geometric.nn.Linear`.\n\n Shapes:\n - **input:**\n features :math:`(*, F_{in})`,\n type vector :math:`(*)`\n - **output:** features :math:`(*, F_{out})`\n \"\"\"\n def __init__(self, in_channels: int, out_channels: int, num_types: int,\n **kwargs):\n super().__init__()\n\n self.in_channels = in_channels\n self.out_channels = out_channels\n\n self.lins = torch.nn.ModuleList([\n Linear(in_channels, out_channels, **kwargs)\n for _ in range(num_types)\n ])\n\n self.reset_parameters()\n\n def reset_parameters(self):\n for lin in self.lins:\n lin.reset_parameters()\n\n def forward(self, x: Tensor, type_vec: Tensor) -> Tensor:\n r\"\"\"\n Args:\n x (Tensor): The input features.\n type_vec (LongTensor): A vector that maps each entry to a type.\n \"\"\"\n out = x.new_empty(x.size(0), self.out_channels)\n for i, lin in enumerate(self.lins):\n mask = type_vec == i\n out[mask] = lin(x[mask])\n return out\n\n def __repr__(self) -> str:\n return (f'{self.__class__.__name__}({self.in_channels}, '\n f'{self.out_channels}, bias={self.lins[0].bias is not None})')\n", "path": "torch_geometric/nn/dense/linear.py"}]} | 3,019 | 252 |
gh_patches_debug_4328 | rasdani/github-patches | git_diff | pytorch__TensorRT-2311 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Upstream Dynamo Backend to Torch
- Add a hook in the Torch repo to secure the namespace `"tensorrt"` and have it point to `"torch_tensorrt"`
- Add necessary imports and skipped tests
- Raise a PR in Torch to add this functionality
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `py/torch_tensorrt/dynamo/backend/backends.py`
Content:
```
1 from __future__ import annotations
2
3 import logging
4 from functools import partial
5 from typing import Any, Callable, Sequence
6
7 import torch
8 import torch._dynamo as td
9 from torch._functorch.aot_autograd import aot_module_simplified, make_boxed_compiler
10 from torch_tensorrt.dynamo import CompilationSettings
11 from torch_tensorrt.dynamo.compile import compile_module
12 from torch_tensorrt.dynamo.lowering._decompositions import get_decompositions
13 from torch_tensorrt.dynamo.lowering._pre_aot_lowering import pre_aot_substitutions
14 from torch_tensorrt.dynamo.utils import parse_dynamo_kwargs
15
16 logger = logging.getLogger(__name__)
17
18
19 @td.register_backend(name="torch_tensorrt") # type: ignore[misc]
20 def torch_tensorrt_backend(
21 gm: torch.fx.GraphModule, sample_inputs: Sequence[torch.Tensor], **kwargs: Any
22 ) -> torch.nn.Module:
23 # Set log level at the top of compilation (torch_tensorrt.dynamo)
24 if (
25 (
26 "options" in kwargs
27 and "debug" in kwargs["options"]
28 and kwargs["options"]["debug"]
29 )
30 or ("debug" in kwargs and kwargs["debug"])
31 ) and logger.parent:
32 logger.parent.setLevel(logging.DEBUG)
33
34 DEFAULT_BACKEND = aot_torch_tensorrt_aten_backend
35
36 compiled_mod: torch.nn.Module = DEFAULT_BACKEND(gm, sample_inputs, **kwargs)
37 return compiled_mod
38
39
40 @td.register_backend(name="aot_torch_tensorrt_aten") # type: ignore[misc]
41 def aot_torch_tensorrt_aten_backend(
42 gm: torch.fx.GraphModule, sample_inputs: Sequence[torch.Tensor], **kwargs: Any
43 ) -> torch.nn.Module:
44 settings = parse_dynamo_kwargs(kwargs)
45
46 custom_backend = partial(
47 _pretraced_backend,
48 settings=settings,
49 )
50
51 # Perform Pre-AOT Lowering for Module-Level Replacement
52 gm = pre_aot_substitutions(gm)
53
54 # Invoke AOTAutograd to translate operators to aten
55 return aot_module_simplified(
56 gm,
57 sample_inputs,
58 fw_compiler=make_boxed_compiler(custom_backend),
59 decompositions=get_decompositions(settings.enable_experimental_decompositions),
60 )
61
62
63 def _pretraced_backend(
64 gm: torch.fx.GraphModule,
65 sample_inputs: Sequence[torch.Tensor],
66 settings: CompilationSettings = CompilationSettings(),
67 ) -> torch.fx.GraphModule | Callable[..., Any]:
68 """Helper function to manage translation of traced FX module to TRT engines
69
70 Args:
71 module: FX GraphModule to convert
72 inputs: Inputs to the module
73 settings: Compilation settings
74 Returns:
75 Compiled FX GraphModule
76 """
77 try:
78 logger.debug("Post-AOT Autograd graph:\n" + str(gm.graph))
79
80 trt_compiled = compile_module(
81 gm,
82 sample_inputs,
83 settings=settings,
84 )
85 return trt_compiled
86 except AssertionError:
87 if not settings.pass_through_build_failures:
88 logger.warning(
89 "TRT conversion failed on the subgraph. See trace above. "
90 + "Returning GraphModule forward instead.",
91 exc_info=True,
92 )
93 return gm.forward
94 else:
95 logger.critical(
96 "Halting compilation on build failure since "
97 + "pass_through_build_failures was specified as True. "
98 + "To return the default Torch implementation and avoid "
99 + "halting compilation on engine build failures, "
100 + "specify pass_through_build_failures=False."
101 )
102 raise
103
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/py/torch_tensorrt/dynamo/backend/backends.py b/py/torch_tensorrt/dynamo/backend/backends.py
--- a/py/torch_tensorrt/dynamo/backend/backends.py
+++ b/py/torch_tensorrt/dynamo/backend/backends.py
@@ -16,6 +16,7 @@
logger = logging.getLogger(__name__)
[email protected]_backend(name="tensorrt") # type: ignore[misc]
@td.register_backend(name="torch_tensorrt") # type: ignore[misc]
def torch_tensorrt_backend(
gm: torch.fx.GraphModule, sample_inputs: Sequence[torch.Tensor], **kwargs: Any
| {"golden_diff": "diff --git a/py/torch_tensorrt/dynamo/backend/backends.py b/py/torch_tensorrt/dynamo/backend/backends.py\n--- a/py/torch_tensorrt/dynamo/backend/backends.py\n+++ b/py/torch_tensorrt/dynamo/backend/backends.py\n@@ -16,6 +16,7 @@\n logger = logging.getLogger(__name__)\n \n \[email protected]_backend(name=\"tensorrt\") # type: ignore[misc]\n @td.register_backend(name=\"torch_tensorrt\") # type: ignore[misc]\n def torch_tensorrt_backend(\n gm: torch.fx.GraphModule, sample_inputs: Sequence[torch.Tensor], **kwargs: Any\n", "issue": "Upstream Dynamo Backend to Torch\n- Add a hook in the Torch repo to secure the namespace `\"tensorrt\"` and have it point to `\"torch_tensorrt\"`\r\n- Add necessary imports and skipped tests\r\n- Raise a PR in Torch to add this functionality\n", "before_files": [{"content": "from __future__ import annotations\n\nimport logging\nfrom functools import partial\nfrom typing import Any, Callable, Sequence\n\nimport torch\nimport torch._dynamo as td\nfrom torch._functorch.aot_autograd import aot_module_simplified, make_boxed_compiler\nfrom torch_tensorrt.dynamo import CompilationSettings\nfrom torch_tensorrt.dynamo.compile import compile_module\nfrom torch_tensorrt.dynamo.lowering._decompositions import get_decompositions\nfrom torch_tensorrt.dynamo.lowering._pre_aot_lowering import pre_aot_substitutions\nfrom torch_tensorrt.dynamo.utils import parse_dynamo_kwargs\n\nlogger = logging.getLogger(__name__)\n\n\[email protected]_backend(name=\"torch_tensorrt\") # type: ignore[misc]\ndef torch_tensorrt_backend(\n gm: torch.fx.GraphModule, sample_inputs: Sequence[torch.Tensor], **kwargs: Any\n) -> torch.nn.Module:\n # Set log level at the top of compilation (torch_tensorrt.dynamo)\n if (\n (\n \"options\" in kwargs\n and \"debug\" in kwargs[\"options\"]\n and kwargs[\"options\"][\"debug\"]\n )\n or (\"debug\" in kwargs and kwargs[\"debug\"])\n ) and logger.parent:\n logger.parent.setLevel(logging.DEBUG)\n\n DEFAULT_BACKEND = aot_torch_tensorrt_aten_backend\n\n compiled_mod: torch.nn.Module = DEFAULT_BACKEND(gm, sample_inputs, **kwargs)\n return compiled_mod\n\n\[email protected]_backend(name=\"aot_torch_tensorrt_aten\") # type: ignore[misc]\ndef aot_torch_tensorrt_aten_backend(\n gm: torch.fx.GraphModule, sample_inputs: Sequence[torch.Tensor], **kwargs: Any\n) -> torch.nn.Module:\n settings = parse_dynamo_kwargs(kwargs)\n\n custom_backend = partial(\n _pretraced_backend,\n settings=settings,\n )\n\n # Perform Pre-AOT Lowering for Module-Level Replacement\n gm = pre_aot_substitutions(gm)\n\n # Invoke AOTAutograd to translate operators to aten\n return aot_module_simplified(\n gm,\n sample_inputs,\n fw_compiler=make_boxed_compiler(custom_backend),\n decompositions=get_decompositions(settings.enable_experimental_decompositions),\n )\n\n\ndef _pretraced_backend(\n gm: torch.fx.GraphModule,\n sample_inputs: Sequence[torch.Tensor],\n settings: CompilationSettings = CompilationSettings(),\n) -> torch.fx.GraphModule | Callable[..., Any]:\n \"\"\"Helper function to manage translation of traced FX module to TRT engines\n\n Args:\n module: FX GraphModule to convert\n inputs: Inputs to the module\n settings: Compilation settings\n Returns:\n Compiled FX GraphModule\n \"\"\"\n try:\n logger.debug(\"Post-AOT Autograd graph:\\n\" + str(gm.graph))\n\n trt_compiled = compile_module(\n gm,\n sample_inputs,\n settings=settings,\n )\n return trt_compiled\n except AssertionError:\n if not settings.pass_through_build_failures:\n logger.warning(\n \"TRT conversion failed on the 
subgraph. See trace above. \"\n + \"Returning GraphModule forward instead.\",\n exc_info=True,\n )\n return gm.forward\n else:\n logger.critical(\n \"Halting compilation on build failure since \"\n + \"pass_through_build_failures was specified as True. \"\n + \"To return the default Torch implementation and avoid \"\n + \"halting compilation on engine build failures, \"\n + \"specify pass_through_build_failures=False.\"\n )\n raise\n", "path": "py/torch_tensorrt/dynamo/backend/backends.py"}], "after_files": [{"content": "from __future__ import annotations\n\nimport logging\nfrom functools import partial\nfrom typing import Any, Callable, Sequence\n\nimport torch\nimport torch._dynamo as td\nfrom torch._functorch.aot_autograd import aot_module_simplified, make_boxed_compiler\nfrom torch_tensorrt.dynamo import CompilationSettings\nfrom torch_tensorrt.dynamo.compile import compile_module\nfrom torch_tensorrt.dynamo.lowering._decompositions import get_decompositions\nfrom torch_tensorrt.dynamo.lowering._pre_aot_lowering import pre_aot_substitutions\nfrom torch_tensorrt.dynamo.utils import parse_dynamo_kwargs\n\nlogger = logging.getLogger(__name__)\n\n\[email protected]_backend(name=\"tensorrt\") # type: ignore[misc]\[email protected]_backend(name=\"torch_tensorrt\") # type: ignore[misc]\ndef torch_tensorrt_backend(\n gm: torch.fx.GraphModule, sample_inputs: Sequence[torch.Tensor], **kwargs: Any\n) -> torch.nn.Module:\n # Set log level at the top of compilation (torch_tensorrt.dynamo)\n if (\n (\n \"options\" in kwargs\n and \"debug\" in kwargs[\"options\"]\n and kwargs[\"options\"][\"debug\"]\n )\n or (\"debug\" in kwargs and kwargs[\"debug\"])\n ) and logger.parent:\n logger.parent.setLevel(logging.DEBUG)\n\n DEFAULT_BACKEND = aot_torch_tensorrt_aten_backend\n\n compiled_mod: torch.nn.Module = DEFAULT_BACKEND(gm, sample_inputs, **kwargs)\n return compiled_mod\n\n\[email protected]_backend(name=\"aot_torch_tensorrt_aten\") # type: ignore[misc]\ndef aot_torch_tensorrt_aten_backend(\n gm: torch.fx.GraphModule, sample_inputs: Sequence[torch.Tensor], **kwargs: Any\n) -> torch.nn.Module:\n settings = parse_dynamo_kwargs(kwargs)\n\n custom_backend = partial(\n _pretraced_backend,\n settings=settings,\n )\n\n # Perform Pre-AOT Lowering for Module-Level Replacement\n gm = pre_aot_substitutions(gm)\n\n # Invoke AOTAutograd to translate operators to aten\n return aot_module_simplified(\n gm,\n sample_inputs,\n fw_compiler=make_boxed_compiler(custom_backend),\n decompositions=get_decompositions(settings.enable_experimental_decompositions),\n )\n\n\ndef _pretraced_backend(\n gm: torch.fx.GraphModule,\n sample_inputs: Sequence[torch.Tensor],\n settings: CompilationSettings = CompilationSettings(),\n) -> torch.fx.GraphModule | Callable[..., Any]:\n \"\"\"Helper function to manage translation of traced FX module to TRT engines\n\n Args:\n module: FX GraphModule to convert\n inputs: Inputs to the module\n settings: Compilation settings\n Returns:\n Compiled FX GraphModule\n \"\"\"\n try:\n logger.debug(\"Post-AOT Autograd graph:\\n\" + str(gm.graph))\n\n trt_compiled = compile_module(\n gm,\n sample_inputs,\n settings=settings,\n )\n return trt_compiled\n except AssertionError:\n if not settings.pass_through_build_failures:\n logger.warning(\n \"TRT conversion failed on the subgraph. See trace above. 
\"\n + \"Returning GraphModule forward instead.\",\n exc_info=True,\n )\n return gm.forward\n else:\n logger.critical(\n \"Halting compilation on build failure since \"\n + \"pass_through_build_failures was specified as True. \"\n + \"To return the default Torch implementation and avoid \"\n + \"halting compilation on engine build failures, \"\n + \"specify pass_through_build_failures=False.\"\n )\n raise\n", "path": "py/torch_tensorrt/dynamo/backend/backends.py"}]} | 1,281 | 139 |
gh_patches_debug_3116 | rasdani/github-patches | git_diff | rasterio__rasterio-490 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
rio merge error
When running `rio merge` on the latest version (`0.27`), I am receiving this error:
```
Traceback (most recent call last):
File "/Users/dnomadb/venv/bin/rio", line 11, in <module>
sys.exit(main_group())
File "/Users/dnomadb/venv/lib/python2.7/site-packages/click/core.py", line 700, in __call__
return self.main(*args, **kwargs)
File "/Users/dnomadb/venv/lib/python2.7/site-packages/click/core.py", line 680, in main
rv = self.invoke(ctx)
File "/Users/dnomadb/venv/lib/python2.7/site-packages/click/core.py", line 1027, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/Users/dnomadb/venv/lib/python2.7/site-packages/click/core.py", line 873, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/Users/dnomadb/venv/lib/python2.7/site-packages/click/core.py", line 508, in invoke
return callback(*args, **kwargs)
File "/Users/dnomadb/venv/lib/python2.7/site-packages/click/decorators.py", line 16, in new_func
return f(get_current_context(), *args, **kwargs)
File "/Users/dnomadb/venv/lib/python2.7/site-packages/rasterio/rio/merge.py", line 54, in merge
from rasterio.tools.merge import merge as merge_tool
ImportError: No module named tools.merge
```
When reverting back to `0.26`, I don't see it and the operation completes as expected.
cc @sgillies
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2
3 # Two environmental variables influence this script.
4 #
5 # GDAL_CONFIG: the path to a gdal-config program that points to GDAL headers,
6 # libraries, and data files.
7 #
8 # PACKAGE_DATA: if defined, GDAL and PROJ4 data files will be copied into the
9 # source or binary distribution. This is essential when creating self-contained
10 # binary wheels.
11
12 import logging
13 import os
14 import pprint
15 import shutil
16 import subprocess
17 import sys
18
19 from setuptools import setup
20 from setuptools.extension import Extension
21
22 logging.basicConfig()
23 log = logging.getLogger()
24
25 # python -W all setup.py ...
26 if 'all' in sys.warnoptions:
27 log.level = logging.DEBUG
28
29 def check_output(cmd):
30 # since subprocess.check_output doesn't exist in 2.6
31 # we wrap it here.
32 try:
33 out = subprocess.check_output(cmd)
34 return out.decode('utf')
35 except AttributeError:
36 # For some reasone check_output doesn't exist
37 # So fall back on Popen
38 p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
39 out, err = p.communicate()
40 return out
41
42 def copy_data_tree(datadir, destdir):
43 try:
44 shutil.rmtree(destdir)
45 except OSError:
46 pass
47 shutil.copytree(datadir, destdir)
48
49 # Parse the version from the rasterio module.
50 with open('rasterio/__init__.py') as f:
51 for line in f:
52 if line.find("__version__") >= 0:
53 version = line.split("=")[1].strip()
54 version = version.strip('"')
55 version = version.strip("'")
56 continue
57
58 with open('VERSION.txt', 'w') as f:
59 f.write(version)
60
61 # Use Cython if available.
62 try:
63 from Cython.Build import cythonize
64 except ImportError:
65 cythonize = None
66
67 # By default we'll try to get options via gdal-config. On systems without,
68 # options will need to be set in setup.cfg or on the setup command line.
69 include_dirs = []
70 library_dirs = []
71 libraries = []
72 extra_link_args = []
73 gdal_output = [None]*3
74
75 try:
76 import numpy
77 include_dirs.append(numpy.get_include())
78 except ImportError:
79 log.critical("Numpy and its headers are required to run setup(). Exiting.")
80 sys.exit(1)
81
82 try:
83 gdal_config = os.environ.get('GDAL_CONFIG', 'gdal-config')
84 for i, flag in enumerate(("--cflags", "--libs", "--datadir")):
85 gdal_output[i] = check_output([gdal_config, flag]).strip()
86
87 for item in gdal_output[0].split():
88 if item.startswith("-I"):
89 include_dirs.extend(item[2:].split(":"))
90 for item in gdal_output[1].split():
91 if item.startswith("-L"):
92 library_dirs.extend(item[2:].split(":"))
93 elif item.startswith("-l"):
94 libraries.append(item[2:])
95 else:
96 # e.g. -framework GDAL
97 extra_link_args.append(item)
98
99 except Exception as e:
100 if os.name == "nt":
101 log.info(("Building on Windows requires extra options to setup.py to locate needed GDAL files.\n"
102 "More information is available in the README."))
103 else:
104 log.warning("Failed to get options via gdal-config: %s", str(e))
105
106
107 # Conditionally copy the GDAL data. To be used in conjunction with
108 # the bdist_wheel command to make self-contained binary wheels.
109 if os.environ.get('PACKAGE_DATA'):
110 destdir = 'rasterio/gdal_data'
111 if gdal_output[2]:
112 log.info("Copying gdal data from %s" % gdal_output[2])
113 copy_data_tree(gdal_output[2], destdir)
114 else:
115 # check to see if GDAL_DATA is defined
116 gdal_data = os.environ.get('GDAL_DATA', None)
117 if gdal_data:
118 log.info("Copying gdal_data from %s" % gdal_data)
119 copy_data_tree(gdal_data, destdir)
120
121 # Conditionally copy PROJ.4 data.
122 projdatadir = os.environ.get('PROJ_LIB', '/usr/local/share/proj')
123 if os.path.exists(projdatadir):
124 log.info("Copying proj_data from %s" % projdatadir)
125 copy_data_tree(projdatadir, 'rasterio/proj_data')
126
127 ext_options = dict(
128 include_dirs=include_dirs,
129 library_dirs=library_dirs,
130 libraries=libraries,
131 extra_link_args=extra_link_args)
132
133 if not os.name == "nt":
134 # These options fail on Windows if using Visual Studio
135 ext_options['extra_compile_args'] = ['-Wno-unused-parameter',
136 '-Wno-unused-function']
137
138 log.debug('ext_options:\n%s', pprint.pformat(ext_options))
139
140 # When building from a repo, Cython is required.
141 if os.path.exists("MANIFEST.in") and "clean" not in sys.argv:
142 log.info("MANIFEST.in found, presume a repo, cythonizing...")
143 if not cythonize:
144 log.critical(
145 "Cython.Build.cythonize not found. "
146 "Cython is required to build from a repo.")
147 sys.exit(1)
148 ext_modules = cythonize([
149 Extension(
150 'rasterio._base', ['rasterio/_base.pyx'], **ext_options),
151 Extension(
152 'rasterio._io', ['rasterio/_io.pyx'], **ext_options),
153 Extension(
154 'rasterio._copy', ['rasterio/_copy.pyx'], **ext_options),
155 Extension(
156 'rasterio._features', ['rasterio/_features.pyx'], **ext_options),
157 Extension(
158 'rasterio._drivers', ['rasterio/_drivers.pyx'], **ext_options),
159 Extension(
160 'rasterio._warp', ['rasterio/_warp.pyx'], **ext_options),
161 Extension(
162 'rasterio._fill', ['rasterio/_fill.pyx', 'rasterio/rasterfill.cpp'], **ext_options),
163 Extension(
164 'rasterio._err', ['rasterio/_err.pyx'], **ext_options),
165 Extension(
166 'rasterio._example', ['rasterio/_example.pyx'], **ext_options),
167 ], quiet=True)
168
169 # If there's no manifest template, as in an sdist, we just specify .c files.
170 else:
171 ext_modules = [
172 Extension(
173 'rasterio._base', ['rasterio/_base.c'], **ext_options),
174 Extension(
175 'rasterio._io', ['rasterio/_io.c'], **ext_options),
176 Extension(
177 'rasterio._copy', ['rasterio/_copy.c'], **ext_options),
178 Extension(
179 'rasterio._features', ['rasterio/_features.c'], **ext_options),
180 Extension(
181 'rasterio._drivers', ['rasterio/_drivers.c'], **ext_options),
182 Extension(
183 'rasterio._warp', ['rasterio/_warp.cpp'], **ext_options),
184 Extension(
185 'rasterio._fill', ['rasterio/_fill.cpp', 'rasterio/rasterfill.cpp'], **ext_options),
186 Extension(
187 'rasterio._err', ['rasterio/_err.c'], **ext_options),
188 Extension(
189 'rasterio._example', ['rasterio/_example.c'], **ext_options),
190 ]
191
192 with open('README.rst') as f:
193 readme = f.read()
194
195 # Runtime requirements.
196 inst_reqs = [
197 'affine>=1.0',
198 'cligj>=0.2.0',
199 'Numpy>=1.7',
200 'snuggs>=1.3.1',
201 'click-plugins']
202
203 if sys.version_info < (3, 4):
204 inst_reqs.append('enum34')
205
206 setup_args = dict(
207 name='rasterio',
208 version=version,
209 description="Fast and direct raster I/O for use with Numpy and SciPy",
210 long_description=readme,
211 classifiers=[
212 'Development Status :: 4 - Beta',
213 'Intended Audience :: Developers',
214 'Intended Audience :: Information Technology',
215 'Intended Audience :: Science/Research',
216 'License :: OSI Approved :: BSD License',
217 'Programming Language :: C',
218 'Programming Language :: Python :: 2.6',
219 'Programming Language :: Python :: 2.7',
220 'Programming Language :: Python :: 3.3',
221 'Programming Language :: Python :: 3.4',
222 'Topic :: Multimedia :: Graphics :: Graphics Conversion',
223 'Topic :: Scientific/Engineering :: GIS'],
224 keywords='raster gdal',
225 author='Sean Gillies',
226 author_email='[email protected]',
227 url='https://github.com/mapbox/rasterio',
228 license='BSD',
229 package_dir={'': '.'},
230 packages=['rasterio', 'rasterio.rio'],
231 entry_points='''
232 [console_scripts]
233 rio=rasterio.rio.main:main_group
234
235 [rasterio.rio_commands]
236 bounds=rasterio.rio.features:bounds
237 calc=rasterio.rio.calc:calc
238 clip=rasterio.rio.convert:clip
239 convert=rasterio.rio.convert:convert
240 edit-info=rasterio.rio.info:edit
241 env=rasterio.rio.info:env
242 info=rasterio.rio.info:info
243 insp=rasterio.rio.info:insp
244 mask=rasterio.rio.features:mask
245 merge=rasterio.rio.merge:merge
246 overview=rasterio.rio.overview:overview
247 rasterize=rasterio.rio.features:rasterize
248 sample=rasterio.rio.sample:sample
249 shapes=rasterio.rio.features:shapes
250 stack=rasterio.rio.bands:stack
251 warp=rasterio.rio.warp:warp
252 transform=rasterio.rio.info:transform
253 ''',
254 include_package_data=True,
255 ext_modules=ext_modules,
256 zip_safe=False,
257 install_requires=inst_reqs,
258 extras_require={
259 'ipython': ['ipython>=2.0']})
260
261 if os.environ.get('PACKAGE_DATA'):
262 setup_args['package_data'] = {'rasterio': ['gdal_data/*', 'proj_data/*']}
263
264 setup(**setup_args)
265
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -227,7 +227,7 @@
url='https://github.com/mapbox/rasterio',
license='BSD',
package_dir={'': '.'},
- packages=['rasterio', 'rasterio.rio'],
+ packages=['rasterio', 'rasterio.rio', 'rasterio.tools'],
entry_points='''
[console_scripts]
rio=rasterio.rio.main:main_group
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -227,7 +227,7 @@\n url='https://github.com/mapbox/rasterio',\n license='BSD',\n package_dir={'': '.'},\n- packages=['rasterio', 'rasterio.rio'],\n+ packages=['rasterio', 'rasterio.rio', 'rasterio.tools'],\n entry_points='''\n [console_scripts]\n rio=rasterio.rio.main:main_group\n", "issue": "rio merge error\nWhen running `rio merge` on the latest version (`0.27`), I am receiving this error:\n\n```\nTraceback (most recent call last):\n File \"/Users/dnomadb/venv/bin/rio\", line 11, in <module>\n sys.exit(main_group())\n File \"/Users/dnomadb/venv/lib/python2.7/site-packages/click/core.py\", line 700, in __call__\n return self.main(*args, **kwargs)\n File \"/Users/dnomadb/venv/lib/python2.7/site-packages/click/core.py\", line 680, in main\n rv = self.invoke(ctx)\n File \"/Users/dnomadb/venv/lib/python2.7/site-packages/click/core.py\", line 1027, in invoke\n return _process_result(sub_ctx.command.invoke(sub_ctx))\n File \"/Users/dnomadb/venv/lib/python2.7/site-packages/click/core.py\", line 873, in invoke\n return ctx.invoke(self.callback, **ctx.params)\n File \"/Users/dnomadb/venv/lib/python2.7/site-packages/click/core.py\", line 508, in invoke\n return callback(*args, **kwargs)\n File \"/Users/dnomadb/venv/lib/python2.7/site-packages/click/decorators.py\", line 16, in new_func\n return f(get_current_context(), *args, **kwargs)\n File \"/Users/dnomadb/venv/lib/python2.7/site-packages/rasterio/rio/merge.py\", line 54, in merge\n from rasterio.tools.merge import merge as merge_tool\nImportError: No module named tools.merge\n```\n\nWhen reverting back to `0.26`, I don't see it and the operation completes as expected.\n\ncc @sgillies \n\n", "before_files": [{"content": "#!/usr/bin/env python\n\n# Two environmental variables influence this script.\n#\n# GDAL_CONFIG: the path to a gdal-config program that points to GDAL headers,\n# libraries, and data files.\n#\n# PACKAGE_DATA: if defined, GDAL and PROJ4 data files will be copied into the\n# source or binary distribution. This is essential when creating self-contained\n# binary wheels.\n\nimport logging\nimport os\nimport pprint\nimport shutil\nimport subprocess\nimport sys\n\nfrom setuptools import setup\nfrom setuptools.extension import Extension\n\nlogging.basicConfig()\nlog = logging.getLogger()\n\n# python -W all setup.py ...\nif 'all' in sys.warnoptions:\n log.level = logging.DEBUG\n\ndef check_output(cmd):\n # since subprocess.check_output doesn't exist in 2.6\n # we wrap it here.\n try:\n out = subprocess.check_output(cmd)\n return out.decode('utf')\n except AttributeError:\n # For some reasone check_output doesn't exist\n # So fall back on Popen\n p = subprocess.Popen(cmd, stdout=subprocess.PIPE)\n out, err = p.communicate()\n return out\n\ndef copy_data_tree(datadir, destdir):\n try:\n shutil.rmtree(destdir)\n except OSError:\n pass\n shutil.copytree(datadir, destdir)\n\n# Parse the version from the rasterio module.\nwith open('rasterio/__init__.py') as f:\n for line in f:\n if line.find(\"__version__\") >= 0:\n version = line.split(\"=\")[1].strip()\n version = version.strip('\"')\n version = version.strip(\"'\")\n continue\n\nwith open('VERSION.txt', 'w') as f:\n f.write(version)\n\n# Use Cython if available.\ntry:\n from Cython.Build import cythonize\nexcept ImportError:\n cythonize = None\n\n# By default we'll try to get options via gdal-config. 
On systems without,\n# options will need to be set in setup.cfg or on the setup command line.\ninclude_dirs = []\nlibrary_dirs = []\nlibraries = []\nextra_link_args = []\ngdal_output = [None]*3\n\ntry:\n import numpy\n include_dirs.append(numpy.get_include())\nexcept ImportError:\n log.critical(\"Numpy and its headers are required to run setup(). Exiting.\")\n sys.exit(1)\n\ntry:\n gdal_config = os.environ.get('GDAL_CONFIG', 'gdal-config')\n for i, flag in enumerate((\"--cflags\", \"--libs\", \"--datadir\")):\n gdal_output[i] = check_output([gdal_config, flag]).strip()\n\n for item in gdal_output[0].split():\n if item.startswith(\"-I\"):\n include_dirs.extend(item[2:].split(\":\"))\n for item in gdal_output[1].split():\n if item.startswith(\"-L\"):\n library_dirs.extend(item[2:].split(\":\"))\n elif item.startswith(\"-l\"):\n libraries.append(item[2:])\n else:\n # e.g. -framework GDAL\n extra_link_args.append(item)\n\nexcept Exception as e:\n if os.name == \"nt\":\n log.info((\"Building on Windows requires extra options to setup.py to locate needed GDAL files.\\n\"\n \"More information is available in the README.\"))\n else:\n log.warning(\"Failed to get options via gdal-config: %s\", str(e))\n\n\n# Conditionally copy the GDAL data. To be used in conjunction with\n# the bdist_wheel command to make self-contained binary wheels.\nif os.environ.get('PACKAGE_DATA'):\n destdir = 'rasterio/gdal_data'\n if gdal_output[2]:\n log.info(\"Copying gdal data from %s\" % gdal_output[2])\n copy_data_tree(gdal_output[2], destdir)\n else:\n # check to see if GDAL_DATA is defined\n gdal_data = os.environ.get('GDAL_DATA', None)\n if gdal_data:\n log.info(\"Copying gdal_data from %s\" % gdal_data)\n copy_data_tree(gdal_data, destdir)\n\n # Conditionally copy PROJ.4 data.\n projdatadir = os.environ.get('PROJ_LIB', '/usr/local/share/proj')\n if os.path.exists(projdatadir):\n log.info(\"Copying proj_data from %s\" % projdatadir)\n copy_data_tree(projdatadir, 'rasterio/proj_data')\n\next_options = dict(\n include_dirs=include_dirs,\n library_dirs=library_dirs,\n libraries=libraries,\n extra_link_args=extra_link_args)\n\nif not os.name == \"nt\":\n # These options fail on Windows if using Visual Studio\n ext_options['extra_compile_args'] = ['-Wno-unused-parameter',\n '-Wno-unused-function']\n\nlog.debug('ext_options:\\n%s', pprint.pformat(ext_options))\n\n# When building from a repo, Cython is required.\nif os.path.exists(\"MANIFEST.in\") and \"clean\" not in sys.argv:\n log.info(\"MANIFEST.in found, presume a repo, cythonizing...\")\n if not cythonize:\n log.critical(\n \"Cython.Build.cythonize not found. 
\"\n \"Cython is required to build from a repo.\")\n sys.exit(1)\n ext_modules = cythonize([\n Extension(\n 'rasterio._base', ['rasterio/_base.pyx'], **ext_options),\n Extension(\n 'rasterio._io', ['rasterio/_io.pyx'], **ext_options),\n Extension(\n 'rasterio._copy', ['rasterio/_copy.pyx'], **ext_options),\n Extension(\n 'rasterio._features', ['rasterio/_features.pyx'], **ext_options),\n Extension(\n 'rasterio._drivers', ['rasterio/_drivers.pyx'], **ext_options),\n Extension(\n 'rasterio._warp', ['rasterio/_warp.pyx'], **ext_options),\n Extension(\n 'rasterio._fill', ['rasterio/_fill.pyx', 'rasterio/rasterfill.cpp'], **ext_options),\n Extension(\n 'rasterio._err', ['rasterio/_err.pyx'], **ext_options),\n Extension(\n 'rasterio._example', ['rasterio/_example.pyx'], **ext_options),\n ], quiet=True)\n\n# If there's no manifest template, as in an sdist, we just specify .c files.\nelse:\n ext_modules = [\n Extension(\n 'rasterio._base', ['rasterio/_base.c'], **ext_options),\n Extension(\n 'rasterio._io', ['rasterio/_io.c'], **ext_options),\n Extension(\n 'rasterio._copy', ['rasterio/_copy.c'], **ext_options),\n Extension(\n 'rasterio._features', ['rasterio/_features.c'], **ext_options),\n Extension(\n 'rasterio._drivers', ['rasterio/_drivers.c'], **ext_options),\n Extension(\n 'rasterio._warp', ['rasterio/_warp.cpp'], **ext_options),\n Extension(\n 'rasterio._fill', ['rasterio/_fill.cpp', 'rasterio/rasterfill.cpp'], **ext_options),\n Extension(\n 'rasterio._err', ['rasterio/_err.c'], **ext_options),\n Extension(\n 'rasterio._example', ['rasterio/_example.c'], **ext_options),\n ]\n\nwith open('README.rst') as f:\n readme = f.read()\n\n# Runtime requirements.\ninst_reqs = [\n 'affine>=1.0',\n 'cligj>=0.2.0',\n 'Numpy>=1.7',\n 'snuggs>=1.3.1',\n 'click-plugins']\n\nif sys.version_info < (3, 4):\n inst_reqs.append('enum34')\n\nsetup_args = dict(\n name='rasterio',\n version=version,\n description=\"Fast and direct raster I/O for use with Numpy and SciPy\",\n long_description=readme,\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Information Technology',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: BSD License',\n 'Programming Language :: C',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Topic :: Multimedia :: Graphics :: Graphics Conversion',\n 'Topic :: Scientific/Engineering :: GIS'],\n keywords='raster gdal',\n author='Sean Gillies',\n author_email='[email protected]',\n url='https://github.com/mapbox/rasterio',\n license='BSD',\n package_dir={'': '.'},\n packages=['rasterio', 'rasterio.rio'],\n entry_points='''\n [console_scripts]\n rio=rasterio.rio.main:main_group\n\n [rasterio.rio_commands]\n bounds=rasterio.rio.features:bounds\n calc=rasterio.rio.calc:calc\n clip=rasterio.rio.convert:clip\n convert=rasterio.rio.convert:convert\n edit-info=rasterio.rio.info:edit\n env=rasterio.rio.info:env\n info=rasterio.rio.info:info\n insp=rasterio.rio.info:insp\n mask=rasterio.rio.features:mask\n merge=rasterio.rio.merge:merge\n overview=rasterio.rio.overview:overview\n rasterize=rasterio.rio.features:rasterize\n sample=rasterio.rio.sample:sample\n shapes=rasterio.rio.features:shapes\n stack=rasterio.rio.bands:stack\n warp=rasterio.rio.warp:warp\n transform=rasterio.rio.info:transform\n ''',\n include_package_data=True,\n ext_modules=ext_modules,\n zip_safe=False,\n 
install_requires=inst_reqs,\n extras_require={\n 'ipython': ['ipython>=2.0']})\n\nif os.environ.get('PACKAGE_DATA'):\n setup_args['package_data'] = {'rasterio': ['gdal_data/*', 'proj_data/*']}\n\nsetup(**setup_args)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\n# Two environmental variables influence this script.\n#\n# GDAL_CONFIG: the path to a gdal-config program that points to GDAL headers,\n# libraries, and data files.\n#\n# PACKAGE_DATA: if defined, GDAL and PROJ4 data files will be copied into the\n# source or binary distribution. This is essential when creating self-contained\n# binary wheels.\n\nimport logging\nimport os\nimport pprint\nimport shutil\nimport subprocess\nimport sys\n\nfrom setuptools import setup\nfrom setuptools.extension import Extension\n\nlogging.basicConfig()\nlog = logging.getLogger()\n\n# python -W all setup.py ...\nif 'all' in sys.warnoptions:\n log.level = logging.DEBUG\n\ndef check_output(cmd):\n # since subprocess.check_output doesn't exist in 2.6\n # we wrap it here.\n try:\n out = subprocess.check_output(cmd)\n return out.decode('utf')\n except AttributeError:\n # For some reasone check_output doesn't exist\n # So fall back on Popen\n p = subprocess.Popen(cmd, stdout=subprocess.PIPE)\n out, err = p.communicate()\n return out\n\ndef copy_data_tree(datadir, destdir):\n try:\n shutil.rmtree(destdir)\n except OSError:\n pass\n shutil.copytree(datadir, destdir)\n\n# Parse the version from the rasterio module.\nwith open('rasterio/__init__.py') as f:\n for line in f:\n if line.find(\"__version__\") >= 0:\n version = line.split(\"=\")[1].strip()\n version = version.strip('\"')\n version = version.strip(\"'\")\n continue\n\nwith open('VERSION.txt', 'w') as f:\n f.write(version)\n\n# Use Cython if available.\ntry:\n from Cython.Build import cythonize\nexcept ImportError:\n cythonize = None\n\n# By default we'll try to get options via gdal-config. On systems without,\n# options will need to be set in setup.cfg or on the setup command line.\ninclude_dirs = []\nlibrary_dirs = []\nlibraries = []\nextra_link_args = []\ngdal_output = [None]*3\n\ntry:\n import numpy\n include_dirs.append(numpy.get_include())\nexcept ImportError:\n log.critical(\"Numpy and its headers are required to run setup(). Exiting.\")\n sys.exit(1)\n\ntry:\n gdal_config = os.environ.get('GDAL_CONFIG', 'gdal-config')\n for i, flag in enumerate((\"--cflags\", \"--libs\", \"--datadir\")):\n gdal_output[i] = check_output([gdal_config, flag]).strip()\n\n for item in gdal_output[0].split():\n if item.startswith(\"-I\"):\n include_dirs.extend(item[2:].split(\":\"))\n for item in gdal_output[1].split():\n if item.startswith(\"-L\"):\n library_dirs.extend(item[2:].split(\":\"))\n elif item.startswith(\"-l\"):\n libraries.append(item[2:])\n else:\n # e.g. -framework GDAL\n extra_link_args.append(item)\n\nexcept Exception as e:\n if os.name == \"nt\":\n log.info((\"Building on Windows requires extra options to setup.py to locate needed GDAL files.\\n\"\n \"More information is available in the README.\"))\n else:\n log.warning(\"Failed to get options via gdal-config: %s\", str(e))\n\n\n# Conditionally copy the GDAL data. 
To be used in conjunction with\n# the bdist_wheel command to make self-contained binary wheels.\nif os.environ.get('PACKAGE_DATA'):\n destdir = 'rasterio/gdal_data'\n if gdal_output[2]:\n log.info(\"Copying gdal data from %s\" % gdal_output[2])\n copy_data_tree(gdal_output[2], destdir)\n else:\n # check to see if GDAL_DATA is defined\n gdal_data = os.environ.get('GDAL_DATA', None)\n if gdal_data:\n log.info(\"Copying gdal_data from %s\" % gdal_data)\n copy_data_tree(gdal_data, destdir)\n\n # Conditionally copy PROJ.4 data.\n projdatadir = os.environ.get('PROJ_LIB', '/usr/local/share/proj')\n if os.path.exists(projdatadir):\n log.info(\"Copying proj_data from %s\" % projdatadir)\n copy_data_tree(projdatadir, 'rasterio/proj_data')\n\next_options = dict(\n include_dirs=include_dirs,\n library_dirs=library_dirs,\n libraries=libraries,\n extra_link_args=extra_link_args)\n\nif not os.name == \"nt\":\n # These options fail on Windows if using Visual Studio\n ext_options['extra_compile_args'] = ['-Wno-unused-parameter',\n '-Wno-unused-function']\n\nlog.debug('ext_options:\\n%s', pprint.pformat(ext_options))\n\n# When building from a repo, Cython is required.\nif os.path.exists(\"MANIFEST.in\") and \"clean\" not in sys.argv:\n log.info(\"MANIFEST.in found, presume a repo, cythonizing...\")\n if not cythonize:\n log.critical(\n \"Cython.Build.cythonize not found. \"\n \"Cython is required to build from a repo.\")\n sys.exit(1)\n ext_modules = cythonize([\n Extension(\n 'rasterio._base', ['rasterio/_base.pyx'], **ext_options),\n Extension(\n 'rasterio._io', ['rasterio/_io.pyx'], **ext_options),\n Extension(\n 'rasterio._copy', ['rasterio/_copy.pyx'], **ext_options),\n Extension(\n 'rasterio._features', ['rasterio/_features.pyx'], **ext_options),\n Extension(\n 'rasterio._drivers', ['rasterio/_drivers.pyx'], **ext_options),\n Extension(\n 'rasterio._warp', ['rasterio/_warp.pyx'], **ext_options),\n Extension(\n 'rasterio._fill', ['rasterio/_fill.pyx', 'rasterio/rasterfill.cpp'], **ext_options),\n Extension(\n 'rasterio._err', ['rasterio/_err.pyx'], **ext_options),\n Extension(\n 'rasterio._example', ['rasterio/_example.pyx'], **ext_options),\n ], quiet=True)\n\n# If there's no manifest template, as in an sdist, we just specify .c files.\nelse:\n ext_modules = [\n Extension(\n 'rasterio._base', ['rasterio/_base.c'], **ext_options),\n Extension(\n 'rasterio._io', ['rasterio/_io.c'], **ext_options),\n Extension(\n 'rasterio._copy', ['rasterio/_copy.c'], **ext_options),\n Extension(\n 'rasterio._features', ['rasterio/_features.c'], **ext_options),\n Extension(\n 'rasterio._drivers', ['rasterio/_drivers.c'], **ext_options),\n Extension(\n 'rasterio._warp', ['rasterio/_warp.cpp'], **ext_options),\n Extension(\n 'rasterio._fill', ['rasterio/_fill.cpp', 'rasterio/rasterfill.cpp'], **ext_options),\n Extension(\n 'rasterio._err', ['rasterio/_err.c'], **ext_options),\n Extension(\n 'rasterio._example', ['rasterio/_example.c'], **ext_options),\n ]\n\nwith open('README.rst') as f:\n readme = f.read()\n\n# Runtime requirements.\ninst_reqs = [\n 'affine>=1.0',\n 'cligj>=0.2.0',\n 'Numpy>=1.7',\n 'snuggs>=1.3.1',\n 'click-plugins']\n\nif sys.version_info < (3, 4):\n inst_reqs.append('enum34')\n\nsetup_args = dict(\n name='rasterio',\n version=version,\n description=\"Fast and direct raster I/O for use with Numpy and SciPy\",\n long_description=readme,\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Information Technology',\n 'Intended Audience :: 
Science/Research',\n 'License :: OSI Approved :: BSD License',\n 'Programming Language :: C',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Topic :: Multimedia :: Graphics :: Graphics Conversion',\n 'Topic :: Scientific/Engineering :: GIS'],\n keywords='raster gdal',\n author='Sean Gillies',\n author_email='[email protected]',\n url='https://github.com/mapbox/rasterio',\n license='BSD',\n package_dir={'': '.'},\n packages=['rasterio', 'rasterio.rio', 'rasterio.tools'],\n entry_points='''\n [console_scripts]\n rio=rasterio.rio.main:main_group\n\n [rasterio.rio_commands]\n bounds=rasterio.rio.features:bounds\n calc=rasterio.rio.calc:calc\n clip=rasterio.rio.convert:clip\n convert=rasterio.rio.convert:convert\n edit-info=rasterio.rio.info:edit\n env=rasterio.rio.info:env\n info=rasterio.rio.info:info\n insp=rasterio.rio.info:insp\n mask=rasterio.rio.features:mask\n merge=rasterio.rio.merge:merge\n overview=rasterio.rio.overview:overview\n rasterize=rasterio.rio.features:rasterize\n sample=rasterio.rio.sample:sample\n shapes=rasterio.rio.features:shapes\n stack=rasterio.rio.bands:stack\n warp=rasterio.rio.warp:warp\n transform=rasterio.rio.info:transform\n ''',\n include_package_data=True,\n ext_modules=ext_modules,\n zip_safe=False,\n install_requires=inst_reqs,\n extras_require={\n 'ipython': ['ipython>=2.0']})\n\nif os.environ.get('PACKAGE_DATA'):\n setup_args['package_data'] = {'rasterio': ['gdal_data/*', 'proj_data/*']}\n\nsetup(**setup_args)\n", "path": "setup.py"}]} | 3,621 | 117 |
gh_patches_debug_32299 | rasdani/github-patches | git_diff | spack__spack-32550 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
spfft: inherit from ROCmPackage
Following https://github.com/spack/spack/pull/31207#issuecomment-1161932478 this PR adapts `spfft` the same way by inheriting from `ROCmPackage`.
Also for this one, I would ask one of the reviewer if they can test it works correctly.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `var/spack/repos/builtin/packages/spfft/package.py`
Content:
```
1 # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
2 # Spack Project Developers. See the top-level COPYRIGHT file for details.
3 #
4 # SPDX-License-Identifier: (Apache-2.0 OR MIT)
5
6
7 from spack.package import *
8
9
10 class Spfft(CMakePackage, CudaPackage):
11 """Sparse 3D FFT library with MPI, OpenMP, CUDA and ROCm support."""
12
13 homepage = "https://github.com/eth-cscs/SpFFT"
14 url = "https://github.com/eth-cscs/SpFFT/archive/v0.9.8.zip"
15 git = "https://github.com/eth-cscs/SpFFT.git"
16
17 maintainers = ["AdhocMan", "haampie"]
18
19 version("develop", branch="develop")
20 version("master", branch="master")
21
22 version("1.0.6", sha256="e1b927c61f8abbb4a9937653f917169e6253e8c40b850df491594310943ca14b")
23 version("1.0.5", sha256="2a59d856286ea8559f00a32fc38f9f7546209cfa90112232a5288a69689a6e05")
24 version("1.0.4", sha256="41e63880d95343da0d8c3dbe5bfb3d46a1d612199cc9cc13a936f1628a7fdb8e")
25 version("1.0.3", sha256="4f87734e3582ef96ddc0402d0db78cfc173bed9cab3e0d9c6a6bf8b660d69559")
26 version("1.0.2", sha256="9b1296bda0b9ec3d37c74fd64354a01ebc6e2da7cb026c1f821882160b03c692")
27 version("1.0.1", sha256="f8ab706309776cfbd2bfd8e29a6a9ffb5c8f3cd62399bf82db1e416ae5c490c8")
28 version("1.0.0", sha256="bd98897aa6734563ec63cd84168e731ef2e2bbc01a574c6dc59b74475742b6ee")
29 version("0.9.13", sha256="5ccc93c9362bec14cfb6e31dd0e7ae7e48db0453ab49ebc9722041b69db759ef")
30 version("0.9.12", sha256="1f7bf5164dcceb0e3bbce7d6ff9faef3145ad17cf3430149d40a98c43c010acc")
31 version("0.9.11", sha256="36542a60378e8672654188dee006975ef9e10f502791459ff7ebf4b38451cb9b")
32 version("0.9.10", sha256="9cbbb7ba5e53e17eeb45e809841d8272e5333f739c2442a99c3e255c1ddec3e9")
33 version("0.9.9", sha256="a8fd7a2d767716bb73185ca03bf4c106c6981b79130f3e456e5d2e744a2b3ba0")
34 version("0.9.8", sha256="f49fa51316bbfa68309e951d2375e1f6904120c93868cbe13bc2974c0b801a3f")
35
36 variant("openmp", default=True, description="Build with OpenMP support")
37 variant("mpi", default=True, description="enable MPI")
38 variant("single_precision", default=False, description="Sinlge precision")
39 variant("gpu_direct", default=False, description="GPU aware MPI")
40 variant("static", default=False, description="build static library")
41 variant("fortran", default=False, description="enable fortran")
42 variant(
43 "build_type",
44 default="Release",
45 description="CMake build type",
46 values=("Debug", "Release", "RelWithDebInfo"),
47 )
48 depends_on("fftw-api@3")
49 depends_on("mpi", when="+mpi")
50 depends_on("[email protected]:", type="build")
51
52 # ROCM variants + dependencies
53 variant("rocm", default=False, description="Use ROCm backend")
54
55 depends_on("cuda@:10", when="@:0.9.11 +cuda")
56
57 with when("+rocm"):
58 # FindHIP cmake script only works for < 4.1
59 depends_on("hip@:4.0", when="@:1.0.1")
60 # Workaround for compiler bug in ROCm 4.5 added in SpFFT 1.0.6
61 depends_on("hip@:4.3.1", when="@:1.0.5")
62 depends_on("hip")
63 depends_on("rocfft")
64 # rocFFT and hipFFT have split with latest versions
65 depends_on("hipfft", when="^[email protected]:")
66
67 amdgpu_targets = (
68 "gfx701",
69 "gfx801",
70 "gfx802",
71 "gfx803",
72 "gfx900",
73 "gfx906",
74 "gfx908",
75 "gfx1010",
76 "gfx1011",
77 "gfx1012",
78 )
79 variant("amdgpu_target", default="gfx803,gfx900,gfx906", multi=True, values=amdgpu_targets)
80
81 # Fix compilation error in some cases due to missing include statement
82 # before version 1.0.3
83 patch("0001-fix-missing-limits-include.patch", when="@:1.0.2")
84
85 def cmake_args(self):
86 spec = self.spec
87 args = [
88 self.define_from_variant("SPFFT_OMP", "openmp"),
89 self.define_from_variant("SPFFT_MPI", "mpi"),
90 self.define_from_variant("SPFFT_SINGLE_PRECISION", "single_precision"),
91 self.define_from_variant("SPFFT_GPU_DIRECT", "gpu_direct"),
92 self.define_from_variant("SPFFT_FORTRAN", "fortran"),
93 self.define_from_variant("SPFFT_STATIC", "static"),
94 ]
95
96 if spec.satisfies("+cuda"):
97 args += ["-DSPFFT_GPU_BACKEND=CUDA"]
98
99 cuda_arch = self.spec.variants["cuda_arch"].value
100 if cuda_arch[0] != "none":
101 args += [self.define("CMAKE_CUDA_ARCHITECTURES", cuda_arch)]
102
103 if spec.satisfies("+rocm"):
104 archs = ",".join(self.spec.variants["amdgpu_target"].value)
105 args += [
106 "-DSPFFT_GPU_BACKEND=ROCM",
107 "-DHIP_ROOT_DIR={0}".format(spec["hip"].prefix),
108 "-DHIP_HCC_FLAGS=--amdgpu-target={0}".format(archs),
109 "-DHIP_CXX_COMPILER={0}".format(self.spec["hip"].hipcc),
110 ]
111
112 if "fftw" in spec:
113 args += ["-DSPFFT_FFTW_LIB=FFTW"]
114 elif "intel-mkl" in spec:
115 args += ["-DSPFFT_FFTW_LIB=MKL"]
116
117 return args
118
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/var/spack/repos/builtin/packages/spfft/package.py b/var/spack/repos/builtin/packages/spfft/package.py
--- a/var/spack/repos/builtin/packages/spfft/package.py
+++ b/var/spack/repos/builtin/packages/spfft/package.py
@@ -7,7 +7,7 @@
from spack.package import *
-class Spfft(CMakePackage, CudaPackage):
+class Spfft(CMakePackage, CudaPackage, ROCmPackage):
"""Sparse 3D FFT library with MPI, OpenMP, CUDA and ROCm support."""
homepage = "https://github.com/eth-cscs/SpFFT"
@@ -49,9 +49,6 @@
depends_on("mpi", when="+mpi")
depends_on("[email protected]:", type="build")
- # ROCM variants + dependencies
- variant("rocm", default=False, description="Use ROCm backend")
-
depends_on("cuda@:10", when="@:0.9.11 +cuda")
with when("+rocm"):
@@ -59,25 +56,10 @@
depends_on("hip@:4.0", when="@:1.0.1")
# Workaround for compiler bug in ROCm 4.5 added in SpFFT 1.0.6
depends_on("hip@:4.3.1", when="@:1.0.5")
- depends_on("hip")
depends_on("rocfft")
# rocFFT and hipFFT have split with latest versions
depends_on("hipfft", when="^[email protected]:")
- amdgpu_targets = (
- "gfx701",
- "gfx801",
- "gfx802",
- "gfx803",
- "gfx900",
- "gfx906",
- "gfx908",
- "gfx1010",
- "gfx1011",
- "gfx1012",
- )
- variant("amdgpu_target", default="gfx803,gfx900,gfx906", multi=True, values=amdgpu_targets)
-
# Fix compilation error in some cases due to missing include statement
# before version 1.0.3
patch("0001-fix-missing-limits-include.patch", when="@:1.0.2")
| {"golden_diff": "diff --git a/var/spack/repos/builtin/packages/spfft/package.py b/var/spack/repos/builtin/packages/spfft/package.py\n--- a/var/spack/repos/builtin/packages/spfft/package.py\n+++ b/var/spack/repos/builtin/packages/spfft/package.py\n@@ -7,7 +7,7 @@\n from spack.package import *\n \n \n-class Spfft(CMakePackage, CudaPackage):\n+class Spfft(CMakePackage, CudaPackage, ROCmPackage):\n \"\"\"Sparse 3D FFT library with MPI, OpenMP, CUDA and ROCm support.\"\"\"\n \n homepage = \"https://github.com/eth-cscs/SpFFT\"\n@@ -49,9 +49,6 @@\n depends_on(\"mpi\", when=\"+mpi\")\n depends_on(\"[email protected]:\", type=\"build\")\n \n- # ROCM variants + dependencies\n- variant(\"rocm\", default=False, description=\"Use ROCm backend\")\n-\n depends_on(\"cuda@:10\", when=\"@:0.9.11 +cuda\")\n \n with when(\"+rocm\"):\n@@ -59,25 +56,10 @@\n depends_on(\"hip@:4.0\", when=\"@:1.0.1\")\n # Workaround for compiler bug in ROCm 4.5 added in SpFFT 1.0.6\n depends_on(\"hip@:4.3.1\", when=\"@:1.0.5\")\n- depends_on(\"hip\")\n depends_on(\"rocfft\")\n # rocFFT and hipFFT have split with latest versions\n depends_on(\"hipfft\", when=\"^[email protected]:\")\n \n- amdgpu_targets = (\n- \"gfx701\",\n- \"gfx801\",\n- \"gfx802\",\n- \"gfx803\",\n- \"gfx900\",\n- \"gfx906\",\n- \"gfx908\",\n- \"gfx1010\",\n- \"gfx1011\",\n- \"gfx1012\",\n- )\n- variant(\"amdgpu_target\", default=\"gfx803,gfx900,gfx906\", multi=True, values=amdgpu_targets)\n-\n # Fix compilation error in some cases due to missing include statement\n # before version 1.0.3\n patch(\"0001-fix-missing-limits-include.patch\", when=\"@:1.0.2\")\n", "issue": "spfft: inherit from ROCmPackage\nFollowing https://github.com/spack/spack/pull/31207#issuecomment-1161932478 this PR adapts `spfft` the same way by inheriting from `ROCmPackage`.\r\n\r\nAlso for this one, I would ask one of the reviewer if they can test it works correctly.\n", "before_files": [{"content": "# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other\n# Spack Project Developers. 
See the top-level COPYRIGHT file for details.\n#\n# SPDX-License-Identifier: (Apache-2.0 OR MIT)\n\n\nfrom spack.package import *\n\n\nclass Spfft(CMakePackage, CudaPackage):\n \"\"\"Sparse 3D FFT library with MPI, OpenMP, CUDA and ROCm support.\"\"\"\n\n homepage = \"https://github.com/eth-cscs/SpFFT\"\n url = \"https://github.com/eth-cscs/SpFFT/archive/v0.9.8.zip\"\n git = \"https://github.com/eth-cscs/SpFFT.git\"\n\n maintainers = [\"AdhocMan\", \"haampie\"]\n\n version(\"develop\", branch=\"develop\")\n version(\"master\", branch=\"master\")\n\n version(\"1.0.6\", sha256=\"e1b927c61f8abbb4a9937653f917169e6253e8c40b850df491594310943ca14b\")\n version(\"1.0.5\", sha256=\"2a59d856286ea8559f00a32fc38f9f7546209cfa90112232a5288a69689a6e05\")\n version(\"1.0.4\", sha256=\"41e63880d95343da0d8c3dbe5bfb3d46a1d612199cc9cc13a936f1628a7fdb8e\")\n version(\"1.0.3\", sha256=\"4f87734e3582ef96ddc0402d0db78cfc173bed9cab3e0d9c6a6bf8b660d69559\")\n version(\"1.0.2\", sha256=\"9b1296bda0b9ec3d37c74fd64354a01ebc6e2da7cb026c1f821882160b03c692\")\n version(\"1.0.1\", sha256=\"f8ab706309776cfbd2bfd8e29a6a9ffb5c8f3cd62399bf82db1e416ae5c490c8\")\n version(\"1.0.0\", sha256=\"bd98897aa6734563ec63cd84168e731ef2e2bbc01a574c6dc59b74475742b6ee\")\n version(\"0.9.13\", sha256=\"5ccc93c9362bec14cfb6e31dd0e7ae7e48db0453ab49ebc9722041b69db759ef\")\n version(\"0.9.12\", sha256=\"1f7bf5164dcceb0e3bbce7d6ff9faef3145ad17cf3430149d40a98c43c010acc\")\n version(\"0.9.11\", sha256=\"36542a60378e8672654188dee006975ef9e10f502791459ff7ebf4b38451cb9b\")\n version(\"0.9.10\", sha256=\"9cbbb7ba5e53e17eeb45e809841d8272e5333f739c2442a99c3e255c1ddec3e9\")\n version(\"0.9.9\", sha256=\"a8fd7a2d767716bb73185ca03bf4c106c6981b79130f3e456e5d2e744a2b3ba0\")\n version(\"0.9.8\", sha256=\"f49fa51316bbfa68309e951d2375e1f6904120c93868cbe13bc2974c0b801a3f\")\n\n variant(\"openmp\", default=True, description=\"Build with OpenMP support\")\n variant(\"mpi\", default=True, description=\"enable MPI\")\n variant(\"single_precision\", default=False, description=\"Sinlge precision\")\n variant(\"gpu_direct\", default=False, description=\"GPU aware MPI\")\n variant(\"static\", default=False, description=\"build static library\")\n variant(\"fortran\", default=False, description=\"enable fortran\")\n variant(\n \"build_type\",\n default=\"Release\",\n description=\"CMake build type\",\n values=(\"Debug\", \"Release\", \"RelWithDebInfo\"),\n )\n depends_on(\"fftw-api@3\")\n depends_on(\"mpi\", when=\"+mpi\")\n depends_on(\"[email protected]:\", type=\"build\")\n\n # ROCM variants + dependencies\n variant(\"rocm\", default=False, description=\"Use ROCm backend\")\n\n depends_on(\"cuda@:10\", when=\"@:0.9.11 +cuda\")\n\n with when(\"+rocm\"):\n # FindHIP cmake script only works for < 4.1\n depends_on(\"hip@:4.0\", when=\"@:1.0.1\")\n # Workaround for compiler bug in ROCm 4.5 added in SpFFT 1.0.6\n depends_on(\"hip@:4.3.1\", when=\"@:1.0.5\")\n depends_on(\"hip\")\n depends_on(\"rocfft\")\n # rocFFT and hipFFT have split with latest versions\n depends_on(\"hipfft\", when=\"^[email protected]:\")\n\n amdgpu_targets = (\n \"gfx701\",\n \"gfx801\",\n \"gfx802\",\n \"gfx803\",\n \"gfx900\",\n \"gfx906\",\n \"gfx908\",\n \"gfx1010\",\n \"gfx1011\",\n \"gfx1012\",\n )\n variant(\"amdgpu_target\", default=\"gfx803,gfx900,gfx906\", multi=True, values=amdgpu_targets)\n\n # Fix compilation error in some cases due to missing include statement\n # before version 1.0.3\n patch(\"0001-fix-missing-limits-include.patch\", when=\"@:1.0.2\")\n\n def cmake_args(self):\n spec = 
self.spec\n args = [\n self.define_from_variant(\"SPFFT_OMP\", \"openmp\"),\n self.define_from_variant(\"SPFFT_MPI\", \"mpi\"),\n self.define_from_variant(\"SPFFT_SINGLE_PRECISION\", \"single_precision\"),\n self.define_from_variant(\"SPFFT_GPU_DIRECT\", \"gpu_direct\"),\n self.define_from_variant(\"SPFFT_FORTRAN\", \"fortran\"),\n self.define_from_variant(\"SPFFT_STATIC\", \"static\"),\n ]\n\n if spec.satisfies(\"+cuda\"):\n args += [\"-DSPFFT_GPU_BACKEND=CUDA\"]\n\n cuda_arch = self.spec.variants[\"cuda_arch\"].value\n if cuda_arch[0] != \"none\":\n args += [self.define(\"CMAKE_CUDA_ARCHITECTURES\", cuda_arch)]\n\n if spec.satisfies(\"+rocm\"):\n archs = \",\".join(self.spec.variants[\"amdgpu_target\"].value)\n args += [\n \"-DSPFFT_GPU_BACKEND=ROCM\",\n \"-DHIP_ROOT_DIR={0}\".format(spec[\"hip\"].prefix),\n \"-DHIP_HCC_FLAGS=--amdgpu-target={0}\".format(archs),\n \"-DHIP_CXX_COMPILER={0}\".format(self.spec[\"hip\"].hipcc),\n ]\n\n if \"fftw\" in spec:\n args += [\"-DSPFFT_FFTW_LIB=FFTW\"]\n elif \"intel-mkl\" in spec:\n args += [\"-DSPFFT_FFTW_LIB=MKL\"]\n\n return args\n", "path": "var/spack/repos/builtin/packages/spfft/package.py"}], "after_files": [{"content": "# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other\n# Spack Project Developers. See the top-level COPYRIGHT file for details.\n#\n# SPDX-License-Identifier: (Apache-2.0 OR MIT)\n\n\nfrom spack.package import *\n\n\nclass Spfft(CMakePackage, CudaPackage, ROCmPackage):\n \"\"\"Sparse 3D FFT library with MPI, OpenMP, CUDA and ROCm support.\"\"\"\n\n homepage = \"https://github.com/eth-cscs/SpFFT\"\n url = \"https://github.com/eth-cscs/SpFFT/archive/v0.9.8.zip\"\n git = \"https://github.com/eth-cscs/SpFFT.git\"\n\n maintainers = [\"AdhocMan\", \"haampie\"]\n\n version(\"develop\", branch=\"develop\")\n version(\"master\", branch=\"master\")\n\n version(\"1.0.6\", sha256=\"e1b927c61f8abbb4a9937653f917169e6253e8c40b850df491594310943ca14b\")\n version(\"1.0.5\", sha256=\"2a59d856286ea8559f00a32fc38f9f7546209cfa90112232a5288a69689a6e05\")\n version(\"1.0.4\", sha256=\"41e63880d95343da0d8c3dbe5bfb3d46a1d612199cc9cc13a936f1628a7fdb8e\")\n version(\"1.0.3\", sha256=\"4f87734e3582ef96ddc0402d0db78cfc173bed9cab3e0d9c6a6bf8b660d69559\")\n version(\"1.0.2\", sha256=\"9b1296bda0b9ec3d37c74fd64354a01ebc6e2da7cb026c1f821882160b03c692\")\n version(\"1.0.1\", sha256=\"f8ab706309776cfbd2bfd8e29a6a9ffb5c8f3cd62399bf82db1e416ae5c490c8\")\n version(\"1.0.0\", sha256=\"bd98897aa6734563ec63cd84168e731ef2e2bbc01a574c6dc59b74475742b6ee\")\n version(\"0.9.13\", sha256=\"5ccc93c9362bec14cfb6e31dd0e7ae7e48db0453ab49ebc9722041b69db759ef\")\n version(\"0.9.12\", sha256=\"1f7bf5164dcceb0e3bbce7d6ff9faef3145ad17cf3430149d40a98c43c010acc\")\n version(\"0.9.11\", sha256=\"36542a60378e8672654188dee006975ef9e10f502791459ff7ebf4b38451cb9b\")\n version(\"0.9.10\", sha256=\"9cbbb7ba5e53e17eeb45e809841d8272e5333f739c2442a99c3e255c1ddec3e9\")\n version(\"0.9.9\", sha256=\"a8fd7a2d767716bb73185ca03bf4c106c6981b79130f3e456e5d2e744a2b3ba0\")\n version(\"0.9.8\", sha256=\"f49fa51316bbfa68309e951d2375e1f6904120c93868cbe13bc2974c0b801a3f\")\n\n variant(\"openmp\", default=True, description=\"Build with OpenMP support\")\n variant(\"mpi\", default=True, description=\"enable MPI\")\n variant(\"single_precision\", default=False, description=\"Sinlge precision\")\n variant(\"gpu_direct\", default=False, description=\"GPU aware MPI\")\n variant(\"static\", default=False, description=\"build static library\")\n variant(\"fortran\", default=False, 
description=\"enable fortran\")\n variant(\n \"build_type\",\n default=\"Release\",\n description=\"CMake build type\",\n values=(\"Debug\", \"Release\", \"RelWithDebInfo\"),\n )\n depends_on(\"fftw-api@3\")\n depends_on(\"mpi\", when=\"+mpi\")\n depends_on(\"[email protected]:\", type=\"build\")\n\n depends_on(\"cuda@:10\", when=\"@:0.9.11 +cuda\")\n\n with when(\"+rocm\"):\n # FindHIP cmake script only works for < 4.1\n depends_on(\"hip@:4.0\", when=\"@:1.0.1\")\n # Workaround for compiler bug in ROCm 4.5 added in SpFFT 1.0.6\n depends_on(\"hip@:4.3.1\", when=\"@:1.0.5\")\n depends_on(\"rocfft\")\n # rocFFT and hipFFT have split with latest versions\n depends_on(\"hipfft\", when=\"^[email protected]:\")\n\n # Fix compilation error in some cases due to missing include statement\n # before version 1.0.3\n patch(\"0001-fix-missing-limits-include.patch\", when=\"@:1.0.2\")\n\n def cmake_args(self):\n spec = self.spec\n args = [\n self.define_from_variant(\"SPFFT_OMP\", \"openmp\"),\n self.define_from_variant(\"SPFFT_MPI\", \"mpi\"),\n self.define_from_variant(\"SPFFT_SINGLE_PRECISION\", \"single_precision\"),\n self.define_from_variant(\"SPFFT_GPU_DIRECT\", \"gpu_direct\"),\n self.define_from_variant(\"SPFFT_FORTRAN\", \"fortran\"),\n self.define_from_variant(\"SPFFT_STATIC\", \"static\"),\n ]\n\n if spec.satisfies(\"+cuda\"):\n args += [\"-DSPFFT_GPU_BACKEND=CUDA\"]\n\n cuda_arch = self.spec.variants[\"cuda_arch\"].value\n if cuda_arch[0] != \"none\":\n args += [self.define(\"CMAKE_CUDA_ARCHITECTURES\", cuda_arch)]\n\n if spec.satisfies(\"+rocm\"):\n archs = \",\".join(self.spec.variants[\"amdgpu_target\"].value)\n args += [\n \"-DSPFFT_GPU_BACKEND=ROCM\",\n \"-DHIP_ROOT_DIR={0}\".format(spec[\"hip\"].prefix),\n \"-DHIP_HCC_FLAGS=--amdgpu-target={0}\".format(archs),\n \"-DHIP_CXX_COMPILER={0}\".format(self.spec[\"hip\"].hipcc),\n ]\n\n if \"fftw\" in spec:\n args += [\"-DSPFFT_FFTW_LIB=FFTW\"]\n elif \"intel-mkl\" in spec:\n args += [\"-DSPFFT_FFTW_LIB=MKL\"]\n\n return args\n", "path": "var/spack/repos/builtin/packages/spfft/package.py"}]} | 2,559 | 543 |
gh_patches_debug_64304 | rasdani/github-patches | git_diff | zigpy__zha-device-handlers-1280 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] TS0601 _TZE200_yvx5lh6k (Smart Air Box) not finding the quirk
I purchased a Tuya Smart Air Box with a Zigbee ID of "_TZE200_8ygsuhe1", and it is working as expected. I then purchased a second one, which came with a Zigbee ID of "_TZE200_yvx5lh6k", and it is not working as expected. The [Zigbee Device Compatibility Repository](https://zigbee.blakadder.com/Tuya_RSH-AirBox01.html) claims that both of these Zigbee IDs should be working.
**Here is the Zigbee device signature from the Smart Air Box that is working correctly (_TZE200_8ygsuhe1).**
```
{
"node_descriptor": "NodeDescriptor(logical_type=<LogicalType.Router: 1>, complex_descriptor_available=0, user_descriptor_available=0, reserved=0, aps_flags=0, frequency_band=<FrequencyBand.Freq2400MHz: 8>, mac_capability_flags=<MACCapabilityFlags.AllocateAddress|RxOnWhenIdle|MainsPowered|FullFunctionDevice: 142>, manufacturer_code=4098, maximum_buffer_size=82, maximum_incoming_transfer_size=82, server_mask=11264, maximum_outgoing_transfer_size=82, descriptor_capability_field=<DescriptorCapability.NONE: 0>, *allocate_address=True, *is_alternate_pan_coordinator=False, *is_coordinator=False, *is_end_device=False, *is_full_function_device=True, *is_mains_powered=True, *is_receiver_on_when_idle=True, *is_router=True, *is_security_capable=False)",
"endpoints": {
"1": {
"profile_id": 260,
"device_type": "0x0100",
"in_clusters": [
"0x0000",
"0x0004",
"0x0005",
"0x0402",
"0x0405",
"0x040d",
"0x042b",
"0x042e",
"0xef00"
],
"out_clusters": [
"0x000a",
"0x0019"
]
}
},
"manufacturer": "_TZE200_8ygsuhe1",
"model": "TS0601",
"class": "zhaquirks.tuya.air.ts0601_air_quality.TuyaCO2Sensor"
}
```
**Here is the Zigbee device signature from the Smart Air Box that is not working (_TZE200_yvx5lh6k)**
```
{
"node_descriptor": "NodeDescriptor(logical_type=<LogicalType.Router: 1>, complex_descriptor_available=0, user_descriptor_available=0, reserved=0, aps_flags=0, frequency_band=<FrequencyBand.Freq2400MHz: 8>, mac_capability_flags=<MACCapabilityFlags.AllocateAddress|RxOnWhenIdle|MainsPowered|FullFunctionDevice: 142>, manufacturer_code=4098, maximum_buffer_size=82, maximum_incoming_transfer_size=82, server_mask=11264, maximum_outgoing_transfer_size=82, descriptor_capability_field=<DescriptorCapability.NONE: 0>, *allocate_address=True, *is_alternate_pan_coordinator=False, *is_coordinator=False, *is_end_device=False, *is_full_function_device=True, *is_mains_powered=True, *is_receiver_on_when_idle=True, *is_router=True, *is_security_capable=False)",
"endpoints": {
"1": {
"profile_id": 260,
"device_type": "0x0051",
"in_clusters": [
"0x0000",
"0x0004",
"0x0005",
"0xef00"
],
"out_clusters": [
"0x000a",
"0x0019"
]
}
},
"manufacturer": "_TZE200_yvx5lh6k",
"model": "TS0601",
"class": "zigpy.device.Device"
}
```
**Here is an interesting excerpt from the logs:**
```
2021-08-23 10:08:18 DEBUG (MainThread) [zigpy.quirks.registry] Checking quirks for _TZE200_8ygsuhe1 TS0601 (84:fd:27:ff:fe:d6:98:2f)
2021-08-23 10:08:18 DEBUG (MainThread) [zigpy.quirks.registry] Considering <class 'zhaquirks.tuya.air.ts0601_air_quality.TuyaCO2Sensor'>
2021-08-23 10:08:18 DEBUG (MainThread) [zigpy.quirks.registry] Found custom device replacement for 84:fd:27:ff:fe:d6:98:2f: <class 'zhaquirks.tuya.air.ts0601_air_quality.TuyaCO2Sensor'>
2021-08-23 10:08:18 DEBUG (MainThread) [zigpy.quirks.registry] Checking quirks for _TZE200_yvx5lh6k TS0601 (0c:43:14:ff:fe:88:14:b4)
2021-08-23 10:08:18 DEBUG (MainThread) [zigpy.quirks.registry] Considering <class 'zhaquirks.xbee.xbee_io.XBeeSensor'>
2021-08-23 10:08:18 DEBUG (MainThread) [zigpy.quirks.registry] Fail because endpoint list mismatch: {232, 230} {1}
2021-08-23 10:08:18 DEBUG (MainThread) [zigpy.quirks.registry] Considering <class 'zhaquirks.xbee.xbee3_io.XBee3Sensor'>
2021-08-23 10:08:18 DEBUG (MainThread) [zigpy.quirks.registry] Fail because endpoint list mismatch: {232, 230} {1}
2021-08-23 10:08:18 DEBUG (MainThread) [zigpy.quirks.registry] Considering <class 'zhaquirks.smartthings.tag_v4.SmartThingsTagV4'>
2021-08-23 10:08:18 DEBUG (MainThread) [zigpy.quirks.registry] Fail because device_type mismatch on at least one endpoint
2021-08-23 10:08:18 DEBUG (MainThread) [zigpy.quirks.registry] Considering <class 'zhaquirks.smartthings.multi.SmartthingsMultiPurposeSensor'>
2021-08-23 10:08:18 DEBUG (MainThread) [zigpy.quirks.registry] Fail because device_type mismatch on at least one endpoint
2021-08-23 10:08:18 DEBUG (MainThread) [zigpy.quirks.registry] Considering <class 'zhaquirks.netvox.z308e3ed.Z308E3ED'>
2021-08-23 10:08:18 DEBUG (MainThread) [zigpy.quirks.registry] Fail because device_type mismatch on at least one endpoint
2021-08-23 10:08:18 DEBUG (MainThread) [zigpy.quirks.registry] Considering <class 'zhaquirks.gledopto.soposhgu10.SoposhGU10'>
2021-08-23 10:08:18 DEBUG (MainThread) [zigpy.quirks.registry] Fail because endpoint list mismatch: {11, 13} {1}
2021-08-23 10:08:18 DEBUG (MainThread) [zigpy.quirks.registry] Considering <class 'bellows.zigbee.application.EZSPCoordinator'>
2021-08-23 10:08:18 DEBUG (MainThread) [zigpy.quirks.registry] Fail because device_type mismatch on at least one endpoint
```
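
For reference, the registry only applies a quirk when both the (manufacturer, model) pair in `MODELS_INFO` and the reported endpoint layout (profile, device_type, clusters) line up, which is why this unit falls through to `zigpy.device.Device`. Below is a minimal sketch of a signature that would match the endpoints reported above — illustrative only, not the actual fix or any particular quirk class:

```python
from zhaquirks.const import (
    DEVICE_TYPE,
    ENDPOINTS,
    INPUT_CLUSTERS,
    MODELS_INFO,
    OUTPUT_CLUSTERS,
    PROFILE_ID,
)

# Illustrative signature fragment; it mirrors the endpoint dump of the
# non-working unit rather than the existing TuyaCO2Sensor classes.
signature = {
    MODELS_INFO: [("_TZE200_yvx5lh6k", "TS0601")],
    ENDPOINTS: {
        1: {
            PROFILE_ID: 260,  # zha.PROFILE_ID
            DEVICE_TYPE: 0x0051,  # as reported by this unit
            INPUT_CLUSTERS: [0x0000, 0x0004, 0x0005, 0xEF00],
            OUTPUT_CLUSTERS: [0x000A, 0x0019],
        }
    },
}
```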
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `zhaquirks/tuya/air/ts0601_air_quality.py`
Content:
```
1 """Tuya Air Quality sensor."""
2
3 from zigpy.profiles import zha
4 from zigpy.quirks import CustomDevice
5 from zigpy.zcl.clusters.general import Basic, GreenPowerProxy, Groups, Ota, Scenes, Time
6
7 from zhaquirks.const import (
8 DEVICE_TYPE,
9 ENDPOINTS,
10 INPUT_CLUSTERS,
11 MODELS_INFO,
12 OUTPUT_CLUSTERS,
13 PROFILE_ID,
14 )
15 from zhaquirks.tuya.air import (
16 TuyaAirQualityCO2,
17 TuyaAirQualityFormaldehyde,
18 TuyaAirQualityHumidity,
19 TuyaAirQualityTemperature,
20 TuyaAirQualityVOC,
21 TuyaCO2ManufCluster,
22 )
23
24
25 class TuyaCO2Sensor(CustomDevice):
26 """Tuya Air quality device."""
27
28 signature = {
29 # NodeDescriptor(logical_type=<LogicalType.Router: 1>, complex_descriptor_available=0, user_descriptor_available=0, reserved=0, aps_flags=0, frequency_band=<FrequencyBand.Freq2400MHz: 8>, mac_capability_flags=<MACCapabilityFlags.AllocateAddress|RxOnWhenIdle|MainsPowered|FullFunctionDevice: 142>, manufacturer_code=4098, maximum_buffer_size=82, maximum_incoming_transfer_size=82, server_mask=11264, maximum_outgoing_transfer_size=82, descriptor_capability_field=<DescriptorCapability.0: 0>, *allocate_address=True, *is_alternate_pan_coordinator=False, *is_coordinator=False, *is_end_device=False, *is_full_function_device=True, *is_mains_powered=True, *is_receiver_on_when_idle=True, *is_router=True, *is_security_capable=False)]
30 # device_version=1
31 # SizePrefixedSimpleDescriptor(endpoint=1, profile=260, device_type=81, device_version=1,
32 # input_clusters=[0, 4, 5, 61184],
33 # output_clusters=[25, 10])
34 MODELS_INFO: [
35 ("_TZE200_8ygsuhe1", "TS0601"),
36 ("_TZE200_yvx5lh6k", "TS0601"),
37 ],
38 ENDPOINTS: {
39 1: {
40 PROFILE_ID: zha.PROFILE_ID,
41 DEVICE_TYPE: zha.DeviceType.SMART_PLUG,
42 INPUT_CLUSTERS: [
43 Basic.cluster_id,
44 Groups.cluster_id,
45 Scenes.cluster_id,
46 TuyaCO2ManufCluster.cluster_id,
47 ],
48 OUTPUT_CLUSTERS: [Time.cluster_id, Ota.cluster_id],
49 }
50 },
51 }
52
53 replacement = {
54 ENDPOINTS: {
55 1: {
56 DEVICE_TYPE: zha.DeviceType.ON_OFF_LIGHT,
57 INPUT_CLUSTERS: [
58 Basic.cluster_id,
59 Groups.cluster_id,
60 Scenes.cluster_id,
61 TuyaCO2ManufCluster,
62 TuyaAirQualityCO2,
63 TuyaAirQualityFormaldehyde,
64 TuyaAirQualityHumidity,
65 TuyaAirQualityTemperature,
66 TuyaAirQualityVOC,
67 ],
68 OUTPUT_CLUSTERS: [Time.cluster_id, Ota.cluster_id],
69 }
70 }
71 }
72
73
74 class TuyaCO2SensorGPP(CustomDevice):
75 """Tuya Air quality device with GPP."""
76
77 signature = {
78 # NodeDescriptor(logical_type=<LogicalType.Router: 1>, complex_descriptor_available=0, user_descriptor_available=0, reserved=0, aps_flags=0, frequency_band=<FrequencyBand.Freq2400MHz: 8>, mac_capability_flags=<MACCapabilityFlags.AllocateAddress|RxOnWhenIdle|MainsPowered|FullFunctionDevice: 142>, manufacturer_code=4098, maximum_buffer_size=82, maximum_incoming_transfer_size=82, server_mask=11264, maximum_outgoing_transfer_size=82, descriptor_capability_field=<DescriptorCapability.0: 0>, *allocate_address=True, *is_alternate_pan_coordinator=False, *is_coordinator=False, *is_end_device=False, *is_full_function_device=True, *is_mains_powered=True, *is_receiver_on_when_idle=True, *is_router=True, *is_security_capable=False)]
79 # device_version=1
80 # SizePrefixedSimpleDescriptor(endpoint=1, profile=260, device_type=81, device_version=1,
81 # input_clusters=[0, 4, 5, 61184],
82 # output_clusters=[25, 10])
83 MODELS_INFO: [
84 ("_TZE200_ryfmq5rl", "TS0601"),
85 ],
86 ENDPOINTS: {
87 1: {
88 PROFILE_ID: zha.PROFILE_ID,
89 DEVICE_TYPE: zha.DeviceType.SMART_PLUG,
90 INPUT_CLUSTERS: [
91 Basic.cluster_id,
92 Groups.cluster_id,
93 Scenes.cluster_id,
94 TuyaCO2ManufCluster.cluster_id,
95 ],
96 OUTPUT_CLUSTERS: [Time.cluster_id, Ota.cluster_id],
97 },
98 242: {
99 # <SimpleDescriptor endpoint=242 profile=41440 device_type=97
100 # input_clusters=[]
101 # output_clusters=[33]
102 PROFILE_ID: 41440,
103 DEVICE_TYPE: 97,
104 INPUT_CLUSTERS: [],
105 OUTPUT_CLUSTERS: [GreenPowerProxy.cluster_id],
106 },
107 },
108 }
109
110 replacement = {
111 ENDPOINTS: {
112 1: {
113 DEVICE_TYPE: zha.DeviceType.ON_OFF_LIGHT,
114 INPUT_CLUSTERS: [
115 Basic.cluster_id,
116 Groups.cluster_id,
117 Scenes.cluster_id,
118 TuyaCO2ManufCluster,
119 TuyaAirQualityCO2,
120 TuyaAirQualityFormaldehyde,
121 TuyaAirQualityHumidity,
122 TuyaAirQualityTemperature,
123 TuyaAirQualityVOC,
124 ],
125 OUTPUT_CLUSTERS: [Time.cluster_id, Ota.cluster_id],
126 },
127 242: {
128 PROFILE_ID: 41440,
129 DEVICE_TYPE: 97,
130 INPUT_CLUSTERS: [],
131 OUTPUT_CLUSTERS: [GreenPowerProxy.cluster_id],
132 },
133 }
134 }
135
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/zhaquirks/tuya/air/ts0601_air_quality.py b/zhaquirks/tuya/air/ts0601_air_quality.py
--- a/zhaquirks/tuya/air/ts0601_air_quality.py
+++ b/zhaquirks/tuya/air/ts0601_air_quality.py
@@ -82,6 +82,7 @@
# output_clusters=[25, 10])
MODELS_INFO: [
("_TZE200_ryfmq5rl", "TS0601"),
+ ("_TZE200_yvx5lh6k", "TS0601"),
],
ENDPOINTS: {
1: {
| {"golden_diff": "diff --git a/zhaquirks/tuya/air/ts0601_air_quality.py b/zhaquirks/tuya/air/ts0601_air_quality.py\n--- a/zhaquirks/tuya/air/ts0601_air_quality.py\n+++ b/zhaquirks/tuya/air/ts0601_air_quality.py\n@@ -82,6 +82,7 @@\n # output_clusters=[25, 10])\n MODELS_INFO: [\n (\"_TZE200_ryfmq5rl\", \"TS0601\"),\n+ (\"_TZE200_yvx5lh6k\", \"TS0601\"),\n ],\n ENDPOINTS: {\n 1: {\n", "issue": "[BUG] TS0601 _TZE200_yvx5lh6k (Smart Air Box) not finding the quirk\nI purchased a Tuya Smart Air Box with a Zigbee ID of: \"_TZE200_8ygsuhe1\" and it is working as expected. I purchased a second and it came with a Zigbee ID of: \"_TZE200_yvx5lh6k\" and it is not working as expected. The [Zigbee Device Compatibility Repository](https://zigbee.blakadder.com/Tuya_RSH-AirBox01.html) claims that both of the Zigbee IDs should be working.\r\n\r\n**Here is the Zigbee device signature from the Smart Air box that is working correctly (_TZE200_8ygsuhe1).**\r\n\r\n```\r\n{\r\n \"node_descriptor\": \"NodeDescriptor(logical_type=<LogicalType.Router: 1>, complex_descriptor_available=0, user_descriptor_available=0, reserved=0, aps_flags=0, frequency_band=<FrequencyBand.Freq2400MHz: 8>, mac_capability_flags=<MACCapabilityFlags.AllocateAddress|RxOnWhenIdle|MainsPowered|FullFunctionDevice: 142>, manufacturer_code=4098, maximum_buffer_size=82, maximum_incoming_transfer_size=82, server_mask=11264, maximum_outgoing_transfer_size=82, descriptor_capability_field=<DescriptorCapability.NONE: 0>, *allocate_address=True, *is_alternate_pan_coordinator=False, *is_coordinator=False, *is_end_device=False, *is_full_function_device=True, *is_mains_powered=True, *is_receiver_on_when_idle=True, *is_router=True, *is_security_capable=False)\",\r\n \"endpoints\": {\r\n \"1\": {\r\n \"profile_id\": 260,\r\n \"device_type\": \"0x0100\",\r\n \"in_clusters\": [\r\n \"0x0000\",\r\n \"0x0004\",\r\n \"0x0005\",\r\n \"0x0402\",\r\n \"0x0405\",\r\n \"0x040d\",\r\n \"0x042b\",\r\n \"0x042e\",\r\n \"0xef00\"\r\n ],\r\n \"out_clusters\": [\r\n \"0x000a\",\r\n \"0x0019\"\r\n ]\r\n }\r\n },\r\n \"manufacturer\": \"_TZE200_8ygsuhe1\",\r\n \"model\": \"TS0601\",\r\n \"class\": \"zhaquirks.tuya.air.ts0601_air_quality.TuyaCO2Sensor\"\r\n}\r\n\r\n```\r\n**Here is the Zigbee device signature from the Smart Air Box that is not working (_TZE200_yvx5lh6k)**\r\n\r\n```\r\n{\r\n \"node_descriptor\": \"NodeDescriptor(logical_type=<LogicalType.Router: 1>, complex_descriptor_available=0, user_descriptor_available=0, reserved=0, aps_flags=0, frequency_band=<FrequencyBand.Freq2400MHz: 8>, mac_capability_flags=<MACCapabilityFlags.AllocateAddress|RxOnWhenIdle|MainsPowered|FullFunctionDevice: 142>, manufacturer_code=4098, maximum_buffer_size=82, maximum_incoming_transfer_size=82, server_mask=11264, maximum_outgoing_transfer_size=82, descriptor_capability_field=<DescriptorCapability.NONE: 0>, *allocate_address=True, *is_alternate_pan_coordinator=False, *is_coordinator=False, *is_end_device=False, *is_full_function_device=True, *is_mains_powered=True, *is_receiver_on_when_idle=True, *is_router=True, *is_security_capable=False)\",\r\n \"endpoints\": {\r\n \"1\": {\r\n \"profile_id\": 260,\r\n \"device_type\": \"0x0051\",\r\n \"in_clusters\": [\r\n \"0x0000\",\r\n \"0x0004\",\r\n \"0x0005\",\r\n \"0xef00\"\r\n ],\r\n \"out_clusters\": [\r\n \"0x000a\",\r\n \"0x0019\"\r\n ]\r\n }\r\n },\r\n \"manufacturer\": \"_TZE200_yvx5lh6k\",\r\n \"model\": \"TS0601\",\r\n \"class\": \"zigpy.device.Device\"\r\n}\r\n\r\n```\r\n**Here is an interesting excerpt from the 
logs:**\r\n\r\n```\r\n2021-08-23 10:08:18 DEBUG (MainThread) [zigpy.quirks.registry] Checking quirks for _TZE200_8ygsuhe1 TS0601 (84:fd:27:ff:fe:d6:98:2f)\r\n2021-08-23 10:08:18 DEBUG (MainThread) [zigpy.quirks.registry] Considering <class 'zhaquirks.tuya.air.ts0601_air_quality.TuyaCO2Sensor'>\r\n2021-08-23 10:08:18 DEBUG (MainThread) [zigpy.quirks.registry] Found custom device replacement for 84:fd:27:ff:fe:d6:98:2f: <class 'zhaquirks.tuya.air.ts0601_air_quality.TuyaCO2Sensor'>\r\n2021-08-23 10:08:18 DEBUG (MainThread) [zigpy.quirks.registry] Checking quirks for _TZE200_yvx5lh6k TS0601 (0c:43:14:ff:fe:88:14:b4)\r\n2021-08-23 10:08:18 DEBUG (MainThread) [zigpy.quirks.registry] Considering <class 'zhaquirks.xbee.xbee_io.XBeeSensor'>\r\n2021-08-23 10:08:18 DEBUG (MainThread) [zigpy.quirks.registry] Fail because endpoint list mismatch: {232, 230} {1}\r\n2021-08-23 10:08:18 DEBUG (MainThread) [zigpy.quirks.registry] Considering <class 'zhaquirks.xbee.xbee3_io.XBee3Sensor'>\r\n2021-08-23 10:08:18 DEBUG (MainThread) [zigpy.quirks.registry] Fail because endpoint list mismatch: {232, 230} {1}\r\n2021-08-23 10:08:18 DEBUG (MainThread) [zigpy.quirks.registry] Considering <class 'zhaquirks.smartthings.tag_v4.SmartThingsTagV4'>\r\n2021-08-23 10:08:18 DEBUG (MainThread) [zigpy.quirks.registry] Fail because device_type mismatch on at least one endpoint\r\n2021-08-23 10:08:18 DEBUG (MainThread) [zigpy.quirks.registry] Considering <class 'zhaquirks.smartthings.multi.SmartthingsMultiPurposeSensor'>\r\n2021-08-23 10:08:18 DEBUG (MainThread) [zigpy.quirks.registry] Fail because device_type mismatch on at least one endpoint\r\n2021-08-23 10:08:18 DEBUG (MainThread) [zigpy.quirks.registry] Considering <class 'zhaquirks.netvox.z308e3ed.Z308E3ED'>\r\n2021-08-23 10:08:18 DEBUG (MainThread) [zigpy.quirks.registry] Fail because device_type mismatch on at least one endpoint\r\n2021-08-23 10:08:18 DEBUG (MainThread) [zigpy.quirks.registry] Considering <class 'zhaquirks.gledopto.soposhgu10.SoposhGU10'>\r\n2021-08-23 10:08:18 DEBUG (MainThread) [zigpy.quirks.registry] Fail because endpoint list mismatch: {11, 13} {1}\r\n2021-08-23 10:08:18 DEBUG (MainThread) [zigpy.quirks.registry] Considering <class 'bellows.zigbee.application.EZSPCoordinator'>\r\n2021-08-23 10:08:18 DEBUG (MainThread) [zigpy.quirks.registry] Fail because device_type mismatch on at least one endpoint\r\n\r\n\r\n```\n", "before_files": [{"content": "\"\"\"Tuya Air Quality sensor.\"\"\"\n\nfrom zigpy.profiles import zha\nfrom zigpy.quirks import CustomDevice\nfrom zigpy.zcl.clusters.general import Basic, GreenPowerProxy, Groups, Ota, Scenes, Time\n\nfrom zhaquirks.const import (\n DEVICE_TYPE,\n ENDPOINTS,\n INPUT_CLUSTERS,\n MODELS_INFO,\n OUTPUT_CLUSTERS,\n PROFILE_ID,\n)\nfrom zhaquirks.tuya.air import (\n TuyaAirQualityCO2,\n TuyaAirQualityFormaldehyde,\n TuyaAirQualityHumidity,\n TuyaAirQualityTemperature,\n TuyaAirQualityVOC,\n TuyaCO2ManufCluster,\n)\n\n\nclass TuyaCO2Sensor(CustomDevice):\n \"\"\"Tuya Air quality device.\"\"\"\n\n signature = {\n # NodeDescriptor(logical_type=<LogicalType.Router: 1>, complex_descriptor_available=0, user_descriptor_available=0, reserved=0, aps_flags=0, frequency_band=<FrequencyBand.Freq2400MHz: 8>, mac_capability_flags=<MACCapabilityFlags.AllocateAddress|RxOnWhenIdle|MainsPowered|FullFunctionDevice: 142>, manufacturer_code=4098, maximum_buffer_size=82, maximum_incoming_transfer_size=82, server_mask=11264, maximum_outgoing_transfer_size=82, descriptor_capability_field=<DescriptorCapability.0: 0>, 
*allocate_address=True, *is_alternate_pan_coordinator=False, *is_coordinator=False, *is_end_device=False, *is_full_function_device=True, *is_mains_powered=True, *is_receiver_on_when_idle=True, *is_router=True, *is_security_capable=False)]\n # device_version=1\n # SizePrefixedSimpleDescriptor(endpoint=1, profile=260, device_type=81, device_version=1,\n # input_clusters=[0, 4, 5, 61184],\n # output_clusters=[25, 10])\n MODELS_INFO: [\n (\"_TZE200_8ygsuhe1\", \"TS0601\"),\n (\"_TZE200_yvx5lh6k\", \"TS0601\"),\n ],\n ENDPOINTS: {\n 1: {\n PROFILE_ID: zha.PROFILE_ID,\n DEVICE_TYPE: zha.DeviceType.SMART_PLUG,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n Groups.cluster_id,\n Scenes.cluster_id,\n TuyaCO2ManufCluster.cluster_id,\n ],\n OUTPUT_CLUSTERS: [Time.cluster_id, Ota.cluster_id],\n }\n },\n }\n\n replacement = {\n ENDPOINTS: {\n 1: {\n DEVICE_TYPE: zha.DeviceType.ON_OFF_LIGHT,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n Groups.cluster_id,\n Scenes.cluster_id,\n TuyaCO2ManufCluster,\n TuyaAirQualityCO2,\n TuyaAirQualityFormaldehyde,\n TuyaAirQualityHumidity,\n TuyaAirQualityTemperature,\n TuyaAirQualityVOC,\n ],\n OUTPUT_CLUSTERS: [Time.cluster_id, Ota.cluster_id],\n }\n }\n }\n\n\nclass TuyaCO2SensorGPP(CustomDevice):\n \"\"\"Tuya Air quality device with GPP.\"\"\"\n\n signature = {\n # NodeDescriptor(logical_type=<LogicalType.Router: 1>, complex_descriptor_available=0, user_descriptor_available=0, reserved=0, aps_flags=0, frequency_band=<FrequencyBand.Freq2400MHz: 8>, mac_capability_flags=<MACCapabilityFlags.AllocateAddress|RxOnWhenIdle|MainsPowered|FullFunctionDevice: 142>, manufacturer_code=4098, maximum_buffer_size=82, maximum_incoming_transfer_size=82, server_mask=11264, maximum_outgoing_transfer_size=82, descriptor_capability_field=<DescriptorCapability.0: 0>, *allocate_address=True, *is_alternate_pan_coordinator=False, *is_coordinator=False, *is_end_device=False, *is_full_function_device=True, *is_mains_powered=True, *is_receiver_on_when_idle=True, *is_router=True, *is_security_capable=False)]\n # device_version=1\n # SizePrefixedSimpleDescriptor(endpoint=1, profile=260, device_type=81, device_version=1,\n # input_clusters=[0, 4, 5, 61184],\n # output_clusters=[25, 10])\n MODELS_INFO: [\n (\"_TZE200_ryfmq5rl\", \"TS0601\"),\n ],\n ENDPOINTS: {\n 1: {\n PROFILE_ID: zha.PROFILE_ID,\n DEVICE_TYPE: zha.DeviceType.SMART_PLUG,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n Groups.cluster_id,\n Scenes.cluster_id,\n TuyaCO2ManufCluster.cluster_id,\n ],\n OUTPUT_CLUSTERS: [Time.cluster_id, Ota.cluster_id],\n },\n 242: {\n # <SimpleDescriptor endpoint=242 profile=41440 device_type=97\n # input_clusters=[]\n # output_clusters=[33]\n PROFILE_ID: 41440,\n DEVICE_TYPE: 97,\n INPUT_CLUSTERS: [],\n OUTPUT_CLUSTERS: [GreenPowerProxy.cluster_id],\n },\n },\n }\n\n replacement = {\n ENDPOINTS: {\n 1: {\n DEVICE_TYPE: zha.DeviceType.ON_OFF_LIGHT,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n Groups.cluster_id,\n Scenes.cluster_id,\n TuyaCO2ManufCluster,\n TuyaAirQualityCO2,\n TuyaAirQualityFormaldehyde,\n TuyaAirQualityHumidity,\n TuyaAirQualityTemperature,\n TuyaAirQualityVOC,\n ],\n OUTPUT_CLUSTERS: [Time.cluster_id, Ota.cluster_id],\n },\n 242: {\n PROFILE_ID: 41440,\n DEVICE_TYPE: 97,\n INPUT_CLUSTERS: [],\n OUTPUT_CLUSTERS: [GreenPowerProxy.cluster_id],\n },\n }\n }\n", "path": "zhaquirks/tuya/air/ts0601_air_quality.py"}], "after_files": [{"content": "\"\"\"Tuya Air Quality sensor.\"\"\"\n\nfrom zigpy.profiles import zha\nfrom zigpy.quirks import CustomDevice\nfrom zigpy.zcl.clusters.general import Basic, 
GreenPowerProxy, Groups, Ota, Scenes, Time\n\nfrom zhaquirks.const import (\n DEVICE_TYPE,\n ENDPOINTS,\n INPUT_CLUSTERS,\n MODELS_INFO,\n OUTPUT_CLUSTERS,\n PROFILE_ID,\n)\nfrom zhaquirks.tuya.air import (\n TuyaAirQualityCO2,\n TuyaAirQualityFormaldehyde,\n TuyaAirQualityHumidity,\n TuyaAirQualityTemperature,\n TuyaAirQualityVOC,\n TuyaCO2ManufCluster,\n)\n\n\nclass TuyaCO2Sensor(CustomDevice):\n \"\"\"Tuya Air quality device.\"\"\"\n\n signature = {\n # NodeDescriptor(logical_type=<LogicalType.Router: 1>, complex_descriptor_available=0, user_descriptor_available=0, reserved=0, aps_flags=0, frequency_band=<FrequencyBand.Freq2400MHz: 8>, mac_capability_flags=<MACCapabilityFlags.AllocateAddress|RxOnWhenIdle|MainsPowered|FullFunctionDevice: 142>, manufacturer_code=4098, maximum_buffer_size=82, maximum_incoming_transfer_size=82, server_mask=11264, maximum_outgoing_transfer_size=82, descriptor_capability_field=<DescriptorCapability.0: 0>, *allocate_address=True, *is_alternate_pan_coordinator=False, *is_coordinator=False, *is_end_device=False, *is_full_function_device=True, *is_mains_powered=True, *is_receiver_on_when_idle=True, *is_router=True, *is_security_capable=False)]\n # device_version=1\n # SizePrefixedSimpleDescriptor(endpoint=1, profile=260, device_type=81, device_version=1,\n # input_clusters=[0, 4, 5, 61184],\n # output_clusters=[25, 10])\n MODELS_INFO: [\n (\"_TZE200_8ygsuhe1\", \"TS0601\"),\n (\"_TZE200_yvx5lh6k\", \"TS0601\"),\n ],\n ENDPOINTS: {\n 1: {\n PROFILE_ID: zha.PROFILE_ID,\n DEVICE_TYPE: zha.DeviceType.SMART_PLUG,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n Groups.cluster_id,\n Scenes.cluster_id,\n TuyaCO2ManufCluster.cluster_id,\n ],\n OUTPUT_CLUSTERS: [Time.cluster_id, Ota.cluster_id],\n }\n },\n }\n\n replacement = {\n ENDPOINTS: {\n 1: {\n DEVICE_TYPE: zha.DeviceType.ON_OFF_LIGHT,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n Groups.cluster_id,\n Scenes.cluster_id,\n TuyaCO2ManufCluster,\n TuyaAirQualityCO2,\n TuyaAirQualityFormaldehyde,\n TuyaAirQualityHumidity,\n TuyaAirQualityTemperature,\n TuyaAirQualityVOC,\n ],\n OUTPUT_CLUSTERS: [Time.cluster_id, Ota.cluster_id],\n }\n }\n }\n\n\nclass TuyaCO2SensorGPP(CustomDevice):\n \"\"\"Tuya Air quality device with GPP.\"\"\"\n\n signature = {\n # NodeDescriptor(logical_type=<LogicalType.Router: 1>, complex_descriptor_available=0, user_descriptor_available=0, reserved=0, aps_flags=0, frequency_band=<FrequencyBand.Freq2400MHz: 8>, mac_capability_flags=<MACCapabilityFlags.AllocateAddress|RxOnWhenIdle|MainsPowered|FullFunctionDevice: 142>, manufacturer_code=4098, maximum_buffer_size=82, maximum_incoming_transfer_size=82, server_mask=11264, maximum_outgoing_transfer_size=82, descriptor_capability_field=<DescriptorCapability.0: 0>, *allocate_address=True, *is_alternate_pan_coordinator=False, *is_coordinator=False, *is_end_device=False, *is_full_function_device=True, *is_mains_powered=True, *is_receiver_on_when_idle=True, *is_router=True, *is_security_capable=False)]\n # device_version=1\n # SizePrefixedSimpleDescriptor(endpoint=1, profile=260, device_type=81, device_version=1,\n # input_clusters=[0, 4, 5, 61184],\n # output_clusters=[25, 10])\n MODELS_INFO: [\n (\"_TZE200_ryfmq5rl\", \"TS0601\"),\n (\"_TZE200_yvx5lh6k\", \"TS0601\"),\n ],\n ENDPOINTS: {\n 1: {\n PROFILE_ID: zha.PROFILE_ID,\n DEVICE_TYPE: zha.DeviceType.SMART_PLUG,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n Groups.cluster_id,\n Scenes.cluster_id,\n TuyaCO2ManufCluster.cluster_id,\n ],\n OUTPUT_CLUSTERS: [Time.cluster_id, Ota.cluster_id],\n },\n 242: {\n # 
<SimpleDescriptor endpoint=242 profile=41440 device_type=97\n # input_clusters=[]\n # output_clusters=[33]\n PROFILE_ID: 41440,\n DEVICE_TYPE: 97,\n INPUT_CLUSTERS: [],\n OUTPUT_CLUSTERS: [GreenPowerProxy.cluster_id],\n },\n },\n }\n\n replacement = {\n ENDPOINTS: {\n 1: {\n DEVICE_TYPE: zha.DeviceType.ON_OFF_LIGHT,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n Groups.cluster_id,\n Scenes.cluster_id,\n TuyaCO2ManufCluster,\n TuyaAirQualityCO2,\n TuyaAirQualityFormaldehyde,\n TuyaAirQualityHumidity,\n TuyaAirQualityTemperature,\n TuyaAirQualityVOC,\n ],\n OUTPUT_CLUSTERS: [Time.cluster_id, Ota.cluster_id],\n },\n 242: {\n PROFILE_ID: 41440,\n DEVICE_TYPE: 97,\n INPUT_CLUSTERS: [],\n OUTPUT_CLUSTERS: [GreenPowerProxy.cluster_id],\n },\n }\n }\n", "path": "zhaquirks/tuya/air/ts0601_air_quality.py"}]} | 3,869 | 163 |
gh_patches_debug_782 | rasdani/github-patches | git_diff | safe-global__safe-config-service-76 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Serve static files with Nginx
When running the application with Nginx as a reverse proxy, static files (such as the Admin CSS) are not correctly collected and served.
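
A plausible direction (sketch only; the directory name is an assumption and has to match the Nginx `location /static/` block): give Django a `STATIC_ROOT` so that `python manage.py collectstatic` has a target, and let Nginx serve that directory directly.

```python
# Sketch of the settings side; Django only collects static assets
# (admin CSS included) once STATIC_ROOT points somewhere concrete.
STATIC_URL = "/static/"
STATIC_ROOT = "staticfiles"  # target directory for `collectstatic`
```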
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/config/settings.py`
Content:
```
1 """
2 Django settings for safe_client_config_service project.
3
4 Generated by 'django-admin startproject' using Django 3.2.
5
6 For more information on this file, see
7 https://docs.djangoproject.com/en/3.2/topics/settings/
8
9 For the full list of settings and their values, see
10 https://docs.djangoproject.com/en/3.2/ref/settings/
11 """
12 import os
13 from distutils.util import strtobool
14 from pathlib import Path
15
16 # Build paths inside the project like this: BASE_DIR / 'subdir'.
17 BASE_DIR = Path(__file__).resolve().parent.parent
18
19 # Quick-start development settings - unsuitable for production
20 # See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
21
22 # SECURITY WARNING: keep the secret key used in production secret!
23 SECRET_KEY = os.getenv("SECRET_KEY", None)
24
25 # SECURITY WARNING: don't run with debug turned on in production!
26 DEBUG = bool(strtobool(os.getenv("DEBUG", "false")))
27
28 # https://docs.djangoproject.com/en/3.2/ref/settings/#std:setting-ALLOWED_HOSTS
29 allowed_hosts = os.getenv("DJANGO_ALLOWED_HOSTS", ".localhost,127.0.0.1,[::1]")
30 ALLOWED_HOSTS = [allowed_host.strip() for allowed_host in allowed_hosts.split(",")]
31
32 # Application definition
33
34 REST_FRAMEWORK = {
35 # https://www.django-rest-framework.org/api-guide/renderers/
36 "DEFAULT_RENDERER_CLASSES": [
37 "djangorestframework_camel_case.render.CamelCaseJSONRenderer",
38 ]
39 }
40
41 INSTALLED_APPS = [
42 "safe_apps.apps.AppsConfig",
43 "django.contrib.admin",
44 "django.contrib.auth",
45 "django.contrib.contenttypes",
46 "django.contrib.sessions",
47 "django.contrib.messages",
48 "django.contrib.staticfiles",
49 "rest_framework",
50 ]
51
52 MIDDLEWARE = [
53 "config.middleware.LoggingMiddleware",
54 "django.middleware.security.SecurityMiddleware",
55 "django.contrib.sessions.middleware.SessionMiddleware",
56 "django.middleware.common.CommonMiddleware",
57 "django.middleware.csrf.CsrfViewMiddleware",
58 "django.contrib.auth.middleware.AuthenticationMiddleware",
59 "django.contrib.messages.middleware.MessageMiddleware",
60 "django.middleware.clickjacking.XFrameOptionsMiddleware",
61 ]
62
63 CACHES = {
64 "default": {
65 "BACKEND": "django.core.cache.backends.locmem.LocMemCache",
66 },
67 "safe-apps": {
68 "BACKEND": "django.core.cache.backends.locmem.LocMemCache",
69 },
70 }
71
72 LOGGING = {
73 "version": 1,
74 "disable_existing_loggers": False,
75 "formatters": {
76 "short": {"format": "%(asctime)s %(message)s"},
77 "verbose": {
78 "format": "%(asctime)s [%(levelname)s] [%(processName)s] %(message)s"
79 },
80 },
81 "handlers": {
82 "console": {
83 "class": "logging.StreamHandler",
84 "formatter": "verbose",
85 },
86 "console_short": {
87 "class": "logging.StreamHandler",
88 "formatter": "short",
89 },
90 },
91 "root": {
92 "handlers": ["console"],
93 "level": os.getenv("ROOT_LOG_LEVEL", "INFO"),
94 },
95 "loggers": {
96 "LoggingMiddleware": {
97 "handlers": ["console_short"],
98 "level": "INFO",
99 "propagate": False,
100 },
101 },
102 }
103
104 ROOT_URLCONF = "config.urls"
105
106 TEMPLATES = [
107 {
108 "BACKEND": "django.template.backends.django.DjangoTemplates",
109 "DIRS": [],
110 "APP_DIRS": True,
111 "OPTIONS": {
112 "context_processors": [
113 "django.template.context_processors.debug",
114 "django.template.context_processors.request",
115 "django.contrib.auth.context_processors.auth",
116 "django.contrib.messages.context_processors.messages",
117 ],
118 },
119 },
120 ]
121
122 WSGI_APPLICATION = "config.wsgi.application"
123
124 # Database
125 # https://docs.djangoproject.com/en/3.2/ref/settings/#databases
126
127 DATABASES = {
128 "default": {
129 "ENGINE": "django.db.backends.postgresql",
130 "NAME": os.getenv("POSTGRES_NAME", "postgres"),
131 "USER": os.getenv("POSTGRES_USER", "postgres"),
132 "PASSWORD": os.getenv("POSTGRES_PASSWORD", "postgres"),
133 "HOST": os.getenv("POSTGRES_HOST", "db"),
134 "PORT": os.getenv("POSTGRES_PORT", "5432"),
135 }
136 }
137
138 # Password validation
139 # https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
140
141 AUTH_PASSWORD_VALIDATORS = [
142 {
143 "NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
144 },
145 {
146 "NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
147 },
148 {
149 "NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
150 },
151 {
152 "NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
153 },
154 ]
155
156 # Internationalization
157 # https://docs.djangoproject.com/en/3.2/topics/i18n/
158
159 LANGUAGE_CODE = "en-us"
160
161 TIME_ZONE = "UTC"
162
163 USE_I18N = True
164
165 USE_L10N = True
166
167 USE_TZ = True
168
169 # Static files (CSS, JavaScript, Images)
170 # https://docs.djangoproject.com/en/3.2/howto/static-files/
171
172 STATIC_URL = "/static/"
173
174 # Default primary key field type
175 # https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
176
177 DEFAULT_AUTO_FIELD = "django.db.models.BigAutoField"
178
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/config/settings.py b/src/config/settings.py
--- a/src/config/settings.py
+++ b/src/config/settings.py
@@ -171,6 +171,8 @@
STATIC_URL = "/static/"
+STATIC_ROOT = "staticfiles"
+
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
| {"golden_diff": "diff --git a/src/config/settings.py b/src/config/settings.py\n--- a/src/config/settings.py\n+++ b/src/config/settings.py\n@@ -171,6 +171,8 @@\n \n STATIC_URL = \"/static/\"\n \n+STATIC_ROOT = \"staticfiles\"\n+\n # Default primary key field type\n # https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field\n", "issue": "Serve static files with Nginx\nWhen running the application with Nginx as reverse-proxy, static files (such as Admin CSS) are not correctly collected and served\n", "before_files": [{"content": "\"\"\"\nDjango settings for safe_client_config_service project.\n\nGenerated by 'django-admin startproject' using Django 3.2.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/3.2/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/3.2/ref/settings/\n\"\"\"\nimport os\nfrom distutils.util import strtobool\nfrom pathlib import Path\n\n# Build paths inside the project like this: BASE_DIR / 'subdir'.\nBASE_DIR = Path(__file__).resolve().parent.parent\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = os.getenv(\"SECRET_KEY\", None)\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = bool(strtobool(os.getenv(\"DEBUG\", \"false\")))\n\n# https://docs.djangoproject.com/en/3.2/ref/settings/#std:setting-ALLOWED_HOSTS\nallowed_hosts = os.getenv(\"DJANGO_ALLOWED_HOSTS\", \".localhost,127.0.0.1,[::1]\")\nALLOWED_HOSTS = [allowed_host.strip() for allowed_host in allowed_hosts.split(\",\")]\n\n# Application definition\n\nREST_FRAMEWORK = {\n # https://www.django-rest-framework.org/api-guide/renderers/\n \"DEFAULT_RENDERER_CLASSES\": [\n \"djangorestframework_camel_case.render.CamelCaseJSONRenderer\",\n ]\n}\n\nINSTALLED_APPS = [\n \"safe_apps.apps.AppsConfig\",\n \"django.contrib.admin\",\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.sessions\",\n \"django.contrib.messages\",\n \"django.contrib.staticfiles\",\n \"rest_framework\",\n]\n\nMIDDLEWARE = [\n \"config.middleware.LoggingMiddleware\",\n \"django.middleware.security.SecurityMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n]\n\nCACHES = {\n \"default\": {\n \"BACKEND\": \"django.core.cache.backends.locmem.LocMemCache\",\n },\n \"safe-apps\": {\n \"BACKEND\": \"django.core.cache.backends.locmem.LocMemCache\",\n },\n}\n\nLOGGING = {\n \"version\": 1,\n \"disable_existing_loggers\": False,\n \"formatters\": {\n \"short\": {\"format\": \"%(asctime)s %(message)s\"},\n \"verbose\": {\n \"format\": \"%(asctime)s [%(levelname)s] [%(processName)s] %(message)s\"\n },\n },\n \"handlers\": {\n \"console\": {\n \"class\": \"logging.StreamHandler\",\n \"formatter\": \"verbose\",\n },\n \"console_short\": {\n \"class\": \"logging.StreamHandler\",\n \"formatter\": \"short\",\n },\n },\n \"root\": {\n \"handlers\": [\"console\"],\n \"level\": os.getenv(\"ROOT_LOG_LEVEL\", \"INFO\"),\n },\n \"loggers\": {\n \"LoggingMiddleware\": {\n \"handlers\": [\"console_short\"],\n \"level\": \"INFO\",\n \"propagate\": 
False,\n },\n },\n}\n\nROOT_URLCONF = \"config.urls\"\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [],\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": [\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.request\",\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n ],\n },\n },\n]\n\nWSGI_APPLICATION = \"config.wsgi.application\"\n\n# Database\n# https://docs.djangoproject.com/en/3.2/ref/settings/#databases\n\nDATABASES = {\n \"default\": {\n \"ENGINE\": \"django.db.backends.postgresql\",\n \"NAME\": os.getenv(\"POSTGRES_NAME\", \"postgres\"),\n \"USER\": os.getenv(\"POSTGRES_USER\", \"postgres\"),\n \"PASSWORD\": os.getenv(\"POSTGRES_PASSWORD\", \"postgres\"),\n \"HOST\": os.getenv(\"POSTGRES_HOST\", \"db\"),\n \"PORT\": os.getenv(\"POSTGRES_PORT\", \"5432\"),\n }\n}\n\n# Password validation\n# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n \"NAME\": \"django.contrib.auth.password_validation.UserAttributeSimilarityValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.MinimumLengthValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.CommonPasswordValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.NumericPasswordValidator\",\n },\n]\n\n# Internationalization\n# https://docs.djangoproject.com/en/3.2/topics/i18n/\n\nLANGUAGE_CODE = \"en-us\"\n\nTIME_ZONE = \"UTC\"\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/3.2/howto/static-files/\n\nSTATIC_URL = \"/static/\"\n\n# Default primary key field type\n# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field\n\nDEFAULT_AUTO_FIELD = \"django.db.models.BigAutoField\"\n", "path": "src/config/settings.py"}], "after_files": [{"content": "\"\"\"\nDjango settings for safe_client_config_service project.\n\nGenerated by 'django-admin startproject' using Django 3.2.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/3.2/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/3.2/ref/settings/\n\"\"\"\nimport os\nfrom distutils.util import strtobool\nfrom pathlib import Path\n\n# Build paths inside the project like this: BASE_DIR / 'subdir'.\nBASE_DIR = Path(__file__).resolve().parent.parent\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = os.getenv(\"SECRET_KEY\", None)\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = bool(strtobool(os.getenv(\"DEBUG\", \"false\")))\n\n# https://docs.djangoproject.com/en/3.2/ref/settings/#std:setting-ALLOWED_HOSTS\nallowed_hosts = os.getenv(\"DJANGO_ALLOWED_HOSTS\", \".localhost,127.0.0.1,[::1]\")\nALLOWED_HOSTS = [allowed_host.strip() for allowed_host in allowed_hosts.split(\",\")]\n\n# Application definition\n\nREST_FRAMEWORK = {\n # https://www.django-rest-framework.org/api-guide/renderers/\n \"DEFAULT_RENDERER_CLASSES\": [\n \"djangorestframework_camel_case.render.CamelCaseJSONRenderer\",\n ]\n}\n\nINSTALLED_APPS = [\n \"safe_apps.apps.AppsConfig\",\n \"django.contrib.admin\",\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n 
\"django.contrib.sessions\",\n \"django.contrib.messages\",\n \"django.contrib.staticfiles\",\n \"rest_framework\",\n]\n\nMIDDLEWARE = [\n \"config.middleware.LoggingMiddleware\",\n \"django.middleware.security.SecurityMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n]\n\nCACHES = {\n \"default\": {\n \"BACKEND\": \"django.core.cache.backends.locmem.LocMemCache\",\n },\n \"safe-apps\": {\n \"BACKEND\": \"django.core.cache.backends.locmem.LocMemCache\",\n },\n}\n\nLOGGING = {\n \"version\": 1,\n \"disable_existing_loggers\": False,\n \"formatters\": {\n \"short\": {\"format\": \"%(asctime)s %(message)s\"},\n \"verbose\": {\n \"format\": \"%(asctime)s [%(levelname)s] [%(processName)s] %(message)s\"\n },\n },\n \"handlers\": {\n \"console\": {\n \"class\": \"logging.StreamHandler\",\n \"formatter\": \"verbose\",\n },\n \"console_short\": {\n \"class\": \"logging.StreamHandler\",\n \"formatter\": \"short\",\n },\n },\n \"root\": {\n \"handlers\": [\"console\"],\n \"level\": os.getenv(\"ROOT_LOG_LEVEL\", \"INFO\"),\n },\n \"loggers\": {\n \"LoggingMiddleware\": {\n \"handlers\": [\"console_short\"],\n \"level\": \"INFO\",\n \"propagate\": False,\n },\n },\n}\n\nROOT_URLCONF = \"config.urls\"\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [],\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": [\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.request\",\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n ],\n },\n },\n]\n\nWSGI_APPLICATION = \"config.wsgi.application\"\n\n# Database\n# https://docs.djangoproject.com/en/3.2/ref/settings/#databases\n\nDATABASES = {\n \"default\": {\n \"ENGINE\": \"django.db.backends.postgresql\",\n \"NAME\": os.getenv(\"POSTGRES_NAME\", \"postgres\"),\n \"USER\": os.getenv(\"POSTGRES_USER\", \"postgres\"),\n \"PASSWORD\": os.getenv(\"POSTGRES_PASSWORD\", \"postgres\"),\n \"HOST\": os.getenv(\"POSTGRES_HOST\", \"db\"),\n \"PORT\": os.getenv(\"POSTGRES_PORT\", \"5432\"),\n }\n}\n\n# Password validation\n# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n \"NAME\": \"django.contrib.auth.password_validation.UserAttributeSimilarityValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.MinimumLengthValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.CommonPasswordValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.NumericPasswordValidator\",\n },\n]\n\n# Internationalization\n# https://docs.djangoproject.com/en/3.2/topics/i18n/\n\nLANGUAGE_CODE = \"en-us\"\n\nTIME_ZONE = \"UTC\"\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/3.2/howto/static-files/\n\nSTATIC_URL = \"/static/\"\n\nSTATIC_ROOT = \"staticfiles\"\n\n# Default primary key field type\n# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field\n\nDEFAULT_AUTO_FIELD = \"django.db.models.BigAutoField\"\n", "path": "src/config/settings.py"}]} | 1,910 | 84 |
gh_patches_debug_15947 | rasdani/github-patches | git_diff | mesonbuild__meson-6952 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`meson_options.txt` cannot be used to specify negative integer values
*Actual:*
When trying to specify a negative number in `meson_options.txt`, I get the error `Arguments may only be string, int, bool, or array of those.`.
*Expected:*
No error is raised, and negative values are accepted.
*Rationale:*
These options are ideal for setting defaults for build configuration via `#mesondefine`. It's fairly common for integer options to treat zero and negative values as having special meanings. And even if not, sometimes you'd like to have negative values as options.
*Workaround:*
- Add a second option for the special meaning (ugh!)
- Pass the option as a string, and use `#define` instead of `#mesondefine` (ugh!)
*Details:*
The error is raised here:
https://github.com/mesonbuild/meson/blob/6e865fc08d0d13b3846d00e80c9f3ea090645fb8/mesonbuild/optinterpreter.py#L170
The culprit looks like it may be here: https://github.com/mesonbuild/meson/blob/6e865fc08d0d13b3846d00e80c9f3ea090645fb8/mesonbuild/mparser.py#L107
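
For illustration, an option as simple as `option('offset', type : 'integer', value : -1)` is enough to trigger the error. One plausible direction — a sketch under the assumption that the parser wraps the literal in a unary-minus node whose `value` attribute holds the wrapped `NumberNode`; the real patch may look different — would be to fold that node in `reduce_single`:

```python
def reduce_single(self, arg):
    if isinstance(arg, str):
        return arg
    elif isinstance(arg, (mparser.StringNode, mparser.BooleanNode,
                          mparser.NumberNode)):
        return arg.value
    elif isinstance(arg, mparser.UMinusNode):  # assumed node type for a leading '-'
        res = self.reduce_single(arg.value)
        if not isinstance(res, int):
            raise OptionException('Token after "-" is not a number.')
        return -res  # fold the sign back into the reduced value
    elif isinstance(arg, mparser.ArrayNode):
        return [self.reduce_single(curarg) for curarg in arg.args.arguments]
    else:
        raise OptionException('Arguments may only be string, int, bool, or array of those.')
```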
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mesonbuild/optinterpreter.py`
Content:
```
1 # Copyright 2013-2014 The Meson development team
2
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6
7 # http://www.apache.org/licenses/LICENSE-2.0
8
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import re
16 import functools
17 import typing as T
18
19 from . import mparser
20 from . import coredata
21 from . import mesonlib
22 from . import compilers
23
24 forbidden_option_names = set(coredata.builtin_options.keys())
25 forbidden_prefixes = [lang + '_' for lang in compilers.all_languages] + ['b_', 'backend_']
26 reserved_prefixes = ['cross_']
27
28 def is_invalid_name(name: str, *, log: bool = True) -> bool:
29 if name in forbidden_option_names:
30 return True
31 pref = name.split('_')[0] + '_'
32 if pref in forbidden_prefixes:
33 return True
34 if pref in reserved_prefixes:
35 if log:
36 from . import mlog
37 mlog.deprecation('Option uses prefix "%s", which is reserved for Meson. This will become an error in the future.' % pref)
38 return False
39
40 class OptionException(mesonlib.MesonException):
41 pass
42
43
44 def permitted_kwargs(permitted):
45 """Function that validates kwargs for options."""
46 def _wraps(func):
47 @functools.wraps(func)
48 def _inner(name, description, kwargs):
49 bad = [a for a in kwargs.keys() if a not in permitted]
50 if bad:
51 raise OptionException('Invalid kwargs for option "{}": "{}"'.format(
52 name, ' '.join(bad)))
53 return func(description, kwargs)
54 return _inner
55 return _wraps
56
57
58 optname_regex = re.compile('[^a-zA-Z0-9_-]')
59
60 @permitted_kwargs({'value', 'yield'})
61 def StringParser(description, kwargs):
62 return coredata.UserStringOption(description,
63 kwargs.get('value', ''),
64 kwargs.get('choices', []),
65 kwargs.get('yield', coredata.default_yielding))
66
67 @permitted_kwargs({'value', 'yield'})
68 def BooleanParser(description, kwargs):
69 return coredata.UserBooleanOption(description,
70 kwargs.get('value', True),
71 kwargs.get('yield', coredata.default_yielding))
72
73 @permitted_kwargs({'value', 'yield', 'choices'})
74 def ComboParser(description, kwargs):
75 if 'choices' not in kwargs:
76 raise OptionException('Combo option missing "choices" keyword.')
77 choices = kwargs['choices']
78 if not isinstance(choices, list):
79 raise OptionException('Combo choices must be an array.')
80 for i in choices:
81 if not isinstance(i, str):
82 raise OptionException('Combo choice elements must be strings.')
83 return coredata.UserComboOption(description,
84 choices,
85 kwargs.get('value', choices[0]),
86 kwargs.get('yield', coredata.default_yielding),)
87
88
89 @permitted_kwargs({'value', 'min', 'max', 'yield'})
90 def IntegerParser(description, kwargs):
91 if 'value' not in kwargs:
92 raise OptionException('Integer option must contain value argument.')
93 inttuple = (kwargs.get('min', None), kwargs.get('max', None), kwargs['value'])
94 return coredata.UserIntegerOption(description,
95 inttuple,
96 kwargs.get('yield', coredata.default_yielding))
97
98 # FIXME: Cannot use FeatureNew while parsing options because we parse it before
99 # reading options in project(). See func_project() in interpreter.py
100 #@FeatureNew('array type option()', '0.44.0')
101 @permitted_kwargs({'value', 'yield', 'choices'})
102 def string_array_parser(description, kwargs):
103 if 'choices' in kwargs:
104 choices = kwargs['choices']
105 if not isinstance(choices, list):
106 raise OptionException('Array choices must be an array.')
107 for i in choices:
108 if not isinstance(i, str):
109 raise OptionException('Array choice elements must be strings.')
110 value = kwargs.get('value', choices)
111 else:
112 choices = None
113 value = kwargs.get('value', [])
114 if not isinstance(value, list):
115 raise OptionException('Array choices must be passed as an array.')
116 return coredata.UserArrayOption(description,
117 value,
118 choices=choices,
119 yielding=kwargs.get('yield', coredata.default_yielding))
120
121 @permitted_kwargs({'value', 'yield'})
122 def FeatureParser(description, kwargs):
123 return coredata.UserFeatureOption(description,
124 kwargs.get('value', 'auto'),
125 yielding=kwargs.get('yield', coredata.default_yielding))
126
127 option_types = {'string': StringParser,
128 'boolean': BooleanParser,
129 'combo': ComboParser,
130 'integer': IntegerParser,
131 'array': string_array_parser,
132 'feature': FeatureParser,
133 } # type: T.Dict[str, T.Callable[[str, T.Dict], coredata.UserOption]]
134
135 class OptionInterpreter:
136 def __init__(self, subproject):
137 self.options = {}
138 self.subproject = subproject
139
140 def process(self, option_file):
141 try:
142 with open(option_file, 'r', encoding='utf8') as f:
143 ast = mparser.Parser(f.read(), option_file).parse()
144 except mesonlib.MesonException as me:
145 me.file = option_file
146 raise me
147 if not isinstance(ast, mparser.CodeBlockNode):
148 e = OptionException('Option file is malformed.')
149 e.lineno = ast.lineno()
150 e.file = option_file
151 raise e
152 for cur in ast.lines:
153 try:
154 self.evaluate_statement(cur)
155 except Exception as e:
156 e.lineno = cur.lineno
157 e.colno = cur.colno
158 e.file = option_file
159 raise e
160
161 def reduce_single(self, arg):
162 if isinstance(arg, str):
163 return arg
164 elif isinstance(arg, (mparser.StringNode, mparser.BooleanNode,
165 mparser.NumberNode)):
166 return arg.value
167 elif isinstance(arg, mparser.ArrayNode):
168 return [self.reduce_single(curarg) for curarg in arg.args.arguments]
169 else:
170 raise OptionException('Arguments may only be string, int, bool, or array of those.')
171
172 def reduce_arguments(self, args):
173 assert(isinstance(args, mparser.ArgumentNode))
174 if args.incorrect_order():
175 raise OptionException('All keyword arguments must be after positional arguments.')
176 reduced_pos = [self.reduce_single(arg) for arg in args.arguments]
177 reduced_kw = {}
178 for key in args.kwargs.keys():
179 if not isinstance(key, mparser.IdNode):
180 raise OptionException('Keyword argument name is not a string.')
181 a = args.kwargs[key]
182 reduced_kw[key.value] = self.reduce_single(a)
183 return reduced_pos, reduced_kw
184
185 def evaluate_statement(self, node):
186 if not isinstance(node, mparser.FunctionNode):
187 raise OptionException('Option file may only contain option definitions')
188 func_name = node.func_name
189 if func_name != 'option':
190 raise OptionException('Only calls to option() are allowed in option files.')
191 (posargs, kwargs) = self.reduce_arguments(node.args)
192
193 # FIXME: Cannot use FeatureNew while parsing options because we parse
194 # it before reading options in project(). See func_project() in
195 # interpreter.py
196 #if 'yield' in kwargs:
197 # FeatureNew('option yield', '0.45.0').use(self.subproject)
198
199 if 'type' not in kwargs:
200 raise OptionException('Option call missing mandatory "type" keyword argument')
201 opt_type = kwargs.pop('type')
202 if opt_type not in option_types:
203 raise OptionException('Unknown type %s.' % opt_type)
204 if len(posargs) != 1:
205 raise OptionException('Option() must have one (and only one) positional argument')
206 opt_name = posargs[0]
207 if not isinstance(opt_name, str):
208 raise OptionException('Positional argument must be a string.')
209 if optname_regex.search(opt_name) is not None:
210 raise OptionException('Option names can only contain letters, numbers or dashes.')
211 if is_invalid_name(opt_name):
212 raise OptionException('Option name %s is reserved.' % opt_name)
213 if self.subproject != '':
214 opt_name = self.subproject + ':' + opt_name
215 opt = option_types[opt_type](opt_name, kwargs.pop('description', ''), kwargs)
216 if opt.description == '':
217 opt.description = opt_name
218 self.options[opt_name] = opt
219
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mesonbuild/optinterpreter.py b/mesonbuild/optinterpreter.py
--- a/mesonbuild/optinterpreter.py
+++ b/mesonbuild/optinterpreter.py
@@ -166,6 +166,16 @@
return arg.value
elif isinstance(arg, mparser.ArrayNode):
return [self.reduce_single(curarg) for curarg in arg.args.arguments]
+ elif isinstance(arg, mparser.UMinusNode):
+ res = self.reduce_single(arg.value)
+ if not isinstance(res, (int, float)):
+ raise OptionException('Token after "-" is not a number')
+ return -res
+ elif isinstance(arg, mparser.NotNode):
+ res = self.reduce_single(arg.value)
+ if not isinstance(res, bool):
+ raise OptionException('Token after "not" is not a a boolean')
+ return not res
else:
raise OptionException('Arguments may only be string, int, bool, or array of those.')
| {"golden_diff": "diff --git a/mesonbuild/optinterpreter.py b/mesonbuild/optinterpreter.py\n--- a/mesonbuild/optinterpreter.py\n+++ b/mesonbuild/optinterpreter.py\n@@ -166,6 +166,16 @@\n return arg.value\n elif isinstance(arg, mparser.ArrayNode):\n return [self.reduce_single(curarg) for curarg in arg.args.arguments]\n+ elif isinstance(arg, mparser.UMinusNode):\n+ res = self.reduce_single(arg.value)\n+ if not isinstance(res, (int, float)):\n+ raise OptionException('Token after \"-\" is not a number')\n+ return -res\n+ elif isinstance(arg, mparser.NotNode):\n+ res = self.reduce_single(arg.value)\n+ if not isinstance(res, bool):\n+ raise OptionException('Token after \"not\" is not a a boolean')\n+ return not res\n else:\n raise OptionException('Arguments may only be string, int, bool, or array of those.')\n", "issue": "`meson_options.txt` cannot be used to specify negative integer values\n*Actual:*\r\nWhen trying to specify a negative number in `meson_options.txt`, I get the error `Arguments may only be string, int, bool, or array of those.`.\r\n\r\n*Expected:*\r\nNo error is raised, and negative values are accepted.\r\n\r\n*Rationale:*\r\nThese options are ideal for setting defaults for build configuration via `#mesondefine`. It's fairly common for integer options to treat zero and negative values as having special meanings. And even if not, sometimes you'd like to have negative values as options.\r\n\r\n*Workaround:*\r\n- Add a second option for the special meaning (ugh!)\r\n- Pass the option as a string, and use `#define` instead of `#mesondefine` (ugh!)\r\n\r\n*Details:*\r\nThe error is raised here:\r\nhttps://github.com/mesonbuild/meson/blob/6e865fc08d0d13b3846d00e80c9f3ea090645fb8/mesonbuild/optinterpreter.py#L170\r\n\r\nThe culprit looks like it may be here: https://github.com/mesonbuild/meson/blob/6e865fc08d0d13b3846d00e80c9f3ea090645fb8/mesonbuild/mparser.py#L107\n", "before_files": [{"content": "# Copyright 2013-2014 The Meson development team\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport re\nimport functools\nimport typing as T\n\nfrom . import mparser\nfrom . import coredata\nfrom . import mesonlib\nfrom . import compilers\n\nforbidden_option_names = set(coredata.builtin_options.keys())\nforbidden_prefixes = [lang + '_' for lang in compilers.all_languages] + ['b_', 'backend_']\nreserved_prefixes = ['cross_']\n\ndef is_invalid_name(name: str, *, log: bool = True) -> bool:\n if name in forbidden_option_names:\n return True\n pref = name.split('_')[0] + '_'\n if pref in forbidden_prefixes:\n return True\n if pref in reserved_prefixes:\n if log:\n from . import mlog\n mlog.deprecation('Option uses prefix \"%s\", which is reserved for Meson. This will become an error in the future.' 
% pref)\n return False\n\nclass OptionException(mesonlib.MesonException):\n pass\n\n\ndef permitted_kwargs(permitted):\n \"\"\"Function that validates kwargs for options.\"\"\"\n def _wraps(func):\n @functools.wraps(func)\n def _inner(name, description, kwargs):\n bad = [a for a in kwargs.keys() if a not in permitted]\n if bad:\n raise OptionException('Invalid kwargs for option \"{}\": \"{}\"'.format(\n name, ' '.join(bad)))\n return func(description, kwargs)\n return _inner\n return _wraps\n\n\noptname_regex = re.compile('[^a-zA-Z0-9_-]')\n\n@permitted_kwargs({'value', 'yield'})\ndef StringParser(description, kwargs):\n return coredata.UserStringOption(description,\n kwargs.get('value', ''),\n kwargs.get('choices', []),\n kwargs.get('yield', coredata.default_yielding))\n\n@permitted_kwargs({'value', 'yield'})\ndef BooleanParser(description, kwargs):\n return coredata.UserBooleanOption(description,\n kwargs.get('value', True),\n kwargs.get('yield', coredata.default_yielding))\n\n@permitted_kwargs({'value', 'yield', 'choices'})\ndef ComboParser(description, kwargs):\n if 'choices' not in kwargs:\n raise OptionException('Combo option missing \"choices\" keyword.')\n choices = kwargs['choices']\n if not isinstance(choices, list):\n raise OptionException('Combo choices must be an array.')\n for i in choices:\n if not isinstance(i, str):\n raise OptionException('Combo choice elements must be strings.')\n return coredata.UserComboOption(description,\n choices,\n kwargs.get('value', choices[0]),\n kwargs.get('yield', coredata.default_yielding),)\n\n\n@permitted_kwargs({'value', 'min', 'max', 'yield'})\ndef IntegerParser(description, kwargs):\n if 'value' not in kwargs:\n raise OptionException('Integer option must contain value argument.')\n inttuple = (kwargs.get('min', None), kwargs.get('max', None), kwargs['value'])\n return coredata.UserIntegerOption(description,\n inttuple,\n kwargs.get('yield', coredata.default_yielding))\n\n# FIXME: Cannot use FeatureNew while parsing options because we parse it before\n# reading options in project(). 
See func_project() in interpreter.py\n#@FeatureNew('array type option()', '0.44.0')\n@permitted_kwargs({'value', 'yield', 'choices'})\ndef string_array_parser(description, kwargs):\n if 'choices' in kwargs:\n choices = kwargs['choices']\n if not isinstance(choices, list):\n raise OptionException('Array choices must be an array.')\n for i in choices:\n if not isinstance(i, str):\n raise OptionException('Array choice elements must be strings.')\n value = kwargs.get('value', choices)\n else:\n choices = None\n value = kwargs.get('value', [])\n if not isinstance(value, list):\n raise OptionException('Array choices must be passed as an array.')\n return coredata.UserArrayOption(description,\n value,\n choices=choices,\n yielding=kwargs.get('yield', coredata.default_yielding))\n\n@permitted_kwargs({'value', 'yield'})\ndef FeatureParser(description, kwargs):\n return coredata.UserFeatureOption(description,\n kwargs.get('value', 'auto'),\n yielding=kwargs.get('yield', coredata.default_yielding))\n\noption_types = {'string': StringParser,\n 'boolean': BooleanParser,\n 'combo': ComboParser,\n 'integer': IntegerParser,\n 'array': string_array_parser,\n 'feature': FeatureParser,\n } # type: T.Dict[str, T.Callable[[str, T.Dict], coredata.UserOption]]\n\nclass OptionInterpreter:\n def __init__(self, subproject):\n self.options = {}\n self.subproject = subproject\n\n def process(self, option_file):\n try:\n with open(option_file, 'r', encoding='utf8') as f:\n ast = mparser.Parser(f.read(), option_file).parse()\n except mesonlib.MesonException as me:\n me.file = option_file\n raise me\n if not isinstance(ast, mparser.CodeBlockNode):\n e = OptionException('Option file is malformed.')\n e.lineno = ast.lineno()\n e.file = option_file\n raise e\n for cur in ast.lines:\n try:\n self.evaluate_statement(cur)\n except Exception as e:\n e.lineno = cur.lineno\n e.colno = cur.colno\n e.file = option_file\n raise e\n\n def reduce_single(self, arg):\n if isinstance(arg, str):\n return arg\n elif isinstance(arg, (mparser.StringNode, mparser.BooleanNode,\n mparser.NumberNode)):\n return arg.value\n elif isinstance(arg, mparser.ArrayNode):\n return [self.reduce_single(curarg) for curarg in arg.args.arguments]\n else:\n raise OptionException('Arguments may only be string, int, bool, or array of those.')\n\n def reduce_arguments(self, args):\n assert(isinstance(args, mparser.ArgumentNode))\n if args.incorrect_order():\n raise OptionException('All keyword arguments must be after positional arguments.')\n reduced_pos = [self.reduce_single(arg) for arg in args.arguments]\n reduced_kw = {}\n for key in args.kwargs.keys():\n if not isinstance(key, mparser.IdNode):\n raise OptionException('Keyword argument name is not a string.')\n a = args.kwargs[key]\n reduced_kw[key.value] = self.reduce_single(a)\n return reduced_pos, reduced_kw\n\n def evaluate_statement(self, node):\n if not isinstance(node, mparser.FunctionNode):\n raise OptionException('Option file may only contain option definitions')\n func_name = node.func_name\n if func_name != 'option':\n raise OptionException('Only calls to option() are allowed in option files.')\n (posargs, kwargs) = self.reduce_arguments(node.args)\n\n # FIXME: Cannot use FeatureNew while parsing options because we parse\n # it before reading options in project(). 
See func_project() in\n # interpreter.py\n #if 'yield' in kwargs:\n # FeatureNew('option yield', '0.45.0').use(self.subproject)\n\n if 'type' not in kwargs:\n raise OptionException('Option call missing mandatory \"type\" keyword argument')\n opt_type = kwargs.pop('type')\n if opt_type not in option_types:\n raise OptionException('Unknown type %s.' % opt_type)\n if len(posargs) != 1:\n raise OptionException('Option() must have one (and only one) positional argument')\n opt_name = posargs[0]\n if not isinstance(opt_name, str):\n raise OptionException('Positional argument must be a string.')\n if optname_regex.search(opt_name) is not None:\n raise OptionException('Option names can only contain letters, numbers or dashes.')\n if is_invalid_name(opt_name):\n raise OptionException('Option name %s is reserved.' % opt_name)\n if self.subproject != '':\n opt_name = self.subproject + ':' + opt_name\n opt = option_types[opt_type](opt_name, kwargs.pop('description', ''), kwargs)\n if opt.description == '':\n opt.description = opt_name\n self.options[opt_name] = opt\n", "path": "mesonbuild/optinterpreter.py"}], "after_files": [{"content": "# Copyright 2013-2014 The Meson development team\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport re\nimport functools\nimport typing as T\n\nfrom . import mparser\nfrom . import coredata\nfrom . import mesonlib\nfrom . import compilers\n\nforbidden_option_names = set(coredata.builtin_options.keys())\nforbidden_prefixes = [lang + '_' for lang in compilers.all_languages] + ['b_', 'backend_']\nreserved_prefixes = ['cross_']\n\ndef is_invalid_name(name: str, *, log: bool = True) -> bool:\n if name in forbidden_option_names:\n return True\n pref = name.split('_')[0] + '_'\n if pref in forbidden_prefixes:\n return True\n if pref in reserved_prefixes:\n if log:\n from . import mlog\n mlog.deprecation('Option uses prefix \"%s\", which is reserved for Meson. This will become an error in the future.' 
% pref)\n return False\n\nclass OptionException(mesonlib.MesonException):\n pass\n\n\ndef permitted_kwargs(permitted):\n \"\"\"Function that validates kwargs for options.\"\"\"\n def _wraps(func):\n @functools.wraps(func)\n def _inner(name, description, kwargs):\n bad = [a for a in kwargs.keys() if a not in permitted]\n if bad:\n raise OptionException('Invalid kwargs for option \"{}\": \"{}\"'.format(\n name, ' '.join(bad)))\n return func(description, kwargs)\n return _inner\n return _wraps\n\n\noptname_regex = re.compile('[^a-zA-Z0-9_-]')\n\n@permitted_kwargs({'value', 'yield'})\ndef StringParser(description, kwargs):\n return coredata.UserStringOption(description,\n kwargs.get('value', ''),\n kwargs.get('choices', []),\n kwargs.get('yield', coredata.default_yielding))\n\n@permitted_kwargs({'value', 'yield'})\ndef BooleanParser(description, kwargs):\n return coredata.UserBooleanOption(description,\n kwargs.get('value', True),\n kwargs.get('yield', coredata.default_yielding))\n\n@permitted_kwargs({'value', 'yield', 'choices'})\ndef ComboParser(description, kwargs):\n if 'choices' not in kwargs:\n raise OptionException('Combo option missing \"choices\" keyword.')\n choices = kwargs['choices']\n if not isinstance(choices, list):\n raise OptionException('Combo choices must be an array.')\n for i in choices:\n if not isinstance(i, str):\n raise OptionException('Combo choice elements must be strings.')\n return coredata.UserComboOption(description,\n choices,\n kwargs.get('value', choices[0]),\n kwargs.get('yield', coredata.default_yielding),)\n\n\n@permitted_kwargs({'value', 'min', 'max', 'yield'})\ndef IntegerParser(description, kwargs):\n if 'value' not in kwargs:\n raise OptionException('Integer option must contain value argument.')\n inttuple = (kwargs.get('min', None), kwargs.get('max', None), kwargs['value'])\n return coredata.UserIntegerOption(description,\n inttuple,\n kwargs.get('yield', coredata.default_yielding))\n\n# FIXME: Cannot use FeatureNew while parsing options because we parse it before\n# reading options in project(). 
See func_project() in interpreter.py\n#@FeatureNew('array type option()', '0.44.0')\n@permitted_kwargs({'value', 'yield', 'choices'})\ndef string_array_parser(description, kwargs):\n if 'choices' in kwargs:\n choices = kwargs['choices']\n if not isinstance(choices, list):\n raise OptionException('Array choices must be an array.')\n for i in choices:\n if not isinstance(i, str):\n raise OptionException('Array choice elements must be strings.')\n value = kwargs.get('value', choices)\n else:\n choices = None\n value = kwargs.get('value', [])\n if not isinstance(value, list):\n raise OptionException('Array choices must be passed as an array.')\n return coredata.UserArrayOption(description,\n value,\n choices=choices,\n yielding=kwargs.get('yield', coredata.default_yielding))\n\n@permitted_kwargs({'value', 'yield'})\ndef FeatureParser(description, kwargs):\n return coredata.UserFeatureOption(description,\n kwargs.get('value', 'auto'),\n yielding=kwargs.get('yield', coredata.default_yielding))\n\noption_types = {'string': StringParser,\n 'boolean': BooleanParser,\n 'combo': ComboParser,\n 'integer': IntegerParser,\n 'array': string_array_parser,\n 'feature': FeatureParser,\n } # type: T.Dict[str, T.Callable[[str, T.Dict], coredata.UserOption]]\n\nclass OptionInterpreter:\n def __init__(self, subproject):\n self.options = {}\n self.subproject = subproject\n\n def process(self, option_file):\n try:\n with open(option_file, 'r', encoding='utf8') as f:\n ast = mparser.Parser(f.read(), option_file).parse()\n except mesonlib.MesonException as me:\n me.file = option_file\n raise me\n if not isinstance(ast, mparser.CodeBlockNode):\n e = OptionException('Option file is malformed.')\n e.lineno = ast.lineno()\n e.file = option_file\n raise e\n for cur in ast.lines:\n try:\n self.evaluate_statement(cur)\n except Exception as e:\n e.lineno = cur.lineno\n e.colno = cur.colno\n e.file = option_file\n raise e\n\n def reduce_single(self, arg):\n if isinstance(arg, str):\n return arg\n elif isinstance(arg, (mparser.StringNode, mparser.BooleanNode,\n mparser.NumberNode)):\n return arg.value\n elif isinstance(arg, mparser.ArrayNode):\n return [self.reduce_single(curarg) for curarg in arg.args.arguments]\n elif isinstance(arg, mparser.UMinusNode):\n res = self.reduce_single(arg.value)\n if not isinstance(res, (int, float)):\n raise OptionException('Token after \"-\" is not a number')\n return -res\n elif isinstance(arg, mparser.NotNode):\n res = self.reduce_single(arg.value)\n if not isinstance(res, bool):\n raise OptionException('Token after \"not\" is not a a boolean')\n return not res\n else:\n raise OptionException('Arguments may only be string, int, bool, or array of those.')\n\n def reduce_arguments(self, args):\n assert(isinstance(args, mparser.ArgumentNode))\n if args.incorrect_order():\n raise OptionException('All keyword arguments must be after positional arguments.')\n reduced_pos = [self.reduce_single(arg) for arg in args.arguments]\n reduced_kw = {}\n for key in args.kwargs.keys():\n if not isinstance(key, mparser.IdNode):\n raise OptionException('Keyword argument name is not a string.')\n a = args.kwargs[key]\n reduced_kw[key.value] = self.reduce_single(a)\n return reduced_pos, reduced_kw\n\n def evaluate_statement(self, node):\n if not isinstance(node, mparser.FunctionNode):\n raise OptionException('Option file may only contain option definitions')\n func_name = node.func_name\n if func_name != 'option':\n raise OptionException('Only calls to option() are allowed in option files.')\n (posargs, kwargs) 
= self.reduce_arguments(node.args)\n\n # FIXME: Cannot use FeatureNew while parsing options because we parse\n # it before reading options in project(). See func_project() in\n # interpreter.py\n #if 'yield' in kwargs:\n # FeatureNew('option yield', '0.45.0').use(self.subproject)\n\n if 'type' not in kwargs:\n raise OptionException('Option call missing mandatory \"type\" keyword argument')\n opt_type = kwargs.pop('type')\n if opt_type not in option_types:\n raise OptionException('Unknown type %s.' % opt_type)\n if len(posargs) != 1:\n raise OptionException('Option() must have one (and only one) positional argument')\n opt_name = posargs[0]\n if not isinstance(opt_name, str):\n raise OptionException('Positional argument must be a string.')\n if optname_regex.search(opt_name) is not None:\n raise OptionException('Option names can only contain letters, numbers or dashes.')\n if is_invalid_name(opt_name):\n raise OptionException('Option name %s is reserved.' % opt_name)\n if self.subproject != '':\n opt_name = self.subproject + ':' + opt_name\n opt = option_types[opt_type](opt_name, kwargs.pop('description', ''), kwargs)\n if opt.description == '':\n opt.description = opt_name\n self.options[opt_name] = opt\n", "path": "mesonbuild/optinterpreter.py"}]} | 3,016 | 214 |
gh_patches_debug_61106 | rasdani/github-patches | git_diff | pre-commit__pre-commit-2272 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add GIT_HTTP_PROXY_AUTHMETHOD to kept environment variables
### describe your issue
On the cluster I work on, there’s a proxy. It’s… Let’s just say that proxy is being a pain, and to make it work we have to set the environment variable GIT_HTTP_PROXY_AUTHMETHOD.
In pre_commit/git.py however, only a small subset of variables are kept, and that one is not among them. So, sure, I can (and did) edit the script to keep that one too (and it works now) but it’s not ideal.
### pre-commit --version
pre-commit 2.17.0
### .pre-commit-config.yaml
```yaml
Not relevant
```
### ~/.cache/pre-commit/pre-commit.log (if present)
_No response_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pre_commit/git.py`
Content:
```
1 from __future__ import annotations
2
3 import logging
4 import os.path
5 import sys
6 from typing import MutableMapping
7
8 from pre_commit.errors import FatalError
9 from pre_commit.util import CalledProcessError
10 from pre_commit.util import cmd_output
11 from pre_commit.util import cmd_output_b
12
13 logger = logging.getLogger(__name__)
14
15 # see #2046
16 NO_FS_MONITOR = ('-c', 'core.useBuiltinFSMonitor=false')
17
18
19 def zsplit(s: str) -> list[str]:
20 s = s.strip('\0')
21 if s:
22 return s.split('\0')
23 else:
24 return []
25
26
27 def no_git_env(
28 _env: MutableMapping[str, str] | None = None,
29 ) -> dict[str, str]:
30 # Too many bugs dealing with environment variables and GIT:
31 # https://github.com/pre-commit/pre-commit/issues/300
32 # In git 2.6.3 (maybe others), git exports GIT_WORK_TREE while running
33 # pre-commit hooks
34 # In git 1.9.1 (maybe others), git exports GIT_DIR and GIT_INDEX_FILE
35 # while running pre-commit hooks in submodules.
36 # GIT_DIR: Causes git clone to clone wrong thing
37 # GIT_INDEX_FILE: Causes 'error invalid object ...' during commit
38 _env = _env if _env is not None else os.environ
39 return {
40 k: v for k, v in _env.items()
41 if not k.startswith('GIT_') or
42 k.startswith(('GIT_CONFIG_KEY_', 'GIT_CONFIG_VALUE_')) or
43 k in {
44 'GIT_EXEC_PATH', 'GIT_SSH', 'GIT_SSH_COMMAND', 'GIT_SSL_CAINFO',
45 'GIT_SSL_NO_VERIFY', 'GIT_CONFIG_COUNT',
46 }
47 }
48
49
50 def get_root() -> str:
51 # Git 2.25 introduced a change to "rev-parse --show-toplevel" that exposed
52 # underlying volumes for Windows drives mapped with SUBST. We use
53 # "rev-parse --show-cdup" to get the appropriate path, but must perform
54 # an extra check to see if we are in the .git directory.
55 try:
56 root = os.path.abspath(
57 cmd_output('git', 'rev-parse', '--show-cdup')[1].strip(),
58 )
59 git_dir = os.path.abspath(get_git_dir())
60 except CalledProcessError:
61 raise FatalError(
62 'git failed. Is it installed, and are you in a Git repository '
63 'directory?',
64 )
65 if os.path.samefile(root, git_dir):
66 raise FatalError(
67 'git toplevel unexpectedly empty! make sure you are not '
68 'inside the `.git` directory of your repository.',
69 )
70 return root
71
72
73 def get_git_dir(git_root: str = '.') -> str:
74 opts = ('--git-common-dir', '--git-dir')
75 _, out, _ = cmd_output('git', 'rev-parse', *opts, cwd=git_root)
76 for line, opt in zip(out.splitlines(), opts):
77 if line != opt: # pragma: no branch (git < 2.5)
78 return os.path.normpath(os.path.join(git_root, line))
79 else:
80 raise AssertionError('unreachable: no git dir')
81
82
83 def get_remote_url(git_root: str) -> str:
84 _, out, _ = cmd_output('git', 'config', 'remote.origin.url', cwd=git_root)
85 return out.strip()
86
87
88 def is_in_merge_conflict() -> bool:
89 git_dir = get_git_dir('.')
90 return (
91 os.path.exists(os.path.join(git_dir, 'MERGE_MSG')) and
92 os.path.exists(os.path.join(git_dir, 'MERGE_HEAD'))
93 )
94
95
96 def parse_merge_msg_for_conflicts(merge_msg: bytes) -> list[str]:
97 # Conflicted files start with tabs
98 return [
99 line.lstrip(b'#').strip().decode()
100 for line in merge_msg.splitlines()
101 # '#\t' for git 2.4.1
102 if line.startswith((b'\t', b'#\t'))
103 ]
104
105
106 def get_conflicted_files() -> set[str]:
107 logger.info('Checking merge-conflict files only.')
108 # Need to get the conflicted files from the MERGE_MSG because they could
109 # have resolved the conflict by choosing one side or the other
110 with open(os.path.join(get_git_dir('.'), 'MERGE_MSG'), 'rb') as f:
111 merge_msg = f.read()
112 merge_conflict_filenames = parse_merge_msg_for_conflicts(merge_msg)
113
114 # This will get the rest of the changes made after the merge.
115 # If they resolved the merge conflict by choosing a mesh of both sides
116 # this will also include the conflicted files
117 tree_hash = cmd_output('git', 'write-tree')[1].strip()
118 merge_diff_filenames = zsplit(
119 cmd_output(
120 'git', 'diff', '--name-only', '--no-ext-diff', '-z',
121 '-m', tree_hash, 'HEAD', 'MERGE_HEAD',
122 )[1],
123 )
124 return set(merge_conflict_filenames) | set(merge_diff_filenames)
125
126
127 def get_staged_files(cwd: str | None = None) -> list[str]:
128 return zsplit(
129 cmd_output(
130 'git', 'diff', '--staged', '--name-only', '--no-ext-diff', '-z',
131 # Everything except for D
132 '--diff-filter=ACMRTUXB',
133 cwd=cwd,
134 )[1],
135 )
136
137
138 def intent_to_add_files() -> list[str]:
139 _, stdout, _ = cmd_output(
140 'git', 'status', '--ignore-submodules', '--porcelain', '-z',
141 )
142 parts = list(reversed(zsplit(stdout)))
143 intent_to_add = []
144 while parts:
145 line = parts.pop()
146 status, filename = line[:3], line[3:]
147 if status[0] in {'C', 'R'}: # renames / moves have an additional arg
148 parts.pop()
149 if status[1] == 'A':
150 intent_to_add.append(filename)
151 return intent_to_add
152
153
154 def get_all_files() -> list[str]:
155 return zsplit(cmd_output('git', 'ls-files', '-z')[1])
156
157
158 def get_changed_files(old: str, new: str) -> list[str]:
159 diff_cmd = ('git', 'diff', '--name-only', '--no-ext-diff', '-z')
160 try:
161 _, out, _ = cmd_output(*diff_cmd, f'{old}...{new}')
162 except CalledProcessError: # pragma: no cover (new git)
163 # on newer git where old and new do not have a merge base git fails
164 # so we try a full diff (this is what old git did for us!)
165 _, out, _ = cmd_output(*diff_cmd, f'{old}..{new}')
166
167 return zsplit(out)
168
169
170 def head_rev(remote: str) -> str:
171 _, out, _ = cmd_output('git', 'ls-remote', '--exit-code', remote, 'HEAD')
172 return out.split()[0]
173
174
175 def has_diff(*args: str, repo: str = '.') -> bool:
176 cmd = ('git', 'diff', '--quiet', '--no-ext-diff', *args)
177 return cmd_output_b(*cmd, cwd=repo, retcode=None)[0] == 1
178
179
180 def has_core_hookpaths_set() -> bool:
181 _, out, _ = cmd_output_b('git', 'config', 'core.hooksPath', retcode=None)
182 return bool(out.strip())
183
184
185 def init_repo(path: str, remote: str) -> None:
186 if os.path.isdir(remote):
187 remote = os.path.abspath(remote)
188
189 git = ('git', *NO_FS_MONITOR)
190 env = no_git_env()
191 # avoid the user's template so that hooks do not recurse
192 cmd_output_b(*git, 'init', '--template=', path, env=env)
193 cmd_output_b(*git, 'remote', 'add', 'origin', remote, cwd=path, env=env)
194
195
196 def commit(repo: str = '.') -> None:
197 env = no_git_env()
198 name, email = 'pre-commit', '[email protected]'
199 env['GIT_AUTHOR_NAME'] = env['GIT_COMMITTER_NAME'] = name
200 env['GIT_AUTHOR_EMAIL'] = env['GIT_COMMITTER_EMAIL'] = email
201 cmd = ('git', 'commit', '--no-edit', '--no-gpg-sign', '-n', '-minit')
202 cmd_output_b(*cmd, cwd=repo, env=env)
203
204
205 def git_path(name: str, repo: str = '.') -> str:
206 _, out, _ = cmd_output('git', 'rev-parse', '--git-path', name, cwd=repo)
207 return os.path.join(repo, out.strip())
208
209
210 def check_for_cygwin_mismatch() -> None:
211 """See https://github.com/pre-commit/pre-commit/issues/354"""
212 if sys.platform in ('cygwin', 'win32'): # pragma: no cover (windows)
213 is_cygwin_python = sys.platform == 'cygwin'
214 try:
215 toplevel = get_root()
216 except FatalError: # skip the check if we're not in a git repo
217 return
218 is_cygwin_git = toplevel.startswith('/')
219
220 if is_cygwin_python ^ is_cygwin_git:
221 exe_type = {True: '(cygwin)', False: '(windows)'}
222 logger.warn(
223 f'pre-commit has detected a mix of cygwin python / git\n'
224 f'This combination is not supported, it is likely you will '
225 f'receive an error later in the program.\n'
226 f'Make sure to use cygwin git+python while using cygwin\n'
227 f'These can be installed through the cygwin installer.\n'
228 f' - python {exe_type[is_cygwin_python]}\n'
229 f' - git {exe_type[is_cygwin_git]}\n',
230 )
231
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pre_commit/git.py b/pre_commit/git.py
--- a/pre_commit/git.py
+++ b/pre_commit/git.py
@@ -43,6 +43,7 @@
k in {
'GIT_EXEC_PATH', 'GIT_SSH', 'GIT_SSH_COMMAND', 'GIT_SSL_CAINFO',
'GIT_SSL_NO_VERIFY', 'GIT_CONFIG_COUNT',
+ 'GIT_HTTP_PROXY_AUTHMETHOD',
}
}
| {"golden_diff": "diff --git a/pre_commit/git.py b/pre_commit/git.py\n--- a/pre_commit/git.py\n+++ b/pre_commit/git.py\n@@ -43,6 +43,7 @@\n k in {\n 'GIT_EXEC_PATH', 'GIT_SSH', 'GIT_SSH_COMMAND', 'GIT_SSL_CAINFO',\n 'GIT_SSL_NO_VERIFY', 'GIT_CONFIG_COUNT',\n+ 'GIT_HTTP_PROXY_AUTHMETHOD',\n }\n }\n", "issue": "Add GIT_HTTP_PROXY_AUTHMETHOD to kept environment variables\n### describe your issue\n\nOn the cluster I work on, there\u2019s a proxy. It\u2019s\u2026 Let\u2019s just say that proxy is being a pain, and to make it work we have to set the environment variable GIT_HTTP_PROXY_AUTHMETHOD.\r\n\r\nIn pre_commit/git.py however, only a small subset of variables are kept, and that one is not among them. So, sure, I can (and did) edit the script to keep that one too (and it works now) but it\u2019s not ideal.\n\n### pre-commit --version\n\npre-commit 2.17.0\n\n### .pre-commit-config.yaml\n\n```yaml\nNot relevant\n```\n\n\n### ~/.cache/pre-commit/pre-commit.log (if present)\n\n_No response_\n", "before_files": [{"content": "from __future__ import annotations\n\nimport logging\nimport os.path\nimport sys\nfrom typing import MutableMapping\n\nfrom pre_commit.errors import FatalError\nfrom pre_commit.util import CalledProcessError\nfrom pre_commit.util import cmd_output\nfrom pre_commit.util import cmd_output_b\n\nlogger = logging.getLogger(__name__)\n\n# see #2046\nNO_FS_MONITOR = ('-c', 'core.useBuiltinFSMonitor=false')\n\n\ndef zsplit(s: str) -> list[str]:\n s = s.strip('\\0')\n if s:\n return s.split('\\0')\n else:\n return []\n\n\ndef no_git_env(\n _env: MutableMapping[str, str] | None = None,\n) -> dict[str, str]:\n # Too many bugs dealing with environment variables and GIT:\n # https://github.com/pre-commit/pre-commit/issues/300\n # In git 2.6.3 (maybe others), git exports GIT_WORK_TREE while running\n # pre-commit hooks\n # In git 1.9.1 (maybe others), git exports GIT_DIR and GIT_INDEX_FILE\n # while running pre-commit hooks in submodules.\n # GIT_DIR: Causes git clone to clone wrong thing\n # GIT_INDEX_FILE: Causes 'error invalid object ...' during commit\n _env = _env if _env is not None else os.environ\n return {\n k: v for k, v in _env.items()\n if not k.startswith('GIT_') or\n k.startswith(('GIT_CONFIG_KEY_', 'GIT_CONFIG_VALUE_')) or\n k in {\n 'GIT_EXEC_PATH', 'GIT_SSH', 'GIT_SSH_COMMAND', 'GIT_SSL_CAINFO',\n 'GIT_SSL_NO_VERIFY', 'GIT_CONFIG_COUNT',\n }\n }\n\n\ndef get_root() -> str:\n # Git 2.25 introduced a change to \"rev-parse --show-toplevel\" that exposed\n # underlying volumes for Windows drives mapped with SUBST. We use\n # \"rev-parse --show-cdup\" to get the appropriate path, but must perform\n # an extra check to see if we are in the .git directory.\n try:\n root = os.path.abspath(\n cmd_output('git', 'rev-parse', '--show-cdup')[1].strip(),\n )\n git_dir = os.path.abspath(get_git_dir())\n except CalledProcessError:\n raise FatalError(\n 'git failed. Is it installed, and are you in a Git repository '\n 'directory?',\n )\n if os.path.samefile(root, git_dir):\n raise FatalError(\n 'git toplevel unexpectedly empty! 
make sure you are not '\n 'inside the `.git` directory of your repository.',\n )\n return root\n\n\ndef get_git_dir(git_root: str = '.') -> str:\n opts = ('--git-common-dir', '--git-dir')\n _, out, _ = cmd_output('git', 'rev-parse', *opts, cwd=git_root)\n for line, opt in zip(out.splitlines(), opts):\n if line != opt: # pragma: no branch (git < 2.5)\n return os.path.normpath(os.path.join(git_root, line))\n else:\n raise AssertionError('unreachable: no git dir')\n\n\ndef get_remote_url(git_root: str) -> str:\n _, out, _ = cmd_output('git', 'config', 'remote.origin.url', cwd=git_root)\n return out.strip()\n\n\ndef is_in_merge_conflict() -> bool:\n git_dir = get_git_dir('.')\n return (\n os.path.exists(os.path.join(git_dir, 'MERGE_MSG')) and\n os.path.exists(os.path.join(git_dir, 'MERGE_HEAD'))\n )\n\n\ndef parse_merge_msg_for_conflicts(merge_msg: bytes) -> list[str]:\n # Conflicted files start with tabs\n return [\n line.lstrip(b'#').strip().decode()\n for line in merge_msg.splitlines()\n # '#\\t' for git 2.4.1\n if line.startswith((b'\\t', b'#\\t'))\n ]\n\n\ndef get_conflicted_files() -> set[str]:\n logger.info('Checking merge-conflict files only.')\n # Need to get the conflicted files from the MERGE_MSG because they could\n # have resolved the conflict by choosing one side or the other\n with open(os.path.join(get_git_dir('.'), 'MERGE_MSG'), 'rb') as f:\n merge_msg = f.read()\n merge_conflict_filenames = parse_merge_msg_for_conflicts(merge_msg)\n\n # This will get the rest of the changes made after the merge.\n # If they resolved the merge conflict by choosing a mesh of both sides\n # this will also include the conflicted files\n tree_hash = cmd_output('git', 'write-tree')[1].strip()\n merge_diff_filenames = zsplit(\n cmd_output(\n 'git', 'diff', '--name-only', '--no-ext-diff', '-z',\n '-m', tree_hash, 'HEAD', 'MERGE_HEAD',\n )[1],\n )\n return set(merge_conflict_filenames) | set(merge_diff_filenames)\n\n\ndef get_staged_files(cwd: str | None = None) -> list[str]:\n return zsplit(\n cmd_output(\n 'git', 'diff', '--staged', '--name-only', '--no-ext-diff', '-z',\n # Everything except for D\n '--diff-filter=ACMRTUXB',\n cwd=cwd,\n )[1],\n )\n\n\ndef intent_to_add_files() -> list[str]:\n _, stdout, _ = cmd_output(\n 'git', 'status', '--ignore-submodules', '--porcelain', '-z',\n )\n parts = list(reversed(zsplit(stdout)))\n intent_to_add = []\n while parts:\n line = parts.pop()\n status, filename = line[:3], line[3:]\n if status[0] in {'C', 'R'}: # renames / moves have an additional arg\n parts.pop()\n if status[1] == 'A':\n intent_to_add.append(filename)\n return intent_to_add\n\n\ndef get_all_files() -> list[str]:\n return zsplit(cmd_output('git', 'ls-files', '-z')[1])\n\n\ndef get_changed_files(old: str, new: str) -> list[str]:\n diff_cmd = ('git', 'diff', '--name-only', '--no-ext-diff', '-z')\n try:\n _, out, _ = cmd_output(*diff_cmd, f'{old}...{new}')\n except CalledProcessError: # pragma: no cover (new git)\n # on newer git where old and new do not have a merge base git fails\n # so we try a full diff (this is what old git did for us!)\n _, out, _ = cmd_output(*diff_cmd, f'{old}..{new}')\n\n return zsplit(out)\n\n\ndef head_rev(remote: str) -> str:\n _, out, _ = cmd_output('git', 'ls-remote', '--exit-code', remote, 'HEAD')\n return out.split()[0]\n\n\ndef has_diff(*args: str, repo: str = '.') -> bool:\n cmd = ('git', 'diff', '--quiet', '--no-ext-diff', *args)\n return cmd_output_b(*cmd, cwd=repo, retcode=None)[0] == 1\n\n\ndef has_core_hookpaths_set() -> bool:\n _, out, _ = 
cmd_output_b('git', 'config', 'core.hooksPath', retcode=None)\n return bool(out.strip())\n\n\ndef init_repo(path: str, remote: str) -> None:\n if os.path.isdir(remote):\n remote = os.path.abspath(remote)\n\n git = ('git', *NO_FS_MONITOR)\n env = no_git_env()\n # avoid the user's template so that hooks do not recurse\n cmd_output_b(*git, 'init', '--template=', path, env=env)\n cmd_output_b(*git, 'remote', 'add', 'origin', remote, cwd=path, env=env)\n\n\ndef commit(repo: str = '.') -> None:\n env = no_git_env()\n name, email = 'pre-commit', '[email protected]'\n env['GIT_AUTHOR_NAME'] = env['GIT_COMMITTER_NAME'] = name\n env['GIT_AUTHOR_EMAIL'] = env['GIT_COMMITTER_EMAIL'] = email\n cmd = ('git', 'commit', '--no-edit', '--no-gpg-sign', '-n', '-minit')\n cmd_output_b(*cmd, cwd=repo, env=env)\n\n\ndef git_path(name: str, repo: str = '.') -> str:\n _, out, _ = cmd_output('git', 'rev-parse', '--git-path', name, cwd=repo)\n return os.path.join(repo, out.strip())\n\n\ndef check_for_cygwin_mismatch() -> None:\n \"\"\"See https://github.com/pre-commit/pre-commit/issues/354\"\"\"\n if sys.platform in ('cygwin', 'win32'): # pragma: no cover (windows)\n is_cygwin_python = sys.platform == 'cygwin'\n try:\n toplevel = get_root()\n except FatalError: # skip the check if we're not in a git repo\n return\n is_cygwin_git = toplevel.startswith('/')\n\n if is_cygwin_python ^ is_cygwin_git:\n exe_type = {True: '(cygwin)', False: '(windows)'}\n logger.warn(\n f'pre-commit has detected a mix of cygwin python / git\\n'\n f'This combination is not supported, it is likely you will '\n f'receive an error later in the program.\\n'\n f'Make sure to use cygwin git+python while using cygwin\\n'\n f'These can be installed through the cygwin installer.\\n'\n f' - python {exe_type[is_cygwin_python]}\\n'\n f' - git {exe_type[is_cygwin_git]}\\n',\n )\n", "path": "pre_commit/git.py"}], "after_files": [{"content": "from __future__ import annotations\n\nimport logging\nimport os.path\nimport sys\nfrom typing import MutableMapping\n\nfrom pre_commit.errors import FatalError\nfrom pre_commit.util import CalledProcessError\nfrom pre_commit.util import cmd_output\nfrom pre_commit.util import cmd_output_b\n\nlogger = logging.getLogger(__name__)\n\n# see #2046\nNO_FS_MONITOR = ('-c', 'core.useBuiltinFSMonitor=false')\n\n\ndef zsplit(s: str) -> list[str]:\n s = s.strip('\\0')\n if s:\n return s.split('\\0')\n else:\n return []\n\n\ndef no_git_env(\n _env: MutableMapping[str, str] | None = None,\n) -> dict[str, str]:\n # Too many bugs dealing with environment variables and GIT:\n # https://github.com/pre-commit/pre-commit/issues/300\n # In git 2.6.3 (maybe others), git exports GIT_WORK_TREE while running\n # pre-commit hooks\n # In git 1.9.1 (maybe others), git exports GIT_DIR and GIT_INDEX_FILE\n # while running pre-commit hooks in submodules.\n # GIT_DIR: Causes git clone to clone wrong thing\n # GIT_INDEX_FILE: Causes 'error invalid object ...' during commit\n _env = _env if _env is not None else os.environ\n return {\n k: v for k, v in _env.items()\n if not k.startswith('GIT_') or\n k.startswith(('GIT_CONFIG_KEY_', 'GIT_CONFIG_VALUE_')) or\n k in {\n 'GIT_EXEC_PATH', 'GIT_SSH', 'GIT_SSH_COMMAND', 'GIT_SSL_CAINFO',\n 'GIT_SSL_NO_VERIFY', 'GIT_CONFIG_COUNT',\n 'GIT_HTTP_PROXY_AUTHMETHOD',\n }\n }\n\n\ndef get_root() -> str:\n # Git 2.25 introduced a change to \"rev-parse --show-toplevel\" that exposed\n # underlying volumes for Windows drives mapped with SUBST. 
We use\n # \"rev-parse --show-cdup\" to get the appropriate path, but must perform\n # an extra check to see if we are in the .git directory.\n try:\n root = os.path.abspath(\n cmd_output('git', 'rev-parse', '--show-cdup')[1].strip(),\n )\n git_dir = os.path.abspath(get_git_dir())\n except CalledProcessError:\n raise FatalError(\n 'git failed. Is it installed, and are you in a Git repository '\n 'directory?',\n )\n if os.path.samefile(root, git_dir):\n raise FatalError(\n 'git toplevel unexpectedly empty! make sure you are not '\n 'inside the `.git` directory of your repository.',\n )\n return root\n\n\ndef get_git_dir(git_root: str = '.') -> str:\n opts = ('--git-common-dir', '--git-dir')\n _, out, _ = cmd_output('git', 'rev-parse', *opts, cwd=git_root)\n for line, opt in zip(out.splitlines(), opts):\n if line != opt: # pragma: no branch (git < 2.5)\n return os.path.normpath(os.path.join(git_root, line))\n else:\n raise AssertionError('unreachable: no git dir')\n\n\ndef get_remote_url(git_root: str) -> str:\n _, out, _ = cmd_output('git', 'config', 'remote.origin.url', cwd=git_root)\n return out.strip()\n\n\ndef is_in_merge_conflict() -> bool:\n git_dir = get_git_dir('.')\n return (\n os.path.exists(os.path.join(git_dir, 'MERGE_MSG')) and\n os.path.exists(os.path.join(git_dir, 'MERGE_HEAD'))\n )\n\n\ndef parse_merge_msg_for_conflicts(merge_msg: bytes) -> list[str]:\n # Conflicted files start with tabs\n return [\n line.lstrip(b'#').strip().decode()\n for line in merge_msg.splitlines()\n # '#\\t' for git 2.4.1\n if line.startswith((b'\\t', b'#\\t'))\n ]\n\n\ndef get_conflicted_files() -> set[str]:\n logger.info('Checking merge-conflict files only.')\n # Need to get the conflicted files from the MERGE_MSG because they could\n # have resolved the conflict by choosing one side or the other\n with open(os.path.join(get_git_dir('.'), 'MERGE_MSG'), 'rb') as f:\n merge_msg = f.read()\n merge_conflict_filenames = parse_merge_msg_for_conflicts(merge_msg)\n\n # This will get the rest of the changes made after the merge.\n # If they resolved the merge conflict by choosing a mesh of both sides\n # this will also include the conflicted files\n tree_hash = cmd_output('git', 'write-tree')[1].strip()\n merge_diff_filenames = zsplit(\n cmd_output(\n 'git', 'diff', '--name-only', '--no-ext-diff', '-z',\n '-m', tree_hash, 'HEAD', 'MERGE_HEAD',\n )[1],\n )\n return set(merge_conflict_filenames) | set(merge_diff_filenames)\n\n\ndef get_staged_files(cwd: str | None = None) -> list[str]:\n return zsplit(\n cmd_output(\n 'git', 'diff', '--staged', '--name-only', '--no-ext-diff', '-z',\n # Everything except for D\n '--diff-filter=ACMRTUXB',\n cwd=cwd,\n )[1],\n )\n\n\ndef intent_to_add_files() -> list[str]:\n _, stdout, _ = cmd_output(\n 'git', 'status', '--ignore-submodules', '--porcelain', '-z',\n )\n parts = list(reversed(zsplit(stdout)))\n intent_to_add = []\n while parts:\n line = parts.pop()\n status, filename = line[:3], line[3:]\n if status[0] in {'C', 'R'}: # renames / moves have an additional arg\n parts.pop()\n if status[1] == 'A':\n intent_to_add.append(filename)\n return intent_to_add\n\n\ndef get_all_files() -> list[str]:\n return zsplit(cmd_output('git', 'ls-files', '-z')[1])\n\n\ndef get_changed_files(old: str, new: str) -> list[str]:\n diff_cmd = ('git', 'diff', '--name-only', '--no-ext-diff', '-z')\n try:\n _, out, _ = cmd_output(*diff_cmd, f'{old}...{new}')\n except CalledProcessError: # pragma: no cover (new git)\n # on newer git where old and new do not have a merge base git fails\n # so we 
try a full diff (this is what old git did for us!)\n _, out, _ = cmd_output(*diff_cmd, f'{old}..{new}')\n\n return zsplit(out)\n\n\ndef head_rev(remote: str) -> str:\n _, out, _ = cmd_output('git', 'ls-remote', '--exit-code', remote, 'HEAD')\n return out.split()[0]\n\n\ndef has_diff(*args: str, repo: str = '.') -> bool:\n cmd = ('git', 'diff', '--quiet', '--no-ext-diff', *args)\n return cmd_output_b(*cmd, cwd=repo, retcode=None)[0] == 1\n\n\ndef has_core_hookpaths_set() -> bool:\n _, out, _ = cmd_output_b('git', 'config', 'core.hooksPath', retcode=None)\n return bool(out.strip())\n\n\ndef init_repo(path: str, remote: str) -> None:\n if os.path.isdir(remote):\n remote = os.path.abspath(remote)\n\n git = ('git', *NO_FS_MONITOR)\n env = no_git_env()\n # avoid the user's template so that hooks do not recurse\n cmd_output_b(*git, 'init', '--template=', path, env=env)\n cmd_output_b(*git, 'remote', 'add', 'origin', remote, cwd=path, env=env)\n\n\ndef commit(repo: str = '.') -> None:\n env = no_git_env()\n name, email = 'pre-commit', '[email protected]'\n env['GIT_AUTHOR_NAME'] = env['GIT_COMMITTER_NAME'] = name\n env['GIT_AUTHOR_EMAIL'] = env['GIT_COMMITTER_EMAIL'] = email\n cmd = ('git', 'commit', '--no-edit', '--no-gpg-sign', '-n', '-minit')\n cmd_output_b(*cmd, cwd=repo, env=env)\n\n\ndef git_path(name: str, repo: str = '.') -> str:\n _, out, _ = cmd_output('git', 'rev-parse', '--git-path', name, cwd=repo)\n return os.path.join(repo, out.strip())\n\n\ndef check_for_cygwin_mismatch() -> None:\n \"\"\"See https://github.com/pre-commit/pre-commit/issues/354\"\"\"\n if sys.platform in ('cygwin', 'win32'): # pragma: no cover (windows)\n is_cygwin_python = sys.platform == 'cygwin'\n try:\n toplevel = get_root()\n except FatalError: # skip the check if we're not in a git repo\n return\n is_cygwin_git = toplevel.startswith('/')\n\n if is_cygwin_python ^ is_cygwin_git:\n exe_type = {True: '(cygwin)', False: '(windows)'}\n logger.warn(\n f'pre-commit has detected a mix of cygwin python / git\\n'\n f'This combination is not supported, it is likely you will '\n f'receive an error later in the program.\\n'\n f'Make sure to use cygwin git+python while using cygwin\\n'\n f'These can be installed through the cygwin installer.\\n'\n f' - python {exe_type[is_cygwin_python]}\\n'\n f' - git {exe_type[is_cygwin_git]}\\n',\n )\n", "path": "pre_commit/git.py"}]} | 3,203 | 92 |
gh_patches_debug_27231 | rasdani/github-patches | git_diff | vllm-project__vllm-4886 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[New Model]: Phi-2 support for LoRA
### The model to consider.
Microsoft/Phi-2 with LoRA
### The closest model vllm already supports.
_No response_
### What's your difficulty of supporting the model you want?
_No response_
--- END ISSUE ---
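For context, LoRA-enabled model classes in vLLM at this point advertise their adapter-capable layers through a few class attributes and accept a `lora_config` argument. The sketch below shows that pattern applied to Phi; the attribute names follow the convention used by other LoRA-enabled models in the repo, while the concrete module list is an assumption based on the layer names in `phi.py` (qkv_proj, dense, fc1, fc2), not the merged patch:

```python
# Illustrative sketch only -- not the actual fix.
from typing import Optional

from torch import nn
from transformers import PretrainedConfig

from vllm.config import LoRAConfig


class PhiForCausalLMLoRASketch(nn.Module):
    # Fused projection -> the original checkpoint sub-modules it packs.
    packed_modules_mapping = {
        "qkv_proj": ["q_proj", "k_proj", "v_proj"],
    }
    # Layers that may carry LoRA adapters (assumed from the Phi layer names).
    supported_lora_modules = ["qkv_proj", "dense", "fc1", "fc2"]
    embedding_modules = {}
    embedding_padding_modules = []

    def __init__(self,
                 config: PretrainedConfig,
                 lora_config: Optional[LoRAConfig] = None) -> None:
        super().__init__()
        self.config = config
        # Kept so the LoRA machinery can size extra vocab / adapters later.
        self.lora_config = lora_config
```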
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `vllm/model_executor/models/phi.py`
Content:
```
1 # coding=utf-8
2 # Adapted from
3 # https://huggingface.co/microsoft/phi-1_5/blob/main/modeling_phi.py
4 # Copyright 2023 The vLLM team.
5 # Copyright (c) Microsoft Corporation.
6 # Licensed under the MIT license.
7 #
8 # BSD 3-Clause License
9 #
10 # Copyright (c) 2022, Tri Dao, [email protected].
11 # All rights reserved.
12 #
13 # Redistribution and use in source and binary forms, with or without
14 # modification, are permitted provided that the following conditions are met:
15 #
16 # * Redistributions of source code must retain the above copyright notice, this
17 # list of conditions and the following disclaimer.
18 #
19 # * Redistributions in binary form must reproduce the above copyright notice,
20 # this list of conditions and the following disclaimer in the documentation
21 # and/or other materials provided with the distribution.
22 #
23 # * Neither the name of the copyright holder nor the names of its
24 # contributors may be used to endorse or promote products derived from
25 # this software without specific prior written permission.
26 #
27 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
28 # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
29 # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
30 # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
31 # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
32 # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
33 # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
34 # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
35 # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
36 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
37 """Inference-only Phi-1.5 model compatible with HuggingFace weights."""
38 from typing import Iterable, List, Optional, Tuple
39
40 import torch
41 from torch import nn
42 from transformers import PretrainedConfig
43
44 from vllm.attention import Attention, AttentionMetadata
45 from vllm.config import CacheConfig
46 from vllm.distributed import get_tensor_model_parallel_world_size
47 from vllm.model_executor.layers.activation import get_act_fn
48 from vllm.model_executor.layers.linear import (ColumnParallelLinear,
49 QKVParallelLinear,
50 RowParallelLinear)
51 from vllm.model_executor.layers.logits_processor import LogitsProcessor
52 from vllm.model_executor.layers.quantization.base_config import (
53 QuantizationConfig)
54 from vllm.model_executor.layers.rotary_embedding import get_rope
55 from vllm.model_executor.layers.sampler import Sampler
56 from vllm.model_executor.layers.vocab_parallel_embedding import (
57 ParallelLMHead, VocabParallelEmbedding)
58 from vllm.model_executor.model_loader.weight_utils import default_weight_loader
59 from vllm.model_executor.sampling_metadata import SamplingMetadata
60 from vllm.sequence import SamplerOutput
61
62
63 class PhiAttention(nn.Module):
64
65 def __init__(self,
66 config: PretrainedConfig,
67 cache_config: Optional[CacheConfig] = None,
68 quant_config: Optional[QuantizationConfig] = None):
69 super().__init__()
70 self.total_num_heads = config.num_attention_heads
71 self.hidden_size = config.hidden_size
72 self.head_size = self.hidden_size // self.total_num_heads
73
74 tensor_model_parallel_world_size = (
75 get_tensor_model_parallel_world_size())
76 assert self.total_num_heads % tensor_model_parallel_world_size == 0
77 self.num_heads = (self.total_num_heads //
78 tensor_model_parallel_world_size)
79
80 # pylint: disable=C0103
81 self.qkv_proj = QKVParallelLinear(
82 self.hidden_size,
83 self.head_size,
84 self.total_num_heads,
85 bias=True,
86 quant_config=quant_config,
87 )
88 self.dense = RowParallelLinear(
89 self.hidden_size,
90 self.hidden_size,
91 quant_config=quant_config,
92 )
93
94 scaling = self.head_size**-0.5
95 rotary_dim = int(config.partial_rotary_factor *
96 (config.hidden_size // config.num_attention_heads))
97 assert rotary_dim % 2 == 0
98
99 # pylint: disable=C0301
100 # Refer to:
101 # https://huggingface.co/microsoft/phi-1_5/blob/d212a789620c380ff32ca1d1ee9943a777360987/modeling_phi.py#L518
102 rope_theta = 10000
103 max_position_embeddings = getattr(config, "n_positions", 2048)
104 self.rotary_emb = get_rope(
105 self.head_size,
106 rotary_dim=rotary_dim,
107 max_position=max_position_embeddings,
108 base=rope_theta,
109 )
110 self.attn = Attention(self.num_heads,
111 self.head_size,
112 scaling,
113 cache_config=cache_config)
114
115 def forward(
116 self,
117 position_ids: torch.Tensor,
118 hidden_states: torch.Tensor,
119 kv_cache: torch.Tensor,
120 attn_metadata: AttentionMetadata,
121 ) -> torch.Tensor:
122 qkv, _ = self.qkv_proj(hidden_states)
123 q, k, v = qkv.chunk(chunks=3, dim=-1)
124 q, k = self.rotary_emb(position_ids, q, k)
125 attn_output = self.attn(q, k, v, kv_cache, attn_metadata)
126 output, _ = self.dense(attn_output)
127 return output
128
129
130 class PhiMLP(nn.Module):
131
132 def __init__(self,
133 config: PretrainedConfig,
134 quant_config: Optional[QuantizationConfig] = None):
135 super().__init__()
136
137 n_inner = getattr(config, "n_inner", None)
138 n_inner = n_inner if n_inner is not None else 4 * config.hidden_size
139
140 self.fc1 = ColumnParallelLinear(
141 config.hidden_size,
142 n_inner,
143 quant_config=quant_config,
144 )
145 self.fc2 = RowParallelLinear(
146 n_inner,
147 config.hidden_size,
148 quant_config=quant_config,
149 )
150 self.act = get_act_fn(config.hidden_act, quant_config, n_inner)
151
152 def forward(self, hidden_states):
153 hidden_states, _ = self.fc1(hidden_states)
154 hidden_states = self.act(hidden_states)
155 hidden_states, _ = self.fc2(hidden_states)
156 return hidden_states
157
158
159 class PhiLayer(nn.Module):
160
161 def __init__(self,
162 config: PretrainedConfig,
163 cache_config: Optional[CacheConfig] = None,
164 quant_config: Optional[QuantizationConfig] = None):
165 super().__init__()
166 self.input_layernorm = nn.LayerNorm(config.hidden_size,
167 eps=config.layer_norm_eps)
168 self.self_attn = PhiAttention(config, cache_config, quant_config)
169 self.mlp = PhiMLP(config, quant_config)
170
171 def forward(
172 self,
173 position_ids: torch.Tensor,
174 hidden_states: torch.Tensor,
175 kv_cache: torch.Tensor,
176 attn_metadata: AttentionMetadata,
177 ) -> torch.Tensor:
178 residual = hidden_states
179 hidden_states = self.input_layernorm(hidden_states)
180 attn_outputs = self.self_attn(
181 position_ids=position_ids,
182 hidden_states=hidden_states,
183 kv_cache=kv_cache,
184 attn_metadata=attn_metadata,
185 )
186 feed_forward_hidden_states = self.mlp(hidden_states)
187 hidden_states = attn_outputs + feed_forward_hidden_states + residual
188 return hidden_states
189
190
191 class PhiModel(nn.Module):
192
193 def __init__(self,
194 config: PretrainedConfig,
195 cache_config: Optional[CacheConfig] = None,
196 quant_config: Optional[QuantizationConfig] = None):
197 super().__init__()
198 self.config = config
199 self.quant_config = quant_config
200 self.embed_tokens = VocabParallelEmbedding(config.vocab_size,
201 config.hidden_size)
202 self.layers = nn.ModuleList([
203 PhiLayer(config, cache_config, quant_config)
204 for _ in range(config.num_hidden_layers)
205 ])
206 self.final_layernorm = nn.LayerNorm(config.hidden_size,
207 eps=config.layer_norm_eps)
208
209 def forward(
210 self,
211 input_ids: torch.Tensor,
212 positions: torch.Tensor,
213 kv_caches: List[torch.Tensor],
214 attn_metadata: AttentionMetadata,
215 ) -> torch.Tensor:
216 hidden_states = self.embed_tokens(input_ids)
217 for i in range(self.config.num_hidden_layers):
218 layer = self.layers[i]
219 hidden_states = layer(
220 positions,
221 hidden_states,
222 kv_caches[i],
223 attn_metadata,
224 )
225
226 hidden_states = self.final_layernorm(hidden_states)
227
228 return hidden_states
229
230
231 class PhiForCausalLM(nn.Module):
232
233 def __init__(self,
234 config: PretrainedConfig,
235 cache_config: Optional[CacheConfig] = None,
236 quant_config: Optional[QuantizationConfig] = None):
237 super().__init__()
238 self.config = config
239 self.quant_config = quant_config
240
241 self.model = PhiModel(config, cache_config, quant_config)
242
243 self.lm_head = ParallelLMHead(config.vocab_size,
244 config.hidden_size,
245 bias=True)
246 self.logits_processor = LogitsProcessor(config.vocab_size)
247 self.sampler = Sampler()
248
249 def forward(
250 self,
251 input_ids: torch.Tensor,
252 positions: torch.Tensor,
253 kv_caches: List[torch.Tensor],
254 attn_metadata: AttentionMetadata,
255 ) -> torch.Tensor:
256 hidden_states = self.model(input_ids, positions, kv_caches,
257 attn_metadata)
258
259 return hidden_states
260
261 def compute_logits(self, hidden_states: torch.Tensor,
262 sampling_metadata: SamplingMetadata) -> torch.Tensor:
263 logits = self.logits_processor(self.lm_head.weight, hidden_states,
264 sampling_metadata, self.lm_head.bias)
265 return logits
266
267 def sample(
268 self,
269 logits: torch.Tensor,
270 sampling_metadata: SamplingMetadata,
271 ) -> Optional[SamplerOutput]:
272 next_tokens = self.sampler(logits, sampling_metadata)
273 return next_tokens
274
275 def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]):
276 stacked_params_mapping = [
277 # (param_name, shard_name, shard_id)
278 ("qkv_proj", "q_proj", "q"),
279 ("qkv_proj", "k_proj", "k"),
280 ("qkv_proj", "v_proj", "v")
281 ]
282 params_dict = dict(self.named_parameters())
283
284 for name, loaded_weight in weights:
285 if "rotary_emb.inv_freq" in name:
286 continue
287
288 for (param_name, weight_name, shard_id) in stacked_params_mapping:
289 if weight_name not in name:
290 continue
291 name = name.replace(weight_name, param_name)
292 # Skip loading extra bias for GPTQ models.
293 if name.endswith(".bias") and name not in params_dict:
294 continue
295 param = params_dict[name]
296 weight_loader = param.weight_loader
297 weight_loader(param, loaded_weight, shard_id)
298 break
299 else:
300 # Skip loading extra bias for GPTQ models.
301 if name.endswith(".bias") and name not in params_dict:
302 continue
303 # pylint: disable=E1136
304
305 param = params_dict[name]
306 weight_loader = getattr(param, "weight_loader",
307 default_weight_loader)
308 weight_loader(param, loaded_weight)
309
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/vllm/model_executor/models/phi.py b/vllm/model_executor/models/phi.py
--- a/vllm/model_executor/models/phi.py
+++ b/vllm/model_executor/models/phi.py
@@ -42,7 +42,7 @@
from transformers import PretrainedConfig
from vllm.attention import Attention, AttentionMetadata
-from vllm.config import CacheConfig
+from vllm.config import CacheConfig, LoRAConfig
from vllm.distributed import get_tensor_model_parallel_world_size
from vllm.model_executor.layers.activation import get_act_fn
from vllm.model_executor.layers.linear import (ColumnParallelLinear,
@@ -229,11 +229,32 @@
class PhiForCausalLM(nn.Module):
-
- def __init__(self,
- config: PretrainedConfig,
- cache_config: Optional[CacheConfig] = None,
- quant_config: Optional[QuantizationConfig] = None):
+ packed_modules_mapping = {
+ "qkv_proj": [
+ "q_proj",
+ "k_proj",
+ "v_proj",
+ ]
+ }
+
+ # LoRA specific attributes
+ supported_lora_modules = [
+ "qkv_proj",
+ "dense",
+ "fc1",
+ "fc2",
+ ]
+ embedding_modules = {}
+ embedding_padding_modules = []
+
+ def __init__(
+ self,
+ config: PretrainedConfig,
+ cache_config: Optional[CacheConfig] = None,
+ quant_config: Optional[QuantizationConfig] = None,
+ lora_config: Optional[LoRAConfig] = None,
+ ):
+ del lora_config # Unused.
super().__init__()
self.config = config
self.quant_config = quant_config
| {"golden_diff": "diff --git a/vllm/model_executor/models/phi.py b/vllm/model_executor/models/phi.py\n--- a/vllm/model_executor/models/phi.py\n+++ b/vllm/model_executor/models/phi.py\n@@ -42,7 +42,7 @@\n from transformers import PretrainedConfig\n \n from vllm.attention import Attention, AttentionMetadata\n-from vllm.config import CacheConfig\n+from vllm.config import CacheConfig, LoRAConfig\n from vllm.distributed import get_tensor_model_parallel_world_size\n from vllm.model_executor.layers.activation import get_act_fn\n from vllm.model_executor.layers.linear import (ColumnParallelLinear,\n@@ -229,11 +229,32 @@\n \n \n class PhiForCausalLM(nn.Module):\n-\n- def __init__(self,\n- config: PretrainedConfig,\n- cache_config: Optional[CacheConfig] = None,\n- quant_config: Optional[QuantizationConfig] = None):\n+ packed_modules_mapping = {\n+ \"qkv_proj\": [\n+ \"q_proj\",\n+ \"k_proj\",\n+ \"v_proj\",\n+ ]\n+ }\n+\n+ # LoRA specific attributes\n+ supported_lora_modules = [\n+ \"qkv_proj\",\n+ \"dense\",\n+ \"fc1\",\n+ \"fc2\",\n+ ]\n+ embedding_modules = {}\n+ embedding_padding_modules = []\n+\n+ def __init__(\n+ self,\n+ config: PretrainedConfig,\n+ cache_config: Optional[CacheConfig] = None,\n+ quant_config: Optional[QuantizationConfig] = None,\n+ lora_config: Optional[LoRAConfig] = None,\n+ ):\n+ del lora_config # Unused.\n super().__init__()\n self.config = config\n self.quant_config = quant_config\n", "issue": "[New Model]: Phi-2 support for LoRA\n### The model to consider.\n\nMicrosoft/Phi-2 with LoRA\n\n### The closest model vllm already supports.\n\n_No response_\n\n### What's your difficulty of supporting the model you want?\n\n_No response_\n", "before_files": [{"content": "# coding=utf-8\n# Adapted from\n# https://huggingface.co/microsoft/phi-1_5/blob/main/modeling_phi.py\n# Copyright 2023 The vLLM team.\n# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT license.\n#\n# BSD 3-Clause License\n#\n# Copyright (c) 2022, Tri Dao, [email protected].\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\"\"\"Inference-only Phi-1.5 model compatible with HuggingFace weights.\"\"\"\nfrom typing import Iterable, List, Optional, Tuple\n\nimport torch\nfrom torch import nn\nfrom transformers import PretrainedConfig\n\nfrom vllm.attention import Attention, AttentionMetadata\nfrom vllm.config import CacheConfig\nfrom vllm.distributed import get_tensor_model_parallel_world_size\nfrom vllm.model_executor.layers.activation import get_act_fn\nfrom vllm.model_executor.layers.linear import (ColumnParallelLinear,\n QKVParallelLinear,\n RowParallelLinear)\nfrom vllm.model_executor.layers.logits_processor import LogitsProcessor\nfrom vllm.model_executor.layers.quantization.base_config import (\n QuantizationConfig)\nfrom vllm.model_executor.layers.rotary_embedding import get_rope\nfrom vllm.model_executor.layers.sampler import Sampler\nfrom vllm.model_executor.layers.vocab_parallel_embedding import (\n ParallelLMHead, VocabParallelEmbedding)\nfrom vllm.model_executor.model_loader.weight_utils import default_weight_loader\nfrom vllm.model_executor.sampling_metadata import SamplingMetadata\nfrom vllm.sequence import SamplerOutput\n\n\nclass PhiAttention(nn.Module):\n\n def __init__(self,\n config: PretrainedConfig,\n cache_config: Optional[CacheConfig] = None,\n quant_config: Optional[QuantizationConfig] = None):\n super().__init__()\n self.total_num_heads = config.num_attention_heads\n self.hidden_size = config.hidden_size\n self.head_size = self.hidden_size // self.total_num_heads\n\n tensor_model_parallel_world_size = (\n get_tensor_model_parallel_world_size())\n assert self.total_num_heads % tensor_model_parallel_world_size == 0\n self.num_heads = (self.total_num_heads //\n tensor_model_parallel_world_size)\n\n # pylint: disable=C0103\n self.qkv_proj = QKVParallelLinear(\n self.hidden_size,\n self.head_size,\n self.total_num_heads,\n bias=True,\n quant_config=quant_config,\n )\n self.dense = RowParallelLinear(\n self.hidden_size,\n self.hidden_size,\n quant_config=quant_config,\n )\n\n scaling = self.head_size**-0.5\n rotary_dim = int(config.partial_rotary_factor *\n (config.hidden_size // config.num_attention_heads))\n assert rotary_dim % 2 == 0\n\n # pylint: disable=C0301\n # Refer to:\n # https://huggingface.co/microsoft/phi-1_5/blob/d212a789620c380ff32ca1d1ee9943a777360987/modeling_phi.py#L518\n rope_theta = 10000\n max_position_embeddings = getattr(config, \"n_positions\", 2048)\n self.rotary_emb = get_rope(\n self.head_size,\n rotary_dim=rotary_dim,\n max_position=max_position_embeddings,\n base=rope_theta,\n )\n self.attn = Attention(self.num_heads,\n self.head_size,\n scaling,\n cache_config=cache_config)\n\n def forward(\n self,\n position_ids: torch.Tensor,\n hidden_states: torch.Tensor,\n kv_cache: torch.Tensor,\n attn_metadata: AttentionMetadata,\n ) -> torch.Tensor:\n qkv, _ = self.qkv_proj(hidden_states)\n q, k, v = qkv.chunk(chunks=3, dim=-1)\n q, k = self.rotary_emb(position_ids, q, k)\n attn_output = self.attn(q, k, v, kv_cache, attn_metadata)\n output, _ = 
self.dense(attn_output)\n return output\n\n\nclass PhiMLP(nn.Module):\n\n def __init__(self,\n config: PretrainedConfig,\n quant_config: Optional[QuantizationConfig] = None):\n super().__init__()\n\n n_inner = getattr(config, \"n_inner\", None)\n n_inner = n_inner if n_inner is not None else 4 * config.hidden_size\n\n self.fc1 = ColumnParallelLinear(\n config.hidden_size,\n n_inner,\n quant_config=quant_config,\n )\n self.fc2 = RowParallelLinear(\n n_inner,\n config.hidden_size,\n quant_config=quant_config,\n )\n self.act = get_act_fn(config.hidden_act, quant_config, n_inner)\n\n def forward(self, hidden_states):\n hidden_states, _ = self.fc1(hidden_states)\n hidden_states = self.act(hidden_states)\n hidden_states, _ = self.fc2(hidden_states)\n return hidden_states\n\n\nclass PhiLayer(nn.Module):\n\n def __init__(self,\n config: PretrainedConfig,\n cache_config: Optional[CacheConfig] = None,\n quant_config: Optional[QuantizationConfig] = None):\n super().__init__()\n self.input_layernorm = nn.LayerNorm(config.hidden_size,\n eps=config.layer_norm_eps)\n self.self_attn = PhiAttention(config, cache_config, quant_config)\n self.mlp = PhiMLP(config, quant_config)\n\n def forward(\n self,\n position_ids: torch.Tensor,\n hidden_states: torch.Tensor,\n kv_cache: torch.Tensor,\n attn_metadata: AttentionMetadata,\n ) -> torch.Tensor:\n residual = hidden_states\n hidden_states = self.input_layernorm(hidden_states)\n attn_outputs = self.self_attn(\n position_ids=position_ids,\n hidden_states=hidden_states,\n kv_cache=kv_cache,\n attn_metadata=attn_metadata,\n )\n feed_forward_hidden_states = self.mlp(hidden_states)\n hidden_states = attn_outputs + feed_forward_hidden_states + residual\n return hidden_states\n\n\nclass PhiModel(nn.Module):\n\n def __init__(self,\n config: PretrainedConfig,\n cache_config: Optional[CacheConfig] = None,\n quant_config: Optional[QuantizationConfig] = None):\n super().__init__()\n self.config = config\n self.quant_config = quant_config\n self.embed_tokens = VocabParallelEmbedding(config.vocab_size,\n config.hidden_size)\n self.layers = nn.ModuleList([\n PhiLayer(config, cache_config, quant_config)\n for _ in range(config.num_hidden_layers)\n ])\n self.final_layernorm = nn.LayerNorm(config.hidden_size,\n eps=config.layer_norm_eps)\n\n def forward(\n self,\n input_ids: torch.Tensor,\n positions: torch.Tensor,\n kv_caches: List[torch.Tensor],\n attn_metadata: AttentionMetadata,\n ) -> torch.Tensor:\n hidden_states = self.embed_tokens(input_ids)\n for i in range(self.config.num_hidden_layers):\n layer = self.layers[i]\n hidden_states = layer(\n positions,\n hidden_states,\n kv_caches[i],\n attn_metadata,\n )\n\n hidden_states = self.final_layernorm(hidden_states)\n\n return hidden_states\n\n\nclass PhiForCausalLM(nn.Module):\n\n def __init__(self,\n config: PretrainedConfig,\n cache_config: Optional[CacheConfig] = None,\n quant_config: Optional[QuantizationConfig] = None):\n super().__init__()\n self.config = config\n self.quant_config = quant_config\n\n self.model = PhiModel(config, cache_config, quant_config)\n\n self.lm_head = ParallelLMHead(config.vocab_size,\n config.hidden_size,\n bias=True)\n self.logits_processor = LogitsProcessor(config.vocab_size)\n self.sampler = Sampler()\n\n def forward(\n self,\n input_ids: torch.Tensor,\n positions: torch.Tensor,\n kv_caches: List[torch.Tensor],\n attn_metadata: AttentionMetadata,\n ) -> torch.Tensor:\n hidden_states = self.model(input_ids, positions, kv_caches,\n attn_metadata)\n\n return hidden_states\n\n def 
compute_logits(self, hidden_states: torch.Tensor,\n sampling_metadata: SamplingMetadata) -> torch.Tensor:\n logits = self.logits_processor(self.lm_head.weight, hidden_states,\n sampling_metadata, self.lm_head.bias)\n return logits\n\n def sample(\n self,\n logits: torch.Tensor,\n sampling_metadata: SamplingMetadata,\n ) -> Optional[SamplerOutput]:\n next_tokens = self.sampler(logits, sampling_metadata)\n return next_tokens\n\n def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]):\n stacked_params_mapping = [\n # (param_name, shard_name, shard_id)\n (\"qkv_proj\", \"q_proj\", \"q\"),\n (\"qkv_proj\", \"k_proj\", \"k\"),\n (\"qkv_proj\", \"v_proj\", \"v\")\n ]\n params_dict = dict(self.named_parameters())\n\n for name, loaded_weight in weights:\n if \"rotary_emb.inv_freq\" in name:\n continue\n\n for (param_name, weight_name, shard_id) in stacked_params_mapping:\n if weight_name not in name:\n continue\n name = name.replace(weight_name, param_name)\n # Skip loading extra bias for GPTQ models.\n if name.endswith(\".bias\") and name not in params_dict:\n continue\n param = params_dict[name]\n weight_loader = param.weight_loader\n weight_loader(param, loaded_weight, shard_id)\n break\n else:\n # Skip loading extra bias for GPTQ models.\n if name.endswith(\".bias\") and name not in params_dict:\n continue\n # pylint: disable=E1136\n\n param = params_dict[name]\n weight_loader = getattr(param, \"weight_loader\",\n default_weight_loader)\n weight_loader(param, loaded_weight)\n", "path": "vllm/model_executor/models/phi.py"}], "after_files": [{"content": "# coding=utf-8\n# Adapted from\n# https://huggingface.co/microsoft/phi-1_5/blob/main/modeling_phi.py\n# Copyright 2023 The vLLM team.\n# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT license.\n#\n# BSD 3-Clause License\n#\n# Copyright (c) 2022, Tri Dao, [email protected].\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\"\"\"Inference-only Phi-1.5 model compatible with HuggingFace weights.\"\"\"\nfrom typing import Iterable, List, Optional, Tuple\n\nimport torch\nfrom torch import nn\nfrom transformers import PretrainedConfig\n\nfrom vllm.attention import Attention, AttentionMetadata\nfrom vllm.config import CacheConfig, LoRAConfig\nfrom vllm.distributed import get_tensor_model_parallel_world_size\nfrom vllm.model_executor.layers.activation import get_act_fn\nfrom vllm.model_executor.layers.linear import (ColumnParallelLinear,\n QKVParallelLinear,\n RowParallelLinear)\nfrom vllm.model_executor.layers.logits_processor import LogitsProcessor\nfrom vllm.model_executor.layers.quantization.base_config import (\n QuantizationConfig)\nfrom vllm.model_executor.layers.rotary_embedding import get_rope\nfrom vllm.model_executor.layers.sampler import Sampler\nfrom vllm.model_executor.layers.vocab_parallel_embedding import (\n ParallelLMHead, VocabParallelEmbedding)\nfrom vllm.model_executor.model_loader.weight_utils import default_weight_loader\nfrom vllm.model_executor.sampling_metadata import SamplingMetadata\nfrom vllm.sequence import SamplerOutput\n\n\nclass PhiAttention(nn.Module):\n\n def __init__(self,\n config: PretrainedConfig,\n cache_config: Optional[CacheConfig] = None,\n quant_config: Optional[QuantizationConfig] = None):\n super().__init__()\n self.total_num_heads = config.num_attention_heads\n self.hidden_size = config.hidden_size\n self.head_size = self.hidden_size // self.total_num_heads\n\n tensor_model_parallel_world_size = (\n get_tensor_model_parallel_world_size())\n assert self.total_num_heads % tensor_model_parallel_world_size == 0\n self.num_heads = (self.total_num_heads //\n tensor_model_parallel_world_size)\n\n # pylint: disable=C0103\n self.qkv_proj = QKVParallelLinear(\n self.hidden_size,\n self.head_size,\n self.total_num_heads,\n bias=True,\n quant_config=quant_config,\n )\n self.dense = RowParallelLinear(\n self.hidden_size,\n self.hidden_size,\n quant_config=quant_config,\n )\n\n scaling = self.head_size**-0.5\n rotary_dim = int(config.partial_rotary_factor *\n (config.hidden_size // config.num_attention_heads))\n assert rotary_dim % 2 == 0\n\n # pylint: disable=C0301\n # Refer to:\n # https://huggingface.co/microsoft/phi-1_5/blob/d212a789620c380ff32ca1d1ee9943a777360987/modeling_phi.py#L518\n rope_theta = 10000\n max_position_embeddings = getattr(config, \"n_positions\", 2048)\n self.rotary_emb = get_rope(\n self.head_size,\n rotary_dim=rotary_dim,\n max_position=max_position_embeddings,\n base=rope_theta,\n )\n self.attn = Attention(self.num_heads,\n self.head_size,\n scaling,\n cache_config=cache_config)\n\n def forward(\n self,\n position_ids: torch.Tensor,\n hidden_states: torch.Tensor,\n kv_cache: torch.Tensor,\n attn_metadata: AttentionMetadata,\n ) -> torch.Tensor:\n qkv, _ = self.qkv_proj(hidden_states)\n q, k, v = qkv.chunk(chunks=3, dim=-1)\n q, k = self.rotary_emb(position_ids, q, k)\n attn_output = self.attn(q, k, v, kv_cache, attn_metadata)\n 
output, _ = self.dense(attn_output)\n return output\n\n\nclass PhiMLP(nn.Module):\n\n def __init__(self,\n config: PretrainedConfig,\n quant_config: Optional[QuantizationConfig] = None):\n super().__init__()\n\n n_inner = getattr(config, \"n_inner\", None)\n n_inner = n_inner if n_inner is not None else 4 * config.hidden_size\n\n self.fc1 = ColumnParallelLinear(\n config.hidden_size,\n n_inner,\n quant_config=quant_config,\n )\n self.fc2 = RowParallelLinear(\n n_inner,\n config.hidden_size,\n quant_config=quant_config,\n )\n self.act = get_act_fn(config.hidden_act, quant_config, n_inner)\n\n def forward(self, hidden_states):\n hidden_states, _ = self.fc1(hidden_states)\n hidden_states = self.act(hidden_states)\n hidden_states, _ = self.fc2(hidden_states)\n return hidden_states\n\n\nclass PhiLayer(nn.Module):\n\n def __init__(self,\n config: PretrainedConfig,\n cache_config: Optional[CacheConfig] = None,\n quant_config: Optional[QuantizationConfig] = None):\n super().__init__()\n self.input_layernorm = nn.LayerNorm(config.hidden_size,\n eps=config.layer_norm_eps)\n self.self_attn = PhiAttention(config, cache_config, quant_config)\n self.mlp = PhiMLP(config, quant_config)\n\n def forward(\n self,\n position_ids: torch.Tensor,\n hidden_states: torch.Tensor,\n kv_cache: torch.Tensor,\n attn_metadata: AttentionMetadata,\n ) -> torch.Tensor:\n residual = hidden_states\n hidden_states = self.input_layernorm(hidden_states)\n attn_outputs = self.self_attn(\n position_ids=position_ids,\n hidden_states=hidden_states,\n kv_cache=kv_cache,\n attn_metadata=attn_metadata,\n )\n feed_forward_hidden_states = self.mlp(hidden_states)\n hidden_states = attn_outputs + feed_forward_hidden_states + residual\n return hidden_states\n\n\nclass PhiModel(nn.Module):\n\n def __init__(self,\n config: PretrainedConfig,\n cache_config: Optional[CacheConfig] = None,\n quant_config: Optional[QuantizationConfig] = None):\n super().__init__()\n self.config = config\n self.quant_config = quant_config\n self.embed_tokens = VocabParallelEmbedding(config.vocab_size,\n config.hidden_size)\n self.layers = nn.ModuleList([\n PhiLayer(config, cache_config, quant_config)\n for _ in range(config.num_hidden_layers)\n ])\n self.final_layernorm = nn.LayerNorm(config.hidden_size,\n eps=config.layer_norm_eps)\n\n def forward(\n self,\n input_ids: torch.Tensor,\n positions: torch.Tensor,\n kv_caches: List[torch.Tensor],\n attn_metadata: AttentionMetadata,\n ) -> torch.Tensor:\n hidden_states = self.embed_tokens(input_ids)\n for i in range(self.config.num_hidden_layers):\n layer = self.layers[i]\n hidden_states = layer(\n positions,\n hidden_states,\n kv_caches[i],\n attn_metadata,\n )\n\n hidden_states = self.final_layernorm(hidden_states)\n\n return hidden_states\n\n\nclass PhiForCausalLM(nn.Module):\n packed_modules_mapping = {\n \"qkv_proj\": [\n \"q_proj\",\n \"k_proj\",\n \"v_proj\",\n ]\n }\n\n # LoRA specific attributes\n supported_lora_modules = [\n \"qkv_proj\",\n \"dense\",\n \"fc1\",\n \"fc2\",\n ]\n embedding_modules = {}\n embedding_padding_modules = []\n\n def __init__(\n self,\n config: PretrainedConfig,\n cache_config: Optional[CacheConfig] = None,\n quant_config: Optional[QuantizationConfig] = None,\n lora_config: Optional[LoRAConfig] = None,\n ):\n del lora_config # Unused.\n super().__init__()\n self.config = config\n self.quant_config = quant_config\n\n self.model = PhiModel(config, cache_config, quant_config)\n\n self.lm_head = ParallelLMHead(config.vocab_size,\n config.hidden_size,\n bias=True)\n self.logits_processor = 
LogitsProcessor(config.vocab_size)\n self.sampler = Sampler()\n\n def forward(\n self,\n input_ids: torch.Tensor,\n positions: torch.Tensor,\n kv_caches: List[torch.Tensor],\n attn_metadata: AttentionMetadata,\n ) -> torch.Tensor:\n hidden_states = self.model(input_ids, positions, kv_caches,\n attn_metadata)\n\n return hidden_states\n\n def compute_logits(self, hidden_states: torch.Tensor,\n sampling_metadata: SamplingMetadata) -> torch.Tensor:\n logits = self.logits_processor(self.lm_head.weight, hidden_states,\n sampling_metadata, self.lm_head.bias)\n return logits\n\n def sample(\n self,\n logits: torch.Tensor,\n sampling_metadata: SamplingMetadata,\n ) -> Optional[SamplerOutput]:\n next_tokens = self.sampler(logits, sampling_metadata)\n return next_tokens\n\n def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]):\n stacked_params_mapping = [\n # (param_name, shard_name, shard_id)\n (\"qkv_proj\", \"q_proj\", \"q\"),\n (\"qkv_proj\", \"k_proj\", \"k\"),\n (\"qkv_proj\", \"v_proj\", \"v\")\n ]\n params_dict = dict(self.named_parameters())\n\n for name, loaded_weight in weights:\n if \"rotary_emb.inv_freq\" in name:\n continue\n\n for (param_name, weight_name, shard_id) in stacked_params_mapping:\n if weight_name not in name:\n continue\n name = name.replace(weight_name, param_name)\n # Skip loading extra bias for GPTQ models.\n if name.endswith(\".bias\") and name not in params_dict:\n continue\n param = params_dict[name]\n weight_loader = param.weight_loader\n weight_loader(param, loaded_weight, shard_id)\n break\n else:\n # Skip loading extra bias for GPTQ models.\n if name.endswith(\".bias\") and name not in params_dict:\n continue\n # pylint: disable=E1136\n\n param = params_dict[name]\n weight_loader = getattr(param, \"weight_loader\",\n default_weight_loader)\n weight_loader(param, loaded_weight)\n", "path": "vllm/model_executor/models/phi.py"}]} | 3,594 | 402 |
gh_patches_debug_28828 | rasdani/github-patches | git_diff | edgedb__edgedb-1753 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Rename some string handling functions and deprecate the old versions.
From discussion in Slack a point was raised:
> Just noticed something now of an engineering nature though:
> https://edgedb.com/docs/edgeql/funcops/string#function::std::str_lpad
> This reminded me of similar methods in Rust that were eventually deprecated in favour of calling them "start" and "end" instead of "right" and "left" due to languages that write right to left (and even up to down):
> https://doc.rust-lang.org/std/primitive.str.html#method.trim_left
> The new methods did the exact same thing but were just given different names, while the old method names were left alone to accommodate legacy code.
This means that we want to rename string handling functions to use "start" and "end" such as `std::str_trim_start`, etc. The old functions should remain for some time, but they need to be marked as deprecated. To this end we also introduce a `deprecated` annotation which can mark deprecated items as well as provide a useful note like what should be used instead.
The documentation needs to include deprecated functions, but they should also be clearly labeled as such.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `edb/edgeql/pygments/meta.py`
Content:
```
1 # AUTOGENERATED BY EdgeDB WITH
2 # $ edb gen-meta-grammars edgeql
3
4
5 from __future__ import annotations
6
7
8 class EdgeQL:
9 reserved_keywords = (
10 "__source__",
11 "__std__",
12 "__subject__",
13 "__type__",
14 "abort",
15 "alter",
16 "analyze",
17 "and",
18 "anyarray",
19 "anytuple",
20 "anytype",
21 "begin",
22 "case",
23 "check",
24 "commit",
25 "configure",
26 "create",
27 "deallocate",
28 "declare",
29 "delete",
30 "describe",
31 "detached",
32 "discard",
33 "distinct",
34 "do",
35 "drop",
36 "else",
37 "empty",
38 "end",
39 "execute",
40 "exists",
41 "explain",
42 "extending",
43 "fetch",
44 "filter",
45 "for",
46 "get",
47 "global",
48 "grant",
49 "group",
50 "if",
51 "ilike",
52 "import",
53 "in",
54 "insert",
55 "introspect",
56 "is",
57 "like",
58 "limit",
59 "listen",
60 "load",
61 "lock",
62 "match",
63 "module",
64 "move",
65 "not",
66 "notify",
67 "offset",
68 "optional",
69 "or",
70 "order",
71 "over",
72 "partition",
73 "policy",
74 "populate",
75 "prepare",
76 "raise",
77 "refresh",
78 "reindex",
79 "release",
80 "reset",
81 "revoke",
82 "rollback",
83 "select",
84 "set",
85 "start",
86 "typeof",
87 "union",
88 "update",
89 "variadic",
90 "when",
91 "window",
92 "with",
93 )
94 unreserved_keywords = (
95 "abstract",
96 "after",
97 "alias",
98 "all",
99 "allow",
100 "annotation",
101 "as",
102 "asc",
103 "assignment",
104 "before",
105 "by",
106 "cardinality",
107 "cast",
108 "config",
109 "constraint",
110 "current",
111 "database",
112 "ddl",
113 "default",
114 "deferrable",
115 "deferred",
116 "delegated",
117 "desc",
118 "emit",
119 "explicit",
120 "expression",
121 "final",
122 "first",
123 "from",
124 "function",
125 "implicit",
126 "index",
127 "infix",
128 "inheritable",
129 "into",
130 "isolation",
131 "json",
132 "last",
133 "link",
134 "migration",
135 "multi",
136 "named",
137 "object",
138 "of",
139 "oids",
140 "on",
141 "only",
142 "operator",
143 "overloaded",
144 "postfix",
145 "prefix",
146 "property",
147 "pseudo",
148 "read",
149 "rename",
150 "repeatable",
151 "required",
152 "restrict",
153 "role",
154 "roles",
155 "savepoint",
156 "scalar",
157 "schema",
158 "sdl",
159 "serializable",
160 "session",
161 "single",
162 "source",
163 "superuser",
164 "system",
165 "target",
166 "ternary",
167 "text",
168 "then",
169 "to",
170 "transaction",
171 "type",
172 "using",
173 "verbose",
174 "view",
175 "write",
176 )
177 bool_literals = (
178 "false",
179 "true",
180 )
181 type_builtins = (
182 "BaseObject",
183 "Object",
184 "anyenum",
185 "anyfloat",
186 "anyint",
187 "anynumeric",
188 "anyreal",
189 "anyscalar",
190 "array",
191 "bigint",
192 "bool",
193 "bytes",
194 "datetime",
195 "decimal",
196 "duration",
197 "enum",
198 "float32",
199 "float64",
200 "int16",
201 "int32",
202 "int64",
203 "json",
204 "local_date",
205 "local_datetime",
206 "local_time",
207 "sequence",
208 "str",
209 "tuple",
210 "uuid",
211 )
212 module_builtins = (
213 "cal",
214 "cfg",
215 "math",
216 "schema",
217 "std",
218 "stdgraphql",
219 "sys",
220 )
221 constraint_builtins = (
222 "constraint",
223 "exclusive",
224 "expression",
225 "len_value",
226 "max_ex_value",
227 "max_len_value",
228 "max_value",
229 "min_ex_value",
230 "min_len_value",
231 "min_value",
232 "one_of",
233 "regexp",
234 )
235 fn_builtins = (
236 "abs",
237 "advisory_lock",
238 "advisory_unlock",
239 "advisory_unlock_all",
240 "all",
241 "any",
242 "array_agg",
243 "array_get",
244 "array_join",
245 "array_unpack",
246 "bytes_get_bit",
247 "ceil",
248 "contains",
249 "count",
250 "date_get",
251 "datetime_current",
252 "datetime_get",
253 "datetime_of_statement",
254 "datetime_of_transaction",
255 "datetime_truncate",
256 "duration_to_seconds",
257 "duration_truncate",
258 "enumerate",
259 "find",
260 "floor",
261 "get_current_database",
262 "get_transaction_isolation",
263 "get_version",
264 "get_version_as_str",
265 "json_array_unpack",
266 "json_get",
267 "json_object_unpack",
268 "json_typeof",
269 "len",
270 "lg",
271 "ln",
272 "log",
273 "max",
274 "mean",
275 "min",
276 "random",
277 "re_match",
278 "re_match_all",
279 "re_replace",
280 "re_test",
281 "round",
282 "sleep",
283 "stddev",
284 "stddev_pop",
285 "str_lower",
286 "str_lpad",
287 "str_ltrim",
288 "str_repeat",
289 "str_rpad",
290 "str_rtrim",
291 "str_split",
292 "str_title",
293 "str_trim",
294 "str_upper",
295 "sum",
296 "time_get",
297 "to_bigint",
298 "to_datetime",
299 "to_decimal",
300 "to_duration",
301 "to_float32",
302 "to_float64",
303 "to_int16",
304 "to_int32",
305 "to_int64",
306 "to_json",
307 "to_local_date",
308 "to_local_datetime",
309 "to_local_time",
310 "to_str",
311 "uuid_generate_v1mc",
312 "var",
313 "var_pop",
314 )
315 operators = (
316 "!=",
317 "%",
318 "*",
319 "+",
320 "++",
321 "-",
322 "/",
323 "//",
324 ":=",
325 "<",
326 "<=",
327 "=",
328 ">",
329 ">=",
330 "?!=",
331 "?=",
332 "??",
333 "^",
334 )
335 navigation = (
336 ".<",
337 ".>",
338 "@",
339 ".",
340 )
341
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/edb/edgeql/pygments/meta.py b/edb/edgeql/pygments/meta.py
--- a/edb/edgeql/pygments/meta.py
+++ b/edb/edgeql/pygments/meta.py
@@ -106,6 +106,7 @@
"cardinality",
"cast",
"config",
+ "conflict",
"constraint",
"current",
"database",
@@ -139,13 +140,17 @@
"oids",
"on",
"only",
+ "onto",
"operator",
"overloaded",
+ "owned",
"postfix",
"prefix",
"property",
+ "proposed",
"pseudo",
"read",
+ "reject",
"rename",
"repeatable",
"required",
@@ -169,6 +174,7 @@
"to",
"transaction",
"type",
+ "unless",
"using",
"verbose",
"view",
@@ -285,12 +291,16 @@
"str_lower",
"str_lpad",
"str_ltrim",
+ "str_pad_end",
+ "str_pad_start",
"str_repeat",
"str_rpad",
"str_rtrim",
"str_split",
"str_title",
"str_trim",
+ "str_trim_end",
+ "str_trim_start",
"str_upper",
"sum",
"time_get",
| {"golden_diff": "diff --git a/edb/edgeql/pygments/meta.py b/edb/edgeql/pygments/meta.py\n--- a/edb/edgeql/pygments/meta.py\n+++ b/edb/edgeql/pygments/meta.py\n@@ -106,6 +106,7 @@\n \"cardinality\",\n \"cast\",\n \"config\",\n+ \"conflict\",\n \"constraint\",\n \"current\",\n \"database\",\n@@ -139,13 +140,17 @@\n \"oids\",\n \"on\",\n \"only\",\n+ \"onto\",\n \"operator\",\n \"overloaded\",\n+ \"owned\",\n \"postfix\",\n \"prefix\",\n \"property\",\n+ \"proposed\",\n \"pseudo\",\n \"read\",\n+ \"reject\",\n \"rename\",\n \"repeatable\",\n \"required\",\n@@ -169,6 +174,7 @@\n \"to\",\n \"transaction\",\n \"type\",\n+ \"unless\",\n \"using\",\n \"verbose\",\n \"view\",\n@@ -285,12 +291,16 @@\n \"str_lower\",\n \"str_lpad\",\n \"str_ltrim\",\n+ \"str_pad_end\",\n+ \"str_pad_start\",\n \"str_repeat\",\n \"str_rpad\",\n \"str_rtrim\",\n \"str_split\",\n \"str_title\",\n \"str_trim\",\n+ \"str_trim_end\",\n+ \"str_trim_start\",\n \"str_upper\",\n \"sum\",\n \"time_get\",\n", "issue": "Rename some string handling functions and deprecate the old versions.\nFrom discussion in Slack a point was raised:\r\n> Just noticed something now of an engineering nature though:\r\n> https://edgedb.com/docs/edgeql/funcops/string#function::std::str_lpad\r\n> This reminded me of similar methods in Rust that were eventually deprecated in favour of calling them \"start\" and \"end\" instead of \"right\" and \"left\" due to languages that write right to left (and even up to down):\r\n> https://doc.rust-lang.org/std/primitive.str.html#method.trim_left\r\n> The new methods did the exact same thing but were just given different names, while the old method names were left alone to accommodate legacy code.\r\n\r\nThis means that we want to rename string handling functions to use \"start\" and \"end\" such as `std::str_trim_start`, etc. The old functions should remain for some time, but they need to be marked as deprecated. 
To this end we also introduce a `deprecated` annotation which can mark deprecated items as well as provide a useful note like what should be used instead.\r\n\r\nThe documentation needs to include deprecated functions, but they should also be clearly labeled as such.\n", "before_files": [{"content": "# AUTOGENERATED BY EdgeDB WITH\n# $ edb gen-meta-grammars edgeql\n\n\nfrom __future__ import annotations\n\n\nclass EdgeQL:\n reserved_keywords = (\n \"__source__\",\n \"__std__\",\n \"__subject__\",\n \"__type__\",\n \"abort\",\n \"alter\",\n \"analyze\",\n \"and\",\n \"anyarray\",\n \"anytuple\",\n \"anytype\",\n \"begin\",\n \"case\",\n \"check\",\n \"commit\",\n \"configure\",\n \"create\",\n \"deallocate\",\n \"declare\",\n \"delete\",\n \"describe\",\n \"detached\",\n \"discard\",\n \"distinct\",\n \"do\",\n \"drop\",\n \"else\",\n \"empty\",\n \"end\",\n \"execute\",\n \"exists\",\n \"explain\",\n \"extending\",\n \"fetch\",\n \"filter\",\n \"for\",\n \"get\",\n \"global\",\n \"grant\",\n \"group\",\n \"if\",\n \"ilike\",\n \"import\",\n \"in\",\n \"insert\",\n \"introspect\",\n \"is\",\n \"like\",\n \"limit\",\n \"listen\",\n \"load\",\n \"lock\",\n \"match\",\n \"module\",\n \"move\",\n \"not\",\n \"notify\",\n \"offset\",\n \"optional\",\n \"or\",\n \"order\",\n \"over\",\n \"partition\",\n \"policy\",\n \"populate\",\n \"prepare\",\n \"raise\",\n \"refresh\",\n \"reindex\",\n \"release\",\n \"reset\",\n \"revoke\",\n \"rollback\",\n \"select\",\n \"set\",\n \"start\",\n \"typeof\",\n \"union\",\n \"update\",\n \"variadic\",\n \"when\",\n \"window\",\n \"with\",\n )\n unreserved_keywords = (\n \"abstract\",\n \"after\",\n \"alias\",\n \"all\",\n \"allow\",\n \"annotation\",\n \"as\",\n \"asc\",\n \"assignment\",\n \"before\",\n \"by\",\n \"cardinality\",\n \"cast\",\n \"config\",\n \"constraint\",\n \"current\",\n \"database\",\n \"ddl\",\n \"default\",\n \"deferrable\",\n \"deferred\",\n \"delegated\",\n \"desc\",\n \"emit\",\n \"explicit\",\n \"expression\",\n \"final\",\n \"first\",\n \"from\",\n \"function\",\n \"implicit\",\n \"index\",\n \"infix\",\n \"inheritable\",\n \"into\",\n \"isolation\",\n \"json\",\n \"last\",\n \"link\",\n \"migration\",\n \"multi\",\n \"named\",\n \"object\",\n \"of\",\n \"oids\",\n \"on\",\n \"only\",\n \"operator\",\n \"overloaded\",\n \"postfix\",\n \"prefix\",\n \"property\",\n \"pseudo\",\n \"read\",\n \"rename\",\n \"repeatable\",\n \"required\",\n \"restrict\",\n \"role\",\n \"roles\",\n \"savepoint\",\n \"scalar\",\n \"schema\",\n \"sdl\",\n \"serializable\",\n \"session\",\n \"single\",\n \"source\",\n \"superuser\",\n \"system\",\n \"target\",\n \"ternary\",\n \"text\",\n \"then\",\n \"to\",\n \"transaction\",\n \"type\",\n \"using\",\n \"verbose\",\n \"view\",\n \"write\",\n )\n bool_literals = (\n \"false\",\n \"true\",\n )\n type_builtins = (\n \"BaseObject\",\n \"Object\",\n \"anyenum\",\n \"anyfloat\",\n \"anyint\",\n \"anynumeric\",\n \"anyreal\",\n \"anyscalar\",\n \"array\",\n \"bigint\",\n \"bool\",\n \"bytes\",\n \"datetime\",\n \"decimal\",\n \"duration\",\n \"enum\",\n \"float32\",\n \"float64\",\n \"int16\",\n \"int32\",\n \"int64\",\n \"json\",\n \"local_date\",\n \"local_datetime\",\n \"local_time\",\n \"sequence\",\n \"str\",\n \"tuple\",\n \"uuid\",\n )\n module_builtins = (\n \"cal\",\n \"cfg\",\n \"math\",\n \"schema\",\n \"std\",\n \"stdgraphql\",\n \"sys\",\n )\n constraint_builtins = (\n \"constraint\",\n \"exclusive\",\n \"expression\",\n \"len_value\",\n \"max_ex_value\",\n \"max_len_value\",\n \"max_value\",\n 
\"min_ex_value\",\n \"min_len_value\",\n \"min_value\",\n \"one_of\",\n \"regexp\",\n )\n fn_builtins = (\n \"abs\",\n \"advisory_lock\",\n \"advisory_unlock\",\n \"advisory_unlock_all\",\n \"all\",\n \"any\",\n \"array_agg\",\n \"array_get\",\n \"array_join\",\n \"array_unpack\",\n \"bytes_get_bit\",\n \"ceil\",\n \"contains\",\n \"count\",\n \"date_get\",\n \"datetime_current\",\n \"datetime_get\",\n \"datetime_of_statement\",\n \"datetime_of_transaction\",\n \"datetime_truncate\",\n \"duration_to_seconds\",\n \"duration_truncate\",\n \"enumerate\",\n \"find\",\n \"floor\",\n \"get_current_database\",\n \"get_transaction_isolation\",\n \"get_version\",\n \"get_version_as_str\",\n \"json_array_unpack\",\n \"json_get\",\n \"json_object_unpack\",\n \"json_typeof\",\n \"len\",\n \"lg\",\n \"ln\",\n \"log\",\n \"max\",\n \"mean\",\n \"min\",\n \"random\",\n \"re_match\",\n \"re_match_all\",\n \"re_replace\",\n \"re_test\",\n \"round\",\n \"sleep\",\n \"stddev\",\n \"stddev_pop\",\n \"str_lower\",\n \"str_lpad\",\n \"str_ltrim\",\n \"str_repeat\",\n \"str_rpad\",\n \"str_rtrim\",\n \"str_split\",\n \"str_title\",\n \"str_trim\",\n \"str_upper\",\n \"sum\",\n \"time_get\",\n \"to_bigint\",\n \"to_datetime\",\n \"to_decimal\",\n \"to_duration\",\n \"to_float32\",\n \"to_float64\",\n \"to_int16\",\n \"to_int32\",\n \"to_int64\",\n \"to_json\",\n \"to_local_date\",\n \"to_local_datetime\",\n \"to_local_time\",\n \"to_str\",\n \"uuid_generate_v1mc\",\n \"var\",\n \"var_pop\",\n )\n operators = (\n \"!=\",\n \"%\",\n \"*\",\n \"+\",\n \"++\",\n \"-\",\n \"/\",\n \"//\",\n \":=\",\n \"<\",\n \"<=\",\n \"=\",\n \">\",\n \">=\",\n \"?!=\",\n \"?=\",\n \"??\",\n \"^\",\n )\n navigation = (\n \".<\",\n \".>\",\n \"@\",\n \".\",\n )\n", "path": "edb/edgeql/pygments/meta.py"}], "after_files": [{"content": "# AUTOGENERATED BY EdgeDB WITH\n# $ edb gen-meta-grammars edgeql\n\n\nfrom __future__ import annotations\n\n\nclass EdgeQL:\n reserved_keywords = (\n \"__source__\",\n \"__std__\",\n \"__subject__\",\n \"__type__\",\n \"abort\",\n \"alter\",\n \"analyze\",\n \"and\",\n \"anyarray\",\n \"anytuple\",\n \"anytype\",\n \"begin\",\n \"case\",\n \"check\",\n \"commit\",\n \"configure\",\n \"create\",\n \"deallocate\",\n \"declare\",\n \"delete\",\n \"describe\",\n \"detached\",\n \"discard\",\n \"distinct\",\n \"do\",\n \"drop\",\n \"else\",\n \"empty\",\n \"end\",\n \"execute\",\n \"exists\",\n \"explain\",\n \"extending\",\n \"fetch\",\n \"filter\",\n \"for\",\n \"get\",\n \"global\",\n \"grant\",\n \"group\",\n \"if\",\n \"ilike\",\n \"import\",\n \"in\",\n \"insert\",\n \"introspect\",\n \"is\",\n \"like\",\n \"limit\",\n \"listen\",\n \"load\",\n \"lock\",\n \"match\",\n \"module\",\n \"move\",\n \"not\",\n \"notify\",\n \"offset\",\n \"optional\",\n \"or\",\n \"order\",\n \"over\",\n \"partition\",\n \"policy\",\n \"populate\",\n \"prepare\",\n \"raise\",\n \"refresh\",\n \"reindex\",\n \"release\",\n \"reset\",\n \"revoke\",\n \"rollback\",\n \"select\",\n \"set\",\n \"start\",\n \"typeof\",\n \"union\",\n \"update\",\n \"variadic\",\n \"when\",\n \"window\",\n \"with\",\n )\n unreserved_keywords = (\n \"abstract\",\n \"after\",\n \"alias\",\n \"all\",\n \"allow\",\n \"annotation\",\n \"as\",\n \"asc\",\n \"assignment\",\n \"before\",\n \"by\",\n \"cardinality\",\n \"cast\",\n \"config\",\n \"conflict\",\n \"constraint\",\n \"current\",\n \"database\",\n \"ddl\",\n \"default\",\n \"deferrable\",\n \"deferred\",\n \"delegated\",\n \"desc\",\n \"emit\",\n \"explicit\",\n \"expression\",\n \"final\",\n 
\"first\",\n \"from\",\n \"function\",\n \"implicit\",\n \"index\",\n \"infix\",\n \"inheritable\",\n \"into\",\n \"isolation\",\n \"json\",\n \"last\",\n \"link\",\n \"migration\",\n \"multi\",\n \"named\",\n \"object\",\n \"of\",\n \"oids\",\n \"on\",\n \"only\",\n \"onto\",\n \"operator\",\n \"overloaded\",\n \"owned\",\n \"postfix\",\n \"prefix\",\n \"property\",\n \"proposed\",\n \"pseudo\",\n \"read\",\n \"reject\",\n \"rename\",\n \"repeatable\",\n \"required\",\n \"restrict\",\n \"role\",\n \"roles\",\n \"savepoint\",\n \"scalar\",\n \"schema\",\n \"sdl\",\n \"serializable\",\n \"session\",\n \"single\",\n \"source\",\n \"superuser\",\n \"system\",\n \"target\",\n \"ternary\",\n \"text\",\n \"then\",\n \"to\",\n \"transaction\",\n \"type\",\n \"unless\",\n \"using\",\n \"verbose\",\n \"view\",\n \"write\",\n )\n bool_literals = (\n \"false\",\n \"true\",\n )\n type_builtins = (\n \"BaseObject\",\n \"Object\",\n \"anyenum\",\n \"anyfloat\",\n \"anyint\",\n \"anynumeric\",\n \"anyreal\",\n \"anyscalar\",\n \"array\",\n \"bigint\",\n \"bool\",\n \"bytes\",\n \"datetime\",\n \"decimal\",\n \"duration\",\n \"enum\",\n \"float32\",\n \"float64\",\n \"int16\",\n \"int32\",\n \"int64\",\n \"json\",\n \"local_date\",\n \"local_datetime\",\n \"local_time\",\n \"sequence\",\n \"str\",\n \"tuple\",\n \"uuid\",\n )\n module_builtins = (\n \"cal\",\n \"cfg\",\n \"math\",\n \"schema\",\n \"std\",\n \"stdgraphql\",\n \"sys\",\n )\n constraint_builtins = (\n \"constraint\",\n \"exclusive\",\n \"expression\",\n \"len_value\",\n \"max_ex_value\",\n \"max_len_value\",\n \"max_value\",\n \"min_ex_value\",\n \"min_len_value\",\n \"min_value\",\n \"one_of\",\n \"regexp\",\n )\n fn_builtins = (\n \"abs\",\n \"advisory_lock\",\n \"advisory_unlock\",\n \"advisory_unlock_all\",\n \"all\",\n \"any\",\n \"array_agg\",\n \"array_get\",\n \"array_join\",\n \"array_unpack\",\n \"bytes_get_bit\",\n \"ceil\",\n \"contains\",\n \"count\",\n \"date_get\",\n \"datetime_current\",\n \"datetime_get\",\n \"datetime_of_statement\",\n \"datetime_of_transaction\",\n \"datetime_truncate\",\n \"duration_to_seconds\",\n \"duration_truncate\",\n \"enumerate\",\n \"find\",\n \"floor\",\n \"get_current_database\",\n \"get_transaction_isolation\",\n \"get_version\",\n \"get_version_as_str\",\n \"json_array_unpack\",\n \"json_get\",\n \"json_object_unpack\",\n \"json_typeof\",\n \"len\",\n \"lg\",\n \"ln\",\n \"log\",\n \"max\",\n \"mean\",\n \"min\",\n \"random\",\n \"re_match\",\n \"re_match_all\",\n \"re_replace\",\n \"re_test\",\n \"round\",\n \"sleep\",\n \"stddev\",\n \"stddev_pop\",\n \"str_lower\",\n \"str_lpad\",\n \"str_ltrim\",\n \"str_pad_end\",\n \"str_pad_start\",\n \"str_repeat\",\n \"str_rpad\",\n \"str_rtrim\",\n \"str_split\",\n \"str_title\",\n \"str_trim\",\n \"str_trim_end\",\n \"str_trim_start\",\n \"str_upper\",\n \"sum\",\n \"time_get\",\n \"to_bigint\",\n \"to_datetime\",\n \"to_decimal\",\n \"to_duration\",\n \"to_float32\",\n \"to_float64\",\n \"to_int16\",\n \"to_int32\",\n \"to_int64\",\n \"to_json\",\n \"to_local_date\",\n \"to_local_datetime\",\n \"to_local_time\",\n \"to_str\",\n \"uuid_generate_v1mc\",\n \"var\",\n \"var_pop\",\n )\n operators = (\n \"!=\",\n \"%\",\n \"*\",\n \"+\",\n \"++\",\n \"-\",\n \"/\",\n \"//\",\n \":=\",\n \"<\",\n \"<=\",\n \"=\",\n \">\",\n \">=\",\n \"?!=\",\n \"?=\",\n \"??\",\n \"^\",\n )\n navigation = (\n \".<\",\n \".>\",\n \"@\",\n \".\",\n )\n", "path": "edb/edgeql/pygments/meta.py"}]} | 2,944 | 335 |
gh_patches_debug_16886 | rasdani/github-patches | git_diff | urllib3__urllib3-2335 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add decode_content parameter to top-level APIs
Like the title says, add `decode_content` to the top-level APIs `urllib3.request()`.
See https://github.com/urllib3/urllib3/commit/ddf7361ac0467431a2f3df6ba346c9c506c29d56 for an example.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/urllib3/__init__.py`
Content:
```
1 """
2 Python HTTP library with thread-safe connection pooling, file post support, user friendly, and more
3 """
4
5 # Set default logging handler to avoid "No handler found" warnings.
6 import logging
7 import warnings
8 from logging import NullHandler
9 from typing import Mapping, Optional, Type, Union
10
11 from . import exceptions
12 from ._collections import HTTPHeaderDict
13 from ._version import __version__
14 from .connection import _TYPE_BODY
15 from .connectionpool import HTTPConnectionPool, HTTPSConnectionPool, connection_from_url
16 from .filepost import _TYPE_FIELDS, encode_multipart_formdata
17 from .poolmanager import PoolManager, ProxyManager, proxy_from_url
18 from .response import BaseHTTPResponse, HTTPResponse
19 from .util.request import make_headers
20 from .util.retry import Retry
21 from .util.timeout import Timeout
22
23 __author__ = "Andrey Petrov ([email protected])"
24 __license__ = "MIT"
25 __version__ = __version__
26
27 __all__ = (
28 "HTTPConnectionPool",
29 "HTTPHeaderDict",
30 "HTTPSConnectionPool",
31 "PoolManager",
32 "ProxyManager",
33 "HTTPResponse",
34 "Retry",
35 "Timeout",
36 "add_stderr_logger",
37 "connection_from_url",
38 "disable_warnings",
39 "encode_multipart_formdata",
40 "make_headers",
41 "proxy_from_url",
42 "request",
43 )
44
45 logging.getLogger(__name__).addHandler(NullHandler())
46
47
48 def add_stderr_logger(level: int = logging.DEBUG) -> logging.StreamHandler:
49 """
50 Helper for quickly adding a StreamHandler to the logger. Useful for
51 debugging.
52
53 Returns the handler after adding it.
54 """
55 # This method needs to be in this __init__.py to get the __name__ correct
56 # even if urllib3 is vendored within another package.
57 logger = logging.getLogger(__name__)
58 handler = logging.StreamHandler()
59 handler.setFormatter(logging.Formatter("%(asctime)s %(levelname)s %(message)s"))
60 logger.addHandler(handler)
61 logger.setLevel(level)
62 logger.debug("Added a stderr logging handler to logger: %s", __name__)
63 return handler
64
65
66 # ... Clean up.
67 del NullHandler
68
69
70 # All warning filters *must* be appended unless you're really certain that they
71 # shouldn't be: otherwise, it's very hard for users to use most Python
72 # mechanisms to silence them.
73 # SecurityWarning's always go off by default.
74 warnings.simplefilter("always", exceptions.SecurityWarning, append=True)
75 # InsecurePlatformWarning's don't vary between requests, so we keep it default.
76 warnings.simplefilter("default", exceptions.InsecurePlatformWarning, append=True)
77 # SNIMissingWarnings should go off only once.
78 warnings.simplefilter("default", exceptions.SNIMissingWarning, append=True)
79
80
81 def disable_warnings(category: Type[Warning] = exceptions.HTTPWarning) -> None:
82 """
83 Helper for quickly disabling all urllib3 warnings.
84 """
85 warnings.simplefilter("ignore", category)
86
87
88 _DEFAULT_POOL = PoolManager()
89
90
91 def request(
92 method: str,
93 url: str,
94 *,
95 body: Optional[_TYPE_BODY] = None,
96 fields: Optional[_TYPE_FIELDS] = None,
97 headers: Optional[Mapping[str, str]] = None,
98 preload_content: Optional[bool] = True,
99 redirect: Optional[bool] = True,
100 retries: Optional[Union[Retry, bool, int]] = None,
101 timeout: Optional[Union[Timeout, float, int]] = 3,
102 ) -> BaseHTTPResponse:
103 """
104 A convenience, top-level request method. It uses a module-global ``PoolManager`` instance.
105 Therefore, its side effects could be shared across dependencies relying on it.
106 To avoid side effects create a new ``PoolManager`` instance and use it instead.
107 The method does not accept low-level ``**urlopen_kw`` keyword arguments.
108 """
109
110 return _DEFAULT_POOL.request(
111 method,
112 url,
113 body=body,
114 fields=fields,
115 headers=headers,
116 preload_content=preload_content,
117 redirect=redirect,
118 retries=retries,
119 timeout=timeout,
120 )
121
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/urllib3/__init__.py b/src/urllib3/__init__.py
--- a/src/urllib3/__init__.py
+++ b/src/urllib3/__init__.py
@@ -96,6 +96,7 @@
fields: Optional[_TYPE_FIELDS] = None,
headers: Optional[Mapping[str, str]] = None,
preload_content: Optional[bool] = True,
+ decode_content: Optional[bool] = True,
redirect: Optional[bool] = True,
retries: Optional[Union[Retry, bool, int]] = None,
timeout: Optional[Union[Timeout, float, int]] = 3,
@@ -114,6 +115,7 @@
fields=fields,
headers=headers,
preload_content=preload_content,
+ decode_content=decode_content,
redirect=redirect,
retries=retries,
timeout=timeout,
| {"golden_diff": "diff --git a/src/urllib3/__init__.py b/src/urllib3/__init__.py\n--- a/src/urllib3/__init__.py\n+++ b/src/urllib3/__init__.py\n@@ -96,6 +96,7 @@\n fields: Optional[_TYPE_FIELDS] = None,\n headers: Optional[Mapping[str, str]] = None,\n preload_content: Optional[bool] = True,\n+ decode_content: Optional[bool] = True,\n redirect: Optional[bool] = True,\n retries: Optional[Union[Retry, bool, int]] = None,\n timeout: Optional[Union[Timeout, float, int]] = 3,\n@@ -114,6 +115,7 @@\n fields=fields,\n headers=headers,\n preload_content=preload_content,\n+ decode_content=decode_content,\n redirect=redirect,\n retries=retries,\n timeout=timeout,\n", "issue": "Add decode_content parameter to top-level APIs\nLike the title says, add `decode_content` to the top-level APIs `urllib3.request()`.\r\n\r\nSee https://github.com/urllib3/urllib3/commit/ddf7361ac0467431a2f3df6ba346c9c506c29d56 for an example.\n", "before_files": [{"content": "\"\"\"\nPython HTTP library with thread-safe connection pooling, file post support, user friendly, and more\n\"\"\"\n\n# Set default logging handler to avoid \"No handler found\" warnings.\nimport logging\nimport warnings\nfrom logging import NullHandler\nfrom typing import Mapping, Optional, Type, Union\n\nfrom . import exceptions\nfrom ._collections import HTTPHeaderDict\nfrom ._version import __version__\nfrom .connection import _TYPE_BODY\nfrom .connectionpool import HTTPConnectionPool, HTTPSConnectionPool, connection_from_url\nfrom .filepost import _TYPE_FIELDS, encode_multipart_formdata\nfrom .poolmanager import PoolManager, ProxyManager, proxy_from_url\nfrom .response import BaseHTTPResponse, HTTPResponse\nfrom .util.request import make_headers\nfrom .util.retry import Retry\nfrom .util.timeout import Timeout\n\n__author__ = \"Andrey Petrov ([email protected])\"\n__license__ = \"MIT\"\n__version__ = __version__\n\n__all__ = (\n \"HTTPConnectionPool\",\n \"HTTPHeaderDict\",\n \"HTTPSConnectionPool\",\n \"PoolManager\",\n \"ProxyManager\",\n \"HTTPResponse\",\n \"Retry\",\n \"Timeout\",\n \"add_stderr_logger\",\n \"connection_from_url\",\n \"disable_warnings\",\n \"encode_multipart_formdata\",\n \"make_headers\",\n \"proxy_from_url\",\n \"request\",\n)\n\nlogging.getLogger(__name__).addHandler(NullHandler())\n\n\ndef add_stderr_logger(level: int = logging.DEBUG) -> logging.StreamHandler:\n \"\"\"\n Helper for quickly adding a StreamHandler to the logger. Useful for\n debugging.\n\n Returns the handler after adding it.\n \"\"\"\n # This method needs to be in this __init__.py to get the __name__ correct\n # even if urllib3 is vendored within another package.\n logger = logging.getLogger(__name__)\n handler = logging.StreamHandler()\n handler.setFormatter(logging.Formatter(\"%(asctime)s %(levelname)s %(message)s\"))\n logger.addHandler(handler)\n logger.setLevel(level)\n logger.debug(\"Added a stderr logging handler to logger: %s\", __name__)\n return handler\n\n\n# ... 
Clean up.\ndel NullHandler\n\n\n# All warning filters *must* be appended unless you're really certain that they\n# shouldn't be: otherwise, it's very hard for users to use most Python\n# mechanisms to silence them.\n# SecurityWarning's always go off by default.\nwarnings.simplefilter(\"always\", exceptions.SecurityWarning, append=True)\n# InsecurePlatformWarning's don't vary between requests, so we keep it default.\nwarnings.simplefilter(\"default\", exceptions.InsecurePlatformWarning, append=True)\n# SNIMissingWarnings should go off only once.\nwarnings.simplefilter(\"default\", exceptions.SNIMissingWarning, append=True)\n\n\ndef disable_warnings(category: Type[Warning] = exceptions.HTTPWarning) -> None:\n \"\"\"\n Helper for quickly disabling all urllib3 warnings.\n \"\"\"\n warnings.simplefilter(\"ignore\", category)\n\n\n_DEFAULT_POOL = PoolManager()\n\n\ndef request(\n method: str,\n url: str,\n *,\n body: Optional[_TYPE_BODY] = None,\n fields: Optional[_TYPE_FIELDS] = None,\n headers: Optional[Mapping[str, str]] = None,\n preload_content: Optional[bool] = True,\n redirect: Optional[bool] = True,\n retries: Optional[Union[Retry, bool, int]] = None,\n timeout: Optional[Union[Timeout, float, int]] = 3,\n) -> BaseHTTPResponse:\n \"\"\"\n A convenience, top-level request method. It uses a module-global ``PoolManager`` instance.\n Therefore, its side effects could be shared across dependencies relying on it.\n To avoid side effects create a new ``PoolManager`` instance and use it instead.\n The method does not accept low-level ``**urlopen_kw`` keyword arguments.\n \"\"\"\n\n return _DEFAULT_POOL.request(\n method,\n url,\n body=body,\n fields=fields,\n headers=headers,\n preload_content=preload_content,\n redirect=redirect,\n retries=retries,\n timeout=timeout,\n )\n", "path": "src/urllib3/__init__.py"}], "after_files": [{"content": "\"\"\"\nPython HTTP library with thread-safe connection pooling, file post support, user friendly, and more\n\"\"\"\n\n# Set default logging handler to avoid \"No handler found\" warnings.\nimport logging\nimport warnings\nfrom logging import NullHandler\nfrom typing import Mapping, Optional, Type, Union\n\nfrom . import exceptions\nfrom ._collections import HTTPHeaderDict\nfrom ._version import __version__\nfrom .connection import _TYPE_BODY\nfrom .connectionpool import HTTPConnectionPool, HTTPSConnectionPool, connection_from_url\nfrom .filepost import _TYPE_FIELDS, encode_multipart_formdata\nfrom .poolmanager import PoolManager, ProxyManager, proxy_from_url\nfrom .response import BaseHTTPResponse, HTTPResponse\nfrom .util.request import make_headers\nfrom .util.retry import Retry\nfrom .util.timeout import Timeout\n\n__author__ = \"Andrey Petrov ([email protected])\"\n__license__ = \"MIT\"\n__version__ = __version__\n\n__all__ = (\n \"HTTPConnectionPool\",\n \"HTTPHeaderDict\",\n \"HTTPSConnectionPool\",\n \"PoolManager\",\n \"ProxyManager\",\n \"HTTPResponse\",\n \"Retry\",\n \"Timeout\",\n \"add_stderr_logger\",\n \"connection_from_url\",\n \"disable_warnings\",\n \"encode_multipart_formdata\",\n \"make_headers\",\n \"proxy_from_url\",\n \"request\",\n)\n\nlogging.getLogger(__name__).addHandler(NullHandler())\n\n\ndef add_stderr_logger(level: int = logging.DEBUG) -> logging.StreamHandler:\n \"\"\"\n Helper for quickly adding a StreamHandler to the logger. 
Useful for\n debugging.\n\n Returns the handler after adding it.\n \"\"\"\n # This method needs to be in this __init__.py to get the __name__ correct\n # even if urllib3 is vendored within another package.\n logger = logging.getLogger(__name__)\n handler = logging.StreamHandler()\n handler.setFormatter(logging.Formatter(\"%(asctime)s %(levelname)s %(message)s\"))\n logger.addHandler(handler)\n logger.setLevel(level)\n logger.debug(\"Added a stderr logging handler to logger: %s\", __name__)\n return handler\n\n\n# ... Clean up.\ndel NullHandler\n\n\n# All warning filters *must* be appended unless you're really certain that they\n# shouldn't be: otherwise, it's very hard for users to use most Python\n# mechanisms to silence them.\n# SecurityWarning's always go off by default.\nwarnings.simplefilter(\"always\", exceptions.SecurityWarning, append=True)\n# InsecurePlatformWarning's don't vary between requests, so we keep it default.\nwarnings.simplefilter(\"default\", exceptions.InsecurePlatformWarning, append=True)\n# SNIMissingWarnings should go off only once.\nwarnings.simplefilter(\"default\", exceptions.SNIMissingWarning, append=True)\n\n\ndef disable_warnings(category: Type[Warning] = exceptions.HTTPWarning) -> None:\n \"\"\"\n Helper for quickly disabling all urllib3 warnings.\n \"\"\"\n warnings.simplefilter(\"ignore\", category)\n\n\n_DEFAULT_POOL = PoolManager()\n\n\ndef request(\n method: str,\n url: str,\n *,\n body: Optional[_TYPE_BODY] = None,\n fields: Optional[_TYPE_FIELDS] = None,\n headers: Optional[Mapping[str, str]] = None,\n preload_content: Optional[bool] = True,\n decode_content: Optional[bool] = True,\n redirect: Optional[bool] = True,\n retries: Optional[Union[Retry, bool, int]] = None,\n timeout: Optional[Union[Timeout, float, int]] = 3,\n) -> BaseHTTPResponse:\n \"\"\"\n A convenience, top-level request method. It uses a module-global ``PoolManager`` instance.\n Therefore, its side effects could be shared across dependencies relying on it.\n To avoid side effects create a new ``PoolManager`` instance and use it instead.\n The method does not accept low-level ``**urlopen_kw`` keyword arguments.\n \"\"\"\n\n return _DEFAULT_POOL.request(\n method,\n url,\n body=body,\n fields=fields,\n headers=headers,\n preload_content=preload_content,\n decode_content=decode_content,\n redirect=redirect,\n retries=retries,\n timeout=timeout,\n )\n", "path": "src/urllib3/__init__.py"}]} | 1,480 | 203 |
gh_patches_debug_24800 | rasdani/github-patches | git_diff | cookiecutter__cookiecutter-1493 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Need help with generating GUID/UUID values for context variables
* Cookiecutter version: 1.6
* Template project url: none
* Python version: 3.7 (virtual env created using win python 3.7 x64)
* Operating System: Windows 10, 64 Bit
### Description:
First off, many thanks for creating this project!
Here is some context of what I am trying to do and where I need some guidance
* I am trying to use CC to generate a new Visual Studio 2017 solution and project files with a particular folder/file organization that I like
* I was able to get most of it working, except for the below:
* Parts of the above project and solution files involve generating several unique GUIDs
* my first approach was creating a `pre_gen_project.py` inside the `hooks` folder and updating/creating new variables that could be added to the ones loaded from `cookiecutter.json` or entered by the user
* I was, however, blocked as I could not figure out how to access the context being used by CC and the jinja2 engine
* I proceeded to go over the many issues on GitHub and found some related ones, like the following: #60, #102, #180, #288, but no clear answer on how to achieve what I'd like
* I also followed some other issues that suggested creating a custom jinja2 extension/filter (#944), but I couldn't figure out how or where to put them in the template folder so that cookiecutter.exe can identify them and pick them up
* Lastly, I also went over the CC source code and tried to create a new executable from my script (similar to `cli.py`) that passes the GUIDs via the `extra_context` argument of `cookiecutter.main(...)`, but ran into some other problems that I am still trying to figure out
Appreciate any pointers on how I can inject GUID values for the context variables
--- END ISSUE ---
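As a point of reference, a minimal sketch of the `extra_context` workaround mentioned in the issue above (the template path and variable names are hypothetical, and the same keys would still need to be declared in the template's `cookiecutter.json`):

```python
import uuid

from cookiecutter.main import cookiecutter

# Pre-generate the GUIDs in plain Python and inject them as ordinary context variables.
guids = {f"project_guid_{i}": str(uuid.uuid4()).upper() for i in range(3)}
cookiecutter("path/to/vs2017-template", no_input=True, extra_context=guids)
```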
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cookiecutter/extensions.py`
Content:
```
1 """Jinja2 extensions."""
2 import json
3 import string
4 from secrets import choice
5
6 from jinja2.ext import Extension
7 from slugify import slugify as pyslugify
8
9
10 class JsonifyExtension(Extension):
11 """Jinja2 extension to convert a Python object to JSON."""
12
13 def __init__(self, environment):
14 """Initialize the extension with the given environment."""
15 super(JsonifyExtension, self).__init__(environment)
16
17 def jsonify(obj):
18 return json.dumps(obj, sort_keys=True, indent=4)
19
20 environment.filters['jsonify'] = jsonify
21
22
23 class RandomStringExtension(Extension):
24 """Jinja2 extension to create a random string."""
25
26 def __init__(self, environment):
27 """Jinja2 Extension Constructor."""
28 super(RandomStringExtension, self).__init__(environment)
29
30 def random_ascii_string(length, punctuation=False):
31 if punctuation:
32 corpus = "".join((string.ascii_letters, string.punctuation))
33 else:
34 corpus = string.ascii_letters
35 return "".join(choice(corpus) for _ in range(length))
36
37 environment.globals.update(random_ascii_string=random_ascii_string)
38
39
40 class SlugifyExtension(Extension):
41 """Jinja2 Extension to slugify string."""
42
43 def __init__(self, environment):
44 """Jinja2 Extension constructor."""
45 super(SlugifyExtension, self).__init__(environment)
46
47 def slugify(value, **kwargs):
48 """Slugifies the value."""
49 return pyslugify(value, **kwargs)
50
51 environment.filters['slugify'] = slugify
52
```
Path: `cookiecutter/environment.py`
Content:
```
1 """Jinja2 environment and extensions loading."""
2 from jinja2 import Environment, StrictUndefined
3
4 from cookiecutter.exceptions import UnknownExtension
5
6
7 class ExtensionLoaderMixin(object):
8 """Mixin providing sane loading of extensions specified in a given context.
9
10 The context is being extracted from the keyword arguments before calling
11 the next parent class in line of the child.
12 """
13
14 def __init__(self, **kwargs):
15 """Initialize the Jinja2 Environment object while loading extensions.
16
17 Does the following:
18
19 1. Establishes default_extensions (currently just a Time feature)
20 2. Reads extensions set in the cookiecutter.json _extensions key.
21 3. Attempts to load the extensions. Provides useful error if fails.
22 """
23 context = kwargs.pop('context', {})
24
25 default_extensions = [
26 'cookiecutter.extensions.JsonifyExtension',
27 'cookiecutter.extensions.RandomStringExtension',
28 'cookiecutter.extensions.SlugifyExtension',
29 'jinja2_time.TimeExtension',
30 ]
31 extensions = default_extensions + self._read_extensions(context)
32
33 try:
34 super(ExtensionLoaderMixin, self).__init__(extensions=extensions, **kwargs)
35 except ImportError as err:
36 raise UnknownExtension('Unable to load extension: {}'.format(err))
37
38 def _read_extensions(self, context):
39 """Return list of extensions as str to be passed on to the Jinja2 env.
40
41 If context does not contain the relevant info, return an empty
42 list instead.
43 """
44 try:
45 extensions = context['cookiecutter']['_extensions']
46 except KeyError:
47 return []
48 else:
49 return [str(ext) for ext in extensions]
50
51
52 class StrictEnvironment(ExtensionLoaderMixin, Environment):
53 """Create strict Jinja2 environment.
54
55 Jinja2 environment will raise error on undefined variable in template-
56 rendering context.
57 """
58
59 def __init__(self, **kwargs):
60 """Set the standard Cookiecutter StrictEnvironment.
61
62 Also loading extensions defined in cookiecutter.json's _extensions key.
63 """
64 super(StrictEnvironment, self).__init__(undefined=StrictUndefined, **kwargs)
65
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/cookiecutter/environment.py b/cookiecutter/environment.py
--- a/cookiecutter/environment.py
+++ b/cookiecutter/environment.py
@@ -26,6 +26,7 @@
'cookiecutter.extensions.JsonifyExtension',
'cookiecutter.extensions.RandomStringExtension',
'cookiecutter.extensions.SlugifyExtension',
+ 'cookiecutter.extensions.UUIDExtension',
'jinja2_time.TimeExtension',
]
extensions = default_extensions + self._read_extensions(context)
diff --git a/cookiecutter/extensions.py b/cookiecutter/extensions.py
--- a/cookiecutter/extensions.py
+++ b/cookiecutter/extensions.py
@@ -1,6 +1,7 @@
"""Jinja2 extensions."""
import json
import string
+import uuid
from secrets import choice
from jinja2.ext import Extension
@@ -49,3 +50,17 @@
return pyslugify(value, **kwargs)
environment.filters['slugify'] = slugify
+
+
+class UUIDExtension(Extension):
+ """Jinja2 Extension to generate uuid4 string."""
+
+ def __init__(self, environment):
+ """Jinja2 Extension constructor."""
+ super(UUIDExtension, self).__init__(environment)
+
+ def uuid4():
+ """Generate UUID4."""
+ return str(uuid.uuid4())
+
+ environment.globals.update(uuid4=uuid4)
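Because the diff registers `UUIDExtension` among the default extensions, templates get the `uuid4()` global without listing anything under `_extensions`. A quick way to exercise it, assuming a cookiecutter install that includes this patch:

```python
from cookiecutter.environment import StrictEnvironment

env = StrictEnvironment(context={})
tmpl = env.from_string("ProjectGuid: {{ uuid4() }}")
print(tmpl.render())  # e.g. "ProjectGuid: 1f0e6f4b-8c8e-4d4a-9b1a-0d2f3c4e5a6b"
```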
| {"golden_diff": "diff --git a/cookiecutter/environment.py b/cookiecutter/environment.py\n--- a/cookiecutter/environment.py\n+++ b/cookiecutter/environment.py\n@@ -26,6 +26,7 @@\n 'cookiecutter.extensions.JsonifyExtension',\n 'cookiecutter.extensions.RandomStringExtension',\n 'cookiecutter.extensions.SlugifyExtension',\n+ 'cookiecutter.extensions.UUIDExtension',\n 'jinja2_time.TimeExtension',\n ]\n extensions = default_extensions + self._read_extensions(context)\ndiff --git a/cookiecutter/extensions.py b/cookiecutter/extensions.py\n--- a/cookiecutter/extensions.py\n+++ b/cookiecutter/extensions.py\n@@ -1,6 +1,7 @@\n \"\"\"Jinja2 extensions.\"\"\"\n import json\n import string\n+import uuid\n from secrets import choice\n \n from jinja2.ext import Extension\n@@ -49,3 +50,17 @@\n return pyslugify(value, **kwargs)\n \n environment.filters['slugify'] = slugify\n+\n+\n+class UUIDExtension(Extension):\n+ \"\"\"Jinja2 Extension to generate uuid4 string.\"\"\"\n+\n+ def __init__(self, environment):\n+ \"\"\"Jinja2 Extension constructor.\"\"\"\n+ super(UUIDExtension, self).__init__(environment)\n+\n+ def uuid4():\n+ \"\"\"Generate UUID4.\"\"\"\n+ return str(uuid.uuid4())\n+\n+ environment.globals.update(uuid4=uuid4)\n", "issue": "Need help with generating GUID/UUID values for context variables\n* Cookiecutter version: 1.6\r\n* Template project url: none\r\n* Python version: 3.7 (virtual env created using win python 3.7 x64)\r\n* Operating System: Windows 10, 64 Bit\r\n\r\n### Description:\r\n\r\nFirst off many thanks for creating this project !\r\nHere is some context of what I am trying to do and where I need some guidance\r\n* I am trying to use CC to generate new a Visual Studio 2017 solution and project files with a particular folder/file organization that I like\r\n* I was able to most of it working but for the below:\r\n* Parts of the above project, solution files involves generating several unique GUIDs\r\n* my first approach was creating a `pre_gen_project.py` inside the `hooks` folder and update/create new variables that could be added to the ones loaded from `cookiecutter.json` or entered by the user\r\n* I was however blocked as I could not figure out how to access the context being used by CC and the jinja2 engine \r\n* I proceeded to go over the many issues on github and found some related ones like the following: #60, #102, #180, #288 but no clear answer on how to achieve what I'd like\r\n* I also followed some others issues that suggested creating custom jinja2 extension/filter (#944) but I couldnt figure out how or where to put them in the template folder so the cookiecutter.exe can identify them and pick them up\r\n* Lastly, I also tried going over the CC source code and tried to create a new executable from my script (similar to `cli.py`) that passes the guids via the `extra_context` to `cookiecutter.main(...)` but ran into some other problems that I am still trying to figure out\r\n\r\nAppreciate any pointers on how I can inject GUID values for the context variables\n", "before_files": [{"content": "\"\"\"Jinja2 extensions.\"\"\"\nimport json\nimport string\nfrom secrets import choice\n\nfrom jinja2.ext import Extension\nfrom slugify import slugify as pyslugify\n\n\nclass JsonifyExtension(Extension):\n \"\"\"Jinja2 extension to convert a Python object to JSON.\"\"\"\n\n def __init__(self, environment):\n \"\"\"Initialize the extension with the given environment.\"\"\"\n super(JsonifyExtension, self).__init__(environment)\n\n def jsonify(obj):\n return json.dumps(obj, 
sort_keys=True, indent=4)\n\n environment.filters['jsonify'] = jsonify\n\n\nclass RandomStringExtension(Extension):\n \"\"\"Jinja2 extension to create a random string.\"\"\"\n\n def __init__(self, environment):\n \"\"\"Jinja2 Extension Constructor.\"\"\"\n super(RandomStringExtension, self).__init__(environment)\n\n def random_ascii_string(length, punctuation=False):\n if punctuation:\n corpus = \"\".join((string.ascii_letters, string.punctuation))\n else:\n corpus = string.ascii_letters\n return \"\".join(choice(corpus) for _ in range(length))\n\n environment.globals.update(random_ascii_string=random_ascii_string)\n\n\nclass SlugifyExtension(Extension):\n \"\"\"Jinja2 Extension to slugify string.\"\"\"\n\n def __init__(self, environment):\n \"\"\"Jinja2 Extension constructor.\"\"\"\n super(SlugifyExtension, self).__init__(environment)\n\n def slugify(value, **kwargs):\n \"\"\"Slugifies the value.\"\"\"\n return pyslugify(value, **kwargs)\n\n environment.filters['slugify'] = slugify\n", "path": "cookiecutter/extensions.py"}, {"content": "\"\"\"Jinja2 environment and extensions loading.\"\"\"\nfrom jinja2 import Environment, StrictUndefined\n\nfrom cookiecutter.exceptions import UnknownExtension\n\n\nclass ExtensionLoaderMixin(object):\n \"\"\"Mixin providing sane loading of extensions specified in a given context.\n\n The context is being extracted from the keyword arguments before calling\n the next parent class in line of the child.\n \"\"\"\n\n def __init__(self, **kwargs):\n \"\"\"Initialize the Jinja2 Environment object while loading extensions.\n\n Does the following:\n\n 1. Establishes default_extensions (currently just a Time feature)\n 2. Reads extensions set in the cookiecutter.json _extensions key.\n 3. Attempts to load the extensions. Provides useful error if fails.\n \"\"\"\n context = kwargs.pop('context', {})\n\n default_extensions = [\n 'cookiecutter.extensions.JsonifyExtension',\n 'cookiecutter.extensions.RandomStringExtension',\n 'cookiecutter.extensions.SlugifyExtension',\n 'jinja2_time.TimeExtension',\n ]\n extensions = default_extensions + self._read_extensions(context)\n\n try:\n super(ExtensionLoaderMixin, self).__init__(extensions=extensions, **kwargs)\n except ImportError as err:\n raise UnknownExtension('Unable to load extension: {}'.format(err))\n\n def _read_extensions(self, context):\n \"\"\"Return list of extensions as str to be passed on to the Jinja2 env.\n\n If context does not contain the relevant info, return an empty\n list instead.\n \"\"\"\n try:\n extensions = context['cookiecutter']['_extensions']\n except KeyError:\n return []\n else:\n return [str(ext) for ext in extensions]\n\n\nclass StrictEnvironment(ExtensionLoaderMixin, Environment):\n \"\"\"Create strict Jinja2 environment.\n\n Jinja2 environment will raise error on undefined variable in template-\n rendering context.\n \"\"\"\n\n def __init__(self, **kwargs):\n \"\"\"Set the standard Cookiecutter StrictEnvironment.\n\n Also loading extensions defined in cookiecutter.json's _extensions key.\n \"\"\"\n super(StrictEnvironment, self).__init__(undefined=StrictUndefined, **kwargs)\n", "path": "cookiecutter/environment.py"}], "after_files": [{"content": "\"\"\"Jinja2 extensions.\"\"\"\nimport json\nimport string\nimport uuid\nfrom secrets import choice\n\nfrom jinja2.ext import Extension\nfrom slugify import slugify as pyslugify\n\n\nclass JsonifyExtension(Extension):\n \"\"\"Jinja2 extension to convert a Python object to JSON.\"\"\"\n\n def __init__(self, environment):\n \"\"\"Initialize the 
extension with the given environment.\"\"\"\n super(JsonifyExtension, self).__init__(environment)\n\n def jsonify(obj):\n return json.dumps(obj, sort_keys=True, indent=4)\n\n environment.filters['jsonify'] = jsonify\n\n\nclass RandomStringExtension(Extension):\n \"\"\"Jinja2 extension to create a random string.\"\"\"\n\n def __init__(self, environment):\n \"\"\"Jinja2 Extension Constructor.\"\"\"\n super(RandomStringExtension, self).__init__(environment)\n\n def random_ascii_string(length, punctuation=False):\n if punctuation:\n corpus = \"\".join((string.ascii_letters, string.punctuation))\n else:\n corpus = string.ascii_letters\n return \"\".join(choice(corpus) for _ in range(length))\n\n environment.globals.update(random_ascii_string=random_ascii_string)\n\n\nclass SlugifyExtension(Extension):\n \"\"\"Jinja2 Extension to slugify string.\"\"\"\n\n def __init__(self, environment):\n \"\"\"Jinja2 Extension constructor.\"\"\"\n super(SlugifyExtension, self).__init__(environment)\n\n def slugify(value, **kwargs):\n \"\"\"Slugifies the value.\"\"\"\n return pyslugify(value, **kwargs)\n\n environment.filters['slugify'] = slugify\n\n\nclass UUIDExtension(Extension):\n \"\"\"Jinja2 Extension to generate uuid4 string.\"\"\"\n\n def __init__(self, environment):\n \"\"\"Jinja2 Extension constructor.\"\"\"\n super(UUIDExtension, self).__init__(environment)\n\n def uuid4():\n \"\"\"Generate UUID4.\"\"\"\n return str(uuid.uuid4())\n\n environment.globals.update(uuid4=uuid4)\n", "path": "cookiecutter/extensions.py"}, {"content": "\"\"\"Jinja2 environment and extensions loading.\"\"\"\nfrom jinja2 import Environment, StrictUndefined\n\nfrom cookiecutter.exceptions import UnknownExtension\n\n\nclass ExtensionLoaderMixin(object):\n \"\"\"Mixin providing sane loading of extensions specified in a given context.\n\n The context is being extracted from the keyword arguments before calling\n the next parent class in line of the child.\n \"\"\"\n\n def __init__(self, **kwargs):\n \"\"\"Initialize the Jinja2 Environment object while loading extensions.\n\n Does the following:\n\n 1. Establishes default_extensions (currently just a Time feature)\n 2. Reads extensions set in the cookiecutter.json _extensions key.\n 3. Attempts to load the extensions. 
Provides useful error if fails.\n \"\"\"\n context = kwargs.pop('context', {})\n\n default_extensions = [\n 'cookiecutter.extensions.JsonifyExtension',\n 'cookiecutter.extensions.RandomStringExtension',\n 'cookiecutter.extensions.SlugifyExtension',\n 'cookiecutter.extensions.UUIDExtension',\n 'jinja2_time.TimeExtension',\n ]\n extensions = default_extensions + self._read_extensions(context)\n\n try:\n super(ExtensionLoaderMixin, self).__init__(extensions=extensions, **kwargs)\n except ImportError as err:\n raise UnknownExtension('Unable to load extension: {}'.format(err))\n\n def _read_extensions(self, context):\n \"\"\"Return list of extensions as str to be passed on to the Jinja2 env.\n\n If context does not contain the relevant info, return an empty\n list instead.\n \"\"\"\n try:\n extensions = context['cookiecutter']['_extensions']\n except KeyError:\n return []\n else:\n return [str(ext) for ext in extensions]\n\n\nclass StrictEnvironment(ExtensionLoaderMixin, Environment):\n \"\"\"Create strict Jinja2 environment.\n\n Jinja2 environment will raise error on undefined variable in template-\n rendering context.\n \"\"\"\n\n def __init__(self, **kwargs):\n \"\"\"Set the standard Cookiecutter StrictEnvironment.\n\n Also loading extensions defined in cookiecutter.json's _extensions key.\n \"\"\"\n super(StrictEnvironment, self).__init__(undefined=StrictUndefined, **kwargs)\n", "path": "cookiecutter/environment.py"}]} | 1,691 | 311 |
gh_patches_debug_8560 | rasdani/github-patches | git_diff | uccser__cs-unplugged-197 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Remove topic query from homepage
When the homepage is loaded, a database query is performed. This is currently not needed and should be removed.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `csunplugged/general/views.py`
Content:
```
1 from django.views.generic import TemplateView
2
3
4 class GeneralIndexView(TemplateView):
5 template_name = 'general/index.html'
6
7 def get_context_data(self, **kwargs):
8 # TODO: Investigate if importing model from another
9 # app is sensible/best approach.
10 from topics.models import Topic
11 context = super(GeneralIndexView, self).get_context_data(**kwargs)
12 context['total_topics'] = Topic.objects.count()
13 return context
14
15
16 class GeneralAboutView(TemplateView):
17 template_name = 'general/about.html'
18
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/csunplugged/general/views.py b/csunplugged/general/views.py
--- a/csunplugged/general/views.py
+++ b/csunplugged/general/views.py
@@ -4,14 +4,6 @@
class GeneralIndexView(TemplateView):
template_name = 'general/index.html'
- def get_context_data(self, **kwargs):
- # TODO: Investigate if importing model from another
- # app is sensible/best approach.
- from topics.models import Topic
- context = super(GeneralIndexView, self).get_context_data(**kwargs)
- context['total_topics'] = Topic.objects.count()
- return context
-
class GeneralAboutView(TemplateView):
template_name = 'general/about.html'
| {"golden_diff": "diff --git a/csunplugged/general/views.py b/csunplugged/general/views.py\n--- a/csunplugged/general/views.py\n+++ b/csunplugged/general/views.py\n@@ -4,14 +4,6 @@\n class GeneralIndexView(TemplateView):\n template_name = 'general/index.html'\n \n- def get_context_data(self, **kwargs):\n- # TODO: Investigate if importing model from another\n- # app is sensible/best approach.\n- from topics.models import Topic\n- context = super(GeneralIndexView, self).get_context_data(**kwargs)\n- context['total_topics'] = Topic.objects.count()\n- return context\n-\n \n class GeneralAboutView(TemplateView):\n template_name = 'general/about.html'\n", "issue": "Remove topic query from homepage\nWhen the homepage is loaded, a database query is performed. This is currently not needed and should be removed.\nRemove topic query from homepage\nWhen the homepage is loaded, a database query is performed. This is currently not needed and should be removed.\n", "before_files": [{"content": "from django.views.generic import TemplateView\n\n\nclass GeneralIndexView(TemplateView):\n template_name = 'general/index.html'\n\n def get_context_data(self, **kwargs):\n # TODO: Investigate if importing model from another\n # app is sensible/best approach.\n from topics.models import Topic\n context = super(GeneralIndexView, self).get_context_data(**kwargs)\n context['total_topics'] = Topic.objects.count()\n return context\n\n\nclass GeneralAboutView(TemplateView):\n template_name = 'general/about.html'\n", "path": "csunplugged/general/views.py"}], "after_files": [{"content": "from django.views.generic import TemplateView\n\n\nclass GeneralIndexView(TemplateView):\n template_name = 'general/index.html'\n\n\nclass GeneralAboutView(TemplateView):\n template_name = 'general/about.html'\n", "path": "csunplugged/general/views.py"}]} | 461 | 165 |
gh_patches_debug_17381 | rasdani/github-patches | git_diff | pypi__warehouse-6368 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
API tokens: Remove @token and pypi: cases
At some point in the future (perhaps at the end of the API token beta?), support for `@token` as a token username and `pypi:` as a token prefix should end in favor of `__token__` and `pypi-`, respectively.
See #6287, #6342.
cc @brainwane @di @ewdurbin @dstufft
--- END ISSUE ---
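For orientation, a sketch of client-side authentication once only the new forms are accepted (the token value is a placeholder): HTTP Basic credentials use the literal username `__token__`, and the password is the full API token, which carries the `pypi-` prefix.

```python
import base64

# Placeholder token; real PyPI API tokens are much longer.
username, api_token = "__token__", "pypi-AgEIcHlwaS5vcmc..."
credentials = base64.b64encode(f"{username}:{api_token}".encode()).decode()
headers = {"Authorization": f"Basic {credentials}"}
```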
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `warehouse/macaroons/services.py`
Content:
```
1 # Licensed under the Apache License, Version 2.0 (the "License");
2 # you may not use this file except in compliance with the License.
3 # You may obtain a copy of the License at
4 #
5 # http://www.apache.org/licenses/LICENSE-2.0
6 #
7 # Unless required by applicable law or agreed to in writing, software
8 # distributed under the License is distributed on an "AS IS" BASIS,
9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10 # See the License for the specific language governing permissions and
11 # limitations under the License.
12
13 import datetime
14 import json
15 import uuid
16
17 import pymacaroons
18
19 from pymacaroons.exceptions import MacaroonDeserializationException
20 from sqlalchemy.orm import joinedload
21 from sqlalchemy.orm.exc import NoResultFound
22 from zope.interface import implementer
23
24 from warehouse.accounts.models import User
25 from warehouse.macaroons.caveats import InvalidMacaroon, Verifier
26 from warehouse.macaroons.interfaces import IMacaroonService
27 from warehouse.macaroons.models import Macaroon
28
29
30 @implementer(IMacaroonService)
31 class DatabaseMacaroonService:
32 def __init__(self, db_session):
33 self.db = db_session
34
35 def _extract_raw_macaroon(self, prefixed_macaroon):
36 """
37 Returns the base64-encoded macaroon component of a PyPI macaroon,
38 dropping the prefix.
39
40 Returns None if the macaroon is None, has no prefix, or has the
41 wrong prefix.
42 """
43 if prefixed_macaroon is None:
44 return None
45
46 prefix, split, raw_macaroon = prefixed_macaroon.partition("-")
47 # TODO: Remove ':' as an acceptable delimiter for tokens (GH-6345)
48 if prefix != "pypi" or not split:
49 prefix, _, raw_macaroon = prefixed_macaroon.partition(":")
50
51 if prefix != "pypi":
52 return None
53
54 return raw_macaroon
55
56 def find_macaroon(self, macaroon_id):
57 """
58 Returns a macaroon model from the DB by its identifier.
59 Returns None if no macaroon has the given ID.
60 """
61 try:
62 dm = (
63 self.db.query(Macaroon)
64 .options(joinedload("user"))
65 .filter(Macaroon.id == uuid.UUID(macaroon_id))
66 .one()
67 )
68 except NoResultFound:
69 return None
70
71 return dm
72
73 def find_userid(self, raw_macaroon):
74 """
75 Returns the id of the user associated with the given raw (serialized)
76 macaroon.
77 """
78 raw_macaroon = self._extract_raw_macaroon(raw_macaroon)
79 if raw_macaroon is None:
80 return None
81
82 try:
83 m = pymacaroons.Macaroon.deserialize(raw_macaroon)
84 except MacaroonDeserializationException:
85 return None
86
87 dm = self.find_macaroon(m.identifier.decode())
88
89 if dm is None:
90 return None
91
92 return dm.user.id
93
94 def verify(self, raw_macaroon, context, principals, permission):
95 """
96 Returns True if the given raw (serialized) macaroon is
97 valid for the context, principals, and requested permission.
98
99 Raises InvalidMacaroon if the macaroon is not valid.
100 """
101 raw_macaroon = self._extract_raw_macaroon(raw_macaroon)
102 if raw_macaroon is None:
103 raise InvalidMacaroon("malformed or nonexistent macaroon")
104
105 try:
106 m = pymacaroons.Macaroon.deserialize(raw_macaroon)
107 except MacaroonDeserializationException:
108 raise InvalidMacaroon("malformed macaroon")
109
110 dm = self.find_macaroon(m.identifier.decode())
111
112 if dm is None:
113 raise InvalidMacaroon("deleted or nonexistent macaroon")
114
115 verifier = Verifier(m, context, principals, permission)
116 if verifier.verify(dm.key):
117 dm.last_used = datetime.datetime.now()
118 return True
119
120 raise InvalidMacaroon("invalid macaroon")
121
122 def create_macaroon(self, location, user_id, description, caveats):
123 """
124 Returns a tuple of a new raw (serialized) macaroon and its DB model.
125 The description provided is not embedded into the macaroon, only stored
126 in the DB model.
127 """
128 user = self.db.query(User).filter(User.id == user_id).one()
129
130 dm = Macaroon(user=user, description=description, caveats=caveats)
131 self.db.add(dm)
132 self.db.flush()
133
134 m = pymacaroons.Macaroon(
135 location=location,
136 identifier=str(dm.id),
137 key=dm.key,
138 version=pymacaroons.MACAROON_V2,
139 )
140 m.add_first_party_caveat(json.dumps(caveats))
141 serialized_macaroon = f"pypi-{m.serialize()}"
142 return serialized_macaroon, dm
143
144 def delete_macaroon(self, macaroon_id):
145 """
146 Deletes a macaroon from the DB by its identifier.
147 """
148 dm = self.find_macaroon(macaroon_id)
149 self.db.delete(dm)
150 self.db.flush()
151
152 def get_macaroon_by_description(self, user_id, description):
153 """
154 Returns a macaroon model from the DB with the given description,
155 if one exists for the given user.
156
157 Returns None if the user doesn't have a macaroon with this description.
158 """
159 try:
160 dm = (
161 self.db.query(Macaroon)
162 .options(joinedload("user"))
163 .filter(Macaroon.description == description)
164 .filter(Macaroon.user_id == user_id)
165 .one()
166 )
167 except NoResultFound:
168 return None
169
170 return dm
171
172
173 def database_macaroon_factory(context, request):
174 return DatabaseMacaroonService(request.db)
175
```
Path: `warehouse/macaroons/auth_policy.py`
Content:
```
1 # Licensed under the Apache License, Version 2.0 (the "License");
2 # you may not use this file except in compliance with the License.
3 # You may obtain a copy of the License at
4 #
5 # http://www.apache.org/licenses/LICENSE-2.0
6 #
7 # Unless required by applicable law or agreed to in writing, software
8 # distributed under the License is distributed on an "AS IS" BASIS,
9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10 # See the License for the specific language governing permissions and
11 # limitations under the License.
12
13 import base64
14
15 from pyramid.authentication import CallbackAuthenticationPolicy
16 from pyramid.interfaces import IAuthenticationPolicy, IAuthorizationPolicy
17 from pyramid.security import Denied
18 from pyramid.threadlocal import get_current_request
19 from zope.interface import implementer
20
21 from warehouse.cache.http import add_vary_callback
22 from warehouse.macaroons.interfaces import IMacaroonService
23 from warehouse.macaroons.services import InvalidMacaroon
24
25
26 def _extract_basic_macaroon(auth):
27 """
28 A helper function for extracting a macaroon from a
29 HTTP Basic Authentication-style header.
30
31 Returns None if the header doesn't contain a structurally
32 valid macaroon, or the candidate (not yet verified) macaroon
33 in a serialized form.
34 """
35 try:
36 authorization = base64.b64decode(auth).decode()
37 auth_method, _, auth = authorization.partition(":")
38 except ValueError:
39 return None
40
41 # TODO: Remove @token as an acceptable token username (GH-6345)
42 if auth_method != "@token" and auth_method != "__token__":
43 return None
44
45 return auth
46
47
48 def _extract_http_macaroon(request):
49 """
50 A helper function for the extraction of HTTP Macaroon from a given request.
51 Returns either a None if no macaroon could be found, or the string
52 that represents our serialized macaroon.
53 """
54 authorization = request.headers.get("Authorization")
55 if not authorization:
56 return None
57
58 try:
59 auth_method, auth = authorization.split(" ", 1)
60 except ValueError:
61 return None
62
63 if auth_method.lower() == "basic":
64 return _extract_basic_macaroon(auth)
65 elif auth_method.lower() == "token":
66 return auth
67
68 return None
69
70
71 @implementer(IAuthenticationPolicy)
72 class MacaroonAuthenticationPolicy(CallbackAuthenticationPolicy):
73 def __init__(self, callback=None):
74 self.callback = callback
75
76 def unauthenticated_userid(self, request):
77 # If we're calling into this API on a request, then we want to register
78 # a callback which will ensure that the response varies based on the
79 # Authorization header.
80 request.add_response_callback(add_vary_callback("Authorization"))
81
82 # We need to extract our Macaroon from the request.
83 macaroon = _extract_http_macaroon(request)
84 if macaroon is None:
85 return None
86
87 # Check to see if our Macaroon exists in the database, and if so
88 # fetch the user that is associated with it.
89 macaroon_service = request.find_service(IMacaroonService, context=None)
90 userid = macaroon_service.find_userid(macaroon)
91 if userid is not None:
92 return str(userid)
93
94 def remember(self, request, userid, **kw):
95 # This is a NO-OP because our Macaroon header policy doesn't allow
96 # the ability for authentication to "remember" the user id. This
97 # assumes it has been configured in clients somewhere out of band.
98 return []
99
100 def forget(self, request):
101 # This is a NO-OP because our Macaroon header policy doesn't allow
102 # the ability for authentication to "forget" the user id. This
103 # assumes it has been configured in clients somewhere out of band.
104 return []
105
106
107 @implementer(IAuthorizationPolicy)
108 class MacaroonAuthorizationPolicy:
109 def __init__(self, policy):
110 self.policy = policy
111
112 def permits(self, context, principals, permission):
113 # The Pyramid API doesn't let us access the request here, so we have to pull it
114 # out of the thread local instead.
115 # TODO: Work with Pyramid devs to figure out if there is a better way to support
116 # the worklow we are using here or not.
117 request = get_current_request()
118
119 # Our request could possibly be a None, if there isn't an active request, in
120 # that case we're going to always deny, because without a request, we can't
121 # determine if this request is authorized or not.
122 if request is None:
123 return Denied("There was no active request.")
124
125 # Re-extract our Macaroon from the request, it sucks to have to do this work
126 # twice, but I believe it is inevitable unless we pass the Macaroon back as
127 # a principal-- which doesn't seem to be the right fit for it.
128 macaroon = _extract_http_macaroon(request)
129
130 # This logic will only happen on requests that are being authenticated with
131 # Macaroons. Any other request will just fall back to the standard Authorization
132 # policy.
133 if macaroon is not None:
134 macaroon_service = request.find_service(IMacaroonService, context=None)
135
136 try:
137 macaroon_service.verify(macaroon, context, principals, permission)
138 except InvalidMacaroon as exc:
139 return Denied(f"The supplied token was invalid: {str(exc)!r}")
140
141 # If our Macaroon is verified, then we'll pass this request to our underlying
142 # Authorization policy, so it can handle its own authorization logic on
143 # the prinicpal.
144 return self.policy.permits(context, principals, permission)
145
146 def principals_allowed_by_permission(self, context, permission):
147 # We just dispatch this, because Macaroons don't restrict what principals are
148 # allowed by a particular permission, they just restrict specific requests
149 # to not have that permission.
150 return self.policy.principals_allowed_by_permission(context, permission)
151
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/warehouse/macaroons/auth_policy.py b/warehouse/macaroons/auth_policy.py
--- a/warehouse/macaroons/auth_policy.py
+++ b/warehouse/macaroons/auth_policy.py
@@ -38,8 +38,7 @@
except ValueError:
return None
- # TODO: Remove @token as an acceptable token username (GH-6345)
- if auth_method != "@token" and auth_method != "__token__":
+ if auth_method != "__token__":
return None
return auth
diff --git a/warehouse/macaroons/services.py b/warehouse/macaroons/services.py
--- a/warehouse/macaroons/services.py
+++ b/warehouse/macaroons/services.py
@@ -43,12 +43,8 @@
if prefixed_macaroon is None:
return None
- prefix, split, raw_macaroon = prefixed_macaroon.partition("-")
- # TODO: Remove ':' as an acceptable delimiter for tokens (GH-6345)
- if prefix != "pypi" or not split:
- prefix, _, raw_macaroon = prefixed_macaroon.partition(":")
-
- if prefix != "pypi":
+ prefix, _, raw_macaroon = prefixed_macaroon.partition("-")
+ if prefix != "pypi" or not raw_macaroon:
return None
return raw_macaroon
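To make the tightened check concrete, a small standalone illustration of which token shapes `_extract_raw_macaroon` accepts after this patch (the token bodies are placeholders):

```python
# Placeholder strings purely to illustrate the post-patch parsing rules.
tokens = {
    "pypi-AgEIcHlwaS5vcmc...": True,   # "pypi-" prefix with a payload -> accepted
    "pypi:AgEIcHlwaS5vcmc...": False,  # legacy ":" delimiter -> no longer recognized
    "pypi-": False,                    # prefix with an empty payload -> rejected
}

for token, expected in tokens.items():
    prefix, _, raw_macaroon = token.partition("-")
    accepted = prefix == "pypi" and bool(raw_macaroon)
    assert accepted is expected
```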
| {"golden_diff": "diff --git a/warehouse/macaroons/auth_policy.py b/warehouse/macaroons/auth_policy.py\n--- a/warehouse/macaroons/auth_policy.py\n+++ b/warehouse/macaroons/auth_policy.py\n@@ -38,8 +38,7 @@\n except ValueError:\n return None\n \n- # TODO: Remove @token as an acceptable token username (GH-6345)\n- if auth_method != \"@token\" and auth_method != \"__token__\":\n+ if auth_method != \"__token__\":\n return None\n \n return auth\ndiff --git a/warehouse/macaroons/services.py b/warehouse/macaroons/services.py\n--- a/warehouse/macaroons/services.py\n+++ b/warehouse/macaroons/services.py\n@@ -43,12 +43,8 @@\n if prefixed_macaroon is None:\n return None\n \n- prefix, split, raw_macaroon = prefixed_macaroon.partition(\"-\")\n- # TODO: Remove ':' as an acceptable delimiter for tokens (GH-6345)\n- if prefix != \"pypi\" or not split:\n- prefix, _, raw_macaroon = prefixed_macaroon.partition(\":\")\n-\n- if prefix != \"pypi\":\n+ prefix, _, raw_macaroon = prefixed_macaroon.partition(\"-\")\n+ if prefix != \"pypi\" or not raw_macaroon:\n return None\n \n return raw_macaroon\n", "issue": "API tokens: Remove @token and pypi: cases\nAt some point in the future (perhaps at the end of the API token beta?), support for `@token` as a token username and `pypi:` as a token prefix should end in favor of `__token__` and `pypi-`, respectively.\r\n\r\nSee #6287, #6342.\r\n\r\ncc @brainwane @di @ewdurbin @dstufft \r\n\r\n\n", "before_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport datetime\nimport json\nimport uuid\n\nimport pymacaroons\n\nfrom pymacaroons.exceptions import MacaroonDeserializationException\nfrom sqlalchemy.orm import joinedload\nfrom sqlalchemy.orm.exc import NoResultFound\nfrom zope.interface import implementer\n\nfrom warehouse.accounts.models import User\nfrom warehouse.macaroons.caveats import InvalidMacaroon, Verifier\nfrom warehouse.macaroons.interfaces import IMacaroonService\nfrom warehouse.macaroons.models import Macaroon\n\n\n@implementer(IMacaroonService)\nclass DatabaseMacaroonService:\n def __init__(self, db_session):\n self.db = db_session\n\n def _extract_raw_macaroon(self, prefixed_macaroon):\n \"\"\"\n Returns the base64-encoded macaroon component of a PyPI macaroon,\n dropping the prefix.\n\n Returns None if the macaroon is None, has no prefix, or has the\n wrong prefix.\n \"\"\"\n if prefixed_macaroon is None:\n return None\n\n prefix, split, raw_macaroon = prefixed_macaroon.partition(\"-\")\n # TODO: Remove ':' as an acceptable delimiter for tokens (GH-6345)\n if prefix != \"pypi\" or not split:\n prefix, _, raw_macaroon = prefixed_macaroon.partition(\":\")\n\n if prefix != \"pypi\":\n return None\n\n return raw_macaroon\n\n def find_macaroon(self, macaroon_id):\n \"\"\"\n Returns a macaroon model from the DB by its identifier.\n Returns None if no macaroon has the given ID.\n \"\"\"\n try:\n dm = (\n self.db.query(Macaroon)\n .options(joinedload(\"user\"))\n .filter(Macaroon.id == uuid.UUID(macaroon_id))\n .one()\n )\n except 
NoResultFound:\n return None\n\n return dm\n\n def find_userid(self, raw_macaroon):\n \"\"\"\n Returns the id of the user associated with the given raw (serialized)\n macaroon.\n \"\"\"\n raw_macaroon = self._extract_raw_macaroon(raw_macaroon)\n if raw_macaroon is None:\n return None\n\n try:\n m = pymacaroons.Macaroon.deserialize(raw_macaroon)\n except MacaroonDeserializationException:\n return None\n\n dm = self.find_macaroon(m.identifier.decode())\n\n if dm is None:\n return None\n\n return dm.user.id\n\n def verify(self, raw_macaroon, context, principals, permission):\n \"\"\"\n Returns True if the given raw (serialized) macaroon is\n valid for the context, principals, and requested permission.\n\n Raises InvalidMacaroon if the macaroon is not valid.\n \"\"\"\n raw_macaroon = self._extract_raw_macaroon(raw_macaroon)\n if raw_macaroon is None:\n raise InvalidMacaroon(\"malformed or nonexistent macaroon\")\n\n try:\n m = pymacaroons.Macaroon.deserialize(raw_macaroon)\n except MacaroonDeserializationException:\n raise InvalidMacaroon(\"malformed macaroon\")\n\n dm = self.find_macaroon(m.identifier.decode())\n\n if dm is None:\n raise InvalidMacaroon(\"deleted or nonexistent macaroon\")\n\n verifier = Verifier(m, context, principals, permission)\n if verifier.verify(dm.key):\n dm.last_used = datetime.datetime.now()\n return True\n\n raise InvalidMacaroon(\"invalid macaroon\")\n\n def create_macaroon(self, location, user_id, description, caveats):\n \"\"\"\n Returns a tuple of a new raw (serialized) macaroon and its DB model.\n The description provided is not embedded into the macaroon, only stored\n in the DB model.\n \"\"\"\n user = self.db.query(User).filter(User.id == user_id).one()\n\n dm = Macaroon(user=user, description=description, caveats=caveats)\n self.db.add(dm)\n self.db.flush()\n\n m = pymacaroons.Macaroon(\n location=location,\n identifier=str(dm.id),\n key=dm.key,\n version=pymacaroons.MACAROON_V2,\n )\n m.add_first_party_caveat(json.dumps(caveats))\n serialized_macaroon = f\"pypi-{m.serialize()}\"\n return serialized_macaroon, dm\n\n def delete_macaroon(self, macaroon_id):\n \"\"\"\n Deletes a macaroon from the DB by its identifier.\n \"\"\"\n dm = self.find_macaroon(macaroon_id)\n self.db.delete(dm)\n self.db.flush()\n\n def get_macaroon_by_description(self, user_id, description):\n \"\"\"\n Returns a macaroon model from the DB with the given description,\n if one exists for the given user.\n\n Returns None if the user doesn't have a macaroon with this description.\n \"\"\"\n try:\n dm = (\n self.db.query(Macaroon)\n .options(joinedload(\"user\"))\n .filter(Macaroon.description == description)\n .filter(Macaroon.user_id == user_id)\n .one()\n )\n except NoResultFound:\n return None\n\n return dm\n\n\ndef database_macaroon_factory(context, request):\n return DatabaseMacaroonService(request.db)\n", "path": "warehouse/macaroons/services.py"}, {"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport base64\n\nfrom pyramid.authentication import 
CallbackAuthenticationPolicy\nfrom pyramid.interfaces import IAuthenticationPolicy, IAuthorizationPolicy\nfrom pyramid.security import Denied\nfrom pyramid.threadlocal import get_current_request\nfrom zope.interface import implementer\n\nfrom warehouse.cache.http import add_vary_callback\nfrom warehouse.macaroons.interfaces import IMacaroonService\nfrom warehouse.macaroons.services import InvalidMacaroon\n\n\ndef _extract_basic_macaroon(auth):\n \"\"\"\n A helper function for extracting a macaroon from a\n HTTP Basic Authentication-style header.\n\n Returns None if the header doesn't contain a structurally\n valid macaroon, or the candidate (not yet verified) macaroon\n in a serialized form.\n \"\"\"\n try:\n authorization = base64.b64decode(auth).decode()\n auth_method, _, auth = authorization.partition(\":\")\n except ValueError:\n return None\n\n # TODO: Remove @token as an acceptable token username (GH-6345)\n if auth_method != \"@token\" and auth_method != \"__token__\":\n return None\n\n return auth\n\n\ndef _extract_http_macaroon(request):\n \"\"\"\n A helper function for the extraction of HTTP Macaroon from a given request.\n Returns either a None if no macaroon could be found, or the string\n that represents our serialized macaroon.\n \"\"\"\n authorization = request.headers.get(\"Authorization\")\n if not authorization:\n return None\n\n try:\n auth_method, auth = authorization.split(\" \", 1)\n except ValueError:\n return None\n\n if auth_method.lower() == \"basic\":\n return _extract_basic_macaroon(auth)\n elif auth_method.lower() == \"token\":\n return auth\n\n return None\n\n\n@implementer(IAuthenticationPolicy)\nclass MacaroonAuthenticationPolicy(CallbackAuthenticationPolicy):\n def __init__(self, callback=None):\n self.callback = callback\n\n def unauthenticated_userid(self, request):\n # If we're calling into this API on a request, then we want to register\n # a callback which will ensure that the response varies based on the\n # Authorization header.\n request.add_response_callback(add_vary_callback(\"Authorization\"))\n\n # We need to extract our Macaroon from the request.\n macaroon = _extract_http_macaroon(request)\n if macaroon is None:\n return None\n\n # Check to see if our Macaroon exists in the database, and if so\n # fetch the user that is associated with it.\n macaroon_service = request.find_service(IMacaroonService, context=None)\n userid = macaroon_service.find_userid(macaroon)\n if userid is not None:\n return str(userid)\n\n def remember(self, request, userid, **kw):\n # This is a NO-OP because our Macaroon header policy doesn't allow\n # the ability for authentication to \"remember\" the user id. This\n # assumes it has been configured in clients somewhere out of band.\n return []\n\n def forget(self, request):\n # This is a NO-OP because our Macaroon header policy doesn't allow\n # the ability for authentication to \"forget\" the user id. 
This\n # assumes it has been configured in clients somewhere out of band.\n return []\n\n\n@implementer(IAuthorizationPolicy)\nclass MacaroonAuthorizationPolicy:\n def __init__(self, policy):\n self.policy = policy\n\n def permits(self, context, principals, permission):\n # The Pyramid API doesn't let us access the request here, so we have to pull it\n # out of the thread local instead.\n # TODO: Work with Pyramid devs to figure out if there is a better way to support\n # the worklow we are using here or not.\n request = get_current_request()\n\n # Our request could possibly be a None, if there isn't an active request, in\n # that case we're going to always deny, because without a request, we can't\n # determine if this request is authorized or not.\n if request is None:\n return Denied(\"There was no active request.\")\n\n # Re-extract our Macaroon from the request, it sucks to have to do this work\n # twice, but I believe it is inevitable unless we pass the Macaroon back as\n # a principal-- which doesn't seem to be the right fit for it.\n macaroon = _extract_http_macaroon(request)\n\n # This logic will only happen on requests that are being authenticated with\n # Macaroons. Any other request will just fall back to the standard Authorization\n # policy.\n if macaroon is not None:\n macaroon_service = request.find_service(IMacaroonService, context=None)\n\n try:\n macaroon_service.verify(macaroon, context, principals, permission)\n except InvalidMacaroon as exc:\n return Denied(f\"The supplied token was invalid: {str(exc)!r}\")\n\n # If our Macaroon is verified, then we'll pass this request to our underlying\n # Authorization policy, so it can handle its own authorization logic on\n # the prinicpal.\n return self.policy.permits(context, principals, permission)\n\n def principals_allowed_by_permission(self, context, permission):\n # We just dispatch this, because Macaroons don't restrict what principals are\n # allowed by a particular permission, they just restrict specific requests\n # to not have that permission.\n return self.policy.principals_allowed_by_permission(context, permission)\n", "path": "warehouse/macaroons/auth_policy.py"}], "after_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport datetime\nimport json\nimport uuid\n\nimport pymacaroons\n\nfrom pymacaroons.exceptions import MacaroonDeserializationException\nfrom sqlalchemy.orm import joinedload\nfrom sqlalchemy.orm.exc import NoResultFound\nfrom zope.interface import implementer\n\nfrom warehouse.accounts.models import User\nfrom warehouse.macaroons.caveats import InvalidMacaroon, Verifier\nfrom warehouse.macaroons.interfaces import IMacaroonService\nfrom warehouse.macaroons.models import Macaroon\n\n\n@implementer(IMacaroonService)\nclass DatabaseMacaroonService:\n def __init__(self, db_session):\n self.db = db_session\n\n def _extract_raw_macaroon(self, prefixed_macaroon):\n \"\"\"\n Returns the base64-encoded macaroon component of a PyPI macaroon,\n dropping the prefix.\n\n Returns None if the 
macaroon is None, has no prefix, or has the\n wrong prefix.\n \"\"\"\n if prefixed_macaroon is None:\n return None\n\n prefix, _, raw_macaroon = prefixed_macaroon.partition(\"-\")\n if prefix != \"pypi\" or not raw_macaroon:\n return None\n\n return raw_macaroon\n\n def find_macaroon(self, macaroon_id):\n \"\"\"\n Returns a macaroon model from the DB by its identifier.\n Returns None if no macaroon has the given ID.\n \"\"\"\n try:\n dm = (\n self.db.query(Macaroon)\n .options(joinedload(\"user\"))\n .filter(Macaroon.id == uuid.UUID(macaroon_id))\n .one()\n )\n except NoResultFound:\n return None\n\n return dm\n\n def find_userid(self, raw_macaroon):\n \"\"\"\n Returns the id of the user associated with the given raw (serialized)\n macaroon.\n \"\"\"\n raw_macaroon = self._extract_raw_macaroon(raw_macaroon)\n if raw_macaroon is None:\n return None\n\n try:\n m = pymacaroons.Macaroon.deserialize(raw_macaroon)\n except MacaroonDeserializationException:\n return None\n\n dm = self.find_macaroon(m.identifier.decode())\n\n if dm is None:\n return None\n\n return dm.user.id\n\n def verify(self, raw_macaroon, context, principals, permission):\n \"\"\"\n Returns True if the given raw (serialized) macaroon is\n valid for the context, principals, and requested permission.\n\n Raises InvalidMacaroon if the macaroon is not valid.\n \"\"\"\n raw_macaroon = self._extract_raw_macaroon(raw_macaroon)\n if raw_macaroon is None:\n raise InvalidMacaroon(\"malformed or nonexistent macaroon\")\n\n try:\n m = pymacaroons.Macaroon.deserialize(raw_macaroon)\n except MacaroonDeserializationException:\n raise InvalidMacaroon(\"malformed macaroon\")\n\n dm = self.find_macaroon(m.identifier.decode())\n\n if dm is None:\n raise InvalidMacaroon(\"deleted or nonexistent macaroon\")\n\n verifier = Verifier(m, context, principals, permission)\n if verifier.verify(dm.key):\n dm.last_used = datetime.datetime.now()\n return True\n\n raise InvalidMacaroon(\"invalid macaroon\")\n\n def create_macaroon(self, location, user_id, description, caveats):\n \"\"\"\n Returns a tuple of a new raw (serialized) macaroon and its DB model.\n The description provided is not embedded into the macaroon, only stored\n in the DB model.\n \"\"\"\n user = self.db.query(User).filter(User.id == user_id).one()\n\n dm = Macaroon(user=user, description=description, caveats=caveats)\n self.db.add(dm)\n self.db.flush()\n\n m = pymacaroons.Macaroon(\n location=location,\n identifier=str(dm.id),\n key=dm.key,\n version=pymacaroons.MACAROON_V2,\n )\n m.add_first_party_caveat(json.dumps(caveats))\n serialized_macaroon = f\"pypi-{m.serialize()}\"\n return serialized_macaroon, dm\n\n def delete_macaroon(self, macaroon_id):\n \"\"\"\n Deletes a macaroon from the DB by its identifier.\n \"\"\"\n dm = self.find_macaroon(macaroon_id)\n self.db.delete(dm)\n self.db.flush()\n\n def get_macaroon_by_description(self, user_id, description):\n \"\"\"\n Returns a macaroon model from the DB with the given description,\n if one exists for the given user.\n\n Returns None if the user doesn't have a macaroon with this description.\n \"\"\"\n try:\n dm = (\n self.db.query(Macaroon)\n .options(joinedload(\"user\"))\n .filter(Macaroon.description == description)\n .filter(Macaroon.user_id == user_id)\n .one()\n )\n except NoResultFound:\n return None\n\n return dm\n\n\ndef database_macaroon_factory(context, request):\n return DatabaseMacaroonService(request.db)\n", "path": "warehouse/macaroons/services.py"}, {"content": "# Licensed under the Apache License, Version 2.0 (the 
\"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport base64\n\nfrom pyramid.authentication import CallbackAuthenticationPolicy\nfrom pyramid.interfaces import IAuthenticationPolicy, IAuthorizationPolicy\nfrom pyramid.security import Denied\nfrom pyramid.threadlocal import get_current_request\nfrom zope.interface import implementer\n\nfrom warehouse.cache.http import add_vary_callback\nfrom warehouse.macaroons.interfaces import IMacaroonService\nfrom warehouse.macaroons.services import InvalidMacaroon\n\n\ndef _extract_basic_macaroon(auth):\n \"\"\"\n A helper function for extracting a macaroon from a\n HTTP Basic Authentication-style header.\n\n Returns None if the header doesn't contain a structurally\n valid macaroon, or the candidate (not yet verified) macaroon\n in a serialized form.\n \"\"\"\n try:\n authorization = base64.b64decode(auth).decode()\n auth_method, _, auth = authorization.partition(\":\")\n except ValueError:\n return None\n\n if auth_method != \"__token__\":\n return None\n\n return auth\n\n\ndef _extract_http_macaroon(request):\n \"\"\"\n A helper function for the extraction of HTTP Macaroon from a given request.\n Returns either a None if no macaroon could be found, or the string\n that represents our serialized macaroon.\n \"\"\"\n authorization = request.headers.get(\"Authorization\")\n if not authorization:\n return None\n\n try:\n auth_method, auth = authorization.split(\" \", 1)\n except ValueError:\n return None\n\n if auth_method.lower() == \"basic\":\n return _extract_basic_macaroon(auth)\n elif auth_method.lower() == \"token\":\n return auth\n\n return None\n\n\n@implementer(IAuthenticationPolicy)\nclass MacaroonAuthenticationPolicy(CallbackAuthenticationPolicy):\n def __init__(self, callback=None):\n self.callback = callback\n\n def unauthenticated_userid(self, request):\n # If we're calling into this API on a request, then we want to register\n # a callback which will ensure that the response varies based on the\n # Authorization header.\n request.add_response_callback(add_vary_callback(\"Authorization\"))\n\n # We need to extract our Macaroon from the request.\n macaroon = _extract_http_macaroon(request)\n if macaroon is None:\n return None\n\n # Check to see if our Macaroon exists in the database, and if so\n # fetch the user that is associated with it.\n macaroon_service = request.find_service(IMacaroonService, context=None)\n userid = macaroon_service.find_userid(macaroon)\n if userid is not None:\n return str(userid)\n\n def remember(self, request, userid, **kw):\n # This is a NO-OP because our Macaroon header policy doesn't allow\n # the ability for authentication to \"remember\" the user id. This\n # assumes it has been configured in clients somewhere out of band.\n return []\n\n def forget(self, request):\n # This is a NO-OP because our Macaroon header policy doesn't allow\n # the ability for authentication to \"forget\" the user id. 
This\n # assumes it has been configured in clients somewhere out of band.\n return []\n\n\n@implementer(IAuthorizationPolicy)\nclass MacaroonAuthorizationPolicy:\n def __init__(self, policy):\n self.policy = policy\n\n def permits(self, context, principals, permission):\n # The Pyramid API doesn't let us access the request here, so we have to pull it\n # out of the thread local instead.\n # TODO: Work with Pyramid devs to figure out if there is a better way to support\n # the worklow we are using here or not.\n request = get_current_request()\n\n # Our request could possibly be a None, if there isn't an active request, in\n # that case we're going to always deny, because without a request, we can't\n # determine if this request is authorized or not.\n if request is None:\n return Denied(\"There was no active request.\")\n\n # Re-extract our Macaroon from the request, it sucks to have to do this work\n # twice, but I believe it is inevitable unless we pass the Macaroon back as\n # a principal-- which doesn't seem to be the right fit for it.\n macaroon = _extract_http_macaroon(request)\n\n # This logic will only happen on requests that are being authenticated with\n # Macaroons. Any other request will just fall back to the standard Authorization\n # policy.\n if macaroon is not None:\n macaroon_service = request.find_service(IMacaroonService, context=None)\n\n try:\n macaroon_service.verify(macaroon, context, principals, permission)\n except InvalidMacaroon as exc:\n return Denied(f\"The supplied token was invalid: {str(exc)!r}\")\n\n # If our Macaroon is verified, then we'll pass this request to our underlying\n # Authorization policy, so it can handle its own authorization logic on\n # the prinicpal.\n return self.policy.permits(context, principals, permission)\n\n def principals_allowed_by_permission(self, context, permission):\n # We just dispatch this, because Macaroons don't restrict what principals are\n # allowed by a particular permission, they just restrict specific requests\n # to not have that permission.\n return self.policy.principals_allowed_by_permission(context, permission)\n", "path": "warehouse/macaroons/auth_policy.py"}]} | 3,762 | 307 |
gh_patches_debug_35530 | rasdani/github-patches | git_diff | hylang__hy-2214 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
curses and SIGWINCH do not work properly with Hy.
After importing curses under hy, curses is unable to detect the size of the terminal as it is resized.
This manifests as curses.LINES and curses.COLS not being updated, stdscr.getmaxyx() not reflecting the new size, and so on.
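A minimal way to observe the symptom is a loop that reprints whatever `stdscr.getmaxyx()` reports (an illustrative sketch, not part of the original report; the key handling and layout are arbitrary):
```
import curses

def main(stdscr):
    while True:
        key = stdscr.getch()            # resize the terminal, then press any key
        if key == ord("q"):
            break
        rows, cols = stdscr.getmaxyx()  # per the report, stays stale when launched via the hy binary
        stdscr.erase()
        stdscr.addstr(0, 0, f"getmaxyx() -> {rows} x {cols}")
        stdscr.refresh()

curses.wrapper(main)
```
Launched via `python`, the reported size tracks each resize; the report is that the equivalent program launched via the `hy` binary keeps returning the old size.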
However, the workaround of launching the hy program from python with:
```
import hy
import curses
from main import main_event_loop
if __name__ == "__main__":
curses.wrapper(main_event_loop, ...)
```
allows curses to dynamically detect the size of the terminal.
I therefore conclude that the problem is with the hy binary. My (limited) understanding, acquired while tracking down the source of this problem, is that curses relies on the SIGWINCH signal, so perhaps that is a place to look.
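If SIGWINCH delivery is the suspect, one POSIX-only check (again an illustrative sketch, not taken from the original report) is to register a handler and watch whether it fires on resize under each launcher:
```
import signal

def on_winch(signum, frame):
    print("SIGWINCH delivered")

signal.signal(signal.SIGWINCH, on_winch)
print("Resize the terminal; Ctrl-C to exit.")
while True:
    signal.pause()  # returns only when a signal arrives
```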
Void linux x86 64bit, python 3.9.0
Freebsd 12.2, python 3.8.6
hy 0.19.0 (reported by pip) installed from git master branch via pip
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `hy/completer.py`
Content:
```
1 import contextlib
2 import os
3 import re
4 import sys
5 import builtins
6
7 import hy.macros
8 import hy.compiler
9
10
11 docomplete = True
12
13 try:
14 import readline
15 except AttributeError as e:
16 # https://github.com/pyreadline/pyreadline/issues/65
17 if "module 'collections' has no attribute 'Callable'" in str(e):
18 docomplete = False
19 else:
20 raise
21 except ImportError:
22 docomplete = False
23
24 if docomplete:
25 if sys.platform == 'darwin' and 'libedit' in readline.__doc__:
26 readline_bind = "bind ^I rl_complete"
27 else:
28 readline_bind = "tab: complete"
29
30
31 class Completer:
32
33 def __init__(self, namespace={}):
34 if not isinstance(namespace, dict):
35 raise TypeError('namespace must be a dictionary')
36 self.namespace = namespace
37 self.path = [builtins.__dict__,
38 namespace]
39
40 namespace.setdefault('__macros__', {})
41
42 self.path.append(namespace['__macros__'])
43
44 def attr_matches(self, text):
45 # Borrowed from IPython's completer
46 m = re.match(r"(\S+(\.[\w-]+)*)\.([\w-]*)$", text)
47
48 if m:
49 expr, attr = m.group(1, 3)
50 attr = attr.replace("-", "_")
51 expr = expr.replace("-", "_")
52 else:
53 return []
54
55 try:
56 obj = eval(expr, self.namespace)
57 words = dir(obj)
58 except Exception:
59 return []
60
61 n = len(attr)
62 matches = []
63 for w in words:
64 if w[:n] == attr:
65 matches.append("{}.{}".format(
66 expr.replace("_", "-"), w.replace("_", "-")))
67 return matches
68
69 def global_matches(self, text):
70 matches = []
71 for p in self.path:
72 for k in p.keys():
73 if isinstance(k, str):
74 k = k.replace("_", "-")
75 if k.startswith(text):
76 matches.append(k)
77 return matches
78
79 def complete(self, text, state):
80 if "." in text:
81 matches = self.attr_matches(text)
82 else:
83 matches = self.global_matches(text)
84 try:
85 return matches[state]
86 except IndexError:
87 return None
88
89
90 @contextlib.contextmanager
91 def completion(completer=None):
92 delims = "()[]{} "
93 if not completer:
94 completer = Completer()
95
96 if docomplete:
97 readline.set_completer(completer.complete)
98 readline.set_completer_delims(delims)
99
100 history = os.environ.get(
101 "HY_HISTORY", os.path.expanduser("~/.hy-history"))
102 readline.parse_and_bind("set blink-matching-paren on")
103
104 try:
105 readline.read_history_file(history)
106 except OSError:
107 pass
108
109 readline.parse_and_bind(readline_bind)
110
111 try:
112 yield
113 finally:
114 if docomplete:
115 try:
116 readline.write_history_file(history)
117 except OSError:
118 pass
119
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/hy/completer.py b/hy/completer.py
--- a/hy/completer.py
+++ b/hy/completer.py
@@ -8,24 +8,19 @@
import hy.compiler
-docomplete = True
-
-try:
- import readline
-except AttributeError as e:
- # https://github.com/pyreadline/pyreadline/issues/65
- if "module 'collections' has no attribute 'Callable'" in str(e):
- docomplete = False
- else:
- raise
-except ImportError:
- docomplete = False
-
-if docomplete:
- if sys.platform == 'darwin' and 'libedit' in readline.__doc__:
- readline_bind = "bind ^I rl_complete"
- else:
- readline_bind = "tab: complete"
+# Lazily import `readline` to work around
+# https://bugs.python.org/issue2675#msg265564
+readline = None
+def init_readline():
+ global readline
+ try:
+ import readline
+ except AttributeError as e:
+ # https://github.com/pyreadline/pyreadline/issues/65
+ if "module 'collections' has no attribute 'Callable'" not in str(e):
+ raise
+ except ImportError:
+ pass
class Completer:
@@ -86,33 +81,42 @@
except IndexError:
return None
-
@contextlib.contextmanager
def completion(completer=None):
delims = "()[]{} "
+
+ init_readline()
+ if not readline:
+ # We have nothing to do. Act like a null context manager.
+ yield
+ return
+
if not completer:
completer = Completer()
- if docomplete:
- readline.set_completer(completer.complete)
- readline.set_completer_delims(delims)
+ if sys.platform == 'darwin' and 'libedit' in readline.__doc__:
+ readline_bind = "bind ^I rl_complete"
+ else:
+ readline_bind = "tab: complete"
- history = os.environ.get(
- "HY_HISTORY", os.path.expanduser("~/.hy-history"))
- readline.parse_and_bind("set blink-matching-paren on")
+ readline.set_completer(completer.complete)
+ readline.set_completer_delims(delims)
- try:
- readline.read_history_file(history)
- except OSError:
- pass
+ history = os.environ.get(
+ "HY_HISTORY", os.path.expanduser("~/.hy-history"))
+ readline.parse_and_bind("set blink-matching-paren on")
- readline.parse_and_bind(readline_bind)
+ try:
+ readline.read_history_file(history)
+ except OSError:
+ pass
+
+ readline.parse_and_bind(readline_bind)
try:
yield
finally:
- if docomplete:
- try:
- readline.write_history_file(history)
- except OSError:
- pass
+ try:
+ readline.write_history_file(history)
+ except OSError:
+ pass
| {"golden_diff": "diff --git a/hy/completer.py b/hy/completer.py\n--- a/hy/completer.py\n+++ b/hy/completer.py\n@@ -8,24 +8,19 @@\n import hy.compiler\n \n \n-docomplete = True\n-\n-try:\n- import readline\n-except AttributeError as e:\n- # https://github.com/pyreadline/pyreadline/issues/65\n- if \"module 'collections' has no attribute 'Callable'\" in str(e):\n- docomplete = False\n- else:\n- raise\n-except ImportError:\n- docomplete = False\n-\n-if docomplete:\n- if sys.platform == 'darwin' and 'libedit' in readline.__doc__:\n- readline_bind = \"bind ^I rl_complete\"\n- else:\n- readline_bind = \"tab: complete\"\n+# Lazily import `readline` to work around\n+# https://bugs.python.org/issue2675#msg265564\n+readline = None\n+def init_readline():\n+ global readline\n+ try:\n+ import readline\n+ except AttributeError as e:\n+ # https://github.com/pyreadline/pyreadline/issues/65\n+ if \"module 'collections' has no attribute 'Callable'\" not in str(e):\n+ raise\n+ except ImportError:\n+ pass\n \n \n class Completer:\n@@ -86,33 +81,42 @@\n except IndexError:\n return None\n \n-\n @contextlib.contextmanager\n def completion(completer=None):\n delims = \"()[]{} \"\n+\n+ init_readline()\n+ if not readline:\n+ # We have nothing to do. Act like a null context manager.\n+ yield\n+ return\n+\n if not completer:\n completer = Completer()\n \n- if docomplete:\n- readline.set_completer(completer.complete)\n- readline.set_completer_delims(delims)\n+ if sys.platform == 'darwin' and 'libedit' in readline.__doc__:\n+ readline_bind = \"bind ^I rl_complete\"\n+ else:\n+ readline_bind = \"tab: complete\"\n \n- history = os.environ.get(\n- \"HY_HISTORY\", os.path.expanduser(\"~/.hy-history\"))\n- readline.parse_and_bind(\"set blink-matching-paren on\")\n+ readline.set_completer(completer.complete)\n+ readline.set_completer_delims(delims)\n \n- try:\n- readline.read_history_file(history)\n- except OSError:\n- pass\n+ history = os.environ.get(\n+ \"HY_HISTORY\", os.path.expanduser(\"~/.hy-history\"))\n+ readline.parse_and_bind(\"set blink-matching-paren on\")\n \n- readline.parse_and_bind(readline_bind)\n+ try:\n+ readline.read_history_file(history)\n+ except OSError:\n+ pass\n+\n+ readline.parse_and_bind(readline_bind)\n \n try:\n yield\n finally:\n- if docomplete:\n- try:\n- readline.write_history_file(history)\n- except OSError:\n- pass\n+ try:\n+ readline.write_history_file(history)\n+ except OSError:\n+ pass\n", "issue": "curses and SIGWINCH do not work properly with Hy.\nAfter importing curses under hy, curses is unable to detect the size of the terminal as it is resized.\r\nThis manifests as curses.LINES and curses.COLS not being updated, stdscr.getmaxyx not working and so on.\r\n\r\nHowever, the workaround of launching the hy program from python with:\r\n```\r\nimport hy\r\nimport curses\r\nfrom main import main_event_loop\r\n\r\nif __name__ == \"__main__\":\r\n curses.wrapper(main_event_loop, ...)\r\n```\r\nallows curses to dynamically detect the size of the terminal.\r\n\r\nI conclude therefore the problem is with the hy binary. 
My (limited) understanding acquired during tracking down the source of this problem is that curses uses the SIGWINCH signal, so perhaps that is a place to look.\r\n\r\nVoid linux x86 64bit, python 3.9.0\r\nFreebsd 12.2, python 3.8.6\r\nhy 0.19.0 (reported by pip) installed from git master branch via pip\n", "before_files": [{"content": "import contextlib\nimport os\nimport re\nimport sys\nimport builtins\n\nimport hy.macros\nimport hy.compiler\n\n\ndocomplete = True\n\ntry:\n import readline\nexcept AttributeError as e:\n # https://github.com/pyreadline/pyreadline/issues/65\n if \"module 'collections' has no attribute 'Callable'\" in str(e):\n docomplete = False\n else:\n raise\nexcept ImportError:\n docomplete = False\n\nif docomplete:\n if sys.platform == 'darwin' and 'libedit' in readline.__doc__:\n readline_bind = \"bind ^I rl_complete\"\n else:\n readline_bind = \"tab: complete\"\n\n\nclass Completer:\n\n def __init__(self, namespace={}):\n if not isinstance(namespace, dict):\n raise TypeError('namespace must be a dictionary')\n self.namespace = namespace\n self.path = [builtins.__dict__,\n namespace]\n\n namespace.setdefault('__macros__', {})\n\n self.path.append(namespace['__macros__'])\n\n def attr_matches(self, text):\n # Borrowed from IPython's completer\n m = re.match(r\"(\\S+(\\.[\\w-]+)*)\\.([\\w-]*)$\", text)\n\n if m:\n expr, attr = m.group(1, 3)\n attr = attr.replace(\"-\", \"_\")\n expr = expr.replace(\"-\", \"_\")\n else:\n return []\n\n try:\n obj = eval(expr, self.namespace)\n words = dir(obj)\n except Exception:\n return []\n\n n = len(attr)\n matches = []\n for w in words:\n if w[:n] == attr:\n matches.append(\"{}.{}\".format(\n expr.replace(\"_\", \"-\"), w.replace(\"_\", \"-\")))\n return matches\n\n def global_matches(self, text):\n matches = []\n for p in self.path:\n for k in p.keys():\n if isinstance(k, str):\n k = k.replace(\"_\", \"-\")\n if k.startswith(text):\n matches.append(k)\n return matches\n\n def complete(self, text, state):\n if \".\" in text:\n matches = self.attr_matches(text)\n else:\n matches = self.global_matches(text)\n try:\n return matches[state]\n except IndexError:\n return None\n\n\[email protected]\ndef completion(completer=None):\n delims = \"()[]{} \"\n if not completer:\n completer = Completer()\n\n if docomplete:\n readline.set_completer(completer.complete)\n readline.set_completer_delims(delims)\n\n history = os.environ.get(\n \"HY_HISTORY\", os.path.expanduser(\"~/.hy-history\"))\n readline.parse_and_bind(\"set blink-matching-paren on\")\n\n try:\n readline.read_history_file(history)\n except OSError:\n pass\n\n readline.parse_and_bind(readline_bind)\n\n try:\n yield\n finally:\n if docomplete:\n try:\n readline.write_history_file(history)\n except OSError:\n pass\n", "path": "hy/completer.py"}], "after_files": [{"content": "import contextlib\nimport os\nimport re\nimport sys\nimport builtins\n\nimport hy.macros\nimport hy.compiler\n\n\n# Lazily import `readline` to work around\n# https://bugs.python.org/issue2675#msg265564\nreadline = None\ndef init_readline():\n global readline\n try:\n import readline\n except AttributeError as e:\n # https://github.com/pyreadline/pyreadline/issues/65\n if \"module 'collections' has no attribute 'Callable'\" not in str(e):\n raise\n except ImportError:\n pass\n\n\nclass Completer:\n\n def __init__(self, namespace={}):\n if not isinstance(namespace, dict):\n raise TypeError('namespace must be a dictionary')\n self.namespace = namespace\n self.path = [builtins.__dict__,\n namespace]\n\n 
namespace.setdefault('__macros__', {})\n\n self.path.append(namespace['__macros__'])\n\n def attr_matches(self, text):\n # Borrowed from IPython's completer\n m = re.match(r\"(\\S+(\\.[\\w-]+)*)\\.([\\w-]*)$\", text)\n\n if m:\n expr, attr = m.group(1, 3)\n attr = attr.replace(\"-\", \"_\")\n expr = expr.replace(\"-\", \"_\")\n else:\n return []\n\n try:\n obj = eval(expr, self.namespace)\n words = dir(obj)\n except Exception:\n return []\n\n n = len(attr)\n matches = []\n for w in words:\n if w[:n] == attr:\n matches.append(\"{}.{}\".format(\n expr.replace(\"_\", \"-\"), w.replace(\"_\", \"-\")))\n return matches\n\n def global_matches(self, text):\n matches = []\n for p in self.path:\n for k in p.keys():\n if isinstance(k, str):\n k = k.replace(\"_\", \"-\")\n if k.startswith(text):\n matches.append(k)\n return matches\n\n def complete(self, text, state):\n if \".\" in text:\n matches = self.attr_matches(text)\n else:\n matches = self.global_matches(text)\n try:\n return matches[state]\n except IndexError:\n return None\n\[email protected]\ndef completion(completer=None):\n delims = \"()[]{} \"\n\n init_readline()\n if not readline:\n # We have nothing to do. Act like a null context manager.\n yield\n return\n\n if not completer:\n completer = Completer()\n\n if sys.platform == 'darwin' and 'libedit' in readline.__doc__:\n readline_bind = \"bind ^I rl_complete\"\n else:\n readline_bind = \"tab: complete\"\n\n readline.set_completer(completer.complete)\n readline.set_completer_delims(delims)\n\n history = os.environ.get(\n \"HY_HISTORY\", os.path.expanduser(\"~/.hy-history\"))\n readline.parse_and_bind(\"set blink-matching-paren on\")\n\n try:\n readline.read_history_file(history)\n except OSError:\n pass\n\n readline.parse_and_bind(readline_bind)\n\n try:\n yield\n finally:\n try:\n readline.write_history_file(history)\n except OSError:\n pass\n", "path": "hy/completer.py"}]} | 1,382 | 709 |
gh_patches_debug_19612 | rasdani/github-patches | git_diff | sanic-org__sanic-704 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Consistent module naming
I don't want to be the bad guy 😄, but there is a module file named with camelCase. Disregard me if this is not a problem.
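For orientation (an illustration, not part of the original report): PEP 8 favors short, all-lowercase module names, so the camelCase `sanic/defaultFilter.py` listed below is the outlier; making the naming consistent changes imports roughly like this:
```
# current import, tied to the camelCase module name
from sanic.defaultFilter import DefaultFilter

# after moving the filter into an existing lowercase module (one possible fix)
from sanic.log import DefaultFilter
```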
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sanic/log.py`
Content:
```
1 import logging
2
3 log = logging.getLogger('sanic')
4 netlog = logging.getLogger('network')
5
```
Path: `sanic/defaultFilter.py`
Content:
```
1 import logging
2
3
4 class DefaultFilter(logging.Filter):
5 def __init__(self, param=None):
6 self.param = param
7
8 def filter(self, record):
9 if self.param is None:
10 return True
11 if record.levelno in self.param:
12 return True
13 return False
14
```
Path: `sanic/config.py`
Content:
```
1 from sanic.defaultFilter import DefaultFilter
2 import os
3 import sys
4 import syslog
5 import platform
6 import types
7
8 SANIC_PREFIX = 'SANIC_'
9
10 _address_dict = {
11 'Windows': ('localhost', 514),
12 'Darwin': '/var/run/syslog',
13 'Linux': '/dev/log',
14 'FreeBSD': '/dev/log'
15 }
16
17 LOGGING = {
18 'version': 1,
19 'filters': {
20 'accessFilter': {
21 '()': DefaultFilter,
22 'param': [0, 10, 20]
23 },
24 'errorFilter': {
25 '()': DefaultFilter,
26 'param': [30, 40, 50]
27 }
28 },
29 'formatters': {
30 'simple': {
31 'format': '%(asctime)s - (%(name)s)[%(levelname)s]: %(message)s',
32 'datefmt': '%Y-%m-%d %H:%M:%S'
33 },
34 'access': {
35 'format': '%(asctime)s - (%(name)s)[%(levelname)s][%(host)s]: ' +
36 '%(request)s %(message)s %(status)d %(byte)d',
37 'datefmt': '%Y-%m-%d %H:%M:%S'
38 }
39 },
40 'handlers': {
41 'internal': {
42 'class': 'logging.StreamHandler',
43 'filters': ['accessFilter'],
44 'formatter': 'simple',
45 'stream': sys.stderr
46 },
47 'accessStream': {
48 'class': 'logging.StreamHandler',
49 'filters': ['accessFilter'],
50 'formatter': 'access',
51 'stream': sys.stderr
52 },
53 'errorStream': {
54 'class': 'logging.StreamHandler',
55 'filters': ['errorFilter'],
56 'formatter': 'simple',
57 'stream': sys.stderr
58 },
59 # before you use accessSysLog, be sure that log levels
60 # 0, 10, 20 have been enabled in you syslog configuration
61 # otherwise you won't be able to see the output in syslog
62 # logging file.
63 'accessSysLog': {
64 'class': 'logging.handlers.SysLogHandler',
65 'address': _address_dict.get(platform.system(),
66 ('localhost', 514)),
67 'facility': syslog.LOG_DAEMON,
68 'filters': ['accessFilter'],
69 'formatter': 'access'
70 },
71 'errorSysLog': {
72 'class': 'logging.handlers.SysLogHandler',
73 'address': _address_dict.get(platform.system(),
74 ('localhost', 514)),
75 'facility': syslog.LOG_DAEMON,
76 'filters': ['errorFilter'],
77 'formatter': 'simple'
78 },
79 'accessTimedRotatingFile': {
80 'class': 'logging.handlers.TimedRotatingFileHandler',
81 'filters': ['accessFilter'],
82 'formatter': 'access',
83 'when': 'D',
84 'interval': 1,
85 'backupCount': 7,
86 'filename': 'access.log'
87 },
88 'errorTimedRotatingFile': {
89 'class': 'logging.handlers.TimedRotatingFileHandler',
90 'filters': ['errorFilter'],
91 'when': 'D',
92 'interval': 1,
93 'backupCount': 7,
94 'filename': 'error.log',
95 'formatter': 'simple'
96 }
97 },
98 'loggers': {
99 'sanic': {
100 'level': 'DEBUG',
101 'handlers': ['internal', 'errorStream']
102 },
103 'network': {
104 'level': 'DEBUG',
105 'handlers': ['accessStream', 'errorStream']
106 }
107 }
108 }
109
110 # this happens when using container or systems without syslog
111 # keep things in config would cause file not exists error
112 _addr = LOGGING['handlers']['accessSysLog']['address']
113 if type(_addr) is str and not os.path.exists(_addr):
114 LOGGING['handlers'].pop('accessSysLog')
115 LOGGING['handlers'].pop('errorSysLog')
116
117
118 class Config(dict):
119 def __init__(self, defaults=None, load_env=True, keep_alive=True):
120 super().__init__(defaults or {})
121 self.LOGO = """
122 ▄▄▄▄▄
123 ▀▀▀██████▄▄▄ _______________
124 ▄▄▄▄▄ █████████▄ / \\
125 ▀▀▀▀█████▌ ▀▐▄ ▀▐█ | Gotta go fast! |
126 ▀▀█████▄▄ ▀██████▄██ | _________________/
127 ▀▄▄▄▄▄ ▀▀█▄▀█════█▀ |/
128 ▀▀▀▄ ▀▀███ ▀ ▄▄
129 ▄███▀▀██▄████████▄ ▄▀▀▀▀▀▀█▌
130 ██▀▄▄▄██▀▄███▀ ▀▀████ ▄██
131 ▄▀▀▀▄██▄▀▀▌████▒▒▒▒▒▒███ ▌▄▄▀
132 ▌ ▐▀████▐███▒▒▒▒▒▐██▌
133 ▀▄▄▄▄▀ ▀▀████▒▒▒▒▄██▀
134 ▀▀█████████▀
135 ▄▄██▀██████▀█
136 ▄██▀ ▀▀▀ █
137 ▄█ ▐▌
138 ▄▄▄▄█▌ ▀█▄▄▄▄▀▀▄
139 ▌ ▐ ▀▀▄▄▄▀
140 ▀▀▄▄▀
141 """
142 self.REQUEST_MAX_SIZE = 100000000 # 100 megababies
143 self.REQUEST_TIMEOUT = 60 # 60 seconds
144 self.KEEP_ALIVE = keep_alive
145
146 if load_env:
147 self.load_environment_vars()
148
149 def __getattr__(self, attr):
150 try:
151 return self[attr]
152 except KeyError as ke:
153 raise AttributeError("Config has no '{}'".format(ke.args[0]))
154
155 def __setattr__(self, attr, value):
156 self[attr] = value
157
158 def from_envvar(self, variable_name):
159 """Load a configuration from an environment variable pointing to
160 a configuration file.
161
162 :param variable_name: name of the environment variable
163 :return: bool. ``True`` if able to load config, ``False`` otherwise.
164 """
165 config_file = os.environ.get(variable_name)
166 if not config_file:
167 raise RuntimeError('The environment variable %r is not set and '
168 'thus configuration could not be loaded.' %
169 variable_name)
170 return self.from_pyfile(config_file)
171
172 def from_pyfile(self, filename):
173 """Update the values in the config from a Python file.
174 Only the uppercase variables in that module are stored in the config.
175
176 :param filename: an absolute path to the config file
177 """
178 module = types.ModuleType('config')
179 module.__file__ = filename
180 try:
181 with open(filename) as config_file:
182 exec(compile(config_file.read(), filename, 'exec'),
183 module.__dict__)
184 except IOError as e:
185 e.strerror = 'Unable to load configuration file (%s)' % e.strerror
186 raise
187 self.from_object(module)
188 return True
189
190 def from_object(self, obj):
191 """Update the values from the given object.
192 Objects are usually either modules or classes.
193
194 Just the uppercase variables in that object are stored in the config.
195 Example usage::
196
197 from yourapplication import default_config
198 app.config.from_object(default_config)
199
200 You should not use this function to load the actual configuration but
201 rather configuration defaults. The actual config should be loaded
202 with :meth:`from_pyfile` and ideally from a location not within the
203 package because the package might be installed system wide.
204
205 :param obj: an object holding the configuration
206 """
207 for key in dir(obj):
208 if key.isupper():
209 self[key] = getattr(obj, key)
210
211 def load_environment_vars(self):
212 """
213 Looks for any SANIC_ prefixed environment variables and applies
214 them to the configuration if present.
215 """
216 for k, v in os.environ.items():
217 if k.startswith(SANIC_PREFIX):
218 _, config_key = k.split(SANIC_PREFIX, 1)
219 self[config_key] = v
220
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/sanic/config.py b/sanic/config.py
--- a/sanic/config.py
+++ b/sanic/config.py
@@ -1,10 +1,11 @@
-from sanic.defaultFilter import DefaultFilter
import os
import sys
import syslog
import platform
import types
+from sanic.log import DefaultFilter
+
SANIC_PREFIX = 'SANIC_'
_address_dict = {
diff --git a/sanic/defaultFilter.py b/sanic/defaultFilter.py
deleted file mode 100644
--- a/sanic/defaultFilter.py
+++ /dev/null
@@ -1,13 +0,0 @@
-import logging
-
-
-class DefaultFilter(logging.Filter):
- def __init__(self, param=None):
- self.param = param
-
- def filter(self, record):
- if self.param is None:
- return True
- if record.levelno in self.param:
- return True
- return False
diff --git a/sanic/log.py b/sanic/log.py
--- a/sanic/log.py
+++ b/sanic/log.py
@@ -1,4 +1,18 @@
import logging
+
+class DefaultFilter(logging.Filter):
+
+ def __init__(self, param=None):
+ self.param = param
+
+ def filter(self, record):
+ if self.param is None:
+ return True
+ if record.levelno in self.param:
+ return True
+ return False
+
+
log = logging.getLogger('sanic')
netlog = logging.getLogger('network')
| {"golden_diff": "diff --git a/sanic/config.py b/sanic/config.py\n--- a/sanic/config.py\n+++ b/sanic/config.py\n@@ -1,10 +1,11 @@\n-from sanic.defaultFilter import DefaultFilter\n import os\n import sys\n import syslog\n import platform\n import types\n \n+from sanic.log import DefaultFilter\n+\n SANIC_PREFIX = 'SANIC_'\n \n _address_dict = {\ndiff --git a/sanic/defaultFilter.py b/sanic/defaultFilter.py\ndeleted file mode 100644\n--- a/sanic/defaultFilter.py\n+++ /dev/null\n@@ -1,13 +0,0 @@\n-import logging\n-\n-\n-class DefaultFilter(logging.Filter):\n- def __init__(self, param=None):\n- self.param = param\n-\n- def filter(self, record):\n- if self.param is None:\n- return True\n- if record.levelno in self.param:\n- return True\n- return False\ndiff --git a/sanic/log.py b/sanic/log.py\n--- a/sanic/log.py\n+++ b/sanic/log.py\n@@ -1,4 +1,18 @@\n import logging\n \n+\n+class DefaultFilter(logging.Filter):\n+\n+ def __init__(self, param=None):\n+ self.param = param\n+\n+ def filter(self, record):\n+ if self.param is None:\n+ return True\n+ if record.levelno in self.param:\n+ return True\n+ return False\n+\n+\n log = logging.getLogger('sanic')\n netlog = logging.getLogger('network')\n", "issue": "Consistent module naming\nI don't want to be the bad guy \ud83d\ude04 , but there is a module file named with camelCase. Disregard me if this is not a problem.\n", "before_files": [{"content": "import logging\n\nlog = logging.getLogger('sanic')\nnetlog = logging.getLogger('network')\n", "path": "sanic/log.py"}, {"content": "import logging\n\n\nclass DefaultFilter(logging.Filter):\n def __init__(self, param=None):\n self.param = param\n\n def filter(self, record):\n if self.param is None:\n return True\n if record.levelno in self.param:\n return True\n return False\n", "path": "sanic/defaultFilter.py"}, {"content": "from sanic.defaultFilter import DefaultFilter\nimport os\nimport sys\nimport syslog\nimport platform\nimport types\n\nSANIC_PREFIX = 'SANIC_'\n\n_address_dict = {\n 'Windows': ('localhost', 514),\n 'Darwin': '/var/run/syslog',\n 'Linux': '/dev/log',\n 'FreeBSD': '/dev/log'\n}\n\nLOGGING = {\n 'version': 1,\n 'filters': {\n 'accessFilter': {\n '()': DefaultFilter,\n 'param': [0, 10, 20]\n },\n 'errorFilter': {\n '()': DefaultFilter,\n 'param': [30, 40, 50]\n }\n },\n 'formatters': {\n 'simple': {\n 'format': '%(asctime)s - (%(name)s)[%(levelname)s]: %(message)s',\n 'datefmt': '%Y-%m-%d %H:%M:%S'\n },\n 'access': {\n 'format': '%(asctime)s - (%(name)s)[%(levelname)s][%(host)s]: ' +\n '%(request)s %(message)s %(status)d %(byte)d',\n 'datefmt': '%Y-%m-%d %H:%M:%S'\n }\n },\n 'handlers': {\n 'internal': {\n 'class': 'logging.StreamHandler',\n 'filters': ['accessFilter'],\n 'formatter': 'simple',\n 'stream': sys.stderr\n },\n 'accessStream': {\n 'class': 'logging.StreamHandler',\n 'filters': ['accessFilter'],\n 'formatter': 'access',\n 'stream': sys.stderr\n },\n 'errorStream': {\n 'class': 'logging.StreamHandler',\n 'filters': ['errorFilter'],\n 'formatter': 'simple',\n 'stream': sys.stderr\n },\n # before you use accessSysLog, be sure that log levels\n # 0, 10, 20 have been enabled in you syslog configuration\n # otherwise you won't be able to see the output in syslog\n # logging file.\n 'accessSysLog': {\n 'class': 'logging.handlers.SysLogHandler',\n 'address': _address_dict.get(platform.system(),\n ('localhost', 514)),\n 'facility': syslog.LOG_DAEMON,\n 'filters': ['accessFilter'],\n 'formatter': 'access'\n },\n 'errorSysLog': {\n 'class': 'logging.handlers.SysLogHandler',\n 'address': 
_address_dict.get(platform.system(),\n ('localhost', 514)),\n 'facility': syslog.LOG_DAEMON,\n 'filters': ['errorFilter'],\n 'formatter': 'simple'\n },\n 'accessTimedRotatingFile': {\n 'class': 'logging.handlers.TimedRotatingFileHandler',\n 'filters': ['accessFilter'],\n 'formatter': 'access',\n 'when': 'D',\n 'interval': 1,\n 'backupCount': 7,\n 'filename': 'access.log'\n },\n 'errorTimedRotatingFile': {\n 'class': 'logging.handlers.TimedRotatingFileHandler',\n 'filters': ['errorFilter'],\n 'when': 'D',\n 'interval': 1,\n 'backupCount': 7,\n 'filename': 'error.log',\n 'formatter': 'simple'\n }\n },\n 'loggers': {\n 'sanic': {\n 'level': 'DEBUG',\n 'handlers': ['internal', 'errorStream']\n },\n 'network': {\n 'level': 'DEBUG',\n 'handlers': ['accessStream', 'errorStream']\n }\n }\n}\n\n# this happens when using container or systems without syslog\n# keep things in config would cause file not exists error\n_addr = LOGGING['handlers']['accessSysLog']['address']\nif type(_addr) is str and not os.path.exists(_addr):\n LOGGING['handlers'].pop('accessSysLog')\n LOGGING['handlers'].pop('errorSysLog')\n\n\nclass Config(dict):\n def __init__(self, defaults=None, load_env=True, keep_alive=True):\n super().__init__(defaults or {})\n self.LOGO = \"\"\"\n \u2584\u2584\u2584\u2584\u2584\n \u2580\u2580\u2580\u2588\u2588\u2588\u2588\u2588\u2588\u2584\u2584\u2584 _______________\n \u2584\u2584\u2584\u2584\u2584 \u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2584 / \\\\\n \u2580\u2580\u2580\u2580\u2588\u2588\u2588\u2588\u2588\u258c \u2580\u2590\u2584 \u2580\u2590\u2588 | Gotta go fast! |\n \u2580\u2580\u2588\u2588\u2588\u2588\u2588\u2584\u2584 \u2580\u2588\u2588\u2588\u2588\u2588\u2588\u2584\u2588\u2588 | _________________/\n \u2580\u2584\u2584\u2584\u2584\u2584 \u2580\u2580\u2588\u2584\u2580\u2588\u2550\u2550\u2550\u2550\u2588\u2580 |/\n \u2580\u2580\u2580\u2584 \u2580\u2580\u2588\u2588\u2588 \u2580 \u2584\u2584\n \u2584\u2588\u2588\u2588\u2580\u2580\u2588\u2588\u2584\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2584 \u2584\u2580\u2580\u2580\u2580\u2580\u2580\u2588\u258c\n \u2588\u2588\u2580\u2584\u2584\u2584\u2588\u2588\u2580\u2584\u2588\u2588\u2588\u2580 \u2580\u2580\u2588\u2588\u2588\u2588 \u2584\u2588\u2588\n\u2584\u2580\u2580\u2580\u2584\u2588\u2588\u2584\u2580\u2580\u258c\u2588\u2588\u2588\u2588\u2592\u2592\u2592\u2592\u2592\u2592\u2588\u2588\u2588 \u258c\u2584\u2584\u2580\n\u258c \u2590\u2580\u2588\u2588\u2588\u2588\u2590\u2588\u2588\u2588\u2592\u2592\u2592\u2592\u2592\u2590\u2588\u2588\u258c\n\u2580\u2584\u2584\u2584\u2584\u2580 \u2580\u2580\u2588\u2588\u2588\u2588\u2592\u2592\u2592\u2592\u2584\u2588\u2588\u2580\n \u2580\u2580\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2580\n \u2584\u2584\u2588\u2588\u2580\u2588\u2588\u2588\u2588\u2588\u2588\u2580\u2588\n \u2584\u2588\u2588\u2580 \u2580\u2580\u2580 \u2588\n \u2584\u2588 \u2590\u258c\n \u2584\u2584\u2584\u2584\u2588\u258c \u2580\u2588\u2584\u2584\u2584\u2584\u2580\u2580\u2584\n\u258c \u2590 \u2580\u2580\u2584\u2584\u2584\u2580\n \u2580\u2580\u2584\u2584\u2580\n\"\"\"\n self.REQUEST_MAX_SIZE = 100000000 # 100 megababies\n self.REQUEST_TIMEOUT = 60 # 60 seconds\n self.KEEP_ALIVE = keep_alive\n\n if load_env:\n self.load_environment_vars()\n\n def __getattr__(self, attr):\n try:\n return self[attr]\n except KeyError as ke:\n raise AttributeError(\"Config has no '{}'\".format(ke.args[0]))\n\n def __setattr__(self, attr, value):\n self[attr] = value\n\n def from_envvar(self, variable_name):\n \"\"\"Load a configuration from an 
environment variable pointing to\n a configuration file.\n\n :param variable_name: name of the environment variable\n :return: bool. ``True`` if able to load config, ``False`` otherwise.\n \"\"\"\n config_file = os.environ.get(variable_name)\n if not config_file:\n raise RuntimeError('The environment variable %r is not set and '\n 'thus configuration could not be loaded.' %\n variable_name)\n return self.from_pyfile(config_file)\n\n def from_pyfile(self, filename):\n \"\"\"Update the values in the config from a Python file.\n Only the uppercase variables in that module are stored in the config.\n\n :param filename: an absolute path to the config file\n \"\"\"\n module = types.ModuleType('config')\n module.__file__ = filename\n try:\n with open(filename) as config_file:\n exec(compile(config_file.read(), filename, 'exec'),\n module.__dict__)\n except IOError as e:\n e.strerror = 'Unable to load configuration file (%s)' % e.strerror\n raise\n self.from_object(module)\n return True\n\n def from_object(self, obj):\n \"\"\"Update the values from the given object.\n Objects are usually either modules or classes.\n\n Just the uppercase variables in that object are stored in the config.\n Example usage::\n\n from yourapplication import default_config\n app.config.from_object(default_config)\n\n You should not use this function to load the actual configuration but\n rather configuration defaults. The actual config should be loaded\n with :meth:`from_pyfile` and ideally from a location not within the\n package because the package might be installed system wide.\n\n :param obj: an object holding the configuration\n \"\"\"\n for key in dir(obj):\n if key.isupper():\n self[key] = getattr(obj, key)\n\n def load_environment_vars(self):\n \"\"\"\n Looks for any SANIC_ prefixed environment variables and applies\n them to the configuration if present.\n \"\"\"\n for k, v in os.environ.items():\n if k.startswith(SANIC_PREFIX):\n _, config_key = k.split(SANIC_PREFIX, 1)\n self[config_key] = v\n", "path": "sanic/config.py"}], "after_files": [{"content": "import logging\n\n\nclass DefaultFilter(logging.Filter):\n\n def __init__(self, param=None):\n self.param = param\n\n def filter(self, record):\n if self.param is None:\n return True\n if record.levelno in self.param:\n return True\n return False\n\n\nlog = logging.getLogger('sanic')\nnetlog = logging.getLogger('network')\n", "path": "sanic/log.py"}, {"content": null, "path": "sanic/defaultFilter.py"}, {"content": "import os\nimport sys\nimport syslog\nimport platform\nimport types\n\nfrom sanic.log import DefaultFilter\n\nSANIC_PREFIX = 'SANIC_'\n\n_address_dict = {\n 'Windows': ('localhost', 514),\n 'Darwin': '/var/run/syslog',\n 'Linux': '/dev/log',\n 'FreeBSD': '/dev/log'\n}\n\nLOGGING = {\n 'version': 1,\n 'filters': {\n 'accessFilter': {\n '()': DefaultFilter,\n 'param': [0, 10, 20]\n },\n 'errorFilter': {\n '()': DefaultFilter,\n 'param': [30, 40, 50]\n }\n },\n 'formatters': {\n 'simple': {\n 'format': '%(asctime)s - (%(name)s)[%(levelname)s]: %(message)s',\n 'datefmt': '%Y-%m-%d %H:%M:%S'\n },\n 'access': {\n 'format': '%(asctime)s - (%(name)s)[%(levelname)s][%(host)s]: ' +\n '%(request)s %(message)s %(status)d %(byte)d',\n 'datefmt': '%Y-%m-%d %H:%M:%S'\n }\n },\n 'handlers': {\n 'internal': {\n 'class': 'logging.StreamHandler',\n 'filters': ['accessFilter'],\n 'formatter': 'simple',\n 'stream': sys.stderr\n },\n 'accessStream': {\n 'class': 'logging.StreamHandler',\n 'filters': ['accessFilter'],\n 'formatter': 'access',\n 'stream': sys.stderr\n },\n 
'errorStream': {\n 'class': 'logging.StreamHandler',\n 'filters': ['errorFilter'],\n 'formatter': 'simple',\n 'stream': sys.stderr\n },\n # before you use accessSysLog, be sure that log levels\n # 0, 10, 20 have been enabled in you syslog configuration\n # otherwise you won't be able to see the output in syslog\n # logging file.\n 'accessSysLog': {\n 'class': 'logging.handlers.SysLogHandler',\n 'address': _address_dict.get(platform.system(),\n ('localhost', 514)),\n 'facility': syslog.LOG_DAEMON,\n 'filters': ['accessFilter'],\n 'formatter': 'access'\n },\n 'errorSysLog': {\n 'class': 'logging.handlers.SysLogHandler',\n 'address': _address_dict.get(platform.system(),\n ('localhost', 514)),\n 'facility': syslog.LOG_DAEMON,\n 'filters': ['errorFilter'],\n 'formatter': 'simple'\n },\n 'accessTimedRotatingFile': {\n 'class': 'logging.handlers.TimedRotatingFileHandler',\n 'filters': ['accessFilter'],\n 'formatter': 'access',\n 'when': 'D',\n 'interval': 1,\n 'backupCount': 7,\n 'filename': 'access.log'\n },\n 'errorTimedRotatingFile': {\n 'class': 'logging.handlers.TimedRotatingFileHandler',\n 'filters': ['errorFilter'],\n 'when': 'D',\n 'interval': 1,\n 'backupCount': 7,\n 'filename': 'error.log',\n 'formatter': 'simple'\n }\n },\n 'loggers': {\n 'sanic': {\n 'level': 'DEBUG',\n 'handlers': ['internal', 'errorStream']\n },\n 'network': {\n 'level': 'DEBUG',\n 'handlers': ['accessStream', 'errorStream']\n }\n }\n}\n\n# this happens when using container or systems without syslog\n# keep things in config would cause file not exists error\n_addr = LOGGING['handlers']['accessSysLog']['address']\nif type(_addr) is str and not os.path.exists(_addr):\n LOGGING['handlers'].pop('accessSysLog')\n LOGGING['handlers'].pop('errorSysLog')\n\n\nclass Config(dict):\n def __init__(self, defaults=None, load_env=True, keep_alive=True):\n super().__init__(defaults or {})\n self.LOGO = \"\"\"\n \u2584\u2584\u2584\u2584\u2584\n \u2580\u2580\u2580\u2588\u2588\u2588\u2588\u2588\u2588\u2584\u2584\u2584 _______________\n \u2584\u2584\u2584\u2584\u2584 \u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2584 / \\\\\n \u2580\u2580\u2580\u2580\u2588\u2588\u2588\u2588\u2588\u258c \u2580\u2590\u2584 \u2580\u2590\u2588 | Gotta go fast! 
|\n \u2580\u2580\u2588\u2588\u2588\u2588\u2588\u2584\u2584 \u2580\u2588\u2588\u2588\u2588\u2588\u2588\u2584\u2588\u2588 | _________________/\n \u2580\u2584\u2584\u2584\u2584\u2584 \u2580\u2580\u2588\u2584\u2580\u2588\u2550\u2550\u2550\u2550\u2588\u2580 |/\n \u2580\u2580\u2580\u2584 \u2580\u2580\u2588\u2588\u2588 \u2580 \u2584\u2584\n \u2584\u2588\u2588\u2588\u2580\u2580\u2588\u2588\u2584\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2584 \u2584\u2580\u2580\u2580\u2580\u2580\u2580\u2588\u258c\n \u2588\u2588\u2580\u2584\u2584\u2584\u2588\u2588\u2580\u2584\u2588\u2588\u2588\u2580 \u2580\u2580\u2588\u2588\u2588\u2588 \u2584\u2588\u2588\n\u2584\u2580\u2580\u2580\u2584\u2588\u2588\u2584\u2580\u2580\u258c\u2588\u2588\u2588\u2588\u2592\u2592\u2592\u2592\u2592\u2592\u2588\u2588\u2588 \u258c\u2584\u2584\u2580\n\u258c \u2590\u2580\u2588\u2588\u2588\u2588\u2590\u2588\u2588\u2588\u2592\u2592\u2592\u2592\u2592\u2590\u2588\u2588\u258c\n\u2580\u2584\u2584\u2584\u2584\u2580 \u2580\u2580\u2588\u2588\u2588\u2588\u2592\u2592\u2592\u2592\u2584\u2588\u2588\u2580\n \u2580\u2580\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2580\n \u2584\u2584\u2588\u2588\u2580\u2588\u2588\u2588\u2588\u2588\u2588\u2580\u2588\n \u2584\u2588\u2588\u2580 \u2580\u2580\u2580 \u2588\n \u2584\u2588 \u2590\u258c\n \u2584\u2584\u2584\u2584\u2588\u258c \u2580\u2588\u2584\u2584\u2584\u2584\u2580\u2580\u2584\n\u258c \u2590 \u2580\u2580\u2584\u2584\u2584\u2580\n \u2580\u2580\u2584\u2584\u2580\n\"\"\"\n self.REQUEST_MAX_SIZE = 100000000 # 100 megababies\n self.REQUEST_TIMEOUT = 60 # 60 seconds\n self.KEEP_ALIVE = keep_alive\n\n if load_env:\n self.load_environment_vars()\n\n def __getattr__(self, attr):\n try:\n return self[attr]\n except KeyError as ke:\n raise AttributeError(\"Config has no '{}'\".format(ke.args[0]))\n\n def __setattr__(self, attr, value):\n self[attr] = value\n\n def from_envvar(self, variable_name):\n \"\"\"Load a configuration from an environment variable pointing to\n a configuration file.\n\n :param variable_name: name of the environment variable\n :return: bool. ``True`` if able to load config, ``False`` otherwise.\n \"\"\"\n config_file = os.environ.get(variable_name)\n if not config_file:\n raise RuntimeError('The environment variable %r is not set and '\n 'thus configuration could not be loaded.' %\n variable_name)\n return self.from_pyfile(config_file)\n\n def from_pyfile(self, filename):\n \"\"\"Update the values in the config from a Python file.\n Only the uppercase variables in that module are stored in the config.\n\n :param filename: an absolute path to the config file\n \"\"\"\n module = types.ModuleType('config')\n module.__file__ = filename\n try:\n with open(filename) as config_file:\n exec(compile(config_file.read(), filename, 'exec'),\n module.__dict__)\n except IOError as e:\n e.strerror = 'Unable to load configuration file (%s)' % e.strerror\n raise\n self.from_object(module)\n return True\n\n def from_object(self, obj):\n \"\"\"Update the values from the given object.\n Objects are usually either modules or classes.\n\n Just the uppercase variables in that object are stored in the config.\n Example usage::\n\n from yourapplication import default_config\n app.config.from_object(default_config)\n\n You should not use this function to load the actual configuration but\n rather configuration defaults. 
The actual config should be loaded\n with :meth:`from_pyfile` and ideally from a location not within the\n package because the package might be installed system wide.\n\n :param obj: an object holding the configuration\n \"\"\"\n for key in dir(obj):\n if key.isupper():\n self[key] = getattr(obj, key)\n\n def load_environment_vars(self):\n \"\"\"\n Looks for any SANIC_ prefixed environment variables and applies\n them to the configuration if present.\n \"\"\"\n for k, v in os.environ.items():\n if k.startswith(SANIC_PREFIX):\n _, config_key = k.split(SANIC_PREFIX, 1)\n self[config_key] = v\n", "path": "sanic/config.py"}]} | 2,818 | 340 |
gh_patches_debug_13546 | rasdani/github-patches | git_diff | NVIDIA__NVFlare-292 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
HE aggregator has wrong check for data kind
This [block](https://github.com/NVIDIA/NVFlare/blob/52fa8fc989811526c50ecd4030cc89141b1b26cd/nvflare/app_common/homomorphic_encryption/he_intime_accumulate_model_aggregator.py#L104) should be replaced with:
```
if dxo.data_kind != self.expected_data_kind:
self.log_error(fl_ctx, "expected {self.expected_data_kind} type DXO only, skipping this shareable.")
return False
```
_Originally posted by @holgerroth in https://github.com/NVIDIA/NVFlare/discussions/234#discussioncomment-2340297_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `nvflare/app_common/homomorphic_encryption/he_intime_accumulate_model_aggregator.py`
Content:
```
1 # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import re
16 import time
17
18 import numpy as np
19 import tenseal as ts
20
21 import nvflare.app_common.homomorphic_encryption.he_constant as he
22 from nvflare.apis.dxo import DXO, DataKind, MetaKey, from_shareable
23 from nvflare.apis.event_type import EventType
24 from nvflare.apis.fl_constant import ReservedKey, ReturnCode
25 from nvflare.apis.fl_context import FLContext
26 from nvflare.apis.shareable import Shareable
27 from nvflare.app_common.abstract.aggregator import Aggregator
28 from nvflare.app_common.app_constant import AppConstants
29 from nvflare.app_common.homomorphic_encryption.homomorphic_encrypt import (
30 count_encrypted_layers,
31 load_tenseal_context_from_workspace,
32 )
33
34
35 class HEInTimeAccumulateWeightedAggregator(Aggregator):
36 def __init__(
37 self,
38 exclude_vars=None,
39 aggregation_weights=None,
40 tenseal_context_file="server_context.tenseal",
41 weigh_by_local_iter=False,
42 expected_data_kind="WEIGHT_DIFF",
43 expected_algorithm=he.HE_ALGORITHM_CKKS,
44 ):
45 """In time aggregator for `Shareables` encrypted using homomorphic encryption (HE) with TenSEAL https://github.com/OpenMined/TenSEAL.
46
47 Args:
48 exclude_vars ([list], optional): variable names that should be excluded from aggregation (use regular expression). Defaults to None.
49 aggregation_weights ([dict], optional): dictionary of client aggregation. Defaults to None.
50 tenseal_context_file (str, optional): [description]. Defaults to "server_context.tenseal".
51 weigh_by_local_iter (bool, optional): If true, multiply client weights on first in encryption space
52 (default: `False` which is recommended for HE, first multiply happens in `HEModelEncryptor`)].
53 expected_data_kind (str, optional): the data_kind this aggregator can process. Defaults to "WEIGHT_DIFF".
54 expected_algorithm ([str], optional): the HE algorithm it can process. Defaults to he.HE_ALGORITHM_CKKS.
55
56 Raises:
57 ValueError: mismatched data_kind or HE algorithm
58 """
59 super().__init__()
60 self.tenseal_context = None
61 self.tenseal_context_file = tenseal_context_file
62 if expected_data_kind not in [DataKind.WEIGHT_DIFF, DataKind.WEIGHTS]:
63 raise ValueError(f"expected_data_kind={expected_data_kind} not in WEIGHT_DIFF or WEIGHTS")
64 self.expected_data_kind = expected_data_kind
65 self.expected_algorithm = expected_algorithm
66 if self.expected_algorithm != he.HE_ALGORITHM_CKKS:
67 raise ValueError(f"expected algorithm {self.expected_algorithm} not supported")
68 self.exclude_vars = re.compile(exclude_vars) if exclude_vars else None
69 self.aggregation_weights = aggregation_weights or {}
70 self.reset_stats()
71 self.weigh_by_local_iter = weigh_by_local_iter
72 self.logger.info(f"client weights control: {self.aggregation_weights}")
73 if not self.weigh_by_local_iter:
74 if self.aggregation_weights:
75 self.logger.warning("aggregation_weights will be ignored if weigh_by_local_iter=False")
76 self.logger.info("Only divide by sum of local (weighted) iterations.")
77 self.warning_count = dict()
78 self.warning_limit = 0
79
80 def handle_event(self, event_type: str, fl_ctx: FLContext):
81 if event_type == EventType.START_RUN:
82 self.tenseal_context = load_tenseal_context_from_workspace(self.tenseal_context_file, fl_ctx)
83 elif event_type == EventType.END_RUN:
84 self.tenseal_context = None
85
86 def reset_stats(self):
87 self.total = dict()
88 self.counts = dict()
89 self.contribution_count = 0
90 self.history = list()
91 self.merged_encrypted_layers = dict() # thread-safety is handled by workflow
92
93 def accept(self, shareable: Shareable, fl_ctx: FLContext) -> bool:
94 """Accepts and adds the client updates to current average in HE encrypted space.
95
96 Args:
97 shareable: a shareable from client
98 fl_ctx: FL Contenxt associated with this shareable
99
100 Returns:
101 bool to indicate if this shareable is accepted.
102 """
103 dxo = from_shareable(shareable)
104 if dxo.data_kind != DataKind.WEIGHT_DIFF:
105 self.log_error(fl_ctx, "support WEIGHT_DIFF type DXO only, skipping this shareable.")
106 return False
107
108 enc_algo = dxo.get_meta_prop(key=MetaKey.PROCESSED_ALGORITHM, default=None)
109 if enc_algo != self.expected_algorithm:
110 self.log_error(fl_ctx, "unsupported encryption algorithm {enc_algo}")
111 return False
112
113 current_round = fl_ctx.get_prop(AppConstants.CURRENT_ROUND)
114 client_name = shareable.get_peer_prop(ReservedKey.IDENTITY_NAME, "?")
115 contribution_round = shareable.get_header(AppConstants.CONTRIBUTION_ROUND)
116
117 rc = shareable.get_return_code()
118 if rc and rc != ReturnCode.OK:
119 self.log_debug(fl_ctx, f"Client {client_name} returned rc: {rc}. Disregarding contribution.")
120 return False
121
122 self.log_debug(fl_ctx, f"current_round: {current_round}")
123
124 if contribution_round != current_round:
125 self.log_debug(
126 fl_ctx,
127 "Discarded the contribution from {client_name} for round: {contribution_round}. Current round is: {current_round}",
128 )
129 return False
130
131 start_time = time.time()
132
133 for item in self.history:
134 if client_name == item["client_name"]:
135 prev_round = item["round"]
136 self.log_info(
137 fl_ctx,
138 f"discarding shareable from {client_name} at round: {contribution_round} as {prev_round} accepted already",
139 )
140 return False
141
142 self.log_info(fl_ctx, f"Adding contribution from {client_name}.")
143
144 n_iter = dxo.get_meta_prop(key=MetaKey.NUM_STEPS_CURRENT_ROUND)
145 if n_iter is None:
146 if self.warning_count.get(client_name, 0) <= self.warning_limit:
147 self.log_warning(
148 fl_ctx,
149 f"NUM_STEPS_CURRENT_ROUND missing"
150 f" from {client_name} and set to default value, 1.0. "
151 f" This kind of message will show {self.warning_limit} times at most.",
152 )
153 if client_name in self.warning_count:
154 self.warning_count[client_name] = self.warning_count[client_name] + 1
155 else:
156 self.warning_count[client_name] = 0
157 n_iter = 1.0
158 float_n_iter = np.float(n_iter)
159
160 aggregation_weight = self.aggregation_weights.get(client_name)
161 if aggregation_weight is None:
162 aggregation_weight = 1.0
163
164 aggr_data = dxo.data
165 encrypted_layers = dxo.get_meta_prop(MetaKey.PROCESSED_KEYS)
166 # TODO: test support of different encrypted layers for different clients!
167
168 if encrypted_layers is None:
169 self.log_error(fl_ctx, "encrypted_layers is None!")
170 return False
171
172 for k, v in aggr_data.items():
173 if self.exclude_vars is not None and self.exclude_vars.search(k):
174 continue
175 if encrypted_layers[k]:
176 if self.weigh_by_local_iter:
177 weighted_value = ts.ckks_vector_from(self.tenseal_context, v) * (aggregation_weight * float_n_iter)
178 else:
179 weighted_value = ts.ckks_vector_from(self.tenseal_context, v)
180 self.merged_encrypted_layers[k] = True # any client can set this true
181 else:
182 if self.weigh_by_local_iter:
183 weighted_value = v * (aggregation_weight * float_n_iter)
184 else:
185 weighted_value = v
186 if k not in self.merged_encrypted_layers:
187 self.merged_encrypted_layers[k] = False # only set False if no other client set it to True
188 current_total = self.total.get(k, None)
189 if current_total is None:
190 self.total[k] = weighted_value
191 self.counts[k] = n_iter
192 else:
193 self.total[k] = current_total + weighted_value
194 self.counts[k] = self.counts[k] + n_iter
195
196 self.contribution_count += 1
197
198 end_time = time.time()
199 n_encrypted, n_total = count_encrypted_layers(self.merged_encrypted_layers)
200 self.log_info(fl_ctx, f"{n_encrypted} of {n_total} layers encrypted")
201 self.log_info(fl_ctx, f"Round {current_round} adding {client_name} time is {end_time - start_time} seconds")
202
203 self.history.append(
204 {
205 "client_name": client_name,
206 "round": contribution_round,
207 "aggregation_weight": aggregation_weight,
208 "n_iter": n_iter,
209 }
210 )
211 return True
212
213 def aggregate(self, fl_ctx: FLContext) -> Shareable:
214 start_time = time.time()
215 current_round = fl_ctx.get_prop(AppConstants.CURRENT_ROUND)
216
217 aggregated_dict = dict()
218 for k, v in self.total.items():
219 aggregated_dict[k] = v * (1.0 / self.counts[k])
220 end_time = time.time()
221 self.log_info(
222 fl_ctx,
223 f"Aggregated {self.contribution_count} contributions for round {current_round} time is {end_time - start_time} seconds",
224 )
225
226 dxo = DXO(data_kind=self.expected_data_kind, data=aggregated_dict)
227 dxo.set_meta_prop(MetaKey.PROCESSED_KEYS, self.merged_encrypted_layers)
228 dxo.set_meta_prop(MetaKey.PROCESSED_ALGORITHM, self.expected_algorithm)
229 n_encrypted, n_total = count_encrypted_layers(self.merged_encrypted_layers)
230 self.log_info(fl_ctx, f"{n_encrypted} of {n_total} layers encrypted")
231
232 fl_ctx.set_prop(AppConstants.DXO, dxo, private=True, sticky=False)
233
234 self.reset_stats() # only reset dictionary after adding merged_encrypted_layers to dictionary
235 return dxo.to_shareable()
236
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/nvflare/app_common/homomorphic_encryption/he_intime_accumulate_model_aggregator.py b/nvflare/app_common/homomorphic_encryption/he_intime_accumulate_model_aggregator.py
--- a/nvflare/app_common/homomorphic_encryption/he_intime_accumulate_model_aggregator.py
+++ b/nvflare/app_common/homomorphic_encryption/he_intime_accumulate_model_aggregator.py
@@ -101,8 +101,11 @@
bool to indicate if this shareable is accepted.
"""
dxo = from_shareable(shareable)
- if dxo.data_kind != DataKind.WEIGHT_DIFF:
- self.log_error(fl_ctx, "support WEIGHT_DIFF type DXO only, skipping this shareable.")
+ if dxo.data_kind != self.expected_data_kind:
+ self.log_error(
+ fl_ctx,
+ f"expected {self.expected_data_kind} type DXO only but received {dxo.data_kind}, skipping this shareable.",
+ )
return False
enc_algo = dxo.get_meta_prop(key=MetaKey.PROCESSED_ALGORITHM, default=None)
| {"golden_diff": "diff --git a/nvflare/app_common/homomorphic_encryption/he_intime_accumulate_model_aggregator.py b/nvflare/app_common/homomorphic_encryption/he_intime_accumulate_model_aggregator.py\n--- a/nvflare/app_common/homomorphic_encryption/he_intime_accumulate_model_aggregator.py\n+++ b/nvflare/app_common/homomorphic_encryption/he_intime_accumulate_model_aggregator.py\n@@ -101,8 +101,11 @@\n bool to indicate if this shareable is accepted.\n \"\"\"\n dxo = from_shareable(shareable)\n- if dxo.data_kind != DataKind.WEIGHT_DIFF:\n- self.log_error(fl_ctx, \"support WEIGHT_DIFF type DXO only, skipping this shareable.\")\n+ if dxo.data_kind != self.expected_data_kind:\n+ self.log_error(\n+ fl_ctx,\n+ f\"expected {self.expected_data_kind} type DXO only but received {dxo.data_kind}, skipping this shareable.\",\n+ )\n return False\n \n enc_algo = dxo.get_meta_prop(key=MetaKey.PROCESSED_ALGORITHM, default=None)\n", "issue": "HE aggregator has wrong check for data kind\nThis [block](https://github.com/NVIDIA/NVFlare/blob/52fa8fc989811526c50ecd4030cc89141b1b26cd/nvflare/app_common/homomorphic_encryption/he_intime_accumulate_model_aggregator.py#L104) should be replaced with \r\n\r\n```\r\n if dxo.data_kind != self.expected_data_kind:\r\n self.log_error(fl_ctx, \"expected {self.expected_data_kind} type DXO only, skipping this shareable.\")\r\n return False\r\n```\r\n\r\n_Originally posted by @holgerroth in https://github.com/NVIDIA/NVFlare/discussions/234#discussioncomment-2340297_\n", "before_files": [{"content": "# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport re\nimport time\n\nimport numpy as np\nimport tenseal as ts\n\nimport nvflare.app_common.homomorphic_encryption.he_constant as he\nfrom nvflare.apis.dxo import DXO, DataKind, MetaKey, from_shareable\nfrom nvflare.apis.event_type import EventType\nfrom nvflare.apis.fl_constant import ReservedKey, ReturnCode\nfrom nvflare.apis.fl_context import FLContext\nfrom nvflare.apis.shareable import Shareable\nfrom nvflare.app_common.abstract.aggregator import Aggregator\nfrom nvflare.app_common.app_constant import AppConstants\nfrom nvflare.app_common.homomorphic_encryption.homomorphic_encrypt import (\n count_encrypted_layers,\n load_tenseal_context_from_workspace,\n)\n\n\nclass HEInTimeAccumulateWeightedAggregator(Aggregator):\n def __init__(\n self,\n exclude_vars=None,\n aggregation_weights=None,\n tenseal_context_file=\"server_context.tenseal\",\n weigh_by_local_iter=False,\n expected_data_kind=\"WEIGHT_DIFF\",\n expected_algorithm=he.HE_ALGORITHM_CKKS,\n ):\n \"\"\"In time aggregator for `Shareables` encrypted using homomorphic encryption (HE) with TenSEAL https://github.com/OpenMined/TenSEAL.\n\n Args:\n exclude_vars ([list], optional): variable names that should be excluded from aggregation (use regular expression). Defaults to None.\n aggregation_weights ([dict], optional): dictionary of client aggregation. 
Defaults to None.\n tenseal_context_file (str, optional): [description]. Defaults to \"server_context.tenseal\".\n weigh_by_local_iter (bool, optional): If true, multiply client weights on first in encryption space\n (default: `False` which is recommended for HE, first multiply happens in `HEModelEncryptor`)].\n expected_data_kind (str, optional): the data_kind this aggregator can process. Defaults to \"WEIGHT_DIFF\".\n expected_algorithm ([str], optional): the HE algorithm it can process. Defaults to he.HE_ALGORITHM_CKKS.\n\n Raises:\n ValueError: mismatched data_kind or HE algorithm\n \"\"\"\n super().__init__()\n self.tenseal_context = None\n self.tenseal_context_file = tenseal_context_file\n if expected_data_kind not in [DataKind.WEIGHT_DIFF, DataKind.WEIGHTS]:\n raise ValueError(f\"expected_data_kind={expected_data_kind} not in WEIGHT_DIFF or WEIGHTS\")\n self.expected_data_kind = expected_data_kind\n self.expected_algorithm = expected_algorithm\n if self.expected_algorithm != he.HE_ALGORITHM_CKKS:\n raise ValueError(f\"expected algorithm {self.expected_algorithm} not supported\")\n self.exclude_vars = re.compile(exclude_vars) if exclude_vars else None\n self.aggregation_weights = aggregation_weights or {}\n self.reset_stats()\n self.weigh_by_local_iter = weigh_by_local_iter\n self.logger.info(f\"client weights control: {self.aggregation_weights}\")\n if not self.weigh_by_local_iter:\n if self.aggregation_weights:\n self.logger.warning(\"aggregation_weights will be ignored if weigh_by_local_iter=False\")\n self.logger.info(\"Only divide by sum of local (weighted) iterations.\")\n self.warning_count = dict()\n self.warning_limit = 0\n\n def handle_event(self, event_type: str, fl_ctx: FLContext):\n if event_type == EventType.START_RUN:\n self.tenseal_context = load_tenseal_context_from_workspace(self.tenseal_context_file, fl_ctx)\n elif event_type == EventType.END_RUN:\n self.tenseal_context = None\n\n def reset_stats(self):\n self.total = dict()\n self.counts = dict()\n self.contribution_count = 0\n self.history = list()\n self.merged_encrypted_layers = dict() # thread-safety is handled by workflow\n\n def accept(self, shareable: Shareable, fl_ctx: FLContext) -> bool:\n \"\"\"Accepts and adds the client updates to current average in HE encrypted space.\n\n Args:\n shareable: a shareable from client\n fl_ctx: FL Contenxt associated with this shareable\n\n Returns:\n bool to indicate if this shareable is accepted.\n \"\"\"\n dxo = from_shareable(shareable)\n if dxo.data_kind != DataKind.WEIGHT_DIFF:\n self.log_error(fl_ctx, \"support WEIGHT_DIFF type DXO only, skipping this shareable.\")\n return False\n\n enc_algo = dxo.get_meta_prop(key=MetaKey.PROCESSED_ALGORITHM, default=None)\n if enc_algo != self.expected_algorithm:\n self.log_error(fl_ctx, \"unsupported encryption algorithm {enc_algo}\")\n return False\n\n current_round = fl_ctx.get_prop(AppConstants.CURRENT_ROUND)\n client_name = shareable.get_peer_prop(ReservedKey.IDENTITY_NAME, \"?\")\n contribution_round = shareable.get_header(AppConstants.CONTRIBUTION_ROUND)\n\n rc = shareable.get_return_code()\n if rc and rc != ReturnCode.OK:\n self.log_debug(fl_ctx, f\"Client {client_name} returned rc: {rc}. Disregarding contribution.\")\n return False\n\n self.log_debug(fl_ctx, f\"current_round: {current_round}\")\n\n if contribution_round != current_round:\n self.log_debug(\n fl_ctx,\n \"Discarded the contribution from {client_name} for round: {contribution_round}. 
Current round is: {current_round}\",\n )\n return False\n\n start_time = time.time()\n\n for item in self.history:\n if client_name == item[\"client_name\"]:\n prev_round = item[\"round\"]\n self.log_info(\n fl_ctx,\n f\"discarding shareable from {client_name} at round: {contribution_round} as {prev_round} accepted already\",\n )\n return False\n\n self.log_info(fl_ctx, f\"Adding contribution from {client_name}.\")\n\n n_iter = dxo.get_meta_prop(key=MetaKey.NUM_STEPS_CURRENT_ROUND)\n if n_iter is None:\n if self.warning_count.get(client_name, 0) <= self.warning_limit:\n self.log_warning(\n fl_ctx,\n f\"NUM_STEPS_CURRENT_ROUND missing\"\n f\" from {client_name} and set to default value, 1.0. \"\n f\" This kind of message will show {self.warning_limit} times at most.\",\n )\n if client_name in self.warning_count:\n self.warning_count[client_name] = self.warning_count[client_name] + 1\n else:\n self.warning_count[client_name] = 0\n n_iter = 1.0\n float_n_iter = np.float(n_iter)\n\n aggregation_weight = self.aggregation_weights.get(client_name)\n if aggregation_weight is None:\n aggregation_weight = 1.0\n\n aggr_data = dxo.data\n encrypted_layers = dxo.get_meta_prop(MetaKey.PROCESSED_KEYS)\n # TODO: test support of different encrypted layers for different clients!\n\n if encrypted_layers is None:\n self.log_error(fl_ctx, \"encrypted_layers is None!\")\n return False\n\n for k, v in aggr_data.items():\n if self.exclude_vars is not None and self.exclude_vars.search(k):\n continue\n if encrypted_layers[k]:\n if self.weigh_by_local_iter:\n weighted_value = ts.ckks_vector_from(self.tenseal_context, v) * (aggregation_weight * float_n_iter)\n else:\n weighted_value = ts.ckks_vector_from(self.tenseal_context, v)\n self.merged_encrypted_layers[k] = True # any client can set this true\n else:\n if self.weigh_by_local_iter:\n weighted_value = v * (aggregation_weight * float_n_iter)\n else:\n weighted_value = v\n if k not in self.merged_encrypted_layers:\n self.merged_encrypted_layers[k] = False # only set False if no other client set it to True\n current_total = self.total.get(k, None)\n if current_total is None:\n self.total[k] = weighted_value\n self.counts[k] = n_iter\n else:\n self.total[k] = current_total + weighted_value\n self.counts[k] = self.counts[k] + n_iter\n\n self.contribution_count += 1\n\n end_time = time.time()\n n_encrypted, n_total = count_encrypted_layers(self.merged_encrypted_layers)\n self.log_info(fl_ctx, f\"{n_encrypted} of {n_total} layers encrypted\")\n self.log_info(fl_ctx, f\"Round {current_round} adding {client_name} time is {end_time - start_time} seconds\")\n\n self.history.append(\n {\n \"client_name\": client_name,\n \"round\": contribution_round,\n \"aggregation_weight\": aggregation_weight,\n \"n_iter\": n_iter,\n }\n )\n return True\n\n def aggregate(self, fl_ctx: FLContext) -> Shareable:\n start_time = time.time()\n current_round = fl_ctx.get_prop(AppConstants.CURRENT_ROUND)\n\n aggregated_dict = dict()\n for k, v in self.total.items():\n aggregated_dict[k] = v * (1.0 / self.counts[k])\n end_time = time.time()\n self.log_info(\n fl_ctx,\n f\"Aggregated {self.contribution_count} contributions for round {current_round} time is {end_time - start_time} seconds\",\n )\n\n dxo = DXO(data_kind=self.expected_data_kind, data=aggregated_dict)\n dxo.set_meta_prop(MetaKey.PROCESSED_KEYS, self.merged_encrypted_layers)\n dxo.set_meta_prop(MetaKey.PROCESSED_ALGORITHM, self.expected_algorithm)\n n_encrypted, n_total = count_encrypted_layers(self.merged_encrypted_layers)\n 
self.log_info(fl_ctx, f\"{n_encrypted} of {n_total} layers encrypted\")\n\n fl_ctx.set_prop(AppConstants.DXO, dxo, private=True, sticky=False)\n\n self.reset_stats() # only reset dictionary after adding merged_encrypted_layers to dictionary\n return dxo.to_shareable()\n", "path": "nvflare/app_common/homomorphic_encryption/he_intime_accumulate_model_aggregator.py"}], "after_files": [{"content": "# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport re\nimport time\n\nimport numpy as np\nimport tenseal as ts\n\nimport nvflare.app_common.homomorphic_encryption.he_constant as he\nfrom nvflare.apis.dxo import DXO, DataKind, MetaKey, from_shareable\nfrom nvflare.apis.event_type import EventType\nfrom nvflare.apis.fl_constant import ReservedKey, ReturnCode\nfrom nvflare.apis.fl_context import FLContext\nfrom nvflare.apis.shareable import Shareable\nfrom nvflare.app_common.abstract.aggregator import Aggregator\nfrom nvflare.app_common.app_constant import AppConstants\nfrom nvflare.app_common.homomorphic_encryption.homomorphic_encrypt import (\n count_encrypted_layers,\n load_tenseal_context_from_workspace,\n)\n\n\nclass HEInTimeAccumulateWeightedAggregator(Aggregator):\n def __init__(\n self,\n exclude_vars=None,\n aggregation_weights=None,\n tenseal_context_file=\"server_context.tenseal\",\n weigh_by_local_iter=False,\n expected_data_kind=\"WEIGHT_DIFF\",\n expected_algorithm=he.HE_ALGORITHM_CKKS,\n ):\n \"\"\"In time aggregator for `Shareables` encrypted using homomorphic encryption (HE) with TenSEAL https://github.com/OpenMined/TenSEAL.\n\n Args:\n exclude_vars ([list], optional): variable names that should be excluded from aggregation (use regular expression). Defaults to None.\n aggregation_weights ([dict], optional): dictionary of client aggregation. Defaults to None.\n tenseal_context_file (str, optional): [description]. Defaults to \"server_context.tenseal\".\n weigh_by_local_iter (bool, optional): If true, multiply client weights on first in encryption space\n (default: `False` which is recommended for HE, first multiply happens in `HEModelEncryptor`)].\n expected_data_kind (str, optional): the data_kind this aggregator can process. Defaults to \"WEIGHT_DIFF\".\n expected_algorithm ([str], optional): the HE algorithm it can process. 
Defaults to he.HE_ALGORITHM_CKKS.\n\n Raises:\n ValueError: mismatched data_kind or HE algorithm\n \"\"\"\n super().__init__()\n self.tenseal_context = None\n self.tenseal_context_file = tenseal_context_file\n if expected_data_kind not in [DataKind.WEIGHT_DIFF, DataKind.WEIGHTS]:\n raise ValueError(f\"expected_data_kind={expected_data_kind} not in WEIGHT_DIFF or WEIGHTS\")\n self.expected_data_kind = expected_data_kind\n self.expected_algorithm = expected_algorithm\n if self.expected_algorithm != he.HE_ALGORITHM_CKKS:\n raise ValueError(f\"expected algorithm {self.expected_algorithm} not supported\")\n self.exclude_vars = re.compile(exclude_vars) if exclude_vars else None\n self.aggregation_weights = aggregation_weights or {}\n self.reset_stats()\n self.weigh_by_local_iter = weigh_by_local_iter\n self.logger.info(f\"client weights control: {self.aggregation_weights}\")\n if not self.weigh_by_local_iter:\n if self.aggregation_weights:\n self.logger.warning(\"aggregation_weights will be ignored if weigh_by_local_iter=False\")\n self.logger.info(\"Only divide by sum of local (weighted) iterations.\")\n self.warning_count = dict()\n self.warning_limit = 0\n\n def handle_event(self, event_type: str, fl_ctx: FLContext):\n if event_type == EventType.START_RUN:\n self.tenseal_context = load_tenseal_context_from_workspace(self.tenseal_context_file, fl_ctx)\n elif event_type == EventType.END_RUN:\n self.tenseal_context = None\n\n def reset_stats(self):\n self.total = dict()\n self.counts = dict()\n self.contribution_count = 0\n self.history = list()\n self.merged_encrypted_layers = dict() # thread-safety is handled by workflow\n\n def accept(self, shareable: Shareable, fl_ctx: FLContext) -> bool:\n \"\"\"Accepts and adds the client updates to current average in HE encrypted space.\n\n Args:\n shareable: a shareable from client\n fl_ctx: FL Contenxt associated with this shareable\n\n Returns:\n bool to indicate if this shareable is accepted.\n \"\"\"\n dxo = from_shareable(shareable)\n if dxo.data_kind != self.expected_data_kind:\n self.log_error(\n fl_ctx,\n f\"expected {self.expected_data_kind} type DXO only but received {dxo.data_kind}, skipping this shareable.\",\n )\n return False\n\n enc_algo = dxo.get_meta_prop(key=MetaKey.PROCESSED_ALGORITHM, default=None)\n if enc_algo != self.expected_algorithm:\n self.log_error(fl_ctx, \"unsupported encryption algorithm {enc_algo}\")\n return False\n\n current_round = fl_ctx.get_prop(AppConstants.CURRENT_ROUND)\n client_name = shareable.get_peer_prop(ReservedKey.IDENTITY_NAME, \"?\")\n contribution_round = shareable.get_header(AppConstants.CONTRIBUTION_ROUND)\n\n rc = shareable.get_return_code()\n if rc and rc != ReturnCode.OK:\n self.log_debug(fl_ctx, f\"Client {client_name} returned rc: {rc}. Disregarding contribution.\")\n return False\n\n self.log_debug(fl_ctx, f\"current_round: {current_round}\")\n\n if contribution_round != current_round:\n self.log_debug(\n fl_ctx,\n \"Discarded the contribution from {client_name} for round: {contribution_round}. 
Current round is: {current_round}\",\n )\n return False\n\n start_time = time.time()\n\n for item in self.history:\n if client_name == item[\"client_name\"]:\n prev_round = item[\"round\"]\n self.log_info(\n fl_ctx,\n f\"discarding shareable from {client_name} at round: {contribution_round} as {prev_round} accepted already\",\n )\n return False\n\n self.log_info(fl_ctx, f\"Adding contribution from {client_name}.\")\n\n n_iter = dxo.get_meta_prop(key=MetaKey.NUM_STEPS_CURRENT_ROUND)\n if n_iter is None:\n if self.warning_count.get(client_name, 0) <= self.warning_limit:\n self.log_warning(\n fl_ctx,\n f\"NUM_STEPS_CURRENT_ROUND missing\"\n f\" from {client_name} and set to default value, 1.0. \"\n f\" This kind of message will show {self.warning_limit} times at most.\",\n )\n if client_name in self.warning_count:\n self.warning_count[client_name] = self.warning_count[client_name] + 1\n else:\n self.warning_count[client_name] = 0\n n_iter = 1.0\n float_n_iter = np.float(n_iter)\n\n aggregation_weight = self.aggregation_weights.get(client_name)\n if aggregation_weight is None:\n aggregation_weight = 1.0\n\n aggr_data = dxo.data\n encrypted_layers = dxo.get_meta_prop(MetaKey.PROCESSED_KEYS)\n # TODO: test support of different encrypted layers for different clients!\n\n if encrypted_layers is None:\n self.log_error(fl_ctx, \"encrypted_layers is None!\")\n return False\n\n for k, v in aggr_data.items():\n if self.exclude_vars is not None and self.exclude_vars.search(k):\n continue\n if encrypted_layers[k]:\n if self.weigh_by_local_iter:\n weighted_value = ts.ckks_vector_from(self.tenseal_context, v) * (aggregation_weight * float_n_iter)\n else:\n weighted_value = ts.ckks_vector_from(self.tenseal_context, v)\n self.merged_encrypted_layers[k] = True # any client can set this true\n else:\n if self.weigh_by_local_iter:\n weighted_value = v * (aggregation_weight * float_n_iter)\n else:\n weighted_value = v\n if k not in self.merged_encrypted_layers:\n self.merged_encrypted_layers[k] = False # only set False if no other client set it to True\n current_total = self.total.get(k, None)\n if current_total is None:\n self.total[k] = weighted_value\n self.counts[k] = n_iter\n else:\n self.total[k] = current_total + weighted_value\n self.counts[k] = self.counts[k] + n_iter\n\n self.contribution_count += 1\n\n end_time = time.time()\n n_encrypted, n_total = count_encrypted_layers(self.merged_encrypted_layers)\n self.log_info(fl_ctx, f\"{n_encrypted} of {n_total} layers encrypted\")\n self.log_info(fl_ctx, f\"Round {current_round} adding {client_name} time is {end_time - start_time} seconds\")\n\n self.history.append(\n {\n \"client_name\": client_name,\n \"round\": contribution_round,\n \"aggregation_weight\": aggregation_weight,\n \"n_iter\": n_iter,\n }\n )\n return True\n\n def aggregate(self, fl_ctx: FLContext) -> Shareable:\n start_time = time.time()\n current_round = fl_ctx.get_prop(AppConstants.CURRENT_ROUND)\n\n aggregated_dict = dict()\n for k, v in self.total.items():\n aggregated_dict[k] = v * (1.0 / self.counts[k])\n end_time = time.time()\n self.log_info(\n fl_ctx,\n f\"Aggregated {self.contribution_count} contributions for round {current_round} time is {end_time - start_time} seconds\",\n )\n\n dxo = DXO(data_kind=self.expected_data_kind, data=aggregated_dict)\n dxo.set_meta_prop(MetaKey.PROCESSED_KEYS, self.merged_encrypted_layers)\n dxo.set_meta_prop(MetaKey.PROCESSED_ALGORITHM, self.expected_algorithm)\n n_encrypted, n_total = count_encrypted_layers(self.merged_encrypted_layers)\n 
self.log_info(fl_ctx, f\"{n_encrypted} of {n_total} layers encrypted\")\n\n fl_ctx.set_prop(AppConstants.DXO, dxo, private=True, sticky=False)\n\n self.reset_stats() # only reset dictionary after adding merged_encrypted_layers to dictionary\n return dxo.to_shareable()\n", "path": "nvflare/app_common/homomorphic_encryption/he_intime_accumulate_model_aggregator.py"}]} | 3,366 | 245 |
gh_patches_debug_2360 | rasdani/github-patches | git_diff | encode__starlette-1118 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
HTTPConnection::__get_item__ has incorrect type signature
### Checklist
<!-- Please make sure you check all these items before submitting your bug report. -->
- [x] The bug is reproducible against the latest release and/or `master`.
- [x] There are no similar issues or pull requests to fix it yet.
### Describe the bug
`HTTPConnection`'s `__get_item__` has the return type `str`. However, it returns the value for the provided key in the scope, and not all values in the scope are strings; see the documentation here: https://asgi.readthedocs.io/en/latest/specs/www.html#http-connection-scope
Since `WebSocket` inherits this method, it also has the wrong return type.
### To reproduce
This example uses WebSocket because that's how I originally found the issue.
Run mypy
```bash
mypy --pretty main.py
```
On the following code
```python
from starlette.types import Scope, Receive, Send
from starlette.websockets import WebSocket
async def app(scope: Scope, receive: Receive, send: Send) -> None:
websocket = WebSocket(scope=scope, receive=receive, send=send)
await websocket.accept()
ip, port = websocket['client'] # mypy fails to typecheck, because websocket['client'] typed as a str
# Succeeds at runtime, because websocket['client'] is not a string
assert isinstance(ip, str)
assert isinstance(port, int)
```
### Expected behavior
mypy typecheck should pass for valid code
### Actual behavior
Mypy fails to typecheck with the following error:
```
main.py:8: error: Unpacking a string is disallowed
ip, port = websocket['client'] # mypy fails to typecheck, because webso...
^
```
### Environment
- OS: linux/windows/mac
- Python version: 3.9
- Starlette version: 0.14.1
### Additional context
Changing the return type of `__get_item__` to `Any` is likely the right thing to do here, which would be backwards-compatible for those currently using mypy.
As an extra solution to this problem, it would be great to expose the correct types for the scope using a TypedDict (couldn't be done for `__getitem__`, but could be done for the scope attribute on `WebSocket` and `HTTPConnection`), although that would be a breaking change for MyPy users, and also complicate the middleware story.
I'd be happy to work on either/both of these solutions if there's a consensus on what the right direction is.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `starlette/requests.py`
Content:
```
1 import json
2 import typing
3 from collections.abc import Mapping
4 from http import cookies as http_cookies
5
6 import anyio
7
8 from starlette.datastructures import URL, Address, FormData, Headers, QueryParams, State
9 from starlette.formparsers import FormParser, MultiPartParser
10 from starlette.types import Message, Receive, Scope, Send
11
12 try:
13 from multipart.multipart import parse_options_header
14 except ImportError: # pragma: nocover
15 parse_options_header = None
16
17
18 SERVER_PUSH_HEADERS_TO_COPY = {
19 "accept",
20 "accept-encoding",
21 "accept-language",
22 "cache-control",
23 "user-agent",
24 }
25
26
27 def cookie_parser(cookie_string: str) -> typing.Dict[str, str]:
28 """
29 This function parses a ``Cookie`` HTTP header into a dict of key/value pairs.
30
31 It attempts to mimic browser cookie parsing behavior: browsers and web servers
32 frequently disregard the spec (RFC 6265) when setting and reading cookies,
33 so we attempt to suit the common scenarios here.
34
35 This function has been adapted from Django 3.1.0.
36 Note: we are explicitly _NOT_ using `SimpleCookie.load` because it is based
37 on an outdated spec and will fail on lots of input we want to support
38 """
39 cookie_dict: typing.Dict[str, str] = {}
40 for chunk in cookie_string.split(";"):
41 if "=" in chunk:
42 key, val = chunk.split("=", 1)
43 else:
44 # Assume an empty name per
45 # https://bugzilla.mozilla.org/show_bug.cgi?id=169091
46 key, val = "", chunk
47 key, val = key.strip(), val.strip()
48 if key or val:
49 # unquote using Python's algorithm.
50 cookie_dict[key] = http_cookies._unquote(val) # type: ignore
51 return cookie_dict
52
53
54 class ClientDisconnect(Exception):
55 pass
56
57
58 class HTTPConnection(Mapping):
59 """
60 A base class for incoming HTTP connections, that is used to provide
61 any functionality that is common to both `Request` and `WebSocket`.
62 """
63
64 def __init__(self, scope: Scope, receive: Receive = None) -> None:
65 assert scope["type"] in ("http", "websocket")
66 self.scope = scope
67
68 def __getitem__(self, key: str) -> str:
69 return self.scope[key]
70
71 def __iter__(self) -> typing.Iterator[str]:
72 return iter(self.scope)
73
74 def __len__(self) -> int:
75 return len(self.scope)
76
77 # Don't use the `abc.Mapping.__eq__` implementation.
78 # Connection instances should never be considered equal
79 # unless `self is other`.
80 __eq__ = object.__eq__
81 __hash__ = object.__hash__
82
83 @property
84 def app(self) -> typing.Any:
85 return self.scope["app"]
86
87 @property
88 def url(self) -> URL:
89 if not hasattr(self, "_url"):
90 self._url = URL(scope=self.scope)
91 return self._url
92
93 @property
94 def base_url(self) -> URL:
95 if not hasattr(self, "_base_url"):
96 base_url_scope = dict(self.scope)
97 base_url_scope["path"] = "/"
98 base_url_scope["query_string"] = b""
99 base_url_scope["root_path"] = base_url_scope.get(
100 "app_root_path", base_url_scope.get("root_path", "")
101 )
102 self._base_url = URL(scope=base_url_scope)
103 return self._base_url
104
105 @property
106 def headers(self) -> Headers:
107 if not hasattr(self, "_headers"):
108 self._headers = Headers(scope=self.scope)
109 return self._headers
110
111 @property
112 def query_params(self) -> QueryParams:
113 if not hasattr(self, "_query_params"):
114 self._query_params = QueryParams(self.scope["query_string"])
115 return self._query_params
116
117 @property
118 def path_params(self) -> dict:
119 return self.scope.get("path_params", {})
120
121 @property
122 def cookies(self) -> typing.Dict[str, str]:
123 if not hasattr(self, "_cookies"):
124 cookies: typing.Dict[str, str] = {}
125 cookie_header = self.headers.get("cookie")
126
127 if cookie_header:
128 cookies = cookie_parser(cookie_header)
129 self._cookies = cookies
130 return self._cookies
131
132 @property
133 def client(self) -> Address:
134 host, port = self.scope.get("client") or (None, None)
135 return Address(host=host, port=port)
136
137 @property
138 def session(self) -> dict:
139 assert (
140 "session" in self.scope
141 ), "SessionMiddleware must be installed to access request.session"
142 return self.scope["session"]
143
144 @property
145 def auth(self) -> typing.Any:
146 assert (
147 "auth" in self.scope
148 ), "AuthenticationMiddleware must be installed to access request.auth"
149 return self.scope["auth"]
150
151 @property
152 def user(self) -> typing.Any:
153 assert (
154 "user" in self.scope
155 ), "AuthenticationMiddleware must be installed to access request.user"
156 return self.scope["user"]
157
158 @property
159 def state(self) -> State:
160 if not hasattr(self, "_state"):
161 # Ensure 'state' has an empty dict if it's not already populated.
162 self.scope.setdefault("state", {})
163 # Create a state instance with a reference to the dict in which it should
164 # store info
165 self._state = State(self.scope["state"])
166 return self._state
167
168 def url_for(self, name: str, **path_params: typing.Any) -> str:
169 router = self.scope["router"]
170 url_path = router.url_path_for(name, **path_params)
171 return url_path.make_absolute_url(base_url=self.base_url)
172
173
174 async def empty_receive() -> Message:
175 raise RuntimeError("Receive channel has not been made available")
176
177
178 async def empty_send(message: Message) -> None:
179 raise RuntimeError("Send channel has not been made available")
180
181
182 class Request(HTTPConnection):
183 def __init__(
184 self, scope: Scope, receive: Receive = empty_receive, send: Send = empty_send
185 ):
186 super().__init__(scope)
187 assert scope["type"] == "http"
188 self._receive = receive
189 self._send = send
190 self._stream_consumed = False
191 self._is_disconnected = False
192
193 @property
194 def method(self) -> str:
195 return self.scope["method"]
196
197 @property
198 def receive(self) -> Receive:
199 return self._receive
200
201 async def stream(self) -> typing.AsyncGenerator[bytes, None]:
202 if hasattr(self, "_body"):
203 yield self._body
204 yield b""
205 return
206
207 if self._stream_consumed:
208 raise RuntimeError("Stream consumed")
209
210 self._stream_consumed = True
211 while True:
212 message = await self._receive()
213 if message["type"] == "http.request":
214 body = message.get("body", b"")
215 if body:
216 yield body
217 if not message.get("more_body", False):
218 break
219 elif message["type"] == "http.disconnect":
220 self._is_disconnected = True
221 raise ClientDisconnect()
222 yield b""
223
224 async def body(self) -> bytes:
225 if not hasattr(self, "_body"):
226 chunks = []
227 async for chunk in self.stream():
228 chunks.append(chunk)
229 self._body = b"".join(chunks)
230 return self._body
231
232 async def json(self) -> typing.Any:
233 if not hasattr(self, "_json"):
234 body = await self.body()
235 self._json = json.loads(body)
236 return self._json
237
238 async def form(self) -> FormData:
239 if not hasattr(self, "_form"):
240 assert (
241 parse_options_header is not None
242 ), "The `python-multipart` library must be installed to use form parsing."
243 content_type_header = self.headers.get("Content-Type")
244 content_type, options = parse_options_header(content_type_header)
245 if content_type == b"multipart/form-data":
246 multipart_parser = MultiPartParser(self.headers, self.stream())
247 self._form = await multipart_parser.parse()
248 elif content_type == b"application/x-www-form-urlencoded":
249 form_parser = FormParser(self.headers, self.stream())
250 self._form = await form_parser.parse()
251 else:
252 self._form = FormData()
253 return self._form
254
255 async def close(self) -> None:
256 if hasattr(self, "_form"):
257 await self._form.close()
258
259 async def is_disconnected(self) -> bool:
260 if not self._is_disconnected:
261 message: Message = {}
262
263 # If message isn't immediately available, move on
264 with anyio.CancelScope() as cs:
265 cs.cancel()
266 message = await self._receive()
267
268 if message.get("type") == "http.disconnect":
269 self._is_disconnected = True
270
271 return self._is_disconnected
272
273 async def send_push_promise(self, path: str) -> None:
274 if "http.response.push" in self.scope.get("extensions", {}):
275 raw_headers = []
276 for name in SERVER_PUSH_HEADERS_TO_COPY:
277 for value in self.headers.getlist(name):
278 raw_headers.append(
279 (name.encode("latin-1"), value.encode("latin-1"))
280 )
281 await self._send(
282 {"type": "http.response.push", "path": path, "headers": raw_headers}
283 )
284
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/starlette/requests.py b/starlette/requests.py
--- a/starlette/requests.py
+++ b/starlette/requests.py
@@ -65,7 +65,7 @@
assert scope["type"] in ("http", "websocket")
self.scope = scope
- def __getitem__(self, key: str) -> str:
+ def __getitem__(self, key: str) -> typing.Any:
return self.scope[key]
def __iter__(self) -> typing.Iterator[str]:
| {"golden_diff": "diff --git a/starlette/requests.py b/starlette/requests.py\n--- a/starlette/requests.py\n+++ b/starlette/requests.py\n@@ -65,7 +65,7 @@\n assert scope[\"type\"] in (\"http\", \"websocket\")\n self.scope = scope\n \n- def __getitem__(self, key: str) -> str:\n+ def __getitem__(self, key: str) -> typing.Any:\n return self.scope[key]\n \n def __iter__(self) -> typing.Iterator[str]:\n", "issue": "HTTPConnection::__get_item__ has incorrect type signature\n### Checklist\r\n\r\n<!-- Please make sure you check all these items before submitting your bug report. -->\r\n\r\n- [x] The bug is reproducible against the latest release and/or `master`.\r\n- [x] There are no similar issues or pull requests to fix it yet.\r\n\r\n### Describe the bug\r\n\r\n`HTTPConnection`'s `__get_item__` has the return type `str`. However, it returns the value for the provided key in the scope. Not all values in the scope are strings, (See documentation here: https://asgi.readthedocs.io/en/latest/specs/www.html#http-connection-scope\r\n\r\nSince `WebSocket` inherits this method it also has the wrong return type.\r\n\r\n### To reproduce\r\nThis example uses WebSocket because that's how I originally found the issue.\r\n\r\nRun mypy\r\n```bash\r\nmypy --pretty main.py\r\n```\r\nOn the following code\r\n```python\r\nfrom starlette.types import Scope, Receive, Send\r\nfrom starlette.websockets import WebSocket\r\n\r\n\r\nasync def app(scope: Scope, receive: Receive, send: Send) -> None:\r\n websocket = WebSocket(scope=scope, receive=receive, send=send)\r\n await websocket.accept()\r\n ip, port = websocket['client'] # mypy fails to typecheck, because websocket['client'] typed as a str\r\n # Succeeds at runtime, because websocket['client'] is not a string\r\n assert isinstance(ip, str)\r\n assert isinstance(port, int)\r\n```\r\n\r\n### Expected behavior\r\n\r\nmypy typecheck should pass for valid code\r\n\r\n### Actual behavior\r\n\r\nMypy fails to typecheck with the following error:\r\n\r\n```\r\nmain.py:8: error: Unpacking a string is disallowed\r\n ip, port = websocket['client'] # mypy fails to typecheck, because webso...\r\n ^\r\n```\r\n\r\n\r\n### Environment\r\n\r\n- OS: linux/windows/mac\r\n- Python version: 3.9\r\n- Starlette version: 0.14.1\r\n\r\n### Additional context\r\n\r\nChanging the return type of `__get_item__` to `Any` is likely the right thing to do here, which would be backwards-compatible for those currently using mypy. 
\r\n\r\nAs an extra solution to this problem, it would be great to expose the correct types for the scope using a TypedDict (couldn't be done for `__getitem__`, but could be done for the scope attribute on `WebSocket` and `HTTPConnection`), although that would be a breaking change for MyPy users, and also complicate the middleware story.\r\n\r\nI'd be happy to work on either/both of these solutions if there's a consensus on what the right direction is.\n", "before_files": [{"content": "import json\nimport typing\nfrom collections.abc import Mapping\nfrom http import cookies as http_cookies\n\nimport anyio\n\nfrom starlette.datastructures import URL, Address, FormData, Headers, QueryParams, State\nfrom starlette.formparsers import FormParser, MultiPartParser\nfrom starlette.types import Message, Receive, Scope, Send\n\ntry:\n from multipart.multipart import parse_options_header\nexcept ImportError: # pragma: nocover\n parse_options_header = None\n\n\nSERVER_PUSH_HEADERS_TO_COPY = {\n \"accept\",\n \"accept-encoding\",\n \"accept-language\",\n \"cache-control\",\n \"user-agent\",\n}\n\n\ndef cookie_parser(cookie_string: str) -> typing.Dict[str, str]:\n \"\"\"\n This function parses a ``Cookie`` HTTP header into a dict of key/value pairs.\n\n It attempts to mimic browser cookie parsing behavior: browsers and web servers\n frequently disregard the spec (RFC 6265) when setting and reading cookies,\n so we attempt to suit the common scenarios here.\n\n This function has been adapted from Django 3.1.0.\n Note: we are explicitly _NOT_ using `SimpleCookie.load` because it is based\n on an outdated spec and will fail on lots of input we want to support\n \"\"\"\n cookie_dict: typing.Dict[str, str] = {}\n for chunk in cookie_string.split(\";\"):\n if \"=\" in chunk:\n key, val = chunk.split(\"=\", 1)\n else:\n # Assume an empty name per\n # https://bugzilla.mozilla.org/show_bug.cgi?id=169091\n key, val = \"\", chunk\n key, val = key.strip(), val.strip()\n if key or val:\n # unquote using Python's algorithm.\n cookie_dict[key] = http_cookies._unquote(val) # type: ignore\n return cookie_dict\n\n\nclass ClientDisconnect(Exception):\n pass\n\n\nclass HTTPConnection(Mapping):\n \"\"\"\n A base class for incoming HTTP connections, that is used to provide\n any functionality that is common to both `Request` and `WebSocket`.\n \"\"\"\n\n def __init__(self, scope: Scope, receive: Receive = None) -> None:\n assert scope[\"type\"] in (\"http\", \"websocket\")\n self.scope = scope\n\n def __getitem__(self, key: str) -> str:\n return self.scope[key]\n\n def __iter__(self) -> typing.Iterator[str]:\n return iter(self.scope)\n\n def __len__(self) -> int:\n return len(self.scope)\n\n # Don't use the `abc.Mapping.__eq__` implementation.\n # Connection instances should never be considered equal\n # unless `self is other`.\n __eq__ = object.__eq__\n __hash__ = object.__hash__\n\n @property\n def app(self) -> typing.Any:\n return self.scope[\"app\"]\n\n @property\n def url(self) -> URL:\n if not hasattr(self, \"_url\"):\n self._url = URL(scope=self.scope)\n return self._url\n\n @property\n def base_url(self) -> URL:\n if not hasattr(self, \"_base_url\"):\n base_url_scope = dict(self.scope)\n base_url_scope[\"path\"] = \"/\"\n base_url_scope[\"query_string\"] = b\"\"\n base_url_scope[\"root_path\"] = base_url_scope.get(\n \"app_root_path\", base_url_scope.get(\"root_path\", \"\")\n )\n self._base_url = URL(scope=base_url_scope)\n return self._base_url\n\n @property\n def headers(self) -> Headers:\n if not hasattr(self, 
\"_headers\"):\n self._headers = Headers(scope=self.scope)\n return self._headers\n\n @property\n def query_params(self) -> QueryParams:\n if not hasattr(self, \"_query_params\"):\n self._query_params = QueryParams(self.scope[\"query_string\"])\n return self._query_params\n\n @property\n def path_params(self) -> dict:\n return self.scope.get(\"path_params\", {})\n\n @property\n def cookies(self) -> typing.Dict[str, str]:\n if not hasattr(self, \"_cookies\"):\n cookies: typing.Dict[str, str] = {}\n cookie_header = self.headers.get(\"cookie\")\n\n if cookie_header:\n cookies = cookie_parser(cookie_header)\n self._cookies = cookies\n return self._cookies\n\n @property\n def client(self) -> Address:\n host, port = self.scope.get(\"client\") or (None, None)\n return Address(host=host, port=port)\n\n @property\n def session(self) -> dict:\n assert (\n \"session\" in self.scope\n ), \"SessionMiddleware must be installed to access request.session\"\n return self.scope[\"session\"]\n\n @property\n def auth(self) -> typing.Any:\n assert (\n \"auth\" in self.scope\n ), \"AuthenticationMiddleware must be installed to access request.auth\"\n return self.scope[\"auth\"]\n\n @property\n def user(self) -> typing.Any:\n assert (\n \"user\" in self.scope\n ), \"AuthenticationMiddleware must be installed to access request.user\"\n return self.scope[\"user\"]\n\n @property\n def state(self) -> State:\n if not hasattr(self, \"_state\"):\n # Ensure 'state' has an empty dict if it's not already populated.\n self.scope.setdefault(\"state\", {})\n # Create a state instance with a reference to the dict in which it should\n # store info\n self._state = State(self.scope[\"state\"])\n return self._state\n\n def url_for(self, name: str, **path_params: typing.Any) -> str:\n router = self.scope[\"router\"]\n url_path = router.url_path_for(name, **path_params)\n return url_path.make_absolute_url(base_url=self.base_url)\n\n\nasync def empty_receive() -> Message:\n raise RuntimeError(\"Receive channel has not been made available\")\n\n\nasync def empty_send(message: Message) -> None:\n raise RuntimeError(\"Send channel has not been made available\")\n\n\nclass Request(HTTPConnection):\n def __init__(\n self, scope: Scope, receive: Receive = empty_receive, send: Send = empty_send\n ):\n super().__init__(scope)\n assert scope[\"type\"] == \"http\"\n self._receive = receive\n self._send = send\n self._stream_consumed = False\n self._is_disconnected = False\n\n @property\n def method(self) -> str:\n return self.scope[\"method\"]\n\n @property\n def receive(self) -> Receive:\n return self._receive\n\n async def stream(self) -> typing.AsyncGenerator[bytes, None]:\n if hasattr(self, \"_body\"):\n yield self._body\n yield b\"\"\n return\n\n if self._stream_consumed:\n raise RuntimeError(\"Stream consumed\")\n\n self._stream_consumed = True\n while True:\n message = await self._receive()\n if message[\"type\"] == \"http.request\":\n body = message.get(\"body\", b\"\")\n if body:\n yield body\n if not message.get(\"more_body\", False):\n break\n elif message[\"type\"] == \"http.disconnect\":\n self._is_disconnected = True\n raise ClientDisconnect()\n yield b\"\"\n\n async def body(self) -> bytes:\n if not hasattr(self, \"_body\"):\n chunks = []\n async for chunk in self.stream():\n chunks.append(chunk)\n self._body = b\"\".join(chunks)\n return self._body\n\n async def json(self) -> typing.Any:\n if not hasattr(self, \"_json\"):\n body = await self.body()\n self._json = json.loads(body)\n return self._json\n\n async def form(self) -> 
FormData:\n if not hasattr(self, \"_form\"):\n assert (\n parse_options_header is not None\n ), \"The `python-multipart` library must be installed to use form parsing.\"\n content_type_header = self.headers.get(\"Content-Type\")\n content_type, options = parse_options_header(content_type_header)\n if content_type == b\"multipart/form-data\":\n multipart_parser = MultiPartParser(self.headers, self.stream())\n self._form = await multipart_parser.parse()\n elif content_type == b\"application/x-www-form-urlencoded\":\n form_parser = FormParser(self.headers, self.stream())\n self._form = await form_parser.parse()\n else:\n self._form = FormData()\n return self._form\n\n async def close(self) -> None:\n if hasattr(self, \"_form\"):\n await self._form.close()\n\n async def is_disconnected(self) -> bool:\n if not self._is_disconnected:\n message: Message = {}\n\n # If message isn't immediately available, move on\n with anyio.CancelScope() as cs:\n cs.cancel()\n message = await self._receive()\n\n if message.get(\"type\") == \"http.disconnect\":\n self._is_disconnected = True\n\n return self._is_disconnected\n\n async def send_push_promise(self, path: str) -> None:\n if \"http.response.push\" in self.scope.get(\"extensions\", {}):\n raw_headers = []\n for name in SERVER_PUSH_HEADERS_TO_COPY:\n for value in self.headers.getlist(name):\n raw_headers.append(\n (name.encode(\"latin-1\"), value.encode(\"latin-1\"))\n )\n await self._send(\n {\"type\": \"http.response.push\", \"path\": path, \"headers\": raw_headers}\n )\n", "path": "starlette/requests.py"}], "after_files": [{"content": "import json\nimport typing\nfrom collections.abc import Mapping\nfrom http import cookies as http_cookies\n\nimport anyio\n\nfrom starlette.datastructures import URL, Address, FormData, Headers, QueryParams, State\nfrom starlette.formparsers import FormParser, MultiPartParser\nfrom starlette.types import Message, Receive, Scope, Send\n\ntry:\n from multipart.multipart import parse_options_header\nexcept ImportError: # pragma: nocover\n parse_options_header = None\n\n\nSERVER_PUSH_HEADERS_TO_COPY = {\n \"accept\",\n \"accept-encoding\",\n \"accept-language\",\n \"cache-control\",\n \"user-agent\",\n}\n\n\ndef cookie_parser(cookie_string: str) -> typing.Dict[str, str]:\n \"\"\"\n This function parses a ``Cookie`` HTTP header into a dict of key/value pairs.\n\n It attempts to mimic browser cookie parsing behavior: browsers and web servers\n frequently disregard the spec (RFC 6265) when setting and reading cookies,\n so we attempt to suit the common scenarios here.\n\n This function has been adapted from Django 3.1.0.\n Note: we are explicitly _NOT_ using `SimpleCookie.load` because it is based\n on an outdated spec and will fail on lots of input we want to support\n \"\"\"\n cookie_dict: typing.Dict[str, str] = {}\n for chunk in cookie_string.split(\";\"):\n if \"=\" in chunk:\n key, val = chunk.split(\"=\", 1)\n else:\n # Assume an empty name per\n # https://bugzilla.mozilla.org/show_bug.cgi?id=169091\n key, val = \"\", chunk\n key, val = key.strip(), val.strip()\n if key or val:\n # unquote using Python's algorithm.\n cookie_dict[key] = http_cookies._unquote(val) # type: ignore\n return cookie_dict\n\n\nclass ClientDisconnect(Exception):\n pass\n\n\nclass HTTPConnection(Mapping):\n \"\"\"\n A base class for incoming HTTP connections, that is used to provide\n any functionality that is common to both `Request` and `WebSocket`.\n \"\"\"\n\n def __init__(self, scope: Scope, receive: Receive = None) -> None:\n assert 
scope[\"type\"] in (\"http\", \"websocket\")\n self.scope = scope\n\n def __getitem__(self, key: str) -> typing.Any:\n return self.scope[key]\n\n def __iter__(self) -> typing.Iterator[str]:\n return iter(self.scope)\n\n def __len__(self) -> int:\n return len(self.scope)\n\n # Don't use the `abc.Mapping.__eq__` implementation.\n # Connection instances should never be considered equal\n # unless `self is other`.\n __eq__ = object.__eq__\n __hash__ = object.__hash__\n\n @property\n def app(self) -> typing.Any:\n return self.scope[\"app\"]\n\n @property\n def url(self) -> URL:\n if not hasattr(self, \"_url\"):\n self._url = URL(scope=self.scope)\n return self._url\n\n @property\n def base_url(self) -> URL:\n if not hasattr(self, \"_base_url\"):\n base_url_scope = dict(self.scope)\n base_url_scope[\"path\"] = \"/\"\n base_url_scope[\"query_string\"] = b\"\"\n base_url_scope[\"root_path\"] = base_url_scope.get(\n \"app_root_path\", base_url_scope.get(\"root_path\", \"\")\n )\n self._base_url = URL(scope=base_url_scope)\n return self._base_url\n\n @property\n def headers(self) -> Headers:\n if not hasattr(self, \"_headers\"):\n self._headers = Headers(scope=self.scope)\n return self._headers\n\n @property\n def query_params(self) -> QueryParams:\n if not hasattr(self, \"_query_params\"):\n self._query_params = QueryParams(self.scope[\"query_string\"])\n return self._query_params\n\n @property\n def path_params(self) -> dict:\n return self.scope.get(\"path_params\", {})\n\n @property\n def cookies(self) -> typing.Dict[str, str]:\n if not hasattr(self, \"_cookies\"):\n cookies: typing.Dict[str, str] = {}\n cookie_header = self.headers.get(\"cookie\")\n\n if cookie_header:\n cookies = cookie_parser(cookie_header)\n self._cookies = cookies\n return self._cookies\n\n @property\n def client(self) -> Address:\n host, port = self.scope.get(\"client\") or (None, None)\n return Address(host=host, port=port)\n\n @property\n def session(self) -> dict:\n assert (\n \"session\" in self.scope\n ), \"SessionMiddleware must be installed to access request.session\"\n return self.scope[\"session\"]\n\n @property\n def auth(self) -> typing.Any:\n assert (\n \"auth\" in self.scope\n ), \"AuthenticationMiddleware must be installed to access request.auth\"\n return self.scope[\"auth\"]\n\n @property\n def user(self) -> typing.Any:\n assert (\n \"user\" in self.scope\n ), \"AuthenticationMiddleware must be installed to access request.user\"\n return self.scope[\"user\"]\n\n @property\n def state(self) -> State:\n if not hasattr(self, \"_state\"):\n # Ensure 'state' has an empty dict if it's not already populated.\n self.scope.setdefault(\"state\", {})\n # Create a state instance with a reference to the dict in which it should\n # store info\n self._state = State(self.scope[\"state\"])\n return self._state\n\n def url_for(self, name: str, **path_params: typing.Any) -> str:\n router = self.scope[\"router\"]\n url_path = router.url_path_for(name, **path_params)\n return url_path.make_absolute_url(base_url=self.base_url)\n\n\nasync def empty_receive() -> Message:\n raise RuntimeError(\"Receive channel has not been made available\")\n\n\nasync def empty_send(message: Message) -> None:\n raise RuntimeError(\"Send channel has not been made available\")\n\n\nclass Request(HTTPConnection):\n def __init__(\n self, scope: Scope, receive: Receive = empty_receive, send: Send = empty_send\n ):\n super().__init__(scope)\n assert scope[\"type\"] == \"http\"\n self._receive = receive\n self._send = send\n self._stream_consumed = False\n 
self._is_disconnected = False\n\n @property\n def method(self) -> str:\n return self.scope[\"method\"]\n\n @property\n def receive(self) -> Receive:\n return self._receive\n\n async def stream(self) -> typing.AsyncGenerator[bytes, None]:\n if hasattr(self, \"_body\"):\n yield self._body\n yield b\"\"\n return\n\n if self._stream_consumed:\n raise RuntimeError(\"Stream consumed\")\n\n self._stream_consumed = True\n while True:\n message = await self._receive()\n if message[\"type\"] == \"http.request\":\n body = message.get(\"body\", b\"\")\n if body:\n yield body\n if not message.get(\"more_body\", False):\n break\n elif message[\"type\"] == \"http.disconnect\":\n self._is_disconnected = True\n raise ClientDisconnect()\n yield b\"\"\n\n async def body(self) -> bytes:\n if not hasattr(self, \"_body\"):\n chunks = []\n async for chunk in self.stream():\n chunks.append(chunk)\n self._body = b\"\".join(chunks)\n return self._body\n\n async def json(self) -> typing.Any:\n if not hasattr(self, \"_json\"):\n body = await self.body()\n self._json = json.loads(body)\n return self._json\n\n async def form(self) -> FormData:\n if not hasattr(self, \"_form\"):\n assert (\n parse_options_header is not None\n ), \"The `python-multipart` library must be installed to use form parsing.\"\n content_type_header = self.headers.get(\"Content-Type\")\n content_type, options = parse_options_header(content_type_header)\n if content_type == b\"multipart/form-data\":\n multipart_parser = MultiPartParser(self.headers, self.stream())\n self._form = await multipart_parser.parse()\n elif content_type == b\"application/x-www-form-urlencoded\":\n form_parser = FormParser(self.headers, self.stream())\n self._form = await form_parser.parse()\n else:\n self._form = FormData()\n return self._form\n\n async def close(self) -> None:\n if hasattr(self, \"_form\"):\n await self._form.close()\n\n async def is_disconnected(self) -> bool:\n if not self._is_disconnected:\n message: Message = {}\n\n # If message isn't immediately available, move on\n with anyio.CancelScope() as cs:\n cs.cancel()\n message = await self._receive()\n\n if message.get(\"type\") == \"http.disconnect\":\n self._is_disconnected = True\n\n return self._is_disconnected\n\n async def send_push_promise(self, path: str) -> None:\n if \"http.response.push\" in self.scope.get(\"extensions\", {}):\n raw_headers = []\n for name in SERVER_PUSH_HEADERS_TO_COPY:\n for value in self.headers.getlist(name):\n raw_headers.append(\n (name.encode(\"latin-1\"), value.encode(\"latin-1\"))\n )\n await self._send(\n {\"type\": \"http.response.push\", \"path\": path, \"headers\": raw_headers}\n )\n", "path": "starlette/requests.py"}]} | 3,626 | 113 |
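As a quick illustration of the one-line patch above: `scope["client"]` holds a `(host, port)` pair, so once `__getitem__` is annotated as returning `typing.Any`, mypy no longer rejects the tuple unpacking from the reporter's example. The scope dict below is a hand-built minimal example (not taken from a real ASGI server), and the sketch assumes a starlette install that includes the patch.

```python
import typing

from starlette.requests import HTTPConnection

# Minimal hand-built ASGI scope; a real server would supply many more keys.
scope: typing.Dict[str, typing.Any] = {
    "type": "http",
    "client": ("127.0.0.1", 54321),
}

conn = HTTPConnection(scope)

# With the patched annotation, conn["client"] is typed as Any,
# so this unpacking passes mypy and works at runtime.
host, port = conn["client"]
assert isinstance(host, str)
assert isinstance(port, int)
```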
gh_patches_debug_511 | rasdani/github-patches | git_diff | python-gitlab__python-gitlab-1437 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Missing API code owner approval for protected branches
## Summary
The branch manager is missing an attribute implementation of `code_owner_approval_required` as documented in [GitLab API documentation](https://docs.gitlab.com/ce/api/protected_branches.html#protect-repository-branches)
## Expected Behavior
`ProjectProtectedBranchManager.code_owner_approval_required` should be implemented to mirror the API as documented:
Attribute | Type | Required | Description
-- | -- | -- | --
code_owner_approval_required | boolean | no | Prevent pushes to this branch if it matches an item in the CODEOWNERS file. (defaults: false)
## Actual Behavior
`code_owner_approval_required` is not available as an attribute in `ProjectProtectedBranchManager`.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `gitlab/v4/objects/branches.py`
Content:
```
1 from gitlab import cli
2 from gitlab import exceptions as exc
3 from gitlab.base import RequiredOptional, RESTManager, RESTObject
4 from gitlab.mixins import NoUpdateMixin, ObjectDeleteMixin
5
6
7 __all__ = [
8 "ProjectBranch",
9 "ProjectBranchManager",
10 "ProjectProtectedBranch",
11 "ProjectProtectedBranchManager",
12 ]
13
14
15 class ProjectBranch(ObjectDeleteMixin, RESTObject):
16 _id_attr = "name"
17
18 @cli.register_custom_action(
19 "ProjectBranch", tuple(), ("developers_can_push", "developers_can_merge")
20 )
21 @exc.on_http_error(exc.GitlabProtectError)
22 def protect(self, developers_can_push=False, developers_can_merge=False, **kwargs):
23 """Protect the branch.
24
25 Args:
26 developers_can_push (bool): Set to True if developers are allowed
27 to push to the branch
28 developers_can_merge (bool): Set to True if developers are allowed
29 to merge to the branch
30 **kwargs: Extra options to send to the server (e.g. sudo)
31
32 Raises:
33 GitlabAuthenticationError: If authentication is not correct
34 GitlabProtectError: If the branch could not be protected
35 """
36 id = self.get_id().replace("/", "%2F")
37 path = "%s/%s/protect" % (self.manager.path, id)
38 post_data = {
39 "developers_can_push": developers_can_push,
40 "developers_can_merge": developers_can_merge,
41 }
42 self.manager.gitlab.http_put(path, post_data=post_data, **kwargs)
43 self._attrs["protected"] = True
44
45 @cli.register_custom_action("ProjectBranch")
46 @exc.on_http_error(exc.GitlabProtectError)
47 def unprotect(self, **kwargs):
48 """Unprotect the branch.
49
50 Args:
51 **kwargs: Extra options to send to the server (e.g. sudo)
52
53 Raises:
54 GitlabAuthenticationError: If authentication is not correct
55 GitlabProtectError: If the branch could not be unprotected
56 """
57 id = self.get_id().replace("/", "%2F")
58 path = "%s/%s/unprotect" % (self.manager.path, id)
59 self.manager.gitlab.http_put(path, **kwargs)
60 self._attrs["protected"] = False
61
62
63 class ProjectBranchManager(NoUpdateMixin, RESTManager):
64 _path = "/projects/%(project_id)s/repository/branches"
65 _obj_cls = ProjectBranch
66 _from_parent_attrs = {"project_id": "id"}
67 _create_attrs = RequiredOptional(required=("branch", "ref"))
68
69
70 class ProjectProtectedBranch(ObjectDeleteMixin, RESTObject):
71 _id_attr = "name"
72
73
74 class ProjectProtectedBranchManager(NoUpdateMixin, RESTManager):
75 _path = "/projects/%(project_id)s/protected_branches"
76 _obj_cls = ProjectProtectedBranch
77 _from_parent_attrs = {"project_id": "id"}
78 _create_attrs = RequiredOptional(
79 required=("name",),
80 optional=(
81 "push_access_level",
82 "merge_access_level",
83 "unprotect_access_level",
84 "allowed_to_push",
85 "allowed_to_merge",
86 "allowed_to_unprotect",
87 ),
88 )
89
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/gitlab/v4/objects/branches.py b/gitlab/v4/objects/branches.py
--- a/gitlab/v4/objects/branches.py
+++ b/gitlab/v4/objects/branches.py
@@ -84,5 +84,6 @@
"allowed_to_push",
"allowed_to_merge",
"allowed_to_unprotect",
+ "code_owner_approval_required",
),
)
| {"golden_diff": "diff --git a/gitlab/v4/objects/branches.py b/gitlab/v4/objects/branches.py\n--- a/gitlab/v4/objects/branches.py\n+++ b/gitlab/v4/objects/branches.py\n@@ -84,5 +84,6 @@\n \"allowed_to_push\",\n \"allowed_to_merge\",\n \"allowed_to_unprotect\",\n+ \"code_owner_approval_required\",\n ),\n )\n", "issue": "Missing API code owner approval for protected branches\n## Summary\r\n\r\nThe branch manager is missing an attribute implementation of `code_owner_approval_required` as documented in [GitLab API documentation](https://docs.gitlab.com/ce/api/protected_branches.html#protect-repository-branches)\r\n\r\n## Expected Behavior\r\n\r\n`ProjectProtectedBranchManager.code_owner_approval_required` should be implemented to mirror the API as documented:\r\n\r\nAttribute | Type | Required | Description\r\n-- | -- | -- | --\r\ncode_owner_approval_required | boolean | no | Prevent pushes to this branch if it matches an item in the\u00a0CODEOWNERS\u00a0file. (defaults: false)\r\n\r\n## Actual Behavior\r\n\r\n`code_owner_approval_required` is not available as attribute in `ProjectProtectedBranchManager`.\r\n\n", "before_files": [{"content": "from gitlab import cli\nfrom gitlab import exceptions as exc\nfrom gitlab.base import RequiredOptional, RESTManager, RESTObject\nfrom gitlab.mixins import NoUpdateMixin, ObjectDeleteMixin\n\n\n__all__ = [\n \"ProjectBranch\",\n \"ProjectBranchManager\",\n \"ProjectProtectedBranch\",\n \"ProjectProtectedBranchManager\",\n]\n\n\nclass ProjectBranch(ObjectDeleteMixin, RESTObject):\n _id_attr = \"name\"\n\n @cli.register_custom_action(\n \"ProjectBranch\", tuple(), (\"developers_can_push\", \"developers_can_merge\")\n )\n @exc.on_http_error(exc.GitlabProtectError)\n def protect(self, developers_can_push=False, developers_can_merge=False, **kwargs):\n \"\"\"Protect the branch.\n\n Args:\n developers_can_push (bool): Set to True if developers are allowed\n to push to the branch\n developers_can_merge (bool): Set to True if developers are allowed\n to merge to the branch\n **kwargs: Extra options to send to the server (e.g. sudo)\n\n Raises:\n GitlabAuthenticationError: If authentication is not correct\n GitlabProtectError: If the branch could not be protected\n \"\"\"\n id = self.get_id().replace(\"/\", \"%2F\")\n path = \"%s/%s/protect\" % (self.manager.path, id)\n post_data = {\n \"developers_can_push\": developers_can_push,\n \"developers_can_merge\": developers_can_merge,\n }\n self.manager.gitlab.http_put(path, post_data=post_data, **kwargs)\n self._attrs[\"protected\"] = True\n\n @cli.register_custom_action(\"ProjectBranch\")\n @exc.on_http_error(exc.GitlabProtectError)\n def unprotect(self, **kwargs):\n \"\"\"Unprotect the branch.\n\n Args:\n **kwargs: Extra options to send to the server (e.g. 
sudo)\n\n Raises:\n GitlabAuthenticationError: If authentication is not correct\n GitlabProtectError: If the branch could not be unprotected\n \"\"\"\n id = self.get_id().replace(\"/\", \"%2F\")\n path = \"%s/%s/unprotect\" % (self.manager.path, id)\n self.manager.gitlab.http_put(path, **kwargs)\n self._attrs[\"protected\"] = False\n\n\nclass ProjectBranchManager(NoUpdateMixin, RESTManager):\n _path = \"/projects/%(project_id)s/repository/branches\"\n _obj_cls = ProjectBranch\n _from_parent_attrs = {\"project_id\": \"id\"}\n _create_attrs = RequiredOptional(required=(\"branch\", \"ref\"))\n\n\nclass ProjectProtectedBranch(ObjectDeleteMixin, RESTObject):\n _id_attr = \"name\"\n\n\nclass ProjectProtectedBranchManager(NoUpdateMixin, RESTManager):\n _path = \"/projects/%(project_id)s/protected_branches\"\n _obj_cls = ProjectProtectedBranch\n _from_parent_attrs = {\"project_id\": \"id\"}\n _create_attrs = RequiredOptional(\n required=(\"name\",),\n optional=(\n \"push_access_level\",\n \"merge_access_level\",\n \"unprotect_access_level\",\n \"allowed_to_push\",\n \"allowed_to_merge\",\n \"allowed_to_unprotect\",\n ),\n )\n", "path": "gitlab/v4/objects/branches.py"}], "after_files": [{"content": "from gitlab import cli\nfrom gitlab import exceptions as exc\nfrom gitlab.base import RequiredOptional, RESTManager, RESTObject\nfrom gitlab.mixins import NoUpdateMixin, ObjectDeleteMixin\n\n\n__all__ = [\n \"ProjectBranch\",\n \"ProjectBranchManager\",\n \"ProjectProtectedBranch\",\n \"ProjectProtectedBranchManager\",\n]\n\n\nclass ProjectBranch(ObjectDeleteMixin, RESTObject):\n _id_attr = \"name\"\n\n @cli.register_custom_action(\n \"ProjectBranch\", tuple(), (\"developers_can_push\", \"developers_can_merge\")\n )\n @exc.on_http_error(exc.GitlabProtectError)\n def protect(self, developers_can_push=False, developers_can_merge=False, **kwargs):\n \"\"\"Protect the branch.\n\n Args:\n developers_can_push (bool): Set to True if developers are allowed\n to push to the branch\n developers_can_merge (bool): Set to True if developers are allowed\n to merge to the branch\n **kwargs: Extra options to send to the server (e.g. sudo)\n\n Raises:\n GitlabAuthenticationError: If authentication is not correct\n GitlabProtectError: If the branch could not be protected\n \"\"\"\n id = self.get_id().replace(\"/\", \"%2F\")\n path = \"%s/%s/protect\" % (self.manager.path, id)\n post_data = {\n \"developers_can_push\": developers_can_push,\n \"developers_can_merge\": developers_can_merge,\n }\n self.manager.gitlab.http_put(path, post_data=post_data, **kwargs)\n self._attrs[\"protected\"] = True\n\n @cli.register_custom_action(\"ProjectBranch\")\n @exc.on_http_error(exc.GitlabProtectError)\n def unprotect(self, **kwargs):\n \"\"\"Unprotect the branch.\n\n Args:\n **kwargs: Extra options to send to the server (e.g. 
sudo)\n\n Raises:\n GitlabAuthenticationError: If authentication is not correct\n GitlabProtectError: If the branch could not be unprotected\n \"\"\"\n id = self.get_id().replace(\"/\", \"%2F\")\n path = \"%s/%s/unprotect\" % (self.manager.path, id)\n self.manager.gitlab.http_put(path, **kwargs)\n self._attrs[\"protected\"] = False\n\n\nclass ProjectBranchManager(NoUpdateMixin, RESTManager):\n _path = \"/projects/%(project_id)s/repository/branches\"\n _obj_cls = ProjectBranch\n _from_parent_attrs = {\"project_id\": \"id\"}\n _create_attrs = RequiredOptional(required=(\"branch\", \"ref\"))\n\n\nclass ProjectProtectedBranch(ObjectDeleteMixin, RESTObject):\n _id_attr = \"name\"\n\n\nclass ProjectProtectedBranchManager(NoUpdateMixin, RESTManager):\n _path = \"/projects/%(project_id)s/protected_branches\"\n _obj_cls = ProjectProtectedBranch\n _from_parent_attrs = {\"project_id\": \"id\"}\n _create_attrs = RequiredOptional(\n required=(\"name\",),\n optional=(\n \"push_access_level\",\n \"merge_access_level\",\n \"unprotect_access_level\",\n \"allowed_to_push\",\n \"allowed_to_merge\",\n \"allowed_to_unprotect\",\n \"code_owner_approval_required\",\n ),\n )\n", "path": "gitlab/v4/objects/branches.py"}]} | 1,269 | 91 |
gh_patches_debug_10250 | rasdani/github-patches | git_diff | kubeflow__pipelines-5650 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[sdk] `kfp run submit` fails when parameter values contain '=' with "dictionary update sequence element"
### Environment
* KFP version:
<!-- For more information, see an overview of KFP installation options: https://www.kubeflow.org/docs/pipelines/installation/overview/. -->
* KFP SDK version: 1.4.0, 1.5.0
<!-- Specify the version of Kubeflow Pipelines that you are using. The version number appears in the left side navigation of user interface.
To find the version number, See version number shows on bottom of KFP UI left sidenav. -->
* All dependencies version: n/a
<!-- Specify the output of the following shell command: $pip list | grep kfp -->
### Steps to reproduce
* Compile test pipeline below (run py script)
```py
import kfp as kfp
@kfp.components.func_to_container_op
def print_func(param: str):
print(str(param))
return
@kfp.dsl.pipeline(name='pipeline')
def pipeline(param: str):
print_func(param)
return
if __name__ == '__main__':
kfp.compiler.Compiler().compile(pipeline, __file__ + ".zip")
```
* Upload to KF
* Grab the `pipeline-id` to use it in the command line sdk
* Using the command line SDK, run the following:
```sh
$ kfp run submit --pipeline-id <pipeline-id> --experiment-name 'Test = parsing' param=12345 # Succeeds
$ kfp run submit --pipeline-id <pipeline-id> --experiment-name 'Test = parsing' param=some_name=4567 # Fails
dictionary update sequence element #0 has length 3; 2 is required
```
Running the parameter directly from the KFP UI with value `some_name=4567` works fine and prints `some_name=4567`.
#### Problem Explanation
The pipeline expects an input parameter named `param` to run and it prints the value of that param. The SDK will parse pipeline parameters on the `=` sign [here](https://github.com/kubeflow/pipelines/blob/master/sdk/python/kfp/cli/run.py#L79). The value contains an `=` sign, so we are trying to update the `dict()` with `('param', 'some_name', '4567')` when the goal is to update it with `('param', 'some_name=4567')`.
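The difference is easy to reproduce in a plain Python shell (this snippet is only an illustration of the `split` behavior, not SDK code):

```py
args = ("param=some_name=4567",)

# Current parsing: each item splits into three pieces, so dict() raises
# "dictionary update sequence element #0 has length 3; 2 is required"
# dict(arg.split('=') for arg in args)

# Splitting only on the first '=' keeps the embedded '=' in the value
print(dict(arg.split('=', maxsplit=1) for arg in args))  # {'param': 'some_name=4567'}
```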
#### Problem Significance
The equality sign `=` is typically used to partition datasets in distributed storage (S3, HDFS, etc) leading to paths like
```
hdfs://path/to/table/year_partition=2020/month_partition=01/other_partition=other_value/file1.parquet
hdfs://path/to/table/year_partition=2020/month_partition=02/other_partition=other_value/file1.parquet
...
```
Due to the problem discussed above, we cannot use the CLI SDK to provide parameters that contain the `=` sign. However, we _can_ use the KFP UI and pass such parameters, leading to inconsistent behavior.
<!--
Specify how to reproduce the problem.
This may include information such as: a description of the process, code snippets, log output, or screenshots.
-->
### Expected result
<!-- What should the correct behavior be? -->
Running
```sh
$ kfp run submit --pipeline-id <pipeline-id> --experiment-name 'Test = parsing' param=some_name=4567
```
should succeed just like running the pipeline via the KFP UI with the value `some_name=4567` for the run parameter **param**.
### Materials and Reference
<!-- Help us debug this issue by providing resources such as: sample code, background context, or links to references. -->
#### Suggested Solution
[This line](https://github.com/kubeflow/pipelines/blob/master/sdk/python/kfp/cli/run.py#L79) should be
```py
arg_dict = dict(arg.split('=', maxsplit=1) for arg in args)
```
---
<!-- Don't delete message below to encourage users to support your issue! -->
Impacted by this bug? Give it a 👍. We prioritise the issues with the most 👍.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sdk/python/kfp/cli/run.py`
Content:
```
1 # Copyright 2018 The Kubeflow Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15
16 import sys
17 import subprocess
18 import time
19 import json
20 import click
21 import shutil
22
23 from .output import print_output, OutputFormat
24
25
26 @click.group()
27 def run():
28 """manage run resources"""
29 pass
30
31
32 @run.command()
33 @click.option('-e', '--experiment-id', help='Parent experiment ID of listed runs.')
34 @click.option('-m', '--max-size', default=100, help='Max size of the listed runs.')
35 @click.pass_context
36 def list(ctx, experiment_id, max_size):
37 """list recent KFP runs"""
38 client = ctx.obj['client']
39 output_format = ctx.obj['output']
40 response = client.list_runs(experiment_id=experiment_id, page_size=max_size, sort_by='created_at desc')
41 if response and response.runs:
42 _print_runs(response.runs, output_format)
43 else:
44 if output_format == OutputFormat.json.name:
45 msg = json.dumps([])
46 else:
47 msg = 'No runs found.'
48 click.echo(msg)
49
50
51 @run.command()
52 @click.option('-e', '--experiment-name', required=True, help='Experiment name of the run.')
53 @click.option('-r', '--run-name', help='Name of the run.')
54 @click.option('-f', '--package-file', type=click.Path(exists=True, dir_okay=False),
55 help='Path of the pipeline package file.')
56 @click.option('-p', '--pipeline-id', help='ID of the pipeline template.')
57 @click.option('-n', '--pipeline-name', help='Name of the pipeline template.')
58 @click.option('-w', '--watch', is_flag=True, default=False,
59 help='Watch the run status until it finishes.')
60 @click.option('-v', '--version', help='ID of the pipeline version.')
61 @click.argument('args', nargs=-1)
62 @click.pass_context
63 def submit(ctx, experiment_name, run_name, package_file, pipeline_id, pipeline_name, watch,
64 version, args):
65 """submit a KFP run"""
66 client = ctx.obj['client']
67 namespace = ctx.obj['namespace']
68 output_format = ctx.obj['output']
69 if not run_name:
70 run_name = experiment_name
71
72 if not pipeline_id and pipeline_name:
73 pipeline_id = client.get_pipeline_id(name=pipeline_name)
74
75 if not package_file and not pipeline_id and not version:
76 click.echo('You must provide one of [package_file, pipeline_id, version].', err=True)
77 sys.exit(1)
78
79 arg_dict = dict(arg.split('=') for arg in args)
80 experiment = client.create_experiment(experiment_name)
81 run = client.run_pipeline(experiment.id, run_name, package_file, arg_dict, pipeline_id,
82 version_id=version)
83 _display_run(client, namespace, run.id, watch, output_format)
84
85
86 @run.command()
87 @click.option('-w', '--watch', is_flag=True, default=False,
88 help='Watch the run status until it finishes.')
89 @click.argument('run-id')
90 @click.pass_context
91 def get(ctx, watch, run_id):
92 """display the details of a KFP run"""
93 client = ctx.obj['client']
94 namespace = ctx.obj['namespace']
95 output_format = ctx.obj['output']
96 _display_run(client, namespace, run_id, watch, output_format)
97
98
99 def _display_run(client, namespace, run_id, watch, output_format):
100 run = client.get_run(run_id).run
101 _print_runs([run], output_format)
102 if not watch:
103 return
104 argo_path = shutil.which('argo')
105 if not argo_path:
106 raise RuntimeError("argo isn't found in $PATH. It's necessary for watch. "
107 "Please make sure it's installed and available. "
108 "Installation instructions be found here - "
109 "https://github.com/argoproj/argo/releases")
110
111 argo_workflow_name = None
112 while True:
113 time.sleep(1)
114 run_detail = client.get_run(run_id)
115 run = run_detail.run
116 if run_detail.pipeline_runtime and run_detail.pipeline_runtime.workflow_manifest:
117 manifest = json.loads(run_detail.pipeline_runtime.workflow_manifest)
118 if manifest['metadata'] and manifest['metadata']['name']:
119 argo_workflow_name = manifest['metadata']['name']
120 break
121 if run_detail.run.status in ['Succeeded', 'Skipped', 'Failed', 'Error']:
122 click.echo('Run is finished with status {}.'.format(run_detail.run.status))
123 return
124 if argo_workflow_name:
125 subprocess.run([argo_path, 'watch', argo_workflow_name, '-n', namespace])
126 _print_runs([run], output_format)
127
128
129 def _print_runs(runs, output_format):
130 headers = ['run id', 'name', 'status', 'created at']
131 data = [[run.id, run.name, run.status, run.created_at.isoformat()] for run in runs]
132 print_output(data, headers, output_format, table_format='grid')
133
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/sdk/python/kfp/cli/run.py b/sdk/python/kfp/cli/run.py
--- a/sdk/python/kfp/cli/run.py
+++ b/sdk/python/kfp/cli/run.py
@@ -76,7 +76,8 @@
click.echo('You must provide one of [package_file, pipeline_id, version].', err=True)
sys.exit(1)
- arg_dict = dict(arg.split('=') for arg in args)
+ arg_dict = dict(arg.split('=', maxsplit=1) for arg in args)
+
experiment = client.create_experiment(experiment_name)
run = client.run_pipeline(experiment.id, run_name, package_file, arg_dict, pipeline_id,
version_id=version)
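With `maxsplit=1`, an argument such as `param=some_name=4567` splits into `('param', 'some_name=4567')`, so the invocation that failed in the issue (same pipeline id and experiment name as above) is expected to parse and submit:

```sh
kfp run submit --pipeline-id <pipeline-id> --experiment-name 'Test = parsing' param=some_name=4567
```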
| {"golden_diff": "diff --git a/sdk/python/kfp/cli/run.py b/sdk/python/kfp/cli/run.py\n--- a/sdk/python/kfp/cli/run.py\n+++ b/sdk/python/kfp/cli/run.py\n@@ -76,7 +76,8 @@\n click.echo('You must provide one of [package_file, pipeline_id, version].', err=True)\n sys.exit(1)\n \n- arg_dict = dict(arg.split('=') for arg in args)\n+ arg_dict = dict(arg.split('=', maxsplit=1) for arg in args)\n+\n experiment = client.create_experiment(experiment_name)\n run = client.run_pipeline(experiment.id, run_name, package_file, arg_dict, pipeline_id,\n version_id=version)\n", "issue": "[sdk] `kfp run submit` fails when paramerer values contain '=' with \"dictionary update sequence element\"\n### Environment\r\n\r\n* KFP version: \r\n<!-- For more information, see an overview of KFP installation options: https://www.kubeflow.org/docs/pipelines/installation/overview/. -->\r\n* KFP SDK version: 1.4.0, 1.5.0\r\n<!-- Specify the version of Kubeflow Pipelines that you are using. The version number appears in the left side navigation of user interface.\r\nTo find the version number, See version number shows on bottom of KFP UI left sidenav. -->\r\n* All dependencies version: n/a\r\n<!-- Specify the output of the following shell command: $pip list | grep kfp -->\r\n\r\n### Steps to reproduce\r\n\r\n* Compile test pipeline below (run py script)\r\n\r\n```py\r\nimport kfp as kfp\r\n\r\[email protected]_to_container_op\r\ndef print_func(param: str):\r\n print(str(param))\r\n return\r\n\r\[email protected](name='pipeline')\r\ndef pipeline(param: str):\r\n print_func(param)\r\n return\r\n\r\nif __name__ == '__main__':\r\n kfp.compiler.Compiler().compile(pipeline, __file__ + \".zip\")\r\n```\r\n\r\n* Upload to KF\r\n* Grab the `pipeline-id` to use it in the command line sdk\r\n* Using the command line SDK, run the following:\r\n\r\n```sh\r\n$ kfp run submit --pipeline-id <pipeline-id> --experiment-name 'Test = parsing' param=12345 # Succeeds\r\n$ kfp run submit --pipeline-id <pipeline-id> --experiment-name 'Test = parsing' param=some_name=4567 # Fails\r\ndictionary update sequence element #0 has length 3; 2 is required\r\n```\r\n\r\nRunning the parameter directly from the KFP UI with value `some_name=4567` works fine and prints `some_name=4567`.\r\n\r\n\r\n#### Problem Explanation\r\nThe pipeline expects an input parameter named `param` to run and it prints the value of that param. The SDK will parse pipeline parameters on the `=` sign [here](https://github.com/kubeflow/pipelines/blob/master/sdk/python/kfp/cli/run.py#L79). The value contains an `=` sign, so we are trying to update the `dict()` with `('param', 'some_name', '4567')` when the goal is to update it with `('param', 'some_name=4567')`.\r\n\r\n#### Problem Significance\r\nThe equality sign `=` is typically used to partition datasets in distributed storage (S3, HDFS, etc) leading to paths like\r\n```\r\nhdfs://path/to/table/year_partition=2020/month_partition=01/other_partition=other_value/file1.parquet\r\nhdfs://path/to/table/year_partition=2020/month_partition=02/other_partition=other_value/file1.parquet\r\n...\r\n```\r\nDue to the problem discussed above, we cannot use the CLI SDK to provide parameters that contain the `=` sign. However, we _can_ use the KFP UI and pass such paramters, leading to inconsistent behavior.\r\n\r\n<!--\r\nSpecify how to reproduce the problem. 
\r\nThis may include information such as: a description of the process, code snippets, log output, or screenshots.\r\n-->\r\n\r\n### Expected result\r\n\r\n<!-- What should the correct behavior be? -->\r\n\r\nRunning\r\n```sh\r\n$ kfp run submit --pipeline-id <pipeline-id> --experiment-name 'Test = parsing' param=some_name=4567 \r\n```\r\nshould succeed just like running the pipeline via the KFP UI with the value `some_name=4567` for the run parameter **param**.\r\n\r\n\r\n\r\n### Materials and Reference\r\n\r\n<!-- Help us debug this issue by providing resources such as: sample code, background context, or links to references. -->\r\n\r\n#### Suggested Solution\r\n[This line](https://github.com/kubeflow/pipelines/blob/master/sdk/python/kfp/cli/run.py#L79) should be\r\n```py\r\n arg_dict = dict(arg.split('=', maxsplit=1) for arg in args)\r\n```\r\n\r\n---\r\n\r\n<!-- Don't delete message below to encourage users to support your issue! -->\r\nImpacted by this bug? Give it a \ud83d\udc4d. We prioritise the issues with the most \ud83d\udc4d.\r\n\n", "before_files": [{"content": "# Copyright 2018 The Kubeflow Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport sys\nimport subprocess\nimport time\nimport json\nimport click\nimport shutil\n\nfrom .output import print_output, OutputFormat\n\n\[email protected]()\ndef run():\n \"\"\"manage run resources\"\"\"\n pass\n\n\[email protected]()\[email protected]('-e', '--experiment-id', help='Parent experiment ID of listed runs.')\[email protected]('-m', '--max-size', default=100, help='Max size of the listed runs.')\[email protected]_context\ndef list(ctx, experiment_id, max_size):\n \"\"\"list recent KFP runs\"\"\"\n client = ctx.obj['client']\n output_format = ctx.obj['output']\n response = client.list_runs(experiment_id=experiment_id, page_size=max_size, sort_by='created_at desc')\n if response and response.runs:\n _print_runs(response.runs, output_format)\n else:\n if output_format == OutputFormat.json.name:\n msg = json.dumps([])\n else:\n msg = 'No runs found.'\n click.echo(msg)\n\n\[email protected]()\[email protected]('-e', '--experiment-name', required=True, help='Experiment name of the run.')\[email protected]('-r', '--run-name', help='Name of the run.')\[email protected]('-f', '--package-file', type=click.Path(exists=True, dir_okay=False),\n help='Path of the pipeline package file.')\[email protected]('-p', '--pipeline-id', help='ID of the pipeline template.')\[email protected]('-n', '--pipeline-name', help='Name of the pipeline template.')\[email protected]('-w', '--watch', is_flag=True, default=False,\n help='Watch the run status until it finishes.')\[email protected]('-v', '--version', help='ID of the pipeline version.')\[email protected]('args', nargs=-1)\[email protected]_context\ndef submit(ctx, experiment_name, run_name, package_file, pipeline_id, pipeline_name, watch,\n version, args):\n \"\"\"submit a KFP run\"\"\"\n client = ctx.obj['client']\n namespace = ctx.obj['namespace']\n output_format = ctx.obj['output']\n 
if not run_name:\n run_name = experiment_name\n\n if not pipeline_id and pipeline_name:\n pipeline_id = client.get_pipeline_id(name=pipeline_name)\n\n if not package_file and not pipeline_id and not version:\n click.echo('You must provide one of [package_file, pipeline_id, version].', err=True)\n sys.exit(1)\n\n arg_dict = dict(arg.split('=') for arg in args)\n experiment = client.create_experiment(experiment_name)\n run = client.run_pipeline(experiment.id, run_name, package_file, arg_dict, pipeline_id,\n version_id=version)\n _display_run(client, namespace, run.id, watch, output_format)\n\n\[email protected]()\[email protected]('-w', '--watch', is_flag=True, default=False,\n help='Watch the run status until it finishes.')\[email protected]('run-id')\[email protected]_context\ndef get(ctx, watch, run_id):\n \"\"\"display the details of a KFP run\"\"\"\n client = ctx.obj['client']\n namespace = ctx.obj['namespace']\n output_format = ctx.obj['output']\n _display_run(client, namespace, run_id, watch, output_format)\n\n\ndef _display_run(client, namespace, run_id, watch, output_format):\n run = client.get_run(run_id).run\n _print_runs([run], output_format)\n if not watch:\n return\n argo_path = shutil.which('argo')\n if not argo_path:\n raise RuntimeError(\"argo isn't found in $PATH. It's necessary for watch. \"\n \"Please make sure it's installed and available. \"\n \"Installation instructions be found here - \"\n \"https://github.com/argoproj/argo/releases\")\n\n argo_workflow_name = None\n while True:\n time.sleep(1)\n run_detail = client.get_run(run_id)\n run = run_detail.run\n if run_detail.pipeline_runtime and run_detail.pipeline_runtime.workflow_manifest:\n manifest = json.loads(run_detail.pipeline_runtime.workflow_manifest)\n if manifest['metadata'] and manifest['metadata']['name']:\n argo_workflow_name = manifest['metadata']['name']\n break\n if run_detail.run.status in ['Succeeded', 'Skipped', 'Failed', 'Error']:\n click.echo('Run is finished with status {}.'.format(run_detail.run.status))\n return\n if argo_workflow_name:\n subprocess.run([argo_path, 'watch', argo_workflow_name, '-n', namespace])\n _print_runs([run], output_format)\n\n\ndef _print_runs(runs, output_format):\n headers = ['run id', 'name', 'status', 'created at']\n data = [[run.id, run.name, run.status, run.created_at.isoformat()] for run in runs]\n print_output(data, headers, output_format, table_format='grid')\n", "path": "sdk/python/kfp/cli/run.py"}], "after_files": [{"content": "# Copyright 2018 The Kubeflow Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport sys\nimport subprocess\nimport time\nimport json\nimport click\nimport shutil\n\nfrom .output import print_output, OutputFormat\n\n\[email protected]()\ndef run():\n \"\"\"manage run resources\"\"\"\n pass\n\n\[email protected]()\[email protected]('-e', '--experiment-id', help='Parent experiment ID of listed runs.')\[email protected]('-m', '--max-size', default=100, help='Max size of the listed runs.')\[email protected]_context\ndef list(ctx, 
experiment_id, max_size):\n \"\"\"list recent KFP runs\"\"\"\n client = ctx.obj['client']\n output_format = ctx.obj['output']\n response = client.list_runs(experiment_id=experiment_id, page_size=max_size, sort_by='created_at desc')\n if response and response.runs:\n _print_runs(response.runs, output_format)\n else:\n if output_format == OutputFormat.json.name:\n msg = json.dumps([])\n else:\n msg = 'No runs found.'\n click.echo(msg)\n\n\[email protected]()\[email protected]('-e', '--experiment-name', required=True, help='Experiment name of the run.')\[email protected]('-r', '--run-name', help='Name of the run.')\[email protected]('-f', '--package-file', type=click.Path(exists=True, dir_okay=False),\n help='Path of the pipeline package file.')\[email protected]('-p', '--pipeline-id', help='ID of the pipeline template.')\[email protected]('-n', '--pipeline-name', help='Name of the pipeline template.')\[email protected]('-w', '--watch', is_flag=True, default=False,\n help='Watch the run status until it finishes.')\[email protected]('-v', '--version', help='ID of the pipeline version.')\[email protected]('args', nargs=-1)\[email protected]_context\ndef submit(ctx, experiment_name, run_name, package_file, pipeline_id, pipeline_name, watch,\n version, args):\n \"\"\"submit a KFP run\"\"\"\n client = ctx.obj['client']\n namespace = ctx.obj['namespace']\n output_format = ctx.obj['output']\n if not run_name:\n run_name = experiment_name\n\n if not pipeline_id and pipeline_name:\n pipeline_id = client.get_pipeline_id(name=pipeline_name)\n\n if not package_file and not pipeline_id and not version:\n click.echo('You must provide one of [package_file, pipeline_id, version].', err=True)\n sys.exit(1)\n\n arg_dict = dict(arg.split('=', maxsplit=1) for arg in args)\n\n experiment = client.create_experiment(experiment_name)\n run = client.run_pipeline(experiment.id, run_name, package_file, arg_dict, pipeline_id,\n version_id=version)\n _display_run(client, namespace, run.id, watch, output_format)\n\n\[email protected]()\[email protected]('-w', '--watch', is_flag=True, default=False,\n help='Watch the run status until it finishes.')\[email protected]('run-id')\[email protected]_context\ndef get(ctx, watch, run_id):\n \"\"\"display the details of a KFP run\"\"\"\n client = ctx.obj['client']\n namespace = ctx.obj['namespace']\n output_format = ctx.obj['output']\n _display_run(client, namespace, run_id, watch, output_format)\n\n\ndef _display_run(client, namespace, run_id, watch, output_format):\n run = client.get_run(run_id).run\n _print_runs([run], output_format)\n if not watch:\n return\n argo_path = shutil.which('argo')\n if not argo_path:\n raise RuntimeError(\"argo isn't found in $PATH. It's necessary for watch. \"\n \"Please make sure it's installed and available. 
\"\n \"Installation instructions be found here - \"\n \"https://github.com/argoproj/argo/releases\")\n\n argo_workflow_name = None\n while True:\n time.sleep(1)\n run_detail = client.get_run(run_id)\n run = run_detail.run\n if run_detail.pipeline_runtime and run_detail.pipeline_runtime.workflow_manifest:\n manifest = json.loads(run_detail.pipeline_runtime.workflow_manifest)\n if manifest['metadata'] and manifest['metadata']['name']:\n argo_workflow_name = manifest['metadata']['name']\n break\n if run_detail.run.status in ['Succeeded', 'Skipped', 'Failed', 'Error']:\n click.echo('Run is finished with status {}.'.format(run_detail.run.status))\n return\n if argo_workflow_name:\n subprocess.run([argo_path, 'watch', argo_workflow_name, '-n', namespace])\n _print_runs([run], output_format)\n\n\ndef _print_runs(runs, output_format):\n headers = ['run id', 'name', 'status', 'created at']\n data = [[run.id, run.name, run.status, run.created_at.isoformat()] for run in runs]\n print_output(data, headers, output_format, table_format='grid')\n", "path": "sdk/python/kfp/cli/run.py"}]} | 2,665 | 155 |
gh_patches_debug_29394 | rasdani/github-patches | git_diff | azavea__raster-vision-731 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
predict --channel-order not working
The `channel-order` option of `rastervision predict` does not work when the command is run as a stand-alone script.
```
root@122d4f0150f4:/opt/data/mar5# rastervision predict potsdam.zip example.jpg out.tif --channel-order "0 1 2"
Usage: python -m rastervision predict [OPTIONS] PREDICT_PACKAGE IMAGE_URI
OUTPUT_URI
Try "python -m rastervision predict --help" for help.
Error: Got unexpected extra arguments (1 2)
```
However, when it is run as a python module, it works.
```
root@122d4f0150f4:/opt/data/mar5# python -m rastervision.cli.main predict potsdam.zip example.jpg out.tif --channel-order "0 1 2"
/usr/local/lib/python3.5/dist-packages/pluginbase.py:439: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.
fromlist, level)
2019-03-06 18:46:21.105826: I tensorflow/core/platform/cpu_feature_guard.cc:141] Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2 FMA
/usr/local/lib/python3.5/dist-packages/rasterio/__init__.py:217: NotGeoreferencedWarning: Dataset has no geotransform set. The identity matrix may be returned.
s = DatasetReader(path, driver=driver, **kwargs)
[0, 1, 2]
[0]
2019-03-06 18:46:21:rastervision.task.semantic_segmentation: INFO - Making predictions for scene
/usr/local/lib/python3.5/dist-packages/rasterio/__init__.py:226: NotGeoreferencedWarning: Dataset has no geotransform set. The identity matrix may be returned.
**kwargs)
.........................
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `rastervision/cli/main.py`
Content:
```
1 """Raster Vision main program"""
2 import sys
3 import os
4
5 import click
6 import logging
7
8 import rastervision as rv
9 from rastervision.experiment import (ExperimentLoader, LoaderError)
10 from rastervision.runner import (ExperimentRunner)
11 from rastervision.rv_config import RVConfig
12
13 log = logging.getLogger(__name__)
14
15
16 def print_error(msg):
17 click.echo(click.style(msg, fg='red'), err=True)
18
19
20 @click.group()
21 @click.option(
22 '--profile', '-p', help='Sets the configuration profile name to use.')
23 @click.option(
24 '-v', '--verbose', help='Sets the output to be verbose.', count=True)
25 def main(profile, verbose):
26 # Make sure current directory is on PYTHON_PATH
27 # so that we can run against modules in current dir.
28 sys.path.append(os.curdir)
29
30 # Initialize configuration
31 rv._registry.initialize_config(profile=profile, verbosity=verbose + 1)
32
33
34 @main.command(
35 'run', short_help='Run Raster Vision commands against Experiments.')
36 @click.argument('runner')
37 @click.argument('commands', nargs=-1)
38 @click.option(
39 '--experiment_module',
40 '-e',
41 help=('Name of an importable module to look for experiment sets '
42 'in. If not supplied, experiments will be loaded '
43 'from __main__'))
44 @click.option(
45 '--path',
46 '-p',
47 metavar='PATTERN',
48 help=('Path of file containing ExprimentSet to run.'))
49 @click.option(
50 '--dry-run',
51 '-n',
52 is_flag=True,
53 help=('Execute a dry run, which will print out information '
54 'about the commands to be run, but will not actually '
55 'run the commands'))
56 @click.option(
57 '--skip-file-check',
58 '-x',
59 is_flag=True,
60 help=('Skip the step that verifies that file exist.'))
61 @click.option(
62 '--arg',
63 '-a',
64 type=(str, str),
65 multiple=True,
66 metavar='KEY VALUE',
67 help=('Pass a parameter to the experiments if the method '
68 'parameter list takes in a parameter with that key. '
69 'Multiple args can be supplied'))
70 @click.option(
71 '--prefix',
72 metavar='PREFIX',
73 default='exp_',
74 help=('Prefix for methods containing experiments. (default: "exp_")'))
75 @click.option(
76 '--method',
77 '-m',
78 'methods',
79 multiple=True,
80 metavar='PATTERN',
81 help=('Pattern to match method names to run.'))
82 @click.option(
83 '--filter',
84 '-f',
85 'filters',
86 multiple=True,
87 metavar='PATTERN',
88 help=('Pattern to match experiment names to run.'))
89 @click.option(
90 '--rerun',
91 '-r',
92 is_flag=True,
93 default=False,
94 help=('Rerun commands, regardless if '
95 'their output files already exist.'))
96 @click.option('--tempdir', help=('Temporary directory to use for this run.'))
97 @click.option(
98 '--splits',
99 '-s',
100 default=1,
101 metavar='INTEGER',
102 help=('The number of processes to attempt to split each stage into.'))
103 def run(runner, commands, experiment_module, dry_run, skip_file_check, arg,
104 prefix, methods, path, filters, rerun, tempdir, splits):
105 """Run Raster Vision commands from experiments, using the
106 experiment runner named RUNNER."""
107
108 if tempdir:
109 RVConfig.set_tmp_dir(tempdir)
110
111 # Validate runner
112 valid_runners = list(
113 map(lambda x: x.lower(), rv.ExperimentRunner.list_runners()))
114 if runner not in valid_runners:
115 print_error('Invalid experiment runner: "{}". '
116 'Must be one of: "{}"'.format(runner,
117 '", "'.join(valid_runners)))
118 sys.exit(1)
119
120 runner = ExperimentRunner.get_runner(runner)
121
122 if experiment_module and path:
123 print_error('Must specify only one of experiment_module or path')
124 sys.exit(1)
125
126 if not commands:
127 commands = rv.ALL_COMMANDS
128 else:
129 commands = list(map(lambda x: x.upper(), commands))
130
131 experiment_args = {}
132 for k, v in arg:
133 experiment_args[k] = v
134
135 loader = ExperimentLoader(
136 experiment_args=experiment_args,
137 experiment_method_prefix=prefix,
138 experiment_method_patterns=methods,
139 experiment_name_patterns=filters)
140 try:
141 if experiment_module:
142 experiments = loader.load_from_module(experiment_module)
143 elif path:
144 experiments = loader.load_from_file(path)
145 else:
146 experiments = loader.load_from_module('__main__')
147 except LoaderError as e:
148 print_error(str(e))
149 sys.exit(1)
150
151 if not experiments:
152 if experiment_module:
153 print_error(
154 'No experiments found in {}.'.format(experiment_module))
155 elif path:
156 print_error('No experiments found in {}.'.format(path))
157 else:
158 print_error('No experiments found.')
159
160 runner.run(
161 experiments,
162 commands_to_run=commands,
163 rerun_commands=rerun,
164 skip_file_check=skip_file_check,
165 dry_run=dry_run,
166 splits=splits)
167
168
169 @main.command()
170 @click.option(
171 '--experiment-module',
172 '-e',
173 help=('Name of an importable module to look for experiment sets '
174 'in. If not supplied, experiments will be loaded '
175 'from __main__'))
176 @click.option(
177 '--arg',
178 '-a',
179 type=(str, str),
180 multiple=True,
181 metavar='KEY VALUE',
182 help=('Pass a parameter to the experiments if the method '
183 'parameter list takes in a parameter with that key. '
184 'Multiple args can be supplied'))
185 def ls(experiment_module, arg):
186 """Print out a list of Experiment IDs."""
187 if experiment_module:
188 module_to_load = experiment_module
189 else:
190 module_to_load = '__main__'
191
192 experiment_args = {}
193 for k, v in arg:
194 experiment_args[k] = v
195
196 loader = ExperimentLoader(experiment_args=experiment_args)
197 try:
198 experiments = loader.load_from_module(module_to_load)
199 except LoaderError as e:
200 print_error(str(e))
201 sys.exit(1)
202
203 if not experiments:
204 if experiment_module:
205 print_error(
206 'No experiments found in {}.'.format(experiment_module))
207 else:
208 print_error('No experiments found.')
209
210 for e in experiments:
211 click.echo('{}'.format(e.id))
212
213
214 @main.command(
215 'predict', short_help='Make predictions using a predict package.')
216 @click.argument('predict_package')
217 @click.argument('image_uri')
218 @click.argument('output_uri')
219 @click.option(
220 '--update-stats',
221 '-a',
222 is_flag=True,
223 help=('Run an analysis on this individual image, as '
224 'opposed to using any analysis like statistics '
225 'that exist in the prediction package'))
226 @click.option(
227 '--channel-order',
228 help='String containing channel_order. Example: \"2 1 0\"')
229 @click.option(
230 '--export-config',
231 type=click.Path(exists=False),
232 help='Exports the configuration to the given output file.')
233 def predict(predict_package, image_uri, output_uri, update_stats,
234 channel_order, export_config):
235 """Make predictions on the image at IMAGE_URI
236 using PREDICT_PACKAGE and store the
237 prediciton output at OUTPUT_URI.
238 """
239 if channel_order is not None:
240 channel_order = [
241 int(channel_ind) for channel_ind in channel_order.split(' ')
242 ]
243
244 with RVConfig.get_tmp_dir() as tmp_dir:
245 predictor = rv.Predictor(predict_package, tmp_dir, update_stats,
246 channel_order)
247 predictor.predict(image_uri, output_uri, export_config)
248
249
250 @main.command(
251 'run_command', short_help='Run a command from configuration file.')
252 @click.argument('command_config_uri')
253 @click.option('--tempdir')
254 def run_command(command_config_uri, tempdir):
255 """Run a command from a serialized command configuration
256 at COMMAND_CONFIG_URI.
257 """
258 if tempdir is not None:
259 RVConfig.set_tmp_dir(tempdir)
260 rv.runner.CommandRunner.run(command_config_uri)
261
262
263 if __name__ == '__main__':
264 main()
265
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/rastervision/cli/main.py b/rastervision/cli/main.py
--- a/rastervision/cli/main.py
+++ b/rastervision/cli/main.py
@@ -211,6 +211,37 @@
click.echo('{}'.format(e.id))
+# https://stackoverflow.com/questions/48391777/nargs-equivalent-for-options-in-click
+class OptionEatAll(click.Option):
+ def __init__(self, *args, **kwargs):
+ self.save_other_options = kwargs.pop('save_other_options', True)
+ nargs = kwargs.pop('nargs', -1)
+ assert nargs == -1, 'nargs, if set, must be -1 not {}'.format(nargs)
+ super(OptionEatAll, self).__init__(*args, **kwargs)
+ self._previous_parser_process = None
+ self._eat_all_parser = None
+
+ def add_to_parser(self, parser, ctx):
+ def parser_process(value, state):
+ value = str(value)
+ while state.rargs:
+ value = '{} {}'.format(value, state.rargs.pop(0))
+ self._previous_parser_process(value, state)
+
+ retval = super(OptionEatAll, self).add_to_parser(parser, ctx)
+
+ for name in self.opts:
+ our_parser = parser._long_opt.get(name) or parser._short_opt.get(
+ name)
+ if our_parser:
+ self._eat_all_parser = our_parser
+ self._previous_parser_process = our_parser.process
+ our_parser.process = parser_process
+ break
+
+ return retval
+
+
@main.command(
'predict', short_help='Make predictions using a predict package.')
@click.argument('predict_package')
@@ -225,7 +256,8 @@
'that exist in the prediction package'))
@click.option(
'--channel-order',
- help='String containing channel_order. Example: \"2 1 0\"')
+ cls=OptionEatAll,
+ help='List of indices comprising channel_order. Example: 2 1 0')
@click.option(
'--export-config',
type=click.Path(exists=False),
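The patch uses an `OptionEatAll` subclass of `click.Option` that greedily consumes the remaining space-separated tokens, so the indices no longer need to survive as a single quoted string; for example, reusing the inputs from the issue, the following invocation is expected to yield a channel order of `[0, 1, 2]`:

```sh
rastervision predict potsdam.zip example.jpg out.tif --channel-order 0 1 2
```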
| {"golden_diff": "diff --git a/rastervision/cli/main.py b/rastervision/cli/main.py\n--- a/rastervision/cli/main.py\n+++ b/rastervision/cli/main.py\n@@ -211,6 +211,37 @@\n click.echo('{}'.format(e.id))\n \n \n+# https://stackoverflow.com/questions/48391777/nargs-equivalent-for-options-in-click\n+class OptionEatAll(click.Option):\n+ def __init__(self, *args, **kwargs):\n+ self.save_other_options = kwargs.pop('save_other_options', True)\n+ nargs = kwargs.pop('nargs', -1)\n+ assert nargs == -1, 'nargs, if set, must be -1 not {}'.format(nargs)\n+ super(OptionEatAll, self).__init__(*args, **kwargs)\n+ self._previous_parser_process = None\n+ self._eat_all_parser = None\n+\n+ def add_to_parser(self, parser, ctx):\n+ def parser_process(value, state):\n+ value = str(value)\n+ while state.rargs:\n+ value = '{} {}'.format(value, state.rargs.pop(0))\n+ self._previous_parser_process(value, state)\n+\n+ retval = super(OptionEatAll, self).add_to_parser(parser, ctx)\n+\n+ for name in self.opts:\n+ our_parser = parser._long_opt.get(name) or parser._short_opt.get(\n+ name)\n+ if our_parser:\n+ self._eat_all_parser = our_parser\n+ self._previous_parser_process = our_parser.process\n+ our_parser.process = parser_process\n+ break\n+\n+ return retval\n+\n+\n @main.command(\n 'predict', short_help='Make predictions using a predict package.')\n @click.argument('predict_package')\n@@ -225,7 +256,8 @@\n 'that exist in the prediction package'))\n @click.option(\n '--channel-order',\n- help='String containing channel_order. Example: \\\"2 1 0\\\"')\n+ cls=OptionEatAll,\n+ help='List of indices comprising channel_order. Example: 2 1 0')\n @click.option(\n '--export-config',\n type=click.Path(exists=False),\n", "issue": "predict --channel-order not working\nThe `channel-order` option of `rastervision predict` does not work when the command is run as a stand-alone script.\r\n\r\n```\r\nroot@122d4f0150f4:/opt/data/mar5# rastervision predict potsdam.zip example.jpg out.tif --channel-order \"0 1 2\"\r\nUsage: python -m rastervision predict [OPTIONS] PREDICT_PACKAGE IMAGE_URI\r\n OUTPUT_URI\r\nTry \"python -m rastervision predict --help\" for help.\r\n\r\nError: Got unexpected extra arguments (1 2)\r\n```\r\n\r\nHowever, when it is run as a python module, it works. \r\n```\r\nroot@122d4f0150f4:/opt/data/mar5# python -m rastervision.cli.main predict potsdam.zip example.jpg out.tif --channel-order \"0 1 2\"\r\n/usr/local/lib/python3.5/dist-packages/pluginbase.py:439: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.\r\n fromlist, level)\r\n2019-03-06 18:46:21.105826: I tensorflow/core/platform/cpu_feature_guard.cc:141] Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2 FMA\r\n/usr/local/lib/python3.5/dist-packages/rasterio/__init__.py:217: NotGeoreferencedWarning: Dataset has no geotransform set. The identity matrix may be returned.\r\n s = DatasetReader(path, driver=driver, **kwargs)\r\n[0, 1, 2]\r\n[0]\r\n2019-03-06 18:46:21:rastervision.task.semantic_segmentation: INFO - Making predictions for scene\r\n/usr/local/lib/python3.5/dist-packages/rasterio/__init__.py:226: NotGeoreferencedWarning: Dataset has no geotransform set. 
The identity matrix may be returned.\r\n **kwargs)\r\n.........................\r\n```\n", "before_files": [{"content": "\"\"\"Raster Vision main program\"\"\"\nimport sys\nimport os\n\nimport click\nimport logging\n\nimport rastervision as rv\nfrom rastervision.experiment import (ExperimentLoader, LoaderError)\nfrom rastervision.runner import (ExperimentRunner)\nfrom rastervision.rv_config import RVConfig\n\nlog = logging.getLogger(__name__)\n\n\ndef print_error(msg):\n click.echo(click.style(msg, fg='red'), err=True)\n\n\[email protected]()\[email protected](\n '--profile', '-p', help='Sets the configuration profile name to use.')\[email protected](\n '-v', '--verbose', help='Sets the output to be verbose.', count=True)\ndef main(profile, verbose):\n # Make sure current directory is on PYTHON_PATH\n # so that we can run against modules in current dir.\n sys.path.append(os.curdir)\n\n # Initialize configuration\n rv._registry.initialize_config(profile=profile, verbosity=verbose + 1)\n\n\[email protected](\n 'run', short_help='Run Raster Vision commands against Experiments.')\[email protected]('runner')\[email protected]('commands', nargs=-1)\[email protected](\n '--experiment_module',\n '-e',\n help=('Name of an importable module to look for experiment sets '\n 'in. If not supplied, experiments will be loaded '\n 'from __main__'))\[email protected](\n '--path',\n '-p',\n metavar='PATTERN',\n help=('Path of file containing ExprimentSet to run.'))\[email protected](\n '--dry-run',\n '-n',\n is_flag=True,\n help=('Execute a dry run, which will print out information '\n 'about the commands to be run, but will not actually '\n 'run the commands'))\[email protected](\n '--skip-file-check',\n '-x',\n is_flag=True,\n help=('Skip the step that verifies that file exist.'))\[email protected](\n '--arg',\n '-a',\n type=(str, str),\n multiple=True,\n metavar='KEY VALUE',\n help=('Pass a parameter to the experiments if the method '\n 'parameter list takes in a parameter with that key. '\n 'Multiple args can be supplied'))\[email protected](\n '--prefix',\n metavar='PREFIX',\n default='exp_',\n help=('Prefix for methods containing experiments. (default: \"exp_\")'))\[email protected](\n '--method',\n '-m',\n 'methods',\n multiple=True,\n metavar='PATTERN',\n help=('Pattern to match method names to run.'))\[email protected](\n '--filter',\n '-f',\n 'filters',\n multiple=True,\n metavar='PATTERN',\n help=('Pattern to match experiment names to run.'))\[email protected](\n '--rerun',\n '-r',\n is_flag=True,\n default=False,\n help=('Rerun commands, regardless if '\n 'their output files already exist.'))\[email protected]('--tempdir', help=('Temporary directory to use for this run.'))\[email protected](\n '--splits',\n '-s',\n default=1,\n metavar='INTEGER',\n help=('The number of processes to attempt to split each stage into.'))\ndef run(runner, commands, experiment_module, dry_run, skip_file_check, arg,\n prefix, methods, path, filters, rerun, tempdir, splits):\n \"\"\"Run Raster Vision commands from experiments, using the\n experiment runner named RUNNER.\"\"\"\n\n if tempdir:\n RVConfig.set_tmp_dir(tempdir)\n\n # Validate runner\n valid_runners = list(\n map(lambda x: x.lower(), rv.ExperimentRunner.list_runners()))\n if runner not in valid_runners:\n print_error('Invalid experiment runner: \"{}\". 
'\n 'Must be one of: \"{}\"'.format(runner,\n '\", \"'.join(valid_runners)))\n sys.exit(1)\n\n runner = ExperimentRunner.get_runner(runner)\n\n if experiment_module and path:\n print_error('Must specify only one of experiment_module or path')\n sys.exit(1)\n\n if not commands:\n commands = rv.ALL_COMMANDS\n else:\n commands = list(map(lambda x: x.upper(), commands))\n\n experiment_args = {}\n for k, v in arg:\n experiment_args[k] = v\n\n loader = ExperimentLoader(\n experiment_args=experiment_args,\n experiment_method_prefix=prefix,\n experiment_method_patterns=methods,\n experiment_name_patterns=filters)\n try:\n if experiment_module:\n experiments = loader.load_from_module(experiment_module)\n elif path:\n experiments = loader.load_from_file(path)\n else:\n experiments = loader.load_from_module('__main__')\n except LoaderError as e:\n print_error(str(e))\n sys.exit(1)\n\n if not experiments:\n if experiment_module:\n print_error(\n 'No experiments found in {}.'.format(experiment_module))\n elif path:\n print_error('No experiments found in {}.'.format(path))\n else:\n print_error('No experiments found.')\n\n runner.run(\n experiments,\n commands_to_run=commands,\n rerun_commands=rerun,\n skip_file_check=skip_file_check,\n dry_run=dry_run,\n splits=splits)\n\n\[email protected]()\[email protected](\n '--experiment-module',\n '-e',\n help=('Name of an importable module to look for experiment sets '\n 'in. If not supplied, experiments will be loaded '\n 'from __main__'))\[email protected](\n '--arg',\n '-a',\n type=(str, str),\n multiple=True,\n metavar='KEY VALUE',\n help=('Pass a parameter to the experiments if the method '\n 'parameter list takes in a parameter with that key. '\n 'Multiple args can be supplied'))\ndef ls(experiment_module, arg):\n \"\"\"Print out a list of Experiment IDs.\"\"\"\n if experiment_module:\n module_to_load = experiment_module\n else:\n module_to_load = '__main__'\n\n experiment_args = {}\n for k, v in arg:\n experiment_args[k] = v\n\n loader = ExperimentLoader(experiment_args=experiment_args)\n try:\n experiments = loader.load_from_module(module_to_load)\n except LoaderError as e:\n print_error(str(e))\n sys.exit(1)\n\n if not experiments:\n if experiment_module:\n print_error(\n 'No experiments found in {}.'.format(experiment_module))\n else:\n print_error('No experiments found.')\n\n for e in experiments:\n click.echo('{}'.format(e.id))\n\n\[email protected](\n 'predict', short_help='Make predictions using a predict package.')\[email protected]('predict_package')\[email protected]('image_uri')\[email protected]('output_uri')\[email protected](\n '--update-stats',\n '-a',\n is_flag=True,\n help=('Run an analysis on this individual image, as '\n 'opposed to using any analysis like statistics '\n 'that exist in the prediction package'))\[email protected](\n '--channel-order',\n help='String containing channel_order. 
Example: \\\"2 1 0\\\"')\[email protected](\n '--export-config',\n type=click.Path(exists=False),\n help='Exports the configuration to the given output file.')\ndef predict(predict_package, image_uri, output_uri, update_stats,\n channel_order, export_config):\n \"\"\"Make predictions on the image at IMAGE_URI\n using PREDICT_PACKAGE and store the\n prediciton output at OUTPUT_URI.\n \"\"\"\n if channel_order is not None:\n channel_order = [\n int(channel_ind) for channel_ind in channel_order.split(' ')\n ]\n\n with RVConfig.get_tmp_dir() as tmp_dir:\n predictor = rv.Predictor(predict_package, tmp_dir, update_stats,\n channel_order)\n predictor.predict(image_uri, output_uri, export_config)\n\n\[email protected](\n 'run_command', short_help='Run a command from configuration file.')\[email protected]('command_config_uri')\[email protected]('--tempdir')\ndef run_command(command_config_uri, tempdir):\n \"\"\"Run a command from a serialized command configuration\n at COMMAND_CONFIG_URI.\n \"\"\"\n if tempdir is not None:\n RVConfig.set_tmp_dir(tempdir)\n rv.runner.CommandRunner.run(command_config_uri)\n\n\nif __name__ == '__main__':\n main()\n", "path": "rastervision/cli/main.py"}], "after_files": [{"content": "\"\"\"Raster Vision main program\"\"\"\nimport sys\nimport os\n\nimport click\nimport logging\n\nimport rastervision as rv\nfrom rastervision.experiment import (ExperimentLoader, LoaderError)\nfrom rastervision.runner import (ExperimentRunner)\nfrom rastervision.rv_config import RVConfig\n\nlog = logging.getLogger(__name__)\n\n\ndef print_error(msg):\n click.echo(click.style(msg, fg='red'), err=True)\n\n\[email protected]()\[email protected](\n '--profile', '-p', help='Sets the configuration profile name to use.')\[email protected](\n '-v', '--verbose', help='Sets the output to be verbose.', count=True)\ndef main(profile, verbose):\n # Make sure current directory is on PYTHON_PATH\n # so that we can run against modules in current dir.\n sys.path.append(os.curdir)\n\n # Initialize configuration\n rv._registry.initialize_config(profile=profile, verbosity=verbose + 1)\n\n\[email protected](\n 'run', short_help='Run Raster Vision commands against Experiments.')\[email protected]('runner')\[email protected]('commands', nargs=-1)\[email protected](\n '--experiment_module',\n '-e',\n help=('Name of an importable module to look for experiment sets '\n 'in. If not supplied, experiments will be loaded '\n 'from __main__'))\[email protected](\n '--path',\n '-p',\n metavar='PATTERN',\n help=('Path of file containing ExprimentSet to run.'))\[email protected](\n '--dry-run',\n '-n',\n is_flag=True,\n help=('Execute a dry run, which will print out information '\n 'about the commands to be run, but will not actually '\n 'run the commands'))\[email protected](\n '--skip-file-check',\n '-x',\n is_flag=True,\n help=('Skip the step that verifies that file exist.'))\[email protected](\n '--arg',\n '-a',\n type=(str, str),\n multiple=True,\n metavar='KEY VALUE',\n help=('Pass a parameter to the experiments if the method '\n 'parameter list takes in a parameter with that key. '\n 'Multiple args can be supplied'))\[email protected](\n '--prefix',\n metavar='PREFIX',\n default='exp_',\n help=('Prefix for methods containing experiments. 
(default: \"exp_\")'))\[email protected](\n '--method',\n '-m',\n 'methods',\n multiple=True,\n metavar='PATTERN',\n help=('Pattern to match method names to run.'))\[email protected](\n '--filter',\n '-f',\n 'filters',\n multiple=True,\n metavar='PATTERN',\n help=('Pattern to match experiment names to run.'))\[email protected](\n '--rerun',\n '-r',\n is_flag=True,\n default=False,\n help=('Rerun commands, regardless if '\n 'their output files already exist.'))\[email protected]('--tempdir', help=('Temporary directory to use for this run.'))\[email protected](\n '--splits',\n '-s',\n default=1,\n metavar='INTEGER',\n help=('The number of processes to attempt to split each stage into.'))\ndef run(runner, commands, experiment_module, dry_run, skip_file_check, arg,\n prefix, methods, path, filters, rerun, tempdir, splits):\n \"\"\"Run Raster Vision commands from experiments, using the\n experiment runner named RUNNER.\"\"\"\n\n if tempdir:\n RVConfig.set_tmp_dir(tempdir)\n\n # Validate runner\n valid_runners = list(\n map(lambda x: x.lower(), rv.ExperimentRunner.list_runners()))\n if runner not in valid_runners:\n print_error('Invalid experiment runner: \"{}\". '\n 'Must be one of: \"{}\"'.format(runner,\n '\", \"'.join(valid_runners)))\n sys.exit(1)\n\n runner = ExperimentRunner.get_runner(runner)\n\n if experiment_module and path:\n print_error('Must specify only one of experiment_module or path')\n sys.exit(1)\n\n if not commands:\n commands = rv.ALL_COMMANDS\n else:\n commands = list(map(lambda x: x.upper(), commands))\n\n experiment_args = {}\n for k, v in arg:\n experiment_args[k] = v\n\n loader = ExperimentLoader(\n experiment_args=experiment_args,\n experiment_method_prefix=prefix,\n experiment_method_patterns=methods,\n experiment_name_patterns=filters)\n try:\n if experiment_module:\n experiments = loader.load_from_module(experiment_module)\n elif path:\n experiments = loader.load_from_file(path)\n else:\n experiments = loader.load_from_module('__main__')\n except LoaderError as e:\n print_error(str(e))\n sys.exit(1)\n\n if not experiments:\n if experiment_module:\n print_error(\n 'No experiments found in {}.'.format(experiment_module))\n elif path:\n print_error('No experiments found in {}.'.format(path))\n else:\n print_error('No experiments found.')\n\n runner.run(\n experiments,\n commands_to_run=commands,\n rerun_commands=rerun,\n skip_file_check=skip_file_check,\n dry_run=dry_run,\n splits=splits)\n\n\[email protected]()\[email protected](\n '--experiment-module',\n '-e',\n help=('Name of an importable module to look for experiment sets '\n 'in. If not supplied, experiments will be loaded '\n 'from __main__'))\[email protected](\n '--arg',\n '-a',\n type=(str, str),\n multiple=True,\n metavar='KEY VALUE',\n help=('Pass a parameter to the experiments if the method '\n 'parameter list takes in a parameter with that key. 
'\n 'Multiple args can be supplied'))\ndef ls(experiment_module, arg):\n \"\"\"Print out a list of Experiment IDs.\"\"\"\n if experiment_module:\n module_to_load = experiment_module\n else:\n module_to_load = '__main__'\n\n experiment_args = {}\n for k, v in arg:\n experiment_args[k] = v\n\n loader = ExperimentLoader(experiment_args=experiment_args)\n try:\n experiments = loader.load_from_module(module_to_load)\n except LoaderError as e:\n print_error(str(e))\n sys.exit(1)\n\n if not experiments:\n if experiment_module:\n print_error(\n 'No experiments found in {}.'.format(experiment_module))\n else:\n print_error('No experiments found.')\n\n for e in experiments:\n click.echo('{}'.format(e.id))\n\n\n# https://stackoverflow.com/questions/48391777/nargs-equivalent-for-options-in-click\nclass OptionEatAll(click.Option):\n def __init__(self, *args, **kwargs):\n self.save_other_options = kwargs.pop('save_other_options', True)\n nargs = kwargs.pop('nargs', -1)\n assert nargs == -1, 'nargs, if set, must be -1 not {}'.format(nargs)\n super(OptionEatAll, self).__init__(*args, **kwargs)\n self._previous_parser_process = None\n self._eat_all_parser = None\n\n def add_to_parser(self, parser, ctx):\n def parser_process(value, state):\n value = str(value)\n while state.rargs:\n value = '{} {}'.format(value, state.rargs.pop(0))\n self._previous_parser_process(value, state)\n\n retval = super(OptionEatAll, self).add_to_parser(parser, ctx)\n\n for name in self.opts:\n our_parser = parser._long_opt.get(name) or parser._short_opt.get(\n name)\n if our_parser:\n self._eat_all_parser = our_parser\n self._previous_parser_process = our_parser.process\n our_parser.process = parser_process\n break\n\n return retval\n\n\[email protected](\n 'predict', short_help='Make predictions using a predict package.')\[email protected]('predict_package')\[email protected]('image_uri')\[email protected]('output_uri')\[email protected](\n '--update-stats',\n '-a',\n is_flag=True,\n help=('Run an analysis on this individual image, as '\n 'opposed to using any analysis like statistics '\n 'that exist in the prediction package'))\[email protected](\n '--channel-order',\n cls=OptionEatAll,\n help='List of indices comprising channel_order. Example: 2 1 0')\[email protected](\n '--export-config',\n type=click.Path(exists=False),\n help='Exports the configuration to the given output file.')\ndef predict(predict_package, image_uri, output_uri, update_stats,\n channel_order, export_config):\n \"\"\"Make predictions on the image at IMAGE_URI\n using PREDICT_PACKAGE and store the\n prediciton output at OUTPUT_URI.\n \"\"\"\n if channel_order is not None:\n channel_order = [\n int(channel_ind) for channel_ind in channel_order.split(' ')\n ]\n\n with RVConfig.get_tmp_dir() as tmp_dir:\n predictor = rv.Predictor(predict_package, tmp_dir, update_stats,\n channel_order)\n predictor.predict(image_uri, output_uri, export_config)\n\n\[email protected](\n 'run_command', short_help='Run a command from configuration file.')\[email protected]('command_config_uri')\[email protected]('--tempdir')\ndef run_command(command_config_uri, tempdir):\n \"\"\"Run a command from a serialized command configuration\n at COMMAND_CONFIG_URI.\n \"\"\"\n if tempdir is not None:\n RVConfig.set_tmp_dir(tempdir)\n rv.runner.CommandRunner.run(command_config_uri)\n\n\nif __name__ == '__main__':\n main()\n", "path": "rastervision/cli/main.py"}]} | 3,219 | 489 |
gh_patches_debug_52370 | rasdani/github-patches | git_diff | interlegis__sapl-2110 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Relatório Normas Jurídicas: segundo critério de ordem
O relatório de normas jurídicas tem, por primeiro critério de ordem, a data por ordem decrescente. O segundo critério é o Número, em ordem crescente, que deveria mudar para decrescente.
https://sapl.agudo.rs.leg.br/norma/pesquisar?tipo=12&numero=&ano=2018&data_0=&data_1=&data_publicacao_0=&data_publicacao_1=&ementa=&assuntos=

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sapl/norma/views.py`
Content:
```
1
2 import re
3 import weasyprint
4 from django.contrib.auth.mixins import PermissionRequiredMixin
5 from django.core.exceptions import ObjectDoesNotExist
6 from django.core.urlresolvers import reverse
7 from django.http import HttpResponse, JsonResponse
8 from django.template import RequestContext, loader
9 from django.utils import timezone
10 from django.utils.translation import ugettext_lazy as _
11 from django.views.generic import TemplateView, UpdateView
12 from django.views.generic.base import RedirectView
13 from django.views.generic.edit import FormView
14 from django_filters.views import FilterView
15
16 from sapl.base.models import AppConfig
17 from sapl.compilacao.views import IntegracaoTaView
18 from sapl.crud.base import (RP_DETAIL, RP_LIST, Crud, CrudAux,
19 MasterDetailCrud, make_pagination)
20 from sapl.utils import show_results_filter_set
21
22 from .forms import (NormaFilterSet, NormaJuridicaForm,
23 NormaPesquisaSimplesForm, NormaRelacionadaForm)
24 from .models import (AssuntoNorma, NormaJuridica, NormaRelacionada,
25 TipoNormaJuridica, TipoVinculoNormaJuridica)
26
27 # LegislacaoCitadaCrud = Crud.build(LegislacaoCitada, '')
28 AssuntoNormaCrud = CrudAux.build(AssuntoNorma, 'assunto_norma_juridica',
29 list_field_names=['assunto', 'descricao'])
30
31
32 TipoNormaCrud = CrudAux.build(
33 TipoNormaJuridica, 'tipo_norma_juridica',
34 list_field_names=['sigla', 'descricao', 'equivalente_lexml'])
35 TipoVinculoNormaJuridicaCrud = CrudAux.build(
36 TipoVinculoNormaJuridica, '',
37 list_field_names=['sigla', 'descricao_ativa', 'descricao_passiva'])
38
39
40 class NormaRelacionadaCrud(MasterDetailCrud):
41 model = NormaRelacionada
42 parent_field = 'norma_principal'
43 help_topic = 'norma_juridica'
44
45 class BaseMixin(MasterDetailCrud.BaseMixin):
46 list_field_names = ['norma_relacionada', 'tipo_vinculo']
47
48 class CreateView(MasterDetailCrud.CreateView):
49 form_class = NormaRelacionadaForm
50
51 class UpdateView(MasterDetailCrud.UpdateView):
52 form_class = NormaRelacionadaForm
53
54 def get_initial(self):
55 initial = super(UpdateView, self).get_initial()
56 initial['tipo'] = self.object.norma_relacionada.tipo.id
57 initial['numero'] = self.object.norma_relacionada.numero
58 initial['ano'] = self.object.norma_relacionada.ano
59 initial['ementa'] = self.object.norma_relacionada.ementa
60 return initial
61
62 class DetailView(MasterDetailCrud.DetailView):
63
64 layout_key = 'NormaRelacionadaDetail'
65
66
67 class NormaPesquisaView(FilterView):
68 model = NormaJuridica
69 filterset_class = NormaFilterSet
70 paginate_by = 10
71
72 def get_queryset(self):
73 qs = super().get_queryset()
74
75 qs.select_related('tipo', 'materia')
76
77 return qs
78
79 def get_context_data(self, **kwargs):
80 context = super(NormaPesquisaView, self).get_context_data(**kwargs)
81
82 context['title'] = _('Pesquisar Norma Jurídica')
83
84 qr = self.request.GET.copy()
85
86 if 'page' in qr:
87 del qr['page']
88
89 paginator = context['paginator']
90 page_obj = context['page_obj']
91
92 context['page_range'] = make_pagination(
93 page_obj.number, paginator.num_pages)
94
95 context['filter_url'] = ('&' + qr.urlencode()) if len(qr) > 0 else ''
96
97 context['show_results'] = show_results_filter_set(qr)
98
99 return context
100
101
102 class NormaTaView(IntegracaoTaView):
103 model = NormaJuridica
104 model_type_foreignkey = TipoNormaJuridica
105 map_fields = {
106 'data': 'data',
107 'ementa': 'ementa',
108 'observacao': 'observacao',
109 'numero': 'numero',
110 'ano': 'ano',
111 }
112
113 map_funcs = {
114 'publicacao_func': True
115 }
116
117 def get(self, request, *args, **kwargs):
118 """
119 Para manter a app compilacao isolada das outras aplicações,
120 este get foi implementado para tratar uma prerrogativa externa
121 de usuário.
122 """
123 if AppConfig.attr('texto_articulado_norma'):
124 return IntegracaoTaView.get(self, request, *args, **kwargs)
125 else:
126 return self.get_redirect_deactivated()
127
128
129 class NormaCrud(Crud):
130 model = NormaJuridica
131 help_topic = 'norma_juridica'
132 public = [RP_LIST, RP_DETAIL]
133
134 class BaseMixin(Crud.BaseMixin):
135 list_field_names = ['tipo', 'numero', 'ano', 'ementa']
136
137 list_url = ''
138
139 @property
140 def search_url(self):
141 namespace = self.model._meta.app_config.name
142 return reverse('%s:%s' % (namespace, 'norma_pesquisa'))
143
144 class DetailView(Crud.DetailView):
145 pass
146
147 class DeleteView(Crud.DeleteView):
148
149 def get_success_url(self):
150 return self.search_url
151
152 class CreateView(Crud.CreateView):
153 form_class = NormaJuridicaForm
154
155 @property
156 def cancel_url(self):
157 return self.search_url
158
159 layout_key = 'NormaJuridicaCreate'
160
161 class ListView(Crud.ListView, RedirectView):
162
163 def get_redirect_url(self, *args, **kwargs):
164 namespace = self.model._meta.app_config.name
165 return reverse('%s:%s' % (namespace, 'norma_pesquisa'))
166
167 def get(self, request, *args, **kwargs):
168 return RedirectView.get(self, request, *args, **kwargs)
169
170 class UpdateView(Crud.UpdateView):
171 form_class = NormaJuridicaForm
172
173 layout_key = 'NormaJuridicaCreate'
174
175 def get_initial(self):
176 initial = super(UpdateView, self).get_initial()
177 norma = NormaJuridica.objects.get(id=self.kwargs['pk'])
178 if norma.materia:
179 initial['tipo_materia'] = norma.materia.tipo
180 initial['ano_materia'] = norma.materia.ano
181 initial['numero_materia'] = norma.materia.numero
182 return initial
183
184
185 def recuperar_norma(request):
186 tipo = TipoNormaJuridica.objects.get(pk=request.GET['tipo'])
187 numero = request.GET['numero']
188 ano = request.GET['ano']
189
190 try:
191 norma = NormaJuridica.objects.get(tipo=tipo,
192 ano=ano,
193 numero=numero)
194 response = JsonResponse({'ementa': norma.ementa,
195 'id': norma.id})
196 except ObjectDoesNotExist:
197 response = JsonResponse({'ementa': '', 'id': 0})
198
199 return response
200
201
202 def recuperar_numero_norma(request):
203 tipo = TipoNormaJuridica.objects.get(pk=request.GET['tipo'])
204 ano = request.GET.get('ano', '')
205 param = {'tipo': tipo}
206 param['ano'] = ano if ano else timezone.now().year
207 norma = NormaJuridica.objects.filter(**param).order_by(
208 'tipo', 'ano').values_list('numero', 'ano').last()
209 if norma:
210 response = JsonResponse({'numero': int(re.sub("[^0-9].*", '', norma[0])) + 1,
211 'ano': norma[1]})
212 else:
213 response = JsonResponse(
214 {'numero': 1, 'ano': ano})
215
216 return response
217
218
219 class ImpressosView(PermissionRequiredMixin, TemplateView):
220 template_name = 'materia/impressos/impressos.html'
221 permission_required = ('materia.can_access_impressos', )
222
223
224 def gerar_pdf_impressos(request, context, template_name):
225 template = loader.get_template(template_name)
226 html = template.render(RequestContext(request, context))
227 pdf = weasyprint.HTML(string=html, base_url=request.build_absolute_uri()
228 ).write_pdf()
229
230 response = HttpResponse(pdf, content_type='application/pdf')
231 response['Content-Disposition'] = (
232 'inline; filename="relatorio_impressos.pdf"')
233 response['Content-Transfer-Encoding'] = 'binary'
234
235 return response
236
237
238 class NormaPesquisaSimplesView(PermissionRequiredMixin, FormView):
239 form_class = NormaPesquisaSimplesForm
240 template_name = 'materia/impressos/norma.html'
241 permission_required = ('materia.can_access_impressos', )
242
243 def form_valid(self, form):
244 normas = NormaJuridica.objects.all().order_by(
245 'numero')
246 template_norma = 'materia/impressos/normas_pdf.html'
247
248 titulo = form.cleaned_data['titulo']
249
250 if form.cleaned_data['tipo_norma']:
251 normas = normas.filter(tipo=form.cleaned_data['tipo_norma'])
252
253 if form.cleaned_data['data_inicial']:
254 normas = normas.filter(
255 data__gte=form.cleaned_data['data_inicial'],
256 data__lte=form.cleaned_data['data_final'])
257
258 qtd_resultados = len(normas)
259 if qtd_resultados > 2000:
260 normas = normas[:2000]
261
262 context = {'quantidade': qtd_resultados,
263 'titulo': titulo,
264 'normas': normas}
265
266 return gerar_pdf_impressos(self.request, context, template_norma)
267
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/sapl/norma/views.py b/sapl/norma/views.py
--- a/sapl/norma/views.py
+++ b/sapl/norma/views.py
@@ -72,7 +72,7 @@
def get_queryset(self):
qs = super().get_queryset()
- qs.select_related('tipo', 'materia')
+ qs = qs.extra({'norma_i': "CAST(regexp_replace(numero,'[^0-9]','', 'g') AS INTEGER)", 'norma_letra': "regexp_replace(numero,'[^a-zA-Z]','', 'g')"}).order_by('-data', '-norma_i', '-norma_letra')
return qs
| {"golden_diff": "diff --git a/sapl/norma/views.py b/sapl/norma/views.py\n--- a/sapl/norma/views.py\n+++ b/sapl/norma/views.py\n@@ -72,7 +72,7 @@\n def get_queryset(self):\n qs = super().get_queryset()\n \n- qs.select_related('tipo', 'materia')\n+ qs = qs.extra({'norma_i': \"CAST(regexp_replace(numero,'[^0-9]','', 'g') AS INTEGER)\", 'norma_letra': \"regexp_replace(numero,'[^a-zA-Z]','', 'g')\"}).order_by('-data', '-norma_i', '-norma_letra')\n \n return qs\n", "issue": "Relat\u00f3rio Normas Jur\u00eddicas: segundo crit\u00e9rio de ordem\nO relat\u00f3rio de normas jur\u00eddicas tem, por primeiro crit\u00e9rio de ordem, a data por ordem decrescente. O segundo crit\u00e9rio \u00e9 o N\u00famero, em ordem crescente, que deveria mudar para decrescente.\r\nhttps://sapl.agudo.rs.leg.br/norma/pesquisar?tipo=12&numero=&ano=2018&data_0=&data_1=&data_publicacao_0=&data_publicacao_1=&ementa=&assuntos=\r\n\r\n\n", "before_files": [{"content": "\nimport re\nimport weasyprint\nfrom django.contrib.auth.mixins import PermissionRequiredMixin\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.core.urlresolvers import reverse\nfrom django.http import HttpResponse, JsonResponse\nfrom django.template import RequestContext, loader\nfrom django.utils import timezone\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.views.generic import TemplateView, UpdateView\nfrom django.views.generic.base import RedirectView\nfrom django.views.generic.edit import FormView\nfrom django_filters.views import FilterView\n\nfrom sapl.base.models import AppConfig\nfrom sapl.compilacao.views import IntegracaoTaView\nfrom sapl.crud.base import (RP_DETAIL, RP_LIST, Crud, CrudAux,\n MasterDetailCrud, make_pagination)\nfrom sapl.utils import show_results_filter_set\n\nfrom .forms import (NormaFilterSet, NormaJuridicaForm,\n NormaPesquisaSimplesForm, NormaRelacionadaForm)\nfrom .models import (AssuntoNorma, NormaJuridica, NormaRelacionada,\n TipoNormaJuridica, TipoVinculoNormaJuridica)\n\n# LegislacaoCitadaCrud = Crud.build(LegislacaoCitada, '')\nAssuntoNormaCrud = CrudAux.build(AssuntoNorma, 'assunto_norma_juridica',\n list_field_names=['assunto', 'descricao'])\n\n\nTipoNormaCrud = CrudAux.build(\n TipoNormaJuridica, 'tipo_norma_juridica',\n list_field_names=['sigla', 'descricao', 'equivalente_lexml'])\nTipoVinculoNormaJuridicaCrud = CrudAux.build(\n TipoVinculoNormaJuridica, '',\n list_field_names=['sigla', 'descricao_ativa', 'descricao_passiva'])\n\n\nclass NormaRelacionadaCrud(MasterDetailCrud):\n model = NormaRelacionada\n parent_field = 'norma_principal'\n help_topic = 'norma_juridica'\n\n class BaseMixin(MasterDetailCrud.BaseMixin):\n list_field_names = ['norma_relacionada', 'tipo_vinculo']\n\n class CreateView(MasterDetailCrud.CreateView):\n form_class = NormaRelacionadaForm\n\n class UpdateView(MasterDetailCrud.UpdateView):\n form_class = NormaRelacionadaForm\n\n def get_initial(self):\n initial = super(UpdateView, self).get_initial()\n initial['tipo'] = self.object.norma_relacionada.tipo.id\n initial['numero'] = self.object.norma_relacionada.numero\n initial['ano'] = self.object.norma_relacionada.ano\n initial['ementa'] = self.object.norma_relacionada.ementa\n return initial\n\n class DetailView(MasterDetailCrud.DetailView):\n\n layout_key = 'NormaRelacionadaDetail'\n\n\nclass NormaPesquisaView(FilterView):\n model = NormaJuridica\n filterset_class = NormaFilterSet\n paginate_by = 10\n\n def get_queryset(self):\n qs = super().get_queryset()\n\n qs.select_related('tipo', 'materia')\n\n 
return qs\n\n def get_context_data(self, **kwargs):\n context = super(NormaPesquisaView, self).get_context_data(**kwargs)\n\n context['title'] = _('Pesquisar Norma Jur\u00eddica')\n\n qr = self.request.GET.copy()\n\n if 'page' in qr:\n del qr['page']\n\n paginator = context['paginator']\n page_obj = context['page_obj']\n\n context['page_range'] = make_pagination(\n page_obj.number, paginator.num_pages)\n\n context['filter_url'] = ('&' + qr.urlencode()) if len(qr) > 0 else ''\n\n context['show_results'] = show_results_filter_set(qr)\n\n return context\n\n\nclass NormaTaView(IntegracaoTaView):\n model = NormaJuridica\n model_type_foreignkey = TipoNormaJuridica\n map_fields = {\n 'data': 'data',\n 'ementa': 'ementa',\n 'observacao': 'observacao',\n 'numero': 'numero',\n 'ano': 'ano',\n }\n\n map_funcs = {\n 'publicacao_func': True\n }\n\n def get(self, request, *args, **kwargs):\n \"\"\"\n Para manter a app compilacao isolada das outras aplica\u00e7\u00f5es,\n este get foi implementado para tratar uma prerrogativa externa\n de usu\u00e1rio.\n \"\"\"\n if AppConfig.attr('texto_articulado_norma'):\n return IntegracaoTaView.get(self, request, *args, **kwargs)\n else:\n return self.get_redirect_deactivated()\n\n\nclass NormaCrud(Crud):\n model = NormaJuridica\n help_topic = 'norma_juridica'\n public = [RP_LIST, RP_DETAIL]\n\n class BaseMixin(Crud.BaseMixin):\n list_field_names = ['tipo', 'numero', 'ano', 'ementa']\n\n list_url = ''\n\n @property\n def search_url(self):\n namespace = self.model._meta.app_config.name\n return reverse('%s:%s' % (namespace, 'norma_pesquisa'))\n\n class DetailView(Crud.DetailView):\n pass\n\n class DeleteView(Crud.DeleteView):\n\n def get_success_url(self):\n return self.search_url\n\n class CreateView(Crud.CreateView):\n form_class = NormaJuridicaForm\n\n @property\n def cancel_url(self):\n return self.search_url\n\n layout_key = 'NormaJuridicaCreate'\n\n class ListView(Crud.ListView, RedirectView):\n\n def get_redirect_url(self, *args, **kwargs):\n namespace = self.model._meta.app_config.name\n return reverse('%s:%s' % (namespace, 'norma_pesquisa'))\n\n def get(self, request, *args, **kwargs):\n return RedirectView.get(self, request, *args, **kwargs)\n\n class UpdateView(Crud.UpdateView):\n form_class = NormaJuridicaForm\n\n layout_key = 'NormaJuridicaCreate'\n\n def get_initial(self):\n initial = super(UpdateView, self).get_initial()\n norma = NormaJuridica.objects.get(id=self.kwargs['pk'])\n if norma.materia:\n initial['tipo_materia'] = norma.materia.tipo\n initial['ano_materia'] = norma.materia.ano\n initial['numero_materia'] = norma.materia.numero\n return initial\n\n\ndef recuperar_norma(request):\n tipo = TipoNormaJuridica.objects.get(pk=request.GET['tipo'])\n numero = request.GET['numero']\n ano = request.GET['ano']\n\n try:\n norma = NormaJuridica.objects.get(tipo=tipo,\n ano=ano,\n numero=numero)\n response = JsonResponse({'ementa': norma.ementa,\n 'id': norma.id})\n except ObjectDoesNotExist:\n response = JsonResponse({'ementa': '', 'id': 0})\n\n return response\n\n\ndef recuperar_numero_norma(request):\n tipo = TipoNormaJuridica.objects.get(pk=request.GET['tipo'])\n ano = request.GET.get('ano', '')\n param = {'tipo': tipo}\n param['ano'] = ano if ano else timezone.now().year\n norma = NormaJuridica.objects.filter(**param).order_by(\n 'tipo', 'ano').values_list('numero', 'ano').last()\n if norma:\n response = JsonResponse({'numero': int(re.sub(\"[^0-9].*\", '', norma[0])) + 1,\n 'ano': norma[1]})\n else:\n response = JsonResponse(\n {'numero': 1, 'ano': 
ano})\n\n return response\n\n\nclass ImpressosView(PermissionRequiredMixin, TemplateView):\n template_name = 'materia/impressos/impressos.html'\n permission_required = ('materia.can_access_impressos', )\n\n\ndef gerar_pdf_impressos(request, context, template_name):\n template = loader.get_template(template_name)\n html = template.render(RequestContext(request, context))\n pdf = weasyprint.HTML(string=html, base_url=request.build_absolute_uri()\n ).write_pdf()\n\n response = HttpResponse(pdf, content_type='application/pdf')\n response['Content-Disposition'] = (\n 'inline; filename=\"relatorio_impressos.pdf\"')\n response['Content-Transfer-Encoding'] = 'binary'\n\n return response\n\n\nclass NormaPesquisaSimplesView(PermissionRequiredMixin, FormView):\n form_class = NormaPesquisaSimplesForm\n template_name = 'materia/impressos/norma.html'\n permission_required = ('materia.can_access_impressos', )\n\n def form_valid(self, form):\n normas = NormaJuridica.objects.all().order_by(\n 'numero')\n template_norma = 'materia/impressos/normas_pdf.html'\n\n titulo = form.cleaned_data['titulo']\n\n if form.cleaned_data['tipo_norma']:\n normas = normas.filter(tipo=form.cleaned_data['tipo_norma'])\n\n if form.cleaned_data['data_inicial']:\n normas = normas.filter(\n data__gte=form.cleaned_data['data_inicial'],\n data__lte=form.cleaned_data['data_final'])\n\n qtd_resultados = len(normas)\n if qtd_resultados > 2000:\n normas = normas[:2000]\n\n context = {'quantidade': qtd_resultados,\n 'titulo': titulo,\n 'normas': normas}\n\n return gerar_pdf_impressos(self.request, context, template_norma)\n", "path": "sapl/norma/views.py"}], "after_files": [{"content": "\nimport re\nimport weasyprint\nfrom django.contrib.auth.mixins import PermissionRequiredMixin\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.core.urlresolvers import reverse\nfrom django.http import HttpResponse, JsonResponse\nfrom django.template import RequestContext, loader\nfrom django.utils import timezone\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.views.generic import TemplateView, UpdateView\nfrom django.views.generic.base import RedirectView\nfrom django.views.generic.edit import FormView\nfrom django_filters.views import FilterView\n\nfrom sapl.base.models import AppConfig\nfrom sapl.compilacao.views import IntegracaoTaView\nfrom sapl.crud.base import (RP_DETAIL, RP_LIST, Crud, CrudAux,\n MasterDetailCrud, make_pagination)\nfrom sapl.utils import show_results_filter_set\n\nfrom .forms import (NormaFilterSet, NormaJuridicaForm,\n NormaPesquisaSimplesForm, NormaRelacionadaForm)\nfrom .models import (AssuntoNorma, NormaJuridica, NormaRelacionada,\n TipoNormaJuridica, TipoVinculoNormaJuridica)\n\n# LegislacaoCitadaCrud = Crud.build(LegislacaoCitada, '')\nAssuntoNormaCrud = CrudAux.build(AssuntoNorma, 'assunto_norma_juridica',\n list_field_names=['assunto', 'descricao'])\n\n\nTipoNormaCrud = CrudAux.build(\n TipoNormaJuridica, 'tipo_norma_juridica',\n list_field_names=['sigla', 'descricao', 'equivalente_lexml'])\nTipoVinculoNormaJuridicaCrud = CrudAux.build(\n TipoVinculoNormaJuridica, '',\n list_field_names=['sigla', 'descricao_ativa', 'descricao_passiva'])\n\n\nclass NormaRelacionadaCrud(MasterDetailCrud):\n model = NormaRelacionada\n parent_field = 'norma_principal'\n help_topic = 'norma_juridica'\n\n class BaseMixin(MasterDetailCrud.BaseMixin):\n list_field_names = ['norma_relacionada', 'tipo_vinculo']\n\n class CreateView(MasterDetailCrud.CreateView):\n form_class = NormaRelacionadaForm\n\n class 
UpdateView(MasterDetailCrud.UpdateView):\n form_class = NormaRelacionadaForm\n\n def get_initial(self):\n initial = super(UpdateView, self).get_initial()\n initial['tipo'] = self.object.norma_relacionada.tipo.id\n initial['numero'] = self.object.norma_relacionada.numero\n initial['ano'] = self.object.norma_relacionada.ano\n initial['ementa'] = self.object.norma_relacionada.ementa\n return initial\n\n class DetailView(MasterDetailCrud.DetailView):\n\n layout_key = 'NormaRelacionadaDetail'\n\n\nclass NormaPesquisaView(FilterView):\n model = NormaJuridica\n filterset_class = NormaFilterSet\n paginate_by = 10\n\n def get_queryset(self):\n qs = super().get_queryset()\n\n qs = qs.extra({'norma_i': \"CAST(regexp_replace(numero,'[^0-9]','', 'g') AS INTEGER)\", 'norma_letra': \"regexp_replace(numero,'[^a-zA-Z]','', 'g')\"}).order_by('-data', '-norma_i', '-norma_letra')\n\n return qs\n\n def get_context_data(self, **kwargs):\n context = super(NormaPesquisaView, self).get_context_data(**kwargs)\n\n context['title'] = _('Pesquisar Norma Jur\u00eddica')\n\n qr = self.request.GET.copy()\n\n if 'page' in qr:\n del qr['page']\n\n paginator = context['paginator']\n page_obj = context['page_obj']\n\n context['page_range'] = make_pagination(\n page_obj.number, paginator.num_pages)\n\n context['filter_url'] = ('&' + qr.urlencode()) if len(qr) > 0 else ''\n\n context['show_results'] = show_results_filter_set(qr)\n\n return context\n\n\nclass NormaTaView(IntegracaoTaView):\n model = NormaJuridica\n model_type_foreignkey = TipoNormaJuridica\n map_fields = {\n 'data': 'data',\n 'ementa': 'ementa',\n 'observacao': 'observacao',\n 'numero': 'numero',\n 'ano': 'ano',\n }\n\n map_funcs = {\n 'publicacao_func': True\n }\n\n def get(self, request, *args, **kwargs):\n \"\"\"\n Para manter a app compilacao isolada das outras aplica\u00e7\u00f5es,\n este get foi implementado para tratar uma prerrogativa externa\n de usu\u00e1rio.\n \"\"\"\n if AppConfig.attr('texto_articulado_norma'):\n return IntegracaoTaView.get(self, request, *args, **kwargs)\n else:\n return self.get_redirect_deactivated()\n\n\nclass NormaCrud(Crud):\n model = NormaJuridica\n help_topic = 'norma_juridica'\n public = [RP_LIST, RP_DETAIL]\n\n class BaseMixin(Crud.BaseMixin):\n list_field_names = ['tipo', 'numero', 'ano', 'ementa']\n\n list_url = ''\n\n @property\n def search_url(self):\n namespace = self.model._meta.app_config.name\n return reverse('%s:%s' % (namespace, 'norma_pesquisa'))\n\n class DetailView(Crud.DetailView):\n pass\n\n class DeleteView(Crud.DeleteView):\n\n def get_success_url(self):\n return self.search_url\n\n class CreateView(Crud.CreateView):\n form_class = NormaJuridicaForm\n\n @property\n def cancel_url(self):\n return self.search_url\n\n layout_key = 'NormaJuridicaCreate'\n\n class ListView(Crud.ListView, RedirectView):\n\n def get_redirect_url(self, *args, **kwargs):\n namespace = self.model._meta.app_config.name\n return reverse('%s:%s' % (namespace, 'norma_pesquisa'))\n\n def get(self, request, *args, **kwargs):\n return RedirectView.get(self, request, *args, **kwargs)\n\n class UpdateView(Crud.UpdateView):\n form_class = NormaJuridicaForm\n\n layout_key = 'NormaJuridicaCreate'\n\n def get_initial(self):\n initial = super(UpdateView, self).get_initial()\n norma = NormaJuridica.objects.get(id=self.kwargs['pk'])\n if norma.materia:\n initial['tipo_materia'] = norma.materia.tipo\n initial['ano_materia'] = norma.materia.ano\n initial['numero_materia'] = norma.materia.numero\n return initial\n\n\ndef recuperar_norma(request):\n 
tipo = TipoNormaJuridica.objects.get(pk=request.GET['tipo'])\n numero = request.GET['numero']\n ano = request.GET['ano']\n\n try:\n norma = NormaJuridica.objects.get(tipo=tipo,\n ano=ano,\n numero=numero)\n response = JsonResponse({'ementa': norma.ementa,\n 'id': norma.id})\n except ObjectDoesNotExist:\n response = JsonResponse({'ementa': '', 'id': 0})\n\n return response\n\n\ndef recuperar_numero_norma(request):\n tipo = TipoNormaJuridica.objects.get(pk=request.GET['tipo'])\n ano = request.GET.get('ano', '')\n param = {'tipo': tipo}\n param['ano'] = ano if ano else timezone.now().year\n norma = NormaJuridica.objects.filter(**param).order_by(\n 'tipo', 'ano').values_list('numero', 'ano').last()\n if norma:\n response = JsonResponse({'numero': int(re.sub(\"[^0-9].*\", '', norma[0])) + 1,\n 'ano': norma[1]})\n else:\n response = JsonResponse(\n {'numero': 1, 'ano': ano})\n\n return response\n\n\nclass ImpressosView(PermissionRequiredMixin, TemplateView):\n template_name = 'materia/impressos/impressos.html'\n permission_required = ('materia.can_access_impressos', )\n\n\ndef gerar_pdf_impressos(request, context, template_name):\n template = loader.get_template(template_name)\n html = template.render(RequestContext(request, context))\n pdf = weasyprint.HTML(string=html, base_url=request.build_absolute_uri()\n ).write_pdf()\n\n response = HttpResponse(pdf, content_type='application/pdf')\n response['Content-Disposition'] = (\n 'inline; filename=\"relatorio_impressos.pdf\"')\n response['Content-Transfer-Encoding'] = 'binary'\n\n return response\n\n\nclass NormaPesquisaSimplesView(PermissionRequiredMixin, FormView):\n form_class = NormaPesquisaSimplesForm\n template_name = 'materia/impressos/norma.html'\n permission_required = ('materia.can_access_impressos', )\n\n def form_valid(self, form):\n normas = NormaJuridica.objects.all().order_by(\n 'numero')\n template_norma = 'materia/impressos/normas_pdf.html'\n\n titulo = form.cleaned_data['titulo']\n\n if form.cleaned_data['tipo_norma']:\n normas = normas.filter(tipo=form.cleaned_data['tipo_norma'])\n\n if form.cleaned_data['data_inicial']:\n normas = normas.filter(\n data__gte=form.cleaned_data['data_inicial'],\n data__lte=form.cleaned_data['data_final'])\n\n qtd_resultados = len(normas)\n if qtd_resultados > 2000:\n normas = normas[:2000]\n\n context = {'quantidade': qtd_resultados,\n 'titulo': titulo,\n 'normas': normas}\n\n return gerar_pdf_impressos(self.request, context, template_norma)\n", "path": "sapl/norma/views.py"}]} | 3,311 | 152 |
gh_patches_debug_31855 | rasdani/github-patches | git_diff | WeblateOrg__weblate-11225 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Orphan screenshot warning doesn't disappear after clearing all unused screenshots
### Describe the issue
Hello!
Recently I found that Weblate doesn't get rid of the "Here is at least one unused screenshot presented" warning after I removed all screenshots that have no assigned strings. They appeared because of a recent project-side rework that removed a pile of strings and replaced them with new ones. Once I got that warning, I started removing these screenshots (as they are now obsolete on my side). However, the warning is still not gone one day after I removed all unused screenshots. Additionally, there is no way to dismiss it manually...
### I already tried
- [X] I've read and searched [the documentation](https://docs.weblate.org/).
- [X] I've searched for similar filed issues in this repository.
### Steps to reproduce the behavior
1. Have a pile of strings.
2. Upload a screenshot for each of them.
3. On the project side, remove a number of strings and add new ones.
4. Wait until the "Unused screenshots" warning pops up.
5. Open the screenshots list and remove every screenshot marked with a red exclamation sign.
6. Wait again, and...
7. It still appears. I checked the whole list of screenshots on all 7 pages (on my end) three times; I swear I removed every unused screenshot, and the warning still wasn't gone.
### Expected behavior
The warning disappears after all unused screenshots are removed or assigned to something else.
### Screenshots

### Exception traceback
_No response_
### How do you run Weblate?
weblate.org service
### Weblate versions
_No response_
### Weblate deploy checks
_No response_
### Additional context
The project and its component where the problem occurred: https://hosted.weblate.org/projects/thextech/engine-general/
--- END ISSUE ---
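The model file below only recomputes the "UnusedScreenshot" alert from an `m2m_changed` receiver, i.e. when a screenshot's unit assignment changes; deleting a screenshot outright never triggers a recalculation, which matches the behaviour described in the issue. A minimal sketch of the missing hook, mirroring the patch shown later in this record and using Django's `post_delete` signal, would look like this:
```python
# Sketch: recompute the UnusedScreenshot alert whenever a Screenshot row is deleted.
from django.db.models.signals import post_delete
from django.dispatch import receiver

from weblate.screenshots.models import Screenshot
from weblate.trans.models.alert import update_alerts  # helper used by the patch below


@receiver(post_delete, sender=Screenshot)
def update_alerts_on_screenshot_delete(sender, instance, **kwargs):
    component = instance.translation.component
    # Only recompute when the alert is currently raised for this component.
    if component.alert_set.filter(name="UnusedScreenshot").exists():
        update_alerts(component, alerts={"UnusedScreenshot"})
```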
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `weblate/screenshots/models.py`
Content:
```
1 # Copyright © Michal Čihař <[email protected]>
2 #
3 # SPDX-License-Identifier: GPL-3.0-or-later
4
5 from __future__ import annotations
6
7 import fnmatch
8 import os
9 from typing import Any, BinaryIO
10
11 from django.conf import settings
12 from django.core.exceptions import ValidationError
13 from django.core.files import File
14 from django.core.files.storage import default_storage
15 from django.db import models
16 from django.db.models import Q
17 from django.db.models.signals import m2m_changed
18 from django.dispatch import receiver
19 from django.urls import reverse
20 from django.utils.translation import gettext_lazy
21
22 from weblate.auth.models import get_anonymous
23 from weblate.checks.flags import Flags
24 from weblate.screenshots.fields import ScreenshotField
25 from weblate.trans.mixins import UserDisplayMixin
26 from weblate.trans.models import Translation, Unit
27 from weblate.trans.signals import vcs_post_update
28 from weblate.trans.tasks import component_alerts
29 from weblate.utils.decorators import disable_for_loaddata
30 from weblate.utils.errors import report_error
31 from weblate.utils.validators import validate_bitmap
32
33
34 class ScreenshotQuerySet(models.QuerySet):
35 def order(self):
36 return self.order_by("name")
37
38 def filter_access(self, user):
39 result = self
40 if user.needs_project_filter:
41 result = result.filter(
42 translation__component__project__in=user.allowed_projects
43 )
44 if user.needs_component_restrictions_filter:
45 result = result.filter(
46 Q(translation__component__restricted=False)
47 | Q(translation__component_id__in=user.component_permissions)
48 )
49 return result
50
51
52 class Screenshot(models.Model, UserDisplayMixin):
53 name = models.CharField(
54 verbose_name=gettext_lazy("Screenshot name"), max_length=200
55 )
56 repository_filename = models.CharField(
57 verbose_name=gettext_lazy("Repository path to screenshot"),
58 help_text=gettext_lazy("Scan for screenshot file change on repository update."),
59 blank=True,
60 max_length=200,
61 )
62 image = ScreenshotField(
63 verbose_name=gettext_lazy("Image"),
64 help_text=gettext_lazy("Upload image up to 2000x2000 pixels."),
65 upload_to="screenshots/",
66 )
67 translation = models.ForeignKey(Translation, on_delete=models.deletion.CASCADE)
68 units = models.ManyToManyField(Unit, blank=True, related_name="screenshots")
69 timestamp = models.DateTimeField(auto_now_add=True)
70 user = models.ForeignKey(
71 settings.AUTH_USER_MODEL,
72 null=True,
73 blank=True,
74 on_delete=models.deletion.SET_NULL,
75 )
76
77 objects = ScreenshotQuerySet.as_manager()
78
79 class Meta:
80 verbose_name = "Screenshot"
81 verbose_name_plural = "Screenshots"
82
83 def __str__(self):
84 return self.name
85
86 def get_absolute_url(self):
87 return reverse("screenshot", kwargs={"pk": self.pk})
88
89 def __init__(self, *args, **kwargs):
90 """Constructor to initialize some cache properties."""
91 super().__init__(*args, **kwargs)
92 # Project backup integration
93 self.import_data: dict[str, Any] = {}
94 self.import_handle: BinaryIO | None = None
95
96 @property
97 def filter_name(self):
98 return f"screenshot:{Flags.format_value(self.name)}"
99
100
101 @receiver(m2m_changed, sender=Screenshot.units.through)
102 @disable_for_loaddata
103 def change_screenshot_assignment(sender, instance, action, **kwargs):
104 # Update alerts in case there is change in string assignment
105 if instance.translation.component.alert_set.filter(
106 name="UnusedScreenshot"
107 ).exists():
108 component_alerts.delay([instance.pk])
109
110
111 def validate_screenshot_image(component, filename):
112 """Returns True if image is validated."""
113 try:
114 full_name = os.path.join(component.full_path, filename)
115 with open(full_name, "rb") as f:
116 image_file = File(f, name=os.path.basename(filename))
117 validate_bitmap(image_file)
118 except ValidationError as error:
119 component.log_error("failed to validate screenshot %s: %s", filename, error)
120 report_error(cause="Could not validate image from repository")
121 return False
122 return True
123
124
125 @receiver(vcs_post_update)
126 def sync_screenshots_from_repo(sender, component, previous_head: str, **kwargs):
127 repository = component.repository
128 changed_files = repository.get_changed_files(compare_to=previous_head)
129
130 screenshots = Screenshot.objects.filter(
131 translation__component=component, repository_filename__in=changed_files
132 )
133
134 # Update existing screenshots
135 for screenshot in screenshots:
136 filename = screenshot.repository_filename
137 component.log_debug("detected screenshot change in repository: %s", filename)
138 changed_files.remove(filename)
139
140 if validate_screenshot_image(component, filename):
141 full_name = os.path.join(component.full_path, filename)
142 with open(full_name, "rb") as f:
143 screenshot.image = File(
144 f,
145 name=default_storage.get_available_name(os.path.basename(filename)),
146 )
147 screenshot.save(update_fields=["image"])
148 component.log_info("updated screenshot from repository: %s", filename)
149
150 # Add new screenshots matching screenshot filemask
151 for filename in changed_files:
152 if fnmatch.fnmatch(
153 filename, component.screenshot_filemask
154 ) and validate_screenshot_image(component, filename):
155 full_name = os.path.join(component.full_path, filename)
156 with open(full_name, "rb") as f:
157 screenshot = Screenshot.objects.create(
158 name=filename,
159 repository_filename=filename,
160 image=File(
161 f,
162 name=default_storage.get_available_name(
163 os.path.basename(filename)
164 ),
165 ),
166 translation=component.source_translation,
167 user=get_anonymous(),
168 )
169 screenshot.save()
170 component.log_info("create screenshot from repository: %s", filename)
171
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/weblate/screenshots/models.py b/weblate/screenshots/models.py
--- a/weblate/screenshots/models.py
+++ b/weblate/screenshots/models.py
@@ -14,7 +14,7 @@
from django.core.files.storage import default_storage
from django.db import models
from django.db.models import Q
-from django.db.models.signals import m2m_changed
+from django.db.models.signals import m2m_changed, post_delete
from django.dispatch import receiver
from django.urls import reverse
from django.utils.translation import gettext_lazy
@@ -24,8 +24,8 @@
from weblate.screenshots.fields import ScreenshotField
from weblate.trans.mixins import UserDisplayMixin
from weblate.trans.models import Translation, Unit
+from weblate.trans.models.alert import update_alerts
from weblate.trans.signals import vcs_post_update
-from weblate.trans.tasks import component_alerts
from weblate.utils.decorators import disable_for_loaddata
from weblate.utils.errors import report_error
from weblate.utils.validators import validate_bitmap
@@ -105,7 +105,16 @@
if instance.translation.component.alert_set.filter(
name="UnusedScreenshot"
).exists():
- component_alerts.delay([instance.pk])
+ update_alerts(instance.translation.component, alerts={"UnusedScreenshot"})
+
+
+@receiver(post_delete, sender=Screenshot)
+def update_alerts_on_screenshot_delete(sender, instance, **kwargs):
+ # Update the unused screenshot alert if screenshot is deleted
+ if instance.translation.component.alert_set.filter(
+ name="UnusedScreenshot"
+ ).exists():
+ update_alerts(instance.translation.component, alerts={"UnusedScreenshot"})
def validate_screenshot_image(component, filename):
| {"golden_diff": "diff --git a/weblate/screenshots/models.py b/weblate/screenshots/models.py\n--- a/weblate/screenshots/models.py\n+++ b/weblate/screenshots/models.py\n@@ -14,7 +14,7 @@\n from django.core.files.storage import default_storage\n from django.db import models\n from django.db.models import Q\n-from django.db.models.signals import m2m_changed\n+from django.db.models.signals import m2m_changed, post_delete\n from django.dispatch import receiver\n from django.urls import reverse\n from django.utils.translation import gettext_lazy\n@@ -24,8 +24,8 @@\n from weblate.screenshots.fields import ScreenshotField\n from weblate.trans.mixins import UserDisplayMixin\n from weblate.trans.models import Translation, Unit\n+from weblate.trans.models.alert import update_alerts\n from weblate.trans.signals import vcs_post_update\n-from weblate.trans.tasks import component_alerts\n from weblate.utils.decorators import disable_for_loaddata\n from weblate.utils.errors import report_error\n from weblate.utils.validators import validate_bitmap\n@@ -105,7 +105,16 @@\n if instance.translation.component.alert_set.filter(\n name=\"UnusedScreenshot\"\n ).exists():\n- component_alerts.delay([instance.pk])\n+ update_alerts(instance.translation.component, alerts={\"UnusedScreenshot\"})\n+\n+\n+@receiver(post_delete, sender=Screenshot)\n+def update_alerts_on_screenshot_delete(sender, instance, **kwargs):\n+ # Update the unused screenshot alert if screenshot is deleted\n+ if instance.translation.component.alert_set.filter(\n+ name=\"UnusedScreenshot\"\n+ ).exists():\n+ update_alerts(instance.translation.component, alerts={\"UnusedScreenshot\"})\n \n \n def validate_screenshot_image(component, filename):\n", "issue": "Orphan screenshot warning doesn't disappear after clear all of unused screenshots\n### Describe the issue\n\nHello!\r\n\r\nRecently I found that WebLate doesn't get rid of the \"Here is at least one unused screenshot presented\" warning after I removed all screenshots that has no assigned string. They appeared because of the recent project-side rework that caused removal of a pile of strings that got been replaced with another one. And once I got that warning, I started to remove these screenshots (as they are now obsolete on my side). However, the warning still not gone after one day since I removed all unused screnshots. And, additionally, there is totally no way to remove it manually...\n\n### I already tried\n\n- [X] I've read and searched [the documentation](https://docs.weblate.org/).\n- [X] I've searched for similar filed issues in this repository.\n\n### Steps to reproduce the behavior\n\n1. Have a pile of strings.\r\n2. Upload a screenshot for each of them.\r\n3. On the project side, remove some number of strings, and add another one.\r\n4. Wait when \"Unused screenshots\" warning will pops up.\r\n5. Open screenshots list and remove every screenshot marked by red exclamation sign.\r\n6. Wait again, and...\r\n7. It still appears. 
I checked trice the whole list of screenshots on all 7 pages (on my end), I swear I removed every unused screenshot, and warning still wasn't gone.\n\n### Expected behavior\n\nThe warning gets disappear after all unused screenshots got been removed or assigned to anything also.\n\n### Screenshots\n\n\r\n\n\n### Exception traceback\n\n_No response_\n\n### How do you run Weblate?\n\nweblate.org service\n\n### Weblate versions\n\n_No response_\n\n### Weblate deploy checks\n\n_No response_\n\n### Additional context\n\nThe project and its component where the problem ocurred: https://hosted.weblate.org/projects/thextech/engine-general/\n", "before_files": [{"content": "# Copyright \u00a9 Michal \u010ciha\u0159 <[email protected]>\n#\n# SPDX-License-Identifier: GPL-3.0-or-later\n\nfrom __future__ import annotations\n\nimport fnmatch\nimport os\nfrom typing import Any, BinaryIO\n\nfrom django.conf import settings\nfrom django.core.exceptions import ValidationError\nfrom django.core.files import File\nfrom django.core.files.storage import default_storage\nfrom django.db import models\nfrom django.db.models import Q\nfrom django.db.models.signals import m2m_changed\nfrom django.dispatch import receiver\nfrom django.urls import reverse\nfrom django.utils.translation import gettext_lazy\n\nfrom weblate.auth.models import get_anonymous\nfrom weblate.checks.flags import Flags\nfrom weblate.screenshots.fields import ScreenshotField\nfrom weblate.trans.mixins import UserDisplayMixin\nfrom weblate.trans.models import Translation, Unit\nfrom weblate.trans.signals import vcs_post_update\nfrom weblate.trans.tasks import component_alerts\nfrom weblate.utils.decorators import disable_for_loaddata\nfrom weblate.utils.errors import report_error\nfrom weblate.utils.validators import validate_bitmap\n\n\nclass ScreenshotQuerySet(models.QuerySet):\n def order(self):\n return self.order_by(\"name\")\n\n def filter_access(self, user):\n result = self\n if user.needs_project_filter:\n result = result.filter(\n translation__component__project__in=user.allowed_projects\n )\n if user.needs_component_restrictions_filter:\n result = result.filter(\n Q(translation__component__restricted=False)\n | Q(translation__component_id__in=user.component_permissions)\n )\n return result\n\n\nclass Screenshot(models.Model, UserDisplayMixin):\n name = models.CharField(\n verbose_name=gettext_lazy(\"Screenshot name\"), max_length=200\n )\n repository_filename = models.CharField(\n verbose_name=gettext_lazy(\"Repository path to screenshot\"),\n help_text=gettext_lazy(\"Scan for screenshot file change on repository update.\"),\n blank=True,\n max_length=200,\n )\n image = ScreenshotField(\n verbose_name=gettext_lazy(\"Image\"),\n help_text=gettext_lazy(\"Upload image up to 2000x2000 pixels.\"),\n upload_to=\"screenshots/\",\n )\n translation = models.ForeignKey(Translation, on_delete=models.deletion.CASCADE)\n units = models.ManyToManyField(Unit, blank=True, related_name=\"screenshots\")\n timestamp = models.DateTimeField(auto_now_add=True)\n user = models.ForeignKey(\n settings.AUTH_USER_MODEL,\n null=True,\n blank=True,\n on_delete=models.deletion.SET_NULL,\n )\n\n objects = ScreenshotQuerySet.as_manager()\n\n class Meta:\n verbose_name = \"Screenshot\"\n verbose_name_plural = \"Screenshots\"\n\n def __str__(self):\n return self.name\n\n def get_absolute_url(self):\n return reverse(\"screenshot\", kwargs={\"pk\": self.pk})\n\n def __init__(self, *args, **kwargs):\n \"\"\"Constructor to initialize some cache properties.\"\"\"\n 
super().__init__(*args, **kwargs)\n # Project backup integration\n self.import_data: dict[str, Any] = {}\n self.import_handle: BinaryIO | None = None\n\n @property\n def filter_name(self):\n return f\"screenshot:{Flags.format_value(self.name)}\"\n\n\n@receiver(m2m_changed, sender=Screenshot.units.through)\n@disable_for_loaddata\ndef change_screenshot_assignment(sender, instance, action, **kwargs):\n # Update alerts in case there is change in string assignment\n if instance.translation.component.alert_set.filter(\n name=\"UnusedScreenshot\"\n ).exists():\n component_alerts.delay([instance.pk])\n\n\ndef validate_screenshot_image(component, filename):\n \"\"\"Returns True if image is validated.\"\"\"\n try:\n full_name = os.path.join(component.full_path, filename)\n with open(full_name, \"rb\") as f:\n image_file = File(f, name=os.path.basename(filename))\n validate_bitmap(image_file)\n except ValidationError as error:\n component.log_error(\"failed to validate screenshot %s: %s\", filename, error)\n report_error(cause=\"Could not validate image from repository\")\n return False\n return True\n\n\n@receiver(vcs_post_update)\ndef sync_screenshots_from_repo(sender, component, previous_head: str, **kwargs):\n repository = component.repository\n changed_files = repository.get_changed_files(compare_to=previous_head)\n\n screenshots = Screenshot.objects.filter(\n translation__component=component, repository_filename__in=changed_files\n )\n\n # Update existing screenshots\n for screenshot in screenshots:\n filename = screenshot.repository_filename\n component.log_debug(\"detected screenshot change in repository: %s\", filename)\n changed_files.remove(filename)\n\n if validate_screenshot_image(component, filename):\n full_name = os.path.join(component.full_path, filename)\n with open(full_name, \"rb\") as f:\n screenshot.image = File(\n f,\n name=default_storage.get_available_name(os.path.basename(filename)),\n )\n screenshot.save(update_fields=[\"image\"])\n component.log_info(\"updated screenshot from repository: %s\", filename)\n\n # Add new screenshots matching screenshot filemask\n for filename in changed_files:\n if fnmatch.fnmatch(\n filename, component.screenshot_filemask\n ) and validate_screenshot_image(component, filename):\n full_name = os.path.join(component.full_path, filename)\n with open(full_name, \"rb\") as f:\n screenshot = Screenshot.objects.create(\n name=filename,\n repository_filename=filename,\n image=File(\n f,\n name=default_storage.get_available_name(\n os.path.basename(filename)\n ),\n ),\n translation=component.source_translation,\n user=get_anonymous(),\n )\n screenshot.save()\n component.log_info(\"create screenshot from repository: %s\", filename)\n", "path": "weblate/screenshots/models.py"}], "after_files": [{"content": "# Copyright \u00a9 Michal \u010ciha\u0159 <[email protected]>\n#\n# SPDX-License-Identifier: GPL-3.0-or-later\n\nfrom __future__ import annotations\n\nimport fnmatch\nimport os\nfrom typing import Any, BinaryIO\n\nfrom django.conf import settings\nfrom django.core.exceptions import ValidationError\nfrom django.core.files import File\nfrom django.core.files.storage import default_storage\nfrom django.db import models\nfrom django.db.models import Q\nfrom django.db.models.signals import m2m_changed, post_delete\nfrom django.dispatch import receiver\nfrom django.urls import reverse\nfrom django.utils.translation import gettext_lazy\n\nfrom weblate.auth.models import get_anonymous\nfrom weblate.checks.flags import Flags\nfrom weblate.screenshots.fields 
import ScreenshotField\nfrom weblate.trans.mixins import UserDisplayMixin\nfrom weblate.trans.models import Translation, Unit\nfrom weblate.trans.models.alert import update_alerts\nfrom weblate.trans.signals import vcs_post_update\nfrom weblate.utils.decorators import disable_for_loaddata\nfrom weblate.utils.errors import report_error\nfrom weblate.utils.validators import validate_bitmap\n\n\nclass ScreenshotQuerySet(models.QuerySet):\n def order(self):\n return self.order_by(\"name\")\n\n def filter_access(self, user):\n result = self\n if user.needs_project_filter:\n result = result.filter(\n translation__component__project__in=user.allowed_projects\n )\n if user.needs_component_restrictions_filter:\n result = result.filter(\n Q(translation__component__restricted=False)\n | Q(translation__component_id__in=user.component_permissions)\n )\n return result\n\n\nclass Screenshot(models.Model, UserDisplayMixin):\n name = models.CharField(\n verbose_name=gettext_lazy(\"Screenshot name\"), max_length=200\n )\n repository_filename = models.CharField(\n verbose_name=gettext_lazy(\"Repository path to screenshot\"),\n help_text=gettext_lazy(\"Scan for screenshot file change on repository update.\"),\n blank=True,\n max_length=200,\n )\n image = ScreenshotField(\n verbose_name=gettext_lazy(\"Image\"),\n help_text=gettext_lazy(\"Upload image up to 2000x2000 pixels.\"),\n upload_to=\"screenshots/\",\n )\n translation = models.ForeignKey(Translation, on_delete=models.deletion.CASCADE)\n units = models.ManyToManyField(Unit, blank=True, related_name=\"screenshots\")\n timestamp = models.DateTimeField(auto_now_add=True)\n user = models.ForeignKey(\n settings.AUTH_USER_MODEL,\n null=True,\n blank=True,\n on_delete=models.deletion.SET_NULL,\n )\n\n objects = ScreenshotQuerySet.as_manager()\n\n class Meta:\n verbose_name = \"Screenshot\"\n verbose_name_plural = \"Screenshots\"\n\n def __str__(self):\n return self.name\n\n def get_absolute_url(self):\n return reverse(\"screenshot\", kwargs={\"pk\": self.pk})\n\n def __init__(self, *args, **kwargs):\n \"\"\"Constructor to initialize some cache properties.\"\"\"\n super().__init__(*args, **kwargs)\n # Project backup integration\n self.import_data: dict[str, Any] = {}\n self.import_handle: BinaryIO | None = None\n\n @property\n def filter_name(self):\n return f\"screenshot:{Flags.format_value(self.name)}\"\n\n\n@receiver(m2m_changed, sender=Screenshot.units.through)\n@disable_for_loaddata\ndef change_screenshot_assignment(sender, instance, action, **kwargs):\n # Update alerts in case there is change in string assignment\n if instance.translation.component.alert_set.filter(\n name=\"UnusedScreenshot\"\n ).exists():\n update_alerts(instance.translation.component, alerts={\"UnusedScreenshot\"})\n\n\n@receiver(post_delete, sender=Screenshot)\ndef update_alerts_on_screenshot_delete(sender, instance, **kwargs):\n # Update the unused screenshot alert if screenshot is deleted\n if instance.translation.component.alert_set.filter(\n name=\"UnusedScreenshot\"\n ).exists():\n update_alerts(instance.translation.component, alerts={\"UnusedScreenshot\"})\n\n\ndef validate_screenshot_image(component, filename):\n \"\"\"Returns True if image is validated.\"\"\"\n try:\n full_name = os.path.join(component.full_path, filename)\n with open(full_name, \"rb\") as f:\n image_file = File(f, name=os.path.basename(filename))\n validate_bitmap(image_file)\n except ValidationError as error:\n component.log_error(\"failed to validate screenshot %s: %s\", filename, error)\n 
report_error(cause=\"Could not validate image from repository\")\n return False\n return True\n\n\n@receiver(vcs_post_update)\ndef sync_screenshots_from_repo(sender, component, previous_head: str, **kwargs):\n repository = component.repository\n changed_files = repository.get_changed_files(compare_to=previous_head)\n\n screenshots = Screenshot.objects.filter(\n translation__component=component, repository_filename__in=changed_files\n )\n\n # Update existing screenshots\n for screenshot in screenshots:\n filename = screenshot.repository_filename\n component.log_debug(\"detected screenshot change in repository: %s\", filename)\n changed_files.remove(filename)\n\n if validate_screenshot_image(component, filename):\n full_name = os.path.join(component.full_path, filename)\n with open(full_name, \"rb\") as f:\n screenshot.image = File(\n f,\n name=default_storage.get_available_name(os.path.basename(filename)),\n )\n screenshot.save(update_fields=[\"image\"])\n component.log_info(\"updated screenshot from repository: %s\", filename)\n\n # Add new screenshots matching screenshot filemask\n for filename in changed_files:\n if fnmatch.fnmatch(\n filename, component.screenshot_filemask\n ) and validate_screenshot_image(component, filename):\n full_name = os.path.join(component.full_path, filename)\n with open(full_name, \"rb\") as f:\n screenshot = Screenshot.objects.create(\n name=filename,\n repository_filename=filename,\n image=File(\n f,\n name=default_storage.get_available_name(\n os.path.basename(filename)\n ),\n ),\n translation=component.source_translation,\n user=get_anonymous(),\n )\n screenshot.save()\n component.log_info(\"create screenshot from repository: %s\", filename)\n", "path": "weblate/screenshots/models.py"}]} | 2,400 | 381 |
gh_patches_debug_29411 | rasdani/github-patches | git_diff | cloudtools__troposphere-836 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add ResourceLifecycleConfig to AWS::ElasticBeanstalk::Application
[AWS::ElasticBeanstalk::Application](http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk.html)
Use the ResourceLifecycleConfig property to define lifecycle settings for resources that belong to the application, and the service role that Elastic Beanstalk assumes in order to apply lifecycle settings.
--- END ISSUE ---
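For context, a minimal sketch of what the requested property could look like in a template once it is modeled in troposphere. The class names (`ApplicationResourceLifecycleConfig`, `ApplicationVersionLifecycleConfig`, `MaxCountRule`) mirror the CloudFormation resource specification and only exist after the change below; the application name and service-role ARN are placeholders.

```python
from troposphere import Template
from troposphere.elasticbeanstalk import (
    Application,
    ApplicationResourceLifecycleConfig,
    ApplicationVersionLifecycleConfig,
    MaxCountRule,
)

template = Template()
template.add_resource(
    Application(
        "MyApp",
        ApplicationName="my-app",
        ResourceLifecycleConfig=ApplicationResourceLifecycleConfig(
            ServiceRole="arn:aws:iam::123456789012:role/aws-elasticbeanstalk-service-role",
            VersionLifecycleConfig=ApplicationVersionLifecycleConfig(
                MaxCountRule=MaxCountRule(
                    Enabled=True,
                    MaxCount=10,
                    DeleteSourceFromS3=True,
                ),
            ),
        ),
    )
)
print(template.to_json())
```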
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `troposphere/elasticbeanstalk.py`
Content:
```
1 # Copyright (c) 2013, Mark Peek <[email protected]>
2 # All rights reserved.
3 #
4 # See LICENSE file for full license.
5
6 from . import AWSObject, AWSProperty, Tags
7
8
9 WebServer = "WebServer"
10 Worker = "Worker"
11 WebServerType = "Standard"
12 WorkerType = "SQS/HTTP"
13
14
15 class SourceBundle(AWSProperty):
16 props = {
17 'S3Bucket': (basestring, True),
18 'S3Key': (basestring, True),
19 }
20
21
22 class SourceConfiguration(AWSProperty):
23 props = {
24 'ApplicationName': (basestring, True),
25 'TemplateName': (basestring, True),
26 }
27
28
29 class OptionSettings(AWSProperty):
30 props = {
31 'Namespace': (basestring, True),
32 'OptionName': (basestring, True),
33 'Value': (basestring, True),
34 }
35
36
37 class Application(AWSObject):
38 resource_type = "AWS::ElasticBeanstalk::Application"
39
40 props = {
41 'ApplicationName': (basestring, False),
42 'Description': (basestring, False),
43 }
44
45
46 class ApplicationVersion(AWSObject):
47 resource_type = "AWS::ElasticBeanstalk::ApplicationVersion"
48
49 props = {
50 'ApplicationName': (basestring, True),
51 'Description': (basestring, False),
52 'SourceBundle': (SourceBundle, False),
53 }
54
55
56 class ConfigurationTemplate(AWSObject):
57 resource_type = "AWS::ElasticBeanstalk::ConfigurationTemplate"
58
59 props = {
60 'ApplicationName': (basestring, True),
61 'Description': (basestring, False),
62 'EnvironmentId': (basestring, False),
63 'OptionSettings': ([OptionSettings], False),
64 'SolutionStackName': (basestring, False),
65 'SourceConfiguration': (SourceConfiguration, False),
66 }
67
68
69 def validate_tier_name(name):
70 valid_names = [WebServer, Worker]
71 if name not in valid_names:
72 raise ValueError('Tier name needs to be one of %r' % valid_names)
73 return name
74
75
76 def validate_tier_type(tier_type):
77 valid_types = [WebServerType, WorkerType]
78 if tier_type not in valid_types:
79 raise ValueError('Tier type needs to be one of %r' % valid_types)
80 return tier_type
81
82
83 class Tier(AWSProperty):
84 props = {
85 'Name': (validate_tier_name, False),
86 'Type': (validate_tier_type, False),
87 'Version': (basestring, False),
88 }
89
90
91 class Environment(AWSObject):
92 resource_type = "AWS::ElasticBeanstalk::Environment"
93
94 props = {
95 'ApplicationName': (basestring, True),
96 'CNAMEPrefix': (basestring, False),
97 'Description': (basestring, False),
98 'EnvironmentName': (basestring, False),
99 'OptionSettings': ([OptionSettings], False),
100 'SolutionStackName': (basestring, False),
101 'Tags': (Tags, False),
102 'TemplateName': (basestring, False),
103 'Tier': (Tier, False),
104 'VersionLabel': (basestring, False),
105 }
106
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/troposphere/elasticbeanstalk.py b/troposphere/elasticbeanstalk.py
--- a/troposphere/elasticbeanstalk.py
+++ b/troposphere/elasticbeanstalk.py
@@ -4,7 +4,7 @@
# See LICENSE file for full license.
from . import AWSObject, AWSProperty, Tags
-
+from .validators import boolean, integer
WebServer = "WebServer"
Worker = "Worker"
@@ -12,6 +12,29 @@
WorkerType = "SQS/HTTP"
+class MaxAgeRule(AWSProperty):
+ props = {
+ 'DeleteSourceFromS3': (boolean, False),
+ 'Enabled': (boolean, False),
+ 'MaxAgeInDays': (integer, False),
+ }
+
+
+class MaxCountRule(AWSProperty):
+ props = {
+ 'DeleteSourceFromS3': (boolean, False),
+ 'Enabled': (boolean, False),
+ 'MaxCount': (integer, False),
+ }
+
+
+class ApplicationVersionLifecycleConfig(AWSProperty):
+ props = {
+ 'MaxAgeRule': (MaxAgeRule, False),
+ 'MaxCountRule': (MaxCountRule, False),
+ }
+
+
class SourceBundle(AWSProperty):
props = {
'S3Bucket': (basestring, True),
@@ -26,6 +49,13 @@
}
+class ApplicationResourceLifecycleConfig(AWSProperty):
+ props = {
+ 'ServiceRole': (basestring, False),
+ 'VersionLifecycleConfig': (ApplicationVersionLifecycleConfig, False),
+ }
+
+
class OptionSettings(AWSProperty):
props = {
'Namespace': (basestring, True),
@@ -40,6 +70,7 @@
props = {
'ApplicationName': (basestring, False),
'Description': (basestring, False),
+ 'ResourceLifecycleConfig': (ApplicationResourceLifecycleConfig, False),
}
| {"golden_diff": "diff --git a/troposphere/elasticbeanstalk.py b/troposphere/elasticbeanstalk.py\n--- a/troposphere/elasticbeanstalk.py\n+++ b/troposphere/elasticbeanstalk.py\n@@ -4,7 +4,7 @@\n # See LICENSE file for full license.\n \n from . import AWSObject, AWSProperty, Tags\n-\n+from .validators import boolean, integer\n \n WebServer = \"WebServer\"\n Worker = \"Worker\"\n@@ -12,6 +12,29 @@\n WorkerType = \"SQS/HTTP\"\n \n \n+class MaxAgeRule(AWSProperty):\n+ props = {\n+ 'DeleteSourceFromS3': (boolean, False),\n+ 'Enabled': (boolean, False),\n+ 'MaxAgeInDays': (integer, False),\n+ }\n+\n+\n+class MaxCountRule(AWSProperty):\n+ props = {\n+ 'DeleteSourceFromS3': (boolean, False),\n+ 'Enabled': (boolean, False),\n+ 'MaxCount': (integer, False),\n+ }\n+\n+\n+class ApplicationVersionLifecycleConfig(AWSProperty):\n+ props = {\n+ 'MaxAgeRule': (MaxAgeRule, False),\n+ 'MaxCountRule': (MaxCountRule, False),\n+ }\n+\n+\n class SourceBundle(AWSProperty):\n props = {\n 'S3Bucket': (basestring, True),\n@@ -26,6 +49,13 @@\n }\n \n \n+class ApplicationResourceLifecycleConfig(AWSProperty):\n+ props = {\n+ 'ServiceRole': (basestring, False),\n+ 'VersionLifecycleConfig': (ApplicationVersionLifecycleConfig, False),\n+ }\n+\n+\n class OptionSettings(AWSProperty):\n props = {\n 'Namespace': (basestring, True),\n@@ -40,6 +70,7 @@\n props = {\n 'ApplicationName': (basestring, False),\n 'Description': (basestring, False),\n+ 'ResourceLifecycleConfig': (ApplicationResourceLifecycleConfig, False),\n }\n", "issue": "Add ResourceLifecycleConfig to AWS::ElasticBeanstalk::Application\n[AWS::ElasticBeanstalk::Application](http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk.html)\r\nUse the ResourceLifecycleConfig property to define lifecycle settings for resources that belong to the application, and the service role that Elastic Beanstalk assumes in order to apply lifecycle settings.\n", "before_files": [{"content": "# Copyright (c) 2013, Mark Peek <[email protected]>\n# All rights reserved.\n#\n# See LICENSE file for full license.\n\nfrom . 
import AWSObject, AWSProperty, Tags\n\n\nWebServer = \"WebServer\"\nWorker = \"Worker\"\nWebServerType = \"Standard\"\nWorkerType = \"SQS/HTTP\"\n\n\nclass SourceBundle(AWSProperty):\n props = {\n 'S3Bucket': (basestring, True),\n 'S3Key': (basestring, True),\n }\n\n\nclass SourceConfiguration(AWSProperty):\n props = {\n 'ApplicationName': (basestring, True),\n 'TemplateName': (basestring, True),\n }\n\n\nclass OptionSettings(AWSProperty):\n props = {\n 'Namespace': (basestring, True),\n 'OptionName': (basestring, True),\n 'Value': (basestring, True),\n }\n\n\nclass Application(AWSObject):\n resource_type = \"AWS::ElasticBeanstalk::Application\"\n\n props = {\n 'ApplicationName': (basestring, False),\n 'Description': (basestring, False),\n }\n\n\nclass ApplicationVersion(AWSObject):\n resource_type = \"AWS::ElasticBeanstalk::ApplicationVersion\"\n\n props = {\n 'ApplicationName': (basestring, True),\n 'Description': (basestring, False),\n 'SourceBundle': (SourceBundle, False),\n }\n\n\nclass ConfigurationTemplate(AWSObject):\n resource_type = \"AWS::ElasticBeanstalk::ConfigurationTemplate\"\n\n props = {\n 'ApplicationName': (basestring, True),\n 'Description': (basestring, False),\n 'EnvironmentId': (basestring, False),\n 'OptionSettings': ([OptionSettings], False),\n 'SolutionStackName': (basestring, False),\n 'SourceConfiguration': (SourceConfiguration, False),\n }\n\n\ndef validate_tier_name(name):\n valid_names = [WebServer, Worker]\n if name not in valid_names:\n raise ValueError('Tier name needs to be one of %r' % valid_names)\n return name\n\n\ndef validate_tier_type(tier_type):\n valid_types = [WebServerType, WorkerType]\n if tier_type not in valid_types:\n raise ValueError('Tier type needs to be one of %r' % valid_types)\n return tier_type\n\n\nclass Tier(AWSProperty):\n props = {\n 'Name': (validate_tier_name, False),\n 'Type': (validate_tier_type, False),\n 'Version': (basestring, False),\n }\n\n\nclass Environment(AWSObject):\n resource_type = \"AWS::ElasticBeanstalk::Environment\"\n\n props = {\n 'ApplicationName': (basestring, True),\n 'CNAMEPrefix': (basestring, False),\n 'Description': (basestring, False),\n 'EnvironmentName': (basestring, False),\n 'OptionSettings': ([OptionSettings], False),\n 'SolutionStackName': (basestring, False),\n 'Tags': (Tags, False),\n 'TemplateName': (basestring, False),\n 'Tier': (Tier, False),\n 'VersionLabel': (basestring, False),\n }\n", "path": "troposphere/elasticbeanstalk.py"}], "after_files": [{"content": "# Copyright (c) 2013, Mark Peek <[email protected]>\n# All rights reserved.\n#\n# See LICENSE file for full license.\n\nfrom . 
import AWSObject, AWSProperty, Tags\nfrom .validators import boolean, integer\n\nWebServer = \"WebServer\"\nWorker = \"Worker\"\nWebServerType = \"Standard\"\nWorkerType = \"SQS/HTTP\"\n\n\nclass MaxAgeRule(AWSProperty):\n props = {\n 'DeleteSourceFromS3': (boolean, False),\n 'Enabled': (boolean, False),\n 'MaxAgeInDays': (integer, False),\n }\n\n\nclass MaxCountRule(AWSProperty):\n props = {\n 'DeleteSourceFromS3': (boolean, False),\n 'Enabled': (boolean, False),\n 'MaxCount': (integer, False),\n }\n\n\nclass ApplicationVersionLifecycleConfig(AWSProperty):\n props = {\n 'MaxAgeRule': (MaxAgeRule, False),\n 'MaxCountRule': (MaxCountRule, False),\n }\n\n\nclass SourceBundle(AWSProperty):\n props = {\n 'S3Bucket': (basestring, True),\n 'S3Key': (basestring, True),\n }\n\n\nclass SourceConfiguration(AWSProperty):\n props = {\n 'ApplicationName': (basestring, True),\n 'TemplateName': (basestring, True),\n }\n\n\nclass ApplicationResourceLifecycleConfig(AWSProperty):\n props = {\n 'ServiceRole': (basestring, False),\n 'VersionLifecycleConfig': (ApplicationVersionLifecycleConfig, False),\n }\n\n\nclass OptionSettings(AWSProperty):\n props = {\n 'Namespace': (basestring, True),\n 'OptionName': (basestring, True),\n 'Value': (basestring, True),\n }\n\n\nclass Application(AWSObject):\n resource_type = \"AWS::ElasticBeanstalk::Application\"\n\n props = {\n 'ApplicationName': (basestring, False),\n 'Description': (basestring, False),\n 'ResourceLifecycleConfig': (ApplicationResourceLifecycleConfig, False),\n }\n\n\nclass ApplicationVersion(AWSObject):\n resource_type = \"AWS::ElasticBeanstalk::ApplicationVersion\"\n\n props = {\n 'ApplicationName': (basestring, True),\n 'Description': (basestring, False),\n 'SourceBundle': (SourceBundle, False),\n }\n\n\nclass ConfigurationTemplate(AWSObject):\n resource_type = \"AWS::ElasticBeanstalk::ConfigurationTemplate\"\n\n props = {\n 'ApplicationName': (basestring, True),\n 'Description': (basestring, False),\n 'EnvironmentId': (basestring, False),\n 'OptionSettings': ([OptionSettings], False),\n 'SolutionStackName': (basestring, False),\n 'SourceConfiguration': (SourceConfiguration, False),\n }\n\n\ndef validate_tier_name(name):\n valid_names = [WebServer, Worker]\n if name not in valid_names:\n raise ValueError('Tier name needs to be one of %r' % valid_names)\n return name\n\n\ndef validate_tier_type(tier_type):\n valid_types = [WebServerType, WorkerType]\n if tier_type not in valid_types:\n raise ValueError('Tier type needs to be one of %r' % valid_types)\n return tier_type\n\n\nclass Tier(AWSProperty):\n props = {\n 'Name': (validate_tier_name, False),\n 'Type': (validate_tier_type, False),\n 'Version': (basestring, False),\n }\n\n\nclass Environment(AWSObject):\n resource_type = \"AWS::ElasticBeanstalk::Environment\"\n\n props = {\n 'ApplicationName': (basestring, True),\n 'CNAMEPrefix': (basestring, False),\n 'Description': (basestring, False),\n 'EnvironmentName': (basestring, False),\n 'OptionSettings': ([OptionSettings], False),\n 'SolutionStackName': (basestring, False),\n 'Tags': (Tags, False),\n 'TemplateName': (basestring, False),\n 'Tier': (Tier, False),\n 'VersionLabel': (basestring, False),\n }\n", "path": "troposphere/elasticbeanstalk.py"}]} | 1,243 | 438 |
gh_patches_debug_30488 | rasdani/github-patches | git_diff | kivy__kivy-3652 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Python 3.4 packager KeyError: 'rthooks'
As described here:
http://stackoverflow.com/questions/31083061/kivy-1-9-0-windows-package-keyerror-rthooks/32366409#32366409
I've partially troubleshot this:
---
The [kivy docs](http://kivy.org/docs/guide/packaging-windows.html) you reference mention adding the following three lines to the top of the `.spec` file:
```
from kivy.tools.packaging.pyinstaller_hooks import install_hooks
import os
install_hooks(globals())
```
The error is happening in `install_hooks(globals())`, which is defined at `\Lib\site-packages\kivy\tools\packaging\pyinstaller_hooks\__init__.py`:
```
from os.path import dirname, join
from functools import partial
curdir = dirname(__file__)
def install_hooks(sym, hookspath=None):
_hookspath = [curdir]
if hookspath is not None:
_hookspath += hookspath
sym['rthooks']['kivy'] = [join(curdir, 'rt-hook-kivy.py')]
sym['Analysis'] = partial(sym['Analysis'], hookspath=_hookspath)
```
But the second last line is causing the message: `WARNING: stderr: KeyError: 'rthooks'`.
So it looks like it's expecting a variable `rthooks` to be in the global namespace, but it's not.
I'm not sure what to do next.
--- END ISSUE ---
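The failure comes from `install_hooks(globals())` mutating names (`rthooks`, `Analysis`) that newer PyInstaller versions no longer expose in the spec's global namespace. A minimal sketch of a `.spec` fragment that sidesteps this by passing the kivy hook locations to `Analysis` directly; `Analysis` is injected by PyInstaller when it executes the spec, `'main.py'` is a placeholder entry point, and `rt-hook-kivy.py` is the runtime hook shipped in the package shown above.

```python
# Fragment of a PyInstaller .spec file (not runnable on its own).
from os.path import dirname, join

import kivy.tools.packaging.pyinstaller_hooks as hooks_pkg

hooks_dir = dirname(hooks_pkg.__file__)

a = Analysis(
    ['main.py'],
    hookspath=[hooks_dir],
    runtime_hooks=[join(hooks_dir, 'rt-hook-kivy.py')],
)
```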
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kivy/tools/packaging/pyinstaller_hooks/__init__.py`
Content:
```
1 from os.path import dirname, join
2 from functools import partial
3
4 curdir = dirname(__file__)
5
6
7 def install_hooks(sym, hookspath=None):
8
9 _hookspath = [curdir]
10 if hookspath is not None:
11 _hookspath += hookspath
12
13 sym['rthooks']['kivy'] = [join(curdir, 'rt-hook-kivy.py')]
14 sym['Analysis'] = partial(sym['Analysis'], hookspath=_hookspath)
15
```
Path: `kivy/tools/packaging/pyinstaller_hooks/hook-kivy.py`
Content:
```
1 '''
2 Kivy hook for PyInstaller
3 =========================
4
5 Kivy load itself in a complete dynamic way. PyImported don't see most of the
6 import cause of the Factory and Core.
7 In addition, the data and missing module are not copied automatically.
8
9 With this hook, everything needed for running kivy is correctly copied.
10
11 Check kivy documentation about how to use these hook for packaging application.
12 '''
13
14 import kivy
15 from kivy.factory import Factory
16
17
18 def get_modules():
19 return [x.get('module', None) for x in Factory.classes.values()]
20
21
22 datas = [
23 (kivy.kivy_data_dir, 'kivy_install'),
24 (kivy.kivy_modules_dir, 'kivy_install'),
25 (kivy.kivy_exts_dir, 'kivy_install'),
26 ]
27
28 # extensions
29 _kivy_modules = [
30
31 # sdl2
32
33 # uncomment this if you need to package pygame.
34 # pygame
35 #'pygame.event',
36 #'pygame.video',
37 #'pygame.image',
38 #'pygame.display',
39 #'pygame',
40 'xml.etree.cElementTree',
41
42 # external modules
43 'kivy.cache',
44 'kivy.atlas',
45 'kivy.network',
46 'kivy.network.urlrequest',
47 'kivy.lib.osc',
48 'kivy.lib.osc.OSC',
49 'kivy.lib.osc.oscAPI',
50 'kivy.lib.mtdev',
51 'kivy.lib.sdl2',
52 'kivy.factory_registers',
53 'kivy.input.recorder',
54 'kivy.input.providers',
55 'kivy.input.providers.tuio',
56 'kivy.input.providers.mouse',
57 'kivy.input.providers.wm_common',
58 'kivy.input.providers.wm_touch',
59 'kivy.input.providers.wm_pen',
60 'kivy.input.providers.hidinput',
61 'kivy.input.providers.linuxwacom',
62 'kivy.input.providers.mactouch',
63 'kivy.input.providers.mouse',
64 'kivy.input.providers.mtdev',
65
66 # compiled modules
67 'kivy.event',
68 'kivy.graphics.buffer',
69 'kivy.graphics.c_opengl_debug',
70 'kivy.graphics.compiler',
71 'kivy.graphics.context_instructions',
72 'kivy.graphics.fbo',
73 'kivy.graphics.instructions',
74 'kivy.graphics.opengl',
75 'kivy.graphics.opengl_utils',
76 'kivy.graphics.shader',
77 'kivy.graphics.stenctil_instructions',
78 'kivy.graphics.texture',
79 'kivy.graphics.transformation',
80 'kivy.graphics.vbo',
81 'kivy.graphics.vertex',
82 'kivy.graphics.vertex_instructions',
83 'kivy.graphics.tesselator',
84 'kivy.properties',
85
86 # core
87 'kivy.core.audio.audio_gstplayer',
88 'kivy.core.audio.audio_pygst',
89 'kivy.core.audio.audio_sdl',
90 'kivy.core.audio.audio_pygame',
91 'kivy.core.camera.camera_avfoundation',
92 'kivy.core.camera.camera_pygst',
93 'kivy.core.camera.camera_opencv',
94 'kivy.core.camera.camera_videocapture',
95 'kivy.core.clipboard.clipboard_sdl2',
96 'kivy.core.clipboard.clipboard_android',
97 'kivy.core.clipboard.clipboard_pygame',
98 'kivy.core.clipboard.clipboard_dummy',
99 'kivy.core.image.img_imageio',
100 'kivy.core.image.img_tex',
101 'kivy.core.image.img_dds',
102 'kivy.core.image.img_sdl2',
103 'kivy.core.image.img_pygame',
104 'kivy.core.image.img_pil',
105 'kivy.core.image.img_gif',
106 'kivy.core.spelling.spelling_enchant',
107 'kivy.core.spelling.spelling_osxappkit',
108 'kivy.core.text.text_sdl2',
109 'kivy.core.text.text_pygame',
110 'kivy.core.text.text_sdlttf',
111 'kivy.core.text.text_pil',
112 'kivy.core.video.video_gstplayer',
113 'kivy.core.video.video_pygst',
114 'kivy.core.video.video_ffmpeg',
115 'kivy.core.video.video_pyglet',
116 'kivy.core.video.video_null',
117 'kivy.core.window.window_sdl2',
118 'kivy.core.window.window_egl_rpi',
119 'kivy.core.window.window_pygame',
120 'kivy.core.window.window_sdl',
121 'kivy.core.window.window_x11',
122 ]
123
124 hiddenimports = _kivy_modules + get_modules()
125 hiddenimports = list(set(hiddenimports))
126
127
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/kivy/tools/packaging/pyinstaller_hooks/__init__.py b/kivy/tools/packaging/pyinstaller_hooks/__init__.py
--- a/kivy/tools/packaging/pyinstaller_hooks/__init__.py
+++ b/kivy/tools/packaging/pyinstaller_hooks/__init__.py
@@ -1,14 +1,15 @@
from os.path import dirname, join
-from functools import partial
curdir = dirname(__file__)
-def install_hooks(sym, hookspath=None):
+def runtime_hooks():
+ return [join(curdir, 'rt-hook-kivy.py')]
- _hookspath = [curdir]
- if hookspath is not None:
- _hookspath += hookspath
- sym['rthooks']['kivy'] = [join(curdir, 'rt-hook-kivy.py')]
- sym['Analysis'] = partial(sym['Analysis'], hookspath=_hookspath)
+def hookspath():
+ return [curdir]
+
+
+def get_hooks():
+ return {'hookspath': hookspath(), 'runtime_hooks': runtime_hooks()}
diff --git a/kivy/tools/packaging/pyinstaller_hooks/hook-kivy.py b/kivy/tools/packaging/pyinstaller_hooks/hook-kivy.py
--- a/kivy/tools/packaging/pyinstaller_hooks/hook-kivy.py
+++ b/kivy/tools/packaging/pyinstaller_hooks/hook-kivy.py
@@ -11,19 +11,37 @@
Check kivy documentation about how to use these hook for packaging application.
'''
+from os.path import join, basename
+from distutils.version import LooseVersion
+import PyInstaller
+
import kivy
from kivy.factory import Factory
+try:
+ pyinst_ver = PyInstaller.get_version() # pyinstaller < 3.0x
+except AttributeError:
+ pyinst_ver = PyInstaller.__version__
+
def get_modules():
return [x.get('module', None) for x in Factory.classes.values()]
-datas = [
- (kivy.kivy_data_dir, 'kivy_install'),
- (kivy.kivy_modules_dir, 'kivy_install'),
- (kivy.kivy_exts_dir, 'kivy_install'),
-]
+if LooseVersion(pyinst_ver) >= LooseVersion('3.0'):
+ # in pyinstaller 3, the directory contents rather than the directory itself
+ # is copied. See https://github.com/pyinstaller/pyinstaller/issues/1513.
+ datas = [
+ (kivy.kivy_data_dir,
+ join('kivy_install', basename(kivy.kivy_data_dir))),
+ (kivy.kivy_modules_dir,
+ join('kivy_install', basename(kivy.kivy_modules_dir))),
+ ]
+else:
+ datas = [
+ (kivy.kivy_data_dir, 'kivy_install'),
+ (kivy.kivy_modules_dir, 'kivy_install'),
+ ]
# extensions
_kivy_modules = [
| {"golden_diff": "diff --git a/kivy/tools/packaging/pyinstaller_hooks/__init__.py b/kivy/tools/packaging/pyinstaller_hooks/__init__.py\n--- a/kivy/tools/packaging/pyinstaller_hooks/__init__.py\n+++ b/kivy/tools/packaging/pyinstaller_hooks/__init__.py\n@@ -1,14 +1,15 @@\n from os.path import dirname, join\n-from functools import partial\n \n curdir = dirname(__file__)\n \n \n-def install_hooks(sym, hookspath=None):\n+def runtime_hooks():\n+ return [join(curdir, 'rt-hook-kivy.py')]\n \n- _hookspath = [curdir]\n- if hookspath is not None:\n- _hookspath += hookspath\n \n- sym['rthooks']['kivy'] = [join(curdir, 'rt-hook-kivy.py')]\n- sym['Analysis'] = partial(sym['Analysis'], hookspath=_hookspath)\n+def hookspath():\n+ return [curdir]\n+\n+\n+def get_hooks():\n+ return {'hookspath': hookspath(), 'runtime_hooks': runtime_hooks()}\ndiff --git a/kivy/tools/packaging/pyinstaller_hooks/hook-kivy.py b/kivy/tools/packaging/pyinstaller_hooks/hook-kivy.py\n--- a/kivy/tools/packaging/pyinstaller_hooks/hook-kivy.py\n+++ b/kivy/tools/packaging/pyinstaller_hooks/hook-kivy.py\n@@ -11,19 +11,37 @@\n Check kivy documentation about how to use these hook for packaging application.\n '''\n \n+from os.path import join, basename\n+from distutils.version import LooseVersion\n+import PyInstaller\n+\n import kivy\n from kivy.factory import Factory\n \n+try:\n+ pyinst_ver = PyInstaller.get_version() # pyinstaller < 3.0x\n+except AttributeError:\n+ pyinst_ver = PyInstaller.__version__\n+\n \n def get_modules():\n return [x.get('module', None) for x in Factory.classes.values()]\n \n \n-datas = [\n- (kivy.kivy_data_dir, 'kivy_install'),\n- (kivy.kivy_modules_dir, 'kivy_install'),\n- (kivy.kivy_exts_dir, 'kivy_install'),\n-]\n+if LooseVersion(pyinst_ver) >= LooseVersion('3.0'):\n+ # in pyinstaller 3, the directory contents rather than the directory itself\n+ # is copied. 
See https://github.com/pyinstaller/pyinstaller/issues/1513.\n+ datas = [\n+ (kivy.kivy_data_dir,\n+ join('kivy_install', basename(kivy.kivy_data_dir))),\n+ (kivy.kivy_modules_dir,\n+ join('kivy_install', basename(kivy.kivy_modules_dir))),\n+ ]\n+else:\n+ datas = [\n+ (kivy.kivy_data_dir, 'kivy_install'),\n+ (kivy.kivy_modules_dir, 'kivy_install'),\n+ ]\n \n # extensions\n _kivy_modules = [\n", "issue": "Python 3.4 packager KeyError: 'rthooks'\nAs described here:\nhttp://stackoverflow.com/questions/31083061/kivy-1-9-0-windows-package-keyerror-rthooks/32366409#32366409\n\nI've partially troubleshooted this:\n\n---\n\nThe [kivy docs](http://kivy.org/docs/guide/packaging-windows.html) you reference mention adding the following three lines to the top of the `.spec` file:\n\n```\nfrom kivy.tools.packaging.pyinstaller_hooks import install_hooks\nimport os\ninstall_hooks(globals())\n```\n\nThe error is happening in `install_hooks(globals())`, which is defined at `\\Lib\\site-packages\\kivy\\tools\\packaging\\pyinstaller_hooks\\__init__.py`:\n\n```\nfrom os.path import dirname, join\nfrom functools import partial\n\ncurdir = dirname(__file__)\n\ndef install_hooks(sym, hookspath=None):\n\n _hookspath = [curdir]\n if hookspath is not None:\n _hookspath += hookspath\n\n sym['rthooks']['kivy'] = [join(curdir, 'rt-hook-kivy.py')]\n sym['Analysis'] = partial(sym['Analysis'], hookspath=_hookspath)\n```\n\nBut the second last line is causing the message: `WARNING: stderr: KeyError: 'rthooks'`.\n\nSo it looks like it's expecting a variable `rthooks` to be in the global namespace, but it's not.\n\nI'm not sure what to do next.\n\n", "before_files": [{"content": "from os.path import dirname, join\nfrom functools import partial\n\ncurdir = dirname(__file__)\n\n\ndef install_hooks(sym, hookspath=None):\n\n _hookspath = [curdir]\n if hookspath is not None:\n _hookspath += hookspath\n\n sym['rthooks']['kivy'] = [join(curdir, 'rt-hook-kivy.py')]\n sym['Analysis'] = partial(sym['Analysis'], hookspath=_hookspath)\n", "path": "kivy/tools/packaging/pyinstaller_hooks/__init__.py"}, {"content": "'''\nKivy hook for PyInstaller\n=========================\n\nKivy load itself in a complete dynamic way. 
PyImported don't see most of the\nimport cause of the Factory and Core.\nIn addition, the data and missing module are not copied automatically.\n\nWith this hook, everything needed for running kivy is correctly copied.\n\nCheck kivy documentation about how to use these hook for packaging application.\n'''\n\nimport kivy\nfrom kivy.factory import Factory\n\n\ndef get_modules():\n return [x.get('module', None) for x in Factory.classes.values()]\n\n\ndatas = [\n (kivy.kivy_data_dir, 'kivy_install'),\n (kivy.kivy_modules_dir, 'kivy_install'),\n (kivy.kivy_exts_dir, 'kivy_install'),\n]\n\n# extensions\n_kivy_modules = [\n\n # sdl2\n\n # uncomment this if you need to package pygame.\n # pygame\n #'pygame.event',\n #'pygame.video',\n #'pygame.image',\n #'pygame.display',\n #'pygame',\n 'xml.etree.cElementTree',\n\n # external modules\n 'kivy.cache',\n 'kivy.atlas',\n 'kivy.network',\n 'kivy.network.urlrequest',\n 'kivy.lib.osc',\n 'kivy.lib.osc.OSC',\n 'kivy.lib.osc.oscAPI',\n 'kivy.lib.mtdev',\n 'kivy.lib.sdl2',\n 'kivy.factory_registers',\n 'kivy.input.recorder',\n 'kivy.input.providers',\n 'kivy.input.providers.tuio',\n 'kivy.input.providers.mouse',\n 'kivy.input.providers.wm_common',\n 'kivy.input.providers.wm_touch',\n 'kivy.input.providers.wm_pen',\n 'kivy.input.providers.hidinput',\n 'kivy.input.providers.linuxwacom',\n 'kivy.input.providers.mactouch',\n 'kivy.input.providers.mouse',\n 'kivy.input.providers.mtdev',\n\n # compiled modules\n 'kivy.event',\n 'kivy.graphics.buffer',\n 'kivy.graphics.c_opengl_debug',\n 'kivy.graphics.compiler',\n 'kivy.graphics.context_instructions',\n 'kivy.graphics.fbo',\n 'kivy.graphics.instructions',\n 'kivy.graphics.opengl',\n 'kivy.graphics.opengl_utils',\n 'kivy.graphics.shader',\n 'kivy.graphics.stenctil_instructions',\n 'kivy.graphics.texture',\n 'kivy.graphics.transformation',\n 'kivy.graphics.vbo',\n 'kivy.graphics.vertex',\n 'kivy.graphics.vertex_instructions',\n 'kivy.graphics.tesselator',\n 'kivy.properties',\n\n # core\n 'kivy.core.audio.audio_gstplayer',\n 'kivy.core.audio.audio_pygst',\n 'kivy.core.audio.audio_sdl',\n 'kivy.core.audio.audio_pygame',\n 'kivy.core.camera.camera_avfoundation',\n 'kivy.core.camera.camera_pygst',\n 'kivy.core.camera.camera_opencv',\n 'kivy.core.camera.camera_videocapture',\n 'kivy.core.clipboard.clipboard_sdl2',\n 'kivy.core.clipboard.clipboard_android',\n 'kivy.core.clipboard.clipboard_pygame',\n 'kivy.core.clipboard.clipboard_dummy',\n 'kivy.core.image.img_imageio',\n 'kivy.core.image.img_tex',\n 'kivy.core.image.img_dds',\n 'kivy.core.image.img_sdl2',\n 'kivy.core.image.img_pygame',\n 'kivy.core.image.img_pil',\n 'kivy.core.image.img_gif',\n 'kivy.core.spelling.spelling_enchant',\n 'kivy.core.spelling.spelling_osxappkit',\n 'kivy.core.text.text_sdl2',\n 'kivy.core.text.text_pygame',\n 'kivy.core.text.text_sdlttf',\n 'kivy.core.text.text_pil',\n 'kivy.core.video.video_gstplayer',\n 'kivy.core.video.video_pygst',\n 'kivy.core.video.video_ffmpeg',\n 'kivy.core.video.video_pyglet',\n 'kivy.core.video.video_null',\n 'kivy.core.window.window_sdl2',\n 'kivy.core.window.window_egl_rpi',\n 'kivy.core.window.window_pygame',\n 'kivy.core.window.window_sdl',\n 'kivy.core.window.window_x11',\n]\n\nhiddenimports = _kivy_modules + get_modules()\nhiddenimports = list(set(hiddenimports))\n\n", "path": "kivy/tools/packaging/pyinstaller_hooks/hook-kivy.py"}], "after_files": [{"content": "from os.path import dirname, join\n\ncurdir = dirname(__file__)\n\n\ndef runtime_hooks():\n return [join(curdir, 'rt-hook-kivy.py')]\n\n\ndef 
hookspath():\n return [curdir]\n\n\ndef get_hooks():\n return {'hookspath': hookspath(), 'runtime_hooks': runtime_hooks()}\n", "path": "kivy/tools/packaging/pyinstaller_hooks/__init__.py"}, {"content": "'''\nKivy hook for PyInstaller\n=========================\n\nKivy load itself in a complete dynamic way. PyImported don't see most of the\nimport cause of the Factory and Core.\nIn addition, the data and missing module are not copied automatically.\n\nWith this hook, everything needed for running kivy is correctly copied.\n\nCheck kivy documentation about how to use these hook for packaging application.\n'''\n\nfrom os.path import join, basename\nfrom distutils.version import LooseVersion\nimport PyInstaller\n\nimport kivy\nfrom kivy.factory import Factory\n\ntry:\n pyinst_ver = PyInstaller.get_version() # pyinstaller < 3.0x\nexcept AttributeError:\n pyinst_ver = PyInstaller.__version__\n\n\ndef get_modules():\n return [x.get('module', None) for x in Factory.classes.values()]\n\n\nif LooseVersion(pyinst_ver) >= LooseVersion('3.0'):\n # in pyinstaller 3, the directory contents rather than the directory itself\n # is copied. See https://github.com/pyinstaller/pyinstaller/issues/1513.\n datas = [\n (kivy.kivy_data_dir,\n join('kivy_install', basename(kivy.kivy_data_dir))),\n (kivy.kivy_modules_dir,\n join('kivy_install', basename(kivy.kivy_modules_dir))),\n ]\nelse:\n datas = [\n (kivy.kivy_data_dir, 'kivy_install'),\n (kivy.kivy_modules_dir, 'kivy_install'),\n ]\n\n# extensions\n_kivy_modules = [\n\n # sdl2\n\n # uncomment this if you need to package pygame.\n # pygame\n #'pygame.event',\n #'pygame.video',\n #'pygame.image',\n #'pygame.display',\n #'pygame',\n 'xml.etree.cElementTree',\n\n # external modules\n 'kivy.cache',\n 'kivy.atlas',\n 'kivy.network',\n 'kivy.network.urlrequest',\n 'kivy.lib.osc',\n 'kivy.lib.osc.OSC',\n 'kivy.lib.osc.oscAPI',\n 'kivy.lib.mtdev',\n 'kivy.lib.sdl2',\n 'kivy.factory_registers',\n 'kivy.input.recorder',\n 'kivy.input.providers',\n 'kivy.input.providers.tuio',\n 'kivy.input.providers.mouse',\n 'kivy.input.providers.wm_common',\n 'kivy.input.providers.wm_touch',\n 'kivy.input.providers.wm_pen',\n 'kivy.input.providers.hidinput',\n 'kivy.input.providers.linuxwacom',\n 'kivy.input.providers.mactouch',\n 'kivy.input.providers.mouse',\n 'kivy.input.providers.mtdev',\n\n # compiled modules\n 'kivy.event',\n 'kivy.graphics.buffer',\n 'kivy.graphics.c_opengl_debug',\n 'kivy.graphics.compiler',\n 'kivy.graphics.context_instructions',\n 'kivy.graphics.fbo',\n 'kivy.graphics.instructions',\n 'kivy.graphics.opengl',\n 'kivy.graphics.opengl_utils',\n 'kivy.graphics.shader',\n 'kivy.graphics.stenctil_instructions',\n 'kivy.graphics.texture',\n 'kivy.graphics.transformation',\n 'kivy.graphics.vbo',\n 'kivy.graphics.vertex',\n 'kivy.graphics.vertex_instructions',\n 'kivy.graphics.tesselator',\n 'kivy.properties',\n\n # core\n 'kivy.core.audio.audio_gstplayer',\n 'kivy.core.audio.audio_pygst',\n 'kivy.core.audio.audio_sdl',\n 'kivy.core.audio.audio_pygame',\n 'kivy.core.camera.camera_avfoundation',\n 'kivy.core.camera.camera_pygst',\n 'kivy.core.camera.camera_opencv',\n 'kivy.core.camera.camera_videocapture',\n 'kivy.core.clipboard.clipboard_sdl2',\n 'kivy.core.clipboard.clipboard_android',\n 'kivy.core.clipboard.clipboard_pygame',\n 'kivy.core.clipboard.clipboard_dummy',\n 'kivy.core.image.img_imageio',\n 'kivy.core.image.img_tex',\n 'kivy.core.image.img_dds',\n 'kivy.core.image.img_sdl2',\n 'kivy.core.image.img_pygame',\n 'kivy.core.image.img_pil',\n 
'kivy.core.image.img_gif',\n 'kivy.core.spelling.spelling_enchant',\n 'kivy.core.spelling.spelling_osxappkit',\n 'kivy.core.text.text_sdl2',\n 'kivy.core.text.text_pygame',\n 'kivy.core.text.text_sdlttf',\n 'kivy.core.text.text_pil',\n 'kivy.core.video.video_gstplayer',\n 'kivy.core.video.video_pygst',\n 'kivy.core.video.video_ffmpeg',\n 'kivy.core.video.video_pyglet',\n 'kivy.core.video.video_null',\n 'kivy.core.window.window_sdl2',\n 'kivy.core.window.window_egl_rpi',\n 'kivy.core.window.window_pygame',\n 'kivy.core.window.window_sdl',\n 'kivy.core.window.window_x11',\n]\n\nhiddenimports = _kivy_modules + get_modules()\nhiddenimports = list(set(hiddenimports))\n\n", "path": "kivy/tools/packaging/pyinstaller_hooks/hook-kivy.py"}]} | 1,995 | 649 |
gh_patches_debug_19869 | rasdani/github-patches | git_diff | open-telemetry__opentelemetry-python-529 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add suppress_instrumentation flag in context for Metrics
Similar to the [logic](https://github.com/open-telemetry/opentelemetry-python/blob/master/opentelemetry-sdk/src/opentelemetry/sdk/trace/export/__init__.py#L205) in SpanProcessors, this needs to be done in Metrics to avoid duplicated telemetry when using the HTTP ext or other packages relying on this flag.
--- END ISSUE ---
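A minimal sketch of the pattern being asked for, assuming the exporter's own HTTP calls are what produce the duplicated telemetry: set a `suppress_instrumentation` key in the current context for the duration of the export, mirroring the key the SDK's span processors already check. The helper name is an assumption; its intended use inside `PushController.tick` is shown as comments.

```python
from contextlib import contextmanager

from opentelemetry.context import attach, detach, set_value


@contextmanager
def suppress_instrumentation():
    """Set the suppression key in the current context for the duration of the block."""
    token = attach(set_value("suppress_instrumentation", True))
    try:
        yield
    finally:
        detach(token)


# Inside PushController.tick (sketch):
#     self.meter.collect()
#     with suppress_instrumentation():
#         self.exporter.export(self.meter.batcher.checkpoint_set())
#     self.meter.batcher.finished_collection()
```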
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `opentelemetry-sdk/src/opentelemetry/sdk/metrics/export/controller.py`
Content:
```
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import atexit
16 import threading
17
18
19 class PushController(threading.Thread):
20 """A push based controller, used for exporting.
21
22 Uses a worker thread that periodically collects metrics for exporting,
23 exports them and performs some post-processing.
24 """
25
26 daemon = True
27
28 def __init__(self, meter, exporter, interval, shutdown_on_exit=True):
29 super().__init__()
30 self.meter = meter
31 self.exporter = exporter
32 self.interval = interval
33 self.finished = threading.Event()
34 self._atexit_handler = None
35 if shutdown_on_exit:
36 self._atexit_handler = atexit.register(self.shutdown)
37 self.start()
38
39 def run(self):
40 while not self.finished.wait(self.interval):
41 self.tick()
42
43 def shutdown(self):
44 self.finished.set()
45 self.exporter.shutdown()
46 if self._atexit_handler is not None:
47 atexit.unregister(self._atexit_handler)
48 self._atexit_handler = None
49
50 def tick(self):
51 # Collect all of the meter's metrics to be exported
52 self.meter.collect()
53 # Export the given metrics in the batcher
54 self.exporter.export(self.meter.batcher.checkpoint_set())
55 # Perform post-exporting logic based on batcher configuration
56 self.meter.batcher.finished_collection()
57
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/export/controller.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/export/controller.py
--- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/export/controller.py
+++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/export/controller.py
@@ -15,6 +15,8 @@
import atexit
import threading
+from opentelemetry.context import attach, detach, set_value
+
class PushController(threading.Thread):
"""A push based controller, used for exporting.
@@ -50,7 +52,9 @@
def tick(self):
# Collect all of the meter's metrics to be exported
self.meter.collect()
+ token = attach(set_value("suppress_instrumentation", True))
# Export the given metrics in the batcher
self.exporter.export(self.meter.batcher.checkpoint_set())
+ detach(token)
# Perform post-exporting logic based on batcher configuration
self.meter.batcher.finished_collection()
| {"golden_diff": "diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/export/controller.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/export/controller.py\n--- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/export/controller.py\n+++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/export/controller.py\n@@ -15,6 +15,8 @@\n import atexit\n import threading\n \n+from opentelemetry.context import attach, detach, set_value\n+\n \n class PushController(threading.Thread):\n \"\"\"A push based controller, used for exporting.\n@@ -50,7 +52,9 @@\n def tick(self):\n # Collect all of the meter's metrics to be exported\n self.meter.collect()\n+ token = attach(set_value(\"suppress_instrumentation\", True))\n # Export the given metrics in the batcher\n self.exporter.export(self.meter.batcher.checkpoint_set())\n+ detach(token)\n # Perform post-exporting logic based on batcher configuration\n self.meter.batcher.finished_collection()\n", "issue": "Add suppress_instrumentation flag in context for Metrics\nSimilar to [logic](https://github.com/open-telemetry/opentelemetry-python/blob/master/opentelemetry-sdk/src/opentelemetry/sdk/trace/export/__init__.py#L205) in SpanProcessors, this need to be done in Metrics to avoid duplicated telemetry when using Http ext or other packages relying on this\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport atexit\nimport threading\n\n\nclass PushController(threading.Thread):\n \"\"\"A push based controller, used for exporting.\n\n Uses a worker thread that periodically collects metrics for exporting,\n exports them and performs some post-processing.\n \"\"\"\n\n daemon = True\n\n def __init__(self, meter, exporter, interval, shutdown_on_exit=True):\n super().__init__()\n self.meter = meter\n self.exporter = exporter\n self.interval = interval\n self.finished = threading.Event()\n self._atexit_handler = None\n if shutdown_on_exit:\n self._atexit_handler = atexit.register(self.shutdown)\n self.start()\n\n def run(self):\n while not self.finished.wait(self.interval):\n self.tick()\n\n def shutdown(self):\n self.finished.set()\n self.exporter.shutdown()\n if self._atexit_handler is not None:\n atexit.unregister(self._atexit_handler)\n self._atexit_handler = None\n\n def tick(self):\n # Collect all of the meter's metrics to be exported\n self.meter.collect()\n # Export the given metrics in the batcher\n self.exporter.export(self.meter.batcher.checkpoint_set())\n # Perform post-exporting logic based on batcher configuration\n self.meter.batcher.finished_collection()\n", "path": "opentelemetry-sdk/src/opentelemetry/sdk/metrics/export/controller.py"}], "after_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable 
law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport atexit\nimport threading\n\nfrom opentelemetry.context import attach, detach, set_value\n\n\nclass PushController(threading.Thread):\n \"\"\"A push based controller, used for exporting.\n\n Uses a worker thread that periodically collects metrics for exporting,\n exports them and performs some post-processing.\n \"\"\"\n\n daemon = True\n\n def __init__(self, meter, exporter, interval, shutdown_on_exit=True):\n super().__init__()\n self.meter = meter\n self.exporter = exporter\n self.interval = interval\n self.finished = threading.Event()\n self._atexit_handler = None\n if shutdown_on_exit:\n self._atexit_handler = atexit.register(self.shutdown)\n self.start()\n\n def run(self):\n while not self.finished.wait(self.interval):\n self.tick()\n\n def shutdown(self):\n self.finished.set()\n self.exporter.shutdown()\n if self._atexit_handler is not None:\n atexit.unregister(self._atexit_handler)\n self._atexit_handler = None\n\n def tick(self):\n # Collect all of the meter's metrics to be exported\n self.meter.collect()\n token = attach(set_value(\"suppress_instrumentation\", True))\n # Export the given metrics in the batcher\n self.exporter.export(self.meter.batcher.checkpoint_set())\n detach(token)\n # Perform post-exporting logic based on batcher configuration\n self.meter.batcher.finished_collection()\n", "path": "opentelemetry-sdk/src/opentelemetry/sdk/metrics/export/controller.py"}]} | 858 | 224 |
gh_patches_debug_8934 | rasdani/github-patches | git_diff | vispy__vispy-1595 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Load STL files into vispy
Hi there, I think I found a bug in vispy/vispy/io/mesh.py at line 42:
mesh = load_stl(fname)
when I try to import a *.stl file with read_mesh(fname), an error occurred like this:
File "D:\Python3.5\lib\site-packages\vispy\io\mesh.py", line 43, in read_mesh
mesh = load_stl(fname)
File "D:\Python3.5\lib\site-packages\vispy\io\stl.py", line 43, in load_stl
file_pos = file_obj.tell()
AttributeError: 'str' object has no attribute 'tell'
by changing line 42 to: mesh = trimesh.load(fname), the problem was solved!
--- END ISSUE ---
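The root cause is that `load_stl` expects an open binary file object (it calls `.tell()`/`.read()`), not a path string. A minimal sketch of the workaround, assuming the loader returns a dict keyed like trimesh's STL reader (`"vertices"`, `"faces"`, `"face_normals"`); treat those key names as assumptions on other versions.

```python
from vispy.io.stl import load_stl


def read_stl_mesh(fname):
    """Read an STL file by handing load_stl an open binary file object."""
    with open(fname, mode="rb") as file_obj:
        mesh = load_stl(file_obj)
    # Key names follow vispy's stl loader (borrowed from trimesh).
    return mesh["vertices"], mesh["faces"], mesh["face_normals"]
```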
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `vispy/io/mesh.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # Copyright (c) Vispy Development Team. All Rights Reserved.
3 # Distributed under the (new) BSD License. See LICENSE.txt for more info.
4
5 """ Reading and writing of data like images and meshes.
6 """
7
8 from os import path as op
9
10 from .wavefront import WavefrontReader, WavefrontWriter
11 from .stl import load_stl
12
13
14 def read_mesh(fname):
15 """Read mesh data from file.
16
17 Parameters
18 ----------
19 fname : str
20 File name to read. Format will be inferred from the filename.
21 Currently only '.obj' and '.obj.gz' are supported.
22
23 Returns
24 -------
25 vertices : array
26 Vertices.
27 faces : array | None
28 Triangle face definitions.
29 normals : array
30 Normals for the mesh.
31 texcoords : array | None
32 Texture coordinates.
33 """
34 # Check format
35 fmt = op.splitext(fname)[1].lower()
36 if fmt == '.gz':
37 fmt = op.splitext(op.splitext(fname)[0])[1].lower()
38
39 if fmt in ('.obj'):
40 return WavefrontReader.read(fname)
41 elif fmt in ('.stl'):
42 mesh = load_stl(fname)
43 vertices = mesh.vertices
44 faces = mesh.faces
45 normals = mesh.face_normals
46 texcoords = None
47 return vertices, faces, normals, texcoords
48 elif not format:
49 raise ValueError('read_mesh needs could not determine format.')
50 else:
51 raise ValueError('read_mesh does not understand format %s.' % fmt)
52
53
54 def write_mesh(fname, vertices, faces, normals, texcoords, name='',
55 format='obj', overwrite=False, reshape_faces=True):
56 """ Write mesh data to file.
57
58 Parameters
59 ----------
60 fname : str
61 Filename to write. Must end with ".obj" or ".gz".
62 vertices : array
63 Vertices.
64 faces : array | None
65 Triangle face definitions.
66 normals : array
67 Normals for the mesh.
68 texcoords : array | None
69 Texture coordinates.
70 name : str
71 Name of the object.
72 format : str
73 Currently only "obj" is supported.
74 overwrite : bool
75 If the file exists, overwrite it.
76 reshape_faces : bool
77 Reshape the `faces` array to (Nf, 3). Set to `False`
78 if you need to write a mesh with non triangular faces.
79 """
80 # Check file
81 if op.isfile(fname) and not overwrite:
82 raise IOError('file "%s" exists, use overwrite=True' % fname)
83
84 # Check format
85 if format not in ('obj'):
86 raise ValueError('Only "obj" format writing currently supported')
87 WavefrontWriter.write(fname, vertices, faces,
88 normals, texcoords, name, reshape_faces)
89
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/vispy/io/mesh.py b/vispy/io/mesh.py
--- a/vispy/io/mesh.py
+++ b/vispy/io/mesh.py
@@ -39,10 +39,11 @@
if fmt in ('.obj'):
return WavefrontReader.read(fname)
elif fmt in ('.stl'):
- mesh = load_stl(fname)
- vertices = mesh.vertices
- faces = mesh.faces
- normals = mesh.face_normals
+ file_obj = open(fname, mode='rb')
+ mesh = load_stl(file_obj)
+ vertices = mesh['vertices']
+ faces = mesh['faces']
+ normals = mesh['face_normals']
texcoords = None
return vertices, faces, normals, texcoords
elif not format:
| {"golden_diff": "diff --git a/vispy/io/mesh.py b/vispy/io/mesh.py\n--- a/vispy/io/mesh.py\n+++ b/vispy/io/mesh.py\n@@ -39,10 +39,11 @@\n if fmt in ('.obj'):\n return WavefrontReader.read(fname)\n elif fmt in ('.stl'):\n- mesh = load_stl(fname)\n- vertices = mesh.vertices\n- faces = mesh.faces\n- normals = mesh.face_normals\n+ file_obj = open(fname, mode='rb')\n+ mesh = load_stl(file_obj)\n+ vertices = mesh['vertices']\n+ faces = mesh['faces']\n+ normals = mesh['face_normals']\n texcoords = None\n return vertices, faces, normals, texcoords\n elif not format:\n", "issue": "Load STL files into vispy\nHi there, I think I found a bug in vispy/vispy/io/mesh.py in col 42:\r\nmesh = load_stl(fname)\r\nwhen I try to import a *.stl file by read_mesh(fname), an error occured like this: \r\n File \"D:\\Python3.5\\lib\\site-packages\\vispy\\io\\mesh.py\", line 43, in read_mesh\r\n mesh = load_stl(fname)\r\n File \"D:\\Python3.5\\lib\\site-packages\\vispy\\io\\stl.py\", line 43, in load_stl\r\n file_pos = file_obj.tell()\r\nAttributeError: 'str' object has no attribute 'tell'\r\nby change col42 into :mesh = trimesh.load(fname), problem soved!\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright (c) Vispy Development Team. All Rights Reserved.\n# Distributed under the (new) BSD License. See LICENSE.txt for more info.\n\n\"\"\" Reading and writing of data like images and meshes.\n\"\"\"\n\nfrom os import path as op\n\nfrom .wavefront import WavefrontReader, WavefrontWriter\nfrom .stl import load_stl\n\n\ndef read_mesh(fname):\n \"\"\"Read mesh data from file.\n\n Parameters\n ----------\n fname : str\n File name to read. Format will be inferred from the filename.\n Currently only '.obj' and '.obj.gz' are supported.\n\n Returns\n -------\n vertices : array\n Vertices.\n faces : array | None\n Triangle face definitions.\n normals : array\n Normals for the mesh.\n texcoords : array | None\n Texture coordinates.\n \"\"\"\n # Check format\n fmt = op.splitext(fname)[1].lower()\n if fmt == '.gz':\n fmt = op.splitext(op.splitext(fname)[0])[1].lower()\n\n if fmt in ('.obj'):\n return WavefrontReader.read(fname)\n elif fmt in ('.stl'):\n mesh = load_stl(fname)\n vertices = mesh.vertices\n faces = mesh.faces\n normals = mesh.face_normals\n texcoords = None\n return vertices, faces, normals, texcoords\n elif not format:\n raise ValueError('read_mesh needs could not determine format.')\n else:\n raise ValueError('read_mesh does not understand format %s.' % fmt)\n\n\ndef write_mesh(fname, vertices, faces, normals, texcoords, name='',\n format='obj', overwrite=False, reshape_faces=True):\n \"\"\" Write mesh data to file.\n\n Parameters\n ----------\n fname : str\n Filename to write. Must end with \".obj\" or \".gz\".\n vertices : array\n Vertices.\n faces : array | None\n Triangle face definitions.\n normals : array\n Normals for the mesh.\n texcoords : array | None\n Texture coordinates.\n name : str\n Name of the object.\n format : str\n Currently only \"obj\" is supported.\n overwrite : bool\n If the file exists, overwrite it.\n reshape_faces : bool\n Reshape the `faces` array to (Nf, 3). 
Set to `False`\n if you need to write a mesh with non triangular faces.\n \"\"\"\n # Check file\n if op.isfile(fname) and not overwrite:\n raise IOError('file \"%s\" exists, use overwrite=True' % fname)\n\n # Check format\n if format not in ('obj'):\n raise ValueError('Only \"obj\" format writing currently supported')\n WavefrontWriter.write(fname, vertices, faces,\n normals, texcoords, name, reshape_faces)\n", "path": "vispy/io/mesh.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright (c) Vispy Development Team. All Rights Reserved.\n# Distributed under the (new) BSD License. See LICENSE.txt for more info.\n\n\"\"\" Reading and writing of data like images and meshes.\n\"\"\"\n\nfrom os import path as op\n\nfrom .wavefront import WavefrontReader, WavefrontWriter\nfrom .stl import load_stl\n\n\ndef read_mesh(fname):\n \"\"\"Read mesh data from file.\n\n Parameters\n ----------\n fname : str\n File name to read. Format will be inferred from the filename.\n Currently only '.obj' and '.obj.gz' are supported.\n\n Returns\n -------\n vertices : array\n Vertices.\n faces : array | None\n Triangle face definitions.\n normals : array\n Normals for the mesh.\n texcoords : array | None\n Texture coordinates.\n \"\"\"\n # Check format\n fmt = op.splitext(fname)[1].lower()\n if fmt == '.gz':\n fmt = op.splitext(op.splitext(fname)[0])[1].lower()\n\n if fmt in ('.obj'):\n return WavefrontReader.read(fname)\n elif fmt in ('.stl'):\n file_obj = open(fname, mode='rb')\n mesh = load_stl(file_obj)\n vertices = mesh['vertices']\n faces = mesh['faces']\n normals = mesh['face_normals']\n texcoords = None\n return vertices, faces, normals, texcoords\n elif not format:\n raise ValueError('read_mesh needs could not determine format.')\n else:\n raise ValueError('read_mesh does not understand format %s.' % fmt)\n\n\ndef write_mesh(fname, vertices, faces, normals, texcoords, name='',\n format='obj', overwrite=False, reshape_faces=True):\n \"\"\" Write mesh data to file.\n\n Parameters\n ----------\n fname : str\n Filename to write. Must end with \".obj\" or \".gz\".\n vertices : array\n Vertices.\n faces : array | None\n Triangle face definitions.\n normals : array\n Normals for the mesh.\n texcoords : array | None\n Texture coordinates.\n name : str\n Name of the object.\n format : str\n Currently only \"obj\" is supported.\n overwrite : bool\n If the file exists, overwrite it.\n reshape_faces : bool\n Reshape the `faces` array to (Nf, 3). Set to `False`\n if you need to write a mesh with non triangular faces.\n \"\"\"\n # Check file\n if op.isfile(fname) and not overwrite:\n raise IOError('file \"%s\" exists, use overwrite=True' % fname)\n\n # Check format\n if format not in ('obj'):\n raise ValueError('Only \"obj\" format writing currently supported')\n WavefrontWriter.write(fname, vertices, faces,\n normals, texcoords, name, reshape_faces)\n", "path": "vispy/io/mesh.py"}]} | 1,213 | 179 |
gh_patches_debug_34641 | rasdani/github-patches | git_diff | bookwyrm-social__bookwyrm-1611 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
List Followers You Know When Looking at User's Follower List
**Is your feature request related to a problem? Please describe.**
When you look at a user's profile, the link to the user's followers will be titled 'X followers you follow', but the followers list is supplied in reverse-chronological order by date followed, with followers in common given no priority.
When trying to decide how to handle a follow request, seeing what people we both know is the most relevant information to me.
**Describe the solution you'd like**
Sort followers that I know (follow) ahead of all other followers when showing a users Followers list (honestly, sorting the Following list this way would be nice too)
**Describe alternatives you've considered**
Have a separate view for 'Followers you know' (and rename the link to the Followers list if it doesn't point to that list)
**Additional context**
Hi mouse
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bookwyrm/views/user.py`
Content:
```
1 """ non-interactive pages """
2 from django.contrib.auth.decorators import login_required
3 from django.core.paginator import Paginator
4 from django.http import Http404
5 from django.shortcuts import redirect
6 from django.template.response import TemplateResponse
7 from django.utils import timezone
8 from django.views import View
9 from django.views.decorators.http import require_POST
10
11 from bookwyrm import models
12 from bookwyrm.activitypub import ActivitypubResponse
13 from bookwyrm.settings import PAGE_LENGTH
14 from .helpers import get_user_from_username, is_api_request
15
16
17 # pylint: disable=no-self-use
18 class User(View):
19 """user profile page"""
20
21 def get(self, request, username):
22 """profile page for a user"""
23 user = get_user_from_username(request.user, username)
24
25 if is_api_request(request):
26 # we have a json request
27 return ActivitypubResponse(user.to_activity())
28 # otherwise we're at a UI view
29
30 shelf_preview = []
31
32 # only show other shelves that should be visible
33 shelves = user.shelf_set
34 is_self = request.user.id == user.id
35 if not is_self:
36 follower = user.followers.filter(id=request.user.id).exists()
37 if follower:
38 shelves = shelves.filter(privacy__in=["public", "followers"])
39 else:
40 shelves = shelves.filter(privacy="public")
41
42 for user_shelf in shelves.all():
43 if not user_shelf.books.count():
44 continue
45 shelf_preview.append(
46 {
47 "name": user_shelf.name,
48 "local_path": user_shelf.local_path,
49 "books": user_shelf.books.all()[:3],
50 "size": user_shelf.books.count(),
51 }
52 )
53 if len(shelf_preview) > 2:
54 break
55
56 # user's posts
57 activities = (
58 models.Status.privacy_filter(
59 request.user,
60 )
61 .filter(user=user)
62 .select_related(
63 "user",
64 "reply_parent",
65 "review__book",
66 "comment__book",
67 "quotation__book",
68 )
69 .prefetch_related(
70 "mention_books",
71 "mention_users",
72 "attachments",
73 )
74 )
75
76 paginated = Paginator(activities, PAGE_LENGTH)
77 goal = models.AnnualGoal.objects.filter(
78 user=user, year=timezone.now().year
79 ).first()
80 if goal:
81 try:
82 goal.raise_visible_to_user(request.user)
83 except Http404:
84 goal = None
85
86 data = {
87 "user": user,
88 "is_self": is_self,
89 "shelves": shelf_preview,
90 "shelf_count": shelves.count(),
91 "activities": paginated.get_page(request.GET.get("page", 1)),
92 "goal": goal,
93 }
94
95 return TemplateResponse(request, "user/user.html", data)
96
97
98 class Followers(View):
99 """list of followers view"""
100
101 def get(self, request, username):
102 """list of followers"""
103 user = get_user_from_username(request.user, username)
104
105 if is_api_request(request):
106 return ActivitypubResponse(user.to_followers_activity(**request.GET))
107
108 paginated = Paginator(
109 user.followers.order_by("-created_date").all(), PAGE_LENGTH
110 )
111 data = {
112 "user": user,
113 "is_self": request.user.id == user.id,
114 "follow_list": paginated.get_page(request.GET.get("page")),
115 }
116 return TemplateResponse(request, "user/relationships/followers.html", data)
117
118
119 class Following(View):
120 """list of following view"""
121
122 def get(self, request, username):
123 """list of followers"""
124 user = get_user_from_username(request.user, username)
125
126 if is_api_request(request):
127 return ActivitypubResponse(user.to_following_activity(**request.GET))
128
129 paginated = Paginator(
130 user.following.order_by("-created_date").all(), PAGE_LENGTH
131 )
132 data = {
133 "user": user,
134 "is_self": request.user.id == user.id,
135 "follow_list": paginated.get_page(request.GET.get("page")),
136 }
137 return TemplateResponse(request, "user/relationships/following.html", data)
138
139
140 class Groups(View):
141 """list of user's groups view"""
142
143 def get(self, request, username):
144 """list of groups"""
145 user = get_user_from_username(request.user, username)
146
147 paginated = Paginator(
148 models.Group.memberships.filter(user=user).order_by("-created_date"),
149 PAGE_LENGTH,
150 )
151 data = {
152 "user": user,
153 "is_self": request.user.id == user.id,
154 "group_list": paginated.get_page(request.GET.get("page")),
155 }
156 return TemplateResponse(request, "user/groups.html", data)
157
158
159 @require_POST
160 @login_required
161 def hide_suggestions(request):
162 """not everyone wants user suggestions"""
163 request.user.show_suggested_users = False
164 request.user.save(broadcast=False, update_fields=["show_suggested_users"])
165 return redirect(request.headers.get("Referer", "/"))
166
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/bookwyrm/views/user.py b/bookwyrm/views/user.py
--- a/bookwyrm/views/user.py
+++ b/bookwyrm/views/user.py
@@ -1,6 +1,7 @@
""" non-interactive pages """
from django.contrib.auth.decorators import login_required
from django.core.paginator import Paginator
+from django.db.models import Q, Count
from django.http import Http404
from django.shortcuts import redirect
from django.template.response import TemplateResponse
@@ -105,9 +106,8 @@
if is_api_request(request):
return ActivitypubResponse(user.to_followers_activity(**request.GET))
- paginated = Paginator(
- user.followers.order_by("-created_date").all(), PAGE_LENGTH
- )
+ followers = annotate_if_follows(request.user, user.followers)
+ paginated = Paginator(followers.all(), PAGE_LENGTH)
data = {
"user": user,
"is_self": request.user.id == user.id,
@@ -126,9 +126,8 @@
if is_api_request(request):
return ActivitypubResponse(user.to_following_activity(**request.GET))
- paginated = Paginator(
- user.following.order_by("-created_date").all(), PAGE_LENGTH
- )
+ following = annotate_if_follows(request.user, user.following)
+ paginated = Paginator(following.all(), PAGE_LENGTH)
data = {
"user": user,
"is_self": request.user.id == user.id,
@@ -137,6 +136,16 @@
return TemplateResponse(request, "user/relationships/following.html", data)
+def annotate_if_follows(user, queryset):
+ """Sort a list of users by if you follow them"""
+ if not user.is_authenticated:
+ return queryset.order_by("-created_date")
+
+ return queryset.annotate(
+ request_user_follows=Count("followers", filter=Q(followers=user))
+ ).order_by("-request_user_follows", "-created_date")
+
+
class Groups(View):
"""list of user's groups view"""
| {"golden_diff": "diff --git a/bookwyrm/views/user.py b/bookwyrm/views/user.py\n--- a/bookwyrm/views/user.py\n+++ b/bookwyrm/views/user.py\n@@ -1,6 +1,7 @@\n \"\"\" non-interactive pages \"\"\"\n from django.contrib.auth.decorators import login_required\n from django.core.paginator import Paginator\n+from django.db.models import Q, Count\n from django.http import Http404\n from django.shortcuts import redirect\n from django.template.response import TemplateResponse\n@@ -105,9 +106,8 @@\n if is_api_request(request):\n return ActivitypubResponse(user.to_followers_activity(**request.GET))\n \n- paginated = Paginator(\n- user.followers.order_by(\"-created_date\").all(), PAGE_LENGTH\n- )\n+ followers = annotate_if_follows(request.user, user.followers)\n+ paginated = Paginator(followers.all(), PAGE_LENGTH)\n data = {\n \"user\": user,\n \"is_self\": request.user.id == user.id,\n@@ -126,9 +126,8 @@\n if is_api_request(request):\n return ActivitypubResponse(user.to_following_activity(**request.GET))\n \n- paginated = Paginator(\n- user.following.order_by(\"-created_date\").all(), PAGE_LENGTH\n- )\n+ following = annotate_if_follows(request.user, user.following)\n+ paginated = Paginator(following.all(), PAGE_LENGTH)\n data = {\n \"user\": user,\n \"is_self\": request.user.id == user.id,\n@@ -137,6 +136,16 @@\n return TemplateResponse(request, \"user/relationships/following.html\", data)\n \n \n+def annotate_if_follows(user, queryset):\n+ \"\"\"Sort a list of users by if you follow them\"\"\"\n+ if not user.is_authenticated:\n+ return queryset.order_by(\"-created_date\")\n+\n+ return queryset.annotate(\n+ request_user_follows=Count(\"followers\", filter=Q(followers=user))\n+ ).order_by(\"-request_user_follows\", \"-created_date\")\n+\n+\n class Groups(View):\n \"\"\"list of user's groups view\"\"\"\n", "issue": "List Followers You Know When Looking at User's Follower List\n**Is your feature request related to a problem? 
Please describe.**\r\nWhen you look at a user's profile, the link to the user's followers will be titled 'X followers you follow', but the followers list is supplied in reverse-chronological order by date followed, with followers in common given no priority.\r\nWhen trying to decide how to handle a follow request, seeing what people we both know is the most relevant information to me.\r\n\r\n**Describe the solution you'd like**\r\nSort followers that I know (follow) ahead of all other followers when showing a users Followers list (honestly, sorting the Following list this way would be nice too)\r\n\r\n**Describe alternatives you've considered**\r\nHave a separate view for 'Followers you know' (and rename the link to the Followers list if it doesn't point to that list)\r\n\r\n**Additional context**\r\nHi mouse\r\n\n", "before_files": [{"content": "\"\"\" non-interactive pages \"\"\"\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.paginator import Paginator\nfrom django.http import Http404\nfrom django.shortcuts import redirect\nfrom django.template.response import TemplateResponse\nfrom django.utils import timezone\nfrom django.views import View\nfrom django.views.decorators.http import require_POST\n\nfrom bookwyrm import models\nfrom bookwyrm.activitypub import ActivitypubResponse\nfrom bookwyrm.settings import PAGE_LENGTH\nfrom .helpers import get_user_from_username, is_api_request\n\n\n# pylint: disable=no-self-use\nclass User(View):\n \"\"\"user profile page\"\"\"\n\n def get(self, request, username):\n \"\"\"profile page for a user\"\"\"\n user = get_user_from_username(request.user, username)\n\n if is_api_request(request):\n # we have a json request\n return ActivitypubResponse(user.to_activity())\n # otherwise we're at a UI view\n\n shelf_preview = []\n\n # only show other shelves that should be visible\n shelves = user.shelf_set\n is_self = request.user.id == user.id\n if not is_self:\n follower = user.followers.filter(id=request.user.id).exists()\n if follower:\n shelves = shelves.filter(privacy__in=[\"public\", \"followers\"])\n else:\n shelves = shelves.filter(privacy=\"public\")\n\n for user_shelf in shelves.all():\n if not user_shelf.books.count():\n continue\n shelf_preview.append(\n {\n \"name\": user_shelf.name,\n \"local_path\": user_shelf.local_path,\n \"books\": user_shelf.books.all()[:3],\n \"size\": user_shelf.books.count(),\n }\n )\n if len(shelf_preview) > 2:\n break\n\n # user's posts\n activities = (\n models.Status.privacy_filter(\n request.user,\n )\n .filter(user=user)\n .select_related(\n \"user\",\n \"reply_parent\",\n \"review__book\",\n \"comment__book\",\n \"quotation__book\",\n )\n .prefetch_related(\n \"mention_books\",\n \"mention_users\",\n \"attachments\",\n )\n )\n\n paginated = Paginator(activities, PAGE_LENGTH)\n goal = models.AnnualGoal.objects.filter(\n user=user, year=timezone.now().year\n ).first()\n if goal:\n try:\n goal.raise_visible_to_user(request.user)\n except Http404:\n goal = None\n\n data = {\n \"user\": user,\n \"is_self\": is_self,\n \"shelves\": shelf_preview,\n \"shelf_count\": shelves.count(),\n \"activities\": paginated.get_page(request.GET.get(\"page\", 1)),\n \"goal\": goal,\n }\n\n return TemplateResponse(request, \"user/user.html\", data)\n\n\nclass Followers(View):\n \"\"\"list of followers view\"\"\"\n\n def get(self, request, username):\n \"\"\"list of followers\"\"\"\n user = get_user_from_username(request.user, username)\n\n if is_api_request(request):\n return 
ActivitypubResponse(user.to_followers_activity(**request.GET))\n\n paginated = Paginator(\n user.followers.order_by(\"-created_date\").all(), PAGE_LENGTH\n )\n data = {\n \"user\": user,\n \"is_self\": request.user.id == user.id,\n \"follow_list\": paginated.get_page(request.GET.get(\"page\")),\n }\n return TemplateResponse(request, \"user/relationships/followers.html\", data)\n\n\nclass Following(View):\n \"\"\"list of following view\"\"\"\n\n def get(self, request, username):\n \"\"\"list of followers\"\"\"\n user = get_user_from_username(request.user, username)\n\n if is_api_request(request):\n return ActivitypubResponse(user.to_following_activity(**request.GET))\n\n paginated = Paginator(\n user.following.order_by(\"-created_date\").all(), PAGE_LENGTH\n )\n data = {\n \"user\": user,\n \"is_self\": request.user.id == user.id,\n \"follow_list\": paginated.get_page(request.GET.get(\"page\")),\n }\n return TemplateResponse(request, \"user/relationships/following.html\", data)\n\n\nclass Groups(View):\n \"\"\"list of user's groups view\"\"\"\n\n def get(self, request, username):\n \"\"\"list of groups\"\"\"\n user = get_user_from_username(request.user, username)\n\n paginated = Paginator(\n models.Group.memberships.filter(user=user).order_by(\"-created_date\"),\n PAGE_LENGTH,\n )\n data = {\n \"user\": user,\n \"is_self\": request.user.id == user.id,\n \"group_list\": paginated.get_page(request.GET.get(\"page\")),\n }\n return TemplateResponse(request, \"user/groups.html\", data)\n\n\n@require_POST\n@login_required\ndef hide_suggestions(request):\n \"\"\"not everyone wants user suggestions\"\"\"\n request.user.show_suggested_users = False\n request.user.save(broadcast=False, update_fields=[\"show_suggested_users\"])\n return redirect(request.headers.get(\"Referer\", \"/\"))\n", "path": "bookwyrm/views/user.py"}], "after_files": [{"content": "\"\"\" non-interactive pages \"\"\"\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.paginator import Paginator\nfrom django.db.models import Q, Count\nfrom django.http import Http404\nfrom django.shortcuts import redirect\nfrom django.template.response import TemplateResponse\nfrom django.utils import timezone\nfrom django.views import View\nfrom django.views.decorators.http import require_POST\n\nfrom bookwyrm import models\nfrom bookwyrm.activitypub import ActivitypubResponse\nfrom bookwyrm.settings import PAGE_LENGTH\nfrom .helpers import get_user_from_username, is_api_request\n\n\n# pylint: disable=no-self-use\nclass User(View):\n \"\"\"user profile page\"\"\"\n\n def get(self, request, username):\n \"\"\"profile page for a user\"\"\"\n user = get_user_from_username(request.user, username)\n\n if is_api_request(request):\n # we have a json request\n return ActivitypubResponse(user.to_activity())\n # otherwise we're at a UI view\n\n shelf_preview = []\n\n # only show other shelves that should be visible\n shelves = user.shelf_set\n is_self = request.user.id == user.id\n if not is_self:\n follower = user.followers.filter(id=request.user.id).exists()\n if follower:\n shelves = shelves.filter(privacy__in=[\"public\", \"followers\"])\n else:\n shelves = shelves.filter(privacy=\"public\")\n\n for user_shelf in shelves.all():\n if not user_shelf.books.count():\n continue\n shelf_preview.append(\n {\n \"name\": user_shelf.name,\n \"local_path\": user_shelf.local_path,\n \"books\": user_shelf.books.all()[:3],\n \"size\": user_shelf.books.count(),\n }\n )\n if len(shelf_preview) > 2:\n break\n\n # user's posts\n activities = (\n 
models.Status.privacy_filter(\n request.user,\n )\n .filter(user=user)\n .select_related(\n \"user\",\n \"reply_parent\",\n \"review__book\",\n \"comment__book\",\n \"quotation__book\",\n )\n .prefetch_related(\n \"mention_books\",\n \"mention_users\",\n \"attachments\",\n )\n )\n\n paginated = Paginator(activities, PAGE_LENGTH)\n goal = models.AnnualGoal.objects.filter(\n user=user, year=timezone.now().year\n ).first()\n if goal:\n try:\n goal.raise_visible_to_user(request.user)\n except Http404:\n goal = None\n\n data = {\n \"user\": user,\n \"is_self\": is_self,\n \"shelves\": shelf_preview,\n \"shelf_count\": shelves.count(),\n \"activities\": paginated.get_page(request.GET.get(\"page\", 1)),\n \"goal\": goal,\n }\n\n return TemplateResponse(request, \"user/user.html\", data)\n\n\nclass Followers(View):\n \"\"\"list of followers view\"\"\"\n\n def get(self, request, username):\n \"\"\"list of followers\"\"\"\n user = get_user_from_username(request.user, username)\n\n if is_api_request(request):\n return ActivitypubResponse(user.to_followers_activity(**request.GET))\n\n followers = annotate_if_follows(request.user, user.followers)\n paginated = Paginator(followers.all(), PAGE_LENGTH)\n data = {\n \"user\": user,\n \"is_self\": request.user.id == user.id,\n \"follow_list\": paginated.get_page(request.GET.get(\"page\")),\n }\n return TemplateResponse(request, \"user/relationships/followers.html\", data)\n\n\nclass Following(View):\n \"\"\"list of following view\"\"\"\n\n def get(self, request, username):\n \"\"\"list of followers\"\"\"\n user = get_user_from_username(request.user, username)\n\n if is_api_request(request):\n return ActivitypubResponse(user.to_following_activity(**request.GET))\n\n following = annotate_if_follows(request.user, user.following)\n paginated = Paginator(following.all(), PAGE_LENGTH)\n data = {\n \"user\": user,\n \"is_self\": request.user.id == user.id,\n \"follow_list\": paginated.get_page(request.GET.get(\"page\")),\n }\n return TemplateResponse(request, \"user/relationships/following.html\", data)\n\n\ndef annotate_if_follows(user, queryset):\n \"\"\"Sort a list of users by if you follow them\"\"\"\n if not user.is_authenticated:\n return queryset.order_by(\"-created_date\")\n\n return queryset.annotate(\n request_user_follows=Count(\"followers\", filter=Q(followers=user))\n ).order_by(\"-request_user_follows\", \"-created_date\")\n\n\nclass Groups(View):\n \"\"\"list of user's groups view\"\"\"\n\n def get(self, request, username):\n \"\"\"list of groups\"\"\"\n user = get_user_from_username(request.user, username)\n\n paginated = Paginator(\n models.Group.memberships.filter(user=user).order_by(\"-created_date\"),\n PAGE_LENGTH,\n )\n data = {\n \"user\": user,\n \"is_self\": request.user.id == user.id,\n \"group_list\": paginated.get_page(request.GET.get(\"page\")),\n }\n return TemplateResponse(request, \"user/groups.html\", data)\n\n\n@require_POST\n@login_required\ndef hide_suggestions(request):\n \"\"\"not everyone wants user suggestions\"\"\"\n request.user.show_suggested_users = False\n request.user.save(broadcast=False, update_fields=[\"show_suggested_users\"])\n return redirect(request.headers.get(\"Referer\", \"/\"))\n", "path": "bookwyrm/views/user.py"}]} | 1,908 | 458 |
gh_patches_debug_7515 | rasdani/github-patches | git_diff | numpy__numpy-5519 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
BUG np.broadcast_arrays does not work on void arrays with object fields
In the process of working on #4622, it was realised that `np.broadcas_arrays` do not work for void arrays with object fields, because in the process of changing new strides and shapes in `as_strided`, the `dtype` gets lost, and this cannot just be reset when object fields are present. While obviously this is somewhat of a corner case, it might still be good to see if it can be fixed easily, perhaps by doing something that preserves the dtype, or by bypassing the test for object fields.
```
import numpy as np
dt = np.dtype([('x', 'i8'), ('y', '?'), ('z', 'O')])
b = np.array([(1, True, None), (2, False, [3, 4, 5])], dtype=dt)
c = np.array([[-1], [-2]])
b_strided, c_strided = np.broadcast_arrays(b, c)
TypeError Traceback (most recent call last)
...
/usr/lib/python3/dist-packages/numpy/lib/stride_tricks.py in as_strided(x, shape, strides)
31 # Make sure dtype is correct in case of custom dtype
32 if array.dtype.kind == 'V':
---> 33 array.dtype = x.dtype
34 return array
35
TypeError: Cannot change data-type for object array.
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `numpy/lib/stride_tricks.py`
Content:
```
1 """
2 Utilities that manipulate strides to achieve desirable effects.
3
4 An explanation of strides can be found in the "ndarray.rst" file in the
5 NumPy reference guide.
6
7 """
8 from __future__ import division, absolute_import, print_function
9
10 import numpy as np
11
12 __all__ = ['broadcast_to', 'broadcast_arrays']
13
14
15 class DummyArray(object):
16 """Dummy object that just exists to hang __array_interface__ dictionaries
17 and possibly keep alive a reference to a base array.
18 """
19
20 def __init__(self, interface, base=None):
21 self.__array_interface__ = interface
22 self.base = base
23
24
25 def _maybe_view_as_subclass(original_array, new_array):
26 if type(original_array) is not type(new_array):
27 # if input was an ndarray subclass and subclasses were OK,
28 # then view the result as that subclass.
29 new_array = new_array.view(type=type(original_array))
30 # Since we have done something akin to a view from original_array, we
31 # should let the subclass finalize (if it has it implemented, i.e., is
32 # not None).
33 if new_array.__array_finalize__:
34 new_array.__array_finalize__(original_array)
35 return new_array
36
37
38 def as_strided(x, shape=None, strides=None, subok=False):
39 """ Make an ndarray from the given array with the given shape and strides.
40 """
41 # first convert input to array, possibly keeping subclass
42 x = np.array(x, copy=False, subok=subok)
43 interface = dict(x.__array_interface__)
44 if shape is not None:
45 interface['shape'] = tuple(shape)
46 if strides is not None:
47 interface['strides'] = tuple(strides)
48 array = np.asarray(DummyArray(interface, base=x))
49 # Make sure dtype is correct in case of custom dtype
50 if array.dtype.kind == 'V':
51 array.dtype = x.dtype
52 return _maybe_view_as_subclass(x, array)
53
54
55 def _broadcast_to(array, shape, subok, readonly):
56 shape = tuple(shape) if np.iterable(shape) else (shape,)
57 array = np.array(array, copy=False, subok=subok)
58 if not shape and array.shape:
59 raise ValueError('cannot broadcast a non-scalar to a scalar array')
60 if any(size < 0 for size in shape):
61 raise ValueError('all elements of broadcast shape must be non-'
62 'negative')
63 broadcast = np.nditer(
64 (array,), flags=['multi_index', 'refs_ok', 'zerosize_ok'],
65 op_flags=['readonly'], itershape=shape, order='C').itviews[0]
66 result = _maybe_view_as_subclass(array, broadcast)
67 if not readonly and array.flags.writeable:
68 result.flags.writeable = True
69 return result
70
71
72 def broadcast_to(array, shape, subok=False):
73 """Broadcast an array to a new shape.
74
75 Parameters
76 ----------
77 array : array_like
78 The array to broadcast.
79 shape : tuple
80 The shape of the desired array.
81 subok : bool, optional
82 If True, then sub-classes will be passed-through, otherwise
83 the returned array will be forced to be a base-class array (default).
84
85 Returns
86 -------
87 broadcast : array
88 A readonly view on the original array with the given shape. It is
89 typically not contiguous. Furthermore, more than one element of a
90 broadcasted array may refer to a single memory location.
91
92 Raises
93 ------
94 ValueError
95 If the array is not compatible with the new shape according to NumPy's
96 broadcasting rules.
97
98 Examples
99 --------
100 >>> x = np.array([1, 2, 3])
101 >>> np.broadcast_to(x, (3, 3))
102 array([[1, 2, 3],
103 [1, 2, 3],
104 [1, 2, 3]])
105 """
106 return _broadcast_to(array, shape, subok=subok, readonly=True)
107
108
109 def _broadcast_shape(*args):
110 """Returns the shape of the ararys that would result from broadcasting the
111 supplied arrays against each other.
112 """
113 if not args:
114 raise ValueError('must provide at least one argument')
115 if len(args) == 1:
116 # a single argument does not work with np.broadcast
117 return np.asarray(args[0]).shape
118 # use the old-iterator because np.nditer does not handle size 0 arrays
119 # consistently
120 b = np.broadcast(*args[:32])
121 # unfortunately, it cannot handle 32 or more arguments directly
122 for pos in range(32, len(args), 31):
123 b = np.broadcast(b, *args[pos:(pos + 31)])
124 return b.shape
125
126
127 def broadcast_arrays(*args, **kwargs):
128 """
129 Broadcast any number of arrays against each other.
130
131 Parameters
132 ----------
133 `*args` : array_likes
134 The arrays to broadcast.
135
136 subok : bool, optional
137 If True, then sub-classes will be passed-through, otherwise
138 the returned arrays will be forced to be a base-class array (default).
139
140 Returns
141 -------
142 broadcasted : list of arrays
143 These arrays are views on the original arrays. They are typically
144 not contiguous. Furthermore, more than one element of a
145 broadcasted array may refer to a single memory location. If you
146 need to write to the arrays, make copies first.
147
148 Examples
149 --------
150 >>> x = np.array([[1,2,3]])
151 >>> y = np.array([[1],[2],[3]])
152 >>> np.broadcast_arrays(x, y)
153 [array([[1, 2, 3],
154 [1, 2, 3],
155 [1, 2, 3]]), array([[1, 1, 1],
156 [2, 2, 2],
157 [3, 3, 3]])]
158
159 Here is a useful idiom for getting contiguous copies instead of
160 non-contiguous views.
161
162 >>> [np.array(a) for a in np.broadcast_arrays(x, y)]
163 [array([[1, 2, 3],
164 [1, 2, 3],
165 [1, 2, 3]]), array([[1, 1, 1],
166 [2, 2, 2],
167 [3, 3, 3]])]
168
169 """
170 # nditer is not used here to avoid the limit of 32 arrays.
171 # Otherwise, something like the following one-liner would suffice:
172 # return np.nditer(args, flags=['multi_index', 'zerosize_ok'],
173 # order='C').itviews
174
175 subok = kwargs.pop('subok', False)
176 if kwargs:
177 raise TypeError('broadcast_arrays() got an unexpected keyword '
178 'argument {}'.format(kwargs.pop()))
179 args = [np.array(_m, copy=False, subok=subok) for _m in args]
180
181 shape = _broadcast_shape(*args)
182
183 if all(array.shape == shape for array in args):
184 # Common case where nothing needs to be broadcasted.
185 return args
186
187 # TODO: consider making the results of broadcast_arrays readonly to match
188 # broadcast_to. This will require a deprecation cycle.
189 return [_broadcast_to(array, shape, subok=subok, readonly=False)
190 for array in args]
191
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/numpy/lib/stride_tricks.py b/numpy/lib/stride_tricks.py
--- a/numpy/lib/stride_tricks.py
+++ b/numpy/lib/stride_tricks.py
@@ -46,9 +46,11 @@
if strides is not None:
interface['strides'] = tuple(strides)
array = np.asarray(DummyArray(interface, base=x))
- # Make sure dtype is correct in case of custom dtype
- if array.dtype.kind == 'V':
+
+ if array.dtype.fields is None and x.dtype.fields is not None:
+ # This should only happen if x.dtype is [('', 'Vx')]
array.dtype = x.dtype
+
return _maybe_view_as_subclass(x, array)
| {"golden_diff": "diff --git a/numpy/lib/stride_tricks.py b/numpy/lib/stride_tricks.py\n--- a/numpy/lib/stride_tricks.py\n+++ b/numpy/lib/stride_tricks.py\n@@ -46,9 +46,11 @@\n if strides is not None:\n interface['strides'] = tuple(strides)\n array = np.asarray(DummyArray(interface, base=x))\n- # Make sure dtype is correct in case of custom dtype\n- if array.dtype.kind == 'V':\n+\n+ if array.dtype.fields is None and x.dtype.fields is not None:\n+ # This should only happen if x.dtype is [('', 'Vx')]\n array.dtype = x.dtype\n+\n return _maybe_view_as_subclass(x, array)\n", "issue": "BUG np.broadcast_arrays does not work on void arrays with object fields\nIn the process of working on #4622, it was realised that `np.broadcas_arrays` do not work for void arrays with object fields, because in the process of changing new strides and shapes in `as_strided`, the `dtype` gets lost, and this cannot just be reset when object fields are present. While obviously this is somewhat of a corner case, it might still be good to see if it can be fixed easily, perhaps by doing something that preserves the dtype, or by bypassing the test for object fields.\n\n```\nimport numpy as np\ndt = np.dtype([('x', 'i8'), ('y', '?'), ('z', 'O')])\nb = np.array([(1, True, None), (2, False, [3, 4, 5])], dtype=dt)\nc = np.array([[-1], [-2]])\nb_strided, c_strided = np.broadcast_arrays(b, c)\nTypeError Traceback (most recent call last)\n...\n/usr/lib/python3/dist-packages/numpy/lib/stride_tricks.py in as_strided(x, shape, strides)\n 31 # Make sure dtype is correct in case of custom dtype\n 32 if array.dtype.kind == 'V':\n---> 33 array.dtype = x.dtype\n 34 return array\n 35 \n\nTypeError: Cannot change data-type for object array.\n```\n\n", "before_files": [{"content": "\"\"\"\nUtilities that manipulate strides to achieve desirable effects.\n\nAn explanation of strides can be found in the \"ndarray.rst\" file in the\nNumPy reference guide.\n\n\"\"\"\nfrom __future__ import division, absolute_import, print_function\n\nimport numpy as np\n\n__all__ = ['broadcast_to', 'broadcast_arrays']\n\n\nclass DummyArray(object):\n \"\"\"Dummy object that just exists to hang __array_interface__ dictionaries\n and possibly keep alive a reference to a base array.\n \"\"\"\n\n def __init__(self, interface, base=None):\n self.__array_interface__ = interface\n self.base = base\n\n\ndef _maybe_view_as_subclass(original_array, new_array):\n if type(original_array) is not type(new_array):\n # if input was an ndarray subclass and subclasses were OK,\n # then view the result as that subclass.\n new_array = new_array.view(type=type(original_array))\n # Since we have done something akin to a view from original_array, we\n # should let the subclass finalize (if it has it implemented, i.e., is\n # not None).\n if new_array.__array_finalize__:\n new_array.__array_finalize__(original_array)\n return new_array\n\n\ndef as_strided(x, shape=None, strides=None, subok=False):\n \"\"\" Make an ndarray from the given array with the given shape and strides.\n \"\"\"\n # first convert input to array, possibly keeping subclass\n x = np.array(x, copy=False, subok=subok)\n interface = dict(x.__array_interface__)\n if shape is not None:\n interface['shape'] = tuple(shape)\n if strides is not None:\n interface['strides'] = tuple(strides)\n array = np.asarray(DummyArray(interface, base=x))\n # Make sure dtype is correct in case of custom dtype\n if array.dtype.kind == 'V':\n array.dtype = x.dtype\n return _maybe_view_as_subclass(x, array)\n\n\ndef _broadcast_to(array, 
shape, subok, readonly):\n shape = tuple(shape) if np.iterable(shape) else (shape,)\n array = np.array(array, copy=False, subok=subok)\n if not shape and array.shape:\n raise ValueError('cannot broadcast a non-scalar to a scalar array')\n if any(size < 0 for size in shape):\n raise ValueError('all elements of broadcast shape must be non-'\n 'negative')\n broadcast = np.nditer(\n (array,), flags=['multi_index', 'refs_ok', 'zerosize_ok'],\n op_flags=['readonly'], itershape=shape, order='C').itviews[0]\n result = _maybe_view_as_subclass(array, broadcast)\n if not readonly and array.flags.writeable:\n result.flags.writeable = True\n return result\n\n\ndef broadcast_to(array, shape, subok=False):\n \"\"\"Broadcast an array to a new shape.\n\n Parameters\n ----------\n array : array_like\n The array to broadcast.\n shape : tuple\n The shape of the desired array.\n subok : bool, optional\n If True, then sub-classes will be passed-through, otherwise\n the returned array will be forced to be a base-class array (default).\n\n Returns\n -------\n broadcast : array\n A readonly view on the original array with the given shape. It is\n typically not contiguous. Furthermore, more than one element of a\n broadcasted array may refer to a single memory location.\n\n Raises\n ------\n ValueError\n If the array is not compatible with the new shape according to NumPy's\n broadcasting rules.\n\n Examples\n --------\n >>> x = np.array([1, 2, 3])\n >>> np.broadcast_to(x, (3, 3))\n array([[1, 2, 3],\n [1, 2, 3],\n [1, 2, 3]])\n \"\"\"\n return _broadcast_to(array, shape, subok=subok, readonly=True)\n\n\ndef _broadcast_shape(*args):\n \"\"\"Returns the shape of the ararys that would result from broadcasting the\n supplied arrays against each other.\n \"\"\"\n if not args:\n raise ValueError('must provide at least one argument')\n if len(args) == 1:\n # a single argument does not work with np.broadcast\n return np.asarray(args[0]).shape\n # use the old-iterator because np.nditer does not handle size 0 arrays\n # consistently\n b = np.broadcast(*args[:32])\n # unfortunately, it cannot handle 32 or more arguments directly\n for pos in range(32, len(args), 31):\n b = np.broadcast(b, *args[pos:(pos + 31)])\n return b.shape\n\n\ndef broadcast_arrays(*args, **kwargs):\n \"\"\"\n Broadcast any number of arrays against each other.\n\n Parameters\n ----------\n `*args` : array_likes\n The arrays to broadcast.\n\n subok : bool, optional\n If True, then sub-classes will be passed-through, otherwise\n the returned arrays will be forced to be a base-class array (default).\n\n Returns\n -------\n broadcasted : list of arrays\n These arrays are views on the original arrays. They are typically\n not contiguous. Furthermore, more than one element of a\n broadcasted array may refer to a single memory location. 
If you\n need to write to the arrays, make copies first.\n\n Examples\n --------\n >>> x = np.array([[1,2,3]])\n >>> y = np.array([[1],[2],[3]])\n >>> np.broadcast_arrays(x, y)\n [array([[1, 2, 3],\n [1, 2, 3],\n [1, 2, 3]]), array([[1, 1, 1],\n [2, 2, 2],\n [3, 3, 3]])]\n\n Here is a useful idiom for getting contiguous copies instead of\n non-contiguous views.\n\n >>> [np.array(a) for a in np.broadcast_arrays(x, y)]\n [array([[1, 2, 3],\n [1, 2, 3],\n [1, 2, 3]]), array([[1, 1, 1],\n [2, 2, 2],\n [3, 3, 3]])]\n\n \"\"\"\n # nditer is not used here to avoid the limit of 32 arrays.\n # Otherwise, something like the following one-liner would suffice:\n # return np.nditer(args, flags=['multi_index', 'zerosize_ok'],\n # order='C').itviews\n\n subok = kwargs.pop('subok', False)\n if kwargs:\n raise TypeError('broadcast_arrays() got an unexpected keyword '\n 'argument {}'.format(kwargs.pop()))\n args = [np.array(_m, copy=False, subok=subok) for _m in args]\n\n shape = _broadcast_shape(*args)\n\n if all(array.shape == shape for array in args):\n # Common case where nothing needs to be broadcasted.\n return args\n\n # TODO: consider making the results of broadcast_arrays readonly to match\n # broadcast_to. This will require a deprecation cycle.\n return [_broadcast_to(array, shape, subok=subok, readonly=False)\n for array in args]\n", "path": "numpy/lib/stride_tricks.py"}], "after_files": [{"content": "\"\"\"\nUtilities that manipulate strides to achieve desirable effects.\n\nAn explanation of strides can be found in the \"ndarray.rst\" file in the\nNumPy reference guide.\n\n\"\"\"\nfrom __future__ import division, absolute_import, print_function\n\nimport numpy as np\n\n__all__ = ['broadcast_to', 'broadcast_arrays']\n\n\nclass DummyArray(object):\n \"\"\"Dummy object that just exists to hang __array_interface__ dictionaries\n and possibly keep alive a reference to a base array.\n \"\"\"\n\n def __init__(self, interface, base=None):\n self.__array_interface__ = interface\n self.base = base\n\n\ndef _maybe_view_as_subclass(original_array, new_array):\n if type(original_array) is not type(new_array):\n # if input was an ndarray subclass and subclasses were OK,\n # then view the result as that subclass.\n new_array = new_array.view(type=type(original_array))\n # Since we have done something akin to a view from original_array, we\n # should let the subclass finalize (if it has it implemented, i.e., is\n # not None).\n if new_array.__array_finalize__:\n new_array.__array_finalize__(original_array)\n return new_array\n\n\ndef as_strided(x, shape=None, strides=None, subok=False):\n \"\"\" Make an ndarray from the given array with the given shape and strides.\n \"\"\"\n # first convert input to array, possibly keeping subclass\n x = np.array(x, copy=False, subok=subok)\n interface = dict(x.__array_interface__)\n if shape is not None:\n interface['shape'] = tuple(shape)\n if strides is not None:\n interface['strides'] = tuple(strides)\n array = np.asarray(DummyArray(interface, base=x))\n\n if array.dtype.fields is None and x.dtype.fields is not None:\n # This should only happen if x.dtype is [('', 'Vx')]\n array.dtype = x.dtype\n\n return _maybe_view_as_subclass(x, array)\n\n\ndef _broadcast_to(array, shape, subok, readonly):\n shape = tuple(shape) if np.iterable(shape) else (shape,)\n array = np.array(array, copy=False, subok=subok)\n if not shape and array.shape:\n raise ValueError('cannot broadcast a non-scalar to a scalar array')\n if any(size < 0 for size in shape):\n raise ValueError('all elements of 
broadcast shape must be non-'\n 'negative')\n broadcast = np.nditer((array,), flags=['multi_index', 'zerosize_ok'],\n op_flags=['readonly'], itershape=shape, order='C'\n ).itviews[0]\n result = _maybe_view_as_subclass(array, broadcast)\n if not readonly and array.flags.writeable:\n result.flags.writeable = True\n return result\n\n\ndef broadcast_to(array, shape, subok=False):\n \"\"\"Broadcast an array to a new shape.\n\n Parameters\n ----------\n array : array_like\n The array to broadcast.\n shape : tuple\n The shape of the desired array.\n subok : bool, optional\n If True, then sub-classes will be passed-through, otherwise\n the returned array will be forced to be a base-class array (default).\n\n Returns\n -------\n broadcast : array\n A readonly view on the original array with the given shape. It is\n typically not contiguous. Furthermore, more than one element of a\n broadcasted array may refer to a single memory location.\n\n Raises\n ------\n ValueError\n If the array is not compatible with the new shape according to NumPy's\n broadcasting rules.\n\n Examples\n --------\n >>> x = np.array([1, 2, 3])\n >>> np.broadcast_to(x, (3, 3))\n array([[1, 2, 3],\n [1, 2, 3],\n [1, 2, 3]])\n \"\"\"\n return _broadcast_to(array, shape, subok=subok, readonly=True)\n\n\ndef _broadcast_shape(*args):\n \"\"\"Returns the shape of the ararys that would result from broadcasting the\n supplied arrays against each other.\n \"\"\"\n if not args:\n raise ValueError('must provide at least one argument')\n if len(args) == 1:\n # a single argument does not work with np.broadcast\n return np.asarray(args[0]).shape\n # use the old-iterator because np.nditer does not handle size 0 arrays\n # consistently\n b = np.broadcast(*args[:32])\n # unfortunately, it cannot handle 32 or more arguments directly\n for pos in range(32, len(args), 31):\n b = np.broadcast(b, *args[pos:(pos + 31)])\n return b.shape\n\n\ndef broadcast_arrays(*args, **kwargs):\n \"\"\"\n Broadcast any number of arrays against each other.\n\n Parameters\n ----------\n `*args` : array_likes\n The arrays to broadcast.\n\n subok : bool, optional\n If True, then sub-classes will be passed-through, otherwise\n the returned arrays will be forced to be a base-class array (default).\n\n Returns\n -------\n broadcasted : list of arrays\n These arrays are views on the original arrays. They are typically\n not contiguous. Furthermore, more than one element of a\n broadcasted array may refer to a single memory location. 
If you\n need to write to the arrays, make copies first.\n\n Examples\n --------\n >>> x = np.array([[1,2,3]])\n >>> y = np.array([[1],[2],[3]])\n >>> np.broadcast_arrays(x, y)\n [array([[1, 2, 3],\n [1, 2, 3],\n [1, 2, 3]]), array([[1, 1, 1],\n [2, 2, 2],\n [3, 3, 3]])]\n\n Here is a useful idiom for getting contiguous copies instead of\n non-contiguous views.\n\n >>> [np.array(a) for a in np.broadcast_arrays(x, y)]\n [array([[1, 2, 3],\n [1, 2, 3],\n [1, 2, 3]]), array([[1, 1, 1],\n [2, 2, 2],\n [3, 3, 3]])]\n\n \"\"\"\n # nditer is not used here to avoid the limit of 32 arrays.\n # Otherwise, something like the following one-liner would suffice:\n # return np.nditer(args, flags=['multi_index', 'zerosize_ok'],\n # order='C').itviews\n\n subok = kwargs.pop('subok', False)\n if kwargs:\n raise TypeError('broadcast_arrays() got an unexpected keyword '\n 'argument {}'.format(kwargs.pop()))\n args = [np.array(_m, copy=False, subok=subok) for _m in args]\n\n shape = _broadcast_shape(*args)\n\n if all(array.shape == shape for array in args):\n # Common case where nothing needs to be broadcasted.\n return args\n\n # TODO: consider making the results of broadcast_arrays readonly to match\n # broadcast_to. This will require a deprecation cycle.\n return [_broadcast_to(array, shape, subok=subok, readonly=False)\n for array in args]\n", "path": "numpy/lib/stride_tricks.py"}]} | 2,646 | 166 |
gh_patches_debug_13462 | rasdani/github-patches | git_diff | cloud-custodian__cloud-custodian-4197 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Certifi python module missing when deploying cloudtrail lambda functions
The following error happens when the lambda function is triggered
`[ERROR] Runtime.ImportModuleError: Unable to import module 'custodian_policy': No module named 'certifi'`
As a workaround, I had to add the *certifi* package to the policy files.
```
policies:
- name: my-policy
description: my description
resource: my-resource
mode:
type: cloudtrail
packages:
- botocore
- boto3
- urllib3
- certifi
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `c7n/actions/webhook.py`
Content:
```
1 # Copyright 2019 Microsoft Corporation
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import json
16
17 import certifi
18 import jmespath
19 import urllib3
20 from six.moves.urllib import parse
21
22 from c7n import utils
23 from .core import EventAction
24
25
26 class Webhook(EventAction):
27 """Calls a webhook with optional parameters and body
28 populated from JMESPath queries.
29
30 .. code-block:: yaml
31
32 policies:
33 - name: call-webhook
34 resource: ec2
35 description: |
36 Call webhook with list of resource groups
37 actions:
38 - type: webhook
39 url: http://foo.com
40 query-params:
41 resource_name: resource.name
42 policy_name: policy.name
43 """
44
45 schema_alias = True
46 schema = utils.type_schema(
47 'webhook',
48 required=['url'],
49 **{
50 'url': {'type': 'string'},
51 'body': {'type': 'string'},
52 'batch': {'type': 'boolean'},
53 'batch-size': {'type': 'number'},
54 'method': {'type': 'string', 'enum': ['PUT', 'POST', 'GET', 'PATCH', 'DELETE']},
55 'query-params': {
56 "type": "object",
57 "additionalProperties": {
58 "type": "string",
59 "description": "query string values"
60 }
61 },
62 'headers': {
63 "type": "object",
64 "additionalProperties": {
65 "type": "string",
66 "description": "header values"
67 }
68 }
69 }
70 )
71
72 def __init__(self, data=None, manager=None, log_dir=None):
73 super(Webhook, self).__init__(data, manager, log_dir)
74 self.http = None
75 self.url = self.data.get('url')
76 self.body = self.data.get('body')
77 self.batch = self.data.get('batch', False)
78 self.batch_size = self.data.get('batch-size', 500)
79 self.query_params = self.data.get('query-params', {})
80 self.headers = self.data.get('headers', {})
81 self.method = self.data.get('method', 'POST')
82 self.lookup_data = {
83 'account_id': self.manager.config.account_id,
84 'region': self.manager.config.region,
85 'execution_id': self.manager.ctx.execution_id,
86 'execution_start': self.manager.ctx.start_time,
87 'policy': self.manager.data
88 }
89
90 def process(self, resources, event=None):
91 self.http = urllib3.PoolManager(
92 cert_reqs='CERT_REQUIRED',
93 ca_certs=certifi.where())
94
95 if self.batch:
96 for chunk in utils.chunks(resources, self.batch_size):
97 resource_data = self.lookup_data
98 resource_data['resources'] = chunk
99 self._process_call(resource_data)
100 else:
101 for r in resources:
102 resource_data = self.lookup_data
103 resource_data['resource'] = r
104 self._process_call(resource_data)
105
106 def _process_call(self, resource):
107 prepared_url = self._build_url(resource)
108 prepared_body = self._build_body(resource)
109 prepared_headers = self._build_headers(resource)
110
111 if prepared_body:
112 prepared_headers['Content-Type'] = 'application/json'
113
114 try:
115 res = self.http.request(
116 method=self.method,
117 url=prepared_url,
118 body=prepared_body,
119 headers=prepared_headers)
120
121 self.log.info("%s got response %s with URL %s" %
122 (self.method, res.status, prepared_url))
123 except urllib3.exceptions.HTTPError as e:
124 self.log.error("Error calling %s. Code: %s" % (prepared_url, e.reason))
125
126 def _build_headers(self, resource):
127 return {k: jmespath.search(v, resource) for k, v in self.headers.items()}
128
129 def _build_url(self, resource):
130 """
131 Compose URL with query string parameters.
132
133 Will not lose existing static parameters in the URL string
134 but does not support 'duplicate' parameter entries
135 """
136
137 if not self.query_params:
138 return self.url
139
140 evaluated_params = {k: jmespath.search(v, resource) for k, v in self.query_params.items()}
141
142 url_parts = list(parse.urlparse(self.url))
143 query = dict(parse.parse_qsl(url_parts[4]))
144 query.update(evaluated_params)
145 url_parts[4] = parse.urlencode(query)
146
147 return parse.urlunparse(url_parts)
148
149 def _build_body(self, resource):
150 """Create a JSON body and dump it to encoded bytes."""
151
152 if not self.body:
153 return None
154
155 return json.dumps(jmespath.search(self.body, resource)).encode('utf-8')
156
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/c7n/actions/webhook.py b/c7n/actions/webhook.py
--- a/c7n/actions/webhook.py
+++ b/c7n/actions/webhook.py
@@ -14,7 +14,11 @@
import json
-import certifi
+try:
+ import certifi
+except ImportError:
+ certifi = None
+
import jmespath
import urllib3
from six.moves.urllib import parse
@@ -90,7 +94,7 @@
def process(self, resources, event=None):
self.http = urllib3.PoolManager(
cert_reqs='CERT_REQUIRED',
- ca_certs=certifi.where())
+ ca_certs=certifi and certifi.where() or None)
if self.batch:
for chunk in utils.chunks(resources, self.batch_size):
| {"golden_diff": "diff --git a/c7n/actions/webhook.py b/c7n/actions/webhook.py\n--- a/c7n/actions/webhook.py\n+++ b/c7n/actions/webhook.py\n@@ -14,7 +14,11 @@\n \n import json\n \n-import certifi\n+try:\n+ import certifi\n+except ImportError:\n+ certifi = None\n+\n import jmespath\n import urllib3\n from six.moves.urllib import parse\n@@ -90,7 +94,7 @@\n def process(self, resources, event=None):\n self.http = urllib3.PoolManager(\n cert_reqs='CERT_REQUIRED',\n- ca_certs=certifi.where())\n+ ca_certs=certifi and certifi.where() or None)\n \n if self.batch:\n for chunk in utils.chunks(resources, self.batch_size):\n", "issue": "Certifi python module missing when deploying cloudtrail lambda functions\nThe following error happens when the lambda function is triggered\r\n\r\n`[ERROR] Runtime.ImportModuleError: Unable to import module 'custodian_policy': No module named 'certifi'`\r\n\r\nAs a workaround, I had to add the *certifi* package to the policy files.\r\n\r\n```\r\npolicies:\r\n - name: my-policy\r\n description: my description\r\n resource: my-resource\r\n mode:\r\n type: cloudtrail\r\n packages:\r\n - botocore\r\n - boto3\r\n - urllib3\r\n - certifi\r\n```\n", "before_files": [{"content": "# Copyright 2019 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport json\n\nimport certifi\nimport jmespath\nimport urllib3\nfrom six.moves.urllib import parse\n\nfrom c7n import utils\nfrom .core import EventAction\n\n\nclass Webhook(EventAction):\n \"\"\"Calls a webhook with optional parameters and body\n populated from JMESPath queries.\n\n .. 
code-block:: yaml\n\n policies:\n - name: call-webhook\n resource: ec2\n description: |\n Call webhook with list of resource groups\n actions:\n - type: webhook\n url: http://foo.com\n query-params:\n resource_name: resource.name\n policy_name: policy.name\n \"\"\"\n\n schema_alias = True\n schema = utils.type_schema(\n 'webhook',\n required=['url'],\n **{\n 'url': {'type': 'string'},\n 'body': {'type': 'string'},\n 'batch': {'type': 'boolean'},\n 'batch-size': {'type': 'number'},\n 'method': {'type': 'string', 'enum': ['PUT', 'POST', 'GET', 'PATCH', 'DELETE']},\n 'query-params': {\n \"type\": \"object\",\n \"additionalProperties\": {\n \"type\": \"string\",\n \"description\": \"query string values\"\n }\n },\n 'headers': {\n \"type\": \"object\",\n \"additionalProperties\": {\n \"type\": \"string\",\n \"description\": \"header values\"\n }\n }\n }\n )\n\n def __init__(self, data=None, manager=None, log_dir=None):\n super(Webhook, self).__init__(data, manager, log_dir)\n self.http = None\n self.url = self.data.get('url')\n self.body = self.data.get('body')\n self.batch = self.data.get('batch', False)\n self.batch_size = self.data.get('batch-size', 500)\n self.query_params = self.data.get('query-params', {})\n self.headers = self.data.get('headers', {})\n self.method = self.data.get('method', 'POST')\n self.lookup_data = {\n 'account_id': self.manager.config.account_id,\n 'region': self.manager.config.region,\n 'execution_id': self.manager.ctx.execution_id,\n 'execution_start': self.manager.ctx.start_time,\n 'policy': self.manager.data\n }\n\n def process(self, resources, event=None):\n self.http = urllib3.PoolManager(\n cert_reqs='CERT_REQUIRED',\n ca_certs=certifi.where())\n\n if self.batch:\n for chunk in utils.chunks(resources, self.batch_size):\n resource_data = self.lookup_data\n resource_data['resources'] = chunk\n self._process_call(resource_data)\n else:\n for r in resources:\n resource_data = self.lookup_data\n resource_data['resource'] = r\n self._process_call(resource_data)\n\n def _process_call(self, resource):\n prepared_url = self._build_url(resource)\n prepared_body = self._build_body(resource)\n prepared_headers = self._build_headers(resource)\n\n if prepared_body:\n prepared_headers['Content-Type'] = 'application/json'\n\n try:\n res = self.http.request(\n method=self.method,\n url=prepared_url,\n body=prepared_body,\n headers=prepared_headers)\n\n self.log.info(\"%s got response %s with URL %s\" %\n (self.method, res.status, prepared_url))\n except urllib3.exceptions.HTTPError as e:\n self.log.error(\"Error calling %s. 
Code: %s\" % (prepared_url, e.reason))\n\n def _build_headers(self, resource):\n return {k: jmespath.search(v, resource) for k, v in self.headers.items()}\n\n def _build_url(self, resource):\n \"\"\"\n Compose URL with query string parameters.\n\n Will not lose existing static parameters in the URL string\n but does not support 'duplicate' parameter entries\n \"\"\"\n\n if not self.query_params:\n return self.url\n\n evaluated_params = {k: jmespath.search(v, resource) for k, v in self.query_params.items()}\n\n url_parts = list(parse.urlparse(self.url))\n query = dict(parse.parse_qsl(url_parts[4]))\n query.update(evaluated_params)\n url_parts[4] = parse.urlencode(query)\n\n return parse.urlunparse(url_parts)\n\n def _build_body(self, resource):\n \"\"\"Create a JSON body and dump it to encoded bytes.\"\"\"\n\n if not self.body:\n return None\n\n return json.dumps(jmespath.search(self.body, resource)).encode('utf-8')\n", "path": "c7n/actions/webhook.py"}], "after_files": [{"content": "# Copyright 2019 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport json\n\ntry:\n import certifi\nexcept ImportError:\n certifi = None\n\nimport jmespath\nimport urllib3\nfrom six.moves.urllib import parse\n\nfrom c7n import utils\nfrom .core import EventAction\n\n\nclass Webhook(EventAction):\n \"\"\"Calls a webhook with optional parameters and body\n populated from JMESPath queries.\n\n .. 
code-block:: yaml\n\n policies:\n - name: call-webhook\n resource: ec2\n description: |\n Call webhook with list of resource groups\n actions:\n - type: webhook\n url: http://foo.com\n query-params:\n resource_name: resource.name\n policy_name: policy.name\n \"\"\"\n\n schema_alias = True\n schema = utils.type_schema(\n 'webhook',\n required=['url'],\n **{\n 'url': {'type': 'string'},\n 'body': {'type': 'string'},\n 'batch': {'type': 'boolean'},\n 'batch-size': {'type': 'number'},\n 'method': {'type': 'string', 'enum': ['PUT', 'POST', 'GET', 'PATCH', 'DELETE']},\n 'query-params': {\n \"type\": \"object\",\n \"additionalProperties\": {\n \"type\": \"string\",\n \"description\": \"query string values\"\n }\n },\n 'headers': {\n \"type\": \"object\",\n \"additionalProperties\": {\n \"type\": \"string\",\n \"description\": \"header values\"\n }\n }\n }\n )\n\n def __init__(self, data=None, manager=None, log_dir=None):\n super(Webhook, self).__init__(data, manager, log_dir)\n self.http = None\n self.url = self.data.get('url')\n self.body = self.data.get('body')\n self.batch = self.data.get('batch', False)\n self.batch_size = self.data.get('batch-size', 500)\n self.query_params = self.data.get('query-params', {})\n self.headers = self.data.get('headers', {})\n self.method = self.data.get('method', 'POST')\n self.lookup_data = {\n 'account_id': self.manager.config.account_id,\n 'region': self.manager.config.region,\n 'execution_id': self.manager.ctx.execution_id,\n 'execution_start': self.manager.ctx.start_time,\n 'policy': self.manager.data\n }\n\n def process(self, resources, event=None):\n self.http = urllib3.PoolManager(\n cert_reqs='CERT_REQUIRED',\n ca_certs=certifi and certifi.where() or None)\n\n if self.batch:\n for chunk in utils.chunks(resources, self.batch_size):\n resource_data = self.lookup_data\n resource_data['resources'] = chunk\n self._process_call(resource_data)\n else:\n for r in resources:\n resource_data = self.lookup_data\n resource_data['resource'] = r\n self._process_call(resource_data)\n\n def _process_call(self, resource):\n prepared_url = self._build_url(resource)\n prepared_body = self._build_body(resource)\n prepared_headers = self._build_headers(resource)\n\n if prepared_body:\n prepared_headers['Content-Type'] = 'application/json'\n\n try:\n res = self.http.request(\n method=self.method,\n url=prepared_url,\n body=prepared_body,\n headers=prepared_headers)\n\n self.log.info(\"%s got response %s with URL %s\" %\n (self.method, res.status, prepared_url))\n except urllib3.exceptions.HTTPError as e:\n self.log.error(\"Error calling %s. 
Code: %s\" % (prepared_url, e.reason))\n\n def _build_headers(self, resource):\n return {k: jmespath.search(v, resource) for k, v in self.headers.items()}\n\n def _build_url(self, resource):\n \"\"\"\n Compose URL with query string parameters.\n\n Will not lose existing static parameters in the URL string\n but does not support 'duplicate' parameter entries\n \"\"\"\n\n if not self.query_params:\n return self.url\n\n evaluated_params = {k: jmespath.search(v, resource) for k, v in self.query_params.items()}\n\n url_parts = list(parse.urlparse(self.url))\n query = dict(parse.parse_qsl(url_parts[4]))\n query.update(evaluated_params)\n url_parts[4] = parse.urlencode(query)\n\n return parse.urlunparse(url_parts)\n\n def _build_body(self, resource):\n \"\"\"Create a JSON body and dump it to encoded bytes.\"\"\"\n\n if not self.body:\n return None\n\n return json.dumps(jmespath.search(self.body, resource)).encode('utf-8')\n", "path": "c7n/actions/webhook.py"}]} | 1,879 | 183 |