repo (string) | pull_number (int64) | instance_id (string) | issue_numbers (sequence) | base_commit (string) | patch (string) | test_patch (string) | problem_statement (string) | hints_text (string) | created_at (timestamp[s])
---|---|---|---|---|---|---|---|---|---|
sanic-org/sanic | 2416 | sanic-org__sanic-2416 | [
"2325"
] | 49789b784183873a9deca550ac82ebb461a43967 | diff --git a/sanic/server/protocols/http_protocol.py b/sanic/server/protocols/http_protocol.py
--- a/sanic/server/protocols/http_protocol.py
+++ b/sanic/server/protocols/http_protocol.py
@@ -8,6 +8,8 @@
if TYPE_CHECKING: # no cov
from sanic.app import Sanic
+import sys
+
from asyncio import CancelledError
from time import monotonic as current_time
@@ -169,7 +171,10 @@ def check_timeouts(self):
)
self.loop.call_later(max(0.1, interval), self.check_timeouts)
return
- self._task.cancel()
+ cancel_msg_args = ()
+ if sys.version_info >= (3, 9):
+ cancel_msg_args = ("Cancel connection task with a timeout",)
+ self._task.cancel(*cancel_msg_args)
except Exception:
error_logger.exception("protocol.check_timeouts")
| Cancel tasks with a message where appropriate
[cancel()](https://docs.python.org/3/library/asyncio-task.html#asyncio.Task.cancel) also accepts a "msg" argument; might it be a good idea for this "message" to be added to places like https://github.com/sanic-org/sanic/blob/f7abf3db1bd4e79cd5121327359fc9021fab7ff3/sanic/server/protocols/http_protocol.py#L172 that are otherwise calling cancel() with no explanatory message? If this is the CancelledError this user is getting, a simple message there would save everyone a lot of time.
_Originally posted by @zzzeek in https://github.com/sanic-org/sanic/issues/2296#issuecomment-983881945_
---
Where we are able to in Py3.9, we should add a message to `cancel()`.
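A minimal, self-contained sketch of the version-gated call (the variable name mirrors the patch above; the rest is illustrative):
```python
import asyncio
import sys


async def main():
    task = asyncio.create_task(asyncio.sleep(10))
    # Task.cancel() only accepts a message argument on Python 3.9+
    cancel_msg_args = ()
    if sys.version_info >= (3, 9):
        cancel_msg_args = ("Cancel connection task with a timeout",)
    task.cancel(*cancel_msg_args)
    try:
        await task
    except asyncio.CancelledError as e:
        print(e.args)  # ('Cancel connection task with a timeout',) on 3.9+


asyncio.run(main())
```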
| This issue has been automatically marked as stale because it has not had recent activity. It will be closed if no further activity occurs. If this is incorrect, please respond with an update. Thank you for your contributions.
...
Can I have this?
Sold. | 2022-03-25T05:15:32 |
|
sanic-org/sanic | 2438 | sanic-org__sanic-2438 | [
"2437"
] | cc97287f8eea5bf8d5c3af42586f1e802035439b | diff --git a/sanic/__init__.py b/sanic/__init__.py
--- a/sanic/__init__.py
+++ b/sanic/__init__.py
@@ -4,6 +4,7 @@
from sanic.constants import HTTPMethod
from sanic.request import Request
from sanic.response import HTTPResponse, html, json, text
+from sanic.server.websockets.impl import WebsocketImplProtocol as Websocket
__all__ = (
@@ -13,6 +14,7 @@
"HTTPMethod",
"HTTPResponse",
"Request",
+ "Websocket",
"html",
"json",
"text",
| Easier websocket interface annotation
Right now, to properly annotate a websocket endpoint you need to do this:
```python
from sanic.server.websockets.impl import WebsocketImplProtocol
from sanic import Request
@app.websocket("")
async def handler(request: Request, ws: WebsocketImplProtocol):
    ...
```
That is not easy or intuitive.
This would be much nicer:
```python
from sanic import Request, Websocket
@app.websocket("")
async def handler(request: Request, ws: Websocket):
    ...
```
We should just alias and put it inside `__init__.py` for convenience.
| 2022-04-24T08:58:05 |
||
sanic-org/sanic | 2439 | sanic-org__sanic-2439 | [
"2427"
] | 3a6cc7389c5166a0c9580daf5d709a0dee85a996 | diff --git a/sanic/request.py b/sanic/request.py
--- a/sanic/request.py
+++ b/sanic/request.py
@@ -152,8 +152,8 @@ def __init__(
self.parsed_accept: Optional[AcceptContainer] = None
self.parsed_credentials: Optional[Credentials] = None
self.parsed_json = None
- self.parsed_form = None
- self.parsed_files = None
+ self.parsed_form: Optional[RequestParameters] = None
+ self.parsed_files: Optional[RequestParameters] = None
self.parsed_token: Optional[str] = None
self.parsed_args: DefaultDict[
Tuple[bool, bool, str, str], RequestParameters
@@ -426,28 +426,40 @@ def credentials(self) -> Optional[Credentials]:
pass
return self.parsed_credentials
+ def get_form(
+ self, keep_blank_values: bool = False
+ ) -> Optional[RequestParameters]:
+ self.parsed_form = RequestParameters()
+ self.parsed_files = RequestParameters()
+ content_type = self.headers.getone(
+ "content-type", DEFAULT_HTTP_CONTENT_TYPE
+ )
+ content_type, parameters = parse_content_header(content_type)
+ try:
+ if content_type == "application/x-www-form-urlencoded":
+ self.parsed_form = RequestParameters(
+ parse_qs(
+ self.body.decode("utf-8"),
+ keep_blank_values=keep_blank_values,
+ )
+ )
+ elif content_type == "multipart/form-data":
+ # TODO: Stream this instead of reading to/from memory
+ boundary = parameters["boundary"].encode( # type: ignore
+ "utf-8"
+ ) # type: ignore
+ self.parsed_form, self.parsed_files = parse_multipart_form(
+ self.body, boundary
+ )
+ except Exception:
+ error_logger.exception("Failed when parsing form")
+
+ return self.parsed_form
+
@property
def form(self):
if self.parsed_form is None:
- self.parsed_form = RequestParameters()
- self.parsed_files = RequestParameters()
- content_type = self.headers.getone(
- "content-type", DEFAULT_HTTP_CONTENT_TYPE
- )
- content_type, parameters = parse_content_header(content_type)
- try:
- if content_type == "application/x-www-form-urlencoded":
- self.parsed_form = RequestParameters(
- parse_qs(self.body.decode("utf-8"))
- )
- elif content_type == "multipart/form-data":
- # TODO: Stream this instead of reading to/from memory
- boundary = parameters["boundary"].encode("utf-8")
- self.parsed_form, self.parsed_files = parse_multipart_form(
- self.body, boundary
- )
- except Exception:
- error_logger.exception("Failed when parsing form")
+ self.get_form()
return self.parsed_form
| diff --git a/tests/test_requests.py b/tests/test_requests.py
--- a/tests/test_requests.py
+++ b/tests/test_requests.py
@@ -1016,6 +1016,72 @@ async def handler(request):
assert request.form.get("test") == "OK" # For request.parsed_form
+def test_post_form_urlencoded_keep_blanks(app):
+ @app.route("/", methods=["POST"])
+ async def handler(request):
+ request.get_form(keep_blank_values=True)
+ return text("OK")
+
+ payload = "test="
+ headers = {"content-type": "application/x-www-form-urlencoded"}
+
+ request, response = app.test_client.post(
+ "/", data=payload, headers=headers
+ )
+
+ assert request.form.get("test") == ""
+ assert request.form.get("test") == "" # For request.parsed_form
+
+
[email protected]
+async def test_post_form_urlencoded_keep_blanks_asgi(app):
+ @app.route("/", methods=["POST"])
+ async def handler(request):
+ request.get_form(keep_blank_values=True)
+ return text("OK")
+
+ payload = "test="
+ headers = {"content-type": "application/x-www-form-urlencoded"}
+
+ request, response = await app.asgi_client.post(
+ "/", data=payload, headers=headers
+ )
+
+ assert request.form.get("test") == ""
+ assert request.form.get("test") == "" # For request.parsed_form
+
+
+
+def test_post_form_urlencoded_drop_blanks(app):
+ @app.route("/", methods=["POST"])
+ async def handler(request):
+ return text("OK")
+
+ payload = "test="
+ headers = {"content-type": "application/x-www-form-urlencoded"}
+
+ request, response = app.test_client.post(
+ "/", data=payload, headers=headers
+ )
+
+ assert "test" not in request.form.keys()
+
[email protected]
+async def test_post_form_urlencoded_drop_blanks_asgi(app):
+ @app.route("/", methods=["POST"])
+ async def handler(request):
+ return text("OK")
+
+ payload = "test="
+ headers = {"content-type": "application/x-www-form-urlencoded"}
+
+ request, response = await app.asgi_client.post(
+ "/", data=payload, headers=headers
+ )
+
+ assert "test" not in request.form.keys()
+
+
@pytest.mark.parametrize(
"payload",
[
| request.form uses urllib.parse_qs which does not persist blank values by default when requests submit application/x-www-form-urlencoded data
**Describe the bug**
This seems to be an acknowledged limitation of parse_qs, but I'm raising as a bug as this introduces undesired behaviour rather than adding new features.
While `request.get_args` and `request.get_query_args` accept the `keep_blank_values` argument in order to persist blanks, `request.form` does not provide an interface to persist blank values.
My assertion is that a framework should not remove data simply because it doesn't have a value.
Relevant anecdote: Twilio webhooks submit a POST request using `application/x-www-form-urlencoded` encoded body. The body includes all values including some potentially blank/empty values.
When the request is processed by Sanic, it is decoded using `urllib.parse_qs` and does not have a mechanism to allow blank elements to persist. This results in missing elements from the request when accessing the variables from `request.form`.
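The root cause is the default behaviour of the standard library's `parse_qs`, shown here for illustration:
```python
from urllib.parse import parse_qs

payload = "test=&name=sanic"
print(parse_qs(payload))                          # {'name': ['sanic']} -- blank value dropped
print(parse_qs(payload, keep_blank_values=True))  # {'test': [''], 'name': ['sanic']}
```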
In my case, the elements from the submission are relevant to generate a signature for request validation (see https://www.twilio.com/docs/usage/security). The signature is constructed using SHA1 hash of the submitted request key/value pairs and missing pairs generates a different signature to that expected.
In this instance, I have been receiving requests which include two elements that, depending on the hook, may or may not have a value.
**Expected behavior**
All variables, irrespective of existence of a value should be passed to the application.
**Environment (please complete the following information):**
- Version v22.3.0
| @pobk Thanks for the heads up. I agree that we should not be dropping blank values as the parameter in and of itself is a value that may be necessary.
Just popping here to say that changing its behaviour now is probably a breaking change.
A different approach to avoid sudden breaking changes could be adding a method like `get_query_args` but for `get_form_args`, to which users can pass arguments for the `parse_qs` method.
If we want to change the behaviour of the `.form` attribute, we could also add a warning when accessing it, telling users that from a certain version we will no longer omit blank values by default, although in that case we would probably want to do the same with any other attributes (e.g. query strings) for consistency.
This can be done as a non-breaking change where the behavior defaults to what it is currently, which should be fine. Already working on it, just need some tests before I submit a PR. | 2022-04-24T13:26:38 |
sanic-org/sanic | 2452 | sanic-org__sanic-2452 | [
"2450"
] | 5d683c6ea4b615e80c51d80189436437b824cce6 | diff --git a/sanic/handlers.py b/sanic/handlers.py
--- a/sanic/handlers.py
+++ b/sanic/handlers.py
@@ -78,7 +78,7 @@ def _warn_fallback_deprecation():
@classmethod
def _get_fallback_value(cls, error_handler: ErrorHandler, config: Config):
if error_handler._fallback is not _default:
- if config._FALLBACK_ERROR_FORMAT is _default:
+ if config._FALLBACK_ERROR_FORMAT == error_handler._fallback:
return error_handler.fallback
error_logger.warning(
| Error Handler mismatch warning
The warning for error handler mismatch is triggering on v22.3 accidentally when setting `FALLBACK_ERROR_FORMAT`.
```python
app.config.FALLBACK_ERROR_FORMAT = "text"
@app.get("/")
async def handler(request: Request):
    1 / 0
```
This can be resolved as follows:
```python
@classmethod
def _get_fallback_value(cls, error_handler: ErrorHandler, config: Config):
    if error_handler._fallback is not _default:
        if config._FALLBACK_ERROR_FORMAT == error_handler._fallback:  # <<<<< This line needs this change
            return error_handler.fallback
        error_logger.warning(
            "Conflicting error fallback values were found in the "
            "error handler and in the app.config while handling an "
            "exception. Using the value from app.config."
        )
    return config.FALLBACK_ERROR_FORMAT
```
https://github.com/sanic-org/sanic/blob/5d683c6ea4b615e80c51d80189436437b824cce6/sanic/handlers.py#L79
| 2022-05-10T19:32:28 |
||
sanic-org/sanic | 2463 | sanic-org__sanic-2463 | [
"2462"
] | 86ae5f981cbe64fd85bee34fee3989b009e90dd2 | diff --git a/sanic/app.py b/sanic/app.py
--- a/sanic/app.py
+++ b/sanic/app.py
@@ -992,10 +992,10 @@ async def _websocket_handler(
cancelled = False
try:
await fut
- except Exception as e:
- self.error_handler.log(request, e)
except (CancelledError, ConnectionClosed):
cancelled = True
+ except Exception as e:
+ self.error_handler.log(request, e)
finally:
self.websocket_tasks.remove(fut)
if cancelled:
| Websocket disconnection and server task pending error
**Describe the bug**
I am a complete newbie trying to use Sanic with websockets. I was following the websockets tutorial in the Sanic documentation, and when I disconnected the websocket connection I found this in the console.
```
[2022-05-23 19:43:15 +0800] [14879] [ERROR] Exception occurred while handling uri: 'ws://10.215.220.4:4017/test/'
Traceback (most recent call last):
File "/home/yuzixin/workspace/sanicserver/venv/lib/python3.7/site-packages/sanic/app.py", line 994, in _websocket_handler
await fut
concurrent.futures._base.CancelledError
```
followed by this error message when the server was shut down.
```
Task was destroyed but it is pending!
task: <Task pending coro=<WebsocketFrameAssembler.get() done, defined at /home/yuzixin/workspace/sanicserver/venv/lib/python3.7/site-packages/sanic/server/websockets/frame.py:91> wait_for=<Future pending cb=[<TaskWakeupMethWrapper object at 0x7ff6e9bafa98>()]> cb=[_wait.<locals>._on_completion() at /usr/lib/python3.7/asyncio/tasks.py:440]>
```
So I checked the lines where the error was triggered and found this at sanic/app.py lines 993-999:
```python
try:
    await fut
except Exception as e:
    self.error_handler.log(request, e)
except (CancelledError, ConnectionClosed):
    cancelled = True
finally:
    ...
```
It seemed like the CancelledError branch could never be reached.
I tried switching the two except clauses and it all worked fine. I am wondering if that modification is correct, or if the order of the exceptions was intentional.
**Code snippet**
```python
from sanic import Sanic
app = Sanic("MyHelloWorldApp")
@app.websocket("/test")
async def handler(request, ws):
    while True:
        message = await ws.recv()
        await ws.send(message)
```
**Expected behavior**
Expected no errors at disconnect behavior.
**Environment (please complete the following information):**
- OS: Debian
- Version 10
| Hi @jrayu
~~What version of Python are you using?~~
I see you are using Python v3.7.
In Python v3.8 and newer, `CancelledError` is a subclass of `BaseException`, not `Exception` so it should not be caught by the first Exception handler. I think all of our testing on the new websockets implementation was done in Python 3.8.
Having said that, I know Sanic should still be Python 3.7 compatible, so this is still a bug, and those two lines could certainly be swapped around without hurting anything.
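The difference is easy to verify with the standard library alone:
```python
import asyncio

# On Python 3.8+, CancelledError derives from BaseException, so a bare
# `except Exception` no longer swallows it; on 3.7 it does, which is why
# the handler order matters there.
print(issubclass(asyncio.CancelledError, Exception))  # False on 3.8+, True on 3.7
```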
> Hi @jrayu What version of Python are you using?
>
> In python v3.8 and newer, `CancelledError` is a subclass of `BaseException`, not `Exception` so it should not be caught by the first Exception handler.
>
> Having said that, I know Sanic should still be Python 3.7 compatible, so this is still a bug, and those two lines could certainly be swapped around without hurting anything.
Thanks for the reply. I am using python3.7 as you have guessed. I will try to upgrade it and see if it works.
| 2022-05-23T13:06:53 |
|
sanic-org/sanic | 2495 | sanic-org__sanic-2495 | [
"2478"
] | 312ab298fd8d4b17757a9212c52262d115b48d9f | diff --git a/sanic/mixins/routes.py b/sanic/mixins/routes.py
--- a/sanic/mixins/routes.py
+++ b/sanic/mixins/routes.py
@@ -3,9 +3,8 @@
from functools import partial, wraps
from inspect import getsource, signature
from mimetypes import guess_type
-from os import path
+from os import path, sep
from pathlib import PurePath
-from re import sub
from textwrap import dedent
from time import gmtime, strftime
from typing import (
@@ -806,23 +805,23 @@ async def _static_request_handler(
content_type=None,
__file_uri__=None,
):
- # Using this to determine if the URL is trying to break out of the path
- # served. os.path.realpath seems to be very slow
- if __file_uri__ and "../" in __file_uri__:
- raise BadRequest("Invalid URL")
# Merge served directory and requested file if provided
- # Strip all / that in the beginning of the URL to help prevent python
- # from herping a derp and treating the uri as an absolute path
- root_path = file_path = file_or_directory
+ root_path = file_path = path.abspath(unquote(file_or_directory))
+
if __file_uri__:
- file_path = path.join(
- file_or_directory, sub("^[/]*", "", __file_uri__)
- )
+ # Strip all / that in the beginning of the URL to help prevent
+ # python from herping a derp and treating the uri as an
+ # absolute path
+ unquoted_file_uri = unquote(__file_uri__).lstrip("/")
+
+ segments = unquoted_file_uri.split("/")
+ if ".." in segments or any(sep in segment for segment in segments):
+ raise BadRequest("Invalid URL")
+
+ file_path = path.join(file_or_directory, unquoted_file_uri)
+ file_path = path.abspath(file_path)
- # URL decode the path sent by the browser otherwise we won't be able to
- # match filenames which got encoded (filenames with spaces etc)
- file_path = path.abspath(unquote(file_path))
- if not file_path.startswith(path.abspath(unquote(root_path))):
+ if not file_path.startswith(root_path):
error_logger.exception(
f"File not found: path={file_or_directory}, "
f"relative_url={__file_uri__}"
| diff --git a/tests/test_static.py b/tests/test_static.py
--- a/tests/test_static.py
+++ b/tests/test_static.py
@@ -1,6 +1,7 @@
import inspect
import logging
import os
+import sys
from collections import Counter
from pathlib import Path
@@ -8,7 +9,7 @@
import pytest
-from sanic import text
+from sanic import Sanic, text
from sanic.exceptions import FileNotFound
@@ -21,6 +22,22 @@ def static_file_directory():
return static_directory
[email protected](scope="module")
+def double_dotted_directory_file(static_file_directory: str):
+ """Generate double dotted directory and its files"""
+ if sys.platform == "win32":
+ raise Exception("Windows doesn't support double dotted directories")
+
+ file_path = Path(static_file_directory) / "dotted.." / "dot.txt"
+ double_dotted_dir = file_path.parent
+ Path.mkdir(double_dotted_dir, exist_ok=True)
+ with open(file_path, "w") as f:
+ f.write("DOT\n")
+ yield file_path
+ Path.unlink(file_path)
+ Path.rmdir(double_dotted_dir)
+
+
def get_file_path(static_file_directory, file_name):
return os.path.join(static_file_directory, file_name)
@@ -578,3 +595,40 @@ def test_resource_type_dir(app, static_file_directory):
def test_resource_type_unknown(app, static_file_directory, caplog):
with pytest.raises(ValueError):
app.static("/static", static_file_directory, resource_type="unknown")
+
+
[email protected](
+ sys.platform == "win32",
+ reason="Windows does not support double dotted directories",
+)
+def test_dotted_dir_ok(
+ app: Sanic, static_file_directory: str, double_dotted_directory_file: Path
+):
+ app.static("/foo", static_file_directory)
+ double_dotted_directory_file = str(double_dotted_directory_file).lstrip(
+ static_file_directory
+ )
+ _, response = app.test_client.get("/foo/" + double_dotted_directory_file)
+ assert response.status == 200
+ assert response.body == b"DOT\n"
+
+
+def test_breakout(app: Sanic, static_file_directory: str):
+ app.static("/foo", static_file_directory)
+
+ _, response = app.test_client.get("/foo/..%2Fstatic/test.file")
+ assert response.status == 400
+
+
[email protected](
+ sys.platform != "win32", reason="Block backslash on Windows only"
+)
+def test_double_backslash_prohibited_on_win32(
+ app: Sanic, static_file_directory: str
+):
+ app.static("/foo", static_file_directory)
+
+ _, response = app.test_client.get("/foo/static/..\\static/test.file")
+ assert response.status == 400
+ _, response = app.test_client.get("/foo/static\\../static/test.file")
+ assert response.status == 400
| Sanic static handler allows parent ".." directory traversal
**Describe the bug**
The sanic static directory code checks for `../` as a substring of paths, but it also unquotes the path, which allows a malicious user to escape outside the static folder by using `..%2F`, where `%2F` is the URL-escaped version of `/`.
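The bypass can be reproduced with the standard library alone; the substring check runs on the raw URI, so it never sees the decoded slash:
```python
from urllib.parse import unquote

uri = "..%2Fstatic_files/a.txt"
print("../" in uri)           # False -- the raw URI passes the substring check
print("../" in unquote(uri))  # True -- the traversal only appears after decoding
```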
**Code snippet**
First, a basic server called `main.py`.
```python
from sanic import Sanic
app = Sanic(name="sanic_test")
app.static('/static', './static_files')
if __name__ == "__main__":
    app.run(host="0.0.0.0", port=8000)
```
Then create a static file folder.
```
mkdir static_files
cat "hello world" > static_files/a.txt
```
Now run the server with `python3 main.py` and:
```sh-session
$ curl http://localhost:8000/static/a.txt
hello world
$ curl http://localhost:8000/static/..%2Fstatic_files/a.txt
hello world
```
This is very surprising behavior. From a security perspective it is not critical, because Sanic checks that the final resolved path is prefixed by the static directory, but this allows an attacker to expose information like the name of the static file folder.
Another case where this is dangerous is if you have a middleware that only allows a user to see certain subpaths like `/static/public/**` of the `/static/**` routes without authentication. Then, even without authentication, a user could visit a path like `/static/public/..%2F/private/secret_content.txt` and retrieve the contents of `/static/private/secret_content.txt`.
**Expected behavior**
Sanic should not allow parent directory traversal in static folders.
**Environment (please complete the following information):**
- OS: macOS and Linux
- Version 22.3.2
| Related to #2477.
I suppose this can also be exploited by using backslashes, which on Windows would be interpreted as path separators. I suggest denying any escaped slashes and plain backslashes. Processing of `..` (and `.`) ought to be preserved in a logical form, possibly yielding a redirect response, but never traversing parents on the actual filesystem. Somewhat related, all path elements that begin with a dot could be banned (avoiding access to hidden files, which also could be leaking data not intended public).
Python `pathlib` module would be a reasonable option for path processing, avoiding Sanic's own parsing, but concerns include performance and the said special cases with slashes being escaped. Also, whether URL sanitation (avoiding `.` `..` `//` and other oddities) should be applied on all requests and not only static files.
Just my 5 cents. I didn't look at the current code, and in any case this probably needs more thought for a solid implementation.
Most browsers and HTTP clients (like cURL) will automatically apply [path normalization](https://en.wikipedia.org/wiki/URI_normalization#Normalizations_that_preserve_semantics) to URLs, which includes removing dot segments. It's not really necessary to process `..` as a result.
~~Funnily enough it looks like we already do some kind of check, but we do so before unquoting it :S~~
EDIT: I misunderstood the issue, my bad! | 2022-07-10T14:02:28 |
sanic-org/sanic | 2515 | sanic-org__sanic-2515 | [
"2514"
] | e4999401ab3bbb00d7fda7067e1dbed8533d7eb5 | diff --git a/sanic/blueprints.py b/sanic/blueprints.py
--- a/sanic/blueprints.py
+++ b/sanic/blueprints.py
@@ -308,7 +308,7 @@ def register(self, app, options):
# prefixed properly in the router
future.handler.__blueprintname__ = self.name
# Prepend the blueprint URI prefix if available
- uri = url_prefix + future.uri if url_prefix else future.uri
+ uri = self._setup_uri(future.uri, url_prefix)
version_prefix = self.version_prefix
for prefix in (
@@ -333,7 +333,7 @@ def register(self, app, options):
apply_route = FutureRoute(
future.handler,
- uri[1:] if uri.startswith("//") else uri,
+ uri,
future.methods,
host,
strict_slashes,
@@ -363,7 +363,7 @@ def register(self, app, options):
# Static Files
for future in self._future_statics:
# Prepend the blueprint URI prefix if available
- uri = url_prefix + future.uri if url_prefix else future.uri
+ uri = self._setup_uri(future.uri, url_prefix)
apply_route = FutureStatic(uri, *future[1:])
if (self, apply_route) in app._future_registry:
@@ -456,6 +456,18 @@ def _extract_value(*values):
break
return value
+ @staticmethod
+ def _setup_uri(base: str, prefix: Optional[str]):
+ uri = base
+ if prefix:
+ uri = prefix
+ if base.startswith("/") and prefix.endswith("/"):
+ uri += base[1:]
+ else:
+ uri += base
+
+ return uri[1:] if uri.startswith("//") else uri
+
@staticmethod
def register_futures(
apps: Set[Sanic], bp: Blueprint, futures: Sequence[Tuple[Any, ...]]
diff --git a/sanic/mixins/routes.py b/sanic/mixins/routes.py
--- a/sanic/mixins/routes.py
+++ b/sanic/mixins/routes.py
@@ -958,6 +958,7 @@ def _register_static(
# serve from the folder
if not static.resource_type:
if not path.isfile(file_or_directory):
+ uri = uri.rstrip("/")
uri += "/<__file_uri__:path>"
elif static.resource_type == "dir":
if path.isfile(file_or_directory):
@@ -965,6 +966,7 @@ def _register_static(
"Resource type improperly identified as directory. "
f"'{file_or_directory}'"
)
+ uri = uri.rstrip("/")
uri += "/<__file_uri__:path>"
elif static.resource_type == "file" and not path.isfile(
file_or_directory
| diff --git a/tests/test_blueprints.py b/tests/test_blueprints.py
--- a/tests/test_blueprints.py
+++ b/tests/test_blueprints.py
@@ -17,7 +17,7 @@
# ------------------------------------------------------------ #
-def test_bp(app):
+def test_bp(app: Sanic):
bp = Blueprint("test_text")
@bp.route("/")
@@ -30,7 +30,7 @@ def handler(request):
assert response.text == "Hello"
-def test_bp_app_access(app):
+def test_bp_app_access(app: Sanic):
bp = Blueprint("test")
with pytest.raises(
@@ -87,7 +87,7 @@ def handler(request):
assert response.status == 200
-def test_bp_strict_slash(app):
+def test_bp_strict_slash(app: Sanic):
bp = Blueprint("test_text")
@bp.get("/get", strict_slashes=True)
@@ -114,7 +114,7 @@ def post_handler(request):
assert response.status == 404
-def test_bp_strict_slash_default_value(app):
+def test_bp_strict_slash_default_value(app: Sanic):
bp = Blueprint("test_text", strict_slashes=True)
@bp.get("/get")
@@ -134,7 +134,7 @@ def post_handler(request):
assert response.status == 404
-def test_bp_strict_slash_without_passing_default_value(app):
+def test_bp_strict_slash_without_passing_default_value(app: Sanic):
bp = Blueprint("test_text")
@bp.get("/get")
@@ -154,7 +154,7 @@ def post_handler(request):
assert response.text == "OK"
-def test_bp_strict_slash_default_value_can_be_overwritten(app):
+def test_bp_strict_slash_default_value_can_be_overwritten(app: Sanic):
bp = Blueprint("test_text", strict_slashes=True)
@bp.get("/get", strict_slashes=False)
@@ -174,7 +174,7 @@ def post_handler(request):
assert response.text == "OK"
-def test_bp_with_url_prefix(app):
+def test_bp_with_url_prefix(app: Sanic):
bp = Blueprint("test_text", url_prefix="/test1")
@bp.route("/")
@@ -187,7 +187,7 @@ def handler(request):
assert response.text == "Hello"
-def test_several_bp_with_url_prefix(app):
+def test_several_bp_with_url_prefix(app: Sanic):
bp = Blueprint("test_text", url_prefix="/test1")
bp2 = Blueprint("test_text2", url_prefix="/test2")
@@ -208,7 +208,7 @@ def handler2(request):
assert response.text == "Hello2"
-def test_bp_with_host(app):
+def test_bp_with_host(app: Sanic):
bp = Blueprint("test_bp_host", url_prefix="/test1", host="example.com")
@bp.route("/")
@@ -230,7 +230,7 @@ def handler2(request):
assert response.body == b"Hello subdomain!"
-def test_several_bp_with_host(app):
+def test_several_bp_with_host(app: Sanic):
bp = Blueprint(
"test_text",
url_prefix="/test",
@@ -274,7 +274,7 @@ def handler2(request):
assert response.text == "Hello3"
-def test_bp_with_host_list(app):
+def test_bp_with_host_list(app: Sanic):
bp = Blueprint(
"test_bp_host",
url_prefix="/test1",
@@ -304,7 +304,7 @@ def handler2(request):
assert response.text == "Hello subdomain!"
-def test_several_bp_with_host_list(app):
+def test_several_bp_with_host_list(app: Sanic):
bp = Blueprint(
"test_text",
url_prefix="/test",
@@ -356,7 +356,7 @@ def handler2(request):
assert response.text == "Hello3"
-def test_bp_middleware(app):
+def test_bp_middleware(app: Sanic):
blueprint = Blueprint("test_bp_middleware")
@blueprint.middleware("response")
@@ -375,7 +375,7 @@ async def handler(request):
assert response.text == "FAIL"
-def test_bp_middleware_with_route(app):
+def test_bp_middleware_with_route(app: Sanic):
blueprint = Blueprint("test_bp_middleware")
@blueprint.middleware("response")
@@ -398,7 +398,7 @@ async def bp_handler(request):
assert response.text == "OK"
-def test_bp_middleware_order(app):
+def test_bp_middleware_order(app: Sanic):
blueprint = Blueprint("test_bp_middleware_order")
order = []
@@ -438,7 +438,7 @@ def process_response(request):
assert order == [1, 2, 3, 4, 5, 6]
-def test_bp_exception_handler(app):
+def test_bp_exception_handler(app: Sanic):
blueprint = Blueprint("test_middleware")
@blueprint.route("/1")
@@ -470,7 +470,7 @@ def handler_exception(request, exception):
assert response.status == 200
-def test_bp_exception_handler_applied(app):
+def test_bp_exception_handler_applied(app: Sanic):
class Error(Exception):
pass
@@ -500,7 +500,7 @@ def notok(request):
assert response.status == 500
-def test_bp_exception_handler_not_applied(app):
+def test_bp_exception_handler_not_applied(app: Sanic):
class Error(Exception):
pass
@@ -522,7 +522,7 @@ def notok(request):
assert response.status == 500
-def test_bp_listeners(app):
+def test_bp_listeners(app: Sanic):
app.route("/")(lambda x: x)
blueprint = Blueprint("test_middleware")
@@ -559,7 +559,7 @@ def handler_6(sanic, loop):
assert order == [1, 2, 3, 4, 5, 6]
-def test_bp_static(app):
+def test_bp_static(app: Sanic):
current_file = inspect.getfile(inspect.currentframe())
with open(current_file, "rb") as file:
current_file_contents = file.read()
@@ -597,7 +597,7 @@ def test_bp_static_content_type(app, file_name):
assert response.headers["Content-Type"] == "text/html; charset=utf-8"
-def test_bp_shorthand(app):
+def test_bp_shorthand(app: Sanic):
blueprint = Blueprint("test_shorhand_routes")
ev = asyncio.Event()
@@ -682,7 +682,7 @@ async def websocket_handler(request, ws):
assert ev.is_set()
-def test_bp_group(app):
+def test_bp_group(app: Sanic):
deep_0 = Blueprint("deep_0", url_prefix="/deep")
deep_1 = Blueprint("deep_1", url_prefix="/deep1")
@@ -722,7 +722,7 @@ def handler2(request):
assert response.text == "D1B_OK"
-def test_bp_group_with_default_url_prefix(app):
+def test_bp_group_with_default_url_prefix(app: Sanic):
from sanic.response import json
bp_resources = Blueprint("bp_resources")
@@ -873,7 +873,7 @@ async def websocket_handler(request, ws):
assert event.is_set()
-def test_duplicate_blueprint(app):
+def test_duplicate_blueprint(app: Sanic):
bp_name = "bp"
bp = Blueprint(bp_name)
bp1 = Blueprint(bp_name)
@@ -1056,7 +1056,7 @@ def test_bp_set_attribute_warning():
bp.foo = 1
-def test_early_registration(app):
+def test_early_registration(app: Sanic):
assert len(app.router.routes) == 0
bp = Blueprint("bp")
@@ -1082,3 +1082,29 @@ async def three(_):
for path in ("one", "two", "three"):
_, response = app.test_client.get(f"/{path}")
assert response.text == path
+
+
+def test_remove_double_slashes_defined_on_bp(app: Sanic):
+ bp = Blueprint("bp", url_prefix="/foo/", strict_slashes=True)
+
+ @bp.get("/")
+ async def handler(_):
+ ...
+
+ app.blueprint(bp)
+ app.router.finalize()
+
+ assert app.router.routes[0].path == "foo/"
+
+
+def test_remove_double_slashes_defined_on_register(app: Sanic):
+ bp = Blueprint("bp")
+
+ @bp.get("/")
+ async def index(_):
+ ...
+
+ app.blueprint(bp, url_prefix="/foo/", strict_slashes=True)
+ app.router.finalize()
+
+ assert app.router.routes[0].path == "foo/"
| Inconsistent slashes in Blueprint
**Describe the bug**
There is an inconsistency in how slashes are handled in blueprints when `strict_slashes` is applied in different ways.
**Code snippet**
```python
@app.before_server_start
async def display(_):
    for route in app.router.routes:
        print(route)
```
The following yields: `<Route: name=__main__.bp.index path=foo/>`
This is correct :heavy_check_mark:
```python
bp = Blueprint("bp", url_prefix="/foo/", strict_slashes=True)
@bp.get("/")
async def handler(_):
    ...
```
And this yields: `<Route: name=__main__.bp.index path=foo//>`
This is incorrect :x:
```python
@bp.get("/")
async def index(_):
    ...
app.blueprint(bp, url_prefix="/foo/", strict_slashes=True)
```
**Expected behavior**
Applying a prefix with `strict_slashes` in `app.blueprint` behaves the same as in `Blueprint`.
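A minimal sketch of a join that avoids the doubled slash (the function name is illustrative, not Sanic's internal API):
```python
def join_prefix(base: str, prefix: str) -> str:
    # drop one slash when the prefix ends and the base begins with "/"
    if base.startswith("/") and prefix.endswith("/"):
        uri = prefix + base[1:]
    else:
        uri = prefix + base
    return uri[1:] if uri.startswith("//") else uri


print(join_prefix("/", "/foo/"))  # "/foo/" -- no trailing double slash
```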
| 2022-07-31T20:39:56 |
|
sanic-org/sanic | 2522 | sanic-org__sanic-2522 | [
"2520"
] | 7827b1b41d36dcf297c8bf8a3e8b74d5368339a2 | diff --git a/sanic/app.py b/sanic/app.py
--- a/sanic/app.py
+++ b/sanic/app.py
@@ -1315,7 +1315,7 @@ def update_config(self, config: Union[bytes, str, dict, Any]):
self.config.update_config(config)
@property
- def asgi(self):
+ def asgi(self) -> bool:
return self.state.asgi
@asgi.setter
diff --git a/sanic/mixins/runner.py b/sanic/mixins/runner.py
--- a/sanic/mixins/runner.py
+++ b/sanic/mixins/runner.py
@@ -525,7 +525,7 @@ def motd(
)
)
else:
- server = ""
+ server = "ASGI" if self.asgi else "unknown" # type: ignore
display = {
"mode": " ".join(mode),
@@ -571,8 +571,12 @@ def motd(
@property
def serve_location(self) -> str:
- server_settings = self.state.server_info[0].settings
- return self.get_server_location(server_settings)
+ try:
+ server_settings = self.state.server_info[0].settings
+ return self.get_server_location(server_settings)
+ except IndexError:
+ location = "ASGI" if self.asgi else "unknown" # type: ignore
+ return f"http://<{location}>"
@staticmethod
def get_server_location(
| diff --git a/tests/test_asgi.py b/tests/test_asgi.py
--- a/tests/test_asgi.py
+++ b/tests/test_asgi.py
@@ -546,3 +546,13 @@ def signal_handler(signal):
assert response.status_code == 200
assert response.text == "test_signals_triggered"
assert signals_triggered == signals_expected
+
+
[email protected]
+async def test_asgi_serve_location(app):
+ @app.get("/")
+ def _request(request: Request):
+ return text(request.app.serve_location)
+
+ _, response = await app.asgi_client.get("/")
+ assert response.text == "http://<ASGI>"
diff --git a/tests/test_http.py b/tests/test_http.py
--- a/tests/test_http.py
+++ b/tests/test_http.py
@@ -76,7 +76,7 @@ def test_full_message(client):
)
response = client.recv()
- # AltSvcCheck touchup removes the Alt-Svc header from the
+ # AltSvcCheck touchup removes the Alt-Svc header from the
# response in the Python 3.9+ in this case
assert len(response) == (151 if version_info < (3, 9) else 140)
assert b"200 OK" in response
| uvicorn (ASGI?) unable to run sanic application
**Describe the bug**
Sanic used to work behind uvicorn (ASGI). Now I can't get it to work
**Code snippet**
```python
# test_app.py
from sanic import Sanic, response
app = Sanic(name="test")
@app.get("/")
async def index(request):
return response.text("ok")
```
```console
$ uvicorn test_app:app
```
uvicorn+sanic then launches with:
```
INFO: ASGI 'lifespan' protocol appears unsupported.
INFO: Application startup complete.
INFO: Uvicorn running on http://127.0.0.1:8000 (Press CTRL+C to quit)
```
but on first request there's a 500 like:
```Traceback (most recent call last):
File "/home/pagessin/dev/sentinel/.venv/lib/python3.10/site-packages/uvicorn/protocols/http/httptools_impl.py", line 401, in run_asgi
result = await app(self.scope, self.receive, self.send)
File "/home/pagessin/dev/sentinel/.venv/lib/python3.10/site-packages/uvicorn/middleware/proxy_headers.py", line 78, in __call__
return await self.app(scope, receive, send)
File "/home/pagessin/dev/sentinel/.venv/lib/python3.10/site-packages/sanic/app.py", line 1297, in __call__
self._asgi_app = await ASGIApp.create(self, scope, receive, send)
File "/home/pagessin/dev/sentinel/.venv/lib/python3.10/site-packages/sanic/asgi.py", line 167, in create
await sanic_app.dispatch(
File "/home/pagessin/dev/sentinel/.venv/lib/python3.10/site-packages/sanic/signals.py", line 193, in dispatch
return await dispatch
File "/home/pagessin/dev/sentinel/.venv/lib/python3.10/site-packages/sanic/signals.py", line 131, in _dispatch
group, handlers, params = self.get(event, condition=condition)
File "/home/pagessin/dev/sentinel/.venv/lib/python3.10/site-packages/sanic/signals.py", line 93, in get
group, param_basket = self.find_route(
TypeError: 'NoneType' object is not callable
INFO: 127.0.0.1:33318 - "GET /status HTTP/1.1" 500 Internal Server Error
```
**Expected behavior**
Sanic is able to respond to HTTP requests via ASGI.
**Environment (please complete the following information):**
- OS: Ubuntu 20.04
- `sanic==22.6.1`
- `uvicorn==0.18.2`
| 2022-08-07T20:16:19 |
|
sanic-org/sanic | 2525 | sanic-org__sanic-2525 | [
"2524"
] | 2f6f2bfa7660e9583694a55fdce4a4bb73b81819 | diff --git a/sanic/app.py b/sanic/app.py
--- a/sanic/app.py
+++ b/sanic/app.py
@@ -1521,6 +1521,18 @@ async def _startup(self):
self.signalize(self.config.TOUCHUP)
self.finalize()
+ route_names = [route.name for route in self.router.routes]
+ duplicates = {
+ name for name in route_names if route_names.count(name) > 1
+ }
+ if duplicates:
+ names = ", ".join(duplicates)
+ deprecation(
+ f"Duplicate route names detected: {names}. In the future, "
+ "Sanic will enforce uniqueness in route naming.",
+ 23.3,
+ )
+
# TODO: Replace in v22.6 to check against apps in app registry
if (
self.__class__._uvloop_setting is not None
| diff --git a/tests/test_routes.py b/tests/test_routes.py
--- a/tests/test_routes.py
+++ b/tests/test_routes.py
@@ -1266,3 +1266,22 @@ async def handler(request: Request):
assert request.route.ctx.foo() == "foo"
assert await request.route.ctx.bar() == 99
+
+
[email protected]
+async def test_duplicate_route_deprecation(app):
+ @app.route("/foo", name="duped")
+ async def handler_foo(request):
+ return text("...")
+
+ @app.route("/bar", name="duped")
+ async def handler_bar(request):
+ return text("...")
+
+ message = (
+ r"\[DEPRECATION v23\.3\] Duplicate route names detected: "
+ r"test_duplicate_route_deprecation\.duped\. In the future, "
+ r"Sanic will enforce uniqueness in route naming\."
+ )
+ with pytest.warns(DeprecationWarning, match=message):
+ await app._startup()
| [Feature Request] Warn when multiple route names are the same
**Is your feature request related to a problem? Please describe your use case.**
I just spent a good amount of time trying to figure out why injections were not working on certain routes. After digging into the code a bit more, it turned out that when route names are the same, the injection will overwrite the value in the `SignatureRegistry`.
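For reference, duplicate names are easy to produce with explicit route names; this mirrors the regression test added for the warning:
```python
from sanic import Sanic, text

app = Sanic("test")


@app.route("/foo", name="duped")
async def handler_foo(request):
    return text("...")


@app.route("/bar", name="duped")  # same name -- both register as "test.duped"
async def handler_bar(request):
    return text("...")
```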
**Describe the solution you'd like**
Something like this, showing which route names are already in the registry and maybe also mentioning which routes have injections (although that last one would be for debugging purposes, maybe have a toggle?):
```py
[2022-08-08 21:17:46 -0400] [80724] [DEBUG] Dispatching signal: server.init.after
[2022-08-08 21:23:19 -0400] [81202] [DEBUG] Injecting into route 'manager.cases.get_cases' {'user': (<class 'src.utils.User'>, <Constructor(func=create)>)}
[2022-08-08 21:23:19 -0400] [81202] [WARNING] Following route has already been injected: 'ioc_manager.templates.get_rule_filters'
[2022-08-08 21:23:19 -0400] [81202] [WARNING] Following route has already been injected: 'ioc_manager.cases.get_case_iocs'
[2022-08-08 21:23:19 -0400] [81202] [WARNING] Following route has already been injected: 'ioc_manager.cases.get_case_iocs'
[2022-08-08 21:17:46 -0400] [80724] [INFO] Starting worker [80724]
```
For the warning, I've just added a simple check here:
https://github.com/sanic-org/sanic-ext/blob/b65cd9f28c8d1e6ee0aad91685bfb84fc4925ac6/sanic_ext/extensions/injection/registry.py#L54-L59
With
```py
def register(
    self,
    route_name: str,
    injections: Dict[str, Tuple[Type, Optional[Callable[..., Any]]]],
) -> None:
    if route_name in self._registry:
        logger.warning(
            f"Following route has already been injected: '{route_name}'"
        )
    self._registry[route_name] = injections
```
| Not sure if this should be a part of `sanic_ext` or `sanic`
Another way would be to list, when starting the server, all the routes and their route names, and then warn on the duplicate ones (I would personally love this since you sometimes don't know which routes/handlers _actually_ get registered when you have a big codebase with nested blueprints) - unless that's already a feature
This is a known thing. We will probably start with a warning and eventually raise an error for duplicate named routes. It will probably be a part of `sanic-routing` in `finalize`.
I am going to solve this in app startup so we can provide a deprecation notice consistently. | 2022-08-10T05:27:35 |
sanic-org/sanic | 2537 | sanic-org__sanic-2537 | [
"2166"
] | 358498db96bdd83982e6517ae0f60e28d7846b6f | diff --git a/sanic/handlers.py b/sanic/handlers.py
--- a/sanic/handlers.py
+++ b/sanic/handlers.py
@@ -47,6 +47,28 @@ def finalize(cls, *args, **kwargs):
def _full_lookup(self, exception, route_name: Optional[str] = None):
return self.lookup(exception, route_name)
+ def _add(
+ self,
+ key: Tuple[Type[BaseException], Optional[str]],
+ handler: RouteHandler,
+ ) -> None:
+ if key in self.cached_handlers:
+ exc, name = key
+ if name is None:
+ name = "__ALL_ROUTES__"
+
+ error_logger.warning(
+ f"Duplicate exception handler definition on: route={name} "
+ f"and exception={exc}"
+ )
+ deprecation(
+ "A duplicate exception handler definition was discovered. "
+ "This may cause unintended consequences. A warning has been "
+ "issued now, but it will not be allowed starting in v23.3.",
+ 23.3,
+ )
+ self.cached_handlers[key] = handler
+
def add(self, exception, handler, route_names: Optional[List[str]] = None):
"""
Add a new exception handler to an already existing handler object.
@@ -62,9 +84,9 @@ def add(self, exception, handler, route_names: Optional[List[str]] = None):
"""
if route_names:
for route in route_names:
- self.cached_handlers[(exception, route)] = handler
+ self._add((exception, route), handler)
else:
- self.cached_handlers[(exception, None)] = handler
+ self._add((exception, None), handler)
def lookup(self, exception, route_name: Optional[str] = None):
"""
| diff --git a/tests/test_exceptions_handler.py b/tests/test_exceptions_handler.py
--- a/tests/test_exceptions_handler.py
+++ b/tests/test_exceptions_handler.py
@@ -7,7 +7,7 @@
import pytest
from bs4 import BeautifulSoup
-from pytest import LogCaptureFixture, MonkeyPatch
+from pytest import LogCaptureFixture, MonkeyPatch, WarningsRecorder
from sanic import Sanic, handlers
from sanic.exceptions import BadRequest, Forbidden, NotFound, ServerError
@@ -266,3 +266,22 @@ async def handler2(request: Request):
_, response = app.test_client.get("/2")
assert "Error" in response.text
+
+
+def test_warn_on_duplicate(
+ app: Sanic, caplog: LogCaptureFixture, recwarn: WarningsRecorder
+):
+ @app.exception(ServerError)
+ async def exception_handler_1(request, exception):
+ ...
+
+ @app.exception(ServerError)
+ async def exception_handler_2(request, exception):
+ ...
+
+ assert len(caplog.records) == 1
+ assert len(recwarn) == 1
+ assert caplog.records[0].message == (
+ "Duplicate exception handler definition on: route=__ALL_ROUTES__ and "
+ "exception=<class 'sanic.exceptions.ServerError'>"
+ )
| Enforce exception handler uniqueness
1. You should not be able to register the same exception more than once, or at least not on the same App/Blueprint.
2. Handlers should only be fetched in relation to the BP or App context of the matched route. This effectively means that some exceptions (e.g. `NotFound`) could only be registered at the app level.
_Originally posted by @ahopkins in https://github.com/sanic-org/sanic/issues/2121#issuecomment-827077284_
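For reference, the duplicate registration targeted by the warning looks like this (mirroring the test added with the change):
```python
from sanic import Sanic
from sanic.exceptions import ServerError

app = Sanic("test")


@app.exception(ServerError)
async def exception_handler_1(request, exception):
    ...


@app.exception(ServerError)  # duplicate -- triggers the warning and deprecation notice
async def exception_handler_2(request, exception):
    ...
```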
| This issue has been automatically marked as stale because it has not had recent activity. It will be closed if no further activity occurs. If this is incorrect, please respond with an update. Thank you for your contributions.
Hello!
Regarding point 2, there are currently useful use cases for registering exceptions like `NotFound` at a BP level. For example, you may have an API blueprint where you want all responses to be returned as JSON, but want them to be returned as HTML everywhere else.
I believe currently it is possible to do this and, if I understand correctly, point 2 would remove the ability to do so.
I guess it could work if you _manually_ raised `NotFound`. But if the route does not exist, there is no way for Sanic to know which blueprint (and therefore which handler) to apply.
What if the blueprint has a URL prefix? I'd expect the blueprint's error handler to try handling any errors within that URL prefix, if no child handles them before.
That is not likely something we will introduce since it would require adding default routes to blueprint handlers as catch-all. Which would and could have some bizarre unintended (and difficult to debug) consequences.
Since `NotFound` could be raised manually in legit use cases, maybe we need to identify some (like this and `NoMethod`) that should raise a warning on BP registration.
> What if the blueprint has a URL prefix? I'd expect the blueprint's error handler to try handling any errors within that URL prefix, if no child handles them before.
This is exactly how I thought it should work.
There can be some use cases out there: say you have a blueprint serving a GUI and you would like to do special handling of exceptions under a (sub)URL.
If that's the case I'd suggest catching the exception with an exception handler and checking if the target path segment is in the current request path.
This issue has been automatically marked as stale because it has not had recent activity. It will be closed if no further activity occurs. If this is incorrect, please respond with an update. Thank you for your contributions.
...
This issue has been automatically marked as stale because it has not had recent activity. It will be closed if no further activity occurs. If this is incorrect, please respond with an update. Thank you for your contributions.
| 2022-08-28T07:25:37 |
sanic-org/sanic | 2546 | sanic-org__sanic-2546 | [
"2538"
] | 4726cf1910fe7735ac0944504b02fc4f3d89d5fd | diff --git a/sanic/app.py b/sanic/app.py
--- a/sanic/app.py
+++ b/sanic/app.py
@@ -42,7 +42,6 @@
Union,
)
from urllib.parse import urlencode, urlunparse
-from warnings import filterwarnings
from sanic_routing.exceptions import FinalizationError, NotFound
from sanic_routing.route import Route
@@ -105,8 +104,6 @@
if OS_IS_WINDOWS: # no cov
enable_windows_color_support()
-filterwarnings("once", category=DeprecationWarning)
-
class Sanic(BaseSanic, StartupMixin, metaclass=TouchUpMeta):
"""
diff --git a/sanic/config.py b/sanic/config.py
--- a/sanic/config.py
+++ b/sanic/config.py
@@ -1,9 +1,12 @@
from __future__ import annotations
+import sys
+
from inspect import getmembers, isclass, isdatadescriptor
from os import environ
from pathlib import Path
from typing import Any, Callable, Dict, Optional, Sequence, Union
+from warnings import filterwarnings
from sanic.constants import LocalCertCreator
from sanic.errorpages import DEFAULT_FORMAT, check_error_format
@@ -13,6 +16,20 @@
from sanic.utils import load_module_from_file_location, str_to_bool
+if sys.version_info >= (3, 8):
+ from typing import Literal
+
+ FilterWarningType = Union[
+ Literal["default"],
+ Literal["error"],
+ Literal["ignore"],
+ Literal["always"],
+ Literal["module"],
+ Literal["once"],
+ ]
+else:
+ FilterWarningType = str
+
SANIC_PREFIX = "SANIC_"
@@ -22,6 +39,7 @@
"AUTO_EXTEND": True,
"AUTO_RELOAD": False,
"EVENT_AUTOREGISTER": False,
+ "DEPRECATION_FILTER": "once",
"FORWARDED_FOR_HEADER": "X-Forwarded-For",
"FORWARDED_SECRET": None,
"GRACEFUL_SHUTDOWN_TIMEOUT": 15.0, # 15 sec
@@ -72,6 +90,7 @@ class Config(dict, metaclass=DescriptorMeta):
AUTO_EXTEND: bool
AUTO_RELOAD: bool
EVENT_AUTOREGISTER: bool
+ DEPRECATION_FILTER: FilterWarningType
FORWARDED_FOR_HEADER: str
FORWARDED_SECRET: Optional[str]
GRACEFUL_SHUTDOWN_TIMEOUT: float
@@ -130,6 +149,7 @@ def __init__(
self.load_environment_vars(SANIC_PREFIX)
self._configure_header_size()
+ self._configure_warnings()
self._check_error_format()
self._init = True
@@ -178,6 +198,8 @@ def _post_set(self, attr, value) -> None:
self.LOCAL_CERT_CREATOR = LocalCertCreator[
self.LOCAL_CERT_CREATOR.upper()
]
+ elif attr == "DEPRECATION_FILTER":
+ self._configure_warnings()
@property
def FALLBACK_ERROR_FORMAT(self) -> str:
@@ -205,6 +227,13 @@ def _configure_header_size(self):
self.REQUEST_MAX_SIZE,
)
+ def _configure_warnings(self):
+ filterwarnings(
+ self.DEPRECATION_FILTER,
+ category=DeprecationWarning,
+ module=r"sanic.*",
+ )
+
def _check_error_format(self, format: Optional[str] = None):
check_error_format(format or self.FALLBACK_ERROR_FORMAT)
| diff --git a/tests/test_deprecation.py b/tests/test_deprecation.py
--- a/tests/test_deprecation.py
+++ b/tests/test_deprecation.py
@@ -1,5 +1,6 @@
import pytest
+from sanic import Sanic
from sanic.log import deprecation
@@ -7,3 +8,13 @@ def test_deprecation():
message = r"\[DEPRECATION v9\.9\] hello"
with pytest.warns(DeprecationWarning, match=message):
deprecation("hello", 9.9)
+
+
[email protected](
+ "filter,expected",
+ (("default", 1), ("once", 1), ("ignore", 0)),
+)
+def test_deprecation_filter(app: Sanic, filter, expected, recwarn):
+ app.config.DEPRECATION_FILTER = filter
+ deprecation("hello", 9.9)
+ assert len(recwarn) == expected
| Sanic enables writing of DeprecationWarning to the log
**Describe the bug**
Sanic changes the default DeprecationWarning behaviour so that warnings are written to the log.
The code line that causes it is in app.py line 104:
`filterwarnings("once", category=DeprecationWarning)`
This change makes every DeprecationWarning, *in every 3rd party library* we use, get written to the log.
**Expected behavior**
I would expect that this will not happen in production.
**Environment (please complete the following information):**
<!-- Please provide the information below. Instead, you can copy and paste the message that Sanic shows on startup. If you do, please remember to format it with ``` -->
- OS: ubuntu & macOS
- Sanic Version: 22.6
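The patch above addresses this by scoping the filter to Sanic's own modules and making the action configurable via the new `DEPRECATION_FILTER` setting:
```python
from warnings import filterwarnings

# only match warnings raised from sanic.* modules, leaving third-party
# DeprecationWarnings at their default behaviour
filterwarnings("once", category=DeprecationWarning, module=r"sanic.*")
```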
| Good point. At the very least that should probably be by config. Will think about how we can make that a nicer experience and still make sure we provide good warnings.
Yeah, I guess an ENABLE_DEPRECATION_WARNINGS config var would be best. I can submit a PR if you like
If you have a chance to add this PR this week, LMK. Otherwise I will tackle this so we can get it into the upcoming release.
Unfortunately I probably won't be able to get to this before the next release.
On Wed, Sep 14, 2022 at 14:31, Adam Hopkins ***@***.*** wrote:
> If you have a chance to add this PR this week, LMK. Otherwise I will
> tackle this so we can get it into the upcoming release.
No worries. I will add it. | 2022-09-15T07:13:33 |
sanic-org/sanic | 2578 | sanic-org__sanic-2578 | [
"2577"
] | 3f4663b9f8715119130efe1bfe517f70b356939e | diff --git a/sanic/worker/loader.py b/sanic/worker/loader.py
--- a/sanic/worker/loader.py
+++ b/sanic/worker/loader.py
@@ -5,18 +5,10 @@
from importlib import import_module
from pathlib import Path
-from typing import (
- TYPE_CHECKING,
- Any,
- Callable,
- Dict,
- Optional,
- Type,
- Union,
- cast,
-)
+from typing import TYPE_CHECKING, Any, Callable, Dict, Optional, Union, cast
-from sanic.http.tls.creators import CertCreator, MkcertCreator, TrustmeCreator
+from sanic.http.tls.context import process_to_context
+from sanic.http.tls.creators import MkcertCreator, TrustmeCreator
if TYPE_CHECKING:
@@ -106,21 +98,30 @@ def load(self) -> SanicApp:
class CertLoader:
- _creator_class: Type[CertCreator]
+ _creators = {
+ "mkcert": MkcertCreator,
+ "trustme": TrustmeCreator,
+ }
def __init__(self, ssl_data: Dict[str, Union[str, os.PathLike]]):
- creator_name = ssl_data.get("creator")
- if creator_name not in ("mkcert", "trustme"):
+ self._ssl_data = ssl_data
+
+ creator_name = cast(str, ssl_data.get("creator"))
+
+ self._creator_class = self._creators.get(creator_name)
+ if not creator_name:
+ return
+
+ if not self._creator_class:
raise RuntimeError(f"Unknown certificate creator: {creator_name}")
- elif creator_name == "mkcert":
- self._creator_class = MkcertCreator
- elif creator_name == "trustme":
- self._creator_class = TrustmeCreator
self._key = ssl_data["key"]
self._cert = ssl_data["cert"]
self._localhost = cast(str, ssl_data["localhost"])
def load(self, app: SanicApp):
+ if not self._creator_class:
+ return process_to_context(self._ssl_data)
+
creator = self._creator_class(app, self._key, self._cert)
return creator.generate_cert(self._localhost)
| diff --git a/tests/test_tls.py b/tests/test_tls.py
--- a/tests/test_tls.py
+++ b/tests/test_tls.py
@@ -4,6 +4,7 @@
import subprocess
from contextlib import contextmanager
+from multiprocessing import Event
from pathlib import Path
from unittest.mock import Mock, patch
from urllib.parse import urlparse
@@ -636,3 +637,29 @@ def test_sanic_ssl_context_create():
assert sanic_context is context
assert isinstance(sanic_context, SanicSSLContext)
+
+
+def test_ssl_in_multiprocess_mode(app: Sanic, caplog):
+
+ ssl_dict = {"cert": localhost_cert, "key": localhost_key}
+ event = Event()
+
+ @app.main_process_start
+ async def main_start(app: Sanic):
+ app.shared_ctx.event = event
+
+ @app.after_server_start
+ async def shutdown(app):
+ app.shared_ctx.event.set()
+ app.stop()
+
+ assert not event.is_set()
+ with caplog.at_level(logging.INFO):
+ app.run(ssl=ssl_dict)
+ assert event.is_set()
+
+ assert (
+ "sanic.root",
+ logging.INFO,
+ "Goin' Fast @ https://127.0.0.1:8000",
+ ) in caplog.record_tuples
diff --git a/tests/worker/test_loader.py b/tests/worker/test_loader.py
--- a/tests/worker/test_loader.py
+++ b/tests/worker/test_loader.py
@@ -86,6 +86,10 @@ def test_input_is_module():
@patch("sanic.worker.loader.TrustmeCreator")
@patch("sanic.worker.loader.MkcertCreator")
def test_cert_loader(MkcertCreator: Mock, TrustmeCreator: Mock, creator: str):
+ CertLoader._creators = {
+ "mkcert": MkcertCreator,
+ "trustme": TrustmeCreator,
+ }
MkcertCreator.return_value = MkcertCreator
TrustmeCreator.return_value = TrustmeCreator
data = {
| Certificates not created with `mkcert` or `trustme` raise a RuntimeError
The `CertLoader` class in `sanic-org/sanic/sanic/worker/loader.py` checks the creator of the certificate. If the creator is not `mkcert` or `trustme` then it raises a `RuntimeError`. This will prevent Sanic from running with certificates from any other sources.
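A minimal sketch of the fixed dispatch (simplified from the patch; the creator values here are plain strings standing in for the real classes):
```python
from typing import Optional

_creators = {"mkcert": "MkcertCreator", "trustme": "TrustmeCreator"}


def resolve_creator(creator_name: Optional[str]):
    creator_class = _creators.get(creator_name)
    if not creator_name:
        # no creator recorded -- load the supplied certificate directly
        return None
    if not creator_class:
        raise RuntimeError(f"Unknown certificate creator: {creator_name}")
    return creator_class


print(resolve_creator(None))      # None -- externally sourced certificates now pass
print(resolve_creator("mkcert"))  # "MkcertCreator"
```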
| 2022-10-19T17:21:31 |
|
sanic-org/sanic | 2585 | sanic-org__sanic-2585 | [
"2579"
] | 97f33f42df7ba34e90f8622f74b8e265deef6ee4 | diff --git a/sanic/app.py b/sanic/app.py
--- a/sanic/app.py
+++ b/sanic/app.py
@@ -1453,7 +1453,14 @@ def get_app(
return cls.get_app("__mp_main__", force_create=force_create)
if force_create:
return cls(name)
- raise SanicException(f'Sanic app name "{name}" not found.')
+ raise SanicException(
+ f"Sanic app name '{name}' not found.\n"
+ "App instantiation must occur outside "
+ "if __name__ == '__main__' "
+ "block or by using an AppLoader.\nSee "
+ "https://sanic.dev/en/guide/deployment/app-loader.html"
+ " for more details."
+ )
@classmethod
def _check_uvloop_conflict(cls) -> None:
| diff --git a/tests/test_app.py b/tests/test_app.py
--- a/tests/test_app.py
+++ b/tests/test_app.py
@@ -347,7 +347,13 @@ def test_app_registry_retrieval_from_multiple():
def test_get_app_does_not_exist():
with pytest.raises(
- SanicException, match='Sanic app name "does-not-exist" not found.'
+ SanicException,
+ match="Sanic app name 'does-not-exist' not found.\n"
+ "App instantiation must occur outside "
+ "if __name__ == '__main__' "
+ "block or by using an AppLoader.\nSee "
+ "https://sanic.dev/en/guide/deployment/app-loader.html"
+ " for more details."
):
Sanic.get_app("does-not-exist")
| In multiworker mode better error message if no apps
If a developer does not define their app so that a worker process can access it, they will receive a `KeyError` when Sanic tries to access it from the registry.
For example, that would happen if an app only was instantiated on the main process:
```python
if __name__ == "__main__":
    app = Sanic(...)
    app.run()
```
The solution to this is usually either to move the instantiation outside that block, or to use an [`AppLoader`](https://sanic.dev/en/guide/deployment/app-loader.html).
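The corrected pattern for the first option (the app name here is illustrative):
```python
from sanic import Sanic

app = Sanic("MyApp")  # module level, so worker processes can find it in the registry

if __name__ == "__main__":
    app.run()
```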
We should provide a better error message from within a worker process if an app cannot be found in the registry.
| I want to take it
All yours | 2022-10-22T13:34:17 |
sanic-org/sanic | 2,595 | sanic-org__sanic-2595 | [
"2566"
] | 5369291c2732f8fe12d16bc804c9e31e97ab87fc | diff --git a/sanic/worker/reloader.py b/sanic/worker/reloader.py
--- a/sanic/worker/reloader.py
+++ b/sanic/worker/reloader.py
@@ -9,6 +9,7 @@
from pathlib import Path
from signal import SIGINT, SIGTERM
from signal import signal as signal_func
+from time import sleep
from typing import Dict, Set
from sanic.server.events import trigger_events
@@ -62,6 +63,7 @@ def __call__(self) -> None:
self.reload(",".join(changed) if changed else "unknown")
if after_trigger:
trigger_events(after_trigger, loop, app)
+ sleep(self.interval)
else:
if reloader_stop:
trigger_events(reloader_stop, loop, app)
| Auto reloader consumes full CPU core
**Describe the bug**
Enabling the auto reloader (`auto_reload=True` or `--auto-reload`) will consume a full CPU core, because it is looping over all files in all loaded modules and `stat`-ing every file. 100% usage on a single core is observed on macOS and Linux.
The problematic loop is here:
https://github.com/sanic-org/sanic/blob/f891995b487f01ff1207afcd241ae359725a8e3c/sanic/worker/reloader.py#L46-L64
**Code snippet**
Run the following minimal example with `python3 test.py` or `sanic test:app --auto-reload` and watch CPU usage
```python
from sanic import Sanic, text

app = Sanic("test")


@app.route("/")
def hello_world(request):
    return text("hello, world")


if __name__ == "__main__":
    app.run(auto_reload=True)
```
**Expected behavior**
The reloader should not consume a full CPU core, but should watch the file system for changes more intelligently, using platform-specific methods (inotify, FSEvents, kqueue, etc.). See the [watchdog](https://github.com/gorakhargosh/watchdog/) project for inspiration. Maybe `watchdog` could be included as an optional dependency and used if available?
For instance, [another popular framework has implemented two reloader loops](https://github.com/pallets/werkzeug/blob/2.2.2/src/werkzeug/_reloader.py) and will select the `watchdog` loop if the package is available.
Alternatively, simply add a short sleep step to the reloader loop. I think a one second delay in reloading is acceptable.
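A rough sketch of that throttle (the loop and helper names below are illustrative stand-ins, not the actual reloader internals):
```python
from time import sleep

while True:
    changed = check_mtimes()        # hypothetical stand-in for the existing stat() loop
    if changed:
        reload(",".join(changed))
    sleep(interval)                 # e.g. ~1s; yields the CPU between polling passes
```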
**Environment (please complete the following information):**
- OS: macOS 12.6, Ubuntu Linux 22.04 (likely also Windows, but not tested)
- Sanic Version: 22.9.0
| I noticed the `Reloader` class already had an `interval` property which is unused. I made a pull request #2567 to actually make use of this interval to sleep in the main loop.
This issue has been mentioned on **Sanic Community Discussion**. There might be relevant details there:
https://community.sanicframework.org/t/on-intel-mac-the-sanic-dev-mode-uses-99-cpu/1077/2
| 2022-10-31T09:52:19 |
|
sanic-org/sanic | 2,606 | sanic-org__sanic-2606 | [
"2604"
] | 71cd53b64ec2d39a66feb8e256eeeea4ae332deb | diff --git a/sanic/asgi.py b/sanic/asgi.py
--- a/sanic/asgi.py
+++ b/sanic/asgi.py
@@ -6,7 +6,7 @@
from urllib.parse import quote
from sanic.compat import Header
-from sanic.exceptions import ServerError
+from sanic.exceptions import BadRequest, ServerError
from sanic.helpers import Default
from sanic.http import Stage
from sanic.log import error_logger, logger
@@ -132,12 +132,20 @@ async def create(
instance.sanic_app.state.is_started = True
setattr(instance.transport, "add_task", sanic_app.loop.create_task)
- headers = Header(
- [
- (key.decode("latin-1"), value.decode("latin-1"))
- for key, value in scope.get("headers", [])
- ]
- )
+ try:
+ headers = Header(
+ [
+ (
+ key.decode("ASCII"),
+ value.decode(errors="surrogateescape"),
+ )
+ for key, value in scope.get("headers", [])
+ ]
+ )
+ except UnicodeDecodeError:
+ raise BadRequest(
+ "Header names can only contain US-ASCII characters"
+ )
path = (
scope["path"][1:]
if scope["path"].startswith("/")
| diff --git a/tests/test_asgi.py b/tests/test_asgi.py
--- a/tests/test_asgi.py
+++ b/tests/test_asgi.py
@@ -7,6 +7,9 @@
import pytest
import uvicorn
+from httpx import Headers
+from pytest import MonkeyPatch
+
from sanic import Sanic
from sanic.application.state import Mode
from sanic.asgi import ASGIApp, Lifespan, MockTransport
@@ -626,3 +629,26 @@ async def before_server_stop(_):
)
]
)
+
+
[email protected]
+async def test_asgi_headers_decoding(app: Sanic, monkeypatch: MonkeyPatch):
+ @app.get("/")
+ def handler(request: Request):
+ return text("")
+
+ headers_init = Headers.__init__
+
+ def mocked_headers_init(self, *args, **kwargs):
+ if "encoding" in kwargs:
+ kwargs.pop("encoding")
+ headers_init(self, encoding="utf-8", *args, **kwargs)
+
+ monkeypatch.setattr(Headers, "__init__", mocked_headers_init)
+
+ message = "Header names can only contain US-ASCII characters"
+ with pytest.raises(BadRequest, match=message):
+ _, response = await app.asgi_client.get("/", headers={"๐": "๐"})
+
+ _, response = await app.asgi_client.get("/", headers={"Test-Header": "๐"})
+ assert response.status_code == 200
| HTTP 1 request headers decoded using default encoding instead of ISO-8859-1
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Describe the bug
Headers are decoded here without specifying their encoding:
https://github.com/sanic-org/sanic/blob/ad4e526c775fc3ce950503d6476d9d344492b0dd/sanic/http/http1.py#L205
On my system (macOS, using Python 3.10.8 installed via Homebrew) this causes bytes that are valid characters in ISO-8859-1 but not in UTF-8 to be decoded as surrogate escape characters, e.g. `b"\x80"` becomes `"\udc80"` instead of `"\x80"`
### Code snippet
_No response_
### Expected Behavior
headers encoded as ISO-8859-1 with no MIME type to be decoded correctly without using UTF-8 surrogate escape characters.
### How do you run Sanic?
As a script (`app.run` or `Sanic.serve`)
### Operating System
linux
### Sanic Version
22.9.1
### Additional context
this used to work as expected in Sanic<=20.12.7
| Which client are you using? Browsers should generally stick to ASCII in request headers but will recognize UTF-8 in responses. Using anything other than ASCII is discouraged, and servers should handle it as opaque binary (not ISO-8859-1, and you cannot specify the charset used in headers). IIRC, you *can* indeed supply binary values in browsers using Javascript `fetch` API and there is nothing to say which charset it is in.
Sanic uses surrogate escapes so that you *can* encode it back to original binary, and then decode as ISO-8859-1 if you wish, but I would strongly advise updating your systems to use UTF-8 or ASCII if in any way possible.
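A minimal sketch of that round-trip (the starting value is illustrative — it is what Sanic hands you for the raw byte `0x80`):
```python
value = "\udc80"                                       # surrogate-escaped header text
raw = value.encode("utf-8", errors="surrogateescape")  # b"\x80" — the original bytes
legacy = raw.decode("iso-8859-1")                      # "\x80" reinterpreted as ISO-8859-1
```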
> I would strongly advise updating your systems to use UTF-8 or ASCII if in any way possible.
My client's code is supposed to only send ASCII, but unfortunately I don't own the client code. In practice my application receives garbage that needs to be preserved, and changing how it handles that garbage is undesirable.
> you _can_ encode it back to original binary, and then decode as ISO-8859-1 if you wish
I had not figured this out, and that's sufficient for my use case, thank you.
Earlier Sanic versions handled headers as ISO-8859-1, which was causing trouble when they actually were in UTF-8 (more common nowadays). I had to put a lot of thought into this while reimplementing the HTTP parser code as leaving them as `bytes` wouldn't be practical either. The surrogate escape coding is [WTF-8](https://simonsapin.github.io/wtf-8/) which indeed is meant for *preserving garbage*, being able to restore original bytes of what might be ill-formed UTF-8. I'm glad you found use for this detail of Sanic's implementation, being able to restore those bytes instead of simply showing "replacement character" as a naive implementation might.
ASGI is decoding headers with latin-1 https://github.com/sanic-org/sanic/blob/ad4e526c775fc3ce950503d6476d9d344492b0dd/sanic/asgi.py#L123
@ahopkins Might wish to have a consistent implementation here?
Yes. @ChihweiLHBird expressed interest in taking this one on.
> ASGI is decoding headers with latin-1
I am a little confused about this... If it is encoded in `utf-8`, which is the general way, can it still be decoded as `latin-1`? Is `latin-1` a superset of `utf-8`? Or are we going to also decode headers as `utf-8` in ASGI?
| 2022-11-23T11:14:10 |
sanic-org/sanic | 2,608 | sanic-org__sanic-2608 | [
"2581"
] | b276b91c21256b43f07792221d99aa28cb5bd3f5 | diff --git a/sanic/signals.py b/sanic/signals.py
--- a/sanic/signals.py
+++ b/sanic/signals.py
@@ -154,9 +154,7 @@ async def _dispatch(
try:
for signal in signals:
params.pop("__trigger__", None)
- requirements = getattr(
- signal.handler, "__requirements__", None
- )
+ requirements = signal.extra.requirements
if (
(condition is None and signal.ctx.exclusive is False)
or (condition is None and not requirements)
@@ -219,8 +217,13 @@ def add( # type: ignore
if not trigger:
event = ".".join([*parts[:2], "<__trigger__>"])
- handler.__requirements__ = condition # type: ignore
- handler.__trigger__ = trigger # type: ignore
+ try:
+ # Attaching __requirements__ and __trigger__ to the handler
+ # is deprecated and will be removed in v23.6.
+ handler.__requirements__ = condition # type: ignore
+ handler.__trigger__ = trigger # type: ignore
+ except AttributeError:
+ pass
signal = super().add(
event,
@@ -232,6 +235,7 @@ def add( # type: ignore
signal.ctx.exclusive = exclusive
signal.ctx.trigger = trigger
signal.ctx.definition = event_definition
+ signal.extra.requirements = condition
return cast(Signal, signal)
| A signal handler cannot be a method on an object
**Describe the bug**
<!-- A clear and concise description of what the bug is, make sure to paste any exceptions and tracebacks. -->
If I try to register a method on an object as a signal handler, an exception is thrown, because it is not possible to set an attribute on a bound method:
```
  self.add_signal(self.after_routing, 'http.routing.after')
  File "sanic/lib/python3.7/site-packages/sanic/mixins/signals.py", line 77, in add_signal
    handler
  File "sanic/lib/python3.7/site-packages/sanic/mixins/signals.py", line 57, in decorator
    self._apply_signal(future_signal)
  File "sanic/lib/python3.7/site-packages/sanic/app.py", line 420, in _apply_signal
    return self.signal_router.add(*signal)
  File "sanic/lib/python3.7/site-packages/sanic/signals.py", line 222, in add
    handler.__requirements__ = condition  # type: ignore
AttributeError: 'method' object has no attribute '__requirements__'
```
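For context, the underlying Python behavior — attributes cannot be set on bound methods — can be shown in isolation (attribute name is arbitrary):
```python
class C:
    def m(self):
        pass

C().m.tag = 1  # AttributeError: 'method' object has no attribute 'tag'
```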
**Code snippet**
<!-- Relevant source code, make sure to remove what is not necessary. -->
```python
import sanic


class MyApp(sanic.Sanic):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.add_signal(self.after_routing, 'http.routing.after')

    def after_routing(self, request, route, kwargs, handler):
        pass


app = MyApp(name='method_signal_handler')
```
**Expected behavior**
<!-- A clear and concise description of what you expected to happen. -->
I would expect the bound method to be registered, and then called when the signal is dispatched.
**Environment (please complete the following information):**
<!-- Please provide the information below. Instead, you can copy and paste the message that Sanic shows on startup. If you do, please remember to format it with ``` -->
- OS: MacOS
- Sanic Version: 22.9
**Additional context**
<!-- Add any other context about the problem here. -->
| Added code tags for readability. | 2022-11-27T09:58:26 |
|
sanic-org/sanic | 2,615 | sanic-org__sanic-2615 | [
"2600"
] | f32437bf1344cbc2ca2b0cbf3062ab04a93157bb | diff --git a/sanic/http/http1.py b/sanic/http/http1.py
--- a/sanic/http/http1.py
+++ b/sanic/http/http1.py
@@ -16,6 +16,7 @@
PayloadTooLarge,
RequestCancelled,
ServerError,
+ ServiceUnavailable,
)
from sanic.headers import format_http1_response
from sanic.helpers import has_message_body
@@ -428,8 +429,11 @@ async def error_response(self, exception: Exception) -> None:
if self.request is None:
self.create_empty_request()
+ request_middleware = not isinstance(exception, ServiceUnavailable)
try:
- await app.handle_exception(self.request, exception)
+ await app.handle_exception(
+ self.request, exception, request_middleware
+ )
except Exception as e:
await app.handle_exception(self.request, e, False)
diff --git a/sanic/request.py b/sanic/request.py
--- a/sanic/request.py
+++ b/sanic/request.py
@@ -104,6 +104,7 @@ class Request:
"_protocol",
"_remote_addr",
"_request_middleware_started",
+ "_response_middleware_started",
"_scheme",
"_socket",
"_stream_id",
@@ -179,6 +180,7 @@ def __init__(
Tuple[bool, bool, str, str], List[Tuple[str, str]]
] = defaultdict(list)
self._request_middleware_started = False
+ self._response_middleware_started = False
self.responded: bool = False
self.route: Optional[Route] = None
self.stream: Optional[Stream] = None
@@ -337,7 +339,8 @@ async def add_header(_, response: HTTPResponse):
middleware = (
self.route and self.route.extra.response_middleware
) or self.app.response_middleware
- if middleware:
+ if middleware and not self._response_middleware_started:
+ self._response_middleware_started = True
response = await self.app._run_response_middleware(
self, response, middleware
)
| diff --git a/tests/test_middleware.py b/tests/test_middleware.py
--- a/tests/test_middleware.py
+++ b/tests/test_middleware.py
@@ -1,6 +1,6 @@
import logging
-from asyncio import CancelledError
+from asyncio import CancelledError, sleep
from itertools import count
from sanic.exceptions import NotFound
@@ -318,6 +318,32 @@ async def handler(request):
resp1 = await request.respond()
return resp1
- _, response = app.test_client.get("/")
+ app.test_client.get("/")
assert response_middleware_run_count == 1
assert request_middleware_run_count == 1
+
+
+def test_middleware_run_on_timeout(app):
+ app.config.RESPONSE_TIMEOUT = 0.1
+ response_middleware_run_count = 0
+ request_middleware_run_count = 0
+
+ @app.on_response
+ def response(_, response):
+ nonlocal response_middleware_run_count
+ response_middleware_run_count += 1
+
+ @app.on_request
+ def request(_):
+ nonlocal request_middleware_run_count
+ request_middleware_run_count += 1
+
+ @app.get("/")
+ async def handler(request):
+ resp1 = await request.respond()
+ await sleep(1)
+ return resp1
+
+ app.test_client.get("/")
+ assert request_middleware_run_count == 1
+ assert response_middleware_run_count == 1
| Sanic 22.9.1 enters the request middleware twice on response timeout
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Describe the bug
I'm doing a performance test of our apps on the new Sanic version, v22.9.1 (the old version is 22.6.0).
During performance testing my handler code is slow, as expected, and my response timeout for Sanic is 60 seconds.
Normally it will return a "response timeout error" to the client; we tested 22.6.0 and everything works as designed.
But after upgrading to 22.9.1 (the only change to the code), we see a new error, which turned out to be a failure inside one of our blueprint request middleware functions. Normally that code only runs once, but in the new version we see the function called twice, and the interval between the two calls is longer than the response timeout setting.
It's very easy to reproduce the situation; it seems a change in Sanic caused this.
### Code snippet
```python
import asyncio

from sanic import Blueprint, Sanic
from sanic.request import Request
from sanic.response import HTTPResponse
from sanic.response import text


class AsyncNorthHandler():
    FUNCCOUNT = 0


async def http_handler(request):
    print("handling the request")
    await asyncio.sleep(100)
    return text('ok')


bp = Blueprint("async_north")
bp.add_route(http_handler, '/', methods=['GET'], name='handler')


@bp.middleware("request")
async def set_req_header(request):
    request: Request
    AsyncNorthHandler.FUNCCOUNT = AsyncNorthHandler.FUNCCOUNT + 1
    print("enter the function {} time".format(AsyncNorthHandler.FUNCCOUNT))


app = Sanic("TEST")
app.blueprint(bp)
app.run(host='0.0.0.0', port=8888)
```
### Expected Behavior
The request middleware should run once per request.
### How do you run Sanic?
As a module
### Operating System
linux
### Sanic Version
22.9.1
### Additional context
_No response_
| Confirmed this. I think the fix should be simple and is related specifically to the response timeout.
```py
request_middleware = not isinstance(exception, ServiceUnavailable)
try:
    await app.handle_exception(
        self.request, exception, request_middleware
    )
```
https://github.com/sanic-org/sanic/blob/main/sanic/http/http1.py#L431
Will need to look closer at the logic to make sure there are not other use cases being impacted. | 2022-12-07T13:28:31 |
sanic-org/sanic | 2,622 | sanic-org__sanic-2622 | [
"2619"
] | 8e720365c25261149f22dad61a167e815dd47fa2 | diff --git a/sanic/worker/multiplexer.py b/sanic/worker/multiplexer.py
--- a/sanic/worker/multiplexer.py
+++ b/sanic/worker/multiplexer.py
@@ -21,9 +21,14 @@ def ack(self):
"state": ProcessState.ACKED.name,
}
- def restart(self, name: str = ""):
+ def restart(self, name: str = "", all_workers: bool = False):
+ if name and all_workers:
+ raise ValueError(
+ "Ambiguous restart with both a named process and"
+ " all_workers=True"
+ )
if not name:
- name = self.name
+ name = "__ALL_PROCESSES__:" if all_workers else self.name
self._monitor_publisher.send(name)
reload = restart # no cov
| diff --git a/tests/worker/test_multiplexer.py b/tests/worker/test_multiplexer.py
--- a/tests/worker/test_multiplexer.py
+++ b/tests/worker/test_multiplexer.py
@@ -1,6 +1,6 @@
from multiprocessing import Event
from os import environ, getpid
-from typing import Any, Dict
+from typing import Any, Dict, Type, Union
from unittest.mock import Mock
import pytest
@@ -117,3 +117,26 @@ def test_properties(
assert m.workers == worker_state
assert m.state == worker_state["Test"]
assert isinstance(m.state, WorkerState)
+
+
[email protected](
+ "params,expected",
+ (
+ ({}, "Test"),
+ ({"name": "foo"}, "foo"),
+ ({"all_workers": True}, "__ALL_PROCESSES__:"),
+ ({"name": "foo", "all_workers": True}, ValueError),
+ ),
+)
+def test_restart_params(
+ monitor_publisher: Mock,
+ m: WorkerMultiplexer,
+ params: Dict[str, Any],
+ expected: Union[str, Type[Exception]],
+):
+ if isinstance(expected, str):
+ m.restart(**params)
+ monitor_publisher.send.assert_called_once_with(expected)
+ else:
+ with pytest.raises(expected):
+ m.restart(**params)
| Restart workers online (graceful restart) for hot reload in a production environment
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Is your feature request related to a problem? Please describe.
Requirement: keep the service available at all times.
# ---- client code----
```py
async def run():
    while 1:
        await post('http://127.0.0.1:8000/')
```
When `app.m.restart("__ALL_PROCESSES__")` is called in a worker, Sanic crashes.
# ---- server code ----
```py
@app.post("/")
async def handler(request):
app.m.restart('__ALL_PROCESSES__')
return response.text('ok')
if __name__ == "__main__":
app.run(debug=True, workers=2)
```
### Describe the solution you'd like
Graceful restarts that reduce the impact of restarting. Roughly:
1. Gracefully restart workers; restarting all workers should not crash. If there is only one worker, blocking for a short while (while the new worker is not started yet) is acceptable.
2. A way to gracefully restart workers one by one, e.g.:

       worker_names = tuple(app.m.workers.keys())
       for worker_name in worker_names:
           ret_val = app.m.restart(worker_name)
           # here, the worker has been gracefully restarted and ret_val is meaningful

3. Possibly combine the two above: when restarting all workers, restart 50% of them while the other 50% keep serving.
### Additional context
Simplify the API, e.g.:
```py
app.m.restart('__ALL_PROCESSES__') => app.m.restart_all()
```
thanks.
| 2022-12-08T09:52:49 |
|
sanic-org/sanic | 2,627 | sanic-org__sanic-2627 | [
"2392"
] | 95ee518aec1d7e9be3dc0da987ed5a0848cbe975 | diff --git a/sanic/asgi.py b/sanic/asgi.py
--- a/sanic/asgi.py
+++ b/sanic/asgi.py
@@ -9,7 +9,7 @@
from sanic.exceptions import ServerError
from sanic.helpers import Default
from sanic.http import Stage
-from sanic.log import logger
+from sanic.log import error_logger, logger
from sanic.models.asgi import ASGIReceive, ASGIScope, ASGISend, MockTransport
from sanic.request import Request
from sanic.response import BaseHTTPResponse
@@ -85,13 +85,27 @@ async def __call__(
) -> None:
message = await receive()
if message["type"] == "lifespan.startup":
- await self.startup()
- await send({"type": "lifespan.startup.complete"})
+ try:
+ await self.startup()
+ except Exception as e:
+ error_logger.exception(e)
+ await send(
+ {"type": "lifespan.startup.failed", "message": str(e)}
+ )
+ else:
+ await send({"type": "lifespan.startup.complete"})
message = await receive()
if message["type"] == "lifespan.shutdown":
- await self.shutdown()
- await send({"type": "lifespan.shutdown.complete"})
+ try:
+ await self.shutdown()
+ except Exception as e:
+ error_logger.exception(e)
+ await send(
+ {"type": "lifespan.shutdown.failed", "message": str(e)}
+ )
+ else:
+ await send({"type": "lifespan.shutdown.complete"})
class ASGIApp:
| diff --git a/tests/test_asgi.py b/tests/test_asgi.py
--- a/tests/test_asgi.py
+++ b/tests/test_asgi.py
@@ -8,7 +8,7 @@
from sanic import Sanic
from sanic.application.state import Mode
-from sanic.asgi import MockTransport
+from sanic.asgi import ASGIApp, MockTransport
from sanic.exceptions import BadRequest, Forbidden, ServiceUnavailable
from sanic.request import Request
from sanic.response import json, text
@@ -16,6 +16,12 @@
from sanic.signals import RESERVED_NAMESPACES
+try:
+ from unittest.mock import AsyncMock
+except ImportError:
+ from tests.asyncmock import AsyncMock # type: ignore
+
+
@pytest.fixture
def message_stack():
return deque()
@@ -558,3 +564,39 @@ def _request(request: Request):
_, response = await app.asgi_client.get("/")
assert response.text == "http://<ASGI>"
+
+
[email protected]
+async def test_error_on_lifespan_exception_start(app, caplog):
+ @app.before_server_start
+ async def before_server_start(_):
+ 1 / 0
+
+ recv = AsyncMock(return_value={"type": "lifespan.startup"})
+ send = AsyncMock()
+ app.asgi = True
+
+ with caplog.at_level(logging.ERROR):
+ await ASGIApp.create(app, {"type": "lifespan"}, recv, send)
+
+ send.assert_awaited_once_with(
+ {"type": "lifespan.startup.failed", "message": "division by zero"}
+ )
+
+
[email protected]
+async def test_error_on_lifespan_exception_stop(app: Sanic):
+ @app.before_server_stop
+ async def before_server_stop(_):
+ 1 / 0
+
+ recv = AsyncMock(return_value={"type": "lifespan.shutdown"})
+ send = AsyncMock()
+ app.asgi = True
+ await app._startup()
+
+ await ASGIApp.create(app, {"type": "lifespan"}, recv, send)
+
+ send.assert_awaited_once_with(
+ {"type": "lifespan.shutdown.failed", "message": "division by zero"}
+ )
| Uvicorn ignores listeners errors
**Describe the bug**
When an exception happens in a listener, the error is not reported and the server doesn't crash when running under Uvicorn.
**Code snippet**
```python
from sanic import Sanic
from sanic.response import text

app = Sanic("MyHelloWorldApp")


@app.listener("before_server_start")
async def raises(app, loop):
    print("Trying to run faster")
    raise Exception("oh noes")


@app.get("/")
async def hello_world(request):
    return text("Hello, world.")
```
```
~ uvicorn app:app
INFO: Started server process [49837]
INFO: Waiting for application startup.
[2022-01-25 14:57:34 +0100] [49837] [INFO]
[Sanic startup banner — box-drawing art not reproduced]
Sanic v21.12.1
mode: production, ASGI
server: sanic
python: 3.9.10
platform: macOS-12.1-x86_64-i386-64bit
packages: sanic-routing==0.7.2
Build Fast. Run Fast.
/Users/andre.ericson/projects/sanic-uvicorn/.venv/lib/python3.9/site-packages/sanic/asgi.py:27: UserWarning: You have set a listener for "before_server_start" in ASGI mode. It will be executed as early as possible, but not before the ASGI server is started.
warnings.warn(
Trying to run faster
INFO: ASGI 'lifespan' protocol appears unsupported.
INFO: Application startup complete.
INFO: Uvicorn running on http://127.0.0.1:8000 (Press CTRL+C to quit)
```
**Expected behavior**
The server should crash, or at least log an error.
**Environment (please complete the following information):**
- MacOS
- python 3.9.10
```
pip list
Package Version
------------- ------------
aiofiles 0.8.0
asgiref 3.5.0
click 8.0.3
h11 0.13.0
httptools 0.3.0
multidict 5.2.0
pip 21.3.1
sanic 21.12.1
sanic-routing 0.7.2
setuptools 58.1.0
ujson 5.1.0
uvicorn 0.17.0.post1
uvloop 0.16.0
websockets 10.1
```
**Additional context**
Works as expected with `sanic app.app` and also with GunicornWorker.
| So when testing this, the exception bubbles up and causes an ASGI lifespan error in both uvicorn and hypercorn.
I'm not sure what the right way to handle this is here, but I'll look into it.
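For reference, a rough sketch of the ASGI lifespan contract involved — per the ASGI spec, a failed startup should be reported rather than swallowed (names here follow the patch above):
```python
message = await receive()            # {"type": "lifespan.startup"}
try:
    await startup()                  # runs the app's startup listeners
except Exception as e:
    await send({"type": "lifespan.startup.failed", "message": str(e)})
else:
    await send({"type": "lifespan.startup.complete"})
```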
This issue has been automatically marked as stale because it has not had recent activity. It will be closed if no further activity occurs. If this is incorrect, please respond with an update. Thank you for your contributions.
| 2022-12-15T18:47:13 |
sanic-org/sanic | 2,628 | sanic-org__sanic-2628 | [
"2388"
] | 71d3d87bccf70a01e2d54f081c8c131d89d90130 | diff --git a/sanic/server/loop.py b/sanic/server/loop.py
--- a/sanic/server/loop.py
+++ b/sanic/server/loop.py
@@ -1,11 +1,11 @@
import asyncio
import sys
-from distutils.util import strtobool
from os import getenv
from sanic.compat import OS_IS_WINDOWS
from sanic.log import error_logger
+from sanic.utils import str_to_bool
def try_use_uvloop() -> None:
@@ -35,7 +35,7 @@ def try_use_uvloop() -> None:
)
return
- uvloop_install_removed = strtobool(getenv("SANIC_NO_UVLOOP", "no"))
+ uvloop_install_removed = str_to_bool(getenv("SANIC_NO_UVLOOP", "no"))
if uvloop_install_removed:
error_logger.info(
"You are requesting to run Sanic using uvloop, but the "
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -6,8 +6,6 @@
import re
import sys
-from distutils.util import strtobool
-
from setuptools import find_packages, setup
from setuptools.command.test import test as TestCommand
@@ -37,6 +35,25 @@ def open_local(paths, mode="r", encoding="utf8"):
return codecs.open(path, mode, encoding)
+def str_to_bool(val: str) -> bool:
+ val = val.lower()
+ if val in {
+ "y",
+ "yes",
+ "yep",
+ "yup",
+ "t",
+ "true",
+ "on",
+ "enable",
+ "enabled",
+ "1",
+ }:
+ return True
+ elif val in {"n", "no", "f", "false", "off", "disable", "disabled", "0"}:
+ return False
+ else:
+ raise ValueError(f"Invalid truth value {val}")
with open_local(["sanic", "__version__.py"], encoding="latin1") as fp:
try:
@@ -131,13 +148,13 @@ def open_local(paths, mode="r", encoding="utf8"):
all_require = list(set(dev_require + docs_require))
-if strtobool(os.environ.get("SANIC_NO_UJSON", "no")):
+if str_to_bool(os.environ.get("SANIC_NO_UJSON", "no")):
print("Installing without uJSON")
requirements.remove(ujson)
tests_require.remove(types_ujson)
# 'nt' means windows OS
-if strtobool(os.environ.get("SANIC_NO_UVLOOP", "no")):
+if str_to_bool(os.environ.get("SANIC_NO_UVLOOP", "no")):
print("Installing without uvLoop")
requirements.remove(uvloop)
| distutils.strtobool deprecation
**Describe the bug**
The distutils package is deprecated and slated for removal in Python 3.12. [PEP 632](https://www.python.org/dev/peps/pep-0632/)
**Code snippet**
[sanic/server/loop.py](https://github.com/sanic-org/sanic/blob/ac388d644b1e22156e228470fad8ea34932c080a/sanic/server/loop.py#L3)
**Expected behavior**
A lack of warnings about deprecation
**Environment (please complete the following information):**
- OS: All
- Version: All supported as of 2022/01/23
**Additional context**
This is housekeeping to clean up a core python deprecation.
| Note: Even though this is an exceptionally minor fix, I don't feel like this should be backported to 20.12LTS. 20.12LTS will sunset long before 3.12 is past alpha (and possibly before 3.11.0 final hits, depending on if the timeline in [PEP 664](https://www.python.org/dev/peps/pep-0664/) is hit or not) and that makes it likely that 21.12LTS will be the only backport needed, if any.
No backport will be needed because 21.12 does not (and will not) purport to support 3.12. It is 3.7-3.10 and that will not change. We probably should add Python versions here: https://sanic.dev/en/org/policies.html#release-schedule.
This issue has been automatically marked as stale because it has not had recent activity. It will be closed if no further activity occurs. If this is incorrect, please respond with an update. Thank you for your contributions.
I think we still need to fix this, right? Any update?
This is still something to do, just not something that we need to release back to the LTS.
I'll resurrect this, completely forgot about it.
This issue has been automatically marked as stale because it has not had recent activity. It will be closed if no further activity occurs. If this is incorrect, please respond with an update. Thank you for your contributions.
This is still an issue: https://github.com/sanic-org/sanic/blob/4726cf1910fe7735ac0944504b02fc4f3d89d5fd/sanic/server/loop.py#L4
Anyone want to take a crack at this in the next couple days to get into the LTS? | 2022-12-16T00:40:51 |
|
sanic-org/sanic | 2,635 | sanic-org__sanic-2635 | [
"2558"
] | 4744a89c338a5610ce195ec8b330a020d77065a7 | diff --git a/sanic/server/runners.py b/sanic/server/runners.py
--- a/sanic/server/runners.py
+++ b/sanic/server/runners.py
@@ -200,7 +200,7 @@ def _serve_http_1(
asyncio_server_kwargs = (
asyncio_server_kwargs if asyncio_server_kwargs else {}
)
- if OS_IS_WINDOWS:
+ if OS_IS_WINDOWS and sock:
pid = os.getpid()
sock = sock.share(pid)
sock = socket.fromshare(sock)
| AttributeError: 'NoneType' object has no attribute 'share' when using the async server
**Describe the bug**
I'm trying to run a slightly modified version of https://github.com/sanic-org/sanic/blob/main/examples/run_async.py, where I switched `uvloop.new_event_loop()` for the builtin `asyncio.new_event_loop()`. This works in Sanic 22.6.2, but breaks in 22.9.0 with the following error message
```
Traceback (most recent call last):
  File "C:\<path>\sanic_test.py", line 28, in <module>
    asyncio.run(main())
  File "C:\Python310\lib\asyncio\runners.py", line 44, in run
    return loop.run_until_complete(main)
  File "C:\Python310\lib\asyncio\base_events.py", line 646, in run_until_complete
    return future.result()
  File "C:\<path>\sanic_test.py", line 15, in main
    server = await app.create_server(
  File "C:\<path>\venv\lib\site-packages\sanic\mixins\startup.py", line 450, in create_server
    return await serve(
  File "C:\<path>\venv\lib\site-packages\sanic\server\runners.py", line 106, in serve
    return _serve_http_1(
  File "C:\<path>\venv\lib\site-packages\sanic\server\runners.py", line 205, in _serve_http_1
    sock = sock.share(pid)
AttributeError: 'NoneType' object has no attribute 'share'
```
**Code snippet**
https://github.com/sanic-org/sanic/blob/main/examples/run_async.py with `uvloop.new_event_loop()` replaced with `asyncio.new_event_loop()`.
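For reference, the relevant shape of that example is roughly the following (simplified sketch; details may differ from the linked file):
```python
import asyncio

from sanic import Sanic
from sanic.response import text

app = Sanic("test")


@app.get("/")
async def handler(request):
    return text("ok")


loop = asyncio.new_event_loop()  # the builtin loop, instead of uvloop.new_event_loop()
asyncio.set_event_loop(loop)
server_coro = app.create_server(
    host="0.0.0.0", port=8000, return_asyncio_server=True
)
loop.run_until_complete(server_coro)  # fails here in 22.9.0, per the traceback above
loop.run_forever()
```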
**Expected behavior**
The server starts.
**Environment (please complete the following information):**
<!-- Please provide the information below. Instead, you can copy and paste the message that Sanic shows on startup. If you do, please remember to format it with ``` -->
```
[Sanic startup banner — box-drawing art not reproduced]
Sanic v22.9.0 — Goin' Fast @ http://0.0.0.0:8000
mode: production, single worker
server: sanic, HTTP/1.1
python: 3.10.4
platform: Windows-10-10.0.19043-SP0
packages: sanic-routing==22.8.0
Build Fast. Run Fast.
```
**Additional context**
<!-- Add any other context about the problem here. -->
| I see the issue. Will get a patch out for this shortly. Thanks for bringing this up. Surprised the CI didn't catch this TBH.
Couldn't reproduce it. Already fixed?
> Couldn't reproduce it. Already fixed?
Did you use Windows? | 2022-12-18T11:48:54 |
|
sanic-org/sanic | 2,636 | sanic-org__sanic-2636 | [
"2596"
] | 911485d52e45a895a938f5dc62f3ed8e8fc031e5 | diff --git a/sanic/app.py b/sanic/app.py
--- a/sanic/app.py
+++ b/sanic/app.py
@@ -61,7 +61,7 @@
URLBuildError,
)
from sanic.handlers import ErrorHandler
-from sanic.helpers import Default
+from sanic.helpers import Default, _default
from sanic.http import Stage
from sanic.log import (
LOGGING_CONFIG_DEFAULTS,
@@ -69,6 +69,7 @@
error_logger,
logger,
)
+from sanic.middleware import Middleware, MiddlewareLocation
from sanic.mixins.listeners import ListenerEvent
from sanic.mixins.startup import StartupMixin
from sanic.models.futures import (
@@ -294,8 +295,12 @@ def register_listener(
return listener
def register_middleware(
- self, middleware: MiddlewareType, attach_to: str = "request"
- ) -> MiddlewareType:
+ self,
+ middleware: Union[MiddlewareType, Middleware],
+ attach_to: str = "request",
+ *,
+ priority: Union[Default, int] = _default,
+ ) -> Union[MiddlewareType, Middleware]:
"""
Register an application level middleware that will be attached
to all the API URLs registered under this application.
@@ -311,19 +316,37 @@ def register_middleware(
**response** - Invoke before the response is returned back
:return: decorated method
"""
- if attach_to == "request":
+ retval = middleware
+ location = MiddlewareLocation[attach_to.upper()]
+
+ if not isinstance(middleware, Middleware):
+ middleware = Middleware(
+ middleware,
+ location=location,
+ priority=priority if isinstance(priority, int) else 0,
+ )
+ elif middleware.priority != priority and isinstance(priority, int):
+ middleware = Middleware(
+ middleware.func,
+ location=middleware.location,
+ priority=priority,
+ )
+
+ if location is MiddlewareLocation.REQUEST:
if middleware not in self.request_middleware:
self.request_middleware.append(middleware)
- if attach_to == "response":
+ if location is MiddlewareLocation.RESPONSE:
if middleware not in self.response_middleware:
self.response_middleware.appendleft(middleware)
- return middleware
+ return retval
def register_named_middleware(
self,
middleware: MiddlewareType,
route_names: Iterable[str],
attach_to: str = "request",
+ *,
+ priority: Union[Default, int] = _default,
):
"""
Method for attaching middleware to specific routes. This is mainly an
@@ -337,19 +360,35 @@ def register_named_middleware(
defaults to "request"
:type attach_to: str, optional
"""
- if attach_to == "request":
+ retval = middleware
+ location = MiddlewareLocation[attach_to.upper()]
+
+ if not isinstance(middleware, Middleware):
+ middleware = Middleware(
+ middleware,
+ location=location,
+ priority=priority if isinstance(priority, int) else 0,
+ )
+ elif middleware.priority != priority and isinstance(priority, int):
+ middleware = Middleware(
+ middleware.func,
+ location=middleware.location,
+ priority=priority,
+ )
+
+ if location is MiddlewareLocation.REQUEST:
for _rn in route_names:
if _rn not in self.named_request_middleware:
self.named_request_middleware[_rn] = deque()
if middleware not in self.named_request_middleware[_rn]:
self.named_request_middleware[_rn].append(middleware)
- if attach_to == "response":
+ if location is MiddlewareLocation.RESPONSE:
for _rn in route_names:
if _rn not in self.named_response_middleware:
self.named_response_middleware[_rn] = deque()
if middleware not in self.named_response_middleware[_rn]:
self.named_response_middleware[_rn].appendleft(middleware)
- return middleware
+ return retval
def _apply_exception_handler(
self,
diff --git a/sanic/middleware.py b/sanic/middleware.py
--- a/sanic/middleware.py
+++ b/sanic/middleware.py
@@ -32,6 +32,9 @@ def __init__(
def __call__(self, *args, **kwargs):
return self.func(*args, **kwargs)
+ def __hash__(self) -> int:
+ return hash(self.func)
+
def __repr__(self) -> str:
return (
f"{self.__class__.__name__}("
| diff --git a/tests/test_middleware_priority.py b/tests/test_middleware_priority.py
--- a/tests/test_middleware_priority.py
+++ b/tests/test_middleware_priority.py
@@ -3,7 +3,7 @@
import pytest
from sanic import Sanic
-from sanic.middleware import Middleware
+from sanic.middleware import Middleware, MiddlewareLocation
from sanic.response import json
@@ -40,6 +40,86 @@ def reset_middleware():
Middleware.reset_count()
+def test_add_register_priority(app: Sanic):
+ def foo(*_):
+ ...
+
+ app.register_middleware(foo, priority=999)
+ assert len(app.request_middleware) == 1
+ assert len(app.response_middleware) == 0
+ assert app.request_middleware[0].priority == 999 # type: ignore
+ app.register_middleware(foo, attach_to="response", priority=999)
+ assert len(app.request_middleware) == 1
+ assert len(app.response_middleware) == 1
+ assert app.response_middleware[0].priority == 999 # type: ignore
+
+
+def test_add_register_named_priority(app: Sanic):
+ def foo(*_):
+ ...
+
+ app.register_named_middleware(foo, route_names=["foo"], priority=999)
+ assert len(app.named_request_middleware) == 1
+ assert len(app.named_response_middleware) == 0
+ assert app.named_request_middleware["foo"][0].priority == 999 # type: ignore
+ app.register_named_middleware(
+ foo, attach_to="response", route_names=["foo"], priority=999
+ )
+ assert len(app.named_request_middleware) == 1
+ assert len(app.named_response_middleware) == 1
+ assert app.named_response_middleware["foo"][0].priority == 999 # type: ignore
+
+
+def test_add_decorator_priority(app: Sanic):
+ def foo(*_):
+ ...
+
+ app.middleware(foo, priority=999)
+ assert len(app.request_middleware) == 1
+ assert len(app.response_middleware) == 0
+ assert app.request_middleware[0].priority == 999 # type: ignore
+ app.middleware(foo, attach_to="response", priority=999)
+ assert len(app.request_middleware) == 1
+ assert len(app.response_middleware) == 1
+ assert app.response_middleware[0].priority == 999 # type: ignore
+
+
+def test_add_convenience_priority(app: Sanic):
+ def foo(*_):
+ ...
+
+ app.on_request(foo, priority=999)
+ assert len(app.request_middleware) == 1
+ assert len(app.response_middleware) == 0
+ assert app.request_middleware[0].priority == 999 # type: ignore
+ app.on_response(foo, priority=999)
+ assert len(app.request_middleware) == 1
+ assert len(app.response_middleware) == 1
+ assert app.response_middleware[0].priority == 999 # type: ignore
+
+
+def test_add_conflicting_priority(app: Sanic):
+ def foo(*_):
+ ...
+
+ middleware = Middleware(foo, MiddlewareLocation.REQUEST, priority=998)
+ app.register_middleware(middleware=middleware, priority=999)
+ assert app.request_middleware[0].priority == 999 # type: ignore
+ middleware.priority == 998
+
+
+def test_add_conflicting_priority_named(app: Sanic):
+ def foo(*_):
+ ...
+
+ middleware = Middleware(foo, MiddlewareLocation.REQUEST, priority=998)
+ app.register_named_middleware(
+ middleware=middleware, route_names=["foo"], priority=999
+ )
+ assert app.named_request_middleware["foo"][0].priority == 999 # type: ignore
+ middleware.priority == 998
+
+
@pytest.mark.parametrize(
"expected,priorities",
PRIORITY_TEST_CASES,
| Missing priority functionality in app-wide middleware
## Description
In v22.9 the priority feature was added, but unfortunately it appears it was overlooked here and there is no `priority` parameter to `register_middleware()` for `Sanic`.
I am looking to use this feature because I have a Blueprint-specific middleware which appears to run before the application-wide middleware. In my case the application-wide middleware does authorization and updates `request.ctx` which is then used in the Blueprint-specific middleware.
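A sketch of the intended usage, based on the `priority` argument added in this PR (handler body is illustrative):
```python
async def authorize(request):
    request.ctx.user = ...  # app-wide auth, must run before blueprint middleware

app.register_middleware(authorize, "request", priority=99)
```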
### Is there an existing issue for this?
- [X] I have searched the existing issues
## Environment
Sanic (22.9.1; Routing 22.8.0) is run on Windows using ASGI.
| 2022-12-18T13:04:05 |
|
sanic-org/sanic | 2,640 | sanic-org__sanic-2640 | [
"2638"
] | 029f5640324c2729f654d8007fcd110b3aaf2739 | diff --git a/sanic/server/websockets/connection.py b/sanic/server/websockets/connection.py
--- a/sanic/server/websockets/connection.py
+++ b/sanic/server/websockets/connection.py
@@ -9,8 +9,10 @@
Union,
)
+from sanic.exceptions import InvalidUsage
-ASIMessage = MutableMapping[str, Any]
+
+ASGIMessage = MutableMapping[str, Any]
class WebSocketConnection:
@@ -25,8 +27,8 @@ class WebSocketConnection:
def __init__(
self,
- send: Callable[[ASIMessage], Awaitable[None]],
- receive: Callable[[], Awaitable[ASIMessage]],
+ send: Callable[[ASGIMessage], Awaitable[None]],
+ receive: Callable[[], Awaitable[ASGIMessage]],
subprotocols: Optional[List[str]] = None,
) -> None:
self._send = send
@@ -47,7 +49,13 @@ async def recv(self, *args, **kwargs) -> Optional[str]:
message = await self._receive()
if message["type"] == "websocket.receive":
- return message["text"]
+ try:
+ return message["text"]
+ except KeyError:
+ try:
+ return message["bytes"].decode()
+ except KeyError:
+ raise InvalidUsage("Bad ASGI message received")
elif message["type"] == "websocket.disconnect":
pass
| There is an obvious bug in ASGI WebsocketConnection of Sanic
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Describe the bug
I started my Sanic app with UvicornWorker, so the original websocket becomes a WebsocketConnection. When I call the `ws.recv` function, it reports an error if bytes data is received:
`KeyError: 'text'`
https://github.com/sanic-org/sanic/blob/main/sanic/server/websockets/connection.py
```python
async def recv(self, *args, **kwargs) -> Optional[str]:
    message = await self._receive()

    if message["type"] == "websocket.receive":
        return message["text"]
    elif message["type"] == "websocket.disconnect":
        pass

    return None
```
There is no data of bytes type processed here.
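A possible handling sketch — per the ASGI spec, a `websocket.receive` message carries either a `text` or a `bytes` key (this is not the actual fix, just an illustration):
```python
if message["type"] == "websocket.receive":
    if message.get("text") is not None:
        return message["text"]
    return message["bytes"]
```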
### Code snippet
_No response_
### Expected Behavior
_No response_
### How do you run Sanic?
ASGI
### Operating System
ubuntu
### Sanic Version
22.3
### Additional context
_No response_
| I cannot reproduce this on `main`, or even on `22.3` using uvicorn.
```py
from sanic import Request, Sanic
app = Sanic("TestApp")
@app.websocket("/feed")
async def feed(request: Request, ws):
while True:
data = await ws.recv()
await ws.send(data)
```
I am closing this now as I see no bug. If you have a reproducible snippet, please send.
> I cannot reproduce this on `main`, or even on `22.3` using uvicorn.
>
> ```python
> from sanic import Request, Sanic
>
> app = Sanic("TestApp")
>
>
> @app.websocket("/feed")
> async def feed(request: Request, ws):
> while True:
> data = await ws.recv()
> await ws.send(data)
> ```
>
> I am closing this now as I see no bug. If you have a reproducible snippet, please send.
main.py
```
from sanic import Sanic

app = Sanic('ws')


@app.websocket('/feed')
async def feed(req, ws):
    while True:
        data = await ws.recv()
        print(data)
        if data is None:
            break


if __name__ == '__main__':
    import websockets
    import asyncio

    async def main():
        async with websockets.connect('ws://127.0.0.1:8889/feed') as ws:
            await ws.send('123')
            await ws.send(bytes(123))

    asyncio.get_event_loop().run_until_complete(main())
```
run: gunicorn main:app --bind 0.0.0.0:8889 --worker-class uvicorn.workers.UvicornWorker
Nope. Still works fine for me.
> Nope. Still works fine for me.
That's weird. However, you can look at this code in sanic, and it does seem to have a problem. It seems to only handle text, right?
[connection.py](https://github.com/sanic-org/sanic/blob/main/sanic/server/websockets/connection.py)
```
async def recv(self, *args, **kwargs) -> Optional[str]:
    message = await self._receive()

    if message["type"] == "websocket.receive":
        return message["text"]
    elif message["type"] == "websocket.disconnect":
        pass

    return None
```
Yeah, will go back and look at the spec to see exactly what it's supposed to be. | 2022-12-25T10:16:10 |
|
sanic-org/sanic | 2,651 | sanic-org__sanic-2651 | [
"2644"
] | c7a71cd00c10334d4440a1a8b23b480ac7b987f6 | diff --git a/sanic/server/websockets/connection.py b/sanic/server/websockets/connection.py
--- a/sanic/server/websockets/connection.py
+++ b/sanic/server/websockets/connection.py
@@ -45,7 +45,7 @@ async def send(self, data: Union[str, bytes], *args, **kwargs) -> None:
await self._send(message)
- async def recv(self, *args, **kwargs) -> Optional[str]:
+ async def recv(self, *args, **kwargs) -> Optional[Union[str, bytes]]:
message = await self._receive()
if message["type"] == "websocket.receive":
@@ -53,7 +53,7 @@ async def recv(self, *args, **kwargs) -> Optional[str]:
return message["text"]
except KeyError:
try:
- return message["bytes"].decode()
+ return message["bytes"]
except KeyError:
raise InvalidUsage("Bad ASGI message received")
elif message["type"] == "websocket.disconnect":
| diff --git a/tests/test_asgi.py b/tests/test_asgi.py
--- a/tests/test_asgi.py
+++ b/tests/test_asgi.py
@@ -342,7 +342,7 @@ async def test_websocket_send(send, receive, message_stack):
@pytest.mark.asyncio
-async def test_websocket_receive(send, receive, message_stack):
+async def test_websocket_text_receive(send, receive, message_stack):
msg = {"text": "hello", "type": "websocket.receive"}
message_stack.append(msg)
@@ -351,6 +351,15 @@ async def test_websocket_receive(send, receive, message_stack):
assert text == msg["text"]
[email protected]
+async def test_websocket_bytes_receive(send, receive, message_stack):
+ msg = {"bytes": b"hello", "type": "websocket.receive"}
+ message_stack.append(msg)
+
+ ws = WebSocketConnection(send, receive)
+ data = await ws.receive()
+
+ assert data == msg["bytes"]
@pytest.mark.asyncio
async def test_websocket_accept_with_no_subprotocols(
| ASGI websocket must pass thru bytes as is
_Originally posted by @Tronic in https://github.com/sanic-org/sanic/pull/2640#discussion_r1058027028_
|
If no one else is working on this issue by next week, I will work this issue. :D
it's yours | 2023-01-08T03:15:02 |
sanic-org/sanic | 2,659 | sanic-org__sanic-2659 | [
"2658"
] | 9cb9e88678b5ca7f85d870348ceb8349d9f87bb1 | diff --git a/sanic/errorpages.py b/sanic/errorpages.py
--- a/sanic/errorpages.py
+++ b/sanic/errorpages.py
@@ -406,16 +406,13 @@ def escape(text):
v: k for k, v in RENDERERS_BY_CONTENT_TYPE.items()
}
+# Handler source code is checked for which response types it returns with the
+# route error_format="auto" (default) to determine which format to use.
RESPONSE_MAPPING = {
- "empty": "html",
"json": "json",
"text": "text",
- "raw": "text",
"html": "html",
- "file": "html",
- "file_stream": "text",
- "stream": "text",
- "redirect": "html",
+ "JSONResponse": "json",
"text/plain": "text",
"text/html": "html",
"application/json": "json",
| FALLBACK_ERROR_FORMAT does not work with empty()
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Describe the bug
Today I tested a handler which always returns `empty()` (see example below).
I was quite confused by the format of the errors, as they were all in HTML format, even though the fallback was set to json.
It took me a while to figure out that this was because of the `empty()` statement returned by the function, which somehow hijacks the error format, so that the errors are converted to html.
I could use the `error_format` from now on for handlers returning `empty()`, but was wondering if this is a feature or a bug.
### Code snippet
```python
from sanic import Sanic, empty
from sanic.exceptions import Unauthorized
app = Sanic(__name__)
app.config.FALLBACK_ERROR_FORMAT = "json"
# Returns error in html format
@app.post("/empty")
async def empty_(request):
raise Unauthorized("bli bla blub")
return empty()
# Returns error in json format
@app.post("/nothing")
async def nothing(request):
raise Unauthorized("bli bla blub")
if __name__ == '__main__':
app.run("127.0.0.1", port=3000, auto_reload=True, debug=True, dev=True)
```
### How do you run Sanic?
As a script
### Operating System
Ubuntu
### Sanic Version
22.9.1
| Triage. Sanic tries to determine the response type automatically using various heuristics. The one you hit is that the handler returns empty(), which Sanic maps to HTML and considers the case solved, no fallback, error page in HTML. @ahopkins Can we make empty() unopinionated and use the fallback instead?
One way to avoid this is to add an `accept: application/json` header to your requests, then Sanic will always respond in JSON and not use any of these heuristics.
@simon-lund Thanks for the report and a useful test case, made triaging much easier! The heuristics are quite complicated but we can probably let empty run to the fallback which also gets what you need without the header. | 2023-01-19T00:25:37 |
|
sanic-org/sanic | 2,666 | sanic-org__sanic-2666 | [
"2442"
] | 4ad8168bb016cef19213cd3db2b12efb1a4dcb30 | diff --git a/sanic/blueprints.py b/sanic/blueprints.py
--- a/sanic/blueprints.py
+++ b/sanic/blueprints.py
@@ -304,9 +304,6 @@ def register(self, app, options):
# Routes
for future in self._future_routes:
- # attach the blueprint name to the handler so that it can be
- # prefixed properly in the router
- future.handler.__blueprintname__ = self.name
# Prepend the blueprint URI prefix if available
uri = self._setup_uri(future.uri, url_prefix)
| Deprecate __blueprintname__
This code no longer seems like it is necessary: https://github.com/sanic-org/sanic/blob/5d683c6ea4b615e80c51d80189436437b824cce6/sanic/blueprints.py#L309
Since you can get access to the name of the blueprint from the request object, the route object, and the blueprint object, I do not think there is any need for `__blueprintname__` anymore.
See #2440
| This issue has been automatically marked as stale because it has not had recent activity. It will be closed if no further activity occurs. If this is incorrect, please respond with an update. Thank you for your contributions.
Hello, @ahopkins. Since I'm experiencing the same error as mentioned in #2440, and, making the functions `static` works just fine for me too, I would prefer to keep my functions the way they are.
That said, I would like to open a PR to fix this issue, but I have never contributed to the Sanic codebase before, and I'm not 100% sure of what to do.
What are your suggestions to fix this? Removing the `future.handler.__blueprintname__ = self.name` completely, or do you have anything else in mind for this? If you have any directions for me, I would be happy to open a PR to fix that.
Awesome. I would love to have your first contribution, and happy to walk you thru it. There is some [information already](https://github.com/sanic-org/sanic/blob/main/CONTRIBUTING.rst), but certainly reach out and let me know if you have questions.
As for the specifics here, I think we probably can just remove it. However I am reluctant for backwards compat to do anything before v23 at this point. One of the things I would like to introduce is to wrap all handlers with some sort of a parent object to make the API a little more consistent and we do not do hacky things like adding properties to functions.
Thanks, @ahopkins! I will see if I can provide a patch by today.
I tested locally with a project, and removing that line worked just fine for me. Not sure if it broke anything else, but I will see if I can do that today and give that a proper test. | 2023-01-27T03:04:59 |
|
sanic-org/sanic | 2,670 | sanic-org__sanic-2670 | [
"2681"
] | 6848ff24d81e5c07ca12c66958237f15db4705df | diff --git a/sanic/config.py b/sanic/config.py
--- a/sanic/config.py
+++ b/sanic/config.py
@@ -43,14 +43,14 @@
"DEPRECATION_FILTER": "once",
"FORWARDED_FOR_HEADER": "X-Forwarded-For",
"FORWARDED_SECRET": None,
- "GRACEFUL_SHUTDOWN_TIMEOUT": 15.0, # 15 sec
+ "GRACEFUL_SHUTDOWN_TIMEOUT": 15.0,
"INSPECTOR": False,
"INSPECTOR_HOST": "localhost",
"INSPECTOR_PORT": 6457,
"INSPECTOR_TLS_KEY": _default,
"INSPECTOR_TLS_CERT": _default,
"INSPECTOR_API_KEY": "",
- "KEEP_ALIVE_TIMEOUT": 5, # 5 seconds
+ "KEEP_ALIVE_TIMEOUT": 120,
"KEEP_ALIVE": True,
"LOCAL_CERT_CREATOR": LocalCertCreator.AUTO,
"LOCAL_TLS_KEY": _default,
@@ -61,16 +61,16 @@
"NOISY_EXCEPTIONS": False,
"PROXIES_COUNT": None,
"REAL_IP_HEADER": None,
- "REQUEST_BUFFER_SIZE": 65536, # 64 KiB
- "REQUEST_MAX_HEADER_SIZE": 8192, # 8 KiB, but cannot exceed 16384
+ "REQUEST_BUFFER_SIZE": 65536,
+ "REQUEST_MAX_HEADER_SIZE": 8192, # Cannot exceed 16384
"REQUEST_ID_HEADER": "X-Request-ID",
- "REQUEST_MAX_SIZE": 100000000, # 100 megabytes
- "REQUEST_TIMEOUT": 60, # 60 seconds
- "RESPONSE_TIMEOUT": 60, # 60 seconds
+ "REQUEST_MAX_SIZE": 100_000_000,
+ "REQUEST_TIMEOUT": 60,
+ "RESPONSE_TIMEOUT": 60,
"TLS_CERT_PASSWORD": "",
"TOUCHUP": _default,
"USE_UVLOOP": _default,
- "WEBSOCKET_MAX_SIZE": 2**20, # 1 megabyte
+ "WEBSOCKET_MAX_SIZE": 2**20, # 1 MiB
"WEBSOCKET_PING_INTERVAL": 20,
"WEBSOCKET_PING_TIMEOUT": 20,
}
| Send keep-alive header and increase the default timeout
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Is your feature request related to a problem? Please describe.
Nginx doesn't notice Sanic closing its keep-alive connection by timeout, and then another request will fail for no good reason. High-latency connections suffer from frequent reconnections when a client or proxy loads additional resources 5+ seconds later, despite keeping such connections open being practically free with the async server.
### Describe the solution you'd like
Sanic http1 server should send `keep-alive: timeout=<KEEP_ALIVE_TIMEOUT>`, to inform the client of the duration it allows. Also Sanic should not limit max number of requests per pipeline (no reason to) and use a higher default `KEEP_ALIVE_TIMEOUT` setting of at least 70 seconds to exceed that of Nginx to avoid connection problems.
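For illustration, the response headers being proposed would look like this on the wire (values assume the new 120s default):
```http
HTTP/1.1 200 OK
connection: keep-alive
keep-alive: timeout=120
```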
Sanic should also investigate ways of closing the connection so that Nginx or another client gets notice of the connection RST and won't attempt to send another request once those are no longer accepted. This could be problematic with TLS, but it is also broken in plain HTTP in the current version.
### Additional context
HTTP/3 does not use keep-alive header. On ASGI this might be up to the ASGI server, needs investigation.
| 2023-02-01T16:17:39 |
||
sanic-org/sanic | 2,680 | sanic-org__sanic-2680 | [
"2684"
] | 5e7f6998bdccce325a4c30d940d02d9d1e40b11e | diff --git a/sanic/mixins/startup.py b/sanic/mixins/startup.py
--- a/sanic/mixins/startup.py
+++ b/sanic/mixins/startup.py
@@ -877,7 +877,10 @@ def serve(
sync_manager.shutdown()
for sock in socks:
- sock.shutdown(SHUT_RDWR)
+ try:
+ sock.shutdown(SHUT_RDWR)
+ except OSError:
+ ...
sock.close()
socks = []
trigger_events(main_stop, loop, primary)
| Sanic doesn't shutdown cleanly on Mac
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Describe the bug
When running a simple server on macOS 13.1, after using Ctrl-C to shut down the app, a socket exception is thrown instead of a graceful shutdown.
```sh
python3 helloworld.py
[2023-02-14 12:23:23 -0700] [6169] [DEBUG] Creating multiprocessing context using 'spawn'
[2023-02-14 12:23:23][DEBUG] Creating multiprocessing context using 'spawn'
[2023-02-14 12:23:23 -0700] [6169] [DEBUG] Starting a process: Sanic-Server-0-0
[2023-02-14 12:23:23][DEBUG] Starting a process: Sanic-Server-0-0
[2023-02-14 12:23:24 -0700] [6175] [DEBUG] Process ack: Sanic-Server-0-0 [6175]
[2023-02-14 12:23:24][DEBUG] Process ack: Sanic-Server-0-0 [6175]
[2023-02-14 12:23:24 -0700] [6175] [INFO] Starting worker [6175]
[2023-02-14 12:23:24][INFO] Starting worker [6175]
^C[2023-02-14 12:23:26 -0700] [6169] [INFO] Received signal SIGINT. Shutting down.
[2023-02-14 12:23:26][INFO] Received signal SIGINT. Shutting down.
[2023-02-14 12:23:26 -0700] [6169] [DEBUG] Terminating a process: Sanic-Server-0-0 [6175]
[2023-02-14 12:23:26][DEBUG] Terminating a process: Sanic-Server-0-0 [6175]
[2023-02-14 12:23:26 -0700] [6169] [INFO] Server Stopped
[2023-02-14 12:23:26][INFO] Server Stopped
Traceback (most recent call last):
File "/Users/tylerprete/sandbox/asana/asana2/asana/server/kube_app/apps/helloworld/helloworld.py", line 22, in <module>
app.run(host="127.0.0.1", port=8086, debug=True)
File "/usr/local/lib/python3.9/site-packages/sanic/mixins/startup.py", line 209, in run
serve(primary=self) # type: ignore
File "/usr/local/lib/python3.9/site-packages/sanic/mixins/startup.py", line 880, in serve
sock.shutdown(SHUT_RDWR)
OSError: [Errno 57] Socket is not connected
[2023-02-14 12:23:26 -0700] [6175] [INFO] Stopping worker [6175]
[2023-02-14 12:23:26][INFO] Stopping worker [6175]
```
### Code snippet
```python3
from sanic import Sanic
from sanic.response import html, text

app = Sanic("helloworld")

@app.get("/")
def hello_world(request):
    print("Serving /")
    return html("<p>Hello, World!</p>")

if __name__ == "__main__":
    app.run(host="127.0.0.1", port=8086, debug=True)
```
### Expected Behavior
On Linux I run this and get the following (removing the Sanic banners for brevity):
```sh
python3 helloworld.py
[2023-02-14 19:17:43 +0000] [23570] [DEBUG] Creating multiprocessing context using 'spawn'
[2023-02-14 19:17:43][DEBUG] Creating multiprocessing context using 'spawn'
[2023-02-14 19:17:43 +0000] [23570] [DEBUG] Starting a process: Sanic-Server-0-0
[2023-02-14 19:17:43][DEBUG] Starting a process: Sanic-Server-0-0
[2023-02-14 19:17:43 +0000] [23579] [DEBUG] Process ack: Sanic-Server-0-0 [23579]
[2023-02-14 19:17:43][DEBUG] Process ack: Sanic-Server-0-0 [23579]
[2023-02-14 19:17:43 +0000] [23579] [INFO] Starting worker [23579]
[2023-02-14 19:17:43][INFO] Starting worker [23579]
^C[2023-02-14 19:17:45 +0000] [23570] [INFO] Received signal SIGINT. Shutting down.
[2023-02-14 19:17:45][INFO] Received signal SIGINT. Shutting down.
[2023-02-14 19:17:45 +0000] [23570] [DEBUG] Terminating a process: Sanic-Server-0-0 [23579]
[2023-02-14 19:17:45][DEBUG] Terminating a process: Sanic-Server-0-0 [23579]
[2023-02-14 19:17:45 +0000] [23570] [INFO] Server Stopped
[2023-02-14 19:17:45][INFO] Server Stopped
[2023-02-14 19:17:45 +0000] [23579] [INFO] Stopping worker [23579]
[2023-02-14 19:17:45][INFO] Stopping worker [23579]
```
### How do you run Sanic?
As a script (`app.run` or `Sanic.serve`)
### Operating System
macOS Ventura 13.1
### Sanic Version
22.12.0
### Additional context
_No response_
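Editorial addition (not in the original report): a minimal sketch of the platform difference behind `[Errno 57]`. macOS/BSD reject `shutdown()` on a listening socket that never accepted a connection, while Linux tolerates it, which is why the patch above swallows exactly this `OSError`:
```python
import socket

s = socket.socket()
s.bind(("127.0.0.1", 0))
s.listen()
try:
    s.shutdown(socket.SHUT_RDWR)  # raises OSError [Errno 57] on macOS; tolerated on Linux
except OSError as e:
    print(e)
s.close()
```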
| 2023-02-11T21:04:54 |
||
sanic-org/sanic | 2,704 | sanic-org__sanic-2704 | [
"2699"
] | 5ee36fd933344861cb578105b3ad25b032f22912 | diff --git a/sanic/app.py b/sanic/app.py
--- a/sanic/app.py
+++ b/sanic/app.py
@@ -16,7 +16,7 @@
)
from asyncio.futures import Future
from collections import defaultdict, deque
-from contextlib import suppress
+from contextlib import contextmanager, suppress
from functools import partial
from inspect import isawaitable
from os import environ
@@ -33,6 +33,7 @@
Deque,
Dict,
Iterable,
+ Iterator,
List,
Optional,
Set,
@@ -433,14 +434,15 @@ def _apply_route(self, route: FutureRoute) -> List[Route]:
ctx = params.pop("route_context")
- routes = self.router.add(**params)
- if isinstance(routes, Route):
- routes = [routes]
+ with self.amend():
+ routes = self.router.add(**params)
+ if isinstance(routes, Route):
+ routes = [routes]
- for r in routes:
- r.extra.websocket = websocket
- r.extra.static = params.get("static", False)
- r.ctx.__dict__.update(ctx)
+ for r in routes:
+ r.extra.websocket = websocket
+ r.extra.static = params.get("static", False)
+ r.ctx.__dict__.update(ctx)
return routes
@@ -449,17 +451,19 @@ def _apply_middleware(
middleware: FutureMiddleware,
route_names: Optional[List[str]] = None,
):
- if route_names:
- return self.register_named_middleware(
- middleware.middleware, route_names, middleware.attach_to
- )
- else:
- return self.register_middleware(
- middleware.middleware, middleware.attach_to
- )
+ with self.amend():
+ if route_names:
+ return self.register_named_middleware(
+ middleware.middleware, route_names, middleware.attach_to
+ )
+ else:
+ return self.register_middleware(
+ middleware.middleware, middleware.attach_to
+ )
def _apply_signal(self, signal: FutureSignal) -> Signal:
- return self.signal_router.add(*signal)
+ with self.amend():
+ return self.signal_router.add(*signal)
def dispatch(
self,
@@ -1520,6 +1524,27 @@ def _check_uvloop_conflict(cls) -> None:
# Lifecycle
# -------------------------------------------------------------------- #
+ @contextmanager
+ def amend(self) -> Iterator[None]:
+ """
+ If the application has started, this function allows changes
+ to be made to add routes, middleware, and signals.
+ """
+ if not self.state.is_started:
+ yield
+ else:
+ do_router = self.router.finalized
+ do_signal_router = self.signal_router.finalized
+ if do_router:
+ self.router.reset()
+ if do_signal_router:
+ self.signal_router.reset()
+ yield
+ if do_signal_router:
+ self.signalize(self.config.TOUCHUP)
+ if do_router:
+ self.finalize()
+
def finalize(self):
try:
self.router.finalize()
| diff --git a/tests/test_late_adds.py b/tests/test_late_adds.py
new file mode 100644
--- /dev/null
+++ b/tests/test_late_adds.py
@@ -0,0 +1,54 @@
+import pytest
+
+from sanic import Sanic, text
+
+
[email protected]
+def late_app(app: Sanic):
+ app.config.TOUCHUP = False
+ app.get("/")(lambda _: text(""))
+ return app
+
+
+def test_late_route(late_app: Sanic):
+ @late_app.before_server_start
+ async def late(app: Sanic):
+ @app.get("/late")
+ def handler(_):
+ return text("late")
+
+ _, response = late_app.test_client.get("/late")
+ assert response.status_code == 200
+ assert response.text == "late"
+
+
+def test_late_middleware(late_app: Sanic):
+ @late_app.get("/late")
+ def handler(request):
+ return text(request.ctx.late)
+
+ @late_app.before_server_start
+ async def late(app: Sanic):
+ @app.on_request
+ def handler(request):
+ request.ctx.late = "late"
+
+ _, response = late_app.test_client.get("/late")
+ assert response.status_code == 200
+ assert response.text == "late"
+
+
+def test_late_signal(late_app: Sanic):
+ @late_app.get("/late")
+ def handler(request):
+ return text(request.ctx.late)
+
+ @late_app.before_server_start
+ async def late(app: Sanic):
+ @app.signal("http.lifecycle.request")
+ def handler(request):
+ request.ctx.late = "late"
+
+ _, response = late_app.test_client.get("/late")
+ assert response.status_code == 200
+ assert response.text == "late"
| request middleware not running when registered in a method since 22.9.0
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Describe the bug
Prior to 22.9.0, you could register request middleware in functions and listeners. Since 22.9.0 this has stopped working. I'm unclear on whether this is by design or not, but I wasn't able to find anything in the docs that explains the discrepancy.
The last working version I tested was 22.6.2.
### Code snippet
```
from sanic import Sanic, response

app = Sanic("my-hello-world-app")

@app.middleware("request")
async def this_always_works(request):
    print("this is request middleware")

@app.before_server_start
async def before_server_start_listener(app, loop):
    def this_fails_in_22_9_0_and_after(request):
        print("this will not fire after in 22.9.0 and after")

    app.register_middleware(this_fails_in_22_9_0_and_after, "request")

@app.route('/')
async def test(request):
    return response.json({'hello': 'world'})

if __name__ == '__main__':
    app.register_middleware(lambda app: print("this will also not fire in 22.9.0 and after"), "request")
    app.run(host="0.0.0.0", debug=True, auto_reload=True)
```
### Expected Behavior
I expected all middleware to fire, but only the first one I set up fires since 22.9.0. I know middleware priority was introduced in 22.9, but I wouldn't expect that to have broken existing apps. I tried explicitly setting the priority and I still was not able to get the middleware to fire.
### How do you run Sanic?
As a script (`app.run` or `Sanic.serve`)
### Operating System
Linux
### Sanic Version
22.9.0
### Additional context
Thanks for Sanic!
| When I first looked at this earlier I was super shocked and quite disturbed to hear this.
1. This is meant to work
2. I do this very thing in a bunch of production applications
So I think we need to clarify a few things.
## Middleware added in a listener _will_ work and run
Your example code draws an incorrect conclusion here:
```python
@app.before_server_start
async def before_server_start_listener(app, loop):
    def this_fails_in_22_9_0_and_after(request):
        print("this will not fire after in 22.9.0 and after")

    app.register_middleware(this_fails_in_22_9_0_and_after, "request")
```
This _should_ work and will work. Or, rather, I cannot reproduce a scenario where it does not.

## Adding to an app instance inside an `if __name__ == "__main__"` block is _NOT_ supported
When you define this block...
```py
if __name__ == '__main__':
    app.register_middleware(lambda app: print("this will also not fire in 22.9.0 and after"), "request")
    app.run(host="0.0.0.0", debug=True, auto_reload=True)
```
... what it means is **ONLY** run this code when the main Python interpreter process is running.
Sanic underwent a big ([yet super powerful](https://sanic.dev/en/guide/deployment/manager.html)) [shift in 22.9](https://sanic.dev/en/guide/release-notes/v22.9.html#warning-important-new-worker-manager-rocket). Every worker gets its own dedicated process. This enables scaling, coordination, etc.
But it also means that anything run in that block is not attached to your actual running Application instance.
So, what can you do?
### Use a factory, or don't attach dynamically at run time in the `if __name__ == "__main__"` block
Going forward we are going to change a lot of the documentation to really suggest that developers lean more heavily on the CLI. When you do, using the factory pattern becomes very easy and is better suited to such runtime changes.
```py
def create_app():
    app = Sanic("MyApp")
    ...  # do stuff
    return app
```
```
$ sanic path.to.server:create_app --factory
```
You can achieve this of course with `if __name__ == "__main__"` and `app.run`, and that will of course always be supported. And, sometimes the simple pattern I showed above is inadequate.
Therefore, we documented how to create [dynamic runtime applications](https://sanic.dev/en/guide/deployment/app-loader.html). Perhaps this method will work for you?
### Use `single_process` mode
If you never intend to use multiple workers, perhaps you only want to run Sanic in single process mode:
```python
if __name__ == "__main__":
app.register_middleware(
lambda app: print("this will also not fire in 22.9.0 and after"),
"request",
)
app.run(..., single_process=True)
```
This does have a disadvantage though because now you do not have auto-reload for local development. That is itself running in a background process, therefore it is a tradeoff you need to decide upon.
### Keep runtime global scope
One thing is to simply move your registration into the global scope.
```python
app.register_middleware(lambda app: print("this will also not fire in 22.9.0 and after"), "request")

if __name__ == '__main__':
    app.run(host="0.0.0.0", debug=True, auto_reload=True)
```
Now, that will trigger on both the main process and every single worker process to attach your middleware.
### Run _ONLY_ in the correct process
Since we ultimately want to run the `register_middleware` on the worker process, you could just be explicit about it:
```python
if __name__ == "__mp_main__":
app.register_middleware(
lambda app: print("this will also not fire in 22.9.0 and after"),
"request",
)
elif __name__ == "__main__":
app.run(host="0.0.0.0", debug=True, auto_reload=True)
```
There really is no reason to have the registration of middleware in the main app in most use cases. But, you could of course see how you could do both if you needed.
---
One final note:
This...
```py
app.run(host="0.0.0.0", debug=True, auto_reload=True)
```
can be simplified...
```py
app.run(host="0.0.0.0", dev=True)
```
Thanks for the quick and thoughtful response!
It's really the other issue, attaching middleware in `before_server_start`, that's blocking us from upgrading. I noticed the `main` issue when I was experimenting and trying to resolve that issue in various ways. Since it was a similar behavior change between the versions, I assumed it was probably part of the same issue which is why I included it in the report.
I'm using Python 3.10 and running Sanic via poetry in Fedora Linux, but I originally noticed this problem in Docker on my Mac laptop. I haven't had a chance to try the minimal example below on my Mac today, but I will give it a try tomorrow and report back here.
Can you see anything in my setup that would make this reproduce consistently for me? More details are below.
My `pyproject.toml` file looks like this:
```toml
[tool.poetry]
name = "sanic-bug"
version = "0.1.0"
description = ""
authors = ["Semmy Purewal <[email protected]>"]
readme = "README.md"
[tool.poetry.dependencies]
python = "^3.10"
sanic = "22.6.2"
[build-system]
requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"
```
And my `server.py` looks like this:
```py
from sanic import Sanic, response

app = Sanic("my-hello-world-app")

@app.middleware("request")
async def this_always_works(request):
    print("this is request middleware")

@app.before_server_start
async def before_server_start_listener(app, loop):
    def this_fails_in_22_9_0_and_after(request):
        print("this will not fire after in 22.9.0 and after")

    app.register_middleware(this_fails_in_22_9_0_and_after, "request")

@app.route('/')
async def test(request):
    return response.json({'hello': 'world'})

if __name__ == '__main__':
    app.run(host="0.0.0.0", dev=True)
```
Running `poetry install`, then `poetry run python server.py`, then hitting the endpoint, both middlewares run as we'd expect:
```
$ poetry run python server.py
[2023-03-01 20:13:34 -0500] [1401809] [INFO]
  ┌────────────────────────────────────────────────────────────────────────────────────┐
  │                                   Sanic v22.6.2                                    │
  │                          Goin' Fast @ http://0.0.0.0:8000                          │
  ├───────────────────────┬────────────────────────────────────────────────────────────┤
  │                       │         mode: debug, single worker                         │
  │     ▄███ █████ ██     │       server: sanic, HTTP/1.1                              │
  │    ██                 │       python: 3.10.9                                       │
  │     ▀███████ ███▄     │     platform: Linux-6.1.7-100.fc36.x86_64-x86_64-with-glibc2.35 │
  │                 ██    │ auto-reload: enabled                                       │
  │    ████ ████████▀     │     packages: sanic-routing==22.3.0                        │
  │                       │                                                            │
  │ Build Fast. Run Fast. │                                                            │
  └───────────────────────┴────────────────────────────────────────────────────────────┘
[2023-03-01 20:13:34 -0500] [1401809] [DEBUG] Dispatching signal: server.init.before
[2023-03-01 20:13:34 -0500] [1401809] [DEBUG] Dispatching signal: server.init.after
[2023-03-01 20:13:34 -0500] [1401809] [INFO] Starting worker [1401809]
[2023-03-01 20:13:38 -0500] [1401809] [DEBUG] Dispatching signal: http.lifecycle.begin
[2023-03-01 20:13:38 -0500] [1401809] [DEBUG] Dispatching signal: http.lifecycle.read_head
[2023-03-01 20:13:38 -0500] [1401809] [DEBUG] Dispatching signal: http.lifecycle.request
[2023-03-01 20:13:38 -0500] [1401809] [DEBUG] Dispatching signal: http.lifecycle.handle
[2023-03-01 20:13:38 -0500] [1401809] [DEBUG] Dispatching signal: http.routing.before
[2023-03-01 20:13:38 -0500] [1401809] [DEBUG] Dispatching signal: http.routing.after
[2023-03-01 20:13:38 -0500] [1401809] [DEBUG] Dispatching signal: http.middleware.before
this is request middleware
[2023-03-01 20:13:38 -0500] [1401809] [DEBUG] Dispatching signal: http.middleware.after
[2023-03-01 20:13:38 -0500] [1401809] [DEBUG] Dispatching signal: http.middleware.before
this will not fire after in 22.9.0 and after
[2023-03-01 20:13:38 -0500] [1401809] [DEBUG] Dispatching signal: http.middleware.after
[2023-03-01 20:13:38 -0500] [1401809] [DEBUG] Dispatching signal: http.lifecycle.response
[2023-03-01 20:13:38 -0500] - (sanic.access)[INFO][127.0.0.1:39236]: GET http://localhost:8000/ 200 17
[2023-03-01 20:13:38 -0500] [1401809] [DEBUG] Dispatching signal: http.lifecycle.send
[2023-03-01 20:13:38 -0500] [1401809] [DEBUG] Dispatching signal: http.lifecycle.begin
[2023-03-01 20:13:43 -0500] [1401809] [DEBUG] KeepAlive Timeout. Closing connection.
```
Updating `pyproject.toml` to use Sanic 22.9.0, reinstalling dependencies and running again exactly as above shows the middleware no longer runs for us. I also tried removing and recreating the venv, but still had the same results.
```
$ poetry run python server.py
[2023-03-01 20:16:31 -0500] [1411105] [INFO]
  ┌────────────────────────────────────────────────────────────────────────────────────┐
  │                                   Sanic v22.9.0                                    │
  │                          Goin' Fast @ http://0.0.0.0:8000                          │
  ├───────────────────────┬────────────────────────────────────────────────────────────┤
  │                       │         mode: debug, single worker                         │
  │     ▄███ █████ ██     │       server: sanic, HTTP/1.1                              │
  │    ██                 │       python: 3.10.9                                       │
  │     ▀███████ ███▄     │     platform: Linux-6.1.7-100.fc36.x86_64-x86_64-with-glibc2.35 │
  │                 ██    │ auto-reload: enabled                                       │
  │    ████ ████████▀     │     packages: sanic-routing==22.8.0                        │
  │                       │                                                            │
  │ Build Fast. Run Fast. │                                                            │
  └───────────────────────┴────────────────────────────────────────────────────────────┘
[2023-03-01 20:16:31 -0500] [1411105] [DEBUG] Starting a process: Sanic-Server-0-0
[2023-03-01 20:16:31 -0500] [1411105] [DEBUG] Starting a process: Sanic-Reloader-0
[2023-03-01 20:16:31 -0500] [1411116] [INFO] Starting worker [1411116]
this is request middleware
[2023-03-01 20:16:36 -0500] - (sanic.access)[INFO][127.0.0.1:46656]: GET http://localhost:8000/ 200 17
[2023-03-01 20:16:41 -0500] [1411116] [DEBUG] KeepAlive Timeout. Closing connection.
[2023-03-01 20:16:41 -0500] [1411116] [DEBUG] KeepAlive Timeout. Closing connection.
```
If you don't see anything obvious, we'll start digging into the code.
I am not experiencing that

Oh.... I see. :thinking: The difference was I had `sanic-ext` in my environment. Going to a clean env without that I see what you are seeing.
So the reason is that you are altering the router after it has tried to optimize itself. I will add something to the documentation about this.
Add this:
```py
app.router.reset()
app.register_middleware(this_fails_in_22_9_0_and_after, "request")
app.finalize()
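# Editorial note (an assumption based on the patch above): the merged change
# wraps this reset/finalize dance in an `amend()` context manager on Sanic,
# so the same late registration can instead be written as:
#
#     with app.amend():
#         app.register_middleware(this_fails_in_22_9_0_and_after, "request")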
``` | 2023-03-02T10:35:10 |
sanic-org/sanic | 2,719 | sanic-org__sanic-2719 | [
"2715"
] | 009954003c0ca57e8ec0d8157a7b43dc7635872a | diff --git a/sanic/http/tls/context.py b/sanic/http/tls/context.py
--- a/sanic/http/tls/context.py
+++ b/sanic/http/tls/context.py
@@ -159,7 +159,7 @@ def __new__(cls, cert, key, **kw):
# try common aliases, rename to cert/key
certfile = kw["cert"] = kw.pop("certificate", None) or cert
keyfile = kw["key"] = kw.pop("keyfile", None) or key
- password = kw.pop("password", None)
+ password = kw.get("password", None)
if not certfile or not keyfile:
raise ValueError("SSL dict needs filenames for cert and key.")
subject = {}
diff --git a/sanic/mixins/startup.py b/sanic/mixins/startup.py
--- a/sanic/mixins/startup.py
+++ b/sanic/mixins/startup.py
@@ -811,7 +811,7 @@ def serve(
ssl = kwargs.get("ssl")
if isinstance(ssl, SanicSSLContext):
- kwargs["ssl"] = kwargs["ssl"].sanic
+ kwargs["ssl"] = ssl.sanic
manager = WorkerManager(
primary.state.workers,
| diff --git a/tests/certs/password/fullchain.pem b/tests/certs/password/fullchain.pem
new file mode 100644
--- /dev/null
+++ b/tests/certs/password/fullchain.pem
@@ -0,0 +1,19 @@
+-----BEGIN CERTIFICATE-----
+MIIDCTCCAfGgAwIBAgIUa7OOlAGQfXOgUgRENJ9GbUgO7kwwDQYJKoZIhvcNAQEL
+BQAwFDESMBAGA1UEAwwJMTI3LjAuMC4xMB4XDTIzMDMyMDA3MzE1M1oXDTIzMDQx
+OTA3MzE1M1owFDESMBAGA1UEAwwJMTI3LjAuMC4xMIIBIjANBgkqhkiG9w0BAQEF
+AAOCAQ8AMIIBCgKCAQEAn2/RqVpzO7GFrgVGiowR5CzcFzf1tSFti1K/WIGr/jsu
+NP+1R3sim17pgg6SCOFnUMRS0KnDihkzoeP6z+0tFsrbCH4V1+fq0iud8WgYQrgD
+3ttUcHrz04p7wsMoeqndUQoLbyJzP8MpA2XJsoacdIVkuLv2AESGXLhJym/e9HGN
+g8bqdz25X0hVTczZW1FN9AZyWWVf9Go6jqC7LCaOnYXAnOkEy2/JHdkeNXYFZHB3
+71UemfkCjfp0vlRV8pVpkBGMhRNFphBTfxdqeWiGQwVqrhaJO4M7DJlQHCAPY16P
+o9ywnhLDhFHD7KIfTih9XxrdgTowqcwyGX3e3aJpTwIDAQABo1MwUTAdBgNVHQ4E
+FgQU5NogMq6mRBeGl4i6hIuUlcR2bVEwHwYDVR0jBBgwFoAU5NogMq6mRBeGl4i6
+hIuUlcR2bVEwDwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAYW34
+JY1kd0UO5HE41oxJD4PioQboXXX0al4RgKaUUsPykeHQbK0q0TSYAZLwRjooTVUO
+Wvna5bU2mzyULqA2r/Cr/w4zb9xybO3SiHFHcU1RacouauHXROHwRm98i8A73xnH
+vHws5BADr2ggnVcPNh4VOQ9ZvBlC7jhgpvMjqOEu5ZPCovhfZYfSsvBDHcD74ZYm
+Di9DvqsJmrb23Dv3SUykm3W+Ql2q+JyjFj30rhD89CFwJ9iSlFwTYEwZLHA+mV6p
+UKy3I3Fiht1Oc+nIivX5uhRSMbDVvDTVHbjjPujxxFjkiHXMjtwvwfg4Sb6du61q
+AjBRFyXbNu4hZkkHOA==
+-----END CERTIFICATE-----
diff --git a/tests/certs/password/privkey.pem b/tests/certs/password/privkey.pem
new file mode 100644
--- /dev/null
+++ b/tests/certs/password/privkey.pem
@@ -0,0 +1,30 @@
+-----BEGIN ENCRYPTED PRIVATE KEY-----
+MIIFLTBXBgkqhkiG9w0BBQ0wSjApBgkqhkiG9w0BBQwwHAQI94UBqjaZlG4CAggA
+MAwGCCqGSIb3DQIJBQAwHQYJYIZIAWUDBAEqBBCvJhEy+3/+0Ec0gpd5dkP6BIIE
+0E7rLplTe9rxK3sR9V0cx8Xn6V+uFhG3p7dzeMDCCKpGo9MEaacF5m+paGnBkMlH
+Pz3rRoLA5jqzwXl4US/C5E1Or//2YBgF1XXKi3BPF/bVx/g6vR+xeobf9kQGbqQk
+FNPYtP7mpg2dekp5BUsKSosIt8BkknWFvhBeNuGZT/zlMUuq1WpMe4KIh/W9IdNr
+HolcuZJWBhQAwGPciWIZRyq48wKa++W7Jdg/aG8FviJQnjaAUv4CyZJHUJnaNwUx
+iHOETpzIC+bhF2K+s4g5w68VCj6Jtz78sIBEZKzo7LI5QHdRHqYB5SJ/dGiV+h09
+R/rQ/M+24mwHDlRSCxxq0yuDwUuGBlHyATeDCFeE3L5OX8yTLuqYJ6vUa6UbzMYA
+8H4l5zfu9RrAhKYa9tD+4ONxMmHziIgmn5zvSXeBwJKfeUbnN4IKWLsSoSVspBRh
+zLl51DMAnem4NEjLfIW8WYjhsvSYwd9BYqxXaAiv4Wjx9ZV1yLqFICC7tejpVdRT
+afI0qMOfWu4ma6xVBg1ezLgF1wHIPrq6euTvWdnifYQopVICALlltEo5oxQ2i/OM
+NY8RyovWujiGNsa3pId9HmZXiLyLXjKPstGWRK4liMyc2EiP099gTdBvrb+VQp+I
+EyPavmh3WNhgZGOh3qah39X8HrBprc0PPfSPlxpaWdNMIIMSbcIWWdJEA/e4tcy/
+uBaV4H3sNCtBApgrb6B9YUbS9CXNUburJo19T1sk2uCaO12qYfdu2IDEnFf8JiF3
+i7nyftotRuoKq2D+V8d0PeMi/vJSo6+eZIn7VNe6ejYf+w0s7sxlpiKVzkslyOhq
+n0T4M3ZkSwGIETzgkRRuTY1OK7slhglMgXlQ2FuIUUo6CRg9WjRJvI5rujLzLWfB
+hkgP8STirjTV0DUWPFGtUcenvEcZPkYIQcoPHxOJGNW3ZPXNpt4RjbvPLeVzDm0O
+WJiay/qhag/bXGqKraO3b6Y7FOzJa8kG4G0XrcFY1s2oCXRqRqYJAtwaEeVCjCSJ
+Qy0OZkqcJEU7pv98pLMpG9OWz4Gle77g4KoQUJjQGtmg0MUMoPd0iPRmvkxsYg8E
+Q9uZS3m6PpWmmYDY0Ik1w/4avs3skl2mW3dqcZGLEepkjiQSnFABsuvxKd+uIEQy
+lyf9FrynXVcUI87LUkuniLRKwZZzFALVuc+BwtO3SA5mvEK22ZEq9QOysbwlpN54
+G5xXJKJEeexUSjEUIij4J89RLsXldibhp7YYZ7rFviR6chIqC0V7G6VqAM9TOCrV
+PWZXr3ZY5/pCZYs5DYKFJBFMSQ2UT/++VYxdZCeBH75vaxugbS8RdUM+iVDevWpQ
+/AnP1FolNAgkVhi3Rw4L16SibkqpEzIi1svPWKMwXdvewA32UidLElhuTWWjI2Wm
+veXhmEqwk/7ML4JMI7wHcDQdvSKen0mCL2J9tB7A/pewYyDE0ffIUmjxglOtw30f
+ZOlQKhMaKJGXp00U2zsHA2NJRI/hThbJncsnZyvuLei0P42RrF+r64b/0gUH6IZ5
+wPUttT815KSNoy+XXXum9YGDYYFoAL+6WVEkl6dgo+X0hcH7DDf5Nkewiq8UcJGh
+/69vFIfp+JlpicXzZ+R42LO3T3luC907aFBywF3pmi//
+-----END ENCRYPTED PRIVATE KEY-----
diff --git a/tests/test_tls.py b/tests/test_tls.py
--- a/tests/test_tls.py
+++ b/tests/test_tls.py
@@ -33,12 +33,19 @@
current_dir = os.path.dirname(os.path.realpath(__file__))
localhost_dir = os.path.join(current_dir, "certs/localhost")
+password_dir = os.path.join(current_dir, "certs/password")
sanic_dir = os.path.join(current_dir, "certs/sanic.example")
invalid_dir = os.path.join(current_dir, "certs/invalid.nonexist")
localhost_cert = os.path.join(localhost_dir, "fullchain.pem")
localhost_key = os.path.join(localhost_dir, "privkey.pem")
sanic_cert = os.path.join(sanic_dir, "fullchain.pem")
sanic_key = os.path.join(sanic_dir, "privkey.pem")
+password_dict = {
+ "cert": os.path.join(password_dir, "fullchain.pem"),
+ "key": os.path.join(password_dir, "privkey.pem"),
+ "password": "password",
+ "names": ["localhost"],
+}
@pytest.fixture
@@ -677,3 +684,34 @@ async def shutdown(app):
logging.INFO,
"Goin' Fast @ https://127.0.0.1:8000",
) in caplog.record_tuples
+
+
[email protected](
+ sys.platform not in ("linux", "darwin"),
+ reason="This test requires fork context",
+)
+def test_ssl_in_multiprocess_mode_password(
+ app: Sanic, caplog: pytest.LogCaptureFixture
+):
+ event = Event()
+
+ @app.main_process_start
+ async def main_start(app: Sanic):
+ app.shared_ctx.event = event
+
+ @app.after_server_start
+ async def shutdown(app):
+ app.shared_ctx.event.set()
+ app.stop()
+
+ assert not event.is_set()
+ with use_context("fork"):
+ with caplog.at_level(logging.INFO):
+ app.run(ssl=password_dict)
+ assert event.is_set()
+
+ assert (
+ "sanic.root",
+ logging.INFO,
+ "Goin' Fast @ https://127.0.0.1:8000",
+ ) in caplog.record_tuples
| SSL key encryption password not propagated to worker processes
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Describe the bug
When launching a server using the AppLoader, the SSL key encryption password from the ssl parameter does not reach the workers, triggering the certificate chain's default "Enter PEM pass phrase:" prompt, which causes the worker processes to fail.
### Code snippet
A typical scenario to reproduce the problem is as follows:
```
app_loader = AppLoader(factory=factory)
app = app_loader.load()
app.prepare(host='0.0.0.0',
            port=443,
            ssl={'cert': '/path/to/cert.pem',
                 'key': '/path/to/key.pem',
                 'password': 'the secret password'},
            workers=4)
Sanic.serve(primary=app, app_loader=app_loader)
```
### Expected Behavior
_No response_
### How do you run Sanic?
As a script (`app.run` or `Sanic.serve`)
### Operating System
Ubuntu 20.04 LTS (Focal Fossa)
### Sanic Version
Sanic 22.12.0; Routing 22.8.0
### Additional context
The problem lies in lines 811-814 of file `sanic/mixins/startup.py` :
```
ssl = kwargs.get("ssl")

if isinstance(ssl, SanicSSLContext):
    kwargs["ssl"] = kwargs["ssl"].sanic
```
When entering these lines, `kwargs["ssl"]` is a `CertSimple(SanicSSLContext)` instance that (according to line 176 of file `sanic/http/tls/context.py` where the `.sanic` attribute is set) is replaced with a dictionary that is missing the `"password"` entry because it was removed by the `pop` call at line 162 of file `sanic/http/tls/context.py`:
```
password = kw.pop("password", None)
```
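A minimal editorial illustration of the mechanism (the merged patch above switches `pop` to `get` for exactly this reason):
```python
kw = {"cert": "cert.pem", "key": "key.pem", "password": "the secret password"}
kw.pop("password", None)   # the dict reused to build worker contexts no longer has it
print(kw)                  # {'cert': 'cert.pem', 'key': 'key.pem'}

kw2 = {"cert": "cert.pem", "key": "key.pem", "password": "the secret password"}
kw2.get("password", None)  # reads without removing; the behavior after the fix
print(kw2)                 # the password entry survives
```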
| As a side note for coding purposes, I would probably replace line 814 of file `sanic/mixins/startup.py`
```
kwargs["ssl"] = kwargs["ssl"].sanic
```
with
```
kwargs["ssl"] = ssl.sanic
```
since we already stored the value of `kwargs["ssl"]` in the variable `ssl` at line 811
Thanks for posting this. Looks like a small fix. Pushing a PR now. | 2023-03-20T07:48:57 |
sanic-org/sanic | 2,720 | sanic-org__sanic-2720 | [
"2689"
] | 53820bc24142e75867557b872490d39589c9a83c | diff --git a/sanic/http/http1.py b/sanic/http/http1.py
--- a/sanic/http/http1.py
+++ b/sanic/http/http1.py
@@ -428,7 +428,9 @@ async def error_response(self, exception: Exception) -> None:
if self.request is None:
self.create_empty_request()
- request_middleware = not isinstance(exception, ServiceUnavailable)
+ request_middleware = not isinstance(
+ exception, (ServiceUnavailable, RequestCancelled)
+ )
try:
await app.handle_exception(
self.request, exception, request_middleware
| Malfunction of middleware when the request is aborted
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Describe the bug
When the client aborts the request while the server is still processing it, on_request is called again after on_response.
### Code snippet
```py
import asyncio

from sanic import Sanic, text

app = Sanic(__name__)

@app.on_request
async def on_request(request):
    print("on_request", request.id)

@app.on_response
async def on_response(request, response):
    print("on_response", request.id)

@app.get("/")
async def test(request):
    print("test handler")
    try:
        await asyncio.sleep(10)
    except asyncio.CancelledError as e:
        print("test handler cancelled")
        raise e
    return text("test")

if __name__ == "__main__":
    app.run(host="0.0.0.0", port=8000)
```
Output
```sh
on_request 592d1e27-b05b-4f2f-a138-f81d5c44c58c
test handler
---- I aborted the request here ----
test handler cancelled
on_response 592d1e27-b05b-4f2f-a138-f81d5c44c58c
on_request 592d1e27-b05b-4f2f-a138-f81d5c44c58c <<<< strange thing
```
### Expected Behavior
`on_request` should be called once, before the route handler is invoked, and not again after `on_response`.
### How do you run Sanic?
As a script (`app.run` or `Sanic.serve`)
### Operating System
Windows-10-10.0.19044-SP0
### Sanic Version
sanic==22.12.0, sanic-routing==22.8.0, sanic-ext==22.12.0
| Yeah I am experiencing something similar as well. I have a search component in my React frontend that does search-as-you-type so there are lots of requests sent to the server and sometimes the frontend will explicitly cancel the pending request if the user started typing something else.
Currently I have an error because while the request was cancelled, sanic was still in the process of running some middleware for the request, and they error out because the request context is no longer available. There seems to be something odd with processing `CancelledError`. I wish there was more documentation about this so I could implement a fix that does not involve wrapping each and every middleware or method in a try/except.
For info I am running Python 3.11, Latest sanic.
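A hypothetical interim workaround sketch (editorial; `safe_middleware` and the transport check are illustrative, not from the thread):
```python
from sanic import Sanic

app = Sanic("CancelGuardDemo")

@app.on_response
async def safe_middleware(request, response):
    # Skip the real work when the client has already gone away.
    if request.transport is None or request.transport.is_closing():
        return
    ...  # real middleware work
```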
This appears to be a bug that we need to get sorted out. Hopefully for the upcoming 23.3 release.
The internals of error and cancellation handling & middlewares are really complicated and not really documented beyond some source code comments.
I have an overhaul branch that I wanted to get in but will wait until the next release. I'll get a patch for this in. | 2023-03-20T08:38:10 |
|
sanic-org/sanic | 2,721 | sanic-org__sanic-2721 | [
"2677"
] | ac1f56118aca06e8753134b7f5400e16c2cb00c3 | diff --git a/sanic/log.py b/sanic/log.py
--- a/sanic/log.py
+++ b/sanic/log.py
@@ -62,13 +62,13 @@ class StrEnum(str, Enum):
},
formatters={
"generic": {
- "format": "%(asctime)s [%(process)d] [%(levelname)s] %(message)s",
+ "format": "%(asctime)s [%(process)s] [%(levelname)s] %(message)s",
"datefmt": "[%Y-%m-%d %H:%M:%S %z]",
"class": "logging.Formatter",
},
"access": {
"format": "%(asctime)s - (%(name)s)[%(levelname)s][%(host)s]: "
- + "%(request)s %(message)s %(status)d %(byte)d",
+ + "%(request)s %(message)s %(status)s %(byte)s",
"datefmt": "[%Y-%m-%d %H:%M:%S %z]",
"class": "logging.Formatter",
},
| Access logging raises TypeError after `logging.logProcesses=False`
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Describe the bug
If someone uses `logging.logProcesses = False` to disable the call to `os.getpid()` while logging, the [default formatter of Sanic](https://github.com/sanic-org/sanic/blob/5e7f6998bdccce325a4c30d940d02d9d1e40b11e/sanic/log.py#L65) will raise the exception shown below.
See [`logging.logProcesses`](https://docs.python.org/3/howto/logging.html#optimization)
```log
File "C:\Program Files\Python\Python311\Lib\logging\__init__.py", line 445, in _format
return self._fmt % values
~~~~~~~~~~^~~~~~~~
File "C:\Program Files\Python\Python311\Lib\logging\__init__.py", line 449, in format
return self._format(record)
File "C:\Program Files\Python\Python311\Lib\logging\__init__.py", line 659, in formatMessage
return self._style.format(record)
File "C:\Program Files\Python\Python311\Lib\logging\__init__.py", line 690, in format
s = self.formatMessage(record)
File "C:\Program Files\Python\Python311\Lib\logging\__init__.py", line 953, in format
return fmt.format(record)
File "C:\Program Files\Python\Python311\Lib\logging\__init__.py", line 1110, in emit
msg = self.format(record)
File "C:\Program Files\Python\Python311\Lib\logging\__init__.py", line 978, in handle
self.emit(record)
File "C:\Program Files\Python\Python311\Lib\logging\__init__.py", line 1706, in callHandlers
hdlr.handle(record)
File "C:\Program Files\Python\Python311\Lib\logging\__init__.py", line 1644, in handle
self.callHandlers(record)
File "C:\Program Files\Python\Python311\Lib\logging\__init__.py", line 1634, in _log
self.handle(record)
File "C:\Program Files\Python\Python311\Lib\logging\__init__.py", line 1489, in info
self._log(INFO, msg, args, **kwargs)
File "C:\Program Files\Python\Python311\Lib\site-packages\sanic\application\motd.py", line 113, in display
out(indent("\n".join(lines), " "))
File "C:\Program Files\Python\Python311\Lib\site-packages\sanic\application\motd.py", line 39, in output
motd_class(logo, serve_location, data, extra).display()
File "C:\Program Files\Python\Python311\Lib\site-packages\sanic\mixins\startup.py", line 579, in motd
MOTD.output(logo, serve_location, display, extra)
File "C:\Program Files\Python\Python311\Lib\site-packages\sanic\mixins\startup.py", line 533, in _helper
self.motd(server_settings=server_settings)
File "C:\Program Files\Python\Python311\Lib\site-packages\sanic\mixins\startup.py", line 327, in prepare
server_settings = self._helper(
File "C:\Program Files\Python\Python311\Lib\site-packages\sanic\mixins\startup.py", line 176, in run
self.prepare(
...
TypeError: %d format: a real number is required, not NoneType
```
Without `os.getpid()`, the LogRecord is generated with a values dict like `values = {'process': None, ...}`. An exception is then raised when the formatter tries to substitute `values = {'process': None, ...}` into `"[%(process)d]"`.
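A minimal editorial illustration of the failure and of how `%s` degrades gracefully:
```python
try:
    "[%(process)d]" % {"process": None}
except TypeError as e:
    print(e)  # %d format: a real number is required, not NoneType

print("[%(process)s]" % {"process": None})  # prints "[None]", no exception
```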
I suggest using `[%(process)s]` instead of `[%(process)d]`.
https://github.com/sanic-org/sanic/blob/5e7f6998bdccce325a4c30d940d02d9d1e40b11e/sanic/log.py#L65
If no [conversion flags](https://docs.python.org/3/library/stdtypes.html#printf-style-string-formatting) (like `%06d`) are set, `%s` also performs slightly better than `%d` when converting an unsigned integer to a string.
See [Why is %s faster than %d for integer substitution in python?](https://stackoverflow.com/a/27800584/18677995)
And here is a shell snippet for you to run a quick test:
```shell
python -m timeit -n 100000 -s "fstr='[%(process)s]'" "fstr % {'process':12345}"
python -m timeit -n 100000 -s "fstr='[%(process)d]'" "fstr % {'process':12345}"
```
Result on my laptop is:
```log
100000 loops, best of 5: 157 nsec per loop
100000 loops, best of 5: 160 nsec per loop
```
### Code snippet
```python
import logging

from sanic import Sanic
from sanic.response import text

logging.logProcesses = False

app = Sanic("MyHelloWorldApp")

@app.get("/")
async def hello_world(request):
    return text("Hello, world.")

if __name__ == '__main__':
    app.run(host="127.0.0.1", port=8080, debug=True)
```
### Expected Behavior
The log should look like this (the PID shows `None` instead of an exception being raised):
```log
[2023-02-09 10:39:08 +0800] [None] [INFO]
  ┌──────────────────────────────────────────────────────────────┐
  │                        Sanic v22.12.0                        │
  │               Goin' Fast @ http://127.0.0.1:8080             │
  ├───────────────────────┬──────────────────────────────────────┤
  │                       │     mode: debug, single worker       │
  │     ▄███ █████ ██     │   server: sanic, HTTP/1.1            │
  │    ██                 │   python: 3.11.1                     │
  │     ▀███████ ███▄     │ platform: Windows-10-10.0.22621-SP0  │
  │                 ██    │ packages: sanic-routing==22.8.0      │
  │    ████ ████████▀     │                                      │
  │                       │                                      │
  │ Build Fast. Run Fast. │                                      │
  └───────────────────────┴──────────────────────────────────────┘
[2023-02-09 10:39:08 +0800] [None] [DEBUG] Creating multiprocessing context using 'spawn'
[2023-02-09 10:39:08 +0800] [None] [DEBUG] Starting a process: Sanic-Server-0-0
[2023-02-09 10:39:09 +0800] [None] [DEBUG] Process ack: Sanic-Server-0-0 [13504]
[2023-02-09 10:39:09 +0800] [None] [INFO] Starting worker [13504]
```
### How do you run Sanic?
As a script (`app.run` or `Sanic.serve`)
### Operating System
Windows
### Sanic Version
22.12.0
### Additional context
_No response_
| 2023-03-20T08:57:44 |
||
sanic-org/sanic | 2,722 | sanic-org__sanic-2722 | [
"2655"
] | a245ab37733411fab555c5ea602833e713eae4f2 | diff --git a/sanic/app.py b/sanic/app.py
--- a/sanic/app.py
+++ b/sanic/app.py
@@ -92,6 +92,7 @@
from sanic.touchup import TouchUp, TouchUpMeta
from sanic.types.shared_ctx import SharedContext
from sanic.worker.inspector import Inspector
+from sanic.worker.loader import CertLoader
from sanic.worker.manager import WorkerManager
@@ -139,6 +140,7 @@ class Sanic(StaticHandleMixin, BaseSanic, StartupMixin, metaclass=TouchUpMeta):
"_test_client",
"_test_manager",
"blueprints",
+ "certloader_class",
"config",
"configure_logging",
"ctx",
@@ -181,6 +183,7 @@ def __init__(
loads: Optional[Callable[..., Any]] = None,
inspector: bool = False,
inspector_class: Optional[Type[Inspector]] = None,
+ certloader_class: Optional[Type[CertLoader]] = None,
) -> None:
super().__init__(name=name)
# logging
@@ -215,6 +218,9 @@ def __init__(
self.asgi = False
self.auto_reload = False
self.blueprints: Dict[str, Blueprint] = {}
+ self.certloader_class: Type[CertLoader] = (
+ certloader_class or CertLoader
+ )
self.configure_logging: bool = configure_logging
self.ctx: Any = ctx or SimpleNamespace()
self.error_handler: ErrorHandler = error_handler or ErrorHandler()
diff --git a/sanic/worker/loader.py b/sanic/worker/loader.py
--- a/sanic/worker/loader.py
+++ b/sanic/worker/loader.py
@@ -5,6 +5,7 @@
from importlib import import_module
from pathlib import Path
+from ssl import SSLContext
from typing import TYPE_CHECKING, Any, Callable, Dict, Optional, Union, cast
from sanic.http.tls.context import process_to_context
@@ -103,8 +104,16 @@ class CertLoader:
"trustme": TrustmeCreator,
}
- def __init__(self, ssl_data: Dict[str, Union[str, os.PathLike]]):
+ def __init__(
+ self,
+ ssl_data: Optional[
+ Union[SSLContext, Dict[str, Union[str, os.PathLike]]]
+ ],
+ ):
self._ssl_data = ssl_data
+ self._creator_class = None
+ if not ssl_data or not isinstance(ssl_data, dict):
+ return
creator_name = cast(str, ssl_data.get("creator"))
diff --git a/sanic/worker/serve.py b/sanic/worker/serve.py
--- a/sanic/worker/serve.py
+++ b/sanic/worker/serve.py
@@ -73,8 +73,8 @@ def worker_serve(
info.settings["app"] = a
a.state.server_info.append(info)
- if isinstance(ssl, dict):
- cert_loader = CertLoader(ssl)
+ if isinstance(ssl, dict) or app.certloader_class is not CertLoader:
+ cert_loader = app.certloader_class(ssl or {})
ssl = cert_loader.load(app)
for info in app.state.server_info:
info.settings["ssl"] = ssl
| diff --git a/tests/test_tls.py b/tests/test_tls.py
--- a/tests/test_tls.py
+++ b/tests/test_tls.py
@@ -12,7 +12,7 @@
import pytest
-from sanic_testing.testing import HOST, PORT
+from sanic_testing.testing import HOST, PORT, SanicTestClient
import sanic.http.tls.creators
@@ -29,6 +29,7 @@
get_ssl_context,
)
from sanic.response import text
+from sanic.worker.loader import CertLoader
current_dir = os.path.dirname(os.path.realpath(__file__))
@@ -427,6 +428,29 @@ async def handler(request):
assert "No certificates" in str(excinfo.value)
+def test_custom_cert_loader():
+ class MyCertLoader(CertLoader):
+ def load(self, app: Sanic):
+ self._ssl_data = {
+ "key": localhost_key,
+ "cert": localhost_cert,
+ }
+ return super().load(app)
+
+ app = Sanic("custom", certloader_class=MyCertLoader)
+
+ @app.get("/test")
+ async def handler(request):
+ return text("ssl test")
+
+ client = SanicTestClient(app, port=44556)
+
+ request, response = client.get("https://localhost:44556/test")
+ assert request.scheme == "https"
+ assert response.status_code == 200
+ assert response.text == "ssl test"
+
+
def test_logger_vhosts(caplog):
app = Sanic(name="test_logger_vhosts")
| Can't run normally when using SSLContext
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Describe the bug
```
app.run(host='0.0.0.0', port=443, ssl=context, dev=True)
File "/usr/lib/python3.10/site-packages/sanic/mixins/startup.py", line 209, in run
serve(primary=self) # type: ignore
File "/usr/lib/python3.10/site-packages/sanic/mixins/startup.py", line 862, in serve
manager.run()
File "/usr/lib/python3.10/site-packages/sanic/worker/manager.py", line 94, in run
self.start()
File "/usr/lib/python3.10/site-packages/sanic/worker/manager.py", line 101, in start
process.start()
File "/usr/lib/python3.10/site-packages/sanic/worker/process.py", line 53, in start
self._current_process.start()
File "/usr/lib/python3.10/multiprocessing/process.py", line 121, in start
self._popen = self._Popen(self)
File "/usr/lib/python3.10/multiprocessing/context.py", line 284, in _Popen
return Popen(process_obj)
File "/usr/lib/python3.10/multiprocessing/popen_spawn_posix.py", line 32, in __init__
super().__init__(process_obj)
File "/usr/lib/python3.10/multiprocessing/popen_fork.py", line 19, in __init__
self._launch(process_obj)
File "/usr/lib/python3.10/multiprocessing/popen_spawn_posix.py", line 47, in _launch
reduction.dump(process_obj, fp)
File "/usr/lib/python3.10/multiprocessing/reduction.py", line 60, in dump
ForkingPickler(file, protocol).dump(obj)
TypeError: cannot pickle 'SSLContext' object
```
### Code snippet
```python
from sanic import Sanic
import ssl

context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
context.load_cert_chain("certs/fullchain.pem", "certs/privkey.pem")

app = Sanic('test')

if __name__ == "__main__":
    app.run(host='0.0.0.0', port=443, ssl=context, dev=True)
```
### Expected Behavior
_No response_
### How do you run Sanic?
As a script (`app.run` or `Sanic.serve`)
### Operating System
linux
### Sanic Version
22.12.0
### Additional context
pickle does not support dumping `ssl.SSLContext`, which causes this problem.
`multiprocessing.context` uses `pickle`:
```python
import pickle
import ssl

context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
context.load_cert_chain("certs/fullchain.pem", "certs/privkey.pem")
pickle.dumps(context)  # TypeError: cannot pickle 'SSLContext' object
```
| This is known. You can pass a dict of the paths, or use a context with a single process or legacy mode. Working on an alternative.
```python
from sanic import Sanic

app = Sanic('test')

if __name__ == "__main__":
    app.run(host='0.0.0.0', port=443, ssl={
        "cert": "certs/fullchain.pem",
        "key": "certs/privkey.pem",
    }, dev=True)
```
@teixeirazeus That is equivalent to simply passing the cert dir as a string, as long as the files are named fullchain.pem and privkey.pem:
```python
app.run(host='0.0.0.0', port=443, ssl="certs", dev=True)
```
Or you may leave out `app.run` entirely (actually this is now preferred) and use the CLI instead. Assuming your file is main.py:
```sh
sanic --host 0.0.0.0 --port 443 --tls certs --dev main:app
```
I have read the code and the [documentation](https://sanic.dev/en/guide/how-to/tls.html#single-domain-and-single-certificate) in detail. I know all these ways. @Tronic @teixeirazeus, thank you for your answers.
I need full control over details such as which crypto algorithms are permitted.
@sdir The problem is that SSLContext cannot be transferred to worker processes by pickling. This can be avoided by using
```python
from sanic import Sanic
Sanic.start_method = "fork"
```
On Linux you should be fine with that as a workaround. Sanic normally uses spawn (and thus pickling) because the fork mode doesn't work properly on Mac or Windows.
I believe @ahopkins is working on a solution that would allow you to construct your custom SSLContext in each worker process, avoiding the pickling even in spawn mode, and the issue is open waiting for that as the ultimate solution. I hope that either the workaround or whatever Adam brews up helps you.
> I need full control over details such as which crypto algorithms are permitted.
@sdir On this detail I would like to know more. Since TLS 1.2 and 1.3 there isn't much to control, but we are open to any input to make Sanic's selections on those more secure. If the tweaks that you do are reasonably compatible with browsers and not for weakening security, we might wish to implement the same in Sanic as well.
EDIT: Sanic currently gets grade A on https://www.ssllabs.com/ssltest (likely A+ if you add HSTS headers which my test app didn't have).
> > I need full control over details such as which crypto algorithms are permitted.
>
> @Tronic I didn't express myself clearly. This is just an example, not actually a crypto-algorithms issue.

I just took what was mentioned in the documentation. If a custom `Context` is not supported, shouldn't the documentation be updated?
e.g. `load_verify_locations`:
```python
import ssl

context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
context.load_cert_chain("certs/fullchain.pem", "certs/privkey.pem")
context.load_verify_locations("certs/ca.crt")
```
And this is very useful for me.
```python
from sanic import Sanic
Sanic.start_method = "fork"
```
Normally `load_verify_locations` is only used on the client side, to obtain CAs that are then used for verifying server certs. I am not sure if it is even possible (especially with Python), but in principle a server could also verify browsers' certificates. I have never seen this done anywhere. Unless you are doing that, you don't need the call.
For other checks and manipulations on certs, you can do it using temporary SSLContext (or other SSL tools because Python is fairly limited) and then *not* pass it to Sanic but let Sanic load the same cert files on its own.
As for cipher suites, Sanic only allows TLS 1.2 and 1.3, with the following cipher suites:
```
# TLS 1.3 (suites in server-preferred order)
TLS_AES_256_GCM_SHA384 (0x1302) ECDH x25519 (eq. 3072 bits RSA) FS 256
TLS_CHACHA20_POLY1305_SHA256 (0x1303) ECDH x25519 (eq. 3072 bits RSA) FS 256
TLS_AES_128_GCM_SHA256 (0x1301) ECDH x25519 (eq. 3072 bits RSA) FS 128
# TLS 1.2 (suites in server-preferred order)
TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 (0xcca9) ECDH x25519 (eq. 3072 bits RSA) FS 256
TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 (0xc02c) ECDH x25519 (eq. 3072 bits RSA) FS 256
TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 (0xc02b) ECDH x25519 (eq. 3072 bits RSA) FS 128
```
The weaker AES-128 MUST be implemented by the spec. The only thing I might change here is to prefer CHACHA20 first on TLS 1.3 as well, but I don't think this can be configured with Python's SSLContext. All three suites are actually very secure and support forward secrecy.
Another thing you could do with a customized context is to do something with the SNI sent in Client Hello (via a callback in SSLContext). Sanic uses this to select the correct vhost certificate if multiple certificates for different domains are available, or to reject the connection if no valid name was sent.
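For reference, a minimal sketch of such an SNI hook on a stdlib `SSLContext` (an editorial illustration; the hostname is a placeholder, not Sanic's actual implementation):
```python
import ssl

ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)

def on_sni(ssl_socket, server_name, initial_ctx):
    # Returning an ALERT_DESCRIPTION_* constant rejects the handshake;
    # returning None keeps the currently selected context.
    if server_name != "example.com":
        return ssl.ALERT_DESCRIPTION_UNRECOGNIZED_NAME

ctx.sni_callback = on_sni
```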
I'll leave it up to @sdir and @ahopkins to decide whether there is need to have a mechanism for constructing truly custom SSLContext (especially for platforms where fork cannot be used), or whether these issues are in fact not worth the effort.
+1 for this. My organization uses certificates for authentication internally, so `load_verify_locations` is mandatory because the server needs a copy of the company's CA that issues a certificate to each user. The server then verifies that the user is presenting a valid certificate signed by the company CA.
It works fine on the older Sanic, so it's a clear regression.
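For readers, a minimal sketch (editorial, using the stdlib `ssl` module with placeholder paths) of the server-side client-certificate verification being described:
```python
import ssl

ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
ctx.load_cert_chain("certs/fullchain.pem", "certs/privkey.pem")
ctx.verify_mode = ssl.CERT_REQUIRED        # require each client to present a certificate
ctx.load_verify_locations("certs/ca.crt")  # the company CA that signs client certificates
```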
There are two paths to that: either specifically add support for `load_verify_locations` (assuming this is the missing piece that everyone needs), or add a signal for creating/manipulating the SSLContext right as the workers are starting. Probably the latter, as other needs may arise, and it is fairly simple to implement both in Sanic and for app developers (who will in any case need to make changes, unfortunately).
@goatpop try a custom class:
```python
import ssl

from sanic import Sanic

app = Sanic('test')

class SSLContextSimple(ssl.SSLContext):
    def config(self, conf: dict):
        self.conf = conf
        self.load_config(conf)

    def load_config(self, conf):
        self.load_cert_chain(conf['cert'], conf['key'])
        self.load_verify_locations(conf['ca'])

    # Make the context picklable: spawn-based workers rebuild it from conf.
    def __getnewargs__(self):
        return (self.protocol,)

    def __getstate__(self):
        return self.conf

    def __setstate__(self, state):
        self.load_config(state)

context = SSLContextSimple(ssl.PROTOCOL_TLS_SERVER)
context.config({
    'cert': 'certs/fullchain.pem',
    'key': 'certs/privkey.pem',
    'ca': 'certs/ca.crt',
})

if __name__ == "__main__":
    app.run(host='0.0.0.0', port=443, ssl=context, dev=True)
```
> add a signal for creating/manipulating SSLContext right as the workers are starting
Yes, this is the approach I was after. | 2023-03-20T09:51:17 |
sanic-org/sanic | 2,728 | sanic-org__sanic-2728 | [
"2726"
] | e3744095675df170398a374cc20832470200ea40 | diff --git a/sanic/__version__.py b/sanic/__version__.py
--- a/sanic/__version__.py
+++ b/sanic/__version__.py
@@ -1 +1 @@
-__version__ = "23.3.0"
+__version__ = "23.3.1"
diff --git a/sanic/mixins/static.py b/sanic/mixins/static.py
--- a/sanic/mixins/static.py
+++ b/sanic/mixins/static.py
@@ -95,7 +95,7 @@ def static(
)
try:
- file_or_directory = Path(file_or_directory)
+ file_or_directory = Path(file_or_directory).resolve()
except TypeError:
raise TypeError(
"Static file or directory must be a path-like object or string"
| diff --git a/tests/test_static.py b/tests/test_static.py
--- a/tests/test_static.py
+++ b/tests/test_static.py
@@ -101,6 +101,31 @@ def test_static_file_pathlib(app, static_file_directory, file_name):
assert response.body == get_file_content(static_file_directory, file_name)
[email protected](
+ "file_name",
+ [
+ "test.file",
+ "decode me.txt",
+ "python.png",
+ "symlink",
+ "hard_link",
+ ],
+)
+def test_static_file_pathlib_relative_path_traversal(
+ app, static_file_directory, file_name
+):
+ """Get the current working directory and check if it ends with "sanic" """
+ cwd = Path.cwd()
+ if not str(cwd).endswith("sanic"):
+ pytest.skip("Current working directory does not end with 'sanic'")
+
+ file_path = "./tests/static/../static/"
+ app.static("/", file_path)
+ _, response = app.test_client.get(f"/{file_name}")
+ assert response.status == 200
+ assert response.body == get_file_content(static_file_directory, file_name)
+
+
@pytest.mark.parametrize(
"file_name",
[b"test.file", b"decode me.txt", b"python.png"],
| Static routes containing ".." don't work
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Describe the bug
Sanic does not serve static files if the configured server file path contains ".."; instead it returns file-not-found errors.
### Code snippet
The following doesn't work:
```
from sanic import Sanic

app = Sanic("Example")
app.static("/", "./static/../static/")

if __name__ == "__main__":
    app.run(host="0.0.0.0", port=8000)
```
Attempting to access any file in the static directory produces the error:
`[2023-03-27 04:05:31 -0700] [60813] [ERROR] File not found: path=static/../static, relative_url=test.txt`
Removing the redundant `../static/` will result in the files being correctly served.
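An editorial workaround sketch until the fix lands: resolve the path up front, which is also what the merged patch does internally.
```python
from pathlib import Path

from sanic import Sanic

app = Sanic("Example")
# Path.resolve() collapses the ".." segments before Sanic stores the root.
app.static("/", Path("./static/../static/").resolve())
```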
### Expected Behavior
The above example should serve all files in the static directory and not return file not found errors.
### How do you run Sanic?
Sanic CLI
### Operating System
linux
### Sanic Version
22.9.1
### Additional context
This issue appears to have been introduced by #2506 or #2508.
The fix might look something like the following:
```
diff --git a/sanic/mixins/static.py b/sanic/mixins/static.py
index bcffbc8..28effc4 100644
--- a/sanic/mixins/static.py
+++ b/sanic/mixins/static.py
@@ -323,7 +323,7 @@ class StaticHandleMixin(metaclass=SanicMeta):
# python from herping a derp and treating the uri as an
# absolute path
unquoted_file_uri = unquote(__file_uri__).lstrip("/")
- file_path_raw = Path(file_or_directory, unquoted_file_uri)
+ file_path_raw = Path(root_path, unquoted_file_uri)
file_path = file_path_raw.resolve()
if (
file_path < root_path and not file_path_raw.is_symlink()
```
It looks like the ".." resolution was originally applied to the URI part only, but with the #2506 change it is now applied to the combination of the root path *and* the relative path. The above change instead uses the resolved form of the root path (which won't contain "..").
However, there are likely other options and having more context about what the checks in this area are for would be crucial.
| Looks like Sanic should resolve that static dir path when the route is defined, making it an absolute path without any `..` elements. If that is the only thing missing, it should be easy to fix this. PRs welcome, if you are up to that :)
https://github.com/sanic-org/sanic/blob/main/sanic/mixins/static.py#L98
All we need is a `.resolve()` on this line and a test. | 2023-03-30T08:27:05 |
sanic-org/sanic | 2,737 | sanic-org__sanic-2737 | [
"2736"
] | 6eaab2a7e5be418385856371fdaebe4701f8c4fc | diff --git a/sanic/response/types.py b/sanic/response/types.py
--- a/sanic/response/types.py
+++ b/sanic/response/types.py
@@ -345,7 +345,7 @@ def __init__(
body: Optional[Any] = None,
status: int = 200,
headers: Optional[Union[Header, Dict[str, str]]] = None,
- content_type: Optional[str] = None,
+ content_type: str = "application/json",
dumps: Optional[Callable[..., str]] = None,
**kwargs: Any,
):
| diff --git a/tests/test_response_json.py b/tests/test_response_json.py
--- a/tests/test_response_json.py
+++ b/tests/test_response_json.py
@@ -213,3 +213,12 @@ def do_pop(request: Request, response: JSONResponse):
_, resp = json_app.test_client.get("/json-pop")
assert resp.body == json_dumps(["b"]).encode()
+
+
+def test_json_response_class_sets_proper_content_type(json_app: Sanic):
+ @json_app.get("/json-class")
+ async def handler(request: Request):
+ return JSONResponse(JSON_BODY)
+
+ _, resp = json_app.test_client.get("/json-class")
+ assert resp.headers["content-type"] == "application/json"
| JSONResponse defaults to None content-type
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Describe the bug
```python
@app.get("/")
async def handler(request: Request):
    return JSONResponse({"message": "Hello World!"})
```
```sh
$ curl localhost:9999
HTTP/1.1 200 OK
content-length: 26
connection: keep-alive
alt-svc:
content-type: None
{"message":"Hello World!"}
```
### Code snippet
_No response_
### Expected Behavior
`content-type: application/json`
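On affected versions, passing the content type explicitly works around the issue (an editorial sketch; the `content_type` parameter exists per the signature in the patch above):
```python
from sanic import Sanic
from sanic.response import JSONResponse

app = Sanic("Demo")

@app.get("/explicit")
async def handler(request):
    return JSONResponse(
        {"message": "Hello World!"},
        content_type="application/json",  # explicit until the fixed default ships
    )
```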
### How do you run Sanic?
Sanic CLI
### Operating System
all
### Sanic Version
LTS+
### Additional context
_No response_
| 2023-04-09T16:19:44 |
|
sanic-org/sanic | 2,754 | sanic-org__sanic-2754 | [
"2753"
] | 049983cb704ab9d439c5fddf23d0bf3784b53741 | diff --git a/sanic/models/handler_types.py b/sanic/models/handler_types.py
--- a/sanic/models/handler_types.py
+++ b/sanic/models/handler_types.py
@@ -3,11 +3,12 @@
import sanic
-from sanic.request import Request
+from sanic import request
from sanic.response import BaseHTTPResponse, HTTPResponse
Sanic = TypeVar("Sanic", bound="sanic.Sanic")
+Request = TypeVar("Request", bound="request.Request")
MiddlewareResponse = Union[
Optional[HTTPResponse], Coroutine[Any, Any, Optional[HTTPResponse]]
| Improve type of `MiddlewareType`
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Is your feature request related to a problem? Please describe.
When using a custom Request class and type hinting the middleware with that custom Request class, type checkers complain that the argument types of the middleware function are invalid.
```python
from sanic import Request, Sanic

class MyRequest(Request):
    ...

async def some_middleware(request: MyRequest) -> None:
    ...

app = Sanic("trial-app")

# This raises a type error.
app.register_middleware(some_middleware, "request")

# Pyright Error
# Argument of type "(request: MyRequest) -> Coroutine[Any, Any, None]" cannot be assigned to parameter
# "middleware" of type "MiddlewareType | Middleware" in function "register_middleware"
#   Type "(request: MyRequest) -> Coroutine[Any, Any, None]" cannot be assigned to type "MiddlewareType | Middleware"
#     Type "(request: MyRequest) -> Coroutine[Any, Any, None]" cannot be assigned to type "RequestMiddlewareType"
#       Parameter 1: type "Request" cannot be assigned to type "MyRequest"
#         "Request" is incompatible with "MyRequest"
#     Type "(request: MyRequest) -> Coroutine[Any, Any, None]" cannot be assigned to type "ResponseMiddlewareType"
#       Function accepts too many positional parameters; expected 1 but received 2
#       Parameter 1: type "Request" cannot be assigned to type "MyRequest"
#         "Request" is incompatible with "MyRequest"
```
### Describe the solution you'd like
Using a subclass of Request shouldn't be flagged as an error by type checkers.
### Additional context
I think the fix is to make the `Request` type in `MiddlewareType` in [`handler_types`](https://github.com/sanic-org/sanic/blob/main/sanic/models/handler_types.py) a `TypeVar` bound to `Request`, as is done for the `Sanic` type.
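A sketch of that approach (the TypeVar line matches the merged patch above; the middleware alias is an assumed simplification of the module's real definitions):
```python
from typing import Callable, Optional, TypeVar

from sanic import request
from sanic.response import HTTPResponse

Request = TypeVar("Request", bound="request.Request")

# A handler annotated with a Request subclass now unifies with the bound TypeVar.
RequestMiddlewareType = Callable[[Request], Optional[HTTPResponse]]
```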
| Thanks. Do you think you can create the PR for this?
Yeah I would love to. I'll get it ready in a day or two. | 2023-05-17T04:26:35 |
|
sanic-org/sanic | 2,770 | sanic-org__sanic-2770 | [
"2757",
"2757"
] | f2cc83c1ba8287a9733144d68734b1c545c4f4c9 | diff --git a/sanic/cli/app.py b/sanic/cli/app.py
--- a/sanic/cli/app.py
+++ b/sanic/cli/app.py
@@ -180,6 +180,10 @@ def _get_app(self, app_loader: AppLoader):
" Example File: project/sanic_server.py -> app\n"
" Example Module: project.sanic_server.app"
)
+ error_logger.error(
+ "\nThe error below might have caused the above one:\n"
+ f"{e.msg}"
+ )
sys.exit(1)
else:
raise e
| Circular import in target file accidentally triggers 'No module named ... found'
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Describe the bug
While developing, it appears I accidentally caused a circular import that Python traces back to the file containing my app. As a result, Python outputs an error such as the following:
```
ImportError: cannot import name 'constants' from partially initialized module 'app' (most likely due to a circular import) (/api/app/__init__.py)
```
In this case the module I pass to the Sanic server is `app:app`, run from within `/api`.
### Code snippet
_No response_
### Expected Behavior
I had this in the back of my mind the entire time, but found it very difficult to troubleshoot due to Sanic swallowing the error. As a result I ended up down the rabbit hole of accidental breaking changes and tried commenting out different changes. An hour later I finally found the right import.
It would help if Sanic continued to output the specific import error, on the off-chance that it isn't an incorrectly set-up module. The alternative would be to use more fine-grained `importlib` calls and invoke some functions manually rather than use the helper functions. There should then be a separate call that finds the file (a failure here hints at an incorrectly set-up module) and one that loads it (a failure here is a user error).
### How do you run Sanic?
Sanic CLI
### Operating System
Windows (Docker, Python:3.11)
### Sanic Version
23.3
### Additional context
_No response_
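A sketch of the `importlib`-based split the reporter suggests, with illustrative names (`load_module` is not a Sanic API): locating the module and executing it become separate failure modes.

```python
import importlib.util
import sys

def load_module(name: str):
    spec = importlib.util.find_spec(name)
    if spec is None:
        # The target genuinely does not exist: a usage error.
        raise SystemExit(f"No module named {name} found.")
    module = importlib.util.module_from_spec(spec)
    sys.modules[name] = module
    try:
        spec.loader.exec_module(module)  # user code runs here
    except ImportError as exc:
        # The module exists but crashed while importing, e.g. a circular
        # import; surface the real error instead of swallowing it.
        raise SystemExit(f"{name} failed to import: {exc}") from exc
    return module
```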
| 2023-07-05T09:39:10 |
||
sanic-org/sanic | 2,773 | sanic-org__sanic-2773 | [
"2752"
] | 976da69e79e22c08ba2ea9ffe48bed72bf3290df | diff --git a/sanic/blueprints.py b/sanic/blueprints.py
--- a/sanic/blueprints.py
+++ b/sanic/blueprints.py
@@ -319,6 +319,10 @@ def register(self, app, options):
# Prepend the blueprint URI prefix if available
uri = self._setup_uri(future.uri, url_prefix)
+ route_error_format = (
+ future.error_format if future.error_format else error_format
+ )
+
version_prefix = self.version_prefix
for prefix in (
future.version_prefix,
@@ -358,7 +362,7 @@ def register(self, app, options):
future.unquote,
future.static,
version_prefix,
- error_format,
+ route_error_format,
future.route_context,
)
| diff --git a/tests/test_errorpages.py b/tests/test_errorpages.py
--- a/tests/test_errorpages.py
+++ b/tests/test_errorpages.py
@@ -2,6 +2,7 @@
import pytest
+import sanic
from sanic import Sanic
from sanic.config import Config
from sanic.errorpages import TextRenderer, exception_response, guess_mime
@@ -205,6 +206,27 @@ def json_response(request):
assert response.content_type == "text/plain; charset=utf-8"
+def test_blueprint_error_response_from_explicit_format(app):
+ bp = sanic.Blueprint("MyBlueprint")
+
+ @bp.get("/text", error_format="json")
+ def text_response(request):
+ raise Exception("oops")
+ return text("Never gonna see this")
+
+ @bp.get("/json", error_format="text")
+ def json_response(request):
+ raise Exception("oops")
+ return json({"message": "Never gonna see this"})
+
+ app.blueprint(bp)
+ _, response = app.test_client.get("/text")
+ assert response.content_type == "application/json"
+
+ _, response = app.test_client.get("/json")
+ assert response.content_type == "text/plain; charset=utf-8"
+
+
def test_unknown_fallback_format(app):
with pytest.raises(SanicException, match="Unknown format: bad"):
app.config.FALLBACK_ERROR_FORMAT = "bad"
| Blueprint ignores error_format option
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Describe the bug
`add_route` and its route-decorator friends respect the `error_format` option to specify "text", "json", or "html", as documented [here](https://sanic.dev/en/guide/best-practices/exceptions.html#built-in-error-handling).
But if I wrap the route in a blueprint, the `error_format` option no longer works; it just uses the default.
### Code snippet
#### Without Blueprint
```python
from sanic import Sanic
import sanic.exceptions
app = Sanic("MyHelloWorldApp")
@app.get("/", error_format="json")
async def hello_world(request):
raise sanic.exceptions.SanicException("Big Mistake, Huge.", status_code=400)
```
// Elsewhere...
```bash
$ curl localhost:8000/ -i
HTTP/1.1 400 Bad Request
content-length: 73
connection: keep-alive
content-type: application/json
{"description":"Bad Request","status":400,"message":"Big Mistake, Huge."}
```
#### With Blueprint
```python
from sanic import Sanic
import sanic.exceptions
app = Sanic("MyHelloWorldApp")
bp = sanic.Blueprint("MyBlueprint")
@bp.get("/", error_format="json")
async def hello_world(request):
raise sanic.exceptions.SanicException("Big Mistake, Huge.", status_code=400)
app.blueprint(bp)
```
// Elsewhere...
```bash
$ curl localhost:8000/ -i
HTTP/1.1 400 Bad Request
content-length: 68
connection: keep-alive
content-type: text/plain; charset=utf-8
⚠️ 400 — Bad Request
====================
Big Mistake, Huge.
```
### Expected Behavior
I expect the `error_format` option to be applied to the blueprint route the same as if applied to an app route.
### How do you run Sanic?
Sanic CLI
### Operating System
Mac OSX 12.3
### Sanic Version
23.3.0
### Additional context
_No response_
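With the fix above applied, the reporter's blueprint example behaves like the app-level one; shown here as a usage sketch:

```python
from sanic import Blueprint, Sanic
from sanic.exceptions import SanicException

app = Sanic("MyHelloWorldApp")
bp = Blueprint("MyBlueprint")

@bp.get("/", error_format="json")
async def hello_world(request):
    raise SanicException("Big Mistake, Huge.", status_code=400)

app.blueprint(bp)
# Errors from this route now render as JSON instead of falling back
# to the app-wide default format.
```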
| 2023-07-08T19:08:49 |
|
sanic-org/sanic | 2,774 | sanic-org__sanic-2774 | [
"2749"
] | c17230ef9443d7e9932ac425ddbca4ad850f96a2 | diff --git a/sanic/errorpages.py b/sanic/errorpages.py
--- a/sanic/errorpages.py
+++ b/sanic/errorpages.py
@@ -92,8 +92,10 @@ def render(self) -> HTTPResponse:
self.full
if self.debug and not getattr(self.exception, "quiet", False)
else self.minimal
- )
- return output()
+ )()
+ output.status = self.status
+ output.headers.update(self.headers)
+ return output
def minimal(self) -> HTTPResponse: # noqa
"""
@@ -125,7 +127,7 @@ def full(self) -> HTTPResponse:
request=self.request,
exc=self.exception,
)
- return html(page.render(), status=self.status, headers=self.headers)
+ return html(page.render())
def minimal(self) -> HTTPResponse:
return self.full()
@@ -146,8 +148,7 @@ def full(self) -> HTTPResponse:
text=self.text,
bar=("=" * len(self.title)),
body=self._generate_body(full=True),
- ),
- status=self.status,
+ )
)
def minimal(self) -> HTTPResponse:
@@ -157,9 +158,7 @@ def minimal(self) -> HTTPResponse:
text=self.text,
bar=("=" * len(self.title)),
body=self._generate_body(full=False),
- ),
- status=self.status,
- headers=self.headers,
+ )
)
@property
@@ -218,11 +217,11 @@ class JSONRenderer(BaseRenderer):
def full(self) -> HTTPResponse:
output = self._generate_output(full=True)
- return json(output, status=self.status, dumps=self.dumps)
+ return json(output, dumps=self.dumps)
def minimal(self) -> HTTPResponse:
output = self._generate_output(full=False)
- return json(output, status=self.status, dumps=self.dumps)
+ return json(output, dumps=self.dumps)
def _generate_output(self, *, full):
output = {
| diff --git a/tests/test_errorpages.py b/tests/test_errorpages.py
--- a/tests/test_errorpages.py
+++ b/tests/test_errorpages.py
@@ -527,3 +527,26 @@ class FakeObject:
]
assert logmsg == expected
+
+
[email protected](
+ "format,expected",
+ (
+ ("html", "text/html; charset=utf-8"),
+ ("text", "text/plain; charset=utf-8"),
+ ("json", "application/json"),
+ ),
+)
+def test_exception_header_on_renderers(app: Sanic, format, expected):
+ app.config.FALLBACK_ERROR_FORMAT = format
+
+ @app.get("/test")
+ def test(request):
+ raise SanicException(
+ "test", status_code=400, headers={"exception": "test"}
+ )
+
+ _, response = app.test_client.get("/test")
+ assert response.status == 400
+ assert response.headers.get("exception") == "test"
+ assert response.content_type == expected
| Headers from Exceptions
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Describe the bug
Headers set on Exception objects not carried through on all renderers
### Code snippet
```py
raise Unauthorized(
"Auth required.",
headers={"foo": "bar"},
)
```
### Expected Behavior
Response should have:
```
Foo: bar
```
### How do you run Sanic?
Sanic CLI
### Operating System
all
### Sanic Version
23.3
### Additional context
_No response_
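A usage sketch matching the report, assuming the renderer fix above is in place:

```python
from sanic import Sanic
from sanic.exceptions import Unauthorized

app = Sanic("HeaderDemo")

@app.get("/secret")
async def secret(request):
    # With the fix, this header survives into the rendered error response.
    raise Unauthorized("Auth required.", headers={"foo": "bar"})
```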
| 2023-07-09T06:34:27 |
|
sanic-org/sanic | 2,824 | sanic-org__sanic-2824 | [
"2823"
] | 57d44f263fa84bd5cd6f77b5565825640d85b1e2 | diff --git a/sanic/helpers.py b/sanic/helpers.py
--- a/sanic/helpers.py
+++ b/sanic/helpers.py
@@ -122,25 +122,6 @@ def is_hop_by_hop_header(header):
return header.lower() in _HOP_BY_HOP_HEADERS
-def remove_entity_headers(headers, allowed=("content-location", "expires")):
- """
- Removes all the entity headers present in the headers given.
- According to RFC 2616 Section 10.3.5,
- Content-Location and Expires are allowed as for the
- "strong cache validator".
- https://tools.ietf.org/html/rfc2616#section-10.3.5
-
- returns the headers without the entity headers
- """
- allowed = set([h.lower() for h in allowed])
- headers = {
- header: value
- for header, value in headers.items()
- if not is_entity_header(header) or header.lower() in allowed
- }
- return headers
-
-
def import_string(module_name, package=None):
"""
import a module or class by string path.
diff --git a/sanic/response/types.py b/sanic/response/types.py
--- a/sanic/response/types.py
+++ b/sanic/response/types.py
@@ -24,7 +24,6 @@
Default,
_default,
has_message_body,
- remove_entity_headers,
)
from sanic.http import Http
@@ -104,9 +103,6 @@ def processed_headers(self) -> Iterator[Tuple[bytes, bytes]]:
Returns:
Iterator[Tuple[bytes, bytes]]: A list of header tuples encoded in bytes for sending
""" # noqa: E501
- # TODO: Make a blacklist set of header names and then filter with that
- if self.status in (304, 412): # Not Modified, Precondition Failed
- self.headers = remove_entity_headers(self.headers)
if has_message_body(self.status):
self.headers.setdefault("content-type", self.content_type)
# Encode headers into bytes
| diff --git a/tests/test_helpers.py b/tests/test_helpers.py
--- a/tests/test_helpers.py
+++ b/tests/test_helpers.py
@@ -41,28 +41,6 @@ def test_is_hop_by_hop_header():
assert helpers.is_hop_by_hop_header(header) is expected
-def test_remove_entity_headers():
- tests = (
- ({}, {}),
- ({"Allow": "GET, POST, HEAD"}, {}),
- (
- {
- "Content-Type": "application/json",
- "Expires": "Wed, 21 Oct 2015 07:28:00 GMT",
- "Foo": "Bar",
- },
- {"Expires": "Wed, 21 Oct 2015 07:28:00 GMT", "Foo": "Bar"},
- ),
- (
- {"Allow": "GET, POST, HEAD", "Content-Location": "/test"},
- {"Content-Location": "/test"},
- ),
- )
-
- for header, expected in tests:
- assert helpers.remove_entity_headers(header) == expected
-
-
def test_import_string_class():
obj = helpers.import_string("sanic.config.Config")
assert isinstance(obj, Config)
diff --git a/tests/test_response.py b/tests/test_response.py
--- a/tests/test_response.py
+++ b/tests/test_response.py
@@ -178,6 +178,10 @@ async def no_content_unmodified_handler(request: Request):
async def unmodified_handler(request: Request):
return json(JSON_DATA, status=304)
+ @app.get("/precondition")
+ async def precondition_handler(request: Request):
+ return json(JSON_DATA, status=412)
+
@app.delete("/")
async def delete_handler(request: Request):
return json(None, status=204)
@@ -193,6 +197,10 @@ def test_json_response(json_app):
assert response.text == json_dumps(JSON_DATA)
assert response.json == JSON_DATA
+ request, response = json_app.test_client.get("/precondition")
+ assert response.status == 412
+ assert response.json == JSON_DATA
+
def test_no_content(json_app):
request, response = json_app.test_client.get("/no-content")
| The Sanic built-in server blocks for exactly 90 seconds on status code 412
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Describe the bug
Sanic, like most web servers, usually responds to requests. However, if the response has status code 412, it is very slow and takes exactly **90 extra seconds** to respond, stalling after the handler finishes.
This behavior does not happen when running Sanic with uvicorn. Only the official Sanic server. It also doesn't happen with FastAPI.
### Code snippet
```python
import sanic
app = sanic.Sanic(__name__)
@app.get("/")
async def root(req: sanic.Request):
status = int(req.args.get("status", "200"))
return sanic.json({"message": "Hello World"}, status=status)
```
### Expected Behavior
```bash
sanic main:app --port 8051
```
Then:
```sh-session
$ curl http://localhost:8051
{"message":"Hello World"}
$ curl http://localhost:8051/?status=400 # fine
{"message":"Hello World"}
$ curl http://localhost:8051/?status=411 # fine
{"message":"Hello World"}
$ curl http://localhost:8051/?status=413 # fine
{"message":"Hello World"}
$ curl http://localhost:8051/?status=412
# stalls with no response for 90 seconds
```
### How do you run Sanic?
Sanic CLI
### Operating System
Linux
### Sanic Version
Sanic v23.6.0
### Additional context
I have reproduced this on both Linux and macOS. I have also reproduced this using both the Sanic CLI and the `Sanic.serve()` function programmatically.
| Thanks for the report. This affects HTTP status ~~304~~ and 412, which are not supposed to have "entity headers" (https://datatracker.ietf.org/doc/html/rfc2616#section-7.1).
The response gets sent immediately (use `curl -v`) but without `content-length` because that header gets stripped off by Sanic but it still sends the body bytes. Curl sees the body but doesn't know what to do with it and waits for disconnection to signal the end of it.
Triaging:
sanic/helpers.py - stripping off entity headers
sanic/response/types.py - handling of 304, 412 by above
sanic/http/http1.py:343 - gets trimmed headers and then sends body anyway
Notes: `transfer-encoding: chunked` is not being stripped as an entity header. Because of this, a response body that happens to look like chunked encoding gets decoded by curl whenever that header is present 🥶
```python
return sanic.text("4\r\nBUGS\r\n0\r\n\r\n", status=412, headers={"transfer-encoding": "chunked"})
```
I believe the 412 handling of Sanic as it is now is broken and handled in wrong location, causing this confusion within HTTP protocol (at least HTTP1, didn't check HTTP3 or ASGI). For those status codes in addition to entity headers also `transfer-encoding` and the body data would need to be stripped, it seems.
Referring to @ahopkins for further review.
Status code 304 is NOT affected because it is correctly handled by `sanic.helpers.has_message_body`.
Checking the HTTP RFC, I cannot find why entity headers or body should be stripped off 412 responses. Removing that stripping would allow body in such responses, which seems correct.
Thanks for finding the issue and responding so quickly โ makes sense. I wonder why specifically 412 was treated differently. :)
> The response gets sent immediately (use `curl -v`) but without `content-length` because that header gets stripped off by Sanic but it still sends the body bytes. Curl sees the body but doesn't know what to do with it and waits for disconnection to signal the end of it.
Makes sense then, so the 90 seconds is a TCP timeout. | 2023-09-20T19:01:13 |
sanic-org/sanic | 2,837 | sanic-org__sanic-2837 | [
"2835"
] | a5a9658896984ddad484e168d4cb5c96e589fbad | diff --git a/sanic/cookies/request.py b/sanic/cookies/request.py
--- a/sanic/cookies/request.py
+++ b/sanic/cookies/request.py
@@ -73,12 +73,17 @@ def parse_cookie(raw: str) -> Dict[str, List[str]]:
cookies: Dict[str, List[str]] = {}
for token in raw.split(";"):
- name, __, value = token.partition("=")
+ name, sep, value = token.partition("=")
name = name.strip()
value = value.strip()
- if not name:
- continue
+ # Support cookies =value or plain value with no name
+ # https://github.com/httpwg/http-extensions/issues/159
+ if not sep:
+ if not name:
+ # Empty value like ;; or a cookie header with no value
+ continue
+ name, value = "", name
if COOKIE_NAME_RESERVED_CHARS.search(name): # no cov
continue
diff --git a/sanic/models/protocol_types.py b/sanic/models/protocol_types.py
--- a/sanic/models/protocol_types.py
+++ b/sanic/models/protocol_types.py
@@ -3,7 +3,7 @@
import sys
from asyncio import BaseTransport
-from typing import TYPE_CHECKING, Any, AnyStr, Optional
+from typing import TYPE_CHECKING, Any, Optional, Union
if TYPE_CHECKING:
@@ -19,10 +19,10 @@
from typing import Protocol
class HTMLProtocol(Protocol):
- def __html__(self) -> AnyStr:
+ def __html__(self) -> Union[str, bytes]:
...
- def _repr_html_(self) -> AnyStr:
+ def _repr_html_(self) -> Union[str, bytes]:
...
class Range(Protocol):
| diff --git a/tests/test_cookies.py b/tests/test_cookies.py
--- a/tests/test_cookies.py
+++ b/tests/test_cookies.py
@@ -7,12 +7,28 @@
from sanic import Request, Sanic
from sanic.compat import Header
from sanic.cookies import Cookie, CookieJar
-from sanic.cookies.request import CookieRequestParameters
+from sanic.cookies.request import CookieRequestParameters, parse_cookie
from sanic.exceptions import ServerError
from sanic.response import text
from sanic.response.convenience import json
+def test_request_cookies():
+ cdict = parse_cookie("foo=one; foo=two; abc = xyz;;bare;=bare2")
+ assert cdict == {
+ "foo": ["one", "two"],
+ "abc": ["xyz"],
+ "": ["bare", "bare2"],
+ }
+ c = CookieRequestParameters(cdict)
+ assert c.getlist("foo") == ["one", "two"]
+ assert c.getlist("abc") == ["xyz"]
+ assert c.getlist("") == ["bare", "bare2"]
+ assert (
+ c.getlist("bare") == None
+ ) # [] might be sensible but we got None for now
+
+
# ------------------------------------------------------------ #
# GET
# ------------------------------------------------------------ #
diff --git a/tests/test_graceful_shutdown.py b/tests/test_graceful_shutdown.py
--- a/tests/test_graceful_shutdown.py
+++ b/tests/test_graceful_shutdown.py
@@ -1,6 +1,8 @@
import asyncio
import logging
+import pytest
+
from pytest import LogCaptureFixture
from sanic.response import empty
@@ -9,6 +11,7 @@
PORT = 42101
[email protected](reason="This test runs fine locally, but fails on CI")
def test_no_exceptions_when_cancel_pending_request(
app, caplog: LogCaptureFixture
):
| Cookie totally breaks if the client sets a bare cookie
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Describe the bug
A cookie may not be in the `key=value` format. For example, if the JS code runs `document.cookie = "bad"`, it becomes:

I don't know how to call it. I will use the term "bare cookie" in this report. In the following requests with a bare cookie, the Cookie HTTP header becomes: `Cookie: key=value; bad`

It seems that Sanic cannot parse the header with bare cookies, and will throw all cookies (including the legitimate `key=value` pair) away. See the code snippet below.
### Code snippet
```python
from sanic import Sanic
from sanic.response import html, text
app = Sanic("test")
app.config.AUTO_EXTEND = False
@app.get("/")
async def route1(request):
return html('<script>document.cookie="key=value"; document.cookie="bad"; location.href="/fire";</script>')
@app.get("/fire")
async def route2(request):
return text(f'''
headers = {request.headers.get("Cookie")}
key = {request.cookies.get("key", "none")}
''')
if __name__ == '__main__':
app.run(port=4321, debug=True)
```
Then visit `http://127.0.0.1:4321/` in Chrome. The page shows:
```
headers = key=value; bad
key = none
```
### Expected Behavior
The page should show:
```
headers = key=value; bad
key = value
```
### How do you run Sanic?
As a script (`app.run` or `Sanic.serve`)
### Operating System
Windows
### Sanic Version
22.12.0
### Additional context
I am using the latest stable Chrome (117.0.5938.150) to reproduce this.
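With the fix above, a bare token is kept under the empty-string name instead of poisoning the header; a sketch mirroring the new test:

```python
from sanic.cookies.request import parse_cookie

cdict = parse_cookie("key=value; bad")
assert cdict == {"key": ["value"], "": ["bad"]}
```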
| Are you aware if this cookie format is allowed by any specs, or if it simply a quirk that the browser can send it like that? Browsers otherwise do some formatting on document.cookie and don't simply pass the string.
Malformed cookie headers could be used for exploiting parsers which understand them in different ways, potentially altering content of another cookie, so we try to make Sanic err on the side of rejection all of it rather than potentially return manipulated values, although it is noted that setting a cookie to prevent other cookies being parsed can also be exploited.
Should we simply skip the "bare cookie" and still try to read the rest, or store it as a `""` named cookie value? Needs investigation. Any real world implications to this?
See https://github.com/httpwg/http-extensions/issues/159 and https://github.com/httpwg/http-extensions/pull/1018. It seems that `document.cookie = "foo"` is considered as a Cookie of a null key and a `foo` value.
The impact to my website is that if a frontend script accidentially sets things like `document.cookie = "x"`, the user will never be able to log in, because Sanic will refuse to read any other values in the cookie. | 2023-10-14T18:31:12 |
sanic-org/sanic | 2,858 | sanic-org__sanic-2858 | [
"2743"
] | 0663f11d2096db133c1c963c66fd070f2ada1fea | diff --git a/sanic/server/protocols/websocket_protocol.py b/sanic/server/protocols/websocket_protocol.py
--- a/sanic/server/protocols/websocket_protocol.py
+++ b/sanic/server/protocols/websocket_protocol.py
@@ -1,4 +1,4 @@
-from typing import TYPE_CHECKING, Optional, Sequence, cast
+from typing import Optional, Sequence, cast
try: # websockets < 11.0
@@ -8,19 +8,18 @@
from websockets.protocol import State # type: ignore
from websockets.server import ServerProtocol # type: ignore
+from websockets import http11
+from websockets.datastructures import Headers as WSHeaders
from websockets.typing import Subprotocol
from sanic.exceptions import SanicException
from sanic.log import logger
+from sanic.request import Request
from sanic.server import HttpProtocol
from ..websockets.impl import WebsocketImplProtocol
-if TYPE_CHECKING:
- from websockets import http11
-
-
OPEN = State.OPEN
CLOSING = State.CLOSING
CLOSED = State.CLOSED
@@ -94,6 +93,13 @@ def close_if_idle(self):
else:
return super().close_if_idle()
+ @staticmethod
+ def sanic_request_to_ws_request(request: Request):
+ return http11.Request(
+ path=request.path,
+ headers=WSHeaders(request.headers),
+ )
+
async def websocket_handshake(
self, request, subprotocols: Optional[Sequence[str]] = None
):
@@ -117,7 +123,7 @@ async def websocket_handshake(
state=OPEN,
logger=logger,
)
- resp: "http11.Response" = ws_proto.accept(request)
+ resp = ws_proto.accept(self.sanic_request_to_ws_request(request))
except Exception:
msg = (
"Failed to open a WebSocket connection.\n"
| diff --git a/tests/test_ws_handlers.py b/tests/test_ws_handlers.py
--- a/tests/test_ws_handlers.py
+++ b/tests/test_ws_handlers.py
@@ -1,3 +1,6 @@
+import base64
+import secrets
+
from typing import Any, Callable, Coroutine
import pytest
@@ -70,6 +73,23 @@ async def ws_echo_handler(request: Request, ws: Websocket):
assert ws_proxy.client_received == ["test 1", "test 2"]
+def test_ws_handler_invalid_upgrade(app: Sanic):
+ @app.websocket("/ws")
+ async def ws_echo_handler(request: Request, ws: Websocket):
+ async for msg in ws:
+ await ws.send(msg)
+
+ ws_key = base64.b64encode(secrets.token_bytes(16)).decode("utf-8")
+ invalid_upgrade_headers = {
+ "Upgrade": "websocket",
+ # "Connection": "Upgrade",
+ "Sec-WebSocket-Key": ws_key,
+ "Sec-WebSocket-Version": "13",
+ }
+ _, response = app.test_client.get("/ws", headers=invalid_upgrade_headers)
+ assert response.status == 426
+
+
def test_ws_handler_async_for(
app: Sanic,
simple_ws_mimic_client: MimicClientType,
| Websocket invalid upgrade exception handling b0rkage
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Describe the bug
A client apparently sent no Upgrade header to a websocket endpoint, leading to an error as it should. An ugly traceback is printed on the terminal, even though the error eventually seems to get handled correctly.
It would appear that the websockets module attempts to attach its exception on `request._exception` field which Sanic's Request doesn't have a slot for. This could be hidden if Sanic later used `raise BadRequest(...) from None` rather than `raise SanicException(...)`, suppressing the chain and giving a non-500 error for what really is no server error. Not sure though if that would from this context ever reach the client anyway but at least it could avoid a traceback in server log.
If anyone wants to investigate and make a PR, feel free to (I am currently busy and cannot do that unfortunately).
```python
Traceback (most recent call last):
File "/home/user/.local/lib/python3.10/site-packages/websockets/server.py", line 111, in accept
) = self.process_request(request)
File "/home/user/.local/lib/python3.10/site-packages/websockets/server.py", line 218, in process_request
raise InvalidUpgrade("Upgrade", ", ".join(upgrade) if upgrade else None)
websockets.exceptions.InvalidUpgrade: missing Upgrade header
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/user/sanic/sanic/server/protocols/websocket_protocol.py", line 120, in websocket_handshake
resp: "http11.Response" = ws_proto.accept(request)
File "/home/user/.local/lib/python3.10/site-packages/websockets/server.py", line 122, in accept
request._exception = exc
AttributeError: 'Request' object has no attribute '_exception'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "handle_request", line 97, in handle_request
File "/home/user/sanic/sanic/app.py", line 1047, in _websocket_handler
ws = await protocol.websocket_handshake(request, subprotocols)
File "/home/user/sanic/sanic/server/protocols/websocket_protocol.py", line 126, in websocket_handshake
raise SanicException(msg, status_code=500)
sanic.exceptions.SanicException: Failed to open a WebSocket connection.
See server log for more information.
```
### Code snippet
_No response_
### Expected Behavior
400 Bad Request error reaching the client and being more silent on server side. Including the message of **missing Upgrade header** would be helpful for debugging (e.g. in case Nginx proxy config forgot to forward that header).
### How do you run Sanic?
Sanic CLI
### Operating System
Linux
### Sanic Version
Almost 23.03.0 (a git version slightly before release)
### Additional context
_No response_
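A self-contained sketch of the `from None` suppression the report suggests; `handshake` and its argument are illustrative, not Sanic APIs:

```python
from typing import Optional

from sanic.exceptions import BadRequest

def handshake(upgrade_header: Optional[str]) -> None:
    try:
        if upgrade_header != "websocket":
            raise ValueError("missing Upgrade header")
    except ValueError as exc:
        # `from None` drops the chained "During handling..." noise while
        # still producing a 400-class error instead of a 500.
        raise BadRequest(f"WebSocket handshake failed: {exc}") from None

# handshake(None) raises BadRequest with a single, clean traceback.
```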
| I plan to take a look at it if no one else started working on it.
~~Sanic server return a 500 error in this case. Should we make it be 400 instead? (I think so, but would like to double check)~~
Sorry misread the issue, and we should make it 400
FYI, this code snippet I got can reproduce the issue.
```python
import http.client
import base64
import secrets
from urllib.parse import urlparse
def connect_websocket(url: str, message: str) -> None:
parsed_url = urlparse(url)
conn = http.client.HTTPConnection(parsed_url.netloc)
websocket_key = base64.b64encode(secrets.token_bytes(16)).decode('utf-8')
headers = {
"Upgrade": "websocket",
#"Connection": "Upgrade",
"Sec-WebSocket-Key": websocket_key,
"Sec-WebSocket-Version": "13"
}
conn.putrequest("GET", parsed_url.path)
for header, value in headers.items():
conn.putheader(header, value)
conn.endheaders()
response = conn.getresponse()
if response.status == 101 and response.getheader('Upgrade', '').lower() == 'websocket':
print("WebSocket handshake successful")
conn.sock.sendall(message.encode())
else:
print(f"WebSocket handshake failed: {response.status} {response.reason}")
conn.close()
websocket_url = "ws://localhost:8000/ws-echo"
connect_websocket(websocket_url, "Hello, WebSocket Server!")
```
| 2023-11-26T08:39:32 |
sanic-org/sanic | 2,870 | sanic-org__sanic-2870 | [
"2836"
] | d0bbcf55d540e8d4c9d8bb1108384d8545355fcb | diff --git a/sanic/__version__.py b/sanic/__version__.py
--- a/sanic/__version__.py
+++ b/sanic/__version__.py
@@ -1 +1 @@
-__version__ = "23.12.0"
+__version__ = "23.12.1"
diff --git a/sanic/compat.py b/sanic/compat.py
--- a/sanic/compat.py
+++ b/sanic/compat.py
@@ -126,11 +126,11 @@ def __getattr__(self, key: str) -> str:
if key.startswith("_"):
return self.__getattribute__(key)
key = key.rstrip("_").replace("_", "-")
- return ",".join(self.getall(key, default=[]))
+ return ",".join(self.getall(key, []))
def get_all(self, key: str):
"""Convenience method mapped to ``getall()``."""
- return self.getall(key, default=[])
+ return self.getall(key, [])
use_trio = sys.argv[0].endswith("hypercorn") and "trio" in sys.argv
| diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml
--- a/.github/workflows/tests.yml
+++ b/.github/workflows/tests.yml
@@ -41,6 +41,7 @@ jobs:
- { python-version: "3.10", tox-env: py310, max-attempts: 3 }
- { python-version: "3.10", tox-env: py310-no-ext, max-attempts: 3 }
- { python-version: "3.11", tox-env: py311, max-attempts: 3 }
+ - { python-version: "3.12", tox-env: py312, max-attempts: 3 }
- { python-version: "3.11", tox-env: py311-no-ext, max-attempts: 3 }
- { python-version: "3.8", tox-env: py38-no-ext, platform: windows-latest, ignore-errors: true }
- { python-version: "3.9", tox-env: py39-no-ext, platform: windows-latest, ignore-errors: true }
diff --git a/tests/test_app.py b/tests/test_app.py
--- a/tests/test_app.py
+++ b/tests/test_app.py
@@ -657,3 +657,15 @@ def test_stop_trigger_terminate(app: Sanic):
app.stop(unregister=False)
app.multiplexer.terminate.assert_called_once()
+
+
+def test_refresh_pass_passthru_data_to_new_instance(app: Sanic):
+ # arrange
+ passthru = {
+ '_inspector': 2,
+ 'config': {'TOUCHUP': 23}
+ }
+ app = app.refresh(passthru)
+
+ assert app.inspector == 2
+ assert app.config.TOUCHUP == 23
diff --git a/tests/test_blueprint_group.py b/tests/test_blueprint_group.py
--- a/tests/test_blueprint_group.py
+++ b/tests/test_blueprint_group.py
@@ -25,6 +25,16 @@ def test_bp_group_indexing(app: Sanic):
with raises(expected_exception=IndexError):
_ = group[3]
+def test_bp_group_set_item_by_index(app: Sanic):
+ blueprint_1 = Blueprint("blueprint_1", url_prefix="/bp1")
+ blueprint_2 = Blueprint("blueprint_2", url_prefix="/bp2")
+
+ group = Blueprint.group(blueprint_1, blueprint_2)
+ group[0] = blueprint_2
+
+ assert group[0] == blueprint_2
+
+
def test_bp_group_with_additional_route_params(app: Sanic):
blueprint_1 = Blueprint("blueprint_1", url_prefix="/bp1")
diff --git a/tests/test_blueprints.py b/tests/test_blueprints.py
--- a/tests/test_blueprints.py
+++ b/tests/test_blueprints.py
@@ -1112,3 +1112,43 @@ async def index(_):
app.router.finalize()
assert app.router.routes[0].path == "foo/"
+
+
+def test_blueprint_copy_returns_blueprint_with_the_name_of_original_blueprint(
+ app: Sanic,
+):
+ # arrange
+ bp = Blueprint("bp")
+
+ # act
+ actual = bp.copy("new_bp_name")
+
+ # assert
+ assert bp.name == actual.copied_from
+
+
+def test_blueprint_copy_returns_blueprint_with_overwritten_properties(
+ app: Sanic,
+):
+ # arrange
+ bp = Blueprint("bp")
+ to_override_attrs = expected = dict(
+ url_prefix="v2",
+ version="v2",
+ version_prefix="v2",
+ allow_route_overwrite=True,
+ strict_slashes=True,
+ )
+
+ # act
+ actual = bp.copy(
+ "new_bp_name",
+ **to_override_attrs,
+ )
+
+ # assert
+ assert all(
+ value == getattr(actual, key)
+ for key, value in expected.items()
+ if hasattr(actual, key)
+ )
diff --git a/tests/test_websockets.py b/tests/test_websockets.py
--- a/tests/test_websockets.py
+++ b/tests/test_websockets.py
@@ -5,7 +5,7 @@
import pytest
-from websockets.frames import CTRL_OPCODES, DATA_OPCODES, Frame
+from websockets.frames import CTRL_OPCODES, DATA_OPCODES, OP_TEXT, Frame
from sanic.exceptions import ServerError
from sanic.server.websockets.frame import WebsocketFrameAssembler
@@ -210,17 +210,14 @@ async def test_ws_frame_put_message_complete(opcode):
@pytest.mark.asyncio
@pytest.mark.parametrize("opcode", DATA_OPCODES)
async def test_ws_frame_put_message_into_queue(opcode):
+ foo = "foo" if (opcode == OP_TEXT) else b"foo"
assembler = WebsocketFrameAssembler(Mock())
assembler.chunks_queue = AsyncMock(spec=Queue)
assembler.message_fetched = AsyncMock()
assembler.message_fetched.is_set = Mock(return_value=False)
-
await assembler.put(Frame(opcode, b"foo"))
- assembler.chunks_queue.put.has_calls(
- call(b"foo"),
- call(None),
- )
+ assert assembler.chunks_queue.put.call_args_list == [call(foo), call(None)]
@pytest.mark.asyncio
| Support for Python 3.12
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Is your feature request related to a problem? Please describe.
Currently unable to use Sanic with Python 3.12
### Describe the solution you'd like
[uvloop 0.18.0 was just released](https://github.com/MagicStack/uvloop/releases/tag/v0.18.0) and supports Python 3.12
It'd be great to get support for Python 3.12 on the next release of Sanic along with the 2022.12 LTS release
### Additional context
_No response_
| Making this the tracking issue for Python 3.12 compatibility. Please post any incompatibilities found here. We probably need to update some dependencies, and add 3.12 testing to our CI. Rarely have any Python updates (other than asyncio loop argument removal) prompted for changes in Sanic itself.
@ahopkins Is this feature something what is okay to pick up into progress by someone whom isn't part of the core maintainers group ?
@iAndriy Feel free to hack it, PRs from anyone are welcome here! And core developers are quite overloaded already =)
A good start would be to just add to the CI, open a PR, mark it as draft and list out all the errors you encountered.
First and foremost add it to tox.ini and run the tests.
```
tox -e py312
```
> A good start would be to just add to the CI, open a PR, mark it as draft and list out all the errors you encountered.
>
> First and foremost add it to tox.ini and run the tests.
>
> ```
> tox -e py312
> ```
@ahopkins thanks for suggestion | 2023-12-06T23:59:35 |
sanic-org/sanic | 2,895 | sanic-org__sanic-2895 | [
"2894"
] | 3d85a1c73849f093e8a7f39f7a5b1e7ddc35045d | diff --git a/sanic/__version__.py b/sanic/__version__.py
--- a/sanic/__version__.py
+++ b/sanic/__version__.py
@@ -1 +1 @@
-__version__ = "23.12.0"
+__version__ = "23.12.1"
diff --git a/sanic/worker/manager.py b/sanic/worker/manager.py
--- a/sanic/worker/manager.py
+++ b/sanic/worker/manager.py
@@ -452,7 +452,7 @@ def _poll_monitor(self) -> Optional[MonitorCycle]:
elif message == "__TERMINATE__":
self._handle_terminate()
return MonitorCycle.BREAK
- elif isinstance(message, tuple) and len(message) == 7:
+ elif isinstance(message, tuple) and len(message) == 8:
self._handle_manage(*message) # type: ignore
return MonitorCycle.CONTINUE
elif not isinstance(message, str):
| Multiplexer and Manager out of sync on number of expected arguments
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Describe the bug
The manager is expecting the wrong number of arguments. Therefore app.m.manage is not working as intended.
```
[ERROR] Monitor received an invalid message: ('foobar', <function foo at 0x7fd1c8ce34c0>, {}, False, None, False, True, 1)
```
### Code snippet
_No response_
### Expected Behavior
_No response_
### How do you run Sanic?
Sanic CLI
### Operating System
Linux
### Sanic Version
23.12.0
### Additional context
_No response_
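An illustrative sketch of the mismatch (all names here are hypothetical): the multiplexer now packs eight fields, so the hard-coded seven-field guard routed valid manage requests to the error log.

```python
def poll(message, expected_fields: int = 8) -> str:
    if isinstance(message, tuple) and len(message) == expected_fields:
        return f"manage {message[0]!r}"
    return f"Monitor received an invalid message: {message!r}"

msg = ("foobar", print, {}, False, None, False, True, 1)  # eight fields
assert poll(msg).startswith("manage")
```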
| 2024-01-09T07:51:56 |
||
sanic-org/sanic | 2,899 | sanic-org__sanic-2899 | [
"2889"
] | 12e61db66a780c371510a6c877a5335124babf48 | diff --git a/sanic/cli/console.py b/sanic/cli/console.py
--- a/sanic/cli/console.py
+++ b/sanic/cli/console.py
@@ -130,7 +130,6 @@ def __init__(self, app: Sanic, start: Optional[Default] = None):
f" - {Colors.BOLD + Colors.SANIC}client{Colors.END}: A client to access the Sanic app instance using httpx - {Colors.BOLD + Colors.BLUE}from httpx import Client{Colors.END}", # noqa: E501
)
else:
- del variable_descriptions[3]
client_availability = (
f"\n{Colors.YELLOW}The HTTP client has been disabled. "
"To enable it, install httpx:\n\t"
| Can't use optional CLI arguments
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Describe the bug
When I try `sanic module:app` it works.
When I try `sanic -d module:app` it doesn't. Trying `--dev` doesn't work either. I can run with `-r` and `--debug` separately.
```
$ sanic testapp:app --dev
[2024-01-04 15:39:58 -0500] [19499] [INFO]
┌─────────────────────────────────────────────────────────────────────────────────────────────────────┐
│                                            Sanic v23.12.0                                           │
│                                   Goin' Fast @ http://127.0.0.1:8000                                │
├───────────────────────┬─────────────────────────────────────────────────────────────────────────────┤
│                       │         app: testapp                                                        │
│     ▄███ █████ ██     │        mode: debug, single worker                                           │
│    ██                 │      server: sanic, HTTP/1.1                                                │
│     ▀███████ ███▄     │      python: 3.10.12                                                        │
│                 ██    │    platform: Linux-5.15.133.1-microsoft-standard-WSL2-x86_64-with-glibc2.35 │
│    ████ ████████▀     │ auto-reload: enabled                                                        │
│                       │    packages: sanic-routing==23.12.0, sanic-ext==23.12.0                     │
│ Build Fast. Run Fast. │                                                                             │
└───────────────────────┴─────────────────────────────────────────────────────────────────────────────┘
[2024-01-04 15:39:58 -0500] [19499] [DEBUG] Creating multiprocessing context using 'spawn'
[2024-01-04 15:39:58 -0500] [19499] [ERROR] Experienced exception while trying to serve
Traceback (most recent call last):
File "/home/python/virtualenvs/testapp/lib/python3.10/site-packages/sanic/mixins/startup.py", line 1144, in serve
trigger_events(ready, loop, primary)
File "/home/python/virtualenvs/testapp/lib/python3.10/site-packages/sanic/server/events.py", line 35, in trigger_events
loop.run_until_complete(result)
File "uvloop/loop.pyx", line 1517, in uvloop.loop.Loop.run_until_complete
File "/home/python/virtualenvs/testapp/lib/python3.10/site-packages/sanic/cli/app.py", line 160, in start_repl
SanicREPL(app, self.args.repl).run()
File "/home/python/virtualenvs/testapp/lib/python3.10/site-packages/sanic/cli/console.py", line 134, in __init__
del variable_descriptions[3]
IndexError: list assignment index out of range
[2024-01-04 15:39:58 -0500] [19499] [INFO] Server Stopped
[2024-01-04 15:39:58 -0500] [19499] [DEBUG] Annyeong
Traceback (most recent call last):
File "/home/python/virtualenvs/testapp/bin/sanic", line 8, in <module>
sys.exit(main())
File "/home/python/virtualenvs/testapp/lib/python3.10/site-packages/sanic/__main__.py", line 12, in main
cli.run(args)
File "/home/python/virtualenvs/testapp/lib/python3.10/site-packages/sanic/cli/app.py", line 121, in run
serve(app)
File "/home/python/virtualenvs/testapp/lib/python3.10/site-packages/sanic/mixins/startup.py", line 1144, in serve
trigger_events(ready, loop, primary)
File "/home/python/virtualenvs/testapp/lib/python3.10/site-packages/sanic/server/events.py", line 35, in trigger_events
loop.run_until_complete(result)
File "uvloop/loop.pyx", line 1517, in uvloop.loop.Loop.run_until_complete
File "/home/python/virtualenvs/testapp/lib/python3.10/site-packages/sanic/cli/app.py", line 160, in start_repl
SanicREPL(app, self.args.repl).run()
File "/home/python/virtualenvs/testapp/lib/python3.10/site-packages/sanic/cli/console.py", line 134, in __init__
del variable_descriptions[3]
IndexError: list assignment index out of range
```
### Code snippet
_No response_
### Expected Behavior
_No response_
### How do you run Sanic?
Sanic CLI
### Operating System
Linux
### Sanic Version
23.12.0
### Additional context
_No response_
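A condensed sketch of the underlying bug in `sanic/cli/console.py`: the "client" description is only appended when httpx imports, so there is never an index 3 to delete in the fallback branch, and the fix is simply not deleting.

```python
variable_descriptions = ["app", "sanic", "do"]  # three entries
try:
    from httpx import Client  # noqa: F401
    variable_descriptions.append("client")  # index 3 exists only here
except ImportError:
    pass  # previously: del variable_descriptions[3] -> IndexError
```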
| Any more specifics about the OS? I ran into something similar today when running in docker and I assumed it was because of something with stdin.
In short, you can solve by adding `--no-repl` for right now.
Just WSL2, Ubuntu 23.10, Windows 11
`variable_descriptions` is a static list of length 3. There is no `[3]` index to do `del variable_descriptions[3]`.
```py
variable_descriptions = [
f" - {Colors.BOLD + Colors.SANIC}app{Colors.END}: The Sanic application instance - {Colors.BOLD + Colors.BLUE}{str(app)}{Colors.END}", # noqa: E501
f" - {Colors.BOLD + Colors.SANIC}sanic{Colors.END}: The Sanic module - {Colors.BOLD + Colors.BLUE}import sanic{Colors.END}", # noqa: E501
f" - {Colors.BOLD + Colors.SANIC}do{Colors.END}: An async function to fake a request to the application - {Colors.BOLD + Colors.BLUE}Result(request, response){Colors.END}", # noqa: E501
]
```
there is a quick 'solution' without any changes of the current code: `pip3 install httpx` :) | 2024-01-11T03:43:24 |
|
sanic-org/sanic | 2,919 | sanic-org__sanic-2919 | [
"2873"
] | acb29c9dc4d6ba3a453a18e30f0664ba6772a9b4 | diff --git a/sanic/server/protocols/websocket_protocol.py b/sanic/server/protocols/websocket_protocol.py
--- a/sanic/server/protocols/websocket_protocol.py
+++ b/sanic/server/protocols/websocket_protocol.py
@@ -1,12 +1,12 @@
from typing import Optional, Sequence, cast
-try: # websockets < 11.0
- from websockets.connection import State
- from websockets.server import ServerConnection as ServerProtocol
-except ImportError: # websockets >= 11.0
+try: # websockets >= 11.0
from websockets.protocol import State # type: ignore
from websockets.server import ServerProtocol # type: ignore
+except ImportError: # websockets < 11.0
+ from websockets.connection import State
+ from websockets.server import ServerConnection as ServerProtocol
from websockets import http11
from websockets.datastructures import Headers as WSHeaders
| (websockets 12.0) DeprecationWarning: websockets.connection was renamed to websockets.protocol and Connection was renamed to Protocol
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Describe the bug
The DeprecationWarning is thrown here:
https://github.com/sanic-org/sanic/blob/82bf46bea3b55e6b1371107cccda280add8e70eb/sanic/server/protocols/websocket_protocol.py#L4-L9
With websockets 12 the try block would run successfully with the warning while the catch block does not have chance to be run.
### Code snippet
_No response_
### Expected Behavior
The catch block is being run instead.
### How do you run Sanic?
Sanic CLI
### Operating System
MacOS
### Sanic Version
Sanic 23.6.0; Routing 23.6.0
### Additional context
_No response_
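The merged fix (shown in the patch above) simply reorders the probe so the modern layout is tried first; in isolation:

```python
try:  # websockets >= 11.0: no DeprecationWarning on this path
    from websockets.protocol import State
    from websockets.server import ServerProtocol
except ImportError:  # websockets < 11.0
    from websockets.connection import State
    from websockets.server import ServerConnection as ServerProtocol
```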
| Although it triggers a deprecation warning, the `try...except...` block will work when the classes are actually removed in `websockets` package in the future. Checking version of a package requires an additional dependency `packaging`, which we wouldn't like to add, thus I think we can just ignore the warning.
See more discussion in https://github.com/sanic-org/sanic/pull/2880
> Although it triggers a deprecation warning, the `try...except...` block will work when the classes are actually removed in `websockets` package in the future. Checking version of a package requires an additional dependency `packaging`, which we wouldn't like to add, thus I think we can just ignore the warning.
Can't you `try` the new API, which will give ImportError or such but no deprecation warnings, and `except` fallback to old API?
Looks like you can also use `websockets.__version__` for a version string.
@Tronic You are right, lol. I will try that way. | 2024-02-18T08:19:18 |
|
sanic-org/sanic | 2,937 | sanic-org__sanic-2937 | [
"2911"
] | 7331ced31b25d6441073be2fada8f7fe92d90ecd | diff --git a/sanic/asgi.py b/sanic/asgi.py
--- a/sanic/asgi.py
+++ b/sanic/asgi.py
@@ -219,19 +219,26 @@ def respond(self, response: BaseHTTPResponse):
return response
async def send(self, data, end_stream):
- self.stage = Stage.IDLE if end_stream else Stage.RESPONSE
- if self.response:
- response, self.response = self.response, None
+ if self.stage is Stage.IDLE:
+ if not end_stream or data:
+ raise RuntimeError(
+ "There is no request to respond to, either the "
+ "response has already been sent or the "
+ "request has not been received yet."
+ )
+ return
+ if self.response and self.stage is Stage.HANDLER:
await self.transport.send(
{
"type": "http.response.start",
- "status": response.status,
- "headers": response.processed_headers,
+ "status": self.response.status,
+ "headers": self.response.processed_headers,
}
)
- response_body = getattr(response, "body", None)
+ response_body = getattr(self.response, "body", None)
if response_body:
data = response_body + data if data else response_body
+ self.stage = Stage.IDLE if end_stream else Stage.RESPONSE
await self.transport.send(
{
"type": "http.response.body",
| diff --git a/tests/test_response.py b/tests/test_response.py
--- a/tests/test_response.py
+++ b/tests/test_response.py
@@ -575,14 +575,20 @@ async def test(request: Request):
assert "Content-Length" not in response.headers
-def test_two_respond_calls(app: Sanic):
[email protected]
+async def test_direct_response_stream_asgi(app: Sanic):
@app.route("/")
- async def handler(request: Request):
- response = await request.respond()
+ async def test(request: Request):
+ response = await request.respond(content_type="text/csv")
await response.send("foo,")
await response.send("bar")
await response.eof()
+ _, response = await app.asgi_client.get("/")
+ assert response.text == "foo,bar"
+ assert response.headers["Content-Type"] == "text/csv"
+ assert "Content-Length" not in response.headers
+
def test_multiple_responses(
app: Sanic,
@@ -684,7 +690,7 @@ async def handler6(request: Request):
assert message_in_records(caplog.records, error_msg2)
-def send_response_after_eof_should_fail(
+def test_send_response_after_eof_should_fail(
app: Sanic,
caplog: LogCaptureFixture,
message_in_records: Callable[[List[LogRecord], str], bool],
@@ -698,17 +704,48 @@ async def handler(request: Request):
error_msg1 = (
"The error response will not be sent to the client for the following "
- 'exception:"Second respond call is not allowed.". A previous '
+ 'exception:"Response stream was ended, no more response '
+ 'data is allowed to be sent.". A previous '
"response has at least partially been sent."
)
+ error_msg2 = "Response stream was ended, no more response data is allowed to be sent."
+
+ with caplog.at_level(ERROR):
+ _, response = app.test_client.get("/")
+ assert "foo, " in response.text
+ assert message_in_records(caplog.records, error_msg1)
+ assert message_in_records(caplog.records, error_msg2)
+
+
[email protected]
+async def test_send_response_after_eof_should_fail_asgi(
+ app: Sanic,
+ caplog: LogCaptureFixture,
+ message_in_records: Callable[[List[LogRecord], str], bool],
+):
+ @app.get("/")
+ async def handler(request: Request):
+ response = await request.respond()
+ await response.send("foo, ")
+ await response.eof()
+ await response.send("bar")
+
+ error_msg1 = (
+ "The error response will not be sent to the client for the "
+ 'following exception:"There is no request to respond to, '
+ "either the response has already been sent or the request "
+ 'has not been received yet.". A previous response has '
+ "at least partially been sent."
+ )
+
error_msg2 = (
- "Response stream was ended, no more "
- "response data is allowed to be sent."
+ "There is no request to respond to, either the response has "
+ "already been sent or the request has not been received yet."
)
with caplog.at_level(ERROR):
- _, response = app.test_client.get("/")
+ _, response = await app.asgi_client.get("/")
assert "foo, " in response.text
assert message_in_records(caplog.records, error_msg1)
assert message_in_records(caplog.records, error_msg2)
| Response streaming produces [ERROR] Invalid response type None (need HTTPResponse)
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Describe the bug
The "response streaming" [feature of Sanic](https://sanic.dev/en/guide/advanced/streaming.html#response-streaming) produces error messages when running from Uvicorn.
When accessing a page using the `await request.respond()` API, it produces error messages after each request.
```
[2024-01-31 19:37:14 +0000] [694830] [INFO]
┌───────────────────────────────────────────────────────────────────────────────────┐
│                                   Sanic v23.6.0                                   │
│                                                                                   │
├───────────────────────┬───────────────────────────────────────────────────────────┤
│                       │     mode: production, ASGI                                │
│     ▄███ █████ ██     │   server: ASGI                                            │
│    ██                 │   python: 3.11.6                                          │
│     ▀███████ ███▄     │ platform: Linux-5.15.0-1048-aws-x86_64-with-glibc2.31     │
│                 ██    │ packages: sanic-routing==23.12.0, sanic-testing==23.6.0   │
│    ████ ████████▀     │                                                           │
│                       │                                                           │
│ Build Fast. Run Fast. │                                                           │
└───────────────────────┴───────────────────────────────────────────────────────────┘
INFO: Application startup complete.
INFO: 127.0.0.1:42186 - "GET / HTTP/1.1" 200 OK
[2024-01-31 19:38:19 +0000] [694830] [ERROR] Invalid response type None (need HTTPResponse)
Traceback (most recent call last):
File "handle_request", line 144, in handle_request
"_inspector",
^^^^^
sanic.exceptions.ServerError: Invalid response type None (need HTTPResponse)
[2024-01-31 19:38:19 +0000] [694830] [ERROR] The error response will not be sent to the client for the following exception:"Invalid response type None (need HTTPResponse)". A previous response has at least partially been sent.
```
### Code snippet
```python
from sanic import Sanic
app = Sanic("my-hello-world-app")
@app.route("/")
async def test(request):
response = await request.respond(content_type="text/plain")
await response.send("hello world")
await response.eof()
if __name__ == "__main__":
app.run()
```
### Expected Behavior
Sanic should not produce error messages when using the response streaming API.
### How do you run Sanic?
ASGI
### Operating System
Linux
### Sanic Version
23.6.0
### Additional context
Possibly related to #2572, but it seems like a different issue. I can reproduce this without using WebSockets or SSE.
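A condensed, standalone sketch of the guard the patch introduces (the real code lives in `sanic/asgi.py`): once the stage returns to IDLE, further sends are an application error rather than a trigger for a second response pipeline.

```python
from enum import Enum, auto

class Stage(Enum):
    IDLE = auto()
    HANDLER = auto()
    RESPONSE = auto()

def send(stage: Stage, data: bytes, end_stream: bool) -> Stage:
    if stage is Stage.IDLE:
        if not end_stream or data:
            raise RuntimeError("There is no request to respond to")
        return stage  # a bare end-of-stream after eof() is a no-op
    return Stage.IDLE if end_stream else Stage.RESPONSE
```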
| 2024-04-07T12:13:06 |
|
ray-project/ray | 506 | ray-project__ray-506 | [
"507"
] | e50a23b82039a37a073fcb098e5f3d2181f49b3e | diff --git a/python/ray/actor.py b/python/ray/actor.py
--- a/python/ray/actor.py
+++ b/python/ray/actor.py
@@ -5,7 +5,6 @@
import hashlib
import inspect
import json
-import numpy as np
import random
import redis
import traceback
@@ -14,7 +13,7 @@
import ray.pickling as pickling
import ray.signature as signature
import ray.worker
-from ray.utils import binary_to_hex, hex_to_binary
+from ray.utils import random_string, binary_to_hex, hex_to_binary
# This is a variable used by each actor to indicate the IDs of the GPUs that
# the worker is currently allowed to use.
@@ -30,10 +29,6 @@ def get_gpu_ids():
return gpu_ids
-def random_string():
- return np.random.bytes(20)
-
-
def random_actor_id():
return ray.local_scheduler.ObjectID(random_string())
diff --git a/python/ray/utils.py b/python/ray/utils.py
--- a/python/ray/utils.py
+++ b/python/ray/utils.py
@@ -3,11 +3,37 @@
from __future__ import print_function
import binascii
+import numpy as np
import sys
import ray.local_scheduler
+def random_string():
+ """Generate a random string to use as an ID.
+
+ Note that users may seed numpy, which could cause this function to generate
+ duplicate IDs. Therefore, we need to seed numpy ourselves, but we can't
+ interfere with the state of the user's random number generator, so we extract
+ the state of the random number generator and reset it after we are done.
+
+ TODO(rkn): If we want to later guarantee that these are generated in a
+ deterministic manner, then we will need to make some changes here.
+
+ Returns:
+ A random byte string of length 20.
+ """
+ # Get the state of the numpy random number generator.
+ numpy_state = np.random.get_state()
+ # Try to use true randomness.
+ np.random.seed(None)
+ # Generate the random ID.
+ random_id = np.random.bytes(20)
+ # Reset the state of the numpy random number generator.
+ np.random.set_state(numpy_state)
+ return random_id
+
+
def decode(byte_str):
"""Make this unicode in Python 3, otherwise leave it as bytes."""
if sys.version_info >= (3, 0):
diff --git a/python/ray/worker.py b/python/ray/worker.py
--- a/python/ray/worker.py
+++ b/python/ray/worker.py
@@ -27,6 +27,7 @@
import ray.numbuf
import ray.local_scheduler
import ray.plasma
+from ray.utils import random_string
SCRIPT_MODE = 0
WORKER_MODE = 1
@@ -60,14 +61,6 @@
TASK_STATUS_RUNNING = 8
-def random_string():
- return np.random.bytes(20)
-
-
-def random_object_id():
- return ray.local_scheduler.ObjectID(random_string())
-
-
class FunctionID(object):
def __init__(self, function_id):
self.function_id = function_id
| diff --git a/test/actor_test.py b/test/actor_test.py
--- a/test/actor_test.py
+++ b/test/actor_test.py
@@ -3,6 +3,7 @@
from __future__ import print_function
import numpy as np
+import random
import unittest
import ray
@@ -209,6 +210,27 @@ def __init__(self):
ray.worker.cleanup()
+ def testRandomIDGeneration(self):
+ ray.init(num_workers=0)
+
+ @ray.actor
+ class Foo(object):
+ def __init__(self):
+ pass
+
+ # Make sure that seeding numpy does not interfere with the generation of
+ # actor IDs.
+ np.random.seed(1234)
+ random.seed(1234)
+ f1 = Foo()
+ np.random.seed(1234)
+ random.seed(1234)
+ f2 = Foo()
+
+ self.assertNotEqual(f1._ray_actor_id.id(), f2._ray_actor_id.id())
+
+ ray.worker.cleanup()
+
class ActorMethods(unittest.TestCase):
| Seeding numpy interferes with random ID generation.
```python
import numpy as np
import ray
ray.init(num_workers=0)
@ray.actor
class Foo(object):
def __init__(self):
pass
np.random.seed(0)
a1 = Foo()
np.random.seed(0)
a2 = Foo()
print(a1)
print(a2)
```
This currently causes the local scheduler to die with the following check.
```
[FATAL] (/Users/rkn/Workspace/ray/src/local_scheduler/local_scheduler.cc:1051: errno: Operation now in progress) Check failure: state->actor_mapping.count(actor_id) == 0
0 local_scheduler 0x0000000102c25d07 _Z30handle_actor_creation_callback8UniqueIDS_S_Pv + 839
1 local_scheduler 0x0000000102c37dbc _Z49redis_actor_notification_table_subscribe_callbackP17redisAsyncContextPvS1_ + 428
2 local_scheduler 0x0000000102c4d19d redisProcessCallbacks + 861
3 local_scheduler 0x0000000102c3af39 aeProcessEvents + 649
4 local_scheduler 0x0000000102c3b25b aeMain + 43
5 local_scheduler 0x0000000102c25f30 _Z12start_serverPKcS0_S0_iS0_S0_S0_bPKdS0_i + 448
6 local_scheduler 0x0000000102c262fc main + 924
7 libdyld.dylib 0x00007fffd54c2255 start + 1
```
And the actor IDs that are generated are the same.
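The fix (mirrored from the patch above) snapshots and restores numpy's RNG state around ID generation, so user seeding cannot collapse IDs:

```python
import numpy as np

def random_string() -> bytes:
    """Generate 20 random bytes without disturbing the user's numpy seed."""
    numpy_state = np.random.get_state()  # stash the caller's RNG state
    np.random.seed(None)                 # reseed from OS entropy
    random_id = np.random.bytes(20)
    np.random.set_state(numpy_state)     # restore the caller's state
    return random_id

np.random.seed(0)
a = random_string()
np.random.seed(0)
b = random_string()
assert a != b  # re-seeding no longer produces duplicate IDs
```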
| 2017-05-03T21:17:29 |
|
ray-project/ray | 527 | ray-project__ray-527 | [
"528"
] | c688a64235852361276ea446ce79b63fa3a7b785 | diff --git a/python/ray/actor.py b/python/ray/actor.py
--- a/python/ray/actor.py
+++ b/python/ray/actor.py
@@ -5,7 +5,7 @@
import hashlib
import inspect
import json
-import random
+import numpy as np
import redis
import traceback
@@ -165,15 +165,18 @@ def select_local_scheduler(local_schedulers, num_gpus, worker):
"""
driver_id = worker.task_driver_id.id()
- if num_gpus == 0:
- local_scheduler_id = hex_to_binary(
- random.choice(local_schedulers)["DBClientID"])
- else:
- # All of this logic is for finding a local scheduler that has enough
- # available GPUs.
- local_scheduler_id = None
- # Loop through all of the local schedulers.
- for local_scheduler in local_schedulers:
+ local_scheduler_id = None
+ # Loop through all of the local schedulers in a random order.
+ local_schedulers = np.random.permutation(local_schedulers)
+ for local_scheduler in local_schedulers:
+ if local_scheduler["NumCPUs"] < 1:
+ continue
+ if local_scheduler["NumGPUs"] < num_gpus:
+ continue
+ if num_gpus == 0:
+ local_scheduler_id = hex_to_binary(local_scheduler["DBClientID"])
+ break
+ else:
# Try to reserve enough GPUs on this local scheduler.
success = attempt_to_reserve_gpus(num_gpus, driver_id, local_scheduler,
worker)
@@ -181,10 +184,11 @@ def select_local_scheduler(local_schedulers, num_gpus, worker):
local_scheduler_id = hex_to_binary(local_scheduler["DBClientID"])
break
- if local_scheduler_id is None:
- raise Exception("Could not find a node with enough GPUs to create this "
- "actor. The local scheduler information is {}."
- .format(local_schedulers))
+ if local_scheduler_id is None:
+ raise Exception("Could not find a node with enough GPUs or other "
+ "resources to create this actor. The local scheduler "
+ "information is {}.".format(local_schedulers))
+
return local_scheduler_id
@@ -201,7 +205,8 @@ def export_actor(actor_id, Class, actor_method_names, num_cpus, num_gpus,
"""
ray.worker.check_main_thread()
if worker.mode is None:
- raise NotImplemented("TODO(pcm): Cache actors")
+ raise Exception("Actors cannot be created before Ray has been started. "
+ "You can start Ray with 'ray.init()'.")
key = "Actor:{}".format(actor_id.id())
pickled_class = pickling.dumps(Class)
@@ -216,11 +221,12 @@ def export_actor(actor_id, Class, actor_method_names, num_cpus, num_gpus,
local_schedulers = []
for ip_address, clients in client_table.items():
for client in clients:
- if client["ClientType"] == "local_scheduler":
+ if client["ClientType"] == "local_scheduler" and not client["Deleted"]:
local_schedulers.append(client)
# Select a local scheduler for the actor.
local_scheduler_id = select_local_scheduler(local_schedulers, num_gpus,
worker)
+ assert local_scheduler_id is not None
d = {"driver_id": driver_id,
"actor_id": actor_id.id(),
@@ -240,6 +246,11 @@ def export_actor(actor_id, Class, actor_method_names, num_cpus, num_gpus,
# having trouble getting that to work. It almost works, but in Python 2.7,
# builder.CreateString fails on byte strings that contain characters outside
# range(128).
+
+ # TODO(rkn): There is actually no guarantee that the local scheduler that we
+ # are publishing to has already subscribed to the actor_notifications
+ # channel. Therefore, this message may be missed and the workload will hang.
+ # This is a bug.
worker.redis_client.publish("actor_notifications",
actor_id.id() + driver_id + local_scheduler_id)
| diff --git a/test/actor_test.py b/test/actor_test.py
--- a/test/actor_test.py
+++ b/test/actor_test.py
@@ -565,6 +565,19 @@ def f():
class ActorsOnMultipleNodes(unittest.TestCase):
+ def testActorsOnNodesWithNoCPUs(self):
+ ray.init(num_cpus=0)
+
+ @ray.actor
+ class Foo(object):
+ def __init__(self):
+ pass
+
+ with self.assertRaises(Exception):
+ Foo()
+
+ ray.worker.cleanup()
+
def testActorLoadBalancing(self):
num_local_schedulers = 3
ray.worker._init(start_ray_local=True, num_workers=0,
| Error message for creating actors if ray.init hasn't been called
Right now we get:
```
import ray

@ray.actor
class A(object):
    def __init__(self):
        self.counter = 0

    def step(self, action):
        self.counter += 1

    def get_counter(self):
        return self.counter

a = A()
```
```
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-2-144b248f218a> in <module>()
----> 1 a = A()
/home/pcmoritz/anaconda3/lib/python3.5/site-packages/ray-0.0.1-py3.5.egg/ray/actor.py in __init__(self, *args, **kwargs)
288 export_actor(self._ray_actor_id, Class,
289 self._ray_actor_methods.keys(), num_cpus, num_gpus,
--> 290 ray.worker.global_worker)
291 # Call __init__ as a remote function.
292 if "__init__" in self._ray_actor_methods.keys():
/home/pcmoritz/anaconda3/lib/python3.5/site-packages/ray-0.0.1-py3.5.egg/ray/actor.py in export_actor(actor_id, Class, actor_method_names, num_cpus, num_gpus, worker)
202 ray.worker.check_main_thread()
203 if worker.mode is None:
--> 204 raise NotImplemented("TODO(pcm): Cache actors")
205 key = "Actor:{}".format(actor_id.id())
206 pickled_class = pickling.dumps(Class)
TypeError: 'NotImplementedType' object is not callable
```
Thanks @pacchiano for reporting this!
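For context, `NotImplemented` is a built-in singleton constant, not an exception class, so `raise NotImplemented(...)` fails while trying to call it, which is exactly the `TypeError` above. A quick illustration:
```
>>> raise NotImplemented("TODO(pcm): Cache actors")
TypeError: 'NotImplementedType' object is not callable
>>> raise NotImplementedError("TODO(pcm): Cache actors")
NotImplementedError: TODO(pcm): Cache actors
```
The patch above sidesteps the question entirely and raises a plain `Exception` telling the user to start Ray with `ray.init()` first.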
| 2017-05-09T00:29:19 |
|
ray-project/ray | 786 | ray-project__ray-786 | [
"785"
] | b6a18cb39bc3b8a51a66d3dad28cdd6bde34f470 | diff --git a/python/ray/experimental/state.py b/python/ray/experimental/state.py
--- a/python/ray/experimental/state.py
+++ b/python/ray/experimental/state.py
@@ -57,6 +57,7 @@ class GlobalState(object):
def __init__(self):
"""Create a GlobalState object."""
self.redis_client = None
+ self.redis_clients = None
def _check_connected(self):
"""Check that the object has been initialized before it is used.
@@ -69,32 +70,65 @@ def _check_connected(self):
raise Exception("The ray.global_state API cannot be used before "
"ray.init has been called.")
- def _initialize_global_state(self, redis_ip_address, redis_port):
+ if self.redis_clients is None:
+ raise Exception("The ray.global_state API cannot be used before "
+ "ray.init has been called.")
+
+ def _initialize_global_state(self, redis_ip_address, redis_port,
+ timeout=20):
"""Initialize the GlobalState object by connecting to Redis.
+ It's possible that certain keys in Redis may not have been fully
+ populated yet. In this case, we will retry this method until they have
+ been populated or we exceed a timeout.
+
Args:
redis_ip_address: The IP address of the node that the Redis server
lives on.
redis_port: The port that the Redis server is listening on.
+ timeout: The maximum amount of time (in seconds) that we should
+ wait for the keys in Redis to be populated.
"""
self.redis_client = redis.StrictRedis(host=redis_ip_address,
port=redis_port)
- self.redis_clients = []
- num_redis_shards = self.redis_client.get("NumRedisShards")
- if num_redis_shards is None:
- raise Exception("No entry found for NumRedisShards")
- num_redis_shards = int(num_redis_shards)
- if (num_redis_shards < 1):
- raise Exception("Expected at least one Redis shard, found "
- "{}.".format(num_redis_shards))
-
- ip_address_ports = self.redis_client.lrange("RedisShards", start=0,
- end=-1)
- if len(ip_address_ports) != num_redis_shards:
- raise Exception("Expected {} Redis shard addresses, found "
- "{}".format(num_redis_shards,
- len(ip_address_ports)))
+ start_time = time.time()
+
+ num_redis_shards = None
+ ip_address_ports = []
+
+ while time.time() - start_time < timeout:
+ # Attempt to get the number of Redis shards.
+ num_redis_shards = self.redis_client.get("NumRedisShards")
+ if num_redis_shards is None:
+ print("Waiting longer for NumRedisShards to be populated.")
+ time.sleep(1)
+ continue
+ num_redis_shards = int(num_redis_shards)
+ if (num_redis_shards < 1):
+ raise Exception("Expected at least one Redis shard, found "
+ "{}.".format(num_redis_shards))
+
+ # Attempt to get all of the Redis shards.
+ ip_address_ports = self.redis_client.lrange("RedisShards", start=0,
+ end=-1)
+ if len(ip_address_ports) != num_redis_shards:
+ print("Waiting longer for RedisShards to be populated.")
+ time.sleep(1)
+ continue
+
+ # If we got here then we successfully got all of the information.
+ break
+
+ # Check to see if we timed out.
+ if time.time() - start_time >= timeout:
+ raise Exception("Timed out while attempting to initialize the "
+ "global state. num_redis_shards = {}, "
+ "ip_address_ports = {}"
+ .format(num_redis_shards, ip_address_ports))
+
+ # Get the rest of the information.
+ self.redis_clients = []
for ip_address_port in ip_address_ports:
shard_address, shard_port = ip_address_port.split(b":")
self.redis_clients.append(redis.StrictRedis(host=shard_address,
| Test failure in Jenkins "Exception: Expected 10 Redis shard addresses, found 7".
The following error occurred in Jenkins in this log https://amplab.cs.berkeley.edu/jenkins/job/Ray-PRB/1453/console.
```
+ python /home/jenkins/workspace/Ray-PRB/test/jenkins_tests/multi_node_docker_test.py --docker-image=60125e3f54e6f8f0623b956c8cb04ddc6402819ed22ef5908c5b9ba8db38b46d --num-nodes=5 --num-redis-shards=10 --test-script=/ray/test/jenkins_tests/multi_node_tests/test_0.py
Starting head node with command:['docker', 'run', '-d', '--shm-size=1G', '60125e3f54e6f8f0623b956c8cb04ddc6402819ed22ef5908c5b9ba8db38b46d', 'ray', 'start', '--head', '--block', '--redis-port=6379', '--num-redis-shards=10', '--num-cpus=10', '--num-gpus=0']
Starting worker node with command:['docker', 'run', '-d', '--shm-size=1G', '--shm-size=1G', '60125e3f54e6f8f0623b956c8cb04ddc6402819ed22ef5908c5b9ba8db38b46d', 'ray', 'start', '--block', '--redis-address=172.17.0.12:6379', '--num-cpus=10', '--num-gpus=0']
Starting worker node with command:['docker', 'run', '-d', '--shm-size=1G', '--shm-size=1G', '60125e3f54e6f8f0623b956c8cb04ddc6402819ed22ef5908c5b9ba8db38b46d', 'ray', 'start', '--block', '--redis-address=172.17.0.12:6379', '--num-cpus=10', '--num-gpus=0']
Starting worker node with command:['docker', 'run', '-d', '--shm-size=1G', '--shm-size=1G', '60125e3f54e6f8f0623b956c8cb04ddc6402819ed22ef5908c5b9ba8db38b46d', 'ray', 'start', '--block', '--redis-address=172.17.0.12:6379', '--num-cpus=10', '--num-gpus=0']
Starting worker node with command:['docker', 'run', '-d', '--shm-size=1G', '--shm-size=1G', '60125e3f54e6f8f0623b956c8cb04ddc6402819ed22ef5908c5b9ba8db38b46d', 'ray', 'start', '--block', '--redis-address=172.17.0.12:6379', '--num-cpus=10', '--num-gpus=0']
Starting driver with command /ray/test/jenkins_tests/multi_node_tests/test_0.py.
STDOUT:
Driver 0 started at 1501449715.55.
STDERR:
Traceback (most recent call last):
File "/ray/test/jenkins_tests/multi_node_tests/test_0.py", line 22, in <module>
ray.init(redis_address=redis_address)
File "/opt/conda/lib/python2.7/site-packages/ray-0.1.2-py2.7-linux-x86_64.egg/ray/worker.py", line 1011, in init
num_gpus=num_gpus, num_redis_shards=num_redis_shards)
File "/opt/conda/lib/python2.7/site-packages/ray-0.1.2-py2.7-linux-x86_64.egg/ray/worker.py", line 962, in _init
mode=driver_mode, worker=global_worker, actor_id=NIL_ACTOR_ID)
File "/opt/conda/lib/python2.7/site-packages/ray-0.1.2-py2.7-linux-x86_64.egg/ray/worker.py", line 1338, in connect
global_state._initialize_global_state(redis_ip_address, int(redis_port))
File "/opt/conda/lib/python2.7/site-packages/ray-0.1.2-py2.7-linux-x86_64.egg/ray/experimental/state.py", line 96, in _initialize_global_state
len(ip_address_ports)))
Exception: Expected 10 Redis shard addresses, found 7
stop_node {'container_id': u'fea4409391126e14fca2459ce705051d644ec15163d5143708fc7820a2b7ba2a', 'is_head': True}
stop_node {'container_id': u'd9942971920d496cd39b65b4b9f95525a4479d455036ec2460fdbe8fe4b51c6e', 'is_head': False}
Traceback (most recent call last):
File "/home/jenkins/workspace/Ray-PRB/test/jenkins_tests/multi_node_docker_test.py", line 323, in <module>
d.stop_ray()
File "/home/jenkins/workspace/Ray-PRB/test/jenkins_tests/multi_node_docker_test.py", line 224, in stop_ray
self._stop_node(container_id)
File "/home/jenkins/workspace/Ray-PRB/test/jenkins_tests/multi_node_docker_test.py", line 207, in _stop_node
.format(container_id))
Exception: Failed to stop container 27327225b793cc721628b2fa4cf66b3c0a49eb5f0732273b66fa7ee505689655.
Build step 'Execute shell' marked build as failure
Test FAILed.
Refer to this link for build results (access rights to CI server needed):
https://amplab.cs.berkeley.edu/jenkins//job/Ray-PRB/1453/
Test FAILed.
Finished: FAILURE
```
| 2017-07-31T22:56:43 |
||
ray-project/ray | 833 | ray-project__ray-833 | [
"832"
] | bfe473fa8c23a1e838456e57f32b27f277fe8117 | diff --git a/python/ray/services.py b/python/ray/services.py
--- a/python/ray/services.py
+++ b/python/ray/services.py
@@ -478,7 +478,7 @@ def start_ui(redis_address, stdout_file=None, stderr_file=None, cleanup=True):
port_test_socket.bind(("127.0.0.1", port))
port_test_socket.close()
break
- except OSError:
+ except socket.error:
port += 1
new_env = os.environ.copy()
new_env["REDIS_ADDRESS"] = redis_address
| Cannot run Ray in two separate interpreters with Python 2.
To reproduce this problem, run the following in two separate interpreters.
```python
import ray
ray.init()
```
In the second one, I see the following error.
```
---------------------------------------------------------------------------
error Traceback (most recent call last)
<ipython-input-2-daa1e6a285d0> in <module>()
----> 1 ray.init()
/home/ubuntu/ray/python/ray/worker.pyc in init(redis_address, node_ip_address, object_id_seed, num_workers, driver_mode, redirect_output, num_cpus, num_gpus, num_custom_resource, num_redis_shards)
1325 redirect_output=redirect_output, num_cpus=num_cpus,
1326 num_gpus=num_gpus, num_custom_resource=num_custom_resource,
-> 1327 num_redis_shards=num_redis_shards)
1328
1329
/home/ubuntu/ray/python/ray/worker.pyc in _init(address_info, start_ray_local, object_id_seed, num_workers, num_local_schedulers, object_store_memory, driver_mode, redirect_output, start_workers_from_local_scheduler, num_cpus, num_gpus, num_custom_resource, num_redis_shards)
1225 num_gpus=num_gpus,
1226 num_custom_resource=num_custom_resource,
-> 1227 num_redis_shards=num_redis_shards)
1228 else:
1229 if redis_address is None:
/home/ubuntu/ray/python/ray/services.pyc in start_ray_head(address_info, node_ip_address, redis_port, num_workers, num_local_schedulers, object_store_memory, worker_path, cleanup, redirect_output, start_workers_from_local_scheduler, num_cpus, num_gpus, num_custom_resource, num_redis_shards)
1121 num_gpus=num_gpus,
1122 num_custom_resource=num_custom_resource,
-> 1123 num_redis_shards=num_redis_shards)
1124
1125
/home/ubuntu/ray/python/ray/services.pyc in start_ray_processes(address_info, node_ip_address, redis_port, num_workers, num_local_schedulers, object_store_memory, num_redis_shards, worker_path, cleanup, redirect_output, include_global_scheduler, include_log_monitor, include_webui, start_workers_from_local_scheduler, num_cpus, num_gpus, num_custom_resource)
991 "webui", redirect_output=True)
992 start_ui(redis_address, stdout_file=ui_stdout_file,
--> 993 stderr_file=ui_stderr_file, cleanup=cleanup)
994
995 # Return the addresses of the relevant processes.
/home/ubuntu/ray/python/ray/services.pyc in start_ui(redis_address, stdout_file, stderr_file, cleanup)
476 try:
477 port_test_socket = socket.socket()
--> 478 port_test_socket.bind(("127.0.0.1", port))
479 port_test_socket.close()
480 break
/home/ubuntu/anaconda2/lib/python2.7/socket.pyc in meth(name, self, *args)
226
227 def meth(name,self,*args):
--> 228 return getattr(self._sock,name)(*args)
229
230 for _m in _socketmethods:
error: [Errno 98] Address already in use
```
It looks like the problem is that the exceptions raised by `bind` in Python 2 and Python 3 are different.
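For reference, `socket.error` has been an alias of `OSError` since Python 3.3 but is its own class on Python 2, so catching `socket.error` (as the one-line patch above does) covers both. A minimal sketch of the port-probing loop:
```python
import socket

port = 8888
while True:
    try:
        port_test_socket = socket.socket()
        port_test_socket.bind(("127.0.0.1", port))
        port_test_socket.close()
        break
    except socket.error:  # OSError on Python 3, socket.error on Python 2
        port += 1
```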
cc @alanamarzoev
| 2017-08-12T19:37:07 |
||
ray-project/ray | 836 | ray-project__ray-836 | [
"835"
] | 508eec58f541974327296f7823809d60808f6677 | diff --git a/python/ray/experimental/state.py b/python/ray/experimental/state.py
--- a/python/ray/experimental/state.py
+++ b/python/ray/experimental/state.py
@@ -416,7 +416,6 @@ def task_profiles(self, start=None, end=None, num_tasks=None, fwd=True):
list of profiling information for tasks where the events have
no task ID.
"""
-
task_info = dict()
event_log_sets = self.redis_client.keys("event_log*")
@@ -550,7 +549,6 @@ def dump_catapult_trace(self,
obj_dep: Boolean indicating whether or not object dependency edges
should be included in the trace.
"""
-
workers = self.workers()
start_time = None
for info in task_info.values():
@@ -570,10 +568,20 @@ def micros_rel(ts):
full_trace = []
for task_id, info in task_info.items():
- # total_info is what is displayed when selecting a task in the
- # timeline.
- total_info = dict()
- total_info["task_id"] = task_id
+ worker = workers[info["worker_id"]]
+ task_t_info = task_table[task_id]
+
+ # The total_info dictionary is what is displayed when selecting a
+ # task in the timeline. We copy the task spec so that we don't
+ # modify it in place since we will use the original values later.
+ total_info = copy.copy(task_table[task_id]["TaskSpec"])
+ total_info["Args"] = [
+ oid.hex() if isinstance(oid, ray.local_scheduler.ObjectID)
+ else oid for oid in task_t_info["TaskSpec"]["Args"]]
+ total_info["ReturnObjectIDs"] = [
+ oid.hex() for oid
+ in task_t_info["TaskSpec"]["ReturnObjectIDs"]]
+ total_info["LocalSchedulerID"] = task_t_info["LocalSchedulerID"]
total_info["get_arguments"] = (info["get_arguments_end"] -
info["get_arguments_start"])
total_info["execute"] = (info["execute_end"] -
@@ -582,23 +590,12 @@ def micros_rel(ts):
info["store_outputs_start"])
total_info["function_name"] = info["function_name"]
total_info["worker_id"] = info["worker_id"]
- worker = workers[info["worker_id"]]
- task_t_info = task_table[task_id]
- task_spec = task_table[task_id]["TaskSpec"]
- task_spec["Args"] = [oid.hex() if isinstance(oid,
- ray.local_scheduler.ObjectID) else oid
- for oid in task_t_info["TaskSpec"]["Args"]]
- task_spec["ReturnObjectIDs"] = [oid.hex() for oid in
- (task_t_info["TaskSpec"]
- ["ReturnObjectIDs"])]
- task_spec["LocalSchedulerID"] = task_t_info["LocalSchedulerID"]
- total_info = copy.copy(task_spec)
parent_info = task_info.get(
task_table[task_id]["TaskSpec"]["ParentTaskID"])
worker = workers[info["worker_id"]]
# The catapult trace format documentation can be found here:
- # https://docs.google.com/document/d/1CvAClvFfyA5R-PhYUmn5OOQtYMH4h6I0nSsKchNAySU/preview # NOQA
+ # https://docs.google.com/document/d/1CvAClvFfyA5R-PhYUmn5OOQtYMH4h6I0nSsKchNAySU/preview # noqa: E501
if breakdowns:
if "get_arguments_end" in info:
get_args_trace = {
@@ -735,55 +732,60 @@ def micros_rel(ts):
if obj_dep:
args = task_table[task_id]["TaskSpec"]["Args"]
for arg in args:
+ # Don't visualize arguments that are not object IDs.
if isinstance(arg, ray.local_scheduler.ObjectID):
- continue
- object_info = self._object_table(arg)
- if object_info["IsPut"]:
- continue
- if arg not in seen_obj:
- seen_obj[arg] = 0
- seen_obj[arg] += 1
- owner_task = self._object_table(arg)["TaskID"]
- owner_worker = (workers[task_profiles
- [owner_task]["worker_id"]])
- # Adding/subtracting 2 to the time associated with the
- # beginning/ending of the flow event is necessary to
- # make the flow events show up reliably. When these times
- # are exact, this is presumably an edge case, and catapult
- # doesn't recognize that there is a duration event at that
- # exact point in time that the flow event should be bound
- # to. This issue is solved by adding the 2 ms to the
- # start/end time of the flow event, which guarantees
- # overlap with the duration event that it's associated
- # with, and the flow event therefore always gets drawn.
- owner = {
- "cat": "obj_dependency",
- "pid": "Node " + owner_worker["node_ip_address"],
- "tid": task_profiles[owner_task]["worker_id"],
- "ts": micros_rel(task_profiles[owner_task]
- ["store_outputs_end"]) - 2,
- "ph": "s",
- "name": "ObjectDependency",
- "args": {},
- "bp": "e",
- "cname": "cq_build_attempt_failed",
- "id": "obj" + str(arg) + str(seen_obj[arg])
- }
- full_trace.append(owner)
-
- dependent = {
- "cat": "obj_dependency",
- "pid": "Node " + worker["node_ip_address"],
- "tid": info["worker_id"],
- "ts": micros_rel(info["get_arguments_start"]) + 2,
- "ph": "f",
- "name": "ObjectDependency",
- "args": {},
- "cname": "cq_build_attempt_failed",
- "bp": "e",
- "id": "obj" + str(arg) + str(seen_obj[arg])
- }
- full_trace.append(dependent)
+ object_info = self._object_table(arg)
+ # Don't visualize objects that were created by calls to
+ # put.
+ if not object_info["IsPut"]:
+ if arg not in seen_obj:
+ seen_obj[arg] = 0
+ seen_obj[arg] += 1
+ owner_task = self._object_table(arg)["TaskID"]
+ owner_worker = (workers[
+ task_profiles[owner_task]["worker_id"]])
+ # Adding/subtracting 2 to the time associated with
+ # the beginning/ending of the flow event is
+ # necessary to make the flow events show up
+ # reliably. When these times are exact, this is
+ # presumably an edge case, and catapult doesn't
+ # recognize that there is a duration event at that
+ # exact point in time that the flow event should be
+ # bound to. This issue is solved by adding the 2 ms
+ # to the start/end time of the flow event, which
+ # guarantees overlap with the duration event that
+ # it's associated with, and the flow event
+ # therefore always gets drawn.
+ owner = {
+ "cat": "obj_dependency",
+ "pid": ("Node " +
+ owner_worker["node_ip_address"]),
+ "tid": task_profiles[owner_task]["worker_id"],
+ "ts": micros_rel(task_profiles[
+ owner_task]["store_outputs_end"]) - 2,
+ "ph": "s",
+ "name": "ObjectDependency",
+ "args": {},
+ "bp": "e",
+ "cname": "cq_build_attempt_failed",
+ "id": "obj" + str(arg) + str(seen_obj[arg])
+ }
+ full_trace.append(owner)
+
+ dependent = {
+ "cat": "obj_dependency",
+ "pid": "Node " + worker["node_ip_address"],
+ "tid": info["worker_id"],
+ "ts": micros_rel(
+ info["get_arguments_start"]) + 2,
+ "ph": "f",
+ "name": "ObjectDependency",
+ "args": {},
+ "cname": "cq_build_attempt_failed",
+ "bp": "e",
+ "id": "obj" + str(arg) + str(seen_obj[arg])
+ }
+ full_trace.append(dependent)
print("Creating JSON {}/{}".format(len(full_trace), len(task_info)))
with open(path, "w") as outfile:
| Task timeline fails when arguments to remote functions are not object IDs.
The error can be reproduced as follows.
```python
import ray
ray.init()
@ray.remote
def f(x):
    pass
f.remote(3)
```
Then open the UI, run all the cells, and click "View task timeline". This produces the following error.
```
1 tasks to trace
Dumping task profiling data to /var/folders/15/54jf68993rd7753c5fms424r0000gn/T/tmpnsl3ltkw.json
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
~/Workspace/ray/python/ray/experimental/ui.py in handle_submit(sender)
402 breakdowns=breakdown,
403 obj_dep=obj_dep.value,
--> 404 task_dep=task_dep.value)
405
406 print("Opening html file in browser...")
~/Workspace/ray/python/ray/experimental/state.py in dump_catapult_trace(self, path, task_info, breakdowns, task_dep, obj_dep)
738 if isinstance(arg, ray.local_scheduler.ObjectID):
739 continue
--> 740 object_info = self._object_table(arg)
741 if object_info["IsPut"]:
742 continue
~/Workspace/ray/python/ray/experimental/state.py in _object_table(self, object_id)
176 # Allow the argument to be either an ObjectID or a hex string.
177 if not isinstance(object_id, ray.local_scheduler.ObjectID):
--> 178 object_id = ray.local_scheduler.ObjectID(hex_to_binary(object_id))
179
180 # Return information about a single object ID.
~/Workspace/ray/python/ray/utils.py in hex_to_binary(hex_identifier)
59
60 def hex_to_binary(hex_identifier):
---> 61 return binascii.unhexlify(hex_identifier)
62
63
TypeError: argument should be bytes, buffer or ASCII string, not 'int'
```
cc @alanamarzoev
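The `continue` guard in the pre-patch loop is inverted: it skips the arguments that are `ObjectID`s and then calls `_object_table` on plain Python values like `3`, which is how an `int` ends up in `hex_to_binary`. A sketch of the corrected guard inside `dump_catapult_trace` (the patch above restructures the loop equivalently):
```python
for arg in args:
    # Only object IDs have object-table entries; a literal like 3 does not.
    if not isinstance(arg, ray.local_scheduler.ObjectID):
        continue
    object_info = self._object_table(arg)
    if object_info["IsPut"]:
        continue  # objects created by ray.put are not visualized either
    # ... emit the dependency flow events as before ...
```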
| 2017-08-13T19:38:13 |
||
ray-project/ray | 839 | ray-project__ray-839 | [
"825"
] | ca53e9ae7b40ce8397b3e65617848c084159c416 | diff --git a/python/ray/actor.py b/python/ray/actor.py
--- a/python/ray/actor.py
+++ b/python/ray/actor.py
@@ -6,6 +6,7 @@
import hashlib
import inspect
import json
+import time
import traceback
import ray.local_scheduler
@@ -215,7 +216,8 @@ def reconstruct_actor_state(actor_id, worker):
print("Loading actor state from checkpoint {}"
.format(checkpoint_index))
# Wait for the actor to have been defined.
- worker._wait_for_actor()
+ while not hasattr(worker, "actor_class"):
+ time.sleep(0.001)
# TODO(rkn): Restoring from the checkpoint may fail, so this should be
# in a try-except block and we should give a good error message.
worker.actors[actor_id] = (
| diff --git a/python/ray/test/test_utils.py b/python/ray/test/test_utils.py
--- a/python/ray/test/test_utils.py
+++ b/python/ray/test/test_utils.py
@@ -4,7 +4,6 @@
import json
import os
-import psutil
import redis
import time
@@ -115,18 +114,12 @@ def _pid_alive(pid):
pid: The pid to check.
Returns:
- This returns false if the process is dead or defunct. Otherwise, it
- returns true.
+ This returns false if the process is dead. Otherwise, it returns true.
"""
try:
os.kill(pid, 0)
except OSError:
return False
- else:
- if psutil.Process(pid).status() == psutil.STATUS_ZOMBIE:
- return False
- else:
- return True
def wait_for_pid_to_exit(pid, timeout=20):
| Test failure in actor checkpointing ActorReconstruction.testCheckpointing in actor_test.py.
I just saw this error in a recently introduced test (and the test hung).
```
testCheckpointing (__main__.ActorReconstruction) ... Waiting for redis server at 127.0.0.1:14868 to respond...
Waiting for redis server at 127.0.0.1:48841 to respond...
Starting local scheduler with 2 CPUs, 0 GPUs
Starting local scheduler with 2 CPUs, 0 GPUs
Failed to start the UI, you may need to run 'pip install jupyter'.
Traceback (most recent call last):
File "/home/travis/.local/lib/python2.7/site-packages/ray-0.1.2-py2.7-linux-x86_64.egg/ray/workers/default_worker.py", line 87, in <module>
ray.worker.global_worker)
File "/home/travis/.local/lib/python2.7/site-packages/ray-0.1.2-py2.7-linux-x86_64.egg/ray/actor.py", line 222, in reconstruct_actor_state
worker.actor_class.__ray_restore_from_checkpoint__(checkpoint))
AttributeError: 'Worker' object has no attribute 'actor_class'
You can inspect errors by running
ray.error_info()
If this driver is hanging, start a new one with
ray.init(redis_address="127.0.0.1:14868")
```
https://s3.amazonaws.com/archive.travis-ci.org/jobs/262150716/log.txt?X-Amz-Expires=30&X-Amz-Date=20170808T170129Z&X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIAJRYRXRSVGNKPKO5A/20170808/us-east-1/s3/aws4_request&X-Amz-SignedHeaders=host&X-Amz-Signature=71e3540e370a11f66c599879fdee3dda5d9333e9984d3205fface3f3f2fe68e8
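The root cause is a startup race: the worker begins restoring from the checkpoint before the actor's class definition has been exported to it, so `worker.actor_class` does not exist yet. The patch above replaces the failing wait with a simple poll:
```python
import time

# Block until the actor class has been defined on this worker before
# restoring its state from the checkpoint.
while not hasattr(worker, "actor_class"):
    time.sleep(0.001)
```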
| 2017-08-15T21:38:06 |
|
ray-project/ray | 840 | ray-project__ray-840 | [
"806"
] | af71f9616e2f7eb95592e096de53a35d5b066c7f | diff --git a/python/setup.py b/python/setup.py
--- a/python/setup.py
+++ b/python/setup.py
@@ -81,12 +81,15 @@ def has_ext_modules(self):
# The BinaryDistribution argument triggers build_ext.
distclass=BinaryDistribution,
install_requires=["numpy",
+ "cython",
"funcsigs",
"click",
"colorama",
"psutil",
"redis",
"cloudpickle >= 0.2.2",
+ # The six module is required by pyarrow.
+ "six >= 1.0.0",
"flatbuffers"],
entry_points={"console_scripts": ["ray=ray.scripts.scripts:main"]},
include_package_data=True,
| Not possible to install Ray from git
I want to install Ray for Python 3.
```
$ apt-get install -y cmake pkg-config python3-dev build-essential autoconf curl libtool libboost-all-dev unzip
$ pip3 install git+https://github.com/ray-project/ray.git@37282330c0ea687fd1b983176dce85731fcf189d#subdirectory=python
```
But this tries to install it for python2.7, and not python3, failing with:
```
CMake Error at cmake_modules/FindNumPy.cmake:62 (message):
NumPy import failure:
Traceback (most recent call last):
File "<string>", line 1, in <module>
ImportError: No module named numpy
```
Because numpy is installed only for Python 3.
| If you first `pip3 install numpy` then does it work?
I think there may be a couple problems here.
1. This line https://github.com/ray-project/ray/blob/master/python/setup.py#L28 in our `setup.py` needs numpy to compile a bunch of stuff, and that all happens before the setup.py has a chance to pull in the numpy dependency.
2. Even after addressing point 1 and moving the call to `build.sh` into the `build_ext` class, I'm not sure that the numpy dependency gets pulled before the `build_ext` runs, but maybe it does.
`numpy` is installed for Python 3, but not for Python 2. The issue seems to be that it is trying to install it for Python 2.7 even though the install was run with `pip3`.
I see, good catch.
The problem is that when we compile Arrow, it is finding the wrong version of Python. Specifically, we need to make sure this call to `cmake` finds the right Python. https://github.com/ray-project/ray/blob/64eaaaebf02ea3e4eeab5d51cbb51d7191c191d2/src/thirdparty/build_thirdparty.sh#L34
Passing in `-DPYTHON_EXECUTABLE:FILEPATH=$PYTHON_EXECUTABLE` to the `cmake` call helps it find the right Python interpreter, but it still finds the wrong Python libraries. We can probably also pass in `-DPYTHON_LIBRARIES=`, though that's usually less easy to figure out I think.
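For illustration, the flags being discussed would be passed to the Arrow `cmake` invocation roughly like this (a sketch; the library path shown is only an example and varies by system):
```bash
cmake -DPYTHON_EXECUTABLE:FILEPATH="$PYTHON_EXECUTABLE" \
      -DPYTHON_LIBRARIES="/usr/lib/python3.5/config-3.5m-x86_64-linux-gnu/libpython3.5m.so" \
      ..
```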
This should be fixed on Linux by #820. However, I think it still does not work on Mac.. that may require patching the arrow cmake module for finding the Python libraries.
It does not seem to work. I still get inside Docker container:
```
pip3 install git+https://github.com/ray-project/ray.git@master#subdirectory=python
```
```
Thread model: posix
gcc version 5.4.0 20160609 (Ubuntu 5.4.0-6ubuntu1~16.04.4)
INFOCompiler id: GNU
Selected compiler gcc 5.4.0
CMake Deprecation Warning at /usr/share/cmake-3.5/Modules/GenerateExportHeader.cmake:383 (message):
The add_compiler_export_flags function is obsolete. Use the
CXX_VISIBILITY_PRESET and VISIBILITY_INLINES_HIDDEN target properties
instead.
Call Stack (most recent call first):
CMakeLists.txt:307 (add_compiler_export_flags)
-- Performing Test COMPILER_HAS_HIDDEN_VISIBILITY
-- Performing Test COMPILER_HAS_HIDDEN_VISIBILITY - Success
-- Performing Test COMPILER_HAS_HIDDEN_INLINE_VISIBILITY
-- Performing Test COMPILER_HAS_HIDDEN_INLINE_VISIBILITY - Success
-- Performing Test COMPILER_HAS_DEPRECATED_ATTR
-- Performing Test COMPILER_HAS_DEPRECATED_ATTR - Success
-- Found cpplint executable at /tmp/pip-fu5ujtgs-build/src/thirdparty/arrow/cpp/build-support/cpplint.py
-- Found PythonInterp: /usr/bin/python (found version "2.7.12")
-- Searching for Python libs in /usr/lib64;/usr/lib;/usr/lib/python2.7/config-x86_64-linux-gnu
-- Looking for python2.7
-- Found Python lib /usr/lib/python2.7/config-x86_64-linux-gnu/libpython2.7.so
-- Found PythonLibs: /usr/lib/python2.7/config-x86_64-linux-gnu/libpython2.7.so
-- Looking for pthread.h
-- Looking for pthread.h - found
-- Looking for pthread_create
-- Looking for pthread_create - not found
-- Looking for pthread_create in pthreads
-- Looking for pthread_create in pthreads - not found
-- Looking for pthread_create in pthread
-- Looking for pthread_create in pthread - found
-- Found Threads: TRUE
-- Searching for Python libs in /usr/lib64;/usr/lib;/usr/lib/python2.7/config-x86_64-linux-gnu
-- Looking for python2.7
-- Found Python lib /usr/lib/python2.7/config-x86_64-linux-gnu/libpython2.7.so
CMake Error at cmake_modules/FindNumPy.cmake:62 (message):
NumPy import failure:
Traceback (most recent call last):
File "<string>", line 1, in <module>
ImportError: No module named numpy
Call Stack (most recent call first):
CMakeLists.txt:780 (find_package)
-- Configuring incomplete, errors occurred!
See also "/tmp/pip-fu5ujtgs-build/src/thirdparty/arrow/cpp/build/CMakeFiles/CMakeOutput.log".
See also "/tmp/pip-fu5ujtgs-build/src/thirdparty/arrow/cpp/build/CMakeFiles/CMakeError.log".
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "/tmp/pip-fu5ujtgs-build/python/setup.py", line 28, in <module>
subprocess.check_call(["../build.sh", sys.executable])
File "/usr/lib/python3.5/subprocess.py", line 581, in check_call
raise CalledProcessError(retcode, cmd)
subprocess.CalledProcessError: Command '['../build.sh', '/usr/bin/python3']' returned non-zero exit status 1
```
I think the issue is that because of `libboost-all-dev`, python2.7 also gets installed. And then it finds that one, not the one the install was invoked from.
If I forcefully remove python2.7 files, then it succeeds installing it.
I think just setting the path is not enough, because it might find python2.7 in the same path and continue using the wrong version.
Is `libboost-all-dev` really a necessary dependency? I do not like it installing python2.7. :-(
Oh right.. if you have multiple python binaries in the same directory (both in `/usr/bin/` in this case), then you can't guarantee that it'll find the right one.
Ok, in that case the previous approach of `-DPYTHON_EXECUTABLE:FILEPATH=$PYTHON_EXECUTABLE` is preferable. Let me see if that works on Linux.
As for `libboost-all-dev`, I think we need it for compiling Arrow.
> As for libboost-all-dev, I think we need it for compiling Arrow.
But do you really need `all`? Or just some boost libraries?
I also asked here: https://askubuntu.com/questions/944035/installing-libboost-python-dev-for-python3-without-installing-python2-7
libboost-dev libboost-filesystem-dev libboost-system-dev should be sufficient
Perfect! Thanks. Then I can confirm that:
```
$ apt-get install -y cmake pkg-config python3-dev build-essential autoconf curl libtool libboost-dev libboost-filesystem-dev libboost-system-dev unzip
$ pip3 install git+https://github.com/ray-project/ray.git@master#subdirectory=python
```
Works.
Great! Are you interested in creating a PR that changes it? It could be changed in the locations shown below and would probably reduce travis times a bunch and make everybody's life a little happier :)

Sure.
Done. See #823.
BTW, you use both Travis and Jenkins? Why?
Awesome, thanks! I'll merge as soon as the tests pass.
We use Travis because it is simple and supports Mac and Jenkins because for some tests we need more resources than are available in Travis (like more memory). Also our multinode docker tests are done in Jenkins. | 2017-08-16T00:13:58 |
|
ray-project/ray | 873 | ray-project__ray-873 | [
"837"
] | 617bc4d2394aec607755a1a33f16271c38298f3f | diff --git a/python/ray/experimental/state.py b/python/ray/experimental/state.py
--- a/python/ray/experimental/state.py
+++ b/python/ray/experimental/state.py
@@ -844,10 +844,14 @@ def workers(self):
"plasma_manager_socket": (worker_info[b"plasma_manager_socket"]
.decode("ascii")),
"plasma_store_socket": (worker_info[b"plasma_store_socket"]
- .decode("ascii")),
- "stderr_file": worker_info[b"stderr_file"].decode("ascii"),
- "stdout_file": worker_info[b"stdout_file"].decode("ascii")
+ .decode("ascii"))
}
+ if b"stderr_file" in worker_info:
+ workers_data[worker_id]["stderr_file"] = (
+ worker_info[b"stderr_file"].decode("ascii"))
+ if b"stdout_file" in worker_info:
+ workers_data[worker_id]["stdout_file"] = (
+ worker_info[b"stdout_file"].decode("ascii"))
return workers_data
def actors(self):
diff --git a/python/ray/services.py b/python/ray/services.py
--- a/python/ray/services.py
+++ b/python/ray/services.py
@@ -248,6 +248,7 @@ def start_redis(node_ip_address,
port=None,
num_redis_shards=1,
redirect_output=False,
+ redirect_worker_output=False,
cleanup=True):
"""Start the Redis global state store.
@@ -259,6 +260,11 @@ def start_redis(node_ip_address,
num_redis_shards (int): If provided, the number of Redis shards to
start, in addition to the primary one. The default value is one
shard.
+ redirect_output (bool): True if output should be redirected to a file
+ and false otherwise.
+ redirect_worker_output (bool): True if worker output should be
+ redirected to a file and false otherwise. Workers will have access
+ to this value when they start up.
cleanup (bool): True if using Ray in local mode. If cleanup is true,
then all Redis processes started by this method will be killed by
services.cleanup() when the Python process that imported services
@@ -284,6 +290,10 @@ def start_redis(node_ip_address,
redis_client = redis.StrictRedis(host=node_ip_address, port=port)
redis_client.set("NumRedisShards", str(num_redis_shards))
+ # Put the redirect_worker_output bool in the Redis shard so that workers
+ # can access it and know whether or not to redirect their output.
+ redis_client.set("RedirectOutput", 1 if redirect_worker_output else 0)
+
# Start other Redis shards listening on random ports. Each Redis shard logs
# to a separate file, prefixed by "redis-<shard number>".
redis_shards = []
@@ -847,7 +857,8 @@ def start_ray_processes(address_info=None,
redis_address, redis_shards = start_redis(
node_ip_address, port=redis_port,
num_redis_shards=num_redis_shards,
- redirect_output=redirect_output, cleanup=cleanup)
+ redirect_output=redirect_output,
+ redirect_worker_output=redirect_output, cleanup=cleanup)
address_info["redis_address"] = redis_address
time.sleep(0.1)
diff --git a/python/ray/worker.py b/python/ray/worker.py
--- a/python/ray/worker.py
+++ b/python/ray/worker.py
@@ -1625,15 +1625,6 @@ def connect(info, object_id_seed=None, mode=WORKER_MODE, worker=global_worker,
worker.actor_id = actor_id
worker.connected = True
worker.set_mode(mode)
- # Redirect worker output and error to their own files.
- if mode == WORKER_MODE:
- log_stdout_file, log_stderr_file = services.new_log_files("worker",
- True)
- sys.stdout = log_stdout_file
- sys.stderr = log_stderr_file
- services.record_log_files_in_redis(info["redis_address"],
- info["node_ip_address"],
- [log_stdout_file, log_stderr_file])
# The worker.events field is used to aggregate logging information and
# display it in the web UI. Note that Python lists protected by the GIL,
# which is important because we will append to this field from multiple
@@ -1652,6 +1643,26 @@ def connect(info, object_id_seed=None, mode=WORKER_MODE, worker=global_worker,
port=int(redis_port))
worker.lock = threading.Lock()
+ # Check the RedirectOutput key in Redis and based on its value redirect
+ # worker output and error to their own files.
+ if mode == WORKER_MODE:
+ # This key is set in services.py when Redis is started.
+ redirect_worker_output_val = worker.redis_client.get("RedirectOutput")
+ if (redirect_worker_output_val is not None and
+ int(redirect_worker_output_val) == 1):
+ redirect_worker_output = 1
+ else:
+ redirect_worker_output = 0
+ if redirect_worker_output:
+ log_stdout_file, log_stderr_file = services.new_log_files("worker",
+ True)
+ sys.stdout = log_stdout_file
+ sys.stderr = log_stderr_file
+ services.record_log_files_in_redis(info["redis_address"],
+ info["node_ip_address"],
+ [log_stdout_file,
+ log_stderr_file])
+
# Create an object for interfacing with the global state.
global_state._initialize_global_state(redis_ip_address, int(redis_port))
@@ -1673,14 +1684,15 @@ def connect(info, object_id_seed=None, mode=WORKER_MODE, worker=global_worker,
is_worker = False
elif mode == WORKER_MODE:
# Register the worker with Redis.
- worker.redis_client.hmset(
- b"Workers:" + worker.worker_id,
- {"node_ip_address": worker.node_ip_address,
- "stdout_file": os.path.abspath(log_stdout_file.name),
- "stderr_file": os.path.abspath(log_stderr_file.name),
- "plasma_store_socket": info["store_socket_name"],
- "plasma_manager_socket": info["manager_socket_name"],
- "local_scheduler_socket": info["local_scheduler_socket_name"]})
+ worker_dict = {
+ "node_ip_address": worker.node_ip_address,
+ "plasma_store_socket": info["store_socket_name"],
+ "plasma_manager_socket": info["manager_socket_name"],
+ "local_scheduler_socket": info["local_scheduler_socket_name"]}
+ if redirect_worker_output:
+ worker_dict["stdout_file"] = os.path.abspath(log_stdout_file.name)
+ worker_dict["stderr_file"] = os.path.abspath(log_stderr_file.name)
+ worker.redis_client.hmset(b"Workers:" + worker.worker_id, worker_dict)
is_worker = True
else:
raise Exception("This code should be unreachable.")
| Worker output not redirected to stdout if redirect_output=False
This illustrates the problem:
```python
import ray
ray.init()
@ray.remote
def f():
    print("Hi")
    return None
ray.get(f.remote())
```
It was introduced here: https://github.com/ray-project/ray/pull/646
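The fix in the patch above threads the setting through Redis so that the decision is made when each worker starts up rather than being hard-coded; a condensed sketch of the two sides (names taken from the patch):
```python
import sys

# On the head node, when Redis is started:
redis_client.set("RedirectOutput", 1 if redirect_worker_output else 0)

# In each worker, when it connects:
redirect_val = worker.redis_client.get("RedirectOutput")
if redirect_val is not None and int(redirect_val) == 1:
    sys.stdout = log_stdout_file
    sys.stderr = log_stderr_file
```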
| 2017-08-25T18:25:06 |
||
ray-project/ray | 956 | ray-project__ray-956 | [
"906"
] | 8906a920f774865c897e32384c5ca984f44c8535 | diff --git a/python/ray/experimental/ui.py b/python/ray/experimental/ui.py
--- a/python/ray/experimental/ui.py
+++ b/python/ray/experimental/ui.py
@@ -4,7 +4,6 @@
import pprint
import ray
import shutil
-import subprocess
import tempfile
import time
@@ -289,65 +288,6 @@ def _get_temp_file_path(**kwargs):
return os.path.relpath(temp_file_path)
-# Helper function that ensures that catapult is cloned to the correct location
-# and that the HTML files required for task trace embedding are in the same
-# directory as the web UI.
-def _setup_trace_dependencies():
- catapult_home = "/tmp/ray/catapult"
- catapult_commit = "33a9271eb3cf5caf925293ec6a4b47c94f1ac968"
- try:
- # Check if we're inside a git repo
- cmd = ["git",
- "-C",
- catapult_home,
- "rev-parse",
- "--is-inside-work-tree"]
- subprocess.check_call(cmd)
-
- except subprocess.CalledProcessError:
- # Error on non-zero exit code (e.g. - ".git not found")
- if not os.path.exists(os.path.join(catapult_home)):
- print(
- "Cloning catapult to {} (this may take a while...)".format(
- catapult_home))
- cmd = ["git",
- "clone",
- "https://github.com/catapult-project/catapult.git",
- catapult_home]
- subprocess.check_call(cmd)
-
- # Checks out the commit associated with allowing different arrow
- # colors. This can and should be removed after catapult's next
- # release.
- print("Checking out commit {}.".format(catapult_commit))
- cmd = ["git", "-C", catapult_home, "checkout", catapult_commit]
- subprocess.check_call(cmd)
-
- # Path to the embedded trace viewer HTML file.
- embedded_trace_path = os.path.join(catapult_home,
- "tracing",
- "bin",
- "index.html")
- # Checks that the trace viewer renderer file exists, generates it if it
- # doesn't.
- if not os.path.exists("trace_viewer_full.html"):
- vulcanize_bin = os.path.join(catapult_home,
- "tracing",
- "bin",
- "vulcanize_trace_viewer")
- # TODO(rkn): The vulcanize_trace_viewer script currently requires
- # Python 2. Remove this dependency.
- cmd = ["python2",
- vulcanize_bin,
- "--config",
- "chrome",
- "--output",
- "trace_viewer_full.html"]
- subprocess.check_call(cmd)
-
- return catapult_home, embedded_trace_path
-
-
def task_timeline():
path_input = widgets.Button(description="View task timeline")
@@ -381,6 +321,14 @@ def task_timeline():
display(widgets.HBox([label_options, breakdown_opt]))
display(path_input)
+ # Check that the trace viewer renderer file is present, and copy it to the
+ # current working directory if it is not present.
+ if not os.path.exists("trace_viewer_full.html"):
+ shutil.copy(
+ os.path.join(os.path.dirname(os.path.abspath(__file__)),
+ "../core/src/catapult_files/trace_viewer_full.html"),
+ "trace_viewer_full.html")
+
def handle_submit(sender):
json_tmp = tempfile.mktemp() + ".json"
@@ -425,9 +373,9 @@ def handle_submit(sender):
print("Opening html file in browser...")
- # Check that the catapult repo is cloned to the correct location
- print(_setup_trace_dependencies())
- catapult_home, trace_viewer_path = _setup_trace_dependencies()
+ trace_viewer_path = os.path.join(
+ os.path.dirname(os.path.abspath(__file__)),
+ "../core/src/catapult_files/index.html")
html_file_path = _get_temp_file_path(suffix=".html")
json_file_path = _get_temp_file_path(suffix=".json")
diff --git a/python/setup.py b/python/setup.py
--- a/python/setup.py
+++ b/python/setup.py
@@ -23,6 +23,8 @@
"ray/core/src/local_scheduler/local_scheduler",
"ray/core/src/local_scheduler/liblocal_scheduler_library.so",
"ray/core/src/global_scheduler/global_scheduler",
+ "ray/core/src/catapult_files/index.html",
+ "ray/core/src/catapult_files/trace_viewer_full.html",
"ray/WebUI.ipynb"
]
| Cloning catapult takes way too long. Speed this up.
Currently when using the web UI, the first time you click on "View Task Timeline", the catapult repository https://github.com/catapult-project/catapult will be cloned. This takes on the order of **minutes**. This needs to be faster.
Some thoughts and questions:
1. A quick look suggests that the catapult repo is about 270MB, and 265MB of that is in the `.git` directory, so if we could just clone a snapshot of the repo at the right commit without getting the git history, that would help a lot (the remaining 5MB is mostly scattered across large html files).
2. Naively packaging catapult in our wheels would make our wheels enormous and slow to download, however, it should be ok if we first remove the git history.
3. Question, how much of catapult do we actually need?
So, if possible, a great short-term fix would be to figure out how to get only a snapshot of the repo without the git history.
Later, we should just ship the repo with our wheels after we've removed the git history and ideally other unnecessary files as well. For people building Ray from source, we can clone catapult in build.sh and cache it, as sketched below.
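Two concrete ways to take a snapshot without the history (a sketch; the commit hash is the one the UI code currently pins):
```bash
# Shallow clone: drops most of the history (still fairly large; see the
# measured sizes discussed below).
git clone --depth=1 https://github.com/catapult-project/catapult.git

# Tarball of a pinned commit: no .git directory at all.
curl -L https://github.com/catapult-project/catapult/archive/33a9271eb3cf5caf925293ec6a4b47c94f1ac968.tar.gz | tar xz
```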
@alanamarzoev @Wapaul1 any thoughts about this?
| @robertnishihara afaik, you should only need catapult tracing.
I think that just taking a snapshot of the repo should work.
Right. I googled a little bit and didn't find a way to do that, but I'm sure there is one..
`git clone --depth=1` on catapult shrinks the `.git` directory by 160 MB, bringing the total download down to 107 MB. While this is better, we may want to consider forking catapult and keeping all the important files.
It would also be nice to print "this might take a while" in the UI so that users don't get confused.
We should look into speeding it up by extracting/minifying the relevant parts and deploying them with the python wheel; bower/vulcanize should have an option for that.
I think that either cloning catapult or extracting the relevant files from catapult and including them with the python wheel/in the build.sh script would be better than what we're currently doing. Even if it ends up not being significantly faster, it at least wouldn't interfere with the perceived interactivity of the UI. | 2017-09-09T17:09:38 |
|
ray-project/ray | 960 | ray-project__ray-960 | [
"916"
] | 413140df3846d28c2d56bb1a862a277de9062f2c | diff --git a/examples/resnet/cifar_input.py b/examples/resnet/cifar_input.py
--- a/examples/resnet/cifar_input.py
+++ b/examples/resnet/cifar_input.py
@@ -31,24 +31,27 @@ def build_data(data_path, size, dataset):
image_bytes = image_size * image_size * depth
record_bytes = label_bytes + label_offset + image_bytes
- data_files = tf.gfile.Glob(data_path)
- file_queue = tf.train.string_input_producer(data_files, shuffle=True)
+ def load_transform(value):
+ # Convert these examples to dense labels and processed images.
+ record = tf.reshape(tf.decode_raw(value, tf.uint8), [record_bytes])
+ label = tf.cast(tf.slice(record, [label_offset], [label_bytes]),
+ tf.int32)
+ # Convert from string to [depth * height * width] to
+ # [depth, height, width].
+ depth_major = tf.reshape(
+ tf.slice(record, [label_bytes], [image_bytes]),
+ [depth, image_size, image_size])
+ # Convert from [depth, height, width] to [height, width, depth].
+ image = tf.cast(tf.transpose(depth_major, [1, 2, 0]), tf.float32)
+ return (image, label)
# Read examples from files in the filename queue.
- reader = tf.FixedLengthRecordReader(record_bytes=record_bytes)
- _, value = reader.read(file_queue)
-
- # Convert these examples to dense labels and processed images.
- record = tf.reshape(tf.decode_raw(value, tf.uint8), [record_bytes])
- label = tf.cast(tf.slice(record, [label_offset], [label_bytes]), tf.int32)
- # Convert from string to [depth * height * width] to
- # [depth, height, width].
- depth_major = tf.reshape(tf.slice(record, [label_bytes], [image_bytes]),
- [depth, image_size, image_size])
- # Convert from [depth, height, width] to [height, width, depth].
- image = tf.cast(tf.transpose(depth_major, [1, 2, 0]), tf.float32)
- queue = tf.train.shuffle_batch([image, label], size, size, 0,
- num_threads=16)
- return queue
+ data_files = tf.gfile.Glob(data_path)
+ data = tf.contrib.data.FixedLengthRecordDataset(data_files,
+ record_bytes=record_bytes)
+ data = data.map(load_transform)
+ data = data.batch(size)
+ iterator = data.make_one_shot_iterator()
+ return iterator.get_next()
def build_input(data, batch_size, dataset, train):
@@ -67,42 +70,35 @@ def build_input(data, batch_size, dataset, train):
Raises:
ValueError: When the specified dataset is not supported.
"""
- images_constant = tf.constant(data[0])
- labels_constant = tf.constant(data[1])
image_size = 32
depth = 3
num_classes = 10 if dataset == "cifar10" else 100
- image, label = tf.train.slice_input_producer([images_constant,
- labels_constant],
- capacity=16 * batch_size)
- if train:
+ images, labels = data
+ num_samples = images.shape[0] - images.shape[0] % batch_size
+ dataset = tf.contrib.data.Dataset.from_tensor_slices(
+ (images[:num_samples], labels[:num_samples]))
+
+ def map_train(image, label):
image = tf.image.resize_image_with_crop_or_pad(image, image_size + 4,
image_size + 4)
image = tf.random_crop(image, [image_size, image_size, 3])
image = tf.image.random_flip_left_right(image)
image = tf.image.per_image_standardization(image)
- example_queue = tf.RandomShuffleQueue(
- capacity=16 * batch_size,
- min_after_dequeue=8 * batch_size,
- dtypes=[tf.float32, tf.int32],
- shapes=[[image_size, image_size, depth], [1]])
- num_threads = 16
- else:
+ return (image, label)
+
+ def map_test(image, label):
image = tf.image.resize_image_with_crop_or_pad(image, image_size,
image_size)
image = tf.image.per_image_standardization(image)
- example_queue = tf.FIFOQueue(
- 3 * batch_size,
- dtypes=[tf.float32, tf.int32],
- shapes=[[image_size, image_size, depth], [1]])
- num_threads = 1
-
- example_enqueue_op = example_queue.enqueue([image, label])
- tf.train.add_queue_runner(tf.train.queue_runner.QueueRunner(
- example_queue, [example_enqueue_op] * num_threads))
-
- # Read "batch" labels + images from the example queue.
- images, labels = example_queue.dequeue_many(batch_size)
+ return (image, label)
+
+ dataset = dataset.map(map_train if train else map_test)
+ dataset = dataset.batch(batch_size)
+ dataset = dataset.repeat()
+ if train:
+ dataset = dataset.shuffle(buffer_size=16 * batch_size)
+ images, labels = dataset.make_one_shot_iterator().get_next()
+ images = tf.reshape(images, [batch_size, image_size, image_size, depth])
labels = tf.reshape(labels, [batch_size, 1])
indices = tf.reshape(tf.range(0, batch_size, 1), [batch_size, 1])
labels = tf.sparse_to_dense(
diff --git a/examples/resnet/resnet_main.py b/examples/resnet/resnet_main.py
--- a/examples/resnet/resnet_main.py
+++ b/examples/resnet/resnet_main.py
@@ -15,9 +15,11 @@
import cifar_input
import resnet_model
-# Tensorflow must be at least version 1.0.0 for the example to work.
-if int(tf.__version__.split(".")[0]) < 1:
- raise Exception("Your Tensorflow version is less than 1.0.0. Please "
+# Tensorflow must be at least version 1.2.0 for the example to work.
+tf_major = int(tf.__version__.split(".")[0])
+tf_minor = int(tf.__version__.split(".")[1])
+if (tf_major < 1) or (tf_major == 1 and tf_minor < 2):
+ raise Exception("Your Tensorflow version is less than 1.2.0. Please "
"update Tensorflow to the latest version.")
parser = argparse.ArgumentParser(description="Run the ResNet example.")
@@ -50,12 +52,9 @@ def get_data(path, size, dataset):
# This only uses the cpu.
os.environ["CUDA_VISIBLE_DEVICES"] = ""
with tf.device("/cpu:0"):
- queue = cifar_input.build_data(path, size, dataset)
+ dataset = cifar_input.build_data(path, size, dataset)
sess = tf.Session()
- coord = tf.train.Coordinator()
- tf.train.start_queue_runners(sess, coord=coord)
- images, labels = sess.run(queue)
- coord.request_stop()
+ images, labels = sess.run(dataset)
sess.close()
return images, labels
@@ -86,12 +85,9 @@ def __init__(self, data, dataset, num_gpus):
# Only a single actor in this case.
tf.set_random_seed(1)
- input_images = data[0]
- input_labels = data[1]
with tf.device("/gpu:0" if num_gpus > 0 else "/cpu:0"):
# Build the model.
- images, labels = cifar_input.build_input([input_images,
- input_labels],
+ images, labels = cifar_input.build_input(data,
hps.batch_size, dataset,
False)
self.model = resnet_model.ResNet(hps, images, labels, "train")
@@ -100,8 +96,6 @@ def __init__(self, data, dataset, num_gpus):
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
self.model.variables.set_session(sess)
- self.coord = tf.train.Coordinator()
- tf.train.start_queue_runners(sess, coord=self.coord)
init = tf.global_variables_initializer()
sess.run(init)
self.steps = 10
@@ -123,6 +117,7 @@ def get_weights(self):
@ray.remote
class ResNetTestActor(object):
def __init__(self, data, dataset, eval_batch_count, eval_dir):
+ os.environ["CUDA_VISIBLE_DEVICES"] = ""
hps = resnet_model.HParams(
batch_size=100,
num_classes=100 if dataset == "cifar100" else 10,
@@ -134,12 +129,9 @@ def __init__(self, data, dataset, eval_batch_count, eval_dir):
relu_leakiness=0.1,
optimizer="mom",
num_gpus=0)
- input_images = data[0]
- input_labels = data[1]
with tf.device("/cpu:0"):
# Builds the testing network.
- images, labels = cifar_input.build_input([input_images,
- input_labels],
+ images, labels = cifar_input.build_input(data,
hps.batch_size, dataset,
False)
self.model = resnet_model.ResNet(hps, images, labels, "eval")
@@ -148,8 +140,6 @@ def __init__(self, data, dataset, eval_batch_count, eval_dir):
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
self.model.variables.set_session(sess)
- self.coord = tf.train.Coordinator()
- tf.train.start_queue_runners(sess, coord=self.coord)
init = tf.global_variables_initializer()
sess.run(init)
| Resnet example with GPU crashes
Running the resnet example locally on a g2.2xlarge EC2 instance with the following options:
```
python ray/examples/resnet/resnet_main.py \
    --eval_dir=/tmp/resnet-model/eval \
    --train_data_path=cifar-10-batches-bin/data_batch* \
    --eval_data_path=cifar-10-batches-bin/test_batch.bin \
    --dataset=cifar10 \
    --num_gpus=1
```
produces the following output:
I tensorflow/stream_executor/dso_loader.cc:135] successfully opened CUDA library libcublas.so.8.0 locally
I tensorflow/stream_executor/dso_loader.cc:135] successfully opened CUDA library libcudnn.so.5 locally
I tensorflow/stream_executor/dso_loader.cc:135] successfully opened CUDA library libcufft.so.8.0 locally
I tensorflow/stream_executor/dso_loader.cc:135] successfully opened CUDA library libcuda.so.1 locally
I tensorflow/stream_executor/dso_loader.cc:135] successfully opened CUDA library libcurand.so.8.0 locally
Waiting for redis server at 127.0.0.1:57431 to respond...
Waiting for redis server at 127.0.0.1:43581 to respond...
Starting local scheduler with 8 CPUs, 1 GPUs
View the web UI at http://localhost:8890/notebooks/ray_ui80356.ipynb
The log files for tensorboard are stored at ip 172.31.2.122.
Starting training loop. Use Ctrl-C to exit.
Traceback (most recent call last):
File "ray/examples/resnet/resnet_main.py", line 248, in <module>
train()
File "ray/examples/resnet/resnet_main.py", line 231, in train
for actor in train_actors])
File "/usr/local/lib/python2.7/dist-packages/ray/worker.py", line 1983, in get
raise RayGetError(object_ids[i], value)
ray.worker.RayGetError: Could not get objectid ObjectID(819d1c29782d0330f0bae9b966c4fa4f7909c849). It was created by remote function compute_steps which failed with:
Remote function compute_steps failed with:
Traceback (most recent call last):
File "/usr/local/lib/python2.7/dist-packages/ray/worker.py", line 732, in _process_task
self.actors[task.actor_id().id()], *arguments)
File "ray/examples/resnet/resnet_main.py", line 113, in compute_steps
self.model.variables.sess.run(self.model.train_op)
File "/home/ubuntu/.local/lib/python2.7/site-packages/tensorflow/python/client/session.py", line 767, in run
run_metadata_ptr)
File "/home/ubuntu/.local/lib/python2.7/site-packages/tensorflow/python/client/session.py", line 965, in _run
feed_dict_string, options, run_metadata)
File "/home/ubuntu/.local/lib/python2.7/site-packages/tensorflow/python/client/session.py", line 1015, in _do_run
target_list, options, run_metadata)
File "/home/ubuntu/.local/lib/python2.7/site-packages/tensorflow/python/client/session.py", line 1035, in _do_call
raise type(e)(node_def, op, message)
OutOfRangeError: FIFOQueue '_1_fifo_queue' is closed and has insufficient elements (requested 128, current size 0)
[[Node: fifo_queue_DequeueMany = QueueDequeueManyV2[component_types=[DT_FLOAT, DT_INT32], timeout_ms=-1, _device="/job:localhost/replica:0/task:0/cpu:0"](fifo_queue, fifo_queue_DequeueMany/n/_2345)]]
[[Node: fifo_queue_DequeueMany/_2349 = _HostRecv[client_terminated=false, recv_device="/job:localhost/replica:0/task:0/gpu:0", send_device="/job:localhost/replica:0/task:0/cpu:0", send_device_incarnation=1, tensor_name="edge_5921_fifo_queue_DequeueMany", tensor_type=DT_INT32, _device="/job:localhost/replica:0/task:0/gpu:0"]()]]
Caused by op u'fifo_queue_DequeueMany', defined at:
File "/usr/local/lib/python2.7/dist-packages/ray/workers/default_worker.py", line 105, in <module>
ray.worker.global_worker.main_loop()
File "/usr/local/lib/python2.7/dist-packages/ray/worker.py", line 880, in main_loop
self._wait_for_and_process_task(task)
File "/usr/local/lib/python2.7/dist-packages/ray/worker.py", line 833, in _wait_for_and_process_task
self._process_task(task)
File "/usr/local/lib/python2.7/dist-packages/ray/worker.py", line 732, in _process_task
self.actors[task.actor_id().id()], *arguments)
File "ray/examples/resnet/resnet_main.py", line 96, in __init__
False)
File "/home/ubuntu/ray/examples/resnet/cifar_input.py", line 105, in build_input
images, labels = example_queue.dequeue_many(batch_size)
File "/home/ubuntu/.local/lib/python2.7/site-packages/tensorflow/python/ops/data_flow_ops.py", line 458, in dequeue_many
self._queue_ref, n=n, component_types=self._dtypes, name=name)
File "/home/ubuntu/.local/lib/python2.7/site-packages/tensorflow/python/ops/gen_data_flow_ops.py", line 1310, in _queue_dequeue_many_v2
timeout_ms=timeout_ms, name=name)
File "/home/ubuntu/.local/lib/python2.7/site-packages/tensorflow/python/framework/op_def_library.py", line 763, in apply_op
op_def=op_def)
File "/home/ubuntu/.local/lib/python2.7/site-packages/tensorflow/python/framework/ops.py", line 2327, in create_op
original_op=self._default_original_op, op_def=op_def)
File "/home/ubuntu/.local/lib/python2.7/site-packages/tensorflow/python/framework/ops.py", line 1226, in __init__
self._traceback = _extract_stack()
OutOfRangeError (see above for traceback): FIFOQueue '_1_fifo_queue' is closed and has insufficient elements (requested 128, current size 0)
[[Node: fifo_queue_DequeueMany = QueueDequeueManyV2[component_types=[DT_FLOAT, DT_INT32], timeout_ms=-1, _device="/job:localhost/replica:0/task:0/cpu:0"](fifo_queue, fifo_queue_DequeueMany/n/_2345)]]
[[Node: fifo_queue_DequeueMany/_2349 = _HostRecv[client_terminated=false, recv_device="/job:localhost/replica:0/task:0/gpu:0", send_device="/job:localhost/replica:0/task:0/cpu:0", send_device_incarnation=1, tensor_name="edge_5921_fifo_queue_DequeueMany", tensor_type=DT_INT32, _device="/job:localhost/replica:0/task:0/gpu:0"]()]]
You can inspect errors by running
ray.error_info()
If this driver is hanging, start a new one with
ray.init(redis_address="127.0.0.1:57431")
Traceback (most recent call last):
File "/usr/local/lib/python2.7/dist-packages/ray/workers/default_worker.py", line 105, in <module>
ray.worker.global_worker.main_loop()
File "/usr/local/lib/python2.7/dist-packages/ray/worker.py", line 880, in main_loop
self._wait_for_and_process_task(task)
File "/usr/local/lib/python2.7/dist-packages/ray/worker.py", line 836, in _wait_for_and_process_task
flush_log()
File "/usr/local/lib/python2.7/dist-packages/ray/worker.py", line 1949, in flush_log
worker.local_scheduler_client.log_event(event_log_key,
AttributeError: 'Worker' object has no attribute 'local_scheduler_client'
This error is unexpected and should not have happened. Somehow a worker
crashed in an unanticipated way causing the main_loop to throw an exception,
which is being caught in "python/ray/workers/default_worker.py".
You can inspect errors by running
ray.error_info()
If this driver is hanging, start a new one with
ray.init(redis_address="127.0.0.1:57431")
[1]+ Terminated python ray/examples/resnet/resnet_main.py --eval_dir=/tmp/resnet-model/eval --train_data_path=cifar-10-batches-bin/data_batch* --eval_data_path=cifar-10-batches-bin/test_batch.bin --dataset=cifar10 --num_gpus=0 &> output.txt
| Running this without a GPU seems to work fine.
Interesting, it looks like this may be the same issue as #446. @Wapaul1 @pcmoritz do you have any ideas about this? It's easily reproducible.
Same problem with same error
@xc9 @maxim28, can you share some details so we can reproduce the problem? What versions os TensorFlow, CUDA, cuDNN are you using? Which OS and Python version? What type of GPU? Is this on EC2? If so, which instance type? Any other important details about your setup?
If it's on EC2, perhaps you could share an AMI where we can reproduce the problem.
We're thinking of fixing this by replacing the queues with the TensorFlow Dataset API https://www.tensorflow.org/versions/r1.3/programmers_guide/datasets. | 2017-09-10T01:37:37 |
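A rough sketch of what that replacement could look like (hypothetical; in TF 1.3 this API still lived under `tf.contrib.data` rather than `tf.data`):
```python
import tensorflow as tf

def build_input(images, labels, batch_size):
    # Unlike a FIFOQueue, which raises OutOfRangeError once it is closed and
    # drained, a repeated Dataset never runs dry mid-training.
    dataset = (tf.data.Dataset.from_tensor_slices((images, labels))
               .shuffle(buffer_size=10000)
               .repeat()
               .batch(batch_size))
    return dataset.make_one_shot_iterator().get_next()
```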
|
ray-project/ray | 1,064 | ray-project__ray-1064 | [
"1060"
] | 0dcf36c91eb52b2a75d992af60c7d5ba13ba1eee | diff --git a/python/ray/actor.py b/python/ray/actor.py
--- a/python/ray/actor.py
+++ b/python/ray/actor.py
@@ -251,6 +251,8 @@ def __ray_restore_from_checkpoint__(cls, pickled_checkpoint):
# constructor.
exported = []
+ # Create objects to wrap method invocations. This is done so that we can
+ # invoke methods with actor.method.remote() instead of actor.method().
class ActorMethod(object):
def __init__(self, actor, method_name, method_signature):
self.actor = actor
@@ -307,14 +309,6 @@ def _manual_init(self, *args, **kwargs):
self._ray_method_signatures[k] = signature.extract_signature(
v, ignore_first=True)
- # Create objects to wrap method invocations. This is done so that
- # we can invoke methods with actor.method.remote() instead of
- # actor.method().
- self._actor_method_invokers = dict()
- for k, v in self._ray_actor_methods.items():
- self._actor_method_invokers[k] = ActorMethod(
- self, k, self._ray_method_signatures[k])
-
# Do not export the actor class or the actor if run in PYTHON_MODE
# Instead, instantiate the actor locally and add it to
# global_worker's dictionary
@@ -390,10 +384,17 @@ def __getattribute__(self, attr):
"_actor_method_call"]:
return object.__getattribute__(self, attr)
if attr in self._ray_actor_methods.keys():
- return self._actor_method_invokers[attr]
- # There is no method with this name, so raise an exception.
- raise AttributeError("'{}' Actor object has no attribute '{}'"
- .format(Class, attr))
+ # We create the ActorMethod on the fly here so that the
+ # ActorHandle doesn't need a reference to the ActorMethod. The
+ # ActorMethod has a reference to the ActorHandle and this was
+ # causing cyclic references which were prevent object
+ # deallocation from behaving in a predictable manner.
+ return ActorMethod(self, attr,
+ self._ray_method_signatures[attr])
+ else:
+ # There is no method with this name, so raise an exception.
+ raise AttributeError("'{}' Actor object has no attribute '{}'"
+ .format(Class, attr))
def __repr__(self):
return "Actor(" + self._ray_actor_id.hex() + ")"
| diff --git a/python/ray/test/test_utils.py b/python/ray/test/test_utils.py
--- a/python/ray/test/test_utils.py
+++ b/python/ray/test/test_utils.py
@@ -118,6 +118,7 @@ def _pid_alive(pid):
"""
try:
os.kill(pid, 0)
+ return True
except OSError:
return False
diff --git a/test/actor_test.py b/test/actor_test.py
--- a/test/actor_test.py
+++ b/test/actor_test.py
@@ -5,10 +5,12 @@
import collections
import random
import numpy as np
+import os
import time
import unittest
import ray
+import ray.test.test_utils
class ActorAPI(unittest.TestCase):
@@ -279,6 +281,40 @@ def f(self, y):
ray.worker.cleanup()
+ def testActorDeletion(self):
+ ray.init(num_workers=0)
+
+ # Make sure that when an actor handles goes out of scope, the actor
+ # destructor is called.
+
+ @ray.remote
+ class Actor(object):
+ def getpid(self):
+ return os.getpid()
+
+ a = Actor.remote()
+ pid = ray.get(a.getpid.remote())
+ a = None
+ ray.test.test_utils.wait_for_pid_to_exit(pid)
+
+ actors = [Actor.remote() for _ in range(10)]
+ pids = ray.get([a.getpid.remote() for a in actors])
+ a = None
+ actors = None
+ [ray.test.test_utils.wait_for_pid_to_exit(pid) for pid in pids]
+
+ @ray.remote
+ class Actor(object):
+ def method(self):
+ return 1
+
+ # Make sure that if we create an actor and call a method on it
+ # immediately, the actor doesn't get killed before the method is
+ # called.
+ self.assertEqual(ray.get(Actor.remote().method.remote()), 1)
+
+ ray.worker.cleanup()
+
def testActorState(self):
ray.init()
diff --git a/test/jenkins_tests/multi_node_tests/many_drivers_test.py b/test/jenkins_tests/multi_node_tests/many_drivers_test.py
--- a/test/jenkins_tests/multi_node_tests/many_drivers_test.py
+++ b/test/jenkins_tests/multi_node_tests/many_drivers_test.py
@@ -30,10 +30,10 @@ def check_ids(self):
def driver(redis_address, driver_index):
- """The script for driver 0.
+ """The script for all drivers.
- This driver should create five actors that each use one GPU and some actors
- that use no GPUs. After a while, it should exit.
+ This driver should create five actors that each use one GPU. After a while,
+ it should exit.
"""
ray.init(redis_address=redis_address)
@@ -44,7 +44,7 @@ def driver(redis_address, driver_index):
for i in range(driver_index - max_concurrent_drivers + 1):
_wait_for_event("DRIVER_{}_DONE".format(i), redis_address)
- def try_to_create_actor(actor_class, timeout=100):
+ def try_to_create_actor(actor_class, timeout=500):
# Try to create an actor, but allow failures while we wait for the
# monitor to release the resources for the removed drivers.
start_time = time.time()
| Actor processes not destroyed after Python ref is released
This can be reproduced with the following script. It will create 100 python processes, but even after the list is de-referenced the processes hang around forever (check `ps -A | grep python`).
```
import ray
import time
import sys
@ray.remote
class Actor(object):
def __init__(self):
pass
ray.init()
refs = [Actor.remote() for _ in range(100)]
time.sleep(5)
refs = None
time.sleep(99999)
```
| I'm looking into this and it looks like the reference counts on the actor handles are too high, so they aren't going out of scope. That is, the destructors aren't getting called.
In the meantime, as a workaround, you could call
```python
a = Actor.remote()
a.__ray_terminate__.remote(a._ray_actor_id.id())
```
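To make the reference-counting explanation concrete, here is a minimal plain-Python sketch (not Ray code) of how a handle/method cycle keeps refcounts above zero so that destructors are deferred:
```python
import gc

class Method(object):
    def __init__(self, handle):
        self.handle = handle          # back-reference creates a cycle

class Handle(object):
    def __init__(self):
        self.methods = {"m": Method(self)}
    def __del__(self):
        print("handle destructor ran")

h = Handle()
h = None       # prints nothing: the cycle keeps both objects alive
gc.collect()   # Python 3 runs the destructor here; Python 2 never
               # collects cycles whose objects define __del__
```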
Ok, looks like the bug was introduced somewhere between `0.2.0` and `0.2.1`.
Looks like the problem first appeared in #902. | 2017-10-03T00:37:01 |
ray-project/ray | 1,088 | ray-project__ray-1088 | [
"1065"
] | aebe9f937451bfa10aa0f2a41bafcf4747fb60f0 | diff --git a/python/ray/actor.py b/python/ray/actor.py
--- a/python/ray/actor.py
+++ b/python/ray/actor.py
@@ -12,8 +12,8 @@
import ray.local_scheduler
import ray.signature as signature
import ray.worker
-from ray.utils import (FunctionProperties, random_string,
- select_local_scheduler)
+from ray.utils import (binary_to_hex, FunctionProperties, random_string,
+ release_gpus_in_use, select_local_scheduler)
def random_actor_id():
@@ -112,7 +112,6 @@ def temporary_actor_method(*xs):
try:
unpickled_class = pickle.loads(pickled_class)
- worker.actor_class = unpickled_class
except Exception:
# If an exception was thrown when the actor was imported, we record the
# traceback and notify the scheduler of the failure.
@@ -120,6 +119,9 @@ def temporary_actor_method(*xs):
# Log the error message.
worker.push_error_to_driver(driver_id, "register_actor", traceback_str,
data={"actor_id": actor_id_str})
+ # TODO(rkn): In the future, it might make sense to have the worker exit
+ # here. However, currently that would lead to hanging if someone calls
+ # ray.get on a method invoked on the actor.
else:
# TODO(pcm): Why is the below line necessary?
unpickled_class.__module__ = module
@@ -133,6 +135,13 @@ def temporary_actor_method(*xs):
# because we currently do need the actor worker to submit new tasks
# for the actor.
+ # Store some extra information that will be used when the actor exits
+ # to release GPU resources.
+ worker.driver_id = binary_to_hex(driver_id)
+ local_scheduler_id = worker.redis_client.hget(
+ b"Actor:" + actor_id_str, "local_scheduler_id")
+ worker.local_scheduler_id = binary_to_hex(local_scheduler_id)
+
def export_actor_class(class_id, Class, actor_method_names,
checkpoint_interval, worker):
@@ -214,7 +223,14 @@ def __ray_terminate__(self, actor_id):
# remove the actor key from Redis here.
ray.worker.global_worker.redis_client.hset(b"Actor:" + actor_id,
"removed", True)
- # Disconnect the worker from he local scheduler. The point of this
+ # Release the GPUs that this worker was using.
+ if len(ray.get_gpu_ids()) > 0:
+ release_gpus_in_use(
+ ray.worker.global_worker.driver_id,
+ ray.worker.global_worker.local_scheduler_id,
+ ray.get_gpu_ids(),
+ ray.worker.global_worker.redis_client)
+ # Disconnect the worker from the local scheduler. The point of this
# is so that when the worker kills itself below, the local
# scheduler won't push an error message to the driver.
ray.worker.global_worker.local_scheduler_client.disconnect()
diff --git a/python/ray/utils.py b/python/ray/utils.py
--- a/python/ray/utils.py
+++ b/python/ray/utils.py
@@ -136,6 +136,54 @@ def attempt_to_reserve_gpus(num_gpus, driver_id, local_scheduler,
return success
+def release_gpus_in_use(driver_id, local_scheduler_id, gpu_ids, redis_client):
+ """Release the GPUs that a given worker was using.
+
+ Note that this does not affect the local scheduler's bookkeeping. It only
+ affects the GPU allocations which are recorded in the primary Redis shard,
+ which are redundant with the local scheduler bookkeeping.
+
+ Args:
+ driver_id: The ID of the driver that is releasing some GPUs.
+ local_scheduler_id: The ID of the local scheduler that owns the GPUs
+ being released.
+ gpu_ids: The IDs of the GPUs being released.
+ redis_client: A client for the primary Redis shard.
+ """
+ # Attempt to release GPU IDs atomically.
+ with redis_client.pipeline() as pipe:
+ while True:
+ try:
+ # If this key is changed before the transaction below (the
+ # multi/exec block), then the transaction will not take place.
+ pipe.watch(local_scheduler_id)
+
+ # Figure out which GPUs are currently in use.
+ result = redis_client.hget(local_scheduler_id, "gpus_in_use")
+ gpus_in_use = dict() if result is None else json.loads(
+ result.decode("ascii"))
+
+ assert driver_id in gpus_in_use
+ assert gpus_in_use[driver_id] >= len(gpu_ids)
+
+ gpus_in_use[driver_id] -= len(gpu_ids)
+
+ pipe.multi()
+
+ pipe.hset(local_scheduler_id, "gpus_in_use",
+ json.dumps(gpus_in_use))
+
+ pipe.execute()
+ # If a WatchError is not raised, then the operations should
+ # have gone through atomically.
+ break
+ except redis.WatchError:
+ # Another client must have changed the watched key between the
+ # time we started WATCHing it and the pipeline's execution. We
+ # should just retry.
+ continue
+
+
def select_local_scheduler(driver_id, local_schedulers, num_gpus,
redis_client):
"""Select a local scheduler to assign this actor to.
| diff --git a/test/actor_test.py b/test/actor_test.py
--- a/test/actor_test.py
+++ b/test/actor_test.py
@@ -315,6 +315,33 @@ def method(self):
ray.worker.cleanup()
+ def testActorDeletionWithGPUs(self):
+ ray.init(num_workers=0, num_gpus=1)
+
+ # When an actor that uses a GPU exits, make sure that the GPU resources
+ # are released.
+
+ @ray.remote(num_gpus=1)
+ class Actor(object):
+ def getpid(self):
+ return os.getpid()
+
+ for _ in range(5):
+ # If we can successfully create an actor, that means that enough
+ # GPU resources are available.
+ a = Actor.remote()
+ pid = ray.get(a.getpid.remote())
+
+ # Make sure that we can't create another actor.
+ with self.assertRaises(Exception):
+ Actor.remote()
+
+ # Let the actor go out of scope, and wait for it to exit.
+ a = None
+ ray.test.test_utils.wait_for_pid_to_exit(pid)
+
+ ray.worker.cleanup()
+
def testActorState(self):
ray.init()
| GPU resources not released after killing actor
The following crashes with
```
Exception: Could not find a node with enough GPUs or other resources to create this actor. The local scheduler information is [ {'ClientType': 'local_scheduler', 'Deleted': False, 'DBClientID': '31dc437d6df69857fea7a9eb6f04004421039e18', 'AuxAddress': '127.0.0.1:37853', 'NumCPUs': 32.0, 'NumGPUs': 1.0, 'LocalSchedulerSocketName': '/tmp/scheduler9534802'}].
```
```
import ray
import sys
import time
@ray.remote(num_gpus=1)
class Actor(object):
def __init__(self):
pass
ray.init(num_gpus=1)
a = Actor.remote()
a.__ray_terminate__.remote(a._ray_actor_id.id())
time.sleep(5)
a = Actor.remote() # crashes with not enough gpus
```
cc @stephanie-wang @robertnishihara
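For reference, the patch releases the Redis-side GPU bookkeeping with an optimistic-locking retry loop; a standalone sketch of that pattern in redis-py (key and field names are illustrative, not necessarily Ray's exact schema):
```python
import json
import redis

def release_gpus(client, scheduler_key, driver_id, num_released):
    with client.pipeline() as pipe:
        while True:
            try:
                pipe.watch(scheduler_key)   # transaction aborts if key changes
                raw = client.hget(scheduler_key, "gpus_in_use")
                in_use = json.loads(raw.decode("ascii")) if raw else {}
                in_use[driver_id] = in_use.get(driver_id, 0) - num_released
                pipe.multi()
                pipe.hset(scheduler_key, "gpus_in_use", json.dumps(in_use))
                pipe.execute()              # commits atomically
                return
            except redis.WatchError:
                continue                    # lost the race; retry
```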
| That's very surprising. I can reproduce it. However, this kind of thing should be covered by our tests. I'll look into it. | 2017-10-06T06:35:33 |
ray-project/ray | 1,094 | ray-project__ray-1094 | [
"1073"
] | a52a1e893fc44581e0de0e5c02ec4a21e5f8031b | diff --git a/python/setup.py b/python/setup.py
--- a/python/setup.py
+++ b/python/setup.py
@@ -23,11 +23,23 @@
"ray/core/src/local_scheduler/local_scheduler",
"ray/core/src/local_scheduler/liblocal_scheduler_library.so",
"ray/core/src/global_scheduler/global_scheduler",
- "ray/core/src/catapult_files/index.html",
- "ray/core/src/catapult_files/trace_viewer_full.html",
"ray/WebUI.ipynb"
]
+optional_ray_files = []
+
+ray_ui_files = [
+ "ray/core/src/catapult_files/index.html",
+ "ray/core/src/catapult_files/trace_viewer_full.html"
+]
+
+# The UI files are mandatory if the INCLUDE_UI environment variable equals 1.
+# Otherwise, they are optional.
+if "INCLUDE_UI" in os.environ and os.environ["INCLUDE_UI"] == "1":
+ ray_files += ray_ui_files
+else:
+ optional_ray_files += ray_ui_files
+
class build_ext(_build_ext.build_ext):
def run(self):
@@ -56,6 +68,14 @@ def run(self):
self.move_file(os.path.join(generated_python_directory,
filename))
+ # Try to copy over the optional files.
+ for filename in optional_ray_files:
+ try:
+ self.move_file(filename)
+ except Exception as e:
+ print("Failed to copy optional file {}. This is ok."
+ .format(filename))
+
def move_file(self, filename):
# TODO(rkn): This feels very brittle. It may not handle all cases. See
# https://github.com/apache/arrow/blob/master/python/setup.py for an
| Ray fails to compile from master branch with Python 3 only
Relevant log output:
```
+ git clone https://github.com/ray-project/catapult.git /tmp/pip-_r2ylpv0-build/src/thirdparty/catapult
Cloning into '/tmp/pip-_r2ylpv0-build/src/thirdparty/catapult'...
+ break
+ pushd /tmp/pip-_r2ylpv0-build/src/thirdparty/catapult
/tmp/pip-_r2ylpv0-build/src/thirdparty/catapult /tmp/pip-_r2ylpv0-build/python
+ git checkout 18cd334755701cf0c3b90b7172126c686d2eb787
Note: checking out '18cd334755701cf0c3b90b7172126c686d2eb787'.
You are in 'detached HEAD' state. You can look around, make experimental
changes and commit them, and you can discard any commits you make in this
state without impacting any branches by performing another checkout.
If you want to create a new branch to retain commits you create, you may
do so (now or later) by using -b with the checkout command again. Example:
git checkout -b <new-branch-name>
HEAD is now at 18cd334... Allow timeline scroll-to-zoom without holding ALT
+ popd
/tmp/pip-_r2ylpv0-build/python
+ [[ ! -f /tmp/pip-_r2ylpv0-build/src/thirdparty/../../python/ray/core/src/catapult_files/index.html ]]
+ python2 /tmp/pip-_r2ylpv0-build/src/thirdparty/catapult/tracing/bin/vulcanize_trace_viewer --config chrome --output /tmp/pip-_r2ylpv0-build/src/thirdparty/../../python/ray/core/src/catapult_files/trace_viewer_full.html
/tmp/pip-_r2ylpv0-build/src/thirdparty/build_ui.sh: line 52: python2: command not found
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "/tmp/pip-_r2ylpv0-build/python/setup.py", line 99, in <module>
license="Apache 2.0")
File "/usr/local/lib/python3.6/distutils/core.py", line 148, in setup
dist.run_commands()
File "/usr/local/lib/python3.6/distutils/dist.py", line 955, in run_commands
self.run_command(cmd)
File "/usr/local/lib/python3.6/distutils/dist.py", line 974, in run_command
cmd_obj.run()
File "/usr/local/lib/python3.6/site-packages/setuptools/command/install.py", line 61, in run
return orig.install.run(self)
File "/usr/local/lib/python3.6/distutils/command/install.py", line 545, in run
self.run_command('build')
File "/usr/local/lib/python3.6/distutils/cmd.py", line 313, in run_command
self.distribution.run_command(command)
File "/usr/local/lib/python3.6/distutils/dist.py", line 974, in run_command
cmd_obj.run()
File "/usr/local/lib/python3.6/distutils/command/build.py", line 135, in run
self.run_command(cmd_name)
File "/usr/local/lib/python3.6/distutils/cmd.py", line 313, in run_command
self.distribution.run_command(command)
File "/usr/local/lib/python3.6/distutils/dist.py", line 974, in run_command
cmd_obj.run()
File "/tmp/pip-_r2ylpv0-build/python/setup.py", line 38, in run
subprocess.check_call(["../build.sh", sys.executable])
File "/usr/local/lib/python3.6/subprocess.py", line 291, in check_call
raise CalledProcessError(retcode, cmd)
subprocess.CalledProcessError: Command '['../build.sh', '/usr/local/bin/python3.6']' returned non-zero exit status 127.
```
| Ah, in this case it is failing to find a `python2` executable. This could be fixed by doing `sudo apt-get install python` so that `which python2` succeeds.
Why does it need `python2`? Great question. We use it to generate the HTML page for the task timeline visualization in the web UI. This uses the catapult project https://github.com/catapult-project/catapult, which currently requires python 2. The relevant line in the codebase is
https://github.com/ray-project/ray/blob/0dcf36c91eb52b2a75d992af60c7d5ba13ba1eee/src/thirdparty/build_ui.sh#L52
Actually, now I'm confused about the error. It's failing at the line above, but it should have exited the script at
https://github.com/ray-project/ray/blob/0dcf36c91eb52b2a75d992af60c7d5ba13ba1eee/src/thirdparty/build_ui.sh#L26
and never gotten to that line.
Could you try doing `export INCLUDE_UI=0` before building Ray?
Since we forked catapult anyway, we could just make the relevant script work with Python 3. It probably wouldn't be that hard, but I haven't looked into it yet.
It feels like catapult is not something which is really required for day-to-day use of Ray, no?
Line:
```
if [ ! type python2 > /dev/null ]; then
```
is buggy. This is not valid Bash.
`[` and `]` should not be there.
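The intended check is presumably `if ! type python2 > /dev/null 2>&1; then`, negating the command directly instead of wrapping it in `[ ... ]`.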
> Could you try doing `export INCLUDE_UI=0` before building Ray?
I could do it for testing, but in my case I am installing this through pip from `requirements.txt` so it might be a bit tricky to pass it all the way down.
After fixing that, the next problem is that the following files from `setup.py` are not available anymore so installation fails:
```
"ray/core/src/catapult_files/index.html",
"ray/core/src/catapult_files/trace_viewer_full.html",
``` | 2017-10-09T22:10:41 |
|
ray-project/ray | 1,152 | ray-project__ray-1152 | [
"955"
] | 684e62e784020dadf2d6977f352159e3d15afb3e | diff --git a/python/ray/worker.py b/python/ray/worker.py
--- a/python/ray/worker.py
+++ b/python/ray/worker.py
@@ -1009,6 +1009,8 @@ def _initialize_serialization(worker=global_worker):
serialize several exception classes that we define for error handling.
"""
worker.serialization_context = pyarrow.SerializationContext()
+ pyarrow.register_default_serialization_handlers(
+ worker.serialization_context)
# Define a custom serializer and deserializer for handling Object IDs.
def objectid_custom_serializer(obj):
@@ -1018,85 +1020,10 @@ def objectid_custom_deserializer(serialized_obj):
return ray.local_scheduler.ObjectID(serialized_obj)
worker.serialization_context.register_type(
- ray.local_scheduler.ObjectID, 20 * b"\x00", pickle=False,
+ ray.local_scheduler.ObjectID, "ray.ObjectID", pickle=False,
custom_serializer=objectid_custom_serializer,
custom_deserializer=objectid_custom_deserializer)
- # Define a custom serializer and deserializer for handling numpy arrays
- # that contain objects.
- def array_custom_serializer(obj):
- return obj.tolist(), obj.dtype.str
-
- def array_custom_deserializer(serialized_obj):
- return np.array(serialized_obj[0], dtype=np.dtype(serialized_obj[1]))
-
- worker.serialization_context.register_type(
- np.ndarray, 20 * b"\x01", pickle=False,
- custom_serializer=array_custom_serializer,
- custom_deserializer=array_custom_deserializer)
-
- def ordered_dict_custom_serializer(obj):
- return list(obj.keys()), list(obj.values())
-
- def ordered_dict_custom_deserializer(obj):
- return collections.OrderedDict(zip(obj[0], obj[1]))
-
- worker.serialization_context.register_type(
- collections.OrderedDict, 20 * b"\x02", pickle=False,
- custom_serializer=ordered_dict_custom_serializer,
- custom_deserializer=ordered_dict_custom_deserializer)
-
- def default_dict_custom_serializer(obj):
- return list(obj.keys()), list(obj.values()), obj.default_factory
-
- def default_dict_custom_deserializer(obj):
- return collections.defaultdict(obj[2], zip(obj[0], obj[1]))
-
- worker.serialization_context.register_type(
- collections.defaultdict, 20 * b"\x03", pickle=False,
- custom_serializer=default_dict_custom_serializer,
- custom_deserializer=default_dict_custom_deserializer)
-
- def _serialize_pandas_series(s):
- import pandas as pd
- # TODO: serializing Series without extra copy
- serialized = pyarrow.serialize_pandas(pd.DataFrame({s.name: s}))
- return {
- 'type': 'Series',
- 'data': serialized.to_pybytes()
- }
-
- def _serialize_pandas_dataframe(df):
- return {
- 'type': 'DataFrame',
- 'data': pyarrow.serialize_pandas(df).to_pybytes()
- }
-
- def _deserialize_callback_pandas(data):
- deserialized = pyarrow.deserialize_pandas(data['data'])
- type_ = data['type']
- if type_ == 'Series':
- return deserialized[deserialized.columns[0]]
- elif type_ == 'DataFrame':
- return deserialized
- else:
- raise ValueError(type_)
-
- try:
- import pandas as pd
- worker.serialization_context.register_type(
- pd.Series, 'pandas.Series',
- custom_serializer=_serialize_pandas_series,
- custom_deserializer=_deserialize_callback_pandas)
-
- worker.serialization_context.register_type(
- pd.DataFrame, 'pandas.DataFrame',
- custom_serializer=_serialize_pandas_dataframe,
- custom_deserializer=_deserialize_callback_pandas)
- except ImportError:
- # no pandas
- pass
-
if worker.mode in [SCRIPT_MODE, SILENT_MODE]:
# These should only be called on the driver because _register_class
# will export the class to all of the workers.
| Ray fails to serialize Torch tensor.
I'm running into problems serializing pytorch tensors.
```python
import ray
import torch
ray.init()
x = torch.Tensor(10)
x_id = ray.put(x)
result = ray.get(x_id)
```
Inspecting the values shows that `x` and `result` are different.
```python
>>> x
0.0000e+00
-8.5899e+09
0.0000e+00
-8.5899e+09
5.6052e-45
1.4714e-43
1.5975e-43
1.4153e-43
1.3873e-43
1.6255e-43
[torch.FloatTensor of size 10]
>>> result
[torch.FloatTensor with no dimension]
>>> result[0]
RuntimeError: dimension 1 out of range of 0D tensor at /Users/soumith/code/builder/wheel/pytorch-src/torch/lib/TH/generic/THTensor.c:24
```
cc @vitchyr
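A possible interim workaround (untested) is to force the pickle fallback for tensors, the same trick later suggested for pandas DataFrames, since torch tensors support pickling:
```python
import torch
import ray

ray.init()
# Depending on the torch version, each concrete tensor type
# (e.g. torch.FloatTensor) may need to be registered separately.
ray.register_custom_serializer(torch.Tensor, use_pickle=True)

x = torch.Tensor(10)
assert ray.get(ray.put(x)).size() == x.size()
```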
| The fix for this is waiting on #1138. | 2017-10-21T19:45:54 |
|
ray-project/ray | 1,245 | ray-project__ray-1245 | [
"1183"
] | 2ae5a8484f9a55fabdefa7fa4facdb34a34bd0bf | diff --git a/python/ray/services.py b/python/ray/services.py
--- a/python/ray/services.py
+++ b/python/ray/services.py
@@ -4,6 +4,8 @@
import binascii
from collections import namedtuple, OrderedDict
+import cloudpickle
+import json
import os
import psutil
import random
@@ -261,6 +263,68 @@ def wait_for_redis_to_start(redis_ip_address, redis_port, num_retries=5):
"configured properly.")
+def _compute_version_info():
+ """Compute the versions of Python, cloudpickle, and Ray.
+
+ Returns:
+ A tuple containing the version information.
+ """
+ ray_version = ray.__version__
+ ray_location = ray.__file__
+ python_version = ".".join(map(str, sys.version_info[:3]))
+ cloudpickle_version = cloudpickle.__version__
+ return ray_version, ray_location, python_version, cloudpickle_version
+
+
+def _put_version_info_in_redis(redis_client):
+ """Store version information in Redis.
+
+ This will be used to detect if workers or drivers are started using
+ different versions of Python, cloudpickle, or Ray.
+
+ Args:
+ redis_client: A client for the primary Redis shard.
+ """
+ redis_client.set("VERSION_INFO", json.dumps(_compute_version_info()))
+
+
+def check_version_info(redis_client):
+ """Check if various version info of this process is correct.
+
+ This will be used to detect if workers or drivers are started using
+ different versions of Python, cloudpickle, or Ray. If the version
+ information is not present in Redis, then no check is done.
+
+ Args:
+ redis_client: A client for the primary Redis shard.
+
+ Raises:
+ Exception: An exception is raised if there is a version mismatch.
+ """
+ redis_reply = redis_client.get("VERSION_INFO")
+
+ # Don't do the check if there is no version information in Redis. This
+ # is to make it easier to do things like start the processes by hand.
+ if redis_reply is None:
+ return
+
+ true_version_info = tuple(json.loads(redis_reply.decode("ascii")))
+ version_info = _compute_version_info()
+ if version_info != true_version_info:
+ node_ip_address = ray.services.get_node_ip_address()
+ raise Exception("Version mismatch: The cluster was started with:\n"
+ " Ray: " + true_version_info[0] + "\n"
+ " Ray location: " + true_version_info[1] + "\n"
+ " Python: " + true_version_info[2] + "\n"
+ " Cloudpickle: " + true_version_info[3] + "\n"
+ "This process on node " + node_ip_address +
+ " was started with:" + "\n"
+ " Ray: " + version_info[0] + "\n"
+ " Ray location: " + version_info[1] + "\n"
+ " Python: " + version_info[2] + "\n"
+ " Cloudpickle: " + version_info[3])
+
+
def start_redis(node_ip_address,
port=None,
num_redis_shards=1,
@@ -311,6 +375,9 @@ def start_redis(node_ip_address,
# can access it and know whether or not to redirect their output.
redis_client.set("RedirectOutput", 1 if redirect_worker_output else 0)
+ # Store version information in the primary Redis shard.
+ _put_version_info_in_redis(redis_client)
+
# Start other Redis shards listening on random ports. Each Redis shard logs
# to a separate file, prefixed by "redis-<shard number>".
redis_shards = []
diff --git a/python/ray/worker.py b/python/ray/worker.py
--- a/python/ray/worker.py
+++ b/python/ray/worker.py
@@ -1701,6 +1701,11 @@ def connect(info, object_id_seed=None, mode=WORKER_MODE, worker=global_worker,
redis_ip_address, redis_port = info["redis_address"].split(":")
worker.redis_client = redis.StrictRedis(host=redis_ip_address,
port=int(redis_port))
+
+ # Check that the version information matches the version information that
+ # the Ray cluster was started with.
+ ray.services.check_version_info(worker.redis_client)
+
worker.lock = threading.Lock()
# Check the RedirectOutput key in Redis and based on its value redirect
diff --git a/python/ray/workers/default_worker.py b/python/ray/workers/default_worker.py
--- a/python/ray/workers/default_worker.py
+++ b/python/ray/workers/default_worker.py
@@ -41,12 +41,13 @@ def create_redis_client(redis_address):
return redis.StrictRedis(host=redis_ip_address, port=int(redis_port))
-def push_error_to_all_drivers(redis_client, message):
+def push_error_to_all_drivers(redis_client, message, error_type):
"""Push an error message to all drivers.
Args:
redis_client: The redis client to use.
message: The error message to push.
+ error_type: The type of the error.
"""
DRIVER_ID_LENGTH = 20
# We use a driver ID of all zeros to push an error message to all
@@ -54,7 +55,7 @@ def push_error_to_all_drivers(redis_client, message):
driver_id = DRIVER_ID_LENGTH * b"\x00"
error_key = b"Error:" + driver_id + b":" + random_string()
# Create a Redis client.
- redis_client.hmset(error_key, {"type": "worker_crash",
+ redis_client.hmset(error_key, {"type": error_type,
"message": message})
redis_client.rpush("ErrorKeys", error_key)
@@ -79,6 +80,13 @@ def push_error_to_all_drivers(redis_client, message):
ray.worker.connect(info, mode=ray.WORKER_MODE, actor_id=actor_id)
+ try:
+ ray.services.check_version_info(ray.worker.global_worker.redis_client)
+ except Exception as e:
+ traceback_str = traceback.format_exc()
+ push_error_to_all_drivers(ray.worker.global_worker.redis_client,
+ traceback_str, "version_mismatch")
+
error_explanation = """
This error is unexpected and should not have happened. Somehow a worker
crashed in an unanticipated way causing the main_loop to throw an exception,
@@ -96,7 +104,7 @@ def push_error_to_all_drivers(redis_client, message):
traceback_str = traceback.format_exc() + error_explanation
# Create a Redis client.
redis_client = create_redis_client(args.redis_address)
- push_error_to_all_drivers(redis_client, traceback_str)
+ push_error_to_all_drivers(redis_client, traceback_str, "worker_crash")
# TODO(rkn): Note that if the worker was in the middle of executing
# a task, then any worker or driver that is blocking in a get call
# and waiting for the output of that task will hang. We need to
| diff --git a/test/failure_test.py b/test/failure_test.py
--- a/test/failure_test.py
+++ b/test/failure_test.py
@@ -259,6 +259,21 @@ def get_val(self, x):
class WorkerDeath(unittest.TestCase):
+ def testWorkerRaisingException(self):
+ ray.init(num_workers=1, driver_mode=ray.SILENT_MODE)
+
+ @ray.remote
+ def f():
+ ray.worker.global_worker._get_next_task_from_local_scheduler = None
+
+ # Running this task should cause the worker to raise an exception after
+ # the task has successfully completed.
+ f.remote()
+
+ wait_for_errors(b"worker_crash", 1)
+ wait_for_errors(b"worker_died", 1)
+ self.assertEqual(len(ray.error_info()), 2)
+
def testWorkerDying(self):
ray.init(num_workers=0, driver_mode=ray.SILENT_MODE)
@@ -434,5 +449,20 @@ def put_task():
ray.worker.cleanup()
+class ConfigurationTest(unittest.TestCase):
+
+ def testVersionMismatch(self):
+ import cloudpickle
+ cloudpickle_version = cloudpickle.__version__
+ cloudpickle.__version__ = "fake cloudpickle version"
+
+ ray.init(num_workers=1, driver_mode=ray.SILENT_MODE)
+
+ wait_for_errors(b"version_mismatch", 1)
+
+ cloudpickle.__version__ = cloudpickle_version
+ ray.worker.cleanup()
+
+
if __name__ == "__main__":
unittest.main(verbosity=2)
| Print warning if different versions of Ray, Python, or cloudpickle are being used.
See #1181 for an example of an error caused by this problem.
That said, we probably don't want to cause an outright failure if the versions differ.
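A soft-failure variant could compare the same version tuple but warn instead of raising; a sketch with hypothetical helper names:
```python
import sys
import warnings

import cloudpickle
import ray

def version_info():
    return (ray.__version__,
            ".".join(map(str, sys.version_info[:3])),
            cloudpickle.__version__)

def warn_on_mismatch(cluster_versions):
    local_versions = version_info()
    if local_versions != tuple(cluster_versions):
        warnings.warn("Version mismatch: cluster started with %s, this "
                      "process has %s" % (cluster_versions, local_versions))
```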
| 2017-11-23T02:26:11 |
|
ray-project/ray | 1,261 | ray-project__ray-1261 | [
"1259"
] | 2865128df0b28930cd5d178025150ecce0b7c2ec | diff --git a/python/ray/worker.py b/python/ray/worker.py
--- a/python/ray/worker.py
+++ b/python/ray/worker.py
@@ -1476,12 +1476,12 @@ def print_error_messages(worker):
worker.error_message_pubsub_client = worker.redis_client.pubsub()
# Exports that are published after the call to
- # error_message_pubsub_client.psubscribe and before the call to
+ # error_message_pubsub_client.subscribe and before the call to
# error_message_pubsub_client.listen will still be processed in the loop.
- worker.error_message_pubsub_client.psubscribe("__keyspace@0__:ErrorKeys")
+ worker.error_message_pubsub_client.subscribe("__keyspace@0__:ErrorKeys")
num_errors_received = 0
- # Get the exports that occurred before the call to psubscribe.
+ # Get the exports that occurred before the call to subscribe.
with worker.lock:
error_keys = worker.redis_client.lrange("ErrorKeys", 0, -1)
for error_key in error_keys:
@@ -1589,13 +1589,13 @@ def fetch_and_execute_function_to_run(key, worker=global_worker):
def import_thread(worker, mode):
worker.import_pubsub_client = worker.redis_client.pubsub()
# Exports that are published after the call to
- # import_pubsub_client.psubscribe and before the call to
+ # import_pubsub_client.subscribe and before the call to
# import_pubsub_client.listen will still be processed in the loop.
- worker.import_pubsub_client.psubscribe("__keyspace@0__:Exports")
+ worker.import_pubsub_client.subscribe("__keyspace@0__:Exports")
# Keep track of the number of imports that we've imported.
num_imported = 0
- # Get the exports that occurred before the call to psubscribe.
+ # Get the exports that occurred before the call to subscribe.
with worker.lock:
export_keys = worker.redis_client.lrange("Exports", 0, -1)
for key in export_keys:
@@ -1627,7 +1627,7 @@ def import_thread(worker, mode):
try:
for msg in worker.import_pubsub_client.listen():
with worker.lock:
- if msg["type"] == "psubscribe":
+ if msg["type"] == "subscribe":
continue
assert msg["data"] == b"rpush"
num_imports = worker.redis_client.llen("Exports")
| Initial sets of tasks are slow on a large cluster.
### System information
- **OS Platform and Distribution (e.g., Linux Ubuntu 16.04)**: Ubuntu 16.04
- **Ray installed from (source or binary)**: source
- **Ray version**: Testing this PR https://github.com/ray-project/ray/pull/1257, but I'm pretty sure it happens with the current master at e583d5a421c6287505b885246bed731a957cd0bb.
- **Python version**: Python 3.5.2, Anaconda 4.2.0 (64-bit)
I first start Ray on the head node with
```
ray start --head --redis-port=6379 --redis-max-clients=65504
```
Then I start Ray on the workers with
```
parallel-ssh -h workers.txt -P -I -p 200 < start_worker.sh
```
using a `start_worker.sh` containing
```
export PATH=/home/ubuntu/anaconda3/bin/:$PATH
ray start --redis-address=<head-ip-address>:6379
```
Then I start a driver (on the head node) with
```python
import ray
import time
ray.init(redis_address=<head-node-ip>)
@ray.remote
def f():
time.sleep(0.01)
return ray.services.get_node_ip_address()
```
Then I run
```
%time l = set(ray.get([f.remote() for _ in range(1000)]))
```
in the driver a number of times. The first couple times take on the order of 50 seconds. Subsequent runs take around 170 milliseconds.
In `/tmp/raylogs`, a number of processes contain lines like
```
[WARN] (/home/ubuntu/ray/src/common/state/db_client_table.cc:54) calling redis_get_cached_db_client in a loop in with 1 manager IDs took 1038 milliseconds.
```
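The fix in this PR swaps `psubscribe` for `subscribe` on these exact channel names. A plausible explanation for the slowdown is that Redis matches every published message against every registered pattern, so hundreds of pattern subscribers make each publish expensive, while exact channels are a direct lookup; a minimal contrast in redis-py:
```python
import redis

pubsub = redis.StrictRedis().pubsub()
pubsub.subscribe("__keyspace@0__:Exports")     # exact channel (cheap)
# pubsub.psubscribe("__keyspace@0__:Exports")  # pattern form this PR removes

for msg in pubsub.listen():
    if msg["type"] == "subscribe":             # skip the subscription ack
        continue
    print(msg["channel"], msg["data"])
```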
| 2017-11-27T03:48:34 |
||
ray-project/ray | 1,283 | ray-project__ray-1283 | [
"1282"
] | 26125e154744975abfb9fdbaf7663f57642c1dfd | diff --git a/doc/source/conf.py b/doc/source/conf.py
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -32,8 +32,6 @@
"tensorflow.python",
"tensorflow.python.client",
"tensorflow.python.util",
- "pyarrow",
- "pyarrow.plasma",
"smart_open",
"ray.local_scheduler",
"ray.plasma",
diff --git a/python/ray/__init__.py b/python/ray/__init__.py
--- a/python/ray/__init__.py
+++ b/python/ray/__init__.py
@@ -4,6 +4,12 @@
import os
import sys
+
+if "pyarrow" in sys.modules:
+ raise ImportError("Ray must be imported before pyarrow because Ray "
+ "requires a specific version of pyarrow (which is "
+ "packaged along with Ray).")
+
# Add the directory containing pyarrow to the Python path so that we find the
# pyarrow version packaged with ray and not a pre-existing pyarrow.
pyarrow_path = os.path.join(os.path.abspath(os.path.dirname(__file__)),
| ray.init() crashes if pyarrow was imported before ray
### System information
- **OS Platform and Distribution (e.g., Linux Ubuntu 16.04)**: macOS 10.13.1
- **Ray installed from (source or binary)**: Binary wheel (pip, `ray-0.3.0-cp36-cp36m-macosx_10_6_intel.whl`)
- **Ray version**: 0.3.0
- **pyarrow version**: 0.7.1 (pip, `pyarrow-0.7.1-cp36-cp36m-macosx_10_6_intel.whl`)
- **Python version**: 3.6
- **Exact command to reproduce**: `import pyarrow; import ray; ray.init()`
### Describe the problem
If I don't import pyarrow, or if I import ray and then pyarrow, calling ray.init() works. Otherwise the process crashes with a segmentation fault.
Also, `pyarrow` comes before `ray` in alphabetical order.
### Source code / logs
```
[py: ipython3] remram 12:08:01
randy /private/tmp$ python
Python 3.6.3 (default, Oct 7 2017, 02:03:21)
[GCC 4.2.1 Compatible Apple LLVM 9.0.0 (clang-900.0.37)] on darwin
Type "help", "copyright", "credits" or "license" for more information.
>>> import pyarrow
>>> import ray
>>> ray.init()
Waiting for redis server at 127.0.0.1:55607 to respond...
Waiting for redis server at 127.0.0.1:11827 to respond...
Allowing the Plasma store to use up to 13.7439GB of memory.
Starting object store with directory /tmp and huge page support disabled
Starting local scheduler with 8 CPUs, 0 GPUs
======================================================================
View the web UI at http://localhost:8896/notebooks/ray_ui34099.ipynb?token=78475c82f783b6bd1684744c9105e11a6fa623da814fb515
======================================================================
Disconnecting client on fd 9
Segmentation fault: 11
[e: 139] [py: ipython3] remram 12:08:10
randy /private/tmp$ python
Python 3.6.3 (default, Oct 7 2017, 02:03:21)
[GCC 4.2.1 Compatible Apple LLVM 9.0.0 (clang-900.0.37)] on darwin
Type "help", "copyright", "credits" or "license" for more information.
>>> import ray
>>> import pyarrow
>>> ray.init()
Waiting for redis server at 127.0.0.1:61223 to respond...
Waiting for redis server at 127.0.0.1:24990 to respond...
Allowing the Plasma store to use up to 13.7439GB of memory.
Starting object store with directory /tmp and huge page support disabled
Starting local scheduler with 8 CPUs, 0 GPUs
======================================================================
View the web UI at http://localhost:8897/notebooks/ray_ui14621.ipynb?token=4a184fb11f4cbd65674d3a4910a2b8e9d20ddb4172128476
======================================================================
{'node_ip_address': '127.0.0.1', 'redis_address': '127.0.0.1:61223', 'object_store_addresses': [ObjectStoreAddress(name='/tmp/plasma_store36375821', manager_name='/tmp/plasma_manager51291755', manager_port=31461)], 'local_scheduler_socket_names': ['/tmp/scheduler78575961'], 'webui_url': 'http://localhost:8897/notebooks/ray_ui14621.ipynb?token=4a184fb11f4cbd65674d3a4910a2b8e9d20ddb4172128476'}
>>>
```
| Thanks for reporting the issue.
Currently we ship our own version of pyarrow with Ray. When you call `import ray`, we modify the python path so that `import pyarrow` finds our version of pyarrow. See
https://github.com/ray-project/ray/blob/c21e18937137a17205d7ee0aa6158e7058ef6796/python/ray/__init__.py#L9-L11
When you do `import pyarrow` before `import ray`, you are actually getting a different version of pyarrow. At the moment, pyarrow is undergoing a lot of development and so we rely on using pyarrow at very specific commits. Down the road we'll just install pyarrow through pip and not ship it ourselves.
Maybe what we should do here (to prevent the crash) is to check if pyarrow has already been imported and raise an exception if so (when ray is imported).
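Concretely, such a guard is just a `sys.modules` check at the top of `ray/__init__.py`, which is essentially what the patch above adds:
```python
import sys

if "pyarrow" in sys.modules:
    raise ImportError("Ray must be imported before pyarrow because Ray "
                      "requires a specific version of pyarrow (which is "
                      "packaged along with Ray).")
```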
That would be ideal. Also consider mentioning this in your documentation. | 2017-12-01T21:48:01 |
|
ray-project/ray | 1,307 | ray-project__ray-1307 | [
"1252"
] | 2606001a369086eaafbca13674d21d5a2c78aa67 | diff --git a/python/ray/services.py b/python/ray/services.py
--- a/python/ray/services.py
+++ b/python/ray/services.py
@@ -8,6 +8,7 @@
import json
import os
import psutil
+import pyarrow
import random
import redis
import shutil
@@ -279,23 +280,25 @@ def wait_for_redis_to_start(redis_ip_address, redis_port, num_retries=5):
def _compute_version_info():
- """Compute the versions of Python, cloudpickle, and Ray.
+ """Compute the versions of Python, cloudpickle, pyarrow, and Ray.
Returns:
A tuple containing the version information.
"""
ray_version = ray.__version__
- ray_location = ray.__file__
+ ray_location = os.path.abspath(ray.__file__)
python_version = ".".join(map(str, sys.version_info[:3]))
cloudpickle_version = cloudpickle.__version__
- return ray_version, ray_location, python_version, cloudpickle_version
+ pyarrow_version = pyarrow.__version__
+ return (ray_version, ray_location, python_version, cloudpickle_version,
+ pyarrow_version)
def _put_version_info_in_redis(redis_client):
"""Store version information in Redis.
This will be used to detect if workers or drivers are started using
- different versions of Python, cloudpickle, or Ray.
+ different versions of Python, cloudpickle, pyarrow, or Ray.
Args:
redis_client: A client for the primary Redis shard.
@@ -307,7 +310,7 @@ def check_version_info(redis_client):
"""Check if various version info of this process is correct.
This will be used to detect if workers or drivers are started using
- different versions of Python, cloudpickle, or Ray. If the version
+ different versions of Python, cloudpickle, pyarrow, or Ray. If the version
information is not present in Redis, then no check is done.
Args:
@@ -332,12 +335,14 @@ def check_version_info(redis_client):
" Ray location: " + true_version_info[1] + "\n"
" Python: " + true_version_info[2] + "\n"
" Cloudpickle: " + true_version_info[3] + "\n"
+ " Pyarrow: " + true_version_info[4] + "\n"
"This process on node " + node_ip_address +
" was started with:" + "\n"
" Ray: " + version_info[0] + "\n"
" Ray location: " + version_info[1] + "\n"
" Python: " + version_info[2] + "\n"
- " Cloudpickle: " + version_info[3])
+ " Cloudpickle: " + version_info[3] + "\n"
+ " Pyarrow: " + version_info[4])
def start_redis(node_ip_address,
| Ray cloudpickle, python, ray version check incompatible with `python setup.py develop`
### Describe the problem
When initializing ray that was installed with `python setup.py develop` we get the following error:
```
Exception: Version mismatch: The cluster was started with:
Ray: 0.2.2
Ray location: ray/__init__.pyc
Python: 2.7.14
Cloudpickle: 0.4.0
This process on node 192.168.1.205 was started with:
Ray: 0.2.2
Ray location: /Users/pcmoritz/ray/python/ray/__init__.pyc
Python: 2.7.14
Cloudpickle: 0.4.0
```
### System information
- **OS Platform and Distribution (e.g., Linux Ubuntu 16.04)**: Mac OS
- **Ray installed from (source or binary)**: source
- **Ray version**: latest master
- **Python version**: 2.7
- **Exact command to reproduce**:
| Note that this only happens if ray is used from within the ray/ folder (e.g. running an example), so it is not super severe but we should nonetheless fix it to make the user experience smooth.
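The location mismatch comes from `ray.__file__` being a relative path when the package is imported from inside the repo; normalizing it (as the patch does with `os.path.abspath`) makes the comparison stable:
```python
import os
import ray

print(ray.__file__)                    # can be "ray/__init__.pyc" inside ray/
print(os.path.abspath(ray.__file__))   # normalized form used after the patch
```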
I'm having trouble reproducing this with Python 3.
I ran into the same issue as of commit 044548bcfff4af4fdf1150ba6e65501b55feaa94 .
OS Platform and Distribution (e.g., Linux Ubuntu 16.04): Mac OS
Ray installed from (source or binary): source
Ray version: 044548bcfff4af4fdf1150ba6e65501b55feaa94
Python version: 2.7.12
Exact command to reproduce:
* inside ipython session: `import ray; ray.init()`
Does the error always happen? Or only if you are inside of `ray/python`? | 2017-12-09T08:59:23 |
|
ray-project/ray | 1,316 | ray-project__ray-1316 | [
"1317"
] | b1d89026cdd41d7437972de256eb3371bb3bc5c1 | diff --git a/python/ray/local_scheduler/__init__.py b/python/ray/local_scheduler/__init__.py
--- a/python/ray/local_scheduler/__init__.py
+++ b/python/ray/local_scheduler/__init__.py
@@ -4,9 +4,9 @@
from ray.core.src.local_scheduler.liblocal_scheduler_library import (
Task, LocalSchedulerClient, ObjectID, check_simple_value, task_from_string,
- task_to_string, _config)
+ task_to_string, _config, common_error)
from .local_scheduler_services import start_local_scheduler
__all__ = ["Task", "LocalSchedulerClient", "ObjectID", "check_simple_value",
"task_from_string", "task_to_string", "start_local_scheduler",
- "_config"]
+ "_config", "common_error"]
| diff --git a/test/runtest.py b/test/runtest.py
--- a/test/runtest.py
+++ b/test/runtest.py
@@ -284,6 +284,24 @@ def __init__(self):
ray.worker.cleanup()
+ def testPuttingObjectThatClosesOverObjectID(self):
+ # This test is here to prevent a regression of
+ # https://github.com/ray-project/ray/issues/1317.
+ ray.init(num_workers=0)
+
+ class Foo(object):
+ def __init__(self):
+ self.val = ray.put(0)
+
+ def method(self):
+ f
+
+ f = Foo()
+ with self.assertRaises(ray.local_scheduler.common_error):
+ ray.put(f)
+
+ ray.worker.cleanup()
+
class WorkerTest(unittest.TestCase):
def testPythonWorkers(self):
| Segfault when putting object whose class closes over an ObjectID.
The following causes a segfault.
```python
import ray
class Foo():
def __init__(self):
self.val = ray.put(0)
def method(self):
f
ray.init()
f = Foo()
ray.put(f)
```
The error appears to happen when trying to pickle the class `Foo`. We prevent object IDs from being pickled at
https://github.com/ray-project/ray/blob/b1d89026cdd41d7437972de256eb3371bb3bc5c1/src/common/lib/python/common_extension.cc#L205-L208
However, we never registered `CommonError` with the python extension module. I think that's the problem. Should be fixed by #1316.
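With `common_error` exported, the failure becomes a catchable exception instead of a crash — roughly what the regression test in this PR asserts:
```python
import ray

class Foo(object):
    def __init__(self):
        self.val = ray.put(0)
    def method(self):
        f  # closure over the module-level instance

ray.init()
f = Foo()
try:
    ray.put(f)
except ray.local_scheduler.common_error:
    print("ObjectIDs cannot be pickled")
```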
| 2017-12-12T21:27:35 |
|
ray-project/ray | 1,413 | ray-project__ray-1413 | [
"1305"
] | 4b1c8be4fe3a5764869d0d78d8c58046492a77f6 | diff --git a/python/ray/dataframe/__init__.py b/python/ray/dataframe/__init__.py
--- a/python/ray/dataframe/__init__.py
+++ b/python/ray/dataframe/__init__.py
@@ -6,10 +6,5 @@
from .dataframe import from_pandas
from .dataframe import to_pandas
from .series import Series
-import ray
-import pandas as pd
__all__ = ["DataFrame", "from_pandas", "to_pandas", "Series"]
-
-ray.register_custom_serializer(pd.DataFrame, use_pickle=True)
-ray.register_custom_serializer(pd.core.indexes.base.Index, use_pickle=True)
| diff --git a/src/plasma/test/client_tests.cc b/src/plasma/test/client_tests.cc
--- a/src/plasma/test/client_tests.cc
+++ b/src/plasma/test/client_tests.cc
@@ -32,7 +32,7 @@ TEST plasma_status_tests(void) {
int64_t data_size = 100;
uint8_t metadata[] = {5};
int64_t metadata_size = sizeof(metadata);
- uint8_t *data;
+ std::shared_ptr<MutableBuffer> data;
ARROW_CHECK_OK(
client1.Create(oid1, data_size, metadata, metadata_size, &data));
ARROW_CHECK_OK(client1.Seal(oid1));
@@ -73,7 +73,7 @@ TEST plasma_fetch_tests(void) {
int64_t data_size = 100;
uint8_t metadata[] = {5};
int64_t metadata_size = sizeof(metadata);
- uint8_t *data;
+ std::shared_ptr<MutableBuffer> data;
ARROW_CHECK_OK(
client1.Create(oid1, data_size, metadata, metadata_size, &data));
ARROW_CHECK_OK(client1.Seal(oid1));
@@ -116,7 +116,9 @@ void init_data_123(uint8_t *data, uint64_t size, uint8_t base) {
}
}
-bool is_equal_data_123(uint8_t *data1, uint8_t *data2, uint64_t size) {
+bool is_equal_data_123(const uint8_t *data1,
+ const uint8_t *data2,
+ uint64_t size) {
for (size_t i = 0; i < size; i++) {
if (data1[i] != data2[i]) {
return false;
@@ -142,14 +144,15 @@ TEST plasma_nonblocking_get_tests(void) {
int64_t data_size = 4;
uint8_t metadata[] = {5};
int64_t metadata_size = sizeof(metadata);
- uint8_t *data;
+ std::shared_ptr<MutableBuffer> data;
ARROW_CHECK_OK(client.Create(oid, data_size, metadata, metadata_size, &data));
- init_data_123(data, data_size, 0);
+ init_data_123(data->mutable_data(), data_size, 0);
ARROW_CHECK_OK(client.Seal(oid));
sleep(1);
ARROW_CHECK_OK(client.Get(oid_array, 1, 0, &obj_buffer));
- ASSERT(is_equal_data_123(data, obj_buffer.data, data_size) == true);
+ ASSERT(is_equal_data_123(data->data(), obj_buffer.data->data(), data_size) ==
+ true);
sleep(1);
ARROW_CHECK_OK(client.Disconnect());
@@ -191,7 +194,7 @@ TEST plasma_wait_for_objects_tests(void) {
int64_t data_size = 4;
uint8_t metadata[] = {5};
int64_t metadata_size = sizeof(metadata);
- uint8_t *data;
+ std::shared_ptr<MutableBuffer> data;
ARROW_CHECK_OK(
client1.Create(oid1, data_size, metadata, metadata_size, &data));
ARROW_CHECK_OK(client1.Seal(oid1));
@@ -245,23 +248,23 @@ TEST plasma_get_tests(void) {
int64_t data_size = 4;
uint8_t metadata[] = {5};
int64_t metadata_size = sizeof(metadata);
- uint8_t *data;
+ std::shared_ptr<MutableBuffer> data;
ARROW_CHECK_OK(
client1.Create(oid1, data_size, metadata, metadata_size, &data));
- init_data_123(data, data_size, 1);
+ init_data_123(data->mutable_data(), data_size, 1);
ARROW_CHECK_OK(client1.Seal(oid1));
ARROW_CHECK_OK(client1.Get(oid_array1, 1, -1, &obj_buffer));
- ASSERT(data[0] == obj_buffer.data[0]);
+ ASSERT(data->data()[0] == obj_buffer.data->data()[0]);
ARROW_CHECK_OK(
client2.Create(oid2, data_size, metadata, metadata_size, &data));
- init_data_123(data, data_size, 2);
+ init_data_123(data->mutable_data(), data_size, 2);
ARROW_CHECK_OK(client2.Seal(oid2));
ARROW_CHECK_OK(client1.Fetch(1, oid_array2));
ARROW_CHECK_OK(client1.Get(oid_array2, 1, -1, &obj_buffer));
- ASSERT(data[0] == obj_buffer.data[0]);
+ ASSERT(data->data()[0] == obj_buffer.data->data()[0]);
sleep(1);
ARROW_CHECK_OK(client1.Disconnect());
@@ -288,25 +291,25 @@ TEST plasma_get_multiple_tests(void) {
int64_t data_size = 4;
uint8_t metadata[] = {5};
int64_t metadata_size = sizeof(metadata);
- uint8_t *data;
+ std::shared_ptr<MutableBuffer> data;
ARROW_CHECK_OK(
client1.Create(oid1, data_size, metadata, metadata_size, &data));
- init_data_123(data, data_size, obj1_first);
+ init_data_123(data->mutable_data(), data_size, obj1_first);
ARROW_CHECK_OK(client1.Seal(oid1));
/* This only waits for oid1. */
ARROW_CHECK_OK(client1.Get(obj_ids, 1, -1, obj_buffer));
- ASSERT(data[0] == obj_buffer[0].data[0]);
+ ASSERT(data->data()[0] == obj_buffer[0].data->data()[0]);
ARROW_CHECK_OK(
client2.Create(oid2, data_size, metadata, metadata_size, &data));
- init_data_123(data, data_size, obj2_first);
+ init_data_123(data->mutable_data(), data_size, obj2_first);
ARROW_CHECK_OK(client2.Seal(oid2));
ARROW_CHECK_OK(client1.Fetch(2, obj_ids));
ARROW_CHECK_OK(client1.Get(obj_ids, 2, -1, obj_buffer));
- ASSERT(obj1_first == obj_buffer[0].data[0]);
- ASSERT(obj2_first == obj_buffer[1].data[0]);
+ ASSERT(obj1_first == obj_buffer[0].data->data()[0]);
+ ASSERT(obj2_first == obj_buffer[1].data->data()[0]);
sleep(1);
ARROW_CHECK_OK(client1.Disconnect());
| Worker dies when passed pandas DataFrame.
### System information
- **Ray version**: 0.3.0
- **Python version**: 3.6.0
- **Exact command to reproduce**:
```python
import pandas as pd
import ray
pd.__version__ # '0.19.2'
ray.init()
df = pd.DataFrame(data={'col1': [1, 2, 3, 4], 'col2': [3, 4, 5, 6]})
@ray.remote
def f(x):
pass
f.remote(df)
```
The last line causes the following error to be printed in the background.
```
A worker died or was killed while executing a task.
```
cc @devin-petersohn
| The original application that failed was
```python
import pandas as pd
import ray
ray.init()
data = pd.DataFrame(data={'col1': [1, 2, 3, 4], 'col2': [3, 4, 5, 6]})
@ray.remote
def test_fn(df):
return df.assign(result=df.col1 * df.col2)
remote_data = ray.put(data)
x = test_fn.remote(remote_data)
```
Note that as a workaround for now you can do
```python
ray.register_custom_serializer(type(data), use_pickle=True)
```
So while ray.put and ray.get work when used in the same process, they don't work if the DataFrame is put in one process and then gotten in a different one. Interesting.
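For reference, a minimal sketch that isolates the two cases (same toy DataFrame as the repro above; this is an illustration, not part of the original report):
```python
import pandas as pd
import ray

ray.init()
df = pd.DataFrame(data={'col1': [1, 2, 3, 4], 'col2': [3, 4, 5, 6]})

# Same process: the round trip through the object store succeeds.
assert ray.get(ray.put(df)).equals(df)

@ray.remote
def identity(x):
    return x

# Different process: the worker deserializes the DataFrame and dies.
print(ray.get(identity.remote(df)))
```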
Backtrace:
```
* thread #1: tid = 0xf9367, 0x0000000000000000, queue = 'com.apple.main-thread', stop reason = EXC_BAD_ACCESS (code=1, address=0x0)
* frame #0: 0x0000000000000000
frame #1: 0x0000000103cb0ff2 libarrow_python.0.dylib`arrow::py::GetValue(context=0x000000010de149f8, parent=0x000000010db944f8, arr=0x000000010db88488, index=0, type=0, base=<unavailable>, blobs=<unavailable>, result=<unavailable>) + 2562 at arrow_to_python.cc:171 [opt]
frame #2: 0x0000000103cb0459 libarrow_python.0.dylib`arrow::py::DeserializeList(context=0x000000010de149f8, array=<unavailable>, start_idx=<unavailable>, stop_idx=2, base=0x000000010ddf3fa8, blobs=<unavailable>, out=<unavailable>) + 265 at arrow_to_python.cc:206 [opt]
frame #3: 0x0000000103cb0133 libarrow_python.0.dylib`arrow::py::DeserializeDict(context=0x000000010de149f8, array=0x000000010db94318, start_idx=0, stop_idx=2, base=0x000000010ddf3fa8, blobs=<unavailable>, out=<unavailable>) + 227 at arrow_to_python.cc:72 [opt]
frame #4: 0x0000000103cb0ef7 libarrow_python.0.dylib`arrow::py::GetValue(context=0x000000010de149f8, parent=0x00000001007dceb8, arr=0x000000010db883e8, index=<unavailable>, type=0, base=<unavailable>, blobs=<unavailable>, result=<unavailable>) + 2311 at arrow_to_python.cc:156 [opt]
frame #5: 0x0000000103cb0459 libarrow_python.0.dylib`arrow::py::DeserializeList(context=0x000000010de149f8, array=<unavailable>, start_idx=<unavailable>, stop_idx=1, base=0x000000010ddf3fa8, blobs=<unavailable>, out=<unavailable>) + 265 at arrow_to_python.cc:206 [opt]
frame #6: 0x0000000103cb187f libarrow_python.0.dylib`arrow::py::DeserializeObject(context=0x000000010de149f8, obj=0x000000010ddf2c90, base=0x000000010ddf3fa8, out=0x00007fff5fbfe058) + 111 at arrow_to_python.cc:285 [opt]
frame #7: 0x00000001038057de lib.cpython-35m-darwin.so`__pyx_pf_7pyarrow_3lib_18SerializedPyObject_2deserialize(__pyx_v_self=0x000000010ddf2c78, __pyx_v_context=0x000000010de149f8) + 462 at lib.cxx:73367
frame #8: 0x0000000103805014 lib.cpython-35m-darwin.so`__pyx_pw_7pyarrow_3lib_18SerializedPyObject_3deserialize(__pyx_v_self=0x000000010ddf2c78, __pyx_args=0x000000010de2b2b0, __pyx_kwds=0x0000000000000000) + 772 at lib.cxx:73290
frame #9: 0x00000001000561fc libpython3.5m.dylib`PyCFunction_Call + 60
```
@robertnishihara created a PR in Arrow: https://github.com/apache/arrow/pull/1463 that fixes this. Once that gets pulled in we will need to update the serialization. | 2018-01-10T23:51:49 |
ray-project/ray | 1,457 | ray-project__ray-1457 | [
"1398"
] | 21a916009eb4705690c26037bf593ee0b6135cca | diff --git a/examples/parameter_server/async_parameter_server.py b/examples/parameter_server/async_parameter_server.py
--- a/examples/parameter_server/async_parameter_server.py
+++ b/examples/parameter_server/async_parameter_server.py
@@ -3,11 +3,9 @@
from __future__ import print_function
import argparse
-from tensorflow.examples.tutorials.mnist import input_data
import time
import ray
-
import model
parser = argparse.ArgumentParser(description="Run the asynchronous parameter "
@@ -35,9 +33,9 @@ def pull(self, keys):
@ray.remote
-def worker_task(ps, batch_size=50):
+def worker_task(ps, worker_index, batch_size=50):
# Download MNIST.
- mnist = input_data.read_data_sets("MNIST_data", one_hot=True)
+ mnist = model.download_mnist_retry(seed=worker_index)
# Initialize the model.
net = model.SimpleCNN()
@@ -65,10 +63,10 @@ def worker_task(ps, batch_size=50):
ps = ParameterServer.remote(all_keys, all_values)
# Start some training tasks.
- worker_tasks = [worker_task.remote(ps) for _ in range(args.num_workers)]
+ worker_tasks = [worker_task.remote(ps, i) for i in range(args.num_workers)]
# Download MNIST.
- mnist = input_data.read_data_sets("MNIST_data", one_hot=True)
+ mnist = model.download_mnist_retry()
i = 0
while True:
diff --git a/examples/parameter_server/model.py b/examples/parameter_server/model.py
--- a/examples/parameter_server/model.py
+++ b/examples/parameter_server/model.py
@@ -8,6 +8,18 @@
import ray
import tensorflow as tf
+from tensorflow.examples.tutorials.mnist import input_data
+import time
+
+
+def download_mnist_retry(seed=0, max_num_retries=20):
+ for _ in range(max_num_retries):
+ try:
+ return input_data.read_data_sets("MNIST_data", one_hot=True,
+ seed=seed)
+ except tf.errors.AlreadyExistsError:
+ time.sleep(1)
+ raise Exception("Failed to download MNIST.")
class SimpleCNN(object):
diff --git a/examples/parameter_server/sync_parameter_server.py b/examples/parameter_server/sync_parameter_server.py
--- a/examples/parameter_server/sync_parameter_server.py
+++ b/examples/parameter_server/sync_parameter_server.py
@@ -3,9 +3,7 @@
from __future__ import print_function
import argparse
-
import numpy as np
-from tensorflow.examples.tutorials.mnist import input_data
import ray
import model
@@ -36,8 +34,7 @@ class Worker(object):
def __init__(self, worker_index, batch_size=50):
self.worker_index = worker_index
self.batch_size = batch_size
- self.mnist = input_data.read_data_sets("MNIST_data", one_hot=True,
- seed=worker_index)
+ self.mnist = model.download_mnist_retry(seed=worker_index)
self.net = model.SimpleCNN()
def compute_gradients(self, weights):
@@ -60,7 +57,7 @@ def compute_gradients(self, weights):
for worker_index in range(args.num_workers)]
# Download MNIST.
- mnist = input_data.read_data_sets("MNIST_data", one_hot=True)
+ mnist = model.download_mnist_retry()
i = 0
current_weights = ps.get_weights.remote()
| Error in parameter server examples when multiple workers try to download MNIST at the same time.
To reproduce the example, run the following (making sure that `ray/examples/parameter_server/` does not have a copy of the MNIST data set)
```
cd ray/examples/parameter_server/
python async_parameter_server.py
```
Some tasks threw the following error
```
Remote function __main__.worker_task failed with:
Traceback (most recent call last):
File "async_parameter_server.py", line 40, in worker_task
mnist = input_data.read_data_sets("MNIST_data", one_hot=True)
File "/home/ubuntu/anaconda3/lib/python3.6/site-packages/tensorflow/contrib/learn/python/learn/datasets/mnist.py", line 245, in read_data_sets
source_url + TRAIN_LABELS)
File "/home/ubuntu/anaconda3/lib/python3.6/site-packages/tensorflow/contrib/learn/python/learn/datasets/base.py", line 209, in maybe_download
gfile.Copy(temp_file_name, filepath)
File "/home/ubuntu/anaconda3/lib/python3.6/site-packages/tensorflow/python/lib/io/file_io.py", line 385, in copy
compat.as_bytes(oldpath), compat.as_bytes(newpath), overwrite, status)
File "/home/ubuntu/anaconda3/lib/python3.6/site-packages/tensorflow/python/framework/errors_impl.py", line 473, in __exit__
c_api.TF_GetCode(self.status.status))
tensorflow.python.framework.errors_impl.AlreadyExistsError: file already exists
```
The same error can probably occur with the sync parameter server as well.
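One way to make the examples robust is to retry the download when another worker has already written the file, which is essentially what the patch above does:
```python
import time

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data


def download_mnist_retry(seed=0, max_num_retries=20):
    # Several workers can race on renaming the same temporary file; keep
    # retrying until one of them has finished writing the dataset.
    for _ in range(max_num_retries):
        try:
            return input_data.read_data_sets("MNIST_data", one_hot=True,
                                             seed=seed)
        except tf.errors.AlreadyExistsError:
            time.sleep(1)
    raise Exception("Failed to download MNIST.")
```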
| 2018-01-24T00:20:43 |
||
ray-project/ray | 1,467 | ray-project__ray-1467 | [
"1219"
] | 782b4aeb0ffebf50d55db97821f263ca60679c40 | diff --git a/python/ray/actor.py b/python/ray/actor.py
--- a/python/ray/actor.py
+++ b/python/ray/actor.py
@@ -6,7 +6,6 @@
import hashlib
import inspect
import json
-import numpy as np
import traceback
import pyarrow.plasma as plasma
@@ -110,37 +109,6 @@ def get_actor_checkpoint(worker, actor_id):
return checkpoint_index, checkpoint
-def put_dummy_object(worker, dummy_object_id):
- """Put a dummy actor object into the local object store.
-
- This registers a dummy object ID in the local store with an empty numpy
- array as the value. The resulting object is pinned to the store by storing
- it to the worker's state.
-
- For actors, dummy objects are used to store the stateful dependencies
- between consecutive method calls. This function should be called for every
- actor method execution that updates the actor's internal state.
-
- Args:
- worker: The worker to use to perform the put.
- dummy_object_id: The object ID of the dummy object.
- """
- # Add the dummy output for actor tasks. TODO(swang): We use
- # a numpy array as a hack to pin the object in the object
- # store. Once we allow object pinning in the store, we may
- # use `None`.
- dummy_object = np.zeros(1)
- worker.put_object(dummy_object_id, dummy_object)
- # Keep the dummy output in scope for the lifetime of the
- # actor, to prevent eviction from the object store.
- dummy_object = worker.get_object([dummy_object_id])
- dummy_object = dummy_object[0]
- worker.actor_pinned_objects.append(dummy_object)
- if (len(worker.actor_pinned_objects) >
- ray._config.actor_max_dummy_objects()):
- worker.actor_pinned_objects.pop(0)
-
-
def make_actor_method_executor(worker, method_name, method):
"""Make an executor that wraps a user-defined actor method.
@@ -168,11 +136,10 @@ def actor_method_executor(dummy_return_id, task_counter, actor,
if method_name == "__ray_checkpoint__":
# Execute the checkpoint task.
actor_checkpoint_failed, error = method(actor, *args)
- # If the checkpoint was successfully loaded, put the dummy object
- # and update the actor's task counter, so that the task following
- # the checkpoint can run.
+ # If the checkpoint was successfully loaded, update the actor's
+ # task counter and set a flag to notify the local scheduler, so
+ # that the task following the checkpoint can run.
if not actor_checkpoint_failed:
- put_dummy_object(worker, dummy_return_id)
worker.actor_task_counter = task_counter + 1
# Once the actor has resumed from a checkpoint, it counts as
# loaded.
@@ -188,7 +155,6 @@ def actor_method_executor(dummy_return_id, task_counter, actor,
else:
# Update the worker's internal state before executing the method in
# case the method throws an exception.
- put_dummy_object(worker, dummy_return_id)
worker.actor_task_counter = task_counter + 1
# Once the actor executes a task, it counts as loaded.
worker.actor_loaded = True
diff --git a/python/ray/worker.py b/python/ray/worker.py
--- a/python/ray/worker.py
+++ b/python/ray/worker.py
@@ -230,10 +230,6 @@ def __init__(self):
# task assigned. Workers are not assigned a task on startup, so we
# initialize to False.
self.actor_checkpoint_failed = False
- # TODO(swang): This is a hack to prevent the object store from evicting
- # dummy objects. Once we allow object pinning in the store, we may
- # remove this variable.
- self.actor_pinned_objects = None
# The number of threads Plasma should use when putting an object in the
# object store.
self.memcopy_threads = 12
@@ -1920,9 +1916,6 @@ def connect(info, object_id_seed=None, mode=WORKER_MODE, worker=global_worker,
actor_key = b"Actor:" + worker.actor_id
class_id = worker.redis_client.hget(actor_key, "class_id")
worker.class_id = class_id
- # Store a list of the dummy outputs produced by actor tasks, to pin the
- # dummy outputs in the object store.
- worker.actor_pinned_objects = []
# Initialize the serialization library. This registers some classes, and so
# it must be run before we export all of the cached remote functions.
| Submitting a large number of actor tasks in a loop gives a reconstruction error.
I'm seeing this issue on Ubuntu 16.04 with Python 3.6.2 (Anaconda) and the current Ray master (built from source).
I'm running the following.
```python
import ray
ray.worker._init(start_ray_local=True, object_store_memory=100000000)
@ray.remote
class Foo():
def m(self):
return 1
actors = [Foo.remote() for _ in range(30)]
i = 0
while True:
print(i)
i += 1
ids = []
for _ in range(50):
ids += [a.m.remote() for a in actors]
ray.get(ids)
```
The relevant output is
```
0
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
There is not enough space to create this object, so evicting 32259 objects to free up 20000580 bytes.
31
32
33
34
35
There is not enough space to create this object, so evicting 20708 objects to free up 12839980 bytes.
[INFO] (/home/ubuntu/ray/src/local_scheduler/local_scheduler_algorithm.cc:443) A task that has already been executed has been resubmitted, so we are ignoring it. This should only happen during reconstruction.
[INFO] (/home/ubuntu/ray/src/local_scheduler/local_scheduler_algorithm.cc:443) A task that has already been executed has been resubmitted, so we are ignoring it. This should only happen during reconstruction.
[INFO] (/home/ubuntu/ray/src/local_scheduler/local_scheduler_algorithm.cc:443) A task that has already been executed has been resubmitted, so we are ignoring it. This should only happen during reconstruction.
[INFO] (/home/ubuntu/ray/src/local_scheduler/local_scheduler_algorithm.cc:443) A task that has already been executed has been resubmitted, so we are ignoring it. This should only happen during reconstruction.
```
The message repeats a large number of times very quickly and then the script hangs.
| The error goes away if I don't save dummy objects on the workers. I'm worried this may be the cause of a memory leak that I'm seeing (e.g., the dummy objects just accumulate forever).
Do we need to pin *all* of the dummy objects?
@stephanie-wang
For now, we will have to pin all the objects for forked actor handles.
The plan for the future is to remove the dummy objects entirely from the object store. Only the local scheduler hosting the actor process needs to know about them, so we can have the actor process mock the object notifications that would normally come from the object store. | 2018-01-25T00:53:42 |
|
ray-project/ray | 1,471 | ray-project__ray-1471 | [
"1470"
] | 173f1d629a1503097c81d53fe7076bff56315e5a | diff --git a/python/ray/rllib/models/preprocessors.py b/python/ray/rllib/models/preprocessors.py
--- a/python/ray/rllib/models/preprocessors.py
+++ b/python/ray/rllib/models/preprocessors.py
@@ -78,7 +78,6 @@ def transform(self, observation):
class OneHotPreprocessor(Preprocessor):
def _init(self):
- assert self._obs_space.shape == ()
self.shape = (self._obs_space.n,)
def transform(self, observation):
| Travis test failures in test_catalog.py.
The Travis builds all seem to be failing in `test_catalog.py`.
I can reproduce some failures locally with `gym` version `0.9.5`.
Gym pushed a new version today, so that may be the issue https://pypi.python.org/pypi/gym.
For example,
```
$ python -m pytest python/ray/rllib/test/test_catalog.py
============================= test session starts ==============================
platform linux2 -- Python 2.7.14, pytest-3.3.2, py-1.5.2, pluggy-0.6.0
rootdir: /home/travis/build/robertnishihara/ray-private-travis/python, inifile:

collecting 0 items
collecting 5 items
collecting 5 items
collected 5 items

python/ray/rllib/test/test_catalog.py ...FF                              [100%]

=================================== FAILURES ===================================
____________________ ModelCatalogTest.testGymPreprocessors _____________________

self = <ray.rllib.test.test_catalog.ModelCatalogTest testMethod=testGymPreprocessors>

    def testGymPreprocessors(self):
        p1 = ModelCatalog.get_preprocessor(
            get_registry(), gym.make("CartPole-v0"))
        self.assertEqual(type(p1), NoPreprocessor)

        p2 = ModelCatalog.get_preprocessor(
>           get_registry(), gym.make("FrozenLake-v0"))

python/ray/rllib/test/test_catalog.py:41:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
python/ray/rllib/models/catalog.py:215: in get_preprocessor
    return preprocessor(env.observation_space, options)
python/ray/rllib/models/preprocessors.py:23: in __init__
    self._init()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ray.rllib.models.preprocessors.OneHotPreprocessor object at 0x7fad2df67dd0>

    def _init(self):
>       assert self._obs_space.shape == ()
E       AssertionError

python/ray/rllib/models/preprocessors.py:81: AssertionError
----------------------------- Captured stdout call -----------------------------
Observation shape is (4,)
Not using any observation preprocessor.
Observation shape is (16,)
Using one-hot preprocessor for discrete envs.
----------------------------- Captured stderr call -----------------------------
[2018-01-25 07:26:43,537] Making new env: CartPole-v0
[2018-01-25 07:26:43,540] Making new env: FrozenLake-v0
------------------------------ Captured log call -------------------------------
registration.py 120 INFO Making new env: CartPole-v0
registration.py 120 INFO Making new env: FrozenLake-v0
____________________ ModelCatalogTest.testTuplePreprocessor ____________________

self = <ray.rllib.test.test_catalog.ModelCatalogTest testMethod=testTuplePreprocessor>

    def testTuplePreprocessor(self):
        ray.init()

        class TupleEnv(object):
            def __init__(self):
                self.observation_space = Tuple(
                    [Discrete(5), Box(0, 1, shape=(3,))])

        p1 = ModelCatalog.get_preprocessor(
>           get_registry(), TupleEnv())

python/ray/rllib/test/test_catalog.py:52:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
python/ray/rllib/models/catalog.py:215: in get_preprocessor
    return preprocessor(env.observation_space, options)
python/ray/rllib/models/preprocessors.py:23: in __init__
    self._init()
python/ray/rllib/models/preprocessors.py:112: in _init
    preprocessor = get_preprocessor(space)(space, self._options)
python/ray/rllib/models/preprocessors.py:23: in __init__
    self._init()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ray.rllib.models.preprocessors.OneHotPreprocessor object at 0x7fad4ff234d0>

    def _init(self):
>       assert self._obs_space.shape == ()
E       AssertionError

python/ray/rllib/models/preprocessors.py:81: AssertionError
----------------------------- Captured stdout call -----------------------------
Waiting for redis server at 127.0.0.1:44545 to respond...
Waiting for redis server at 127.0.0.1:60007 to respond...
Starting local scheduler with the following resources: {'GPU': 0, 'CPU': 2}.
Failed to start the UI, you may need to run 'pip install jupyter'.
Observation shape is ((5,), (3,))
Using a TupleFlatteningPreprocessor
Creating sub-preprocessor for Discrete(5)
Observation shape is (5,)
Using one-hot preprocessor for discrete envs.
----------------------------- Captured stderr call -----------------------------
Allowing the Plasma store to use up to 3.13728GB of memory.
Starting object store with directory /dev/shm and huge page support disabled
Disconnecting client on fd 22
[INFO] (/home/travis/build/robertnishihara/ray-private-travis/src/local_scheduler/local_scheduler.cc:171) Killed worker pid 14098 which hadn't started yet.
[INFO] (/home/travis/build/robertnishihara/ray-private-travis/src/local_scheduler/local_scheduler.cc:171) Killed worker pid 14099 which hadn't started yet.
Disconnecting client on fd 20
Disconnecting client on fd 18
====================== 2 failed, 3 passed in 7.09 seconds ======================

The command "python -m pytest python/ray/rllib/test/test_catalog.py" exited with 1.
```
| I just confirmed that with gym `0.9.4`, `test_catalog.py` passes. | 2018-01-25T07:50:31 |
|
ray-project/ray | 1,499 | ray-project__ray-1499 | [
"1480"
] | 7550b628bfb294576adedfd0f6650ad68e6dcd2f | diff --git a/python/ray/services.py b/python/ray/services.py
--- a/python/ray/services.py
+++ b/python/ray/services.py
@@ -709,9 +709,25 @@ def start_local_scheduler(redis_address,
# By default, use the number of hardware execution threads for the
# number of cores.
resources["CPU"] = psutil.cpu_count()
+
+ # See if CUDA_VISIBLE_DEVICES has already been set.
+ gpu_ids = ray.utils.get_cuda_visible_devices()
+
+ # Check that the number of GPUs that the local scheduler wants doesn't
+ # excede the amount allowed by CUDA_VISIBLE_DEVICES.
+ if ("GPU" in resources and gpu_ids is not None and
+ resources["GPU"] > len(gpu_ids)):
+ raise Exception("Attempting to start local scheduler with {} GPUs, "
+ "but CUDA_VISIBLE_DEVICES contains {}.".format(
+ resources["GPU"], gpu_ids))
+
if "GPU" not in resources:
# Try to automatically detect the number of GPUs.
resources["GPU"] = _autodetect_num_gpus()
+ # Don't use more GPUs than allowed by CUDA_VISIBLE_DEVICES.
+ if gpu_ids is not None:
+ resources["GPU"] = min(resources["GPU"], len(gpu_ids))
+
print("Starting local scheduler with the following resources: {}."
.format(resources))
local_scheduler_name, p = ray.local_scheduler.start_local_scheduler(
diff --git a/python/ray/utils.py b/python/ray/utils.py
--- a/python/ray/utils.py
+++ b/python/ray/utils.py
@@ -6,6 +6,7 @@
import collections
import json
import numpy as np
+import os
import redis
import sys
@@ -114,6 +115,33 @@ def hex_to_binary(hex_identifier):
"""FunctionProperties: A named tuple storing remote functions information."""
+def get_cuda_visible_devices():
+ """Get the device IDs in the CUDA_VISIBLE_DEVICES environment variable.
+
+ Returns:
+ if CUDA_VISIBLE_DEVICES is set, this returns a list of integers with
+ the IDs of the GPUs. If it is not set, this returns None.
+ """
+ gpu_ids_str = os.environ.get("CUDA_VISIBLE_DEVICES", None)
+
+ if gpu_ids_str is None:
+ return None
+
+ if gpu_ids_str == "":
+ return []
+
+ return [int(i) for i in gpu_ids_str.split(",")]
+
+
+def set_cuda_visible_devices(gpu_ids):
+ """Set the CUDA_VISIBLE_DEVICES environment variable.
+
+ Args:
+ gpu_ids: This is a list of integers representing GPU IDs.
+ """
+ os.environ["CUDA_VISIBLE_DEVICES"] = ",".join([str(i) for i in gpu_ids])
+
+
def attempt_to_reserve_gpus(num_gpus, driver_id, local_scheduler,
redis_client):
"""Attempt to acquire GPUs on a particular local scheduler for an actor.
diff --git a/python/ray/worker.py b/python/ray/worker.py
--- a/python/ray/worker.py
+++ b/python/ray/worker.py
@@ -233,6 +233,9 @@ def __init__(self):
# The number of threads Plasma should use when putting an object in the
# object store.
self.memcopy_threads = 12
+ # When the worker is constructed. Record the original value of the
+ # CUDA_VISIBLE_DEVICES environment variable.
+ self.original_gpu_ids = ray.utils.get_cuda_visible_devices()
def set_mode(self, mode):
"""Set the mode of the worker.
@@ -868,8 +871,7 @@ def _get_next_task_from_local_scheduler(self):
self.actor_checkpoint_failed = False
# Automatically restrict the GPUs available to this task.
- os.environ["CUDA_VISIBLE_DEVICES"] = ",".join(
- [str(i) for i in ray.get_gpu_ids()])
+ ray.utils.set_cuda_visible_devices(ray.get_gpu_ids())
return task
@@ -889,15 +891,29 @@ def exit(signum, frame):
def get_gpu_ids():
- """Get the IDs of the GPU that are available to the worker.
+ """Get the IDs of the GPUs that are available to the worker.
- Each ID is an integer in the range [0, NUM_GPUS - 1], where NUM_GPUS is the
- number of GPUs that the node has.
+ If the CUDA_VISIBLE_DEVICES environment variable was set when the worker
+ started up, then the IDs returned by this method will be a subset of the
+ IDs in CUDA_VISIBLE_DEVICES. If not, the IDs will fall in the range
+ [0, NUM_GPUS - 1], where NUM_GPUS is the number of GPUs that the node has.
+
+ Returns:
+ A list of GPU IDs.
"""
if _mode() == PYTHON_MODE:
raise Exception("ray.get_gpu_ids() currently does not work in PYTHON "
"MODE.")
- return global_worker.local_scheduler_client.gpu_ids()
+
+ assigned_ids = global_worker.local_scheduler_client.gpu_ids()
+ # If the user had already set CUDA_VISIBLE_DEVICES, then respect that (in
+ # the sense that only GPU IDs that appear in CUDA_VISIBLE_DEVICES should be
+ # returned).
+ if global_worker.original_gpu_ids is not None:
+ assigned_ids = [global_worker.original_gpu_ids[gpu_id]
+ for gpu_id in assigned_ids]
+
+ return assigned_ids
def _webui_url_helper(client):
| diff --git a/test/runtest.py b/test/runtest.py
--- a/test/runtest.py
+++ b/test/runtest.py
@@ -1312,7 +1312,8 @@ def f():
self.assertGreater(t2 - t1, 0.09)
list_of_ids = ray.get(ready)
all_ids = [gpu_id for gpu_ids in list_of_ids for gpu_id in gpu_ids]
- self.assertEqual(set(all_ids), set(range(10)))
+ # Commenting out the below assert because it seems to fail a lot.
+ # self.assertEqual(set(all_ids), set(range(10)))
# Test that actors have CUDA_VISIBLE_DEVICES set properly.
@@ -1587,6 +1588,44 @@ def f():
ray.get(results)
+class CudaVisibleDevicesTest(unittest.TestCase):
+ def setUp(self):
+ # Record the curent value of this environment variable so that we can
+ # reset it after the test.
+ self.original_gpu_ids = os.environ.get(
+ "CUDA_VISIBLE_DEVICES", None)
+
+ def tearDown(self):
+ ray.worker.cleanup()
+ # Reset the environment variable.
+ if self.original_gpu_ids is not None:
+ os.environ["CUDA_VISIBLE_DEVICES"] = self.original_gpu_ids
+ else:
+ del os.environ["CUDA_VISIBLE_DEVICES"]
+
+ def testSpecificGPUs(self):
+ allowed_gpu_ids = [4, 5, 6]
+ os.environ["CUDA_VISIBLE_DEVICES"] = ",".join(
+ [str(i) for i in allowed_gpu_ids])
+ ray.init(num_gpus=3)
+
+ @ray.remote(num_gpus=1)
+ def f():
+ gpu_ids = ray.get_gpu_ids()
+ assert len(gpu_ids) == 1
+ assert gpu_ids[0] in allowed_gpu_ids
+
+ @ray.remote(num_gpus=2)
+ def g():
+ gpu_ids = ray.get_gpu_ids()
+ assert len(gpu_ids) == 2
+ assert gpu_ids[0] in allowed_gpu_ids
+ assert gpu_ids[1] in allowed_gpu_ids
+
+ ray.get([f.remote() for _ in range(100)])
+ ray.get([g.remote() for _ in range(100)])
+
+
class WorkerPoolTests(unittest.TestCase):
def tearDown(self):
ray.worker.cleanup()
| ray.get_gpu_ids() should respect CUDA_VISIBLE_DEVICES.
Suppose someone sets `CUDA_VISIBLE_DEVICES=4,5,6` and then starts Ray on a machine with `num_gpus=3`. Right now, `ray.get_gpu_ids()` will return values in `[0, 1, 2]`. Instead, it should return values in `[4, 5, 6]`.
Two more cases:
1. If `num_gpus = 4` and `CUDA_VISIBLE_DEVICES=1,2,3`, then raise an exception.
2. If `num_gpus = 2` and `CUDA_VISIBLE_DEVICES=1,2,3`, then either just use devices 1 and 2 or allow the use of all 3 (but at most 2 at a time).
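In sketch form, the desired behavior maps the scheduler-assigned (relative) GPU IDs through whatever `CUDA_VISIBLE_DEVICES` contained when the worker started; the patch above implements this idea in `ray.utils` and `ray.get_gpu_ids()`. The helper name here is made up for illustration:
```python
import os

def visible_gpu_ids(assigned_ids):
    # If CUDA_VISIBLE_DEVICES="4,5,6" and the scheduler assigns [0, 2],
    # the worker should see GPUs [4, 6].
    gpu_ids_str = os.environ.get("CUDA_VISIBLE_DEVICES", None)
    if gpu_ids_str is None:
        return assigned_ids
    original = [int(i) for i in gpu_ids_str.split(",")]
    return [original[i] for i in assigned_ids]
```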
cc @fyu
| 2018-02-01T02:32:01 |
|
ray-project/ray | 1,523 | ray-project__ray-1523 | [
"1504"
] | ff8e7f82598a7e8a8791f961d3852f22d054b769 | diff --git a/python/ray/rllib/examples/multiagent_mountaincar_env.py b/python/ray/rllib/examples/multiagent_mountaincar_env.py
--- a/python/ray/rllib/examples/multiagent_mountaincar_env.py
+++ b/python/ray/rllib/examples/multiagent_mountaincar_env.py
@@ -22,8 +22,8 @@ def __init__(self):
self.viewer = None
self.action_space = [Discrete(3) for _ in range(2)]
- self.observation_space = Tuple(tuple(Box(self.low, self.high)
- for _ in range(2)))
+ self.observation_space = Tuple([
+ Box(self.low, self.high) for _ in range(2)])
self._seed()
self.reset()
diff --git a/python/ray/rllib/examples/multiagent_pendulum_env.py b/python/ray/rllib/examples/multiagent_pendulum_env.py
--- a/python/ray/rllib/examples/multiagent_pendulum_env.py
+++ b/python/ray/rllib/examples/multiagent_pendulum_env.py
@@ -24,8 +24,8 @@ def __init__(self):
self.action_space = [Box(low=-self.max_torque / 2,
high=self.max_torque / 2, shape=(1,))
for _ in range(2)]
- self.observation_space = Tuple(tuple(Box(low=-high, high=high)
- for _ in range(2)))
+ self.observation_space = Tuple([
+ Box(low=-high, high=high) for _ in range(2)])
self._seed()
| [rllib] [docs] Document multi-agent support
We should document the new multi-agent support in rllib and have some examples in readthedocs. It would be good to cover the supported cases and which ones are not yet supported (or provide workarounds).
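As a starting point for those docs, a small example of the multi-agent space definitions used in the updated examples above might help (a sketch assuming gym's standard space classes):
```python
from gym.spaces import Box, Discrete, Tuple

# Two agents: a per-agent list of action spaces, and a Tuple observation
# space built from a list of per-agent Boxes (as in the diff above).
action_space = [Discrete(3) for _ in range(2)]
observation_space = Tuple([Box(0.0, 1.0, shape=(3,)) for _ in range(2)])
```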
| 2018-02-07T21:01:12 |
||
ray-project/ray | 1,545 | ray-project__ray-1545 | [
"1525"
] | fd03fb967f50c80ca31c50be541d7b98dffd9b1a | diff --git a/python/ray/dataframe/dataframe.py b/python/ray/dataframe/dataframe.py
--- a/python/ray/dataframe/dataframe.py
+++ b/python/ray/dataframe/dataframe.py
@@ -7,10 +7,12 @@
import ray
import itertools
+from .index import Index
+
class DataFrame(object):
- def __init__(self, df, columns):
+ def __init__(self, df, columns, index=None):
"""Distributed DataFrame object backed by Pandas dataframes.
Args:
@@ -22,29 +24,53 @@ def __init__(self, df, columns):
assert(len(df) > 0)
self._df = df
+ # TODO: Clean up later.
+ # We will call get only when we access the object (and only once).
+ self._lengths = \
+ ray.get([_deploy_func.remote(_get_lengths, d) for d in self._df])
self.columns = columns
+ if index is None:
+ self._index = self._default_index()
+ else:
+ self._index = index
+
+ self._pd_index = None
+
def __str__(self):
return "ray.DataFrame object"
def __repr__(self):
return "ray.DataFrame object"
- @property
- def index(self):
+ def _get_index(self):
"""Get the index for this DataFrame.
Returns:
The union of all indexes across the partitions.
"""
- indices = ray.get(self._map_partitions(lambda df: df.index)._df)
- if isinstance(indices[0], pd.RangeIndex):
- merged = indices[0]
- for index in indices[1:]:
- merged = merged.union(index)
- return merged
- else:
- return indices[0].append(indices[1:])
+ if self._pd_index is None:
+ self._pd_index = Index.to_pandas(self._index)
+
+ return self._pd_index
+
+ def _set_index(self, new_index):
+ """Set the index for this DataFrame.
+
+ Args:
+ new_index: The new index to set this
+ """
+ self._pd_index = None
+ self._index = Index.from_pandas(new_index, self._lengths)
+
+ def _default_index(self):
+ dest_indices = [(i, j)
+ for i in range(len(self._lengths))
+ for j in range(self._lengths[i])]
+ return Index({i: dest_indices[i] for i in range(len(dest_indices))},
+ pd.RangeIndex)
+
+ index = property(_get_index, _set_index)
@property
def size(self):
@@ -140,7 +166,7 @@ def _map_partitions(self, func, *args):
assert(callable(func))
new_df = [_deploy_func.remote(func, part) for part in self._df]
- return DataFrame(new_df, self.columns)
+ return DataFrame(new_df, self.columns, index=self._index)
def add_prefix(self, prefix):
"""Add a prefix to each of the column names.
@@ -150,7 +176,7 @@ def add_prefix(self, prefix):
"""
new_dfs = self._map_partitions(lambda df: df.add_prefix(prefix))
new_cols = self.columns.map(lambda x: str(prefix) + str(x))
- return DataFrame(new_dfs._df, new_cols)
+ return DataFrame(new_dfs._df, new_cols, index=self._index)
def add_suffix(self, suffix):
"""Add a suffix to each of the column names.
@@ -160,7 +186,7 @@ def add_suffix(self, suffix):
"""
new_dfs = self._map_partitions(lambda df: df.add_suffix(suffix))
new_cols = self.columns.map(lambda x: str(x) + str(suffix))
- return DataFrame(new_dfs._df, new_cols)
+ return DataFrame(new_dfs._df, new_cols, index=self._index)
def applymap(self, func):
"""Apply a function to a DataFrame elementwise.
@@ -177,7 +203,7 @@ def copy(self, deep=True):
Returns:
A new DataFrame pointing to the same partitions as this one.
"""
- return DataFrame(self._df, self.columns)
+ return DataFrame(self._df, self.columns, index=self._index)
def groupby(self, by=None, axis=0, level=None, as_index=True, sort=True,
group_keys=True, squeeze=False, **kwargs):
@@ -199,11 +225,8 @@ def groupby(self, by=None, axis=0, level=None, as_index=True, sort=True,
[index for df in ray.get(self._df) for index in list(df.index)]))
chunksize = int(len(indices) / len(self._df))
- partitions = []
-
- for df in self._df:
- partitions.append(_shuffle.remote(df, indices, chunksize))
-
+ partitions = [_shuffle.remote(df, indices, chunksize)
+ for df in self._df]
partitions = ray.get(partitions)
# Transpose the list of dataframes
@@ -213,7 +236,6 @@ def groupby(self, by=None, axis=0, level=None, as_index=True, sort=True,
shuffle.append([])
for j in range(len(partitions)):
shuffle[i].append(partitions[j][i])
-
new_dfs = [_local_groupby.remote(part, axis=axis) for part in shuffle]
return DataFrame(new_dfs, self.columns)
@@ -311,8 +333,10 @@ def transpose(self, *args, **kwargs):
"""
local_transpose = self._map_partitions(
lambda df: df.transpose(*args, **kwargs))
+
# Sum will collapse the NAs from the groupby
- return local_transpose.reduce_by_index(lambda df: df.sum(), axis=1)
+ return local_transpose.reduce_by_index(
+ lambda df: df.apply(lambda x: x), axis=1)
T = property(transpose)
@@ -1502,6 +1526,24 @@ def iloc(axis=None):
raise NotImplementedError("Not Yet implemented.")
+def _get_lengths(df):
+ """Gets the length of the dataframe.
+
+ Args:
+ df: A remote pd.DataFrame object.
+
+ Returns:
+ Returns an integer length of the dataframe object. If the attempt
+ fails, returns 0 as the length.
+ """
+ try:
+ return len(df)
+ # Because we sometimes have cases where we have summary statistics in our
+ # DataFrames
+ except TypeError:
+ return 0
+
+
@ray.remote
def _shuffle(df, indices, chunksize):
"""Shuffle data by sending it through the Ray Store.
@@ -1518,12 +1560,12 @@ def _shuffle(df, indices, chunksize):
i = 0
partition = []
while len(indices) > chunksize:
- oids = df.reindex(indices[:chunksize]).dropna()
+ oids = df.reindex(indices[:chunksize])
partition.append(oids)
indices = indices[chunksize:]
i += 1
else:
- oids = df.reindex(indices).dropna()
+ oids = df.reindex(indices)
partition.append(oids)
return partition
@@ -1581,16 +1623,27 @@ def from_pandas(df, npartitions=None, chunksize=None, sort=True):
elif chunksize is None:
raise ValueError("The number of partitions or chunksize must be set.")
+ old_index = df.index
+
# TODO stop reassigning df
dataframes = []
+ lengths = []
while len(df) > chunksize:
- top = ray.put(df[:chunksize])
+ t_df = df[:chunksize]
+ lengths.append(len(t_df))
+ # reindex here because we want a pd.RangeIndex within the partitions.
+ # It is smaller and sometimes faster.
+ t_df.reindex()
+ top = ray.put(t_df)
dataframes.append(top)
df = df[chunksize:]
else:
dataframes.append(ray.put(df))
+ lengths.append(len(df))
+
+ ray_index = Index.from_pandas(old_index, lengths)
- return DataFrame(dataframes, df.columns)
+ return DataFrame(dataframes, df.columns, index=ray_index)
def to_pandas(df):
diff --git a/python/ray/dataframe/index.py b/python/ray/dataframe/index.py
--- a/python/ray/dataframe/index.py
+++ b/python/ray/dataframe/index.py
@@ -7,15 +7,50 @@
class Index(object):
- def __init__(self, idx):
+ def __init__(self, idx, pandas_type):
self.idx = idx
+ self.pandas_type = pandas_type
+
+ def __getitem__(self, item):
+ return self.idx[item]
+
+ def __len__(self):
+ return len(self.idx)
@classmethod
- def to_pandas(indices):
- if isinstance(indices[0], pd.RangeIndex):
- merged = indices[0]
- for index in indices[1:]:
- merged = merged.union(index)
- return merged
+ def to_pandas(cls, index):
+ """Convert a Ray Index object to a Pandas Index object.
+
+ Args:
+ index (ray.Index): A Ray Index object.
+
+ Returns:
+ A pandas Index object.
+ """
+ k = index.idx.keys()
+ if index.pandas_type is pd.RangeIndex:
+ return pd.RangeIndex(min(k), max(k) + 1)
else:
- return indices[0].append(indices[1:])
+ return pd.Index(k)
+
+ @classmethod
+ def from_pandas(cls, pd_index, lengths):
+ """Convert a Pandas Index object to a Ray Index object.
+
+ Args:
+ pd_index (pd.Index): A Pandas Index object.
+ lengths ([int]): A list of lengths for the partitions.
+
+ Returns:
+ A Ray Index object.
+ """
+ dest_indices = [(i, j)
+ for i in range(len(lengths))
+ for j in range(lengths[i])]
+ if len(pd_index) != len(dest_indices):
+ raise ValueError(
+ "Length of index given does not match current dataframe")
+
+ return Index(
+ {pd_index[i]: dest_indices[i] for i in range(len(dest_indices))},
+ type(pd_index))
| diff --git a/python/ray/dataframe/test/test_dataframe.py b/python/ray/dataframe/test/test_dataframe.py
--- a/python/ray/dataframe/test/test_dataframe.py
+++ b/python/ray/dataframe/test/test_dataframe.py
@@ -22,6 +22,12 @@ def test_roundtrip(ray_df, pandas_df):
@pytest.fixture
def test_index(ray_df, pandas_df):
assert(ray_df.index.equals(pandas_df.index))
+ ray_df_cp = ray_df.copy()
+ pandas_df_cp = pandas_df.copy()
+
+ ray_df_cp.index = [str(i) for i in ray_df_cp.index]
+ pandas_df_cp.index = [str(i) for i in pandas_df_cp.index]
+ assert(ray_df_cp.index.sort_values().equals(pandas_df_cp.index))
@pytest.fixture
@@ -41,10 +47,7 @@ def test_ftypes(ray_df, pandas_df):
@pytest.fixture
def test_values(ray_df, pandas_df):
- a = np.ndarray.flatten(ray_df.values)
- b = np.ndarray.flatten(pandas_df.values)
- for c, d in zip(a, b):
- assert(c == d or (np.isnan(c) and np.isnan(d)))
+ np.testing.assert_equal(ray_df.values, pandas_df.values)
@pytest.fixture
@@ -339,6 +342,51 @@ def test_mixed_dtype_dataframe():
test_notnull(ray_df, pandas_df)
+def test_nan_dataframe():
+ pandas_df = pd.DataFrame({
+ 'col1': [1, 2, 3, np.nan],
+ 'col2': [4, 5, np.nan, 7],
+ 'col3': [8, np.nan, 10, 11],
+ 'col4': [np.nan, 13, 14, 15]})
+
+ ray_df = rdf.from_pandas(pandas_df, 2)
+
+ testfuncs = [lambda x: x + x,
+ lambda x: str(x),
+ lambda x: x,
+ lambda x: False]
+
+ keys = ['col1',
+ 'col2',
+ 'col3',
+ 'col4']
+
+ test_roundtrip(ray_df, pandas_df)
+ test_index(ray_df, pandas_df)
+ test_size(ray_df, pandas_df)
+ test_ndim(ray_df, pandas_df)
+ test_ftypes(ray_df, pandas_df)
+ test_values(ray_df, pandas_df)
+ test_axes(ray_df, pandas_df)
+ test_shape(ray_df, pandas_df)
+ test_add_prefix(ray_df, pandas_df)
+ test_add_suffix(ray_df, pandas_df)
+
+ for testfunc in testfuncs:
+ test_applymap(ray_df, pandas_df, testfunc)
+
+ test_copy(ray_df)
+ test_sum(ray_df, pandas_df)
+ test_keys(ray_df, pandas_df)
+ test_transpose(ray_df, pandas_df)
+
+ for key in keys:
+ test_get(ray_df, pandas_df, key)
+
+ test_get_dtype_counts(ray_df, pandas_df)
+ test_get_ftype_counts(ray_df, pandas_df)
+
+
def test_add():
ray_df = create_test_dataframe()
| [DataFrame] ray.transpose() not working with NaN values
<!--
General questions should be asked on the mailing list [email protected].
Before submitting an issue, please fill out the following form.
-->
### System information
- **OS Platform and Distribution (e.g., Linux Ubuntu 16.04)**: macOS 10.13.2
- **Ray installed from (source or binary)**: source
- **Ray version**: 0.3.0
- **Python version**: 3.5.2
- **Exact command to reproduce**: ray_df.transpose()
<!--
You can obtain the Ray version with
python -c "import ray; print(ray.__version__)"
-->
### Describe the problem
@devin-petersohn
ray_df.T doesn't work with np.NaN values. It drops all values (even if not NaN) and returns an empty dataframe.
### Source code / logs
Test DataFrames:
<img width="584" alt="screen shot 2018-02-07 at 4 48 56 pm" src="https://user-images.githubusercontent.com/8093660/35949527-d3a00d76-0c26-11e8-942e-15da64bca90c.png">
Code for tests:
<img width="584" alt="screen shot 2018-02-07 at 4 48 56 pm" src="https://user-images.githubusercontent.com/8093660/35949592-32cb3f46-0c27-11e8-9eae-fd16834b29b0.png">
Output for test_transpose (the first is ray_df.T, the second is pandas_df.T):
<img width="854" alt="screen shot 2018-02-07 at 4 50 00 pm" src="https://user-images.githubusercontent.com/8093660/35949559-f9d839dc-0c26-11e8-8aee-58fb7d9e46b4.png">
| 2018-02-15T00:17:18 |
|
ray-project/ray | 1,662 | ray-project__ray-1662 | [
"1660"
] | 162d063f0dd68c3c85f39bb9a8b9c275cc226f8c | diff --git a/python/ray/dataframe/__init__.py b/python/ray/dataframe/__init__.py
--- a/python/ray/dataframe/__init__.py
+++ b/python/ray/dataframe/__init__.py
@@ -1,8 +1,18 @@
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
+
+import pandas as pd
import threading
+pd_version = pd.__version__
+pd_major = int(pd_version.split(".")[0])
+pd_minor = int(pd_version.split(".")[1])
+
+if pd_major == 0 and pd_minor < 22:
+ raise Exception("In order to use Pandas on Ray, please upgrade your Pandas"
+ " version to >= 0.22.")
+
DEFAULT_NPARTITIONS = 4
| [DataFrame] Error checking on Pandas version
We need better reporting for issues with the Pandas version on a user's system.
| How about in `dataframe.py`, we add a line like
```python
# Pandas must be at least version 0.22.0.
pd_major = int(pd.__version__.split(".")[0])
pd_minor = int(pd.__version__.split(".")[1])
if (pd_major == 0) and (pd_minor < 22):
raise Exception("Your Pandas version is less than 0.22.0. Please "
"update Pandas to the latest version.")
```
I was thinking it belongs in `dataframe/__init__.py` before we start the Ray cluster.
That's probably better. | 2018-03-06T00:54:20 |
|
ray-project/ray | 1,666 | ray-project__ray-1666 | [
"1579"
] | 4af42d5bb6ab3fef7dad80c722249218bb9cc061 | diff --git a/python/ray/actor.py b/python/ray/actor.py
--- a/python/ray/actor.py
+++ b/python/ray/actor.py
@@ -706,6 +706,10 @@ class ActorHandle(actor_handle_class):
@classmethod
def remote(cls, *args, **kwargs):
+ if ray.worker.global_worker.mode is None:
+ raise Exception("Actors cannot be created before ray.init() "
+ "has been called.")
+
actor_id = random_actor_id()
# The ID for this instance of ActorHandle. These should be unique
# across instances with the same _ray_actor_id.
| "This should be unreachable" error message hard to interpret
I've seen this when forgetting to call `ray.init()`; maybe it should say `..Unreachable... Did you call ray.init()?`
```
return func(*args, **kwargs)
File "/home/ubuntu/Dropbox/git0/tensorpack/tensorpack/train/base.py", line 253, in main_loop
self.run_step() # implemented by subclass
File "resnet_numpy_concat_ray_test.py", line 184, in run_step
ps = ParameterServer.remote(grad_values_flat.size)
File "/home/ubuntu/anaconda3/envs/mxnet_p36/lib/python3.6/site-packages/ray/actor.py", line 761, in remote
ray.worker.global_worker)
File "/home/ubuntu/anaconda3/envs/mxnet_p36/lib/python3.6/site-packages/ray/actor.py", line 387, in export_actor_class
assert False, "This should be unreachable."
```
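The patch above addresses this by failing fast with an explicit check before creating the actor. A standalone sketch of that guard (the function name here is hypothetical):
```python
import ray

def check_initialized():
    # Guard used by the patch above: fail fast with a clear message
    # instead of hitting the "unreachable" assertion later.
    if ray.worker.global_worker.mode is None:
        raise Exception("Actors cannot be created before ray.init() "
                        "has been called.")
```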
| 2018-03-07T04:20:09 |
||
ray-project/ray | 1,693 | ray-project__ray-1693 | [
"1691"
] | 40799fee37d5a9a1df4c52aaec59e5129f10494f | diff --git a/python/ray/dataframe/dataframe.py b/python/ray/dataframe/dataframe.py
--- a/python/ray/dataframe/dataframe.py
+++ b/python/ray/dataframe/dataframe.py
@@ -741,8 +741,8 @@ def drop(self, labels=None, axis=0, index=None, columns=None, level=None,
Args:
labels: Index or column labels to drop.
- axis: Whether to drop labels from the index (0 / ‘index’) or
- columns (1 / ‘columns’).
+ axis: Whether to drop labels from the index (0 / 'index') or
+ columns (1 / 'columns').
index, columns: Alternative to specifying axis (labels, axis=1 is
equivalent to columns=labels).
@@ -751,7 +751,7 @@ def drop(self, labels=None, axis=0, index=None, columns=None, level=None,
inplace: If True, do operation inplace and return None.
- errors: If ‘ignore’, suppress error and existing labels are
+ errors: If 'ignore', suppress error and existing labels are
dropped.
Returns:
dropped : type of caller
@@ -972,7 +972,7 @@ def fillna(self, value=None, method=None, axis=None, inplace=False,
backfill.
bfill: use NEXT valid observation to fill gap.
- axis: 0 or ‘index’, 1 or ‘columns’.
+ axis: 0 or 'index', 1 or 'columns'.
inplace: If True, fill in place. Note: this will modify any other
views on this object.
@@ -986,7 +986,7 @@ def fillna(self, value=None, method=None, axis=None, inplace=False,
than 0 if not None.
downcast: A dict of item->dtype of what to downcast if possible,
- or the string ‘infer’ which will try to downcast to an
+ or the string 'infer' which will try to downcast to an
appropriate equal type.
Returns:
| SyntaxError: Non-ASCII character '\xe2' in file ray/dataframe/dataframe.py
Hi,
I built Ray from source on Ubuntu 16.04 (Python 2.7) and tried to import ray.dataframe:
`import ray.dataframe as pd`
and received the following error:
```
SyntaxError: Non-ASCII character '\xe2' in file ray/dataframe/dataframe.py on line 745, but no encoding declared; see http://python.org/dev/peps/pep-0263/ for details
```
https://github.com/ray-project/ray/blob/2b747ba46cee4445c8ff1640a2a5d7c88e2d1723/python/ray/dataframe/dataframe.py#L744
Could you please change all the non-ASCII single-quote (’) in the docstring to the standard quote (') in your repo? It works after I did that. Thanks!
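Alternatively (or until the quotes are replaced), declaring the file's encoding at the top of `dataframe.py` per PEP 263 also silences this error on Python 2:
```python
# -*- coding: utf-8 -*-
```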
| 2018-03-10T07:52:36 |
||
ray-project/ray | 1,744 | ray-project__ray-1744 | [
"1698"
] | 7b493aa4a1129ac01d7ced9e5cedf8b4a9355653 | diff --git a/python/ray/actor.py b/python/ray/actor.py
--- a/python/ray/actor.py
+++ b/python/ray/actor.py
@@ -12,18 +12,10 @@
import ray.local_scheduler
import ray.signature as signature
import ray.worker
-from ray.utils import (FunctionProperties, random_string, is_cython,
+from ray.utils import (FunctionProperties, _random_string, is_cython,
push_error_to_driver)
-def random_actor_id():
- return ray.local_scheduler.ObjectID(random_string())
-
-
-def random_actor_class_id():
- return random_string()
-
-
def compute_actor_handle_id(actor_handle_id, num_forks):
"""Deterministically comopute an actor handle ID.
@@ -750,7 +742,7 @@ def remote(cls, *args, **kwargs):
raise Exception("Actors cannot be created before ray.init() "
"has been called.")
- actor_id = random_actor_id()
+ actor_id = ray.local_scheduler.ObjectID(_random_string())
# The ID for this instance of ActorHandle. These should be unique
# across instances with the same _ray_actor_id.
actor_handle_id = ray.local_scheduler.ObjectID(
@@ -930,7 +922,7 @@ def __ray_checkpoint_restore__(self):
Class.__module__ = cls.__module__
Class.__name__ = cls.__name__
- class_id = random_actor_class_id()
+ class_id = _random_string()
return actor_handle_from_class(Class, class_id, resources,
checkpoint_interval, actor_method_cpus)
diff --git a/python/ray/utils.py b/python/ray/utils.py
--- a/python/ray/utils.py
+++ b/python/ray/utils.py
@@ -4,9 +4,11 @@
import binascii
import collections
+import hashlib
import numpy as np
import os
import sys
+import uuid
import ray.local_scheduler
@@ -15,7 +17,11 @@
def _random_string():
- return np.random.bytes(20)
+ id_hash = hashlib.sha1()
+ id_hash.update(uuid.uuid4().bytes)
+ id_bytes = id_hash.digest()
+ assert len(id_bytes) == 20
+ return id_bytes
def format_error_message(exception_message, task_exception=False):
| Speed up actor ID (and other random ID generation).
This function is really slow (on the order of 5-10 ms). It is used in a bunch of places and should be replaced with something faster (e.g., a deterministic hash). This is an issue, e.g., for actor creation.
https://github.com/ray-project/ray/blob/cae108d019a51bd7bd35df7f93b68ff07a84d9cd/python/ray/utils.py#L84-L107
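A sketch of a cheaper replacement — hashing a UUID, which is the approach taken in the patch above:
```python
import hashlib
import uuid

def _random_string():
    # sha1 digests are exactly 20 bytes, the same length as the old
    # np.random.bytes(20) result.
    id_hash = hashlib.sha1()
    id_hash.update(uuid.uuid4().bytes)
    return id_hash.digest()
```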
| 2018-03-18T22:07:44 |
||
ray-project/ray | 1,760 | ray-project__ray-1760 | [
"1741"
] | 5c7ef34b054de9ca28328333189ccc788a7d121f | diff --git a/python/ray/worker.py b/python/ray/worker.py
--- a/python/ray/worker.py
+++ b/python/ray/worker.py
@@ -1174,7 +1174,10 @@ def get_address_info_from_redis_helper(redis_address, node_ip_address):
assert b"ray_client_id" in info
assert b"node_ip_address" in info
assert b"client_type" in info
- if info[b"node_ip_address"].decode("ascii") == node_ip_address:
+ client_node_ip_address = info[b"node_ip_address"].decode("ascii")
+ if (client_node_ip_address == node_ip_address or
+ (client_node_ip_address == "127.0.0.1" and
+ redis_ip_address == ray.services.get_node_ip_address())):
if info[b"client_type"].decode("ascii") == "plasma_manager":
plasma_managers.append(info)
elif info[b"client_type"].decode("ascii") == "local_scheduler":
| diff --git a/test/multi_node_test.py b/test/multi_node_test.py
--- a/test/multi_node_test.py
+++ b/test/multi_node_test.py
@@ -283,5 +283,24 @@ def f():
subprocess.Popen(["ray", "stop"]).wait()
+class MiscellaneousTest(unittest.TestCase):
+ def tearDown(self):
+ ray.worker.cleanup()
+
+ def testConnectingInLocalCase(self):
+ address_info = ray.init(num_cpus=0)
+
+ # Define a driver that just connects to Redis.
+ driver_script = """
+import ray
+ray.init(redis_address="{}")
+print("success")
+""".format(address_info["redis_address"])
+
+ out = run_string_as_driver(driver_script)
+ # Make sure the other driver succeeded.
+ self.assertIn("success", out)
+
+
if __name__ == "__main__":
unittest.main(verbosity=2)
| [webui] Connecting to webui doesn't work when Ray is started with `ray.init()`.
Running the webui gives me an error message like "Some processes on this node may not have started yet".
| 2018-03-21T05:21:07 |
|
ray-project/ray | 1,774 | ray-project__ray-1774 | [
"1773"
] | 8704c8618c8d0115c5b0f6393f78858ee5af8702 | diff --git a/python/ray/rllib/agent.py b/python/ray/rllib/agent.py
--- a/python/ray/rllib/agent.py
+++ b/python/ray/rllib/agent.py
@@ -220,7 +220,7 @@ def _train(self):
def get_agent_class(alg):
- """Returns the class of an known agent given its name."""
+ """Returns the class of a known agent given its name."""
if alg == "PPO":
from ray.rllib import ppo
diff --git a/python/ray/rllib/es/es.py b/python/ray/rllib/es/es.py
--- a/python/ray/rllib/es/es.py
+++ b/python/ray/rllib/es/es.py
@@ -12,14 +12,12 @@
import time
import ray
-from ray.rllib.agent import Agent
-from ray.rllib.models import ModelCatalog
+from ray.rllib import agent
from ray.rllib.es import optimizers
from ray.rllib.es import policies
from ray.rllib.es import tabular_logger as tlogger
from ray.rllib.es import utils
-from ray.tune.result import TrainingResult
Result = namedtuple("Result", [
@@ -72,7 +70,9 @@ def __init__(self, registry, config, policy_params, env_creator, noise,
self.noise = SharedNoiseTable(noise)
self.env = env_creator(config["env_config"])
- self.preprocessor = ModelCatalog.get_preprocessor(registry, self.env)
+ from ray.rllib import models
+ self.preprocessor = models.ModelCatalog.get_preprocessor(
+ registry, self.env)
self.sess = utils.make_session(single_threaded=True)
self.policy = policies.GenericPolicy(
@@ -133,7 +133,7 @@ def do_rollouts(self, params, timestep_limit=None):
eval_lengths=eval_lengths)
-class ESAgent(Agent):
+class ESAgent(agent.Agent):
_agent_name = "ES"
_default_config = DEFAULT_CONFIG
_allow_unknown_subkeys = ["env_config"]
@@ -144,7 +144,9 @@ def _init(self):
}
env = self.env_creator(self.config["env_config"])
- preprocessor = ModelCatalog.get_preprocessor(self.registry, env)
+ from ray.rllib import models
+ preprocessor = models.ModelCatalog.get_preprocessor(
+ self.registry, env)
self.sess = utils.make_session(single_threaded=False)
self.policy = policies.GenericPolicy(
@@ -292,7 +294,7 @@ def _train(self):
"time_elapsed": step_tend - self.tstart
}
- result = TrainingResult(
+ result = ray.tune.result.TrainingResult(
episode_reward_mean=eval_returns.mean(),
episode_len_mean=eval_lengths.mean(),
timesteps_this_iter=noisy_lengths.sum(),
| [rllib] Deadlock error when running ES.
When running
```
python ray/python/ray/rllib/train.py --redis-address=172.31.7.72:6379 --env=Humanoid-v1 --run=ES --config='{"episodes_per_batch": 1000, "timesteps_per_batch": 10000, "num_workers": 400}'
```
on a cluster (100 machines), I see
```
Traceback (most recent call last):
File "/home/ubuntu/ray/python/ray/worker.py", line 1720, in fetch_and_execute_function_to_run
function = pickle.loads(serialized_function)
File "/home/ubuntu/ray/python/ray/rllib/__init__.py", line 17, in <module>
_register_all()
File "/home/ubuntu/ray/python/ray/rllib/__init__.py", line 14, in _register_all
register_trainable(key, get_agent_class(key))
File "/home/ubuntu/ray/python/ray/rllib/agent.py", line 229, in get_agent_class
from ray.rllib import es
File "/home/ubuntu/ray/python/ray/rllib/es/__init__.py", line 1, in <module>
from ray.rllib.es.es import (ESAgent, DEFAULT_CONFIG)
File "/home/ubuntu/ray/python/ray/rllib/es/es.py", line 19, in <module>
from ray.rllib.es import policies
File "<frozen importlib._bootstrap>", line 968, in _find_and_load
File "<frozen importlib._bootstrap>", line 168, in __enter__
File "<frozen importlib._bootstrap>", line 110, in acquire
_frozen_importlib._DeadlockError: deadlock detected by _ModuleLock('ray.rllib.es.policies') at 139937598221224
```
This likely has to do with recursive imports in rllib, probably related to #1716.
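A sketch of the deferred-import pattern used in the fix (the helper name here is made up for illustration):
```python
def make_preprocessor(registry, env):
    # Importing inside the function body means ray.rllib.models is only
    # loaded at call time, after all of the rllib modules have finished
    # initializing, which breaks the circular import chain.
    from ray.rllib import models
    return models.ModelCatalog.get_preprocessor(registry, env)
```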
| 2018-03-23T01:26:00 |
||
ray-project/ray | 1,783 | ray-project__ray-1783 | [
"1773"
] | 0fd4112354d433105a8945f51ffc2b25db16d169 | diff --git a/python/ray/worker.py b/python/ray/worker.py
--- a/python/ray/worker.py
+++ b/python/ray/worker.py
@@ -881,7 +881,8 @@ def _become_actor(self, task):
while key not in self.imported_actor_classes:
time.sleep(0.001)
- self.fetch_and_register_actor(key, task.required_resources(), self)
+ with self.lock:
+ self.fetch_and_register_actor(key, task.required_resources(), self)
def _wait_for_and_process_task(self, task):
"""Wait for a task to be ready and process the task.
| [rllib] Deadlock error when running ES.
When running
```
python ray/python/ray/rllib/train.py --redis-address=172.31.7.72:6379 --env=Humanoid-v1 --run=ES --config='{"episodes_per_batch": 1000, "timesteps_per_batch": 10000, "num_workers": 400}'
```
on a cluster (100 machines), I see
```
Traceback (most recent call last):
File "/home/ubuntu/ray/python/ray/worker.py", line 1720, in fetch_and_execute_function_to_run
function = pickle.loads(serialized_function)
File "/home/ubuntu/ray/python/ray/rllib/__init__.py", line 17, in <module>
_register_all()
File "/home/ubuntu/ray/python/ray/rllib/__init__.py", line 14, in _register_all
register_trainable(key, get_agent_class(key))
File "/home/ubuntu/ray/python/ray/rllib/agent.py", line 229, in get_agent_class
from ray.rllib import es
File "/home/ubuntu/ray/python/ray/rllib/es/__init__.py", line 1, in <module>
from ray.rllib.es.es import (ESAgent, DEFAULT_CONFIG)
File "/home/ubuntu/ray/python/ray/rllib/es/es.py", line 19, in <module>
from ray.rllib.es import policies
File "<frozen importlib._bootstrap>", line 968, in _find_and_load
File "<frozen importlib._bootstrap>", line 168, in __enter__
File "<frozen importlib._bootstrap>", line 110, in acquire
_frozen_importlib._DeadlockError: deadlock detected by _ModuleLock('ray.rllib.es.policies') at 139937598221224
```
This likely has to do with recursive imports in rllib, probably related to #1716.
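This PR takes a different mitigation: it serializes actor-class registration behind the worker lock, so concurrent threads cannot race on Python's per-module import locks while unpickling. A sketch, with a hypothetical module-level lock standing in for the worker's own lock:
```python
import threading

_registration_lock = threading.Lock()

def register_actor_class_safely(fetch_and_register_actor, *args):
    # Unpickling an actor class can trigger module imports; doing it under
    # a lock prevents two threads from deadlocking on the import machinery.
    with _registration_lock:
        return fetch_and_register_actor(*args)
```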
| The same happens with PPO:
```
File "/opt/conda/lib/python3.6/site-packages/ray/actor.py", line 284, in fetch_and_register_actor
unpickled_class = pickle.loads(pickled_class)
File "/opt/conda/lib/python3.6/site-packages/ray/rllib/ppo/__init__.py", line 1, in <module>
from ray.rllib.ppo.ppo import (PPOAgent, DEFAULT_CONFIG)
File "/opt/conda/lib/python3.6/site-packages/ray/rllib/ppo/ppo.py", line 17, in <module>
from ray.rllib.ppo.ppo_evaluator import PPOEvaluator
File "/opt/conda/lib/python3.6/site-packages/ray/rllib/ppo/ppo_evaluator.py", line 14, in <module>
from ray.rllib.optimizers import PolicyEvaluator, SampleBatch
File "<frozen importlib._bootstrap>", line 960, in _find_and_load
File "<frozen importlib._bootstrap>", line 151, in __enter__
File "<frozen importlib._bootstrap>", line 93, in acquire
_frozen_importlib._DeadlockError: deadlock detected by _ModuleLock('ray.rllib.optimizers') at 140202634781472
``` | 2018-03-26T23:05:26 |
|
ray-project/ray | 1,880 | ray-project__ray-1880 | [
"1870"
] | 15a668dd126664bb507294f4301c6b6d08e16d22 | diff --git a/python/ray/services.py b/python/ray/services.py
--- a/python/ray/services.py
+++ b/python/ray/services.py
@@ -1551,7 +1551,8 @@ def start_ray_node(node_ip_address,
redirect_output=redirect_output,
resources=resources,
plasma_directory=plasma_directory,
- huge_pages=huge_pages)
+ huge_pages=huge_pages,
+ use_raylet=use_raylet)
def start_ray_head(address_info=None,
| [XRay] GPU label support
Start two raylets:
```
ray start --head --use-raylet --num-gpus 1
```
```
ray start --redis-address 172.31.23.99:28629 --use-raylet --num-gpus 1
```
Execute this script:
```
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import ray
@ray.remote(num_gpus=1)
class Worker(object):
def __init__(self):
pass
def get_node_ip(self):
return ray.services.get_node_ip_address()
ray.init(redis_address="172.31.23.99:28629", use_raylet=True)
ws = [Worker.remote() for i in range(10)]
print("XXX", [ray.get(w.get_node_ip.remote()) for w in ws])
```
Output:
```
XXX ['172.31.23.99', '172.31.23.99', '172.31.23.99', '172.31.23.99', '172.31.23.99', '172.31.23.99', '172.31.23.99', '172.31.23.99', '172.31.23.99', '172.31.23.99']
```
| 2018-04-12T03:43:07 |
||
ray-project/ray | 1,892 | ray-project__ray-1892 | [
"1878"
] | 4b655b0ff6ae757f054f0b0dd2dcd76039456e3a | diff --git a/python/ray/experimental/state.py b/python/ray/experimental/state.py
--- a/python/ray/experimental/state.py
+++ b/python/ray/experimental/state.py
@@ -594,6 +594,10 @@ def dump_catapult_trace(self,
# TODO (hme): do something to correct slider here,
# slider should be correct to begin with, though.
task_table[task_id] = self.task_table(task_id)
+ task_table[task_id]["TaskSpec"]["Args"] = [
+ repr(arg)
+ for arg in task_table[task_id]["TaskSpec"]["Args"]
+ ]
except Exception as e:
print("Could not find task {}".format(task_id))
| diff --git a/test/runtest.py b/test/runtest.py
--- a/test/runtest.py
+++ b/test/runtest.py
@@ -2057,7 +2057,7 @@ def testDumpTraceFile(self):
ray.init(redirect_output=True)
@ray.remote
- def f():
+ def f(*xs):
return 1
@ray.remote
@@ -2068,7 +2068,16 @@ def __init__(self):
def method(self):
pass
- ray.get([f.remote() for _ in range(10)])
+ # We use a number of test objects because objects that are not JSON
+ # serializable caused problems in the past.
+ test_objects = [
+ 0, 0.5, "hi", b"hi",
+ ray.put(0),
+ np.zeros(3), [0], (0, ), {
+ 0: 0
+ }, True, False, None
+ ]
+ ray.get([f.remote(obj) for obj in test_objects])
actors = [Foo.remote() for _ in range(5)]
ray.get([actor.method.remote() for actor in actors])
ray.get([actor.method.remote() for actor in actors])
| UI timeline doesn't work when remote function has an argument of type "bytes".
To reproduce the issue, run the following
```python
import ray
ray.init()
@ray.remote
def f(x):
pass
f.remote(b'hello')
```
Then go to the UI and try to generate the timeline. I see the following
```
Collected profiles for 2 tasks.
Dumping task profile data to /var/folders/15/54jf68993rd7753c5fms424r0000gn/T/tmpm91862cq.json, this might take a while...
Creating JSON 6/2
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
~/Workspace/ray/python/ray/experimental/ui.py in handle_submit(sender)
390 breakdowns=breakdown,
391 obj_dep=obj_dep.value,
--> 392 task_dep=task_dep.value)
393 print("Opening html file in browser...")
394
~/Workspace/ray/python/ray/experimental/state.py in dump_catapult_trace(self, path, task_info, breakdowns, task_dep, obj_dep)
823 print("Creating JSON {}/{}".format(len(full_trace), len(task_info)))
824 with open(path, "w") as outfile:
--> 825 json.dump(full_trace, outfile)
826
827 def _get_times(self, data):
~/anaconda3/lib/python3.6/json/__init__.py in dump(obj, fp, skipkeys, ensure_ascii, check_circular, allow_nan, cls, indent, separators, default, sort_keys, **kw)
177 # could accelerate with writelines in some versions of Python, at
178 # a debuggability cost
--> 179 for chunk in iterable:
180 fp.write(chunk)
181
~/anaconda3/lib/python3.6/json/encoder.py in _iterencode(o, _current_indent_level)
426 yield _floatstr(o)
427 elif isinstance(o, (list, tuple)):
--> 428 yield from _iterencode_list(o, _current_indent_level)
429 elif isinstance(o, dict):
430 yield from _iterencode_dict(o, _current_indent_level)
~/anaconda3/lib/python3.6/json/encoder.py in _iterencode_list(lst, _current_indent_level)
323 else:
324 chunks = _iterencode(value, _current_indent_level)
--> 325 yield from chunks
326 if newline_indent is not None:
327 _current_indent_level -= 1
~/anaconda3/lib/python3.6/json/encoder.py in _iterencode_dict(dct, _current_indent_level)
402 else:
403 chunks = _iterencode(value, _current_indent_level)
--> 404 yield from chunks
405 if newline_indent is not None:
406 _current_indent_level -= 1
~/anaconda3/lib/python3.6/json/encoder.py in _iterencode_dict(dct, _current_indent_level)
402 else:
403 chunks = _iterencode(value, _current_indent_level)
--> 404 yield from chunks
405 if newline_indent is not None:
406 _current_indent_level -= 1
~/anaconda3/lib/python3.6/json/encoder.py in _iterencode_list(lst, _current_indent_level)
323 else:
324 chunks = _iterencode(value, _current_indent_level)
--> 325 yield from chunks
326 if newline_indent is not None:
327 _current_indent_level -= 1
~/anaconda3/lib/python3.6/json/encoder.py in _iterencode(o, _current_indent_level)
435 raise ValueError("Circular reference detected")
436 markers[markerid] = o
--> 437 o = _default(o)
438 yield from _iterencode(o, _current_indent_level)
439 if markers is not None:
~/anaconda3/lib/python3.6/json/encoder.py in default(self, o)
178 """
179 raise TypeError("Object of type '%s' is not JSON serializable" %
--> 180 o.__class__.__name__)
181
182 def encode(self, o):
TypeError: Object of type 'bytes' is not JSON serializable
```
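For reference, the failure and the `repr`-based workaround adopted in the patch above can be reproduced in isolation with just the standard library:
```python
import json

args = [b"hello", 1, "hi"]

# json.dumps(args) raises:
#   TypeError: Object of type 'bytes' is not JSON serializable

# Converting each argument to its repr makes the payload serializable,
# at the cost of stringifying every value:
print(json.dumps([repr(arg) for arg in args]))
# ["b'hello'", "1", "'hi'"]
```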
| 2018-04-13T02:57:34 |
|
ray-project/ray | 1,984 | ray-project__ray-1984 | [
"1983"
] | 558942648447743616b3bf669438893b8aaebaaf | diff --git a/python/ray/dataframe/index_metadata.py b/python/ray/dataframe/index_metadata.py
--- a/python/ray/dataframe/index_metadata.py
+++ b/python/ray/dataframe/index_metadata.py
@@ -103,8 +103,6 @@ def _get_index(self):
_IndexMetadata constructor for more details)
"""
if isinstance(self._coord_df_cache, ray.local_scheduler.ObjectID):
- if self._index_cache is None:
- self._index_cache = pd.RangeIndex(len(self))
return self._index_cache
else:
return self._coord_df_cache.index
@@ -128,6 +126,37 @@ def _set_index(self, new_index):
index = property(_get_index, _set_index)
+ def _get_index_cache(self):
+ """Get the cached Index object, which may sometimes be an OID.
+
+ This will ray.get the Index object out of the Ray store lazily, such
+ that it is not grabbed until it is needed in the driver. This layer of
+ abstraction is important for allowing this object to be instantiated
+ with a remote Index object.
+
+ Returns:
+ The Index object in _index_cache.
+ """
+ if self._index_cache_validator is None:
+ self._index_cache_validator = pd.RangeIndex(len(self))
+ elif isinstance(self._index_cache_validator,
+ ray.local_scheduler.ObjectID):
+ self._index_cache_validator = ray.get(self._index_cache_validator)
+
+ return self._index_cache_validator
+
+ def _set_index_cache(self, new_index):
+ """Sets the new index cache.
+
+ Args:
+ new_index: The Index to set the _index_cache to.
+ """
+ self._index_cache_validator = new_index
+
+ # _index_cache_validator is an extra layer of abstraction to allow the
+ # cache to accept ObjectIDs and ray.get them when needed.
+ _index_cache = property(_get_index_cache, _set_index_cache)
+
def coords_of(self, key):
"""Returns the coordinates (partition, index_within_partition) of the
provided key in the index. Can be called on its own or implicitly
diff --git a/python/ray/dataframe/utils.py b/python/ray/dataframe/utils.py
--- a/python/ray/dataframe/utils.py
+++ b/python/ray/dataframe/utils.py
@@ -112,7 +112,6 @@ def to_pandas(df):
else:
pd_df = pd.concat(ray.get(df._col_partitions),
axis=1)
- print(df.columns)
pd_df.index = df.index
pd_df.columns = df.columns
return pd_df
 | [DataFrame] _IndexMetadata cannot be instantiated with an OID.
This is a functionality regression introduced in #1965. Since we're caching the Index now, we need an additional layer of abstraction to allow `_IndexMetadata` objects to be instantiated with indexes created from remote tasks. We use this in `merge`.
PR coming shortly to fix it.
cc @Veryku
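In outline, the fix wraps `_index_cache` in a property that lazily resolves a Ray ObjectID (see `_get_index_cache` in the patch above). A minimal sketch of the pattern, with hypothetical names:
```python
import ray

class LazyIndexCache(object):
    """Accepts either a concrete index or a Ray ObjectID for its cache."""

    def __init__(self, cache):
        self._cache = cache

    def _get_cache(self):
        # ray.get the value out of the object store only when first needed.
        if isinstance(self._cache, ray.local_scheduler.ObjectID):
            self._cache = ray.get(self._cache)
        return self._cache

    def _set_cache(self, new_value):
        self._cache = new_value

    cache = property(_get_cache, _set_cache)
```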
| 2018-05-02T16:36:25 |
||
ray-project/ray | 2,081 | ray-project__ray-2081 | [
"998"
] | 7549209aeaf34b7a1b9dca802bd6262eba61fec8 | diff --git a/python/ray/signature.py b/python/ray/signature.py
--- a/python/ray/signature.py
+++ b/python/ray/signature.py
@@ -4,6 +4,7 @@
from collections import namedtuple
import funcsigs
+from funcsigs import Parameter
from ray.utils import is_cython
@@ -14,15 +15,16 @@
"""This class is used to represent a function signature.
Attributes:
- keyword_names: The names of the functions keyword arguments. This is used
- to test if an incorrect keyword argument has been passed to the
- function.
+ arg_names: A list containing the name of all arguments.
arg_defaults: A dictionary mapping from argument name to argument default
value. If the argument is not a keyword argument, the default value
will be funcsigs._empty.
arg_is_positionals: A dictionary mapping from argument name to a bool. The
bool will be true if the argument is a *args argument. Otherwise it
will be false.
+ keyword_names: A set containing the names of the keyword arguments.
+ Note most arguments in Python can be called as positional or keyword
+ arguments, so this overlaps (sometimes completely) with arg_names.
function_name: The name of the function whose signature is being
inspected. This is used for printing better error messages.
"""
@@ -85,16 +87,13 @@ def check_signature_supported(func, warn=False):
function_name = func.__name__
sig_params = get_signature_params(func)
- has_vararg_param = False
has_kwargs_param = False
- has_keyword_arg = False
+ has_kwonly_param = False
for keyword_name, parameter in sig_params:
- if parameter.kind == parameter.VAR_KEYWORD:
+ if parameter.kind == Parameter.VAR_KEYWORD:
has_kwargs_param = True
- if parameter.kind == parameter.VAR_POSITIONAL:
- has_vararg_param = True
- if parameter.default != funcsigs._empty:
- has_keyword_arg = True
+ if parameter.kind == Parameter.KEYWORD_ONLY:
+ has_kwonly_param = True
if has_kwargs_param:
message = ("The function {} has a **kwargs argument, which is "
@@ -103,12 +102,11 @@ def check_signature_supported(func, warn=False):
print(message)
else:
raise Exception(message)
- # Check if the user specified a variable number of arguments and any
- # keyword arguments.
- if has_vararg_param and has_keyword_arg:
- message = ("Function {} has a *args argument as well as a keyword "
- "argument, which is currently not supported."
- .format(function_name))
+
+ if has_kwonly_param:
+ message = ("The function {} has a keyword only argument "
+ "(defined after * or *args), which is currently "
+ "not supported.".format(function_name))
if warn:
print(message)
else:
@@ -136,20 +134,18 @@ def extract_signature(func, ignore_first=False):
func.__name__))
sig_params = sig_params[1:]
- # Extract the names of the keyword arguments.
- keyword_names = set()
- for keyword_name, parameter in sig_params:
- if parameter.default != funcsigs._empty:
- keyword_names.add(keyword_name)
-
# Construct the argument default values and other argument information.
arg_names = []
arg_defaults = []
arg_is_positionals = []
- for keyword_name, parameter in sig_params:
- arg_names.append(keyword_name)
+ keyword_names = set()
+ for arg_name, parameter in sig_params:
+ arg_names.append(arg_name)
arg_defaults.append(parameter.default)
arg_is_positionals.append(parameter.kind == parameter.VAR_POSITIONAL)
+ if parameter.kind == Parameter.POSITIONAL_OR_KEYWORD:
+ # Note KEYWORD_ONLY arguments currently unsupported.
+ keyword_names.add(arg_name)
return FunctionSignature(arg_names, arg_defaults, arg_is_positionals,
keyword_names, func.__name__)
@@ -189,8 +185,14 @@ def extend_args(function_signature, args, kwargs):
keyword_name, function_name))
# Fill in the remaining arguments.
- zipped_info = list(zip(arg_names, arg_defaults,
- arg_is_positionals))[len(args):]
+ for skipped_name in arg_names[0:len(args)]:
+ if skipped_name in kwargs:
+ raise Exception("Positional and keyword value provided for the "
+ "argument '{}' for the function '{}'".format(
+ keyword_name, function_name))
+
+ zipped_info = zip(arg_names, arg_defaults, arg_is_positionals)
+ zipped_info = list(zipped_info)[len(args):]
for keyword_name, default_value, is_positional in zipped_info:
if keyword_name in kwargs:
args.append(kwargs[keyword_name])
@@ -206,9 +208,8 @@ def extend_args(function_signature, args, kwargs):
"'{}' for the function '{}'.".format(
keyword_name, function_name))
- too_many_arguments = (len(args) > len(arg_names)
- and (len(arg_is_positionals) == 0
- or not arg_is_positionals[-1]))
+ no_positionals = len(arg_is_positionals) == 0 or not arg_is_positionals[-1]
+ too_many_arguments = len(args) > len(arg_names) and no_positionals
if too_many_arguments:
raise Exception("Too many arguments were passed to the function '{}'"
.format(function_name))
| diff --git a/python/ray/test/test_functions.py b/python/ray/test/test_functions.py
--- a/python/ray/test/test_functions.py
+++ b/python/ray/test/test_functions.py
@@ -68,16 +68,6 @@ def kwargs_throw_exception(**c):
except Exception:
kwargs_exception_thrown = True
-try:
-
- @ray.remote
- def varargs_and_kwargs_throw_exception(a, b="hi", *c):
- return "{} {} {}".format(a, b, c)
-
- varargs_and_kwargs_exception_thrown = False
-except Exception:
- varargs_and_kwargs_exception_thrown = True
-
# test throwing an exception
diff --git a/test/actor_test.py b/test/actor_test.py
--- a/test/actor_test.py
+++ b/test/actor_test.py
@@ -57,6 +57,9 @@ def get_values(self, arg0, arg1=2, arg2="b"):
self.assertEqual(
ray.get(actor.get_values.remote(0, arg2="d", arg1=0)),
(1, 2, "cd"))
+ self.assertEqual(
+ ray.get(actor.get_values.remote(arg2="d", arg1=0, arg0=2)),
+ (3, 2, "cd"))
# Make sure we get an exception if the constructor is called
# incorrectly.
@@ -66,6 +69,9 @@ def get_values(self, arg0, arg1=2, arg2="b"):
with self.assertRaises(Exception):
actor = Actor.remote(0, 1, 2, arg3=3)
+ with self.assertRaises(Exception):
+ actor = Actor.remote(0, arg0=1)
+
# Make sure we get an exception if the method is called incorrectly.
actor = Actor.remote(1)
with self.assertRaises(Exception):
diff --git a/test/runtest.py b/test/runtest.py
--- a/test/runtest.py
+++ b/test/runtest.py
@@ -529,6 +529,8 @@ def testKeywordArgs(self):
self.assertEqual(ray.get(x), "1 hi")
x = test_functions.keyword_fct1.remote(1, b="world")
self.assertEqual(ray.get(x), "1 world")
+ x = test_functions.keyword_fct1.remote(a=1, b="world")
+ self.assertEqual(ray.get(x), "1 world")
x = test_functions.keyword_fct2.remote(a="w", b="hi")
self.assertEqual(ray.get(x), "w hi")
@@ -545,6 +547,10 @@ def testKeywordArgs(self):
x = test_functions.keyword_fct3.remote(0, 1, c="w", d="hi")
self.assertEqual(ray.get(x), "0 1 w hi")
+ x = test_functions.keyword_fct3.remote(0, b=1, c="w", d="hi")
+ self.assertEqual(ray.get(x), "0 1 w hi")
+ x = test_functions.keyword_fct3.remote(a=0, b=1, c="w", d="hi")
+ self.assertEqual(ray.get(x), "0 1 w hi")
x = test_functions.keyword_fct3.remote(0, 1, d="hi", c="w")
self.assertEqual(ray.get(x), "0 1 w hi")
x = test_functions.keyword_fct3.remote(0, 1, c="w")
@@ -553,6 +559,8 @@ def testKeywordArgs(self):
self.assertEqual(ray.get(x), "0 1 hello hi")
x = test_functions.keyword_fct3.remote(0, 1)
self.assertEqual(ray.get(x), "0 1 hello world")
+ x = test_functions.keyword_fct3.remote(a=0, b=1)
+ self.assertEqual(ray.get(x), "0 1 hello world")
# Check that we cannot pass invalid keyword arguments to functions.
@ray.remote
@@ -573,6 +581,9 @@ def f2(x, y=0, z=0):
with self.assertRaises(Exception):
f2.remote(0, w=0)
+ with self.assertRaises(Exception):
+ f2.remote(3, x=3)
+
# Make sure we get an exception if too many arguments are passed in.
with self.assertRaises(Exception):
f2.remote(1, 2, 3, 4)
@@ -593,7 +604,6 @@ def testVariableNumberOfArgs(self):
self.assertEqual(ray.get(x), "1 2")
self.assertTrue(test_functions.kwargs_exception_thrown)
- self.assertTrue(test_functions.varargs_and_kwargs_exception_thrown)
@ray.remote
def f1(*args):
| Calling a remote function with a keyword argument fails
Not sure if this is a bug or a missing feature:
```
import ray
@ray.remote
def f(a):
return a+1
ray.init()
f.remote(1)
f.remote(a=1)
```
The last line produces this error message when using ray 0.2.0:
> Exception: The name 'a' is not a valid keyword argument for the function 'f'
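For illustration only (this is not Ray's implementation): standard-library introspection can already map keyword arguments back onto positional slots before a call is shipped off, which is roughly what the fix needs to do. (Ray itself uses `funcsigs` for Python 2 compatibility, but the idea is the same.)
```python
import inspect

def flatten_call_args(func, args, kwargs):
    """Resolve args/kwargs into one positional list, as a local call would."""
    bound = inspect.signature(func).bind(*args, **kwargs)  # TypeError on bad kwargs
    bound.apply_defaults()
    return list(bound.arguments.values())

def f(a):
    return a + 1

print(flatten_call_args(f, (1,), {}))      # [1]
print(flatten_call_args(f, (), {"a": 1}))  # [1] -- the case that fails above
```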
| Good catch, and thanks for pointing this out! We probably want to modify this so that remote functions can be called in exactly the same way as regular functions.
The exception is being raised in this line
https://github.com/ray-project/ray/blob/5c70faf76be0039a893052f15e4005733492e5d0/python/ray/signature.py#L149
And the logic is generally in the file https://github.com/ray-project/ray/blob/master/python/ray/signature.py.
+1
Hope to see this feature released soon!
@robertnishihara I can take a shot at this, if nobody is working on it?
@abishekk92 sounds great, I don't think anyone is working on it at the moment. | 2018-05-17T04:10:55 |
ray-project/ray | 2,090 | ray-project__ray-2090 | [
"2089",
"2089"
] | 3c245f66d446356b7fccde8cd57361af5bd0029e | diff --git a/python/ray/remote_function.py b/python/ray/remote_function.py
--- a/python/ray/remote_function.py
+++ b/python/ray/remote_function.py
@@ -14,15 +14,6 @@
DEFAULT_REMOTE_FUNCTION_MAX_CALLS = 0
-def in_ipython():
- """Return true if we are in an IPython interpreter and false otherwise."""
- try:
- __IPYTHON__
- return True
- except NameError:
- return False
-
-
def compute_function_id(function):
"""Compute an function ID for a function.
@@ -36,14 +27,14 @@ def compute_function_id(function):
# Include the function module and name in the hash.
function_id_hash.update(function.__module__.encode("ascii"))
function_id_hash.update(function.__name__.encode("ascii"))
- # If we are running a script or are in IPython, include the source code in
- # the hash. If we are in a regular Python interpreter we skip this part
- # because the source code is not accessible. If the function is a built-in
- # (e.g., Cython), the source code is not accessible.
- import __main__ as main
- if (hasattr(main, "__file__") or in_ipython()) \
- and inspect.isfunction(function):
- function_id_hash.update(inspect.getsource(function).encode("ascii"))
+ try:
+ # If we are running a script or are in IPython, include the source code
+ # in the hash.
+ source = inspect.getsource(function).encode("ascii")
+ function_id_hash.update(source)
+ except (IOError, OSError, TypeError):
+ # Source code may not be available: e.g. Cython or Python interpreter.
+ pass
# Compute the function ID.
function_id = function_id_hash.digest()
assert len(function_id) == 20
| Same function assigned different IDs on driver and worker
### System information
- **OS Platform and Distribution (e.g., Linux Ubuntu 16.04)**: Linux Ubuntu 17.10
- **Ray installed from (source or binary)**: Binary
- **Ray version**: 0.40
- **Python version**: Python 3.6.5
- **Exact command to reproduce**:
### Describe the problem
`ray/worker.py:compute_function_id` checks whether `__main__` has the `__file__` attribute to decide whether to include the source code in the function hash. When starting a Python executable using ```python -m mypkg.executable```, dependencies in mypkg can be imported before `__main__` is loaded, so this check fails (and the source code is excluded) in the driver. When the code actually runs, however, `__main__` exists. This can lead to function IDs differing between the driver and worker in nested parallelism settings.
The error I get is similar to that of #1446, so it may have a common cause. After diagnosing this, I've become convinced that it is best for executables to live at the top level rather than nested in the package structure; I'm posting this mostly as a warning to others, as it is far from obvious what causes this error.
I think we can make this more robust by using a try-except statement rather than guessing when the source code is present; I'll submit a PR.
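In outline, the more robust hashing might look like the following sketch (it mirrors the eventual patch above):
```python
import hashlib
import inspect

def compute_function_id(function):
    function_id_hash = hashlib.sha1()
    function_id_hash.update(function.__module__.encode("ascii"))
    function_id_hash.update(function.__name__.encode("ascii"))
    try:
        # Include the source whenever it is actually retrievable, instead of
        # guessing from whether __main__ has a __file__ attribute.
        source = inspect.getsource(function).encode("ascii")
        function_id_hash.update(source)
    except (IOError, OSError, TypeError):
        # Source may be unavailable, e.g. for Cython functions or in a bare
        # Python interpreter; fall back to module + name alone.
        pass
    return function_id_hash.digest()
```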
### Source code / logs
This bug is only triggered with a particular package structure, so this needs a few files. First, create a directory foo containing:
```python3
# __init__.py
from foo import bar
```
```python3
# main.py
import ray
from foo import bar
if __name__ == '__main__':
ray.init(redirect_worker_output=True)
bar.run()
```
```python3
# bar.py
import ray
@ray.remote
def f(x):
return x
def g(x):
return f.remote(x)
@ray.remote
def h(x):
return ray.get(g(x))
def run():
print(ray.get(h.remote(42)))
```
Running this produces:
```
python -m foo.main
Process STDOUT and STDERR is being redirected to /tmp/raylogs/.
Waiting for redis server at 127.0.0.1:39255 to respond...
Waiting for redis server at 127.0.0.1:20998 to respond...
Starting local scheduler with the following resources: {'CPU': 12, 'GPU': 2}.
======================================================================
View the web UI at http://localhost:8889/notebooks/ray_ui70409.ipynb?token=d6d894f5459ef32b1986d4cdf348574421d1787c71960fef
======================================================================
Traceback (most recent call last):
File "/home/adam/bin/anaconda3/envs/mypirl/lib/python3.6/runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "/home/adam/bin/anaconda3/envs/mypirl/lib/python3.6/runpy.py", line 85, in _run_code
exec(code, run_globals)
File "/home/adam/dev/ray/foo/main.py", line 7, in <module>
bar.run()
File "/home/adam/dev/ray/foo/bar.py", line 15, in run
print(ray.get(h.remote(42)))
Remote function foo.bar.h failed with:
Traceback (most recent call last):
File "/home/adam/dev/ray/foo/bar.py", line 12, in h
return ray.get(g(x))
File "/home/adam/dev/ray/foo/bar.py", line 8, in g
return f.remote(x)
File "/home/adam/bin/anaconda3/envs/mypirl/lib/python3.6/site-packages/ray/worker.py", line 2602, in func_call
return _submit(args=args, kwargs=kwargs)
File "/home/adam/bin/anaconda3/envs/mypirl/lib/python3.6/site-packages/ray/worker.py", line 2622, in _submit
resources=resources)
File "/home/adam/bin/anaconda3/envs/mypirl/lib/python3.6/site-packages/ray/worker.py", line 2430, in _submit_task
return global_worker.submit_task(function_id, *args, **kwargs)
File "/home/adam/bin/anaconda3/envs/mypirl/lib/python3.6/site-packages/ray/worker.py", line 580, in submit_task
self.task_driver_id.id()][function_id.id()]
KeyError: b'\x8d\x04\x11\xd9\xea\xd4\xbe\xad\x90\x0c\x0b\x10\x0f^\x0cq\xcc\xe3\xb1\x07'
```
| 2018-05-18T02:31:33 |
||
ray-project/ray | 2,183 | ray-project__ray-2183 | [
"2138"
] | 19d6ca0670bac9d9c7fdda3060e6f92705b04263 | diff --git a/python/ray/autoscaler/autoscaler.py b/python/ray/autoscaler/autoscaler.py
--- a/python/ray/autoscaler/autoscaler.py
+++ b/python/ray/autoscaler/autoscaler.py
@@ -2,10 +2,14 @@
from __future__ import division
from __future__ import print_function
+import copy
import json
import hashlib
+import math
import os
+import queue
import subprocess
+import threading
import time
import traceback
@@ -16,8 +20,8 @@
import yaml
from ray.ray_constants import AUTOSCALER_MAX_NUM_FAILURES, \
- AUTOSCALER_MAX_CONCURRENT_LAUNCHES, AUTOSCALER_UPDATE_INTERVAL_S, \
- AUTOSCALER_HEARTBEAT_TIMEOUT_S
+ AUTOSCALER_MAX_LAUNCH_BATCH, AUTOSCALER_MAX_CONCURRENT_LAUNCHES,\
+ AUTOSCALER_UPDATE_INTERVAL_S, AUTOSCALER_HEARTBEAT_TIMEOUT_S
from ray.autoscaler.node_provider import get_node_provider, \
get_default_config
from ray.autoscaler.updater import NodeUpdaterProcess
@@ -199,6 +203,64 @@ def _info(self):
}
+class NodeLauncher(threading.Thread):
+ def __init__(self, queue, pending, *args, **kwargs):
+ self.queue = queue
+ self.pending = pending
+ self.provider = None
+ super(NodeLauncher, self).__init__(*args, **kwargs)
+
+ def _launch_node(self, config, count):
+ if self.provider is None:
+ self.provider = get_node_provider(config["provider"],
+ config["cluster_name"])
+
+ tag_filters = {TAG_RAY_NODE_TYPE: "worker"}
+ before = self.provider.nodes(tag_filters=tag_filters)
+ launch_hash = hash_launch_conf(config["worker_nodes"], config["auth"])
+ self.provider.create_node(
+ config["worker_nodes"], {
+ TAG_RAY_NODE_NAME: "ray-{}-worker".format(
+ config["cluster_name"]),
+ TAG_RAY_NODE_TYPE: "worker",
+ TAG_RAY_NODE_STATUS: "uninitialized",
+ TAG_RAY_LAUNCH_CONFIG: launch_hash,
+ }, count)
+ after = self.provider.nodes(tag_filters=tag_filters)
+ if set(after).issubset(before):
+ print("Warning: No new nodes reported after node creation")
+
+ def run(self):
+ while True:
+ config, count = self.queue.get()
+ try:
+ self._launch_node(config, count)
+ finally:
+ self.pending.dec(count)
+
+
+class ConcurrentCounter():
+ def __init__(self):
+ self._value = 0
+ self._lock = threading.Lock()
+
+ def inc(self, count):
+ with self._lock:
+ self._value += count
+ return self._value
+
+ def dec(self, count):
+ with self._lock:
+ assert self._value >= count, "counter cannot go negative"
+ self._value -= count
+ return self._value
+
+ @property
+ def value(self):
+ with self._lock:
+ return self._value
+
+
class StandardAutoscaler(object):
"""The autoscaling control loop for a Ray cluster.
@@ -220,6 +282,7 @@ class StandardAutoscaler(object):
def __init__(self,
config_path,
load_metrics,
+ max_launch_batch=AUTOSCALER_MAX_LAUNCH_BATCH,
max_concurrent_launches=AUTOSCALER_MAX_CONCURRENT_LAUNCHES,
max_failures=AUTOSCALER_MAX_NUM_FAILURES,
process_runner=subprocess,
@@ -233,6 +296,7 @@ def __init__(self,
self.config["cluster_name"])
self.max_failures = max_failures
+ self.max_launch_batch = max_launch_batch
self.max_concurrent_launches = max_concurrent_launches
self.verbose_updates = verbose_updates
self.process_runner = process_runner
@@ -246,6 +310,17 @@ def __init__(self,
self.last_update_time = 0.0
self.update_interval_s = update_interval_s
+ # Node launchers
+ self.launch_queue = queue.Queue()
+ self.num_launches_pending = ConcurrentCounter()
+ max_batches = math.ceil(
+ max_concurrent_launches / float(max_launch_batch))
+ for i in range(int(max_batches)):
+ node_launcher = NodeLauncher(
+ queue=self.launch_queue, pending=self.num_launches_pending)
+ node_launcher.daemon = True
+ node_launcher.start()
+
# Expand local file_mounts to allow ~ in the paths. This can't be done
# earlier when the config is written since we might be on different
# platform and the expansion would result in wrong path.
@@ -278,6 +353,7 @@ def _update(self):
return
self.last_update_time = time.time()
+ num_pending = self.num_launches_pending.value
nodes = self.workers()
print(self.debug_string(nodes))
self.load_metrics.prune_active_ips(
@@ -318,9 +394,11 @@ def _update(self):
# Launch new nodes if needed
target_num = self.target_num_workers()
- if len(nodes) < target_num:
- self.launch_new_node(
- min(self.max_concurrent_launches, target_num - len(nodes)))
+ num_nodes = len(nodes) + num_pending
+ if num_nodes < target_num:
+ max_allowed = min(self.max_launch_batch,
+ self.max_concurrent_launches - num_pending)
+ self.launch_new_node(min(max_allowed, target_num - num_nodes))
print(self.debug_string())
# Process any completed updates
@@ -453,27 +531,19 @@ def can_update(self, node_id):
def launch_new_node(self, count):
print("StandardAutoscaler: Launching {} new nodes".format(count))
- num_before = len(self.workers())
- self.provider.create_node(
- self.config["worker_nodes"], {
- TAG_RAY_NODE_NAME: "ray-{}-worker".format(
- self.config["cluster_name"]),
- TAG_RAY_NODE_TYPE: "worker",
- TAG_RAY_NODE_STATUS: "uninitialized",
- TAG_RAY_LAUNCH_CONFIG: self.launch_hash,
- }, count)
- if len(self.workers()) <= num_before:
- print("Warning: Num nodes failed to increase after node creation")
+ self.num_launches_pending.inc(count)
+ config = copy.deepcopy(self.config)
+ self.launch_queue.put((config, count))
def workers(self):
- return self.provider.nodes(tag_filters={
- TAG_RAY_NODE_TYPE: "worker",
- })
+ return self.provider.nodes(tag_filters={TAG_RAY_NODE_TYPE: "worker"})
def debug_string(self, nodes=None):
if nodes is None:
nodes = self.workers()
suffix = ""
+ if self.num_launches_pending:
+ suffix += " ({} pending)".format(self.num_launches_pending.value)
if self.updaters:
suffix += " ({} updating)".format(len(self.updaters))
if self.num_failed_updates:
diff --git a/python/ray/ray_constants.py b/python/ray/ray_constants.py
--- a/python/ray/ray_constants.py
+++ b/python/ray/ray_constants.py
@@ -16,6 +16,11 @@ def env_integer(key, default):
# is a safety feature to prevent e.g. runaway node launches.
AUTOSCALER_MAX_NUM_FAILURES = env_integer("AUTOSCALER_MAX_NUM_FAILURES", 5)
+# The maximum number of nodes to launch in a single request.
+# Multiple requests may be made for this batch size, up to
+# the limit of AUTOSCALER_MAX_CONCURRENT_LAUNCHES.
+AUTOSCALER_MAX_LAUNCH_BATCH = env_integer("AUTOSCALER_MAX_LAUNCH_BATCH", 5)
+
# Max number of nodes to launch at a time.
AUTOSCALER_MAX_CONCURRENT_LAUNCHES = env_integer(
"AUTOSCALER_MAX_CONCURRENT_LAUNCHES", 10)
| diff --git a/test/autoscaler_test.py b/test/autoscaler_test.py
--- a/test/autoscaler_test.py
+++ b/test/autoscaler_test.py
@@ -4,6 +4,7 @@
import shutil
import tempfile
+import threading
import time
import unittest
import yaml
@@ -50,6 +51,8 @@ def __init__(self):
self.next_id = 0
self.throw = False
self.fail_creates = False
+ self.ready_to_create = threading.Event()
+ self.ready_to_create.set()
def nodes(self, tag_filters):
if self.throw:
@@ -75,6 +78,7 @@ def external_ip(self, node_id):
return self.mock_nodes[node_id].external_ip
def create_node(self, node_config, tags, count):
+ self.ready_to_create.wait()
if self.fail_creates:
return
for _ in range(count):
@@ -182,6 +186,20 @@ def waitFor(self, condition):
time.sleep(.1)
raise Exception("Timed out waiting for {}".format(condition))
+ def waitForNodes(self, expected, comparison=None, tag_filters={}):
+ MAX_ITER = 50
+ for i in range(MAX_ITER):
+ n = len(self.provider.nodes(tag_filters))
+ if comparison is None:
+ comparison = self.assertEqual
+ try:
+ comparison(n, expected)
+ return
+ except Exception:
+ if i == MAX_ITER - 1:
+ raise
+ time.sleep(.1)
+
def create_provider(self, config, cluster_name):
assert self.provider
return self.provider
@@ -241,9 +259,9 @@ def testScaleUp(self):
config_path, LoadMetrics(), max_failures=0, update_interval_s=0)
self.assertEqual(len(self.provider.nodes({})), 0)
autoscaler.update()
- self.assertEqual(len(self.provider.nodes({})), 2)
+ self.waitForNodes(2)
autoscaler.update()
- self.assertEqual(len(self.provider.nodes({})), 2)
+ self.waitForNodes(2)
def testTerminateOutdatedNodesGracefully(self):
config = SMALL_CLUSTER.copy()
@@ -254,16 +272,16 @@ def testTerminateOutdatedNodesGracefully(self):
self.provider.create_node({}, {TAG_RAY_NODE_TYPE: "worker"}, 10)
autoscaler = StandardAutoscaler(
config_path, LoadMetrics(), max_failures=0, update_interval_s=0)
- self.assertEqual(len(self.provider.nodes({})), 10)
+ self.waitForNodes(10)
# Gradually scales down to meet target size, never going too low
for _ in range(10):
autoscaler.update()
- self.assertLessEqual(len(self.provider.nodes({})), 5)
- self.assertGreaterEqual(len(self.provider.nodes({})), 4)
+ self.waitForNodes(5, comparison=self.assertLessEqual)
+ self.waitForNodes(4, comparison=self.assertGreaterEqual)
# Eventually reaches steady state
- self.assertEqual(len(self.provider.nodes({})), 5)
+ self.waitForNodes(5)
def testDynamicScaling(self):
config_path = self.write_config(SMALL_CLUSTER)
@@ -271,28 +289,115 @@ def testDynamicScaling(self):
autoscaler = StandardAutoscaler(
config_path,
LoadMetrics(),
+ max_launch_batch=5,
max_concurrent_launches=5,
max_failures=0,
update_interval_s=0)
- self.assertEqual(len(self.provider.nodes({})), 0)
+ self.waitForNodes(0)
autoscaler.update()
- self.assertEqual(len(self.provider.nodes({})), 2)
+ self.waitForNodes(2)
# Update the config to reduce the cluster size
new_config = SMALL_CLUSTER.copy()
new_config["max_workers"] = 1
self.write_config(new_config)
autoscaler.update()
- self.assertEqual(len(self.provider.nodes({})), 1)
+ self.waitForNodes(1)
# Update the config to reduce the cluster size
new_config["min_workers"] = 10
new_config["max_workers"] = 10
self.write_config(new_config)
autoscaler.update()
- self.assertEqual(len(self.provider.nodes({})), 6)
+ self.waitForNodes(6)
autoscaler.update()
- self.assertEqual(len(self.provider.nodes({})), 10)
+ self.waitForNodes(10)
+
+ def testDelayedLaunch(self):
+ config_path = self.write_config(SMALL_CLUSTER)
+ self.provider = MockProvider()
+ autoscaler = StandardAutoscaler(
+ config_path,
+ LoadMetrics(),
+ max_launch_batch=5,
+ max_concurrent_launches=5,
+ max_failures=0,
+ update_interval_s=0)
+ self.assertEqual(len(self.provider.nodes({})), 0)
+
+ # Update will try to create, but will block until we set the flag
+ self.provider.ready_to_create.clear()
+ autoscaler.update()
+ self.assertEqual(autoscaler.num_launches_pending.value, 2)
+ self.assertEqual(len(self.provider.nodes({})), 0)
+
+ # Set the flag, check it updates
+ self.provider.ready_to_create.set()
+ self.waitForNodes(2)
+ self.assertEqual(autoscaler.num_launches_pending.value, 0)
+
+ # Update the config to reduce the cluster size
+ new_config = SMALL_CLUSTER.copy()
+ new_config["max_workers"] = 1
+ self.write_config(new_config)
+ autoscaler.update()
+ self.assertEqual(len(self.provider.nodes({})), 1)
+
+ def testDelayedLaunchWithFailure(self):
+ config = SMALL_CLUSTER.copy()
+ config["min_workers"] = 10
+ config["max_workers"] = 10
+ config_path = self.write_config(config)
+ self.provider = MockProvider()
+ autoscaler = StandardAutoscaler(
+ config_path,
+ LoadMetrics(),
+ max_launch_batch=5,
+ max_concurrent_launches=8,
+ max_failures=0,
+ update_interval_s=0)
+ self.assertEqual(len(self.provider.nodes({})), 0)
+
+ # update() should launch a wave of 5 nodes (max_launch_batch)
+ # Force this first wave to block.
+ rtc1 = self.provider.ready_to_create
+ rtc1.clear()
+ autoscaler.update()
+ # Synchronization: wait for launchy thread to be blocked on rtc1
+ if hasattr(rtc1, '_cond'): # Python 3.5
+ waiters = rtc1._cond._waiters
+ else: # Python 2.7
+ waiters = rtc1._Event__cond._Condition__waiters
+ self.waitFor(lambda: len(waiters) == 1)
+ self.assertEqual(autoscaler.num_launches_pending.value, 5)
+ self.assertEqual(len(self.provider.nodes({})), 0)
+
+ # Call update() to launch a second wave of 3 nodes,
+ # as 5 + 3 = 8 = max_concurrent_launches.
+ # Make this wave complete immediately.
+ rtc2 = threading.Event()
+ self.provider.ready_to_create = rtc2
+ rtc2.set()
+ autoscaler.update()
+ self.waitForNodes(3)
+ self.assertEqual(autoscaler.num_launches_pending.value, 5)
+
+ # The first wave of 5 will now tragically fail
+ self.provider.fail_creates = True
+ rtc1.set()
+ self.waitFor(lambda: autoscaler.num_launches_pending.value == 0)
+ self.assertEqual(len(self.provider.nodes({})), 3)
+
+ # Retry the first wave, allowing it to succeed this time
+ self.provider.fail_creates = False
+ autoscaler.update()
+ self.waitForNodes(8)
+ self.assertEqual(autoscaler.num_launches_pending.value, 0)
+
+ # Final wave of 2 nodes
+ autoscaler.update()
+ self.waitForNodes(10)
+ self.assertEqual(autoscaler.num_launches_pending.value, 0)
def testUpdateThrottling(self):
config_path = self.write_config(SMALL_CLUSTER)
@@ -300,16 +405,22 @@ def testUpdateThrottling(self):
autoscaler = StandardAutoscaler(
config_path,
LoadMetrics(),
+ max_launch_batch=5,
max_concurrent_launches=5,
max_failures=0,
update_interval_s=10)
autoscaler.update()
- self.assertEqual(len(self.provider.nodes({})), 2)
+ self.waitForNodes(2)
+ self.assertEqual(autoscaler.num_launches_pending.value, 0)
new_config = SMALL_CLUSTER.copy()
new_config["max_workers"] = 1
self.write_config(new_config)
autoscaler.update()
- self.assertEqual(len(self.provider.nodes({})), 2) # not updated yet
+ # not updated yet
+ # note that node termination happens in the main thread, so
+ # we do not need to add any delay here before checking
+ self.assertEqual(len(self.provider.nodes({})), 2)
+ self.assertEqual(autoscaler.num_launches_pending.value, 0)
def testLaunchConfigChange(self):
config_path = self.write_config(SMALL_CLUSTER)
@@ -317,18 +428,18 @@ def testLaunchConfigChange(self):
autoscaler = StandardAutoscaler(
config_path, LoadMetrics(), max_failures=0, update_interval_s=0)
autoscaler.update()
- self.assertEqual(len(self.provider.nodes({})), 2)
+ self.waitForNodes(2)
# Update the config to change the node type
new_config = SMALL_CLUSTER.copy()
new_config["worker_nodes"]["InstanceType"] = "updated"
self.write_config(new_config)
- existing_nodes = set(self.provider.nodes({}))
+ self.provider.ready_to_create.clear()
for _ in range(5):
autoscaler.update()
- new_nodes = set(self.provider.nodes({}))
- self.assertEqual(len(new_nodes), 2)
- self.assertEqual(len(new_nodes.intersection(existing_nodes)), 0)
+ self.waitForNodes(0)
+ self.provider.ready_to_create.set()
+ self.waitForNodes(2)
def testIgnoresCorruptedConfig(self):
config_path = self.write_config(SMALL_CLUSTER)
@@ -336,15 +447,19 @@ def testIgnoresCorruptedConfig(self):
autoscaler = StandardAutoscaler(
config_path,
LoadMetrics(),
+ max_launch_batch=10,
max_concurrent_launches=10,
max_failures=0,
update_interval_s=0)
autoscaler.update()
+ self.waitForNodes(2)
# Write a corrupted config
self.write_config("asdf")
for _ in range(10):
autoscaler.update()
+ time.sleep(0.1)
+ self.assertEqual(autoscaler.num_launches_pending.value, 0)
self.assertEqual(len(self.provider.nodes({})), 2)
# New a good config again
@@ -353,7 +468,7 @@ def testIgnoresCorruptedConfig(self):
new_config["max_workers"] = 10
self.write_config(new_config)
autoscaler.update()
- self.assertEqual(len(self.provider.nodes({})), 10)
+ self.waitForNodes(10)
def testMaxFailures(self):
config_path = self.write_config(SMALL_CLUSTER)
@@ -372,12 +487,12 @@ def testLaunchNewNodeOnOutOfBandTerminate(self):
config_path, LoadMetrics(), max_failures=0, update_interval_s=0)
autoscaler.update()
autoscaler.update()
- self.assertEqual(len(self.provider.nodes({})), 2)
+ self.waitForNodes(2)
for node in self.provider.mock_nodes.values():
node.state = "terminated"
self.assertEqual(len(self.provider.nodes({})), 0)
autoscaler.update()
- self.assertEqual(len(self.provider.nodes({})), 2)
+ self.waitForNodes(2)
def testConfiguresNewNodes(self):
config_path = self.write_config(SMALL_CLUSTER)
@@ -393,7 +508,7 @@ def testConfiguresNewNodes(self):
update_interval_s=0)
autoscaler.update()
autoscaler.update()
- self.assertEqual(len(self.provider.nodes({})), 2)
+ self.waitForNodes(2)
for node in self.provider.mock_nodes.values():
node.state = "running"
assert len(
@@ -401,9 +516,7 @@ def testConfiguresNewNodes(self):
TAG_RAY_NODE_STATUS: "uninitialized"
})) == 2
autoscaler.update()
- self.waitFor(
- lambda: len(self.provider.nodes(
- {TAG_RAY_NODE_STATUS: "up-to-date"})) == 2)
+ self.waitForNodes(2, tag_filters={TAG_RAY_NODE_STATUS: "up-to-date"})
def testReportsConfigFailures(self):
config_path = self.write_config(SMALL_CLUSTER)
@@ -419,7 +532,7 @@ def testReportsConfigFailures(self):
update_interval_s=0)
autoscaler.update()
autoscaler.update()
- self.assertEqual(len(self.provider.nodes({})), 2)
+ self.waitForNodes(2)
for node in self.provider.mock_nodes.values():
node.state = "running"
assert len(
@@ -427,9 +540,8 @@ def testReportsConfigFailures(self):
TAG_RAY_NODE_STATUS: "uninitialized"
})) == 2
autoscaler.update()
- self.waitFor(
- lambda: len(self.provider.nodes(
- {TAG_RAY_NODE_STATUS: "update-failed"})) == 2)
+ self.waitForNodes(
+ 2, tag_filters={TAG_RAY_NODE_STATUS: "update-failed"})
def testConfiguresOutdatedNodes(self):
config_path = self.write_config(SMALL_CLUSTER)
@@ -445,13 +557,11 @@ def testConfiguresOutdatedNodes(self):
update_interval_s=0)
autoscaler.update()
autoscaler.update()
- self.assertEqual(len(self.provider.nodes({})), 2)
+ self.waitForNodes(2)
for node in self.provider.mock_nodes.values():
node.state = "running"
autoscaler.update()
- self.waitFor(
- lambda: len(self.provider.nodes(
- {TAG_RAY_NODE_STATUS: "up-to-date"})) == 2)
+ self.waitForNodes(2, tag_filters={TAG_RAY_NODE_STATUS: "up-to-date"})
runner.calls = []
new_config = SMALL_CLUSTER.copy()
new_config["worker_setup_commands"] = ["cmdX", "cmdY"]
@@ -472,33 +582,37 @@ def testScaleUpBasedOnLoad(self):
config_path, lm, max_failures=0, update_interval_s=0)
self.assertEqual(len(self.provider.nodes({})), 0)
autoscaler.update()
- self.assertEqual(len(self.provider.nodes({})), 2)
+ self.waitForNodes(2)
autoscaler.update()
+ self.assertEqual(autoscaler.num_launches_pending.value, 0)
self.assertEqual(len(self.provider.nodes({})), 2)
# Scales up as nodes are reported as used
lm.update("172.0.0.0", {"CPU": 2}, {"CPU": 0})
lm.update("172.0.0.1", {"CPU": 2}, {"CPU": 0})
autoscaler.update()
- self.assertEqual(len(self.provider.nodes({})), 4)
+ self.waitForNodes(4)
lm.update("172.0.0.2", {"CPU": 2}, {"CPU": 0})
autoscaler.update()
- self.assertEqual(len(self.provider.nodes({})), 6)
+ self.waitForNodes(6)
# Holds steady when load is removed
lm.update("172.0.0.0", {"CPU": 2}, {"CPU": 2})
lm.update("172.0.0.1", {"CPU": 2}, {"CPU": 2})
autoscaler.update()
+ self.assertEqual(autoscaler.num_launches_pending.value, 0)
self.assertEqual(len(self.provider.nodes({})), 6)
# Scales down as nodes become unused
lm.last_used_time_by_ip["172.0.0.0"] = 0
lm.last_used_time_by_ip["172.0.0.1"] = 0
autoscaler.update()
+ self.assertEqual(autoscaler.num_launches_pending.value, 0)
self.assertEqual(len(self.provider.nodes({})), 4)
lm.last_used_time_by_ip["172.0.0.2"] = 0
lm.last_used_time_by_ip["172.0.0.3"] = 0
autoscaler.update()
+ self.assertEqual(autoscaler.num_launches_pending.value, 0)
self.assertEqual(len(self.provider.nodes({})), 2)
def testRecoverUnhealthyWorkers(self):
@@ -515,12 +629,11 @@ def testRecoverUnhealthyWorkers(self):
node_updater_cls=NodeUpdaterThread,
update_interval_s=0)
autoscaler.update()
+ self.waitForNodes(2)
for node in self.provider.mock_nodes.values():
node.state = "running"
autoscaler.update()
- self.waitFor(
- lambda: len(self.provider.nodes(
- {TAG_RAY_NODE_STATUS: "up-to-date"})) == 2)
+ self.waitForNodes(2, tag_filters={TAG_RAY_NODE_STATUS: "up-to-date"})
# Mark a node as unhealthy
lm.last_heartbeat_time_by_ip["172.0.0.0"] = 0
 | [AWS Autoscaler] create_node hangs forever when AWS capacity is insufficient
In aws/node_provider.py:create_node, the call to ```self.ec2.create_instances``` can block for over half an hour, stopping all other Monitor activity. This has happened to me for spot requests when AWS has no capacity, but it could plausibly happen in other situations too.
A quick fix might be to reduce the number of retries that boto makes. They support a [global parameter](http://botocore.readthedocs.io/en/latest/reference/config.html), but it doesn't seem like it can be set per request.
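Concretely, the knob the linked docs describe can at least be set per client/resource (the values here are illustrative):
```python
import boto3
from botocore.config import Config

# Cap boto's built-in retries so a hopeless RunInstances call fails fast,
# instead of retrying for many minutes inside create_instances().
ec2 = boto3.resource(
    "ec2",
    region_name="us-west-2",
    config=Config(retries={"max_attempts": 2}),
)
```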
A more robust approach would be to use request_spot_instances, reserving create_instances only for on-demand instances; however, this would complicate the code significantly, as I think we'd need to poll for when the instances are created.
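A sketch of that spot-request flow with the boto3 client API (the AMI, subnet, and counts are hypothetical):
```python
import boto3

client = boto3.client("ec2", region_name="us-west-2")

resp = client.request_spot_instances(
    InstanceCount=2,
    Type="one-time",
    LaunchSpecification={
        "ImageId": "ami-12345678",      # hypothetical
        "InstanceType": "p2.16xlarge",
        "SubnetId": "subnet-0123abcd",  # hypothetical
    },
)
request_ids = [r["SpotInstanceRequestId"]
               for r in resp["SpotInstanceRequests"]]

# The request call returns immediately; fulfillment must then be polled:
status = client.describe_spot_instance_requests(
    SpotInstanceRequestIds=request_ids)
```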
Alternatively, we could switch to using {request,modify}_spot_fleet, which supports both on-demand and spot types. I think this is closest to how Amazon intends the API to be used, but it is a bit of a mismatch with the Ray autoscaling API. The main problem is in scaling down: I think we'd need to follow [this procedure](https://stackoverflow.com/questions/45870613/terminate-specific-instance-in-aws-spot-fleet) to terminate a specific node. We'd also still need to poll to discover new nodes.
## Logs
```
StandardAutoscaler: Launching 2 new nodes
StandardAutoscaler: Error during autoscaling: {} Traceback (most recent call last):
File "/home/ec2-user/anaconda3/envs/pirl/lib/python3.6/site-packages/ray/autoscaler/autoscaler.py", line 246, in update
self._update()
File "/home/ec2-user/anaconda3/envs/pirl/lib/python3.6/site-packages/ray/autoscaler/autoscaler.py", line 308, in _update
min(self.max_concurrent_launches, target_num - len(nodes)))
File "/home/ec2-user/anaconda3/envs/pirl/lib/python3.6/site-packages/ray/autoscaler/autoscaler.py", line 455, in launch_new_node
count)
File "/home/ec2-user/anaconda3/envs/pirl/lib/python3.6/site-packages/ray/autoscaler/aws/node_provider.py", line 113, in create_node
self.ec2.create_instances(**conf)
File "/home/ec2-user/anaconda3/envs/pirl/lib/python3.6/site-packages/boto3/resources/factory.py", line 520, in do_action
response = action(self, *args, **kwargs)
File "/home/ec2-user/anaconda3/envs/pirl/lib/python3.6/site-packages/boto3/resources/action.py", line 83, in __call__
response = getattr(parent.meta.client, operation_name)(**params)
File "/home/ec2-user/anaconda3/envs/pirl/lib/python3.6/site-packages/botocore/client.py", line 324, in _api_call
return self._make_api_call(operation_name, kwargs)
File "/home/ec2-user/anaconda3/envs/pirl/lib/python3.6/site-packages/botocore/client.py", line 622, in _make_api_call
raise error_class(parsed_response, operation_name)
botocore.exceptions.ClientError: An error occurred (InsufficientInstanceCapacity) when calling the RunInstances operation (reached max retries: 12): There is no Spot capacity available that matches your request.
StandardAutoscaler [2018-05-25 19:16:35.413360]: 0/2 target nodes
- NodeIdleSeconds: Min=0 Mean=0 Max=0
- NumNodesConnected: 1
- NumNodesUsed: 1.0
- ResourceUsage: 4.0/4.0 CPU, 4.0/4.0 GPU
StandardAutoscaler: Launching 2 new nodes
StandardAutoscaler [2018-05-25 19:17:15.009775]: 2/2 target nodes
```
| I looked into this a little more. Changing the number of retries in boto seems quite messy and I'd prefer not to go down that route. I think the easiest change to make would be to use the SpotInstanceRequest, and then block for some configurable period (e.g. a minute) until we know if the request is satisfied; if not, then cancel the request. This would require relaxing the assertion in autoscaler.py:launch_new_node that checks new nodes have in fact been launched, but I can't see any harm coming from this.
I'd be happy to implement this; however, I don't feel it's a great solution, so I would appreciate some feedback before starting.
Will this happen any time that I try to create more instances than my limits allow?
If you are able to help out with fixing this that'd be awesome! @ericl is the person to talk to.
Another approach would be to do the request in another thread, so we could still use CreateInstances. This would require some additional tracking logic for outstanding requests, but would avoid blocking the main loop.
Let me know what you think. Btw, the assertion that the number of nodes increases has already been removed, due to the possibility of a race with node removal.
@robertnishihara I think if you hit instance limits EC2 returns an error immediately rather than blocking (at least I did not encounter this problem and I have hit instance limits before). The issue can be reproduced fairly reliably by trying to create spot instances with limited capacity, e.g. a p2.16xlarge request in us-west-2b will almost certainly not be fulfilled.
@ericl I'm happy with a threading solution; I was reluctant to suggest it, as I hadn't seen it used elsewhere in Ray, but a search throws up some other use cases.
The main challenge I see with introducing concurrency here is that the autoscaler keeps trying to add nodes so long as the current number of nodes is less than the target. If the request blocks in a separate thread (but is eventually satisfied), we could end up launching a lot more nodes than was originally intended. Options I see are to either change the API and have the autoscaler expose the target number of nodes to the node provider, or to have the AWS node provider only process one instance launch request at a time (silently dropping the others). Thoughts?
@AdamGleave originally I was thinking of something like the following (see the sketch after this list):
1. Keep a "pending nodes" counter in the autoscaler which tracks outstanding instance creation requests.
2. When checking whether to create new nodes, compare num_running + num_pending to the target.
3. When checking whether to remove nodes, compare against just num_running.
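A minimal sketch of that bookkeeping (names are hypothetical; the merged patch's ConcurrentCounter ends up very close to this):
```python
import threading

class PendingCounter(object):
    """Thread-safe count of outstanding instance creation requests."""

    def __init__(self):
        self._value = 0
        self._lock = threading.Lock()

    def inc(self, n):
        with self._lock:
            self._value += n

    def dec(self, n):
        with self._lock:
            self._value -= n

    @property
    def value(self):
        with self._lock:
            return self._value

pending = PendingCounter()

def want_more_nodes(num_running, target):
    # (2) creation decisions count pending launches toward the target.
    return num_running + pending.value < target

def want_fewer_nodes(num_running, target):
    # (3) removal decisions compare against running nodes only.
    return num_running > target
```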
The idea to just avoid launching more than 1 instance creation request at a time could also work though, and might be simpler, since it involves only adding a single boolean to the autoscaler state. I would do this in the autoscaler logic itself instead of the node provider so that it will work with other providers (perhaps create a new node provider copy for the instance launcher thread, to avoid potential thread safety issues).
@ericl That approach sounds good. The only problem I can foresee is we won't cancel requests when we no longer need them (only terminating the node once it starts); this problem seems minor, however.
An alternative that avoids this problem (and does not require threading) would be to have create_node make a [RequestSpotInstance](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_RequestSpotInstances.html) call, which should always return immediately. nodes() would be changed to check both the running instances and unfulfilled spot instance requests. (There's potentially a race here, but it seems like it would happen very rarely, and would not have major consequences.) {internal,external}_ip would return None while the instance request is pending.
The only change I can see this requiring in the autoscaler would be having _update preferentially terminate nodes without a recorded last_used time. However, I'm not familiar with this part of the code base, so I may have overlooked some other implications.
Any thoughts on pros/cons of this compared to alternative approaches? I'll try and carve out some time later this week to implement whatever we decide on.
I think the main advantage of RequestSpotInstance would be the built-in cancellation. However, I think this is something we can add separately, for example, by cancelling the underlying HTTP request, or running the request in another process and killing the process to kill all its outstanding requests.
Another thing is that RequestSpotInstance is AWS-specific, and would not address the more general problem of instance creation requests hanging for other reasons. A threading / process based solution, in contrast, would work for GCE as well, and we can make it tolerant of hangs in the general case.
Could you elaborate on what other hangs in ```create_node``` we might expect to encounter? This might help me understand the design trade-offs better. Note we make (blocking) API calls in many other places, e.g. each call to nodes() retrieves all the currently running EC2 instances.
If there are some other cases where this is likely to arise then I'd favor the threading solution, but otherwise I feel a single-threaded approach is cleaner. I'm also not sure Amazon's API is clever enough to interpret closing the HTTP session as corresponding to cancelling the spot request.
@hartikainen do you know if the API call to start an instance in GCE ever blocks for a significant amount of time? The [documentation](https://cloud.google.com/compute/docs/instances/create-start-instance) doesn't make this clear. If so this is definitely an argument in favor of threading. If not then this problem feels like an AWS-specific quirk that we should fix in the AWS node provider.
I believe that even a normal instance request can take a while if there are insufficient instances available (e.g., I've seen this when requesting large numbers of on-demand GPU instances).
My preference here is to avoid additional AWS dependencies such as createSpotInstances, for the sake of longer-term maintainability; I'm willing to increase architectural complexity a bit to avoid these special cases.
@AdamGleave Good question. I don't know for sure how long GCE instance creation might take, but I do know that the blocking doesn't happen at the library level. The [instances.insert](https://cloud.google.com/compute/docs/reference/rest/v1/instances/insert) method returns an [operation](https://cloud.google.com/deployment-manager/docs/reference/latest/operations), which we then [explicitly wait to complete](https://github.com/ray-project/ray/blob/940c1b1fd1d5b168cdbab3e63f501d94eea23b4d/python/ray/autoscaler/gcp/config.py#L51). | 2018-06-03T06:46:26 |
ray-project/ray | 2,254 | ray-project__ray-2254 | [
"2177",
"2177"
] | fa0ade2bc59a50ab84e4a611611c334a0d808910 | diff --git a/python/ray/autoscaler/aws/config.py b/python/ray/autoscaler/aws/config.py
--- a/python/ray/autoscaler/aws/config.py
+++ b/python/ray/autoscaler/aws/config.py
@@ -156,33 +156,25 @@ def _configure_subnet(config):
"and trying this again. Note that the subnet must map public IPs "
"on instance launch.")
if "availability_zone" in config["provider"]:
- default_subnet = next((
- s for s in subnets
- if s.availability_zone == config["provider"]["availability_zone"]),
- None)
- if not default_subnet:
+ azs = config["provider"]["availability_zone"].split(',')
+ subnets = [s for s in subnets if s.availability_zone in azs]
+ if not subnets:
raise Exception(
"No usable subnets matching availability zone {} "
"found. Choose a different availability zone or try "
"manually creating an instance in your specified region "
"to populate the list of subnets and trying this again."
.format(config["provider"]["availability_zone"]))
- else:
- default_subnet = subnets[0]
-
- if "SubnetId" not in config["head_node"]:
- assert default_subnet.map_public_ip_on_launch, \
- "The chosen subnet must map nodes with public IPs on launch"
- config["head_node"]["SubnetId"] = default_subnet.id
- print("SubnetId not specified for head node, using {} in {}".format(
- default_subnet.id, default_subnet.availability_zone))
-
- if "SubnetId" not in config["worker_nodes"]:
- assert default_subnet.map_public_ip_on_launch, \
- "The chosen subnet must map nodes with public IPs on launch"
- config["worker_nodes"]["SubnetId"] = default_subnet.id
- print("SubnetId not specified for workers, using {} in {}".format(
- default_subnet.id, default_subnet.availability_zone))
+
+ subnet_ids = [s.subnet_id for s in subnets]
+ subnet_descr = [(s.subnet_id, s.availability_zone) for s in subnets]
+ if "SubnetIds" not in config["head_node"]:
+ config["head_node"]["SubnetIds"] = subnet_ids
+ print("SubnetIds not specified for head node, using ", subnet_descr)
+
+ if "SubnetIds" not in config["worker_nodes"]:
+ config["worker_nodes"]["SubnetIds"] = subnet_ids
+ print("SubnetId not specified for workers, using ", subnet_descr)
return config
@@ -193,8 +185,8 @@ def _configure_security_group(config):
return config # have user-defined groups
group_name = SECURITY_GROUP_TEMPLATE.format(config["cluster_name"])
- subnet = _get_subnet_or_die(config, config["worker_nodes"]["SubnetId"])
- security_group = _get_security_group(config, subnet.vpc_id, group_name)
+ vpc_id = _get_vpc_id_or_die(config, config["worker_nodes"]["SubnetIds"][0])
+ security_group = _get_security_group(config, vpc_id, group_name)
if security_group is None:
print("Creating new security group {}".format(group_name))
@@ -202,8 +194,8 @@ def _configure_security_group(config):
client.create_security_group(
Description="Auto-created security group for Ray workers",
GroupName=group_name,
- VpcId=subnet.vpc_id)
- security_group = _get_security_group(config, subnet.vpc_id, group_name)
+ VpcId=vpc_id)
+ security_group = _get_security_group(config, vpc_id, group_name)
assert security_group, "Failed to create security group"
if not security_group.ip_permissions:
@@ -236,7 +228,7 @@ def _configure_security_group(config):
return config
-def _get_subnet_or_die(config, subnet_id):
+def _get_vpc_id_or_die(config, subnet_id):
ec2 = _resource("ec2", config)
subnet = list(
ec2.subnets.filter(Filters=[{
@@ -245,7 +237,7 @@ def _get_subnet_or_die(config, subnet_id):
}]))
assert len(subnet) == 1, "Subnet not found"
subnet = subnet[0]
- return subnet
+ return subnet.vpc_id
def _get_security_group(config, vpc_id, group_name):
diff --git a/python/ray/autoscaler/aws/node_provider.py b/python/ray/autoscaler/aws/node_provider.py
--- a/python/ray/autoscaler/aws/node_provider.py
+++ b/python/ray/autoscaler/aws/node_provider.py
@@ -2,6 +2,8 @@
from __future__ import division
from __future__ import print_function
+import random
+
import boto3
from botocore.config import Config
@@ -35,6 +37,9 @@ def __init__(self, provider_config, cluster_name):
self.ec2 = boto3.resource(
"ec2", region_name=provider_config["region"], config=config)
+ # Try availability zones round-robin, starting from random offset
+ self.subnet_idx = random.randint(0, 100)
+
# Cache of node objects from the last nodes() call. This avoids
# excessive DescribeInstances requests.
self.cached_nodes = {}
@@ -121,9 +126,15 @@ def create_node(self, node_config, tags, count):
"Key": k,
"Value": v,
})
+ # SubnetIds is not a real config key: we must resolve to a
+ # single SubnetId before invoking the AWS API.
+ subnet_ids = conf.pop("SubnetIds")
+ subnet_id = subnet_ids[self.subnet_idx % len(subnet_ids)]
+ self.subnet_idx += 1
conf.update({
"MinCount": 1,
"MaxCount": count,
+ "SubnetId": subnet_id,
"TagSpecifications": conf.get("TagSpecifications", []) + [{
"ResourceType": "instance",
"Tags": tag_pairs,
diff --git a/python/ray/ray_constants.py b/python/ray/ray_constants.py
--- a/python/ray/ray_constants.py
+++ b/python/ray/ray_constants.py
@@ -8,7 +8,7 @@
def env_integer(key, default):
if key in os.environ:
- return int(os.environ(key))
+ return int(os.environ[key])
return default
| [AWS Autoscaler] Spread across availability zones
Feature enhancement: be able to specify multiple availability zones to launch worker nodes into. For spot instances, this would reduce the risk of all of your workers being terminated, and could also enable greater peak capacity. This is particularly valuable in regions such as us-east-1 that have seven availability zones.
It's not clear what the best way to do this is. Right now, we specify SubnetId (with it being filled in by `aws/config.py:_configure_subnet`) to peg it to a particular availability zone. [Spot fleet requests](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_SpotFleetLaunchSpecification.html) let you specify multiple SubnetIds, but [RunInstance](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_RunInstances.html) (which we currently use) does not. A reasonable policy might be to launch workers round-robin between availability zones. (This has the disadvantage of not favoring zones with lower prices, but now that Amazon makes spot prices vary only gradually over time, this doesn't seem like a big loss.)
Note there is a downside in terms of increased latency to having nodes in different availability zones, so there are probably better allocation strategies than round-robin.
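For illustration, a minimal sketch of the round-robin policy the patch above adopts in the node provider (the class name is hypothetical; the random starting offset keeps fresh clusters from all landing in the first subnet):

```python
import random

class SubnetRoundRobin:
    def __init__(self, subnet_ids):
        self.subnet_ids = subnet_ids
        # Start from a random offset, as in AWSNodeProvider.__init__ above.
        self.idx = random.randint(0, 100)

    def next_subnet(self):
        subnet = self.subnet_ids[self.idx % len(self.subnet_ids)]
        self.idx += 1
        return subnet
```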
| 2018-06-14T23:31:25 |
||
ray-project/ray | 2,274 | ray-project__ray-2274 | [
"2264"
] | 51744459f3abafc66e08cd918a22eca4d1cea6b6 | diff --git a/python/ray/dataframe/indexing.py b/python/ray/dataframe/indexing.py
--- a/python/ray/dataframe/indexing.py
+++ b/python/ray/dataframe/indexing.py
@@ -370,7 +370,11 @@ def _compute_enlarge_labels(self, locator, base_index):
Returns:
nan_labels: The labels needs to be added
"""
- locator_as_index = pandas.Index(locator)
+ # base_index_type can be pd.Index or pd.DatetimeIndex
+ # depending on user input and pandas behavior
+ # See issue #2264
+ base_index_type = type(base_index)
+ locator_as_index = base_index_type(locator)
nan_labels = locator_as_index.difference(base_index)
common_labels = locator_as_index.intersection(base_index)
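The crux of the fix in isolation: construct the locator index with the same class as the base index, so a `DatetimeIndex` base yields a `DatetimeIndex` locator. A small self-contained check of that behavior (plain pandas, no Ray required):

```python
import pandas

base_index = pandas.to_datetime(["2015-01-10 19:01:44"])  # a DatetimeIndex
locator = ["2015-01-10 19:01:44"]
locator_as_index = type(base_index)(locator)  # DatetimeIndex, not plain Index
assert isinstance(locator_as_index, pandas.DatetimeIndex)
```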
| [Pandas on Ray] Cannot .loc by toDateTime Index
Hi @devin-petersohn, I retried my example on the new machine @pcmoritz gave me, and I ran into an issue where I could not run .loc by index after I converted the index to DateTime. The same code works in Pandas.
This time, I did figure out how to print out the version of Ray that is installed on the new machine, so I hope you can let me know if this is indeed an old version of Ray or if this is a legitimate bug.
### System information
- **OS Platform and Distribution (e.g., Linux Ubuntu 16.04)**: Ubuntu 16.04.4 LTS (Xenial Xerus)
- **Ray installed from (source or binary)**: Unknown. Please ask Philipp Moritz; he was the one who installed Ray for me.
- **Ray version**: 0.4.0
- **Python version**: Anaconda Python 3.6.4
- **Exact command to reproduce**:
```
import ray.dataframe as pd
# import pandas as pd
df = pd.read_csv("yellow_tripdata_2015-01-01.csv")
df = df.set_index('tpep_pickup_datetime')
df.index = pd.to_datetime(df.index) # Ray does fine without this line
df.loc['2015-01-10 19:01:44'] #Error here
```
### Describe the problem
I get the traceback copied below (in the **Source code/logs** section) when running the line `df.loc['2015-01-10 19:01:44']`. It complains that the Index is not unique; indeed, there are multiple rows in the data that have the same timestamp for that column, and the index is not monotonic either.
However, in Pandas, this line normally returns a DataFrame of the multiple rows that match the `2015-01-10 19:01:44` DateTime index. Ray correctly returns multiple rows if the index was not converted into DateTime first.
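For reference, plain pandas behaves like this on a non-unique `DatetimeIndex` (a minimal, self-contained repro of the expected behavior; the column name and values are made up):

```python
import pandas as pd

df = pd.DataFrame({"fare": [5.0, 7.5]},
                  index=pd.to_datetime(["2015-01-10 19:01:44",
                                        "2015-01-10 19:01:44"]))
print(df.loc["2015-01-10 19:01:44"])  # returns both rows, no error
```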
In case it helps, I'm under the impression this problem may have more to do with `pd.to_datetime` than with `DataFrame.loc` in this case. I once saw `pd.to_datetime` return all rows as `1970-01-01 00:00:00` when converting this column to DateTime (though I haven't reproduced that error since then), so I have seen funny behavior from `to_datetime` before in Pandas on Ray.
A similar error may have existed in an older version of Pandas, but it was fixed in this Pandas GitHub issue: https://github.com/pandas-dev/pandas/issues/3659
Also, this Pandas GitHub issue mentions in the reply chain that the correct behavior for .loc with non-unique indices is a DataFrame of matching rows: https://github.com/pandas-dev/pandas/issues/9466
Additionally, I noticed when running Ray on this new machine that the memory errors seem **much** worse than before. In my original email example, I was using the January dataset (1/12 of all of 2015 data), and was able to run several parts in succession.
However, this time when running Ray, it would hang and spam `ObjectID already exists in the object store` error messages immediately, on any next line, right after `pd.read_csv` (with my 1/12 data, running **isolated** from pandas or anything else). I wouldn't even be able to run `df.head()` right after reading in the csv this time around.
I had to split my January dataset in half, and use only 1/24 of the 2015 data, in order to just run this test. It may have just been that the new machine has less memory, but I do hope that I am indeed running from the right version of Ray.
You can get the source data csv I used at this page: http://www.nyc.gov/html/tlc/html/about/trip_record_data.shtml under 2015 > January > Yellow. Note that this csv file is 2 GB.
### Source code / logs
Traceback returned at `df.loc['2015-01-10 19:01:44']` line:
```
Traceback (most recent call last):
File "/home/ubuntu/anaconda3/lib/python3.6/site-packages/pandas/core/indexes/base.py", line 2319, in intersection
indexer = Index(other._values).get_indexer(self._values)
File "/home/ubuntu/anaconda3/lib/python3.6/site-packages/pandas/core/indexes/base.py", line 2684, in get_indexer
tolerance=tolerance)
File "/home/ubuntu/anaconda3/lib/python3.6/site-packages/pandas/core/indexes/base.py", line 2687, in get_indexer
raise InvalidIndexError('Reindexing only valid with uniquely'
pandas.core.indexes.base.InvalidIndexError: Reindexing only valid with uniquely valued Index objects
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/ubuntu/ray/python/ray/dataframe/indexing.py", line 286, in __getitem__
self._handle_enlargement(row_loc, col_loc)
File "/home/ubuntu/ray/python/ray/dataframe/indexing.py", line 313, in _handle_enlargement
new_meta = self._enlarge_axis(loc, axis=axis)
File "/home/ubuntu/ray/python/ray/dataframe/indexing.py", line 335, in _enlarge_axis
nan_labels = self._compute_enlarge_labels(locator, major_meta.index)
File "/home/ubuntu/ray/python/ray/dataframe/indexing.py", line 376, in _compute_enlarge_labels
common_labels = locator_as_index.intersection(base_index)
File "/home/ubuntu/anaconda3/lib/python3.6/site-packages/pandas/core/indexes/base.py", line 2309, in intersection
return this.intersection(other)
File "/home/ubuntu/anaconda3/lib/python3.6/site-packages/pandas/core/indexes/base.py", line 2324, in intersection
Index(other._values).get_indexer_non_unique(self._values)[0])
File "/home/ubuntu/anaconda3/lib/python3.6/site-packages/pandas/core/indexes/base.py", line 2816, in get_indexer_non_unique
indexer, missing = self._engine.get_indexer_non_unique(tgt_values)
File "pandas/_libs/index.pyx", line 310, in pandas._libs.index.IndexEngine.get_indexer_non_unique
TypeError: 'NoneType' object is not iterable
```
| Thanks @crystalzyan!
@simon-mo Do you know why we are getting a `NoneType` error?
@kunalgosar Can you check the `to_datetime` problems? | 2018-06-19T22:43:10 |
|
ray-project/ray | 2,283 | ray-project__ray-2283 | [
"2253"
] | 18ee044f033d044c8604af24fb5f0173c3e75bfd | diff --git a/python/ray/actor.py b/python/ray/actor.py
--- a/python/ray/actor.py
+++ b/python/ray/actor.py
@@ -884,13 +884,15 @@ def _serialization_helper(self, ray_forking):
"actor_id": self._ray_actor_id.id(),
"class_name": self._ray_class_name,
"actor_forks": self._ray_actor_forks,
- "actor_cursor": self._ray_actor_cursor.id(),
+ "actor_cursor": self._ray_actor_cursor.id()
+ if self._ray_actor_cursor is not None else None,
"actor_counter": 0, # Reset the actor counter.
"actor_method_names": self._ray_actor_method_names,
"method_signatures": self._ray_method_signatures,
"method_num_return_vals": self._ray_method_num_return_vals,
"actor_creation_dummy_object_id": self.
- _ray_actor_creation_dummy_object_id.id(),
+ _ray_actor_creation_dummy_object_id.id()
+ if self._ray_actor_creation_dummy_object_id is not None else None,
"actor_method_cpus": self._ray_actor_method_cpus,
"actor_driver_id": self._ray_actor_driver_id.id(),
"previous_actor_handle_id": self._ray_actor_handle_id.id()
@@ -929,12 +931,14 @@ def _deserialization_helper(self, state, ray_forking):
self.__init__(
ray.ObjectID(state["actor_id"]),
state["class_name"],
- ray.ObjectID(state["actor_cursor"]),
+ ray.ObjectID(state["actor_cursor"])
+ if state["actor_cursor"] is not None else None,
state["actor_counter"],
state["actor_method_names"],
state["method_signatures"],
state["method_num_return_vals"],
- ray.ObjectID(state["actor_creation_dummy_object_id"]),
+ ray.ObjectID(state["actor_creation_dummy_object_id"])
+ if state["actor_creation_dummy_object_id"] is not None else None,
state["actor_method_cpus"],
actor_driver_id,
actor_handle_id=actor_handle_id,
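The guard introduced above, in isolation: under `PYTHON_MODE` the cursor and dummy-object fields are legitimately `None`, so (de)serialization must pass `None` through. A hedged sketch with illustrative helper names (not Ray API):

```python
def dump_maybe_id(object_id):
    # None stays None; otherwise serialize the raw id bytes.
    return object_id.id() if object_id is not None else None

def load_maybe_id(raw_id):
    import ray
    return ray.ObjectID(raw_id) if raw_id is not None else None
```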
| diff --git a/test/runtest.py b/test/runtest.py
--- a/test/runtest.py
+++ b/test/runtest.py
@@ -1189,6 +1189,16 @@ def modify_and_set_array(self, array):
test_array[0] = -1
assert_equal(test_array, test_actor.get_array.remote())
+ # Check that actor handles work in Python mode.
+
+ @ray.remote
+ def use_actor_handle(handle):
+ array = np.ones(10)
+ handle.set_array.remote(array)
+ assert np.alltrue(array == ray.get(handle.get_array.remote()))
+
+ ray.get(use_actor_handle.remote(test_actor))
+
class ResourcesTest(unittest.TestCase):
def tearDown(self):
| Actor handles don't work when running in local mode.
```python
import ray
ray.init(driver_mode=ray.PYTHON_MODE)
@ray.remote
class Actor1:
def method(self):
pass
@ray.remote
class Actor2:
def __init__(self, a):
self.a = a
def method(self):
ray.get(self.a.method.remote())
a1 = Actor1.remote()
a2 = Actor2.remote(a1)
ray.get(a2.method.remote())
```
Fails with an error like the following (if you're not using the master branch you'll probably see an error like "Exception: Actor objects cannot be pickled").
```
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-1-8b7c665c4619> in <module>()
16
17 a1 = Actor1.remote()
---> 18 a2 = Actor2.remote(a1)
19 ray.get(a2.method.remote())
~/Workspace/ray/python/ray/actor.py in remote(self, *args, **kwargs)
556 A handle to the newly created actor.
557 """
--> 558 return self._submit(args=args, kwargs=kwargs)
559
560 def _submit(self,
~/Workspace/ray/python/ray/actor.py in _submit(self, args, kwargs, num_cpus, num_gpus, resources)
632 # Call __init__ as a remote function.
633 if "__init__" in actor_handle._ray_actor_method_names:
--> 634 actor_handle.__init__.remote(*args, **kwargs)
635 else:
636 if len(args) != 0 or len(kwargs) != 0:
~/Workspace/ray/python/ray/actor.py in remote(self, *args, **kwargs)
454
455 def remote(self, *args, **kwargs):
--> 456 return self._submit(args, kwargs)
457
458 def _submit(self, args, kwargs, num_return_vals=None):
~/Workspace/ray/python/ray/actor.py in _submit(self, args, kwargs, num_return_vals)
465 kwargs=kwargs,
466 num_return_vals=num_return_vals,
--> 467 dependency=self._actor._ray_actor_cursor)
468
469
~/Workspace/ray/python/ray/actor.py in _actor_method_call(self, method_name, args, kwargs, num_return_vals, dependency)
774 if worker.mode == ray.PYTHON_MODE:
775 return getattr(worker.actors[self._ray_actor_id],
--> 776 method_name)(*copy.deepcopy(args))
777
778 # Add the execution dependency.
/anaconda3/lib/python3.6/copy.py in deepcopy(x, memo, _nil)
148 copier = _deepcopy_dispatch.get(cls)
149 if copier:
--> 150 y = copier(x, memo)
151 else:
152 try:
/anaconda3/lib/python3.6/copy.py in _deepcopy_list(x, memo, deepcopy)
213 append = y.append
214 for a in x:
--> 215 append(deepcopy(a, memo))
216 return y
217 d[list] = _deepcopy_list
/anaconda3/lib/python3.6/copy.py in deepcopy(x, memo, _nil)
167 reductor = getattr(x, "__reduce_ex__", None)
168 if reductor:
--> 169 rv = reductor(4)
170 else:
171 reductor = getattr(x, "__reduce__", None)
~/Workspace/ray/python/ray/actor.py in __getstate__(self)
944 def __getstate__(self):
945 """This code path is used by pickling but not by Ray forking."""
--> 946 return self._serialization_helper(False)
947
948 def __setstate__(self, state):
~/Workspace/ray/python/ray/actor.py in _serialization_helper(self, ray_forking)
885 "class_name": self._ray_class_name,
886 "actor_forks": self._ray_actor_forks,
--> 887 "actor_cursor": self._ray_actor_cursor.id(),
888 "actor_counter": 0, # Reset the actor counter.
889 "actor_method_names": self._ray_actor_method_names,
AttributeError: 'NoneType' object has no attribute 'id'
```
cc @jsuarez5341
| 2018-06-20T21:53:31 |
|
ray-project/ray | 2,320 | ray-project__ray-2320 | [
"2317",
"2317"
] | b4dff9f933ce03619fc22b3d6456a6531eed4dd1 | diff --git a/python/ray/autoscaler/autoscaler.py b/python/ray/autoscaler/autoscaler.py
--- a/python/ray/autoscaler/autoscaler.py
+++ b/python/ray/autoscaler/autoscaler.py
@@ -359,6 +359,7 @@ def _update(self):
print(self.debug_string(nodes))
self.load_metrics.prune_active_ips(
[self.provider.internal_ip(node_id) for node_id in nodes])
+ target_workers = self.target_num_workers()
# Terminate any idle or out of date nodes
last_used = self.load_metrics.last_used_time_by_ip
@@ -367,7 +368,7 @@ def _update(self):
for node_id in nodes:
node_ip = self.provider.internal_ip(node_id)
if node_ip in last_used and last_used[node_ip] < horizon and \
- len(nodes) - num_terminated > self.config["min_workers"]:
+ len(nodes) - num_terminated > target_workers:
num_terminated += 1
print("StandardAutoscaler: Terminating idle node: "
"{}".format(node_id))
@@ -394,12 +395,12 @@ def _update(self):
print(self.debug_string(nodes))
# Launch new nodes if needed
- target_num = self.target_num_workers()
- num_nodes = len(nodes) + num_pending
- if num_nodes < target_num:
+ num_workers = len(nodes) + num_pending
+ if num_workers < target_workers:
max_allowed = min(self.max_launch_batch,
self.max_concurrent_launches - num_pending)
- self.launch_new_node(min(max_allowed, target_num - num_nodes))
+ num_launches = min(max_allowed, target_workers - num_workers)
+ self.launch_new_node(num_launches)
print(self.debug_string())
# Process any completed updates
@@ -453,7 +454,8 @@ def reload_config(self, errors_fatal=False):
def target_num_workers(self):
target_frac = self.config["target_utilization_fraction"]
cur_used = self.load_metrics.approx_workers_used()
- ideal_num_workers = int(np.ceil(cur_used / float(target_frac)))
+ ideal_num_nodes = int(np.ceil(cur_used / float(target_frac)))
+ ideal_num_workers = ideal_num_nodes - 1 # subtract 1 for head node
return min(self.config["max_workers"],
max(self.config["min_workers"], ideal_num_workers))
| diff --git a/test/autoscaler_test.py b/test/autoscaler_test.py
--- a/test/autoscaler_test.py
+++ b/test/autoscaler_test.py
@@ -11,6 +11,7 @@
import copy
import ray
+import ray.services as services
from ray.autoscaler.autoscaler import StandardAutoscaler, LoadMetrics, \
fillout_defaults, validate_config
from ray.autoscaler.tags import TAG_RAY_NODE_TYPE, TAG_RAY_NODE_STATUS
@@ -572,7 +573,7 @@ def testConfiguresOutdatedNodes(self):
def testScaleUpBasedOnLoad(self):
config = SMALL_CLUSTER.copy()
- config["min_workers"] = 2
+ config["min_workers"] = 1
config["max_workers"] = 10
config["target_utilization_fraction"] = 0.5
config_path = self.write_config(config)
@@ -582,38 +583,73 @@ def testScaleUpBasedOnLoad(self):
config_path, lm, max_failures=0, update_interval_s=0)
self.assertEqual(len(self.provider.nodes({})), 0)
autoscaler.update()
- self.waitForNodes(2)
+ self.waitForNodes(1)
autoscaler.update()
self.assertEqual(autoscaler.num_launches_pending.value, 0)
- self.assertEqual(len(self.provider.nodes({})), 2)
+ self.assertEqual(len(self.provider.nodes({})), 1)
# Scales up as nodes are reported as used
- lm.update("172.0.0.0", {"CPU": 2}, {"CPU": 0})
- lm.update("172.0.0.1", {"CPU": 2}, {"CPU": 0})
+ local_ip = services.get_node_ip_address()
+ lm.update(local_ip, {"CPU": 2}, {"CPU": 0}) # head
+ lm.update("172.0.0.0", {"CPU": 2}, {"CPU": 0}) # worker 1
autoscaler.update()
- self.waitForNodes(4)
- lm.update("172.0.0.2", {"CPU": 2}, {"CPU": 0})
+ self.waitForNodes(3)
+ lm.update("172.0.0.1", {"CPU": 2}, {"CPU": 0})
autoscaler.update()
- self.waitForNodes(6)
+ self.waitForNodes(5)
# Holds steady when load is removed
lm.update("172.0.0.0", {"CPU": 2}, {"CPU": 2})
lm.update("172.0.0.1", {"CPU": 2}, {"CPU": 2})
autoscaler.update()
self.assertEqual(autoscaler.num_launches_pending.value, 0)
- self.assertEqual(len(self.provider.nodes({})), 6)
+ self.assertEqual(len(self.provider.nodes({})), 5)
# Scales down as nodes become unused
lm.last_used_time_by_ip["172.0.0.0"] = 0
lm.last_used_time_by_ip["172.0.0.1"] = 0
autoscaler.update()
self.assertEqual(autoscaler.num_launches_pending.value, 0)
- self.assertEqual(len(self.provider.nodes({})), 4)
+ self.assertEqual(len(self.provider.nodes({})), 3)
lm.last_used_time_by_ip["172.0.0.2"] = 0
lm.last_used_time_by_ip["172.0.0.3"] = 0
autoscaler.update()
self.assertEqual(autoscaler.num_launches_pending.value, 0)
- self.assertEqual(len(self.provider.nodes({})), 2)
+ self.assertEqual(len(self.provider.nodes({})), 1)
+
+ def testDontScaleBelowTarget(self):
+ config = SMALL_CLUSTER.copy()
+ config["min_workers"] = 0
+ config["max_workers"] = 2
+ config["target_utilization_fraction"] = 0.5
+ config_path = self.write_config(config)
+ self.provider = MockProvider()
+ lm = LoadMetrics()
+ autoscaler = StandardAutoscaler(
+ config_path, lm, max_failures=0, update_interval_s=0)
+ self.assertEqual(len(self.provider.nodes({})), 0)
+ autoscaler.update()
+ self.assertEqual(autoscaler.num_launches_pending.value, 0)
+ self.assertEqual(len(self.provider.nodes({})), 0)
+
+ # Scales up as nodes are reported as used
+ local_ip = services.get_node_ip_address()
+ lm.update(local_ip, {"CPU": 2}, {"CPU": 0}) # head
+ # 1.0 nodes used => target nodes = 2 => target workers = 1
+ autoscaler.update()
+ self.waitForNodes(1)
+
+ # Make new node idle, and never used.
+ # Should hold steady as target is still 2.
+ lm.update("172.0.0.0", {"CPU": 0}, {"CPU": 0})
+ lm.last_used_time_by_ip["172.0.0.0"] = 0
+ autoscaler.update()
+ self.assertEqual(len(self.provider.nodes({})), 1)
+
+ # Reduce load on head => target nodes = 1 => target workers = 0
+ lm.update(local_ip, {"CPU": 2}, {"CPU": 1})
+ autoscaler.update()
+ self.assertEqual(len(self.provider.nodes({})), 0)
def testRecoverUnhealthyWorkers(self):
config_path = self.write_config(SMALL_CLUSTER)
| Autoscaler cyclically kills and restarts idle nodes
### System information
- **OS Platform and Distribution (e.g., Linux Ubuntu 16.04)**: Amazon Linux
- **Ray installed from (source or binary)**: Binary
- **Ray version**: 0.4.1
- **Python version**: 3.6.5
- **Exact command to reproduce**: Any Ray driver that uses enough resources to launch a new node, but not enough to give it any work.
### Describe the problem
The Ray autoscaler launches new nodes as needed to maintain the target_utilization_fraction; it terminates nodes based on their idle time.
Since the autoscaler (quite rightly) grows the cluster beyond the current resource requirements, sometimes you can end up with a newly launched node that has no tasks scheduled on it. In this case, it is killed after the idle timeout.
However, if nothing else has changed, after it is killed the autoscaler will immediately scale up again. This cycle repeats ad-infinitum until the cluster utilization changes.
A quick fix would be to only terminate idle nodes if it wouldn't take us below the target? I feel this might be better resolved by changing the heuristics used by the autoscaler to be something a bit more robust, however. Perhaps forecast the demand based on increase in resources; if a node is idle, this implies the demand has not increased, so our forecast decreases and there is less need for spare capacity.
### Source code / logs
In the wild:
```
StandardAutoscaler [2018-06-28 06:17:30.297538]: 3/3 target nodes (0 pending)
- NodeIdleSeconds: Min=0 Mean=75 Max=302
- NumNodesConnected: 4
- NumNodesUsed: 2.17
- ResourceUsage: 9.0/20.0 CPU, 9.0/20.0 GPU
StandardAutoscaler: Terminating idle node: i-000d34eab0dd6d564
StandardAutoscaler [2018-06-28 06:17:30.623786]: 2/3 target nodes (0 pending)
- NodeIdleSeconds: Min=0 Mean=76 Max=302
- NumNodesConnected: 4
- NumNodesUsed: 2.17
- ResourceUsage: 9.0/20.0 CPU, 9.0/20.0 GPU
StandardAutoscaler: Launching 1 new nodes
StandardAutoscaler [2018-06-28 06:17:30.674819]: 2/3 target nodes (1 pending)
- NodeIdleSeconds: Min=0 Mean=76 Max=302
- NumNodesConnected: 4
- NumNodesUsed: 2.17
- ResourceUsage: 9.0/20.0 CPU, 9.0/20.0 GPU
StandardAutoscaler [2018-06-28 06:17:35.338444]: 3/3 target nodes (0 pending)
- NodeIdleSeconds: Min=0 Mean=77 Max=307
- NumNodesConnected: 4
- NumNodesUsed: 2.17
- ResourceUsage: 9.0/20.0 CPU, 9.0/20.0 GPU
```
| I think there's also an issue caused by the autoscaler not counting the head node. For example:
```
StandardAutoscaler [2018-06-28 18:01:47.047472]: 2/2 target nodes (0 pending)
- NodeIdleSeconds: Min=0 Mean=1 Max=2
- NumNodesConnected: 3
- NumNodesUsed: 1.0
- ResourceUsage: 2.0/14.0 CPU, 2.0/14.0 GPU
```
In this case, my head node's resources (2 CPU cores in my config) are 100% used. The expected behavior is to launch 1 worker node (with 4 CPU cores in my config) to keep cluster utilization below my configured threshold (0.9). Instead, it launches 2 worker nodes. A worked instance of the fix's arithmetic follows below.
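A hedged sketch of the patched `target_num_workers` computation, using the numbers from the comment above:

```python
import math

def target_num_workers(cur_used_nodes, target_frac):
    ideal_num_nodes = math.ceil(cur_used_nodes / target_frac)
    return ideal_num_nodes - 1  # subtract 1 for the head node

# Head node fully used => cur_used ~= 1.0 node; with threshold 0.9 this
# yields ceil(1.0 / 0.9) - 1 = 1 worker instead of 2.
assert target_num_workers(1.0, 0.9) == 1
```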
| 2018-06-28T18:43:40 |
ray-project/ray | 2,784 | ray-project__ray-2784 | [
"2780"
] | eda6ebb87ddef8f34e7ea6ef7fbf2ab8dc0c9ed2 | diff --git a/python/ray/ray_constants.py b/python/ray/ray_constants.py
--- a/python/ray/ray_constants.py
+++ b/python/ray/ray_constants.py
@@ -42,6 +42,7 @@ def env_integer(key, default):
WORKER_DIED_PUSH_ERROR = "worker_died"
PUT_RECONSTRUCTION_PUSH_ERROR = "put_reconstruction"
HASH_MISMATCH_PUSH_ERROR = "object_hash_mismatch"
+INFEASIBLE_TASK_ERROR = "infeasible_task"
# Abort autoscaling if more than this number of errors are encountered. This
# is a safety feature to prevent e.g. runaway node launches.
| diff --git a/test/failure_test.py b/test/failure_test.py
--- a/test/failure_test.py
+++ b/test/failure_test.py
@@ -480,3 +480,26 @@ def __init__(self):
# Make sure that a warning is generated.
wait_for_errors(ray_constants.PICKLING_LARGE_OBJECT_PUSH_ERROR, 2)
+
+
[email protected](
+ os.environ.get("RAY_USE_XRAY") != "1",
+ reason="This test only works with xray.")
+def test_warning_for_infeasible_tasks(ray_start_regular):
+ # Check that we get warning messages for infeasible tasks.
+
+ @ray.remote(num_gpus=1)
+ def f():
+ pass
+
+ @ray.remote(resources={"Custom": 1})
+ class Foo(object):
+ pass
+
+ # This task is infeasible.
+ f.remote()
+ wait_for_errors(ray_constants.INFEASIBLE_TASK_ERROR, 1)
+
+ # This actor placement task is infeasible.
+ Foo.remote()
+ wait_for_errors(ray_constants.INFEASIBLE_TASK_ERROR, 2)
| [xray] Users get no warning for infeasible tasks.
Start Ray with
```
RAY_USE_XRAY=1 ray start --head --redis-port=6379 --num-gpus=0
```
Then start `RAY_USE_XRAY=1 ipython` and run
```python
import ray
ray.init(redis_address='localhost:6379')
@ray.remote(num_gpus=1)
def f():
return 1
f.remote()
```
`f` will never execute because it is infeasible, and yet the user will get no warning.
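Once fixed, a driver can assert that the warning arrives, mirroring the added test (`wait_for_errors` is a helper from `test/failure_test.py`, shown here only to illustrate the expected driver-visible behavior):

```python
import ray.ray_constants as ray_constants

f.remote()  # infeasible: no GPU exists in this cluster
wait_for_errors(ray_constants.INFEASIBLE_TASK_ERROR, 1)
```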
| what about this: https://github.com/ray-project/ray/blob/master/src/ray/raylet/scheduling_policy.cc#L110
That's good, but it goes to a log file (assuming output is redirected, which is the default in the cluster setting). It'd be best for it to be pushed to the driver. | 2018-08-31T06:04:28 |
ray-project/ray | 2,837 | ray-project__ray-2837 | [
"2803"
] | 9948e8c11b92bc0f9f72c0402b87f8445b27c7cb | diff --git a/python/ray/actor.py b/python/ray/actor.py
--- a/python/ray/actor.py
+++ b/python/ray/actor.py
@@ -373,6 +373,15 @@ def _submit(self,
self._num_cpus, self._num_gpus, self._resources, num_cpus,
num_gpus, resources)
+ # If the actor methods require CPU resources, then set the required
+ # placement resources. If actor_placement_resources is empty, then
+ # the required placement resources will be the same as resources.
+ actor_placement_resources = {}
+ assert self._actor_method_cpus in [0, 1]
+ if self._actor_method_cpus == 1:
+ actor_placement_resources = resources.copy()
+ actor_placement_resources["CPU"] += 1
+
creation_args = [self._class_id]
function_id = compute_actor_creation_function_id(self._class_id)
[actor_cursor] = worker.submit_task(
@@ -380,7 +389,8 @@ def _submit(self,
creation_args,
actor_creation_id=actor_id,
num_return_vals=1,
- resources=resources)
+ resources=resources,
+ placement_resources=actor_placement_resources)
# We initialize the actor counter at 1 to account for the actor
# creation task.
@@ -566,6 +576,7 @@ def _actor_method_call(self,
# We add one for the dummy return ID.
num_return_vals=num_return_vals + 1,
resources={"CPU": self._ray_actor_method_cpus},
+ placement_resources={},
driver_id=self._ray_actor_driver_id)
# Update the actor counter and cursor to reflect the most recent
# invocation.
diff --git a/python/ray/worker.py b/python/ray/worker.py
--- a/python/ray/worker.py
+++ b/python/ray/worker.py
@@ -556,6 +556,7 @@ def submit_task(self,
execution_dependencies=None,
num_return_vals=None,
resources=None,
+ placement_resources=None,
driver_id=None):
"""Submit a remote task to the scheduler.
@@ -581,6 +582,9 @@ def submit_task(self,
num_return_vals: The number of return values this function should
have.
resources: The resource requirements for this task.
+ placement_resources: The resources required for placing the task.
+ If this is not provided or if it is an empty dictionary, then
+ the placement resources will be equal to resources.
driver_id: The ID of the relevant driver. This is almost always the
driver ID of the driver that is currently running. However, in
the exceptional case that an actor task is being dispatched to
@@ -634,6 +638,9 @@ def submit_task(self,
raise ValueError(
"Resource quantities must all be whole numbers.")
+ if placement_resources is None:
+ placement_resources = {}
+
with self.state_lock:
# Increment the worker's task index to track how many tasks
# have been submitted by the current task so far.
@@ -646,7 +653,8 @@ def submit_task(self,
num_return_vals, self.current_task_id, task_index,
actor_creation_id, actor_creation_dummy_object_id, actor_id,
actor_handle_id, actor_counter, is_actor_checkpoint_method,
- execution_dependencies, resources, self.use_raylet)
+ execution_dependencies, resources, placement_resources,
+ self.use_raylet)
self.local_scheduler_client.submit(task)
return task.returns()
@@ -2144,7 +2152,7 @@ def connect(info,
worker.current_task_id, worker.task_index,
ray.ObjectID(NIL_ACTOR_ID), ray.ObjectID(NIL_ACTOR_ID),
ray.ObjectID(NIL_ACTOR_ID), ray.ObjectID(NIL_ACTOR_ID),
- nil_actor_counter, False, [], {"CPU": 0}, worker.use_raylet)
+ nil_actor_counter, False, [], {"CPU": 0}, {}, worker.use_raylet)
# Add the driver task to the task table.
if not worker.use_raylet:
| diff --git a/src/ray/raylet/worker_pool_test.cc b/src/ray/raylet/worker_pool_test.cc
--- a/src/ray/raylet/worker_pool_test.cc
+++ b/src/ray/raylet/worker_pool_test.cc
@@ -65,7 +65,7 @@ static inline TaskSpecification ExampleTaskSpec(
const Language &language = Language::PYTHON) {
return TaskSpecification(UniqueID::nil(), UniqueID::nil(), 0, ActorID::nil(),
ObjectID::nil(), actor_id, ActorHandleID::nil(), 0,
- FunctionID::nil(), {}, 0, {{}}, language);
+ FunctionID::nil(), {}, 0, {{}}, {{}}, language);
}
TEST_F(WorkerPoolTest, HandleWorkerRegistration) {
diff --git a/test/failure_test.py b/test/failure_test.py
--- a/test/failure_test.py
+++ b/test/failure_test.py
@@ -548,6 +548,25 @@ class Foo(object):
wait_for_errors(ray_constants.INFEASIBLE_TASK_ERROR, 2)
[email protected](
+ os.environ.get("RAY_USE_XRAY") != "1",
+ reason="This test only works with xray.")
+def test_warning_for_infeasible_zero_cpu_actor(shutdown_only):
+ # Check that we cannot place an actor on a 0 CPU machine and that we get an
+ # infeasibility warning (even though the actor creation task itself
+ # requires no CPUs).
+
+ ray.init(num_cpus=0)
+
+ @ray.remote
+ class Foo(object):
+ pass
+
+ # The actor creation should be infeasible.
+ Foo.remote()
+ wait_for_errors(ray_constants.INFEASIBLE_TASK_ERROR, 1)
+
+
@pytest.fixture
def ray_start_two_nodes():
# Start the Ray processes.
| Hanging in test/runtest.py::ResourcesTest::testZeroCPUsActor on Travis.
I've seen the following test failure a fair amount in the `RAY_USE_XRAY=1` job.
```
test/runtest.py::ResourcesTest::testResourceConstraints PASSED [ 78%]
test/runtest.py::ResourcesTest::testTwoCustomResources PASSED [ 80%]
test/runtest.py::ResourcesTest::testZeroCPUs PASSED [ 81%]
test/runtest.py::ResourcesTest::testZeroCPUsActor
No output has been received in the last 10m0s, this potentially indicates a stalled build or something wrong with the build itself.
Check the details on how to adjust your build configuration on: https://docs.travis-ci.com/user/common-build-problems/#Build-times-out-because-no-output-was-received
The build has been terminated
```
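For context on the fix: the patch separates the resources an actor creation task runs with from those needed to place the actor, since each method call will need a CPU slot. A minimal sketch of that computation, mirroring the change to `_submit` (the function name is illustrative):

```python
def compute_placement_resources(resources, actor_method_cpus):
    """Placement must reserve a CPU slot for future method invocations."""
    assert actor_method_cpus in (0, 1)
    if actor_method_cpus == 1:
        placement = dict(resources)
        placement["CPU"] = placement.get("CPU", 0) + 1
        return placement
    return {}  # empty => placement resources default to `resources`
```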
| I can reproduce this with
```
for i in {1..100}; do python -m pytest -s -v test/runtest.py::ResourcesTest::testZeroCPUsActor; done
```
in docker. | 2018-09-07T06:46:32 |
ray-project/ray | 3,109 | ray-project__ray-3109 | [
"3108"
] | 6b3236349cfb01e5623bcd1d0ebd1271c8f9b505 | diff --git a/python/ray/__init__.py b/python/ray/__init__.py
--- a/python/ray/__init__.py
+++ b/python/ray/__init__.py
@@ -46,6 +46,9 @@
e.args += (helpful_message, )
raise
+modin_path = os.path.join(os.path.abspath(os.path.dirname(__file__)), "modin")
+sys.path.insert(0, modin_path)
+
from ray.raylet import ObjectID, _config # noqa: E402
from ray.profiling import profile # noqa: E402
from ray.worker import (error_info, init, connect, disconnect, get, put, wait,
| diff --git a/python/ray/test/test_modin.py b/python/ray/test/test_modin.py
new file mode 100644
--- /dev/null
+++ b/python/ray/test/test_modin.py
@@ -0,0 +1,12 @@
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import ray # noqa F401
+
+
+def test_modin_import():
+ import modin.pandas as pd
+ frame_data = [1, 2, 3, 4, 5, 6, 7, 8]
+ frame = pd.DataFrame(frame_data)
+ assert frame.sum().squeeze() == sum(frame_data)
| Ship Modin with Ray
### Describe the problem
<!-- Describe the problem clearly here. -->
I think it makes sense to ship Modin with Ray. I suggest doing this similarly to how pyarrow is shipped with Ray.
We don't need to rely on the dependencies of Modin, but some of the Modin source will have to be updated to make sure that the pandas version is correct.
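Usage then mirrors the added test: importing `ray` first puts the vendored Modin directory on `sys.path`:

```python
import ray  # noqa: F401 -- inserts the bundled modin/ dir into sys.path
import modin.pandas as pd

frame = pd.DataFrame([1, 2, 3, 4])
print(frame.sum())
```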
| 2018-10-22T20:47:03 |
|
ray-project/ray | 3,124 | ray-project__ray-3124 | [
"3141"
] | fb6ac28b44947c74600d09ff2231bc0d1987537d | diff --git a/python/ray/services.py b/python/ray/services.py
--- a/python/ray/services.py
+++ b/python/ray/services.py
@@ -1216,6 +1216,8 @@ def start_raylet_monitor(redis_address,
gcs_ip_address, gcs_port = redis_address.split(":")
redis_password = redis_password or ""
command = [RAYLET_MONITOR_EXECUTABLE, gcs_ip_address, gcs_port]
+ if redis_password:
+ command += [redis_password]
p = subprocess.Popen(command, stdout=stdout_file, stderr=stderr_file)
if cleanup:
all_processes[PROCESS_TYPE_MONITOR].append(p)
diff --git a/python/ray/worker.py b/python/ray/worker.py
--- a/python/ray/worker.py
+++ b/python/ray/worker.py
@@ -1933,8 +1933,9 @@ def connect(info,
sys.stdout = log_stdout_file
sys.stderr = log_stderr_file
services.record_log_files_in_redis(
- info["redis_address"], info["node_ip_address"],
- [log_stdout_file, log_stderr_file])
+ info["redis_address"],
+ info["node_ip_address"], [log_stdout_file, log_stderr_file],
+ password=redis_password)
# Create an object for interfacing with the global state.
global_state._initialize_global_state(
| diff --git a/python/ray/test/cluster_utils.py b/python/ray/test/cluster_utils.py
--- a/python/ray/test/cluster_utils.py
+++ b/python/ray/test/cluster_utils.py
@@ -37,7 +37,10 @@ def __init__(self,
head_node_args = head_node_args or {}
self.add_node(**head_node_args)
if connect:
- ray.init(redis_address=self.redis_address)
+ redis_password = head_node_args.get("redis_password")
+ ray.init(
+ redis_address=self.redis_address,
+ redis_password=redis_password)
def add_node(self, **override_kwargs):
"""Adds a node to the local Ray Cluster.
diff --git a/python/ray/test/test_ray_init.py b/python/ray/test/test_ray_init.py
--- a/python/ray/test/test_ray_init.py
+++ b/python/ray/test/test_ray_init.py
@@ -7,6 +7,7 @@
import redis
import ray
+from ray.test.cluster_utils import Cluster
@pytest.fixture
@@ -29,7 +30,6 @@ class TestRedisPassword(object):
os.environ.get("RAY_USE_NEW_GCS") == "on",
reason="New GCS API doesn't support Redis authentication yet.")
def test_redis_password(self, password, shutdown_only):
- # Workaround for https://github.com/ray-project/ray/issues/3045
@ray.remote
def f():
return 1
@@ -52,3 +52,19 @@ def f():
redis_client = redis.StrictRedis(
host=redis_ip, port=redis_port, password=password)
assert redis_client.ping()
+
+ @pytest.mark.skipif(
+ os.environ.get("RAY_USE_NEW_GCS") == "on",
+ reason="New GCS API doesn't support Redis authentication yet.")
+ def test_redis_password_cluster(self, password, shutdown_only):
+ @ray.remote
+ def f():
+ return 1
+
+ node_args = {"redis_password": password}
+ cluster = Cluster(
+ initialize_head=True, connect=True, head_node_args=node_args)
+ cluster.add_node(**node_args)
+
+ object_id = f.remote()
+ ray.get(object_id)
| No jobs scheduled when using redis-password
### System information
- **OS Platform and Distribution (e.g., Linux Ubuntu 16.04)**: Ubuntu 16.04.1
- **Ray installed from (source or binary)**: pip install -U https://s3-us-west-2.amazonaws.com/ray-wheels/latest/ray-0.5.3-cp35-cp35m-manylinux1_x86_64.whl
- **Ray version**: 0.5.3
- **Python version**: 3.5.2
- **Exact command to reproduce**: Given `foo.py` below, running the following reproduces the problem:
```
export RAY_USE_XRAY=1
ray start --head --redis-password=thequickbrownfox
python foo.py
```
### Describe the problem
When I start ray without a `--redis-password`, `foo.py` works as expected (writes 5 `foo#.out` files). When using the `--redis-password`, `foo.py` hangs indefinitely.
### Source code / logs
`foo.py`:
```
import ray
# using whatever address and port were given when I ran ray start
ray.init(redis_address="<addr>:<port>", redis_password="thequickbrownfox")
@ray.remote
def foo(i):
with open("foo{}.out".format(i), "w") as fooF:
fooF.write("hello world")
ids = []
for i in range(5):
ids.append(foo.remote(i))
ray.get(ids)
print("done!")
```
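For completeness, a quick way to confirm the password actually reaches Redis, mirroring the added test (fill in the address and port from the `ray start` output):

```python
import redis

redis_ip, redis_port = "<addr>", "<port>"  # from the `ray start` output
client = redis.StrictRedis(host=redis_ip, port=int(redis_port),
                           password="thequickbrownfox")
assert client.ping()  # succeeds only if authentication is set up correctly
```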
| 2018-10-24T17:49:11 |
|
ray-project/ray | 3,130 | ray-project__ray-3130 | [
"3129"
] | befbf78048120e4f995fc1cf00927b4ea665abfc | diff --git a/python/ray/scripts/scripts.py b/python/ray/scripts/scripts.py
--- a/python/ray/scripts/scripts.py
+++ b/python/ray/scripts/scripts.py
@@ -104,6 +104,11 @@ def cli(logging_level, logging_format):
required=False,
type=int,
help="the port to use for starting the object manager")
[email protected](
+ "--node-manager-port",
+ required=False,
+ type=int,
+ help="the port to use for starting the node manager")
@click.option(
"--object-store-memory",
required=False,
@@ -190,11 +195,11 @@ def cli(logging_level, logging_format):
help="manually specify the root temporary dir of the Ray process")
def start(node_ip_address, redis_address, redis_port, num_redis_shards,
redis_max_clients, redis_password, redis_shard_ports,
- object_manager_port, object_store_memory, num_workers, num_cpus,
- num_gpus, resources, head, no_ui, block, plasma_directory,
- huge_pages, autoscaling_config, no_redirect_worker_output,
- no_redirect_output, plasma_store_socket_name, raylet_socket_name,
- temp_dir):
+ object_manager_port, node_manager_port, object_store_memory,
+ num_workers, num_cpus, num_gpus, resources, head, no_ui, block,
+ plasma_directory, huge_pages, autoscaling_config,
+ no_redirect_worker_output, no_redirect_output,
+ plasma_store_socket_name, raylet_socket_name, temp_dir):
# Convert hostnames to numerical IP address.
if node_ip_address is not None:
node_ip_address = services.address_to_ip(node_ip_address)
@@ -243,15 +248,9 @@ def start(node_ip_address, redis_address, redis_port, num_redis_shards,
logger.info("Using IP address {} for this node."
.format(node_ip_address))
- address_info = {}
- # Use the provided object manager port if there is one.
- if object_manager_port is not None:
- address_info["object_manager_ports"] = [object_manager_port]
- if address_info == {}:
- address_info = None
-
address_info = services.start_ray_head(
- address_info=address_info,
+ object_manager_ports=[object_manager_port],
+ node_manager_ports=[node_manager_port],
node_ip_address=node_ip_address,
redis_port=redis_port,
redis_shard_ports=redis_shard_ports,
@@ -337,6 +336,7 @@ def start(node_ip_address, redis_address, redis_port, num_redis_shards,
node_ip_address=node_ip_address,
redis_address=redis_address,
object_manager_ports=[object_manager_port],
+ node_manager_ports=[node_manager_port],
num_workers=num_workers,
object_store_memory=object_store_memory,
redis_password=redis_password,
diff --git a/python/ray/services.py b/python/ray/services.py
--- a/python/ray/services.py
+++ b/python/ray/services.py
@@ -849,6 +849,8 @@ def start_raylet(redis_address,
plasma_store_name,
worker_path,
resources=None,
+ object_manager_port=None,
+ node_manager_port=None,
num_workers=0,
use_valgrind=False,
use_profiler=False,
@@ -867,6 +869,13 @@ def start_raylet(redis_address,
raylet_name (str): The name of the raylet socket to create.
worker_path (str): The path of the script to use when the local
scheduler starts up new workers.
+ resources: The resources that this raylet has.
+ object_manager_port (int): The port to use for the object manager. If
+ this is not provided, we will use 0 and the object manager will
+ choose its own port.
+ node_manager_port (int): The port to use for the node manager. If
+ this is not provided, we will use 0 and the node manager will
+ choose its own port.
use_valgrind (bool): True if the raylet should be started inside
of valgrind. If this is True, use_profiler must be False.
use_profiler (bool): True if the raylet should be started inside
@@ -915,10 +924,21 @@ def start_raylet(redis_address,
if redis_password:
start_worker_command += " --redis-password {}".format(redis_password)
+ # If the object manager port is None, then use 0 to cause the object
+ # manager to choose its own port.
+ if object_manager_port is None:
+ object_manager_port = 0
+ # If the node manager port is None, then use 0 to cause the node manager
+ # to choose its own port.
+ if node_manager_port is None:
+ node_manager_port = 0
+
command = [
RAYLET_EXECUTABLE,
raylet_name,
plasma_store_name,
+ str(object_manager_port),
+ str(node_manager_port),
node_ip_address,
gcs_ip_address,
gcs_port,
@@ -1159,6 +1179,8 @@ def start_raylet_monitor(redis_address,
def start_ray_processes(address_info=None,
+ object_manager_ports=None,
+ node_manager_ports=None,
node_ip_address="127.0.0.1",
redis_port=None,
redis_shard_ports=None,
@@ -1188,6 +1210,12 @@ def start_ray_processes(address_info=None,
address_info (dict): A dictionary with address information for
processes that have already been started. If provided, address_info
will be modified to include processes that are newly started.
+ object_manager_ports (list): A list of the ports to use for the object
+ managers. There should be one per object manager being started on
+ this node (typically just one).
+ node_manager_ports (list): A list of the ports to use for the node
+ managers. There should be one per node manager being started on
+ this node (typically just one).
node_ip_address (str): The IP address of this node.
redis_port (int): The port that the primary Redis shard should listen
to. If None, then a random port will be chosen. If the key
@@ -1341,11 +1369,14 @@ def start_ray_processes(address_info=None,
raylet_socket_names = address_info["raylet_socket_names"]
# Get the ports to use for the object managers if any are provided.
- object_manager_ports = (address_info["object_manager_ports"] if
- "object_manager_ports" in address_info else None)
if not isinstance(object_manager_ports, list):
+ assert object_manager_ports is None or num_local_schedulers == 1
object_manager_ports = num_local_schedulers * [object_manager_ports]
assert len(object_manager_ports) == num_local_schedulers
+ if not isinstance(node_manager_ports, list):
+ assert node_manager_ports is None or num_local_schedulers == 1
+ node_manager_ports = num_local_schedulers * [node_manager_ports]
+ assert len(node_manager_ports) == num_local_schedulers
# Start any object stores that do not yet exist.
for i in range(num_local_schedulers - len(object_store_addresses)):
@@ -1378,6 +1409,8 @@ def start_ray_processes(address_info=None,
raylet_socket_name or get_raylet_socket_name(),
object_store_addresses[i],
worker_path,
+ object_manager_port=object_manager_ports[i],
+ node_manager_port=node_manager_ports[i],
resources=resources[i],
num_workers=workers_per_local_scheduler[i],
stdout_file=raylet_stdout_file,
@@ -1402,6 +1435,7 @@ def start_ray_processes(address_info=None,
def start_ray_node(node_ip_address,
redis_address,
object_manager_ports=None,
+ node_manager_ports=None,
num_workers=0,
num_local_schedulers=1,
object_store_memory=None,
@@ -1427,6 +1461,9 @@ def start_ray_node(node_ip_address,
object_manager_ports (list): A list of the ports to use for the object
managers. There should be one per object manager being started on
this node (typically just one).
+ node_manager_ports (list): A list of the ports to use for the node
+ managers. There should be one per node manager being started on
+ this node (typically just one).
num_workers (int): The number of workers to start.
num_local_schedulers (int): The number of local schedulers to start.
This is also the number of plasma stores and plasma managers to
@@ -1463,10 +1500,11 @@ def start_ray_node(node_ip_address,
"""
address_info = {
"redis_address": redis_address,
- "object_manager_ports": object_manager_ports
}
return start_ray_processes(
address_info=address_info,
+ object_manager_ports=object_manager_ports,
+ node_manager_ports=node_manager_ports,
node_ip_address=node_ip_address,
num_workers=num_workers,
num_local_schedulers=num_local_schedulers,
@@ -1486,6 +1524,8 @@ def start_ray_node(node_ip_address,
def start_ray_head(address_info=None,
+ object_manager_ports=None,
+ node_manager_ports=None,
node_ip_address="127.0.0.1",
redis_port=None,
redis_shard_ports=None,
@@ -1514,6 +1554,12 @@ def start_ray_head(address_info=None,
address_info (dict): A dictionary with address information for
processes that have already been started. If provided, address_info
will be modified to include processes that are newly started.
+ object_manager_ports (list): A list of the ports to use for the object
+ managers. There should be one per object manager being started on
+ this node (typically just one).
+ node_manager_ports (list): A list of the ports to use for the node
+ managers. There should be one per node manager being started on
+ this node (typically just one).
node_ip_address (str): The IP address of this node.
redis_port (int): The port that the primary Redis shard should listen
to. If None, then a random port will be chosen. If the key
@@ -1570,6 +1616,8 @@ def start_ray_head(address_info=None,
num_redis_shards = 1 if num_redis_shards is None else num_redis_shards
return start_ray_processes(
address_info=address_info,
+ object_manager_ports=object_manager_ports,
+ node_manager_ports=node_manager_ports,
node_ip_address=node_ip_address,
redis_port=redis_port,
redis_shard_ports=redis_shard_ports,
| diff --git a/test/multi_node_test.py b/test/multi_node_test.py
--- a/test/multi_node_test.py
+++ b/test/multi_node_test.py
@@ -285,9 +285,12 @@ def test_calling_start_ray_head():
["ray", "start", "--head", "--node-ip-address", "127.0.0.1"])
subprocess.Popen(["ray", "stop"]).wait()
- # Test starting Ray with an object manager port specified.
- run_and_get_output(
- ["ray", "start", "--head", "--object-manager-port", "12345"])
+ # Test starting Ray with the object manager and node manager ports
+ # specified.
+ run_and_get_output([
+ "ray", "start", "--head", "--object-manager-port", "12345",
+ "--node-manager-port", "54321"
+ ])
subprocess.Popen(["ray", "stop"]).wait()
# Test starting Ray with the number of CPUs specified.
| Allow setting object manager port and node manager port through ray start.
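With this change, both manager ports can be pinned from the CLI; for example (mirroring the updated `multi_node_test.py`):

```python
import subprocess

subprocess.check_call([
    "ray", "start", "--head",
    "--object-manager-port", "12345",
    "--node-manager-port", "54321",
])
```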
| 2018-10-24T22:33:09 |
|
ray-project/ray | 3,161 | ray-project__ray-3161 | [
"2852"
] | e49839c73f16489ed2baef55861e9b4e6e20814d | diff --git a/python/ray/experimental/state.py b/python/ray/experimental/state.py
--- a/python/ray/experimental/state.py
+++ b/python/ray/experimental/state.py
@@ -162,22 +162,26 @@ def _object_table(self, object_id):
message = self._execute_command(object_id, "RAY.TABLE_LOOKUP",
ray.gcs_utils.TablePrefix.OBJECT, "",
object_id.id())
- result = []
gcs_entry = ray.gcs_utils.GcsTableEntry.GetRootAsGcsTableEntry(
message, 0)
- for i in range(gcs_entry.EntriesLength()):
+ assert gcs_entry.EntriesLength() > 0
+
+ entry = ray.gcs_utils.ObjectTableData.GetRootAsObjectTableData(
+ gcs_entry.Entries(0), 0)
+
+ object_info = {
+ "DataSize": entry.ObjectSize(),
+ "Manager": entry.Manager(),
+ "IsEviction": [entry.IsEviction()],
+ }
+
+ for i in range(1, gcs_entry.EntriesLength()):
entry = ray.gcs_utils.ObjectTableData.GetRootAsObjectTableData(
gcs_entry.Entries(i), 0)
- object_info = {
- "DataSize": entry.ObjectSize(),
- "Manager": entry.Manager(),
- "IsEviction": entry.IsEviction(),
- "NumEvictions": entry.NumEvictions()
- }
- result.append(object_info)
+ object_info["IsEviction"].append(entry.IsEviction())
- return result
+ return object_info
def object_table(self, object_id=None):
"""Fetch and parse the object table info for one or more object IDs.
@@ -224,44 +228,42 @@ def _task_table(self, task_id):
gcs_entries = ray.gcs_utils.GcsTableEntry.GetRootAsGcsTableEntry(
message, 0)
- info = []
- for i in range(gcs_entries.EntriesLength()):
- task_table_message = ray.gcs_utils.Task.GetRootAsTask(
- gcs_entries.Entries(i), 0)
-
- execution_spec = task_table_message.TaskExecutionSpec()
- task_spec = task_table_message.TaskSpecification()
- task_spec = ray.raylet.task_from_string(task_spec)
- task_spec_info = {
- "DriverID": binary_to_hex(task_spec.driver_id().id()),
- "TaskID": binary_to_hex(task_spec.task_id().id()),
- "ParentTaskID": binary_to_hex(task_spec.parent_task_id().id()),
- "ParentCounter": task_spec.parent_counter(),
- "ActorID": binary_to_hex(task_spec.actor_id().id()),
- "ActorCreationID": binary_to_hex(
- task_spec.actor_creation_id().id()),
- "ActorCreationDummyObjectID": binary_to_hex(
- task_spec.actor_creation_dummy_object_id().id()),
- "ActorCounter": task_spec.actor_counter(),
- "FunctionID": binary_to_hex(task_spec.function_id().id()),
- "Args": task_spec.arguments(),
- "ReturnObjectIDs": task_spec.returns(),
- "RequiredResources": task_spec.required_resources()
- }
-
- info.append({
- "ExecutionSpec": {
- "Dependencies": [
- execution_spec.Dependencies(i)
- for i in range(execution_spec.DependenciesLength())
- ],
- "LastTimestamp": execution_spec.LastTimestamp(),
- "NumForwards": execution_spec.NumForwards()
- },
- "TaskSpec": task_spec_info
- })
+ assert gcs_entries.EntriesLength() == 1
+
+ task_table_message = ray.gcs_utils.Task.GetRootAsTask(
+ gcs_entries.Entries(0), 0)
+
+ execution_spec = task_table_message.TaskExecutionSpec()
+ task_spec = task_table_message.TaskSpecification()
+ task_spec = ray.raylet.task_from_string(task_spec)
+ task_spec_info = {
+ "DriverID": binary_to_hex(task_spec.driver_id().id()),
+ "TaskID": binary_to_hex(task_spec.task_id().id()),
+ "ParentTaskID": binary_to_hex(task_spec.parent_task_id().id()),
+ "ParentCounter": task_spec.parent_counter(),
+ "ActorID": binary_to_hex(task_spec.actor_id().id()),
+ "ActorCreationID": binary_to_hex(
+ task_spec.actor_creation_id().id()),
+ "ActorCreationDummyObjectID": binary_to_hex(
+ task_spec.actor_creation_dummy_object_id().id()),
+ "ActorCounter": task_spec.actor_counter(),
+ "FunctionID": binary_to_hex(task_spec.function_id().id()),
+ "Args": task_spec.arguments(),
+ "ReturnObjectIDs": task_spec.returns(),
+ "RequiredResources": task_spec.required_resources()
+ }
- return info
+ return {
+ "ExecutionSpec": {
+ "Dependencies": [
+ execution_spec.Dependencies(i)
+ for i in range(execution_spec.DependenciesLength())
+ ],
+ "LastTimestamp": execution_spec.LastTimestamp(),
+ "NumForwards": execution_spec.NumForwards()
+ },
+ "TaskSpec": task_spec_info
+ }
def task_table(self, task_id=None):
"""Fetch and parse the task table information for one or more task IDs.
diff --git a/python/ray/monitor.py b/python/ray/monitor.py
--- a/python/ray/monitor.py
+++ b/python/ray/monitor.py
@@ -152,10 +152,8 @@ def _xray_clean_up_entries_for_driver(self, driver_id):
task_table_objects = self.state.task_table()
driver_id_hex = binary_to_hex(driver_id)
driver_task_id_bins = set()
- for task_id_hex in task_table_objects:
- if len(task_table_objects[task_id_hex]) == 0:
- continue
- task_table_object = task_table_objects[task_id_hex][0]["TaskSpec"]
+ for task_id_hex, task_info in task_table_objects.items():
+ task_table_object = task_info["TaskSpec"]
task_driver_id_hex = task_table_object["DriverID"]
if driver_id_hex != task_driver_id_hex:
# Ignore tasks that aren't from this driver.
@@ -165,8 +163,7 @@ def _xray_clean_up_entries_for_driver(self, driver_id):
# Get objects associated with the driver.
object_table_objects = self.state.object_table()
driver_object_id_bins = set()
- for object_id, object_table_object in object_table_objects.items():
- assert len(object_table_object) > 0
+ for object_id, _ in object_table_objects.items():
task_id_bin = ray.raylet.compute_task_id(object_id).id()
if task_id_bin in driver_task_id_bins:
driver_object_id_bins.add(object_id.id())
| diff --git a/test/runtest.py b/test/runtest.py
--- a/test/runtest.py
+++ b/test/runtest.py
@@ -2115,8 +2115,7 @@ def test_global_state_api(shutdown_only):
task_table = ray.global_state.task_table()
assert len(task_table) == 1
assert driver_task_id == list(task_table.keys())[0]
- assert len(task_table[driver_task_id]) == 1
- task_spec = task_table[driver_task_id][0]["TaskSpec"]
+ task_spec = task_table[driver_task_id]["TaskSpec"]
assert task_spec["TaskID"] == driver_task_id
assert task_spec["ActorID"] == ray_constants.ID_SIZE * "ff"
@@ -2147,7 +2146,7 @@ def f(*xs):
task_id = list(task_id_set)[0]
function_table = ray.global_state.function_table()
- task_spec = task_table[task_id][0]["TaskSpec"]
+ task_spec = task_table[task_id]["TaskSpec"]
assert task_spec["ActorID"] == ray_constants.ID_SIZE * "ff"
assert task_spec["Args"] == [1, "hi", x_id]
assert task_spec["DriverID"] == driver_id
@@ -2178,13 +2177,9 @@ def wait_for_object_table():
object_table = ray.global_state.object_table()
assert len(object_table) == 2
- assert len(object_table[x_id]) == 1
- assert object_table[x_id][0]["IsEviction"] is False
- assert object_table[x_id][0]["NumEvictions"] == 0
+ assert object_table[x_id]["IsEviction"][0] is False
- assert len(object_table[result_id]) == 1
- assert object_table[result_id][0]["IsEviction"] is False
- assert object_table[result_id][0]["NumEvictions"] == 0
+ assert object_table[result_id]["IsEviction"][0] is False
assert object_table[x_id] == ray.global_state.object_table(x_id)
object_table_entry = ray.global_state.object_table(result_id)
@@ -2251,78 +2246,6 @@ def f():
assert "stdout_file" in info
[email protected]("This test does not work yet.")
[email protected](
- os.environ.get("RAY_USE_NEW_GCS") == "on",
- reason="New GCS API doesn't have a Python API yet.")
-def test_flush_api(shutdown_only):
- ray.init(num_cpus=1)
-
- @ray.remote
- def f():
- return 1
-
- [ray.put(1) for _ in range(10)]
- ray.get([f.remote() for _ in range(10)])
-
- # Wait until all of the task and object information has been stored in
- # Redis. Note that since a given key may be updated multiple times
- # (e.g., multiple calls to TaskTableUpdate), this is an attempt to wait
- # until all updates have happened. Note that in a real application we
- # could encounter this kind of issue as well.
- while True:
- object_table = ray.global_state.object_table()
- task_table = ray.global_state.task_table()
-
- tables_ready = True
-
- if len(object_table) != 20:
- tables_ready = False
-
- for object_info in object_table.values():
- if len(object_info) != 5:
- tables_ready = False
- if (object_info["ManagerIDs"] is None
- or object_info["DataSize"] == -1
- or object_info["Hash"] == ""):
- tables_ready = False
-
- if len(task_table) != 10 + 1:
- tables_ready = False
-
- driver_task_id = ray.utils.binary_to_hex(
- ray.worker.global_worker.current_task_id.id())
-
- for info in task_table.values():
- if info["State"] != ray.experimental.state.TASK_STATUS_DONE:
- if info["TaskSpec"]["TaskID"] != driver_task_id:
- tables_ready = False
-
- if tables_ready:
- break
- # this test case is blocked sometimes, add this may fix the problem
- time.sleep(0.1)
-
- # Flush the tables.
- ray.experimental.flush_redis_unsafe()
- ray.experimental.flush_task_and_object_metadata_unsafe()
-
- # Make sure the tables are empty.
- assert len(ray.global_state.object_table()) == 0
- assert len(ray.global_state.task_table()) == 0
-
- # Run some more tasks.
- ray.get([f.remote() for _ in range(10)])
-
- while len(ray.global_state.task_table()) != 0:
- time.sleep(0.1)
- ray.experimental.flush_finished_tasks_unsafe()
-
- # Make sure that we can call this method (but it won't do anything in
- # this test case).
- ray.experimental.flush_evicted_objects_unsafe()
-
-
@pytest.fixture
def shutdown_only_with_initialization_check():
yield None
| [xray] Hide "append-only log" semantics in global state API.
Certain global state commands expose unnecessary implementation details.
- `ray.global_state.client_table()` returns a log, which can contain multiple entries for the same "client". This came up in #2851.
- `ray.global_state.object_table()` returns a list of entries for each object ID; we should probably just have one entry per object ID.
- `ray.global_state.task_table()` returns a list of entries for each task ID, which should be a single entry.
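For illustration, a minimal sketch of the shape change implied by the test diff above (plain dicts stand in for the real tables; only the access patterns and asserted values come from the tests, the IDs are made up):
```python
# Before: each lookup returns a list of entries per ID.
old_object_table = {"x_id": [{"IsEviction": False, "NumEvictions": 0}]}
# After: one dict per ID, with per-event lists only where history matters.
new_object_table = {"x_id": {"IsEviction": [False]}}

assert old_object_table["x_id"][0]["IsEviction"] is False
assert new_object_table["x_id"]["IsEviction"][0] is False

old_task_table = {"t_id": [{"TaskSpec": {"TaskID": "t_id"}}]}
new_task_table = {"t_id": {"TaskSpec": {"TaskID": "t_id"}}}

assert old_task_table["t_id"][0]["TaskSpec"] == new_task_table["t_id"]["TaskSpec"]
```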
| For the client table, it makes sense to have a single entry for each client.
For the object table, we might want to keep all "current" clients for this object, so that people can know where the object is stored.
For the task table, it might be helpful to provide an option to return a list of entries, so that developers can leverage this information for debugging purposes, e.g. investigating failure & reconstruction for a task.
Thoughts?
For the object table, we could have one entry per object, and that entry could include a list of clients or of creation/eviction events.
For the task table, we actually don't store a log in the GCS; updates to the task table overwrite the current entry. This is the case because we use `Table` instead of `Log` in
https://github.com/ray-project/ray/blob/588c573d418d78b730898dcd6314d61f0dd1389d/src/ray/gcs/tables.cc#L440-L451
I agree that the client table should have one entry per client. [The bug](https://github.com/ray-project/ray/pull/2905) that I'm trying to fix is caused by having multiple entries for one client.
Maybe we can create a new class named `UpdatableLog`? Then the ObjectTable and ClientTable could both use this `UpdatableLog` and keep one entry?
Ok, I understand now. The `Table` is already an `UpdatableLog`.
However, even with an `UpdatableLog`, we could still have a race condition where the node manager tries to connect to another dead node manager.
`Table` is a single-entry structure, right? One Redis key will only hold one DataT element, so we can update it as a whole. `Log` is different: it uses a ZSET to hold several DataT elements. For `ClientTable` there should be multiple entries for different clients. If we can update a `Log` entry, then the `ClientAdded` function may not get an entry for a dead raylet with `is_insertion=true`?
The `Table` data structure is a single entry *per key*. The `Log` is a list/zset *per key*. Currently the `ClientTable` only uses a single key.
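As a rough Redis-level analogy of the two structures (a hedged sketch only — the real GCS goes through its own command layer and flatbuffer payloads, so none of these keys or values are the actual ones):
```python
# Hedged analogy, not the actual GCS code path; needs a local Redis server.
import redis

r = redis.Redis()

# Table: one entry per key; a later write overwrites the earlier one in place.
r.set("task_table:task123", b"entry-v1")
r.set("task_table:task123", b"entry-v2")  # replaces entry-v1

# Log: a zset per key; appends accumulate, so readers see the full history.
r.zadd("client_table:log", {b"client-1 inserted": 1})
r.zadd("client_table:log", {b"client-1 removed": 2})  # both entries remain
print(r.zrange("client_table:log", 0, -1))
# [b'client-1 inserted', b'client-1 removed']
```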
I think it's good to keep the whole record of the nodes that joined and left the cluster around, since that information may be useful for debugging and other reasons.
Even if we use a `Table` so that we can update the `ClientTable` entry in place, it is still possible that a node manager could try to connect to a remote node manager that has died. E.g.
1. Node manager 1 registers with the GCS.
2. Node manager 2 registers with the GCS.
3. `ClientAdded(node_manager_1)` is called on node manager 2, node manager 1 simultaneously dies.
4. As part of `ClientAdded`, node manager 2 tries to connect to node manager 1 and fails.
5. Later on, the monitor detects that node manager 1 has died and updates the GCS.
So we still need to handle the case where a node manager tries to connect to a dead node manager. | 2018-10-29T22:45:52 |
ray-project/ray | 3,270 | ray-project__ray-3270 | [
"3266"
] | 4182b856118a38295e76db37c752e0a5c68352d8 | diff --git a/python/ray/autoscaler/autoscaler.py b/python/ray/autoscaler/autoscaler.py
--- a/python/ray/autoscaler/autoscaler.py
+++ b/python/ray/autoscaler/autoscaler.py
@@ -69,6 +69,7 @@
"project_id": (None, OPTIONAL), # gcp project id, if using gcp
"head_ip": (str, OPTIONAL), # local cluster head node
"worker_ips": (list, OPTIONAL), # local cluster worker nodes
+ "use_internal_ips": (bool, OPTIONAL), # don't require public ips
},
REQUIRED),
diff --git a/python/ray/autoscaler/aws/config.py b/python/ray/autoscaler/aws/config.py
--- a/python/ray/autoscaler/aws/config.py
+++ b/python/ray/autoscaler/aws/config.py
@@ -152,9 +152,10 @@ def _configure_key_pair(config):
def _configure_subnet(config):
ec2 = _resource("ec2", config)
+ use_internal_ips = config["provider"].get("use_internal_ips", False)
subnets = sorted(
- (s for s in ec2.subnets.all()
- if s.state == "available" and s.map_public_ip_on_launch),
+ (s for s in ec2.subnets.all() if s.state == "available" and (
+ use_internal_ips or s.map_public_ip_on_launch)),
reverse=True, # sort from Z-A
key=lambda subnet: subnet.availability_zone)
if not subnets:
@@ -162,7 +163,8 @@ def _configure_subnet(config):
"No usable subnets found, try manually creating an instance in "
"your specified region to populate the list of subnets "
"and trying this again. Note that the subnet must map public IPs "
- "on instance launch.")
+ "on instance launch unless you set 'use_internal_ips': True in "
+ "the 'provider' config.")
if "availability_zone" in config["provider"]:
azs = config["provider"]["availability_zone"].split(',')
subnets = [s for s in subnets if s.availability_zone in azs]
diff --git a/python/ray/autoscaler/updater.py b/python/ray/autoscaler/updater.py
--- a/python/ray/autoscaler/updater.py
+++ b/python/ray/autoscaler/updater.py
@@ -47,7 +47,8 @@ def __init__(self,
self.daemon = True
self.process_runner = process_runner
self.node_id = node_id
- self.use_internal_ip = use_internal_ip
+ self.use_internal_ip = (use_internal_ip or provider_config.get(
+ "use_internal_ips", False))
self.provider = get_node_provider(provider_config, cluster_name)
self.ssh_private_key = auth_config["ssh_private_key"]
self.ssh_user = auth_config["ssh_user"]
| Autoscaler requires public IPs
<!--
General questions should be asked on the mailing list [email protected].
Before submitting an issue, please fill out the following form.
-->
### System information
- **OS Platform and Distribution (e.g., Linux Ubuntu 16.04)**: Ubuntu 17.04
- **Ray installed from (source or binary)**: binary
- **Ray version**: 0.5.3
- **Python version**: 3.6.6
- **Exact command to reproduce**:
1. Configure a cluster with a subnet which has `"MapPublicIpOnLaunch": false`
2. Run `ray up <yaml file>`
### Describe the problem
We see the following stack trace:
```
Traceback (most recent call last):
File "/home/matthew/conda/envs/venv/bin/ray", line 11, in <module>
load_entry_point('ray==0.5.3', 'console_scripts', 'ray')()
File "/home/matthew/conda/envs/venv/lib/python3.6/site-packages/ray-0.5.3-py3.6-linux-x86_64.egg/ray/scripts/scripts.py", line 569, in main
return cli()
File "/home/matthew/conda/envs/venv/lib/python3.6/site-packages/Click-7.0-py3.6.egg/click/core.py", line 764, in __call__
return self.main(*args, **kwargs)
File "/home/matthew/conda/envs/venv/lib/python3.6/site-packages/Click-7.0-py3.6.egg/click/core.py", line 717, in main
rv = self.invoke(ctx)
File "/home/matthew/conda/envs/venv/lib/python3.6/site-packages/Click-7.0-py3.6.egg/click/core.py", line 1137, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/home/matthew/conda/envs/venv/lib/python3.6/site-packages/Click-7.0-py3.6.egg/click/core.py", line 956, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/home/matthew/conda/envs/venv/lib/python3.6/site-packages/Click-7.0-py3.6.egg/click/core.py", line 555, in invoke
return callback(*args, **kwargs)
File "/home/matthew/conda/envs/venv/lib/python3.6/site-packages/ray-0.5.3-py3.6-linux-x86_64.egg/ray/scripts/scripts.py", line 440, in create_or_update
no_restart, restart_only, yes, cluster_name)
File "/home/matthew/conda/envs/venv/lib/python3.6/site-packages/ray-0.5.3-py3.6-linux-x86_64.egg/ray/autoscaler/commands.py", line 42, in create_or_update_cluster
config = _bootstrap_config(config)
File "/home/matthew/conda/envs/venv/lib/python3.6/site-packages/ray-0.5.3-py3.6-linux-x86_64.egg/ray/autoscaler/commands.py", line 64, in _bootstrap_config
resolved_config = bootstrap_config(config)
File "/home/matthew/conda/envs/venv/lib/python3.6/site-packages/ray-0.5.3-py3.6-linux-x86_64.egg/ray/autoscaler/aws/config.py", line 48, in bootstrap_aws
config = _configure_subnet(config)
File "/home/matthew/conda/envs/venv/lib/python3.6/site-packages/ray-0.5.3-py3.6-linux-x86_64.egg/ray/autoscaler/aws/config.py", line 156, in _configure_subnet
"No usable subnets found, try manually creating an instance in "
Exception: No usable subnets found, try manually creating an instance in your specified region to populate the list of subnets and trying this again. Note that the subnet must map public IPs on instance launch.
```
Is it possible to support private IPs? We operate inside a VPN and public IPs aren't an option for us. Thanks in advance for your help.
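For reference, a hedged sketch of how the flag introduced by the patch above would be set; every field except `use_internal_ips` is an illustrative placeholder, not taken from the patch:
```python
# Provider section of an autoscaler config, written as the Python dict the
# bootstrap code receives; only use_internal_ips comes from the patch.
config = {
    "cluster_name": "example",      # placeholder
    "provider": {
        "type": "aws",              # placeholder
        "region": "us-west-2",      # placeholder
        "use_internal_ips": True,   # new flag: accept subnets without public IPs
    },
}

# Mirrors the relaxed subnet filter in aws/config.py above.
use_internal_ips = config["provider"].get("use_internal_ips", False)
assert use_internal_ips
```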
| I believe the only reason to map public IPs is for the command to be able to SSH to the node it just launched. You can instead set up a jump node in the VPC, SSH to that, and run `ray up` there.
You'll also have to remove the check for public IPs: https://github.com/ray-project/ray/blob/master/python/ray/autoscaler/aws/config.py#L157
Let me know if this works. We can merge this as an option if it does.
Thanks for the quick response. I tried changing this line already and things seemed to work (at least better than before), although I also had to change the `use_internal_ip` default arg to `True` in the `NodeUpdater` class. I also didn't need to create a jump node, as my machine is already on the VPC VPN and so can automatically see the private IPs. Is it possible to get this change committed? | 2018-11-07T18:34:59 |
|
ray-project/ray | 3,286 | ray-project__ray-3286 | [
"3268"
] | 9dd3eedbac31d93cc32e9e87d03e8d8da1507fa6 | diff --git a/python/ray/tune/experiment.py b/python/ray/tune/experiment.py
--- a/python/ray/tune/experiment.py
+++ b/python/ray/tune/experiment.py
@@ -85,7 +85,7 @@ def __init__(self,
repeat=1,
num_samples=1,
local_dir=None,
- upload_dir="",
+ upload_dir=None,
checkpoint_freq=0,
checkpoint_at_end=False,
max_failures=3,
@@ -97,7 +97,7 @@ def __init__(self,
"trial_resources": trial_resources,
"num_samples": num_samples,
"local_dir": local_dir or DEFAULT_RESULTS_DIR,
- "upload_dir": upload_dir,
+ "upload_dir": upload_dir or "", # argparse converts None to "null"
"checkpoint_freq": checkpoint_freq,
"checkpoint_at_end": checkpoint_at_end,
"max_failures": max_failures,
| diff --git a/python/ray/tune/test/trial_runner_test.py b/python/ray/tune/test/trial_runner_test.py
--- a/python/ray/tune/test/trial_runner_test.py
+++ b/python/ray/tune/test/trial_runner_test.py
@@ -185,6 +185,21 @@ def train(config, reporter):
}
})
+ def testUploadDirNone(self):
+ def train(config, reporter):
+ reporter(timesteps_total=1)
+
+ [trial] = run_experiments({
+ "foo": {
+ "run": train,
+ "upload_dir": None,
+ "config": {
+ "a": "b"
+ },
+ }
+ })
+ self.assertFalse(trial.upload_dir)
+
def testLogdirStartingWithTilde(self):
local_dir = '~/ray_results/local_dir'
| [tune] run_experiment should allow upload_dir to be None
### System information
- **OS Platform and Distribution (e.g., Linux Ubuntu 16.04)**: Linux Ubuntu 18.04
- **Ray installed from (source or binary)**: source
- **Ray version**: 0.5.3
- **Python version**: 3.6
### Describe the problem
The `run_experiments` definition currently does not allow `None` to be passed as the `upload_dir`. This happens because the `to_argv` call [here](https://github.com/ray-project/ray/blob/4182b856118a38295e76db37c752e0a5c68352d8/python/ray/tune/config_parser.py#L182) converts `None` to `'null'`. Note that an empty string works fine.
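A minimal sketch of the suspected mechanism, assuming `to_argv` JSON-encodes values when building the trial's argv (an assumption; the patch comment only notes that `None` arrives as `"null"`):
```python
import json

upload_dir = None
print(json.dumps(upload_dir))  # 'null' — argparse then sees the string "null",
                               # not a falsy value, hence the `upload_dir or ""`
                               # substitution in the patch above.
```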
### Source code / logs
```python
import time
import ray
from ray import tune
def my_func(config, reporter): # add the reporter parameter
i = 0
while True:
reporter(timesteps_total=i, mean_accuracy=i ** config['alpha'])
i += config['beta']
time.sleep(0.01)
tune.register_trainable('my_func', my_func)
ray.init()
tune.run_experiments({
'my_experiment': {
'run': 'my_func',
'trial_resources': {'cpu': 8},
'stop': {'mean_accuracy': 100},
'config': {
'alpha': tune.grid_search([0.0, 0.2, 0.4, 0.6, 0.8, 1.0]),
'beta': tune.grid_search([2]),
},
'local_dir': '~/ray_results',
'upload_dir': None,
}
})
```
| 2018-11-09T21:13:31 |
|
ray-project/ray | 3,293 | ray-project__ray-3293 | [
"3057"
] | d681893b0f196f6599433a4c59d9edb5d858eabb | diff --git a/python/ray/tune/trainable.py b/python/ray/tune/trainable.py
--- a/python/ray/tune/trainable.py
+++ b/python/ray/tune/trainable.py
@@ -161,14 +161,14 @@ def train(self):
result.setdefault(DONE, False)
# self._timesteps_total should only be tracked if increments provided
- if result.get(TIMESTEPS_THIS_ITER):
+ if result.get(TIMESTEPS_THIS_ITER) is not None:
if self._timesteps_total is None:
self._timesteps_total = 0
self._timesteps_total += result[TIMESTEPS_THIS_ITER]
self._timesteps_since_restore += result[TIMESTEPS_THIS_ITER]
- # self._timesteps_total should only be tracked if increments provided
- if result.get(EPISODES_THIS_ITER):
+ # self._episodes_total should only be tracked if increments provided
+ if result.get(EPISODES_THIS_ITER) is not None:
if self._episodes_total is None:
self._episodes_total = 0
self._episodes_total += result[EPISODES_THIS_ITER]
| diff --git a/python/ray/tune/test/trial_runner_test.py b/python/ray/tune/test/trial_runner_test.py
--- a/python/ray/tune/test/trial_runner_test.py
+++ b/python/ray/tune/test/trial_runner_test.py
@@ -14,7 +14,8 @@
from ray.tune.ray_trial_executor import RayTrialExecutor
from ray.tune.schedulers import TrialScheduler, FIFOScheduler
from ray.tune.registry import _global_registry, TRAINABLE_CLASS
-from ray.tune.result import DEFAULT_RESULTS_DIR, TIMESTEPS_TOTAL, DONE
+from ray.tune.result import (DEFAULT_RESULTS_DIR, TIMESTEPS_TOTAL, DONE,
+ EPISODES_TOTAL)
from ray.tune.util import pin_in_object_store, get_pinned_object
from ray.tune.experiment import Experiment
from ray.tune.trial import Trial, Resources
@@ -419,10 +420,25 @@ def train(config, reporter):
})
self.assertIsNone(trial.last_result[TIMESTEPS_TOTAL])
- def train3(config, reporter):
+ def train2(config, reporter):
for i in range(10):
reporter(timesteps_total=5)
+ [trial2] = run_experiments({
+ "foo": {
+ "run": train2,
+ "config": {
+ "script_min_iter_time_s": 0,
+ },
+ }
+ })
+ self.assertEqual(trial2.last_result[TIMESTEPS_TOTAL], 5)
+ self.assertEqual(trial2.last_result["timesteps_this_iter"], 0)
+
+ def train3(config, reporter):
+ for i in range(10):
+ reporter(timesteps_this_iter=0, episodes_this_iter=0)
+
[trial3] = run_experiments({
"foo": {
"run": train3,
@@ -431,8 +447,8 @@ def train3(config, reporter):
},
}
})
- self.assertEqual(trial3.last_result[TIMESTEPS_TOTAL], 5)
- self.assertEqual(trial3.last_result["timesteps_this_iter"], 0)
+ self.assertEqual(trial3.last_result[TIMESTEPS_TOTAL], 0)
+ self.assertEqual(trial3.last_result[EPISODES_TOTAL], 0)
def testCheckpointDict(self):
class TestTrain(Trainable):
| [tune] Trial executor crashes with certain stop conditions
<!--
General questions should be asked on the mailing list [email protected].
Before submitting an issue, please fill out the following form.
-->
### System information
- **OS Platform and Distribution (e.g., Linux Ubuntu 16.04)**:
- **Ray installed from (source or binary)**:
- **Ray version**:
- **Python version**:
- **Exact command to reproduce**:
<!--
You can obtain the Ray version with
python -c "import ray; print(ray.__version__)"
-->
### Describe the problem
It seems one trial finishes, but the rest fail.
### Source code / logs
```
Traceback (most recent call last):
File "/home/eric/Desktop/ray-private/python/ray/tune/trial_runner.py", line 242, in _process_events
if trial.should_stop(result):
File "/home/eric/Desktop/ray-private/python/ray/tune/trial.py", line 213, in should_stop
if result[criteria] >= stop_value:
TypeError: unorderable types: NoneType() >= int()
Worker ip unknown, skipping log sync for /home/eric/ray_results/test/IMPALA_cartpole_stateless_4_2018-10-14_00-11-54wk3lun7w
== Status ==
Using FIFO scheduling algorithm.
Resources requested: 0/4 CPUs, 0/0 GPUs
Result logdir: /home/eric/ray_results/test
ERROR trials:
- IMPALA_cartpole_stateless_1: ERROR, 1 failures: /home/eric/ray_results/test/IMPALA_cartpole_stateless_1_2018-10-14_00-11-08bzsn9bjz/error_2018-10-14_00-11-23.txt
- IMPALA_cartpole_stateless_2: ERROR, 1 failures: /home/eric/ray_results/test/IMPALA_cartpole_stateless_2_2018-10-14_00-11-23zv6jbrbr/error_2018-10-14_00-11-38.txt
- IMPALA_cartpole_stateless_3: ERROR, 1 failures: /home/eric/ray_results/test/IMPALA_cartpole_stateless_3_2018-10-14_00-11-38p18gjmul/error_2018-10-14_00-11-54.txt
- IMPALA_cartpole_stateless_4: ERROR, 1 failures: /home/eric/ray_results/test/IMPALA_cartpole_stateless_4_2018-10-14_00-11-54wk3lun7w/error_2018-10-14_00-12-09.txt
TERMINATED trials:
- IMPALA_cartpole_stateless_0: TERMINATED [pid=19362], 173 s, 17 iter, 143900 ts, 221 rew
```
```
tune.run_experiments({
"test": {
"env": "cartpole_stateless",
"run": "IMPALA",
"num_samples": 5,
"stop": {
"episode_reward_mean": args.stop,
"timesteps_total": 200000,
},
"config": {
"num_workers": 2,
"num_gpus": 0,
"vf_loss_coeff": 0.01,
"model": {
"use_lstm": True,
},
},
}
})
```
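A minimal sketch of the failure mode, mirroring the truthiness checks changed in the patch above:
```python
# 0 is falsy, so a truthiness check on .get() skips the update entirely and
# the running total stays None; the stop check then compares None >= int.
result = {"timesteps_this_iter": 0}
timesteps_total = None

if result.get("timesteps_this_iter"):              # buggy: False when the value is 0
    timesteps_total = (timesteps_total or 0) + result["timesteps_this_iter"]
print(timesteps_total)                             # None -> `None >= 200000` raises

if result.get("timesteps_this_iter") is not None:  # fixed: only skips a missing key
    timesteps_total = (timesteps_total or 0) + result["timesteps_this_iter"]
print(timesteps_total)                             # 0 -> the stop comparison works
```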
| @richardliaw | 2018-11-10T23:13:39 |
ray-project/ray | 3,294 | ray-project__ray-3294 | [
"2807"
] | e37891d79d0920175485a288e735cd88c8013085 | diff --git a/python/ray/tune/function_runner.py b/python/ray/tune/function_runner.py
--- a/python/ray/tune/function_runner.py
+++ b/python/ray/tune/function_runner.py
@@ -14,11 +14,12 @@
class StatusReporter(object):
- """Object passed into your main() that you can report status through.
+ """Object passed into your function that you can report status through.
Example:
- >>> reporter = StatusReporter()
- >>> reporter(timesteps_total=1)
+ >>> def trainable_function(config, reporter):
+ >>> assert isinstance(reporter, StatusReporter)
+ >>> reporter(timesteps_total=1)
"""
def __init__(self):
@@ -33,6 +34,9 @@ def __call__(self, **kwargs):
Args:
kwargs: Latest training result status.
+
+ Example:
+ >>> reporter(mean_accuracy=1, training_iteration=4)
"""
with self._lock:
diff --git a/python/ray/tune/result.py b/python/ray/tune/result.py
--- a/python/ray/tune/result.py
+++ b/python/ray/tune/result.py
@@ -4,6 +4,8 @@
import os
+# yapf: disable
+# __sphinx_doc_begin__
# (Optional/Auto-filled) training is terminated. Filled only if not provided.
DONE = "done"
@@ -37,6 +39,8 @@
# (Auto-filled) The index of this training iteration.
TRAINING_ITERATION = "training_iteration"
+# __sphinx_doc_end__
+# yapf: enable
# Where Tune writes result files by default
DEFAULT_RESULTS_DIR = (os.environ.get("TUNE_RESULT_DIR")
| [docs/tune] Provide better documentation for reporter/results
### Describe the problem
The `reporter` object in the function-based API is not well documented in the docs.
### Source code / logs
https://groups.google.com/forum/?utm_medium=email&utm_source=footer#!msg/ray-dev/y4UXbSPoYaw/cLmdgq2nAwAJ
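A short sketch of the function-based usage the docs should cover, pieced together from the docstring changes above; the config values and stop criterion are illustrative:
```python
import ray
from ray import tune

def trainable_function(config, reporter):
    for i in range(10):
        # reporter accepts arbitrary keyword metrics; Tune auto-fills fields
        # such as training_iteration and done (see result.py above).
        reporter(timesteps_total=i, mean_accuracy=i * config["alpha"])

ray.init()
tune.register_trainable("my_func", trainable_function)
tune.run_experiments({
    "doc_example": {
        "run": "my_func",
        "config": {"alpha": 0.5},
        "stop": {"training_iteration": 10},
    },
})
```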
| 2018-11-11T00:40:40 |