repo
stringclasses 856
values | pull_number
int64 3
127k
| instance_id
stringlengths 12
58
| issue_numbers
sequencelengths 1
5
| base_commit
stringlengths 40
40
| patch
stringlengths 67
1.54M
| test_patch
stringlengths 0
107M
| problem_statement
stringlengths 3
307k
| hints_text
stringlengths 0
908k
| created_at
timestamp[s] |
---|---|---|---|---|---|---|---|---|---|
litestar-org/litestar | 3,100 | litestar-org__litestar-3100 | [
"3068"
] | 1e42fa2737906dfab690af65f6b571cfa24a1ed4 | diff --git a/litestar/_openapi/responses.py b/litestar/_openapi/responses.py
--- a/litestar/_openapi/responses.py
+++ b/litestar/_openapi/responses.py
@@ -9,9 +9,10 @@
from typing import TYPE_CHECKING, Any, Iterator
from litestar._openapi.schema_generation import SchemaCreator
+from litestar._openapi.schema_generation.utils import get_formatted_examples
from litestar.enums import MediaType
from litestar.exceptions import HTTPException, ValidationException
-from litestar.openapi.spec import Example, OpenAPIResponse
+from litestar.openapi.spec import Example, OpenAPIResponse, Reference
from litestar.openapi.spec.enums import OpenAPIFormat, OpenAPIType
from litestar.openapi.spec.header import OpenAPIHeader
from litestar.openapi.spec.media_type import OpenAPIMediaType
@@ -240,14 +241,20 @@ def create_additional_responses(self) -> Iterator[tuple[str, OpenAPIResponse]]:
prefer_alias=False,
generate_examples=additional_response.generate_examples,
)
- schema = schema_creator.for_field_definition(
- FieldDefinition.from_annotation(additional_response.data_container)
- )
+ field_def = FieldDefinition.from_annotation(additional_response.data_container)
+ schema = schema_creator.for_field_definition(field_def)
+
+ examples: dict[str, Example | Reference] | None
+ if additional_response.examples:
+ examples = dict(get_formatted_examples(field_def, additional_response.examples))
+ else:
+ examples = None
+
yield (
str(status_code),
OpenAPIResponse(
description=additional_response.description,
- content={additional_response.media_type: OpenAPIMediaType(schema=schema)},
+ content={additional_response.media_type: OpenAPIMediaType(schema=schema, examples=examples)},
),
)
diff --git a/litestar/openapi/datastructures.py b/litestar/openapi/datastructures.py
--- a/litestar/openapi/datastructures.py
+++ b/litestar/openapi/datastructures.py
@@ -6,6 +6,7 @@
from litestar.enums import MediaType
if TYPE_CHECKING:
+ from litestar.openapi.spec import Example
from litestar.types import DataContainerType
@@ -24,3 +25,5 @@ class ResponseSpec:
"""A description of the response."""
media_type: MediaType = field(default=MediaType.JSON)
"""Response media type."""
+ examples: list[Example] | None = field(default=None)
+ """A list of Example models."""
| diff --git a/tests/unit/test_openapi/test_responses.py b/tests/unit/test_openapi/test_responses.py
--- a/tests/unit/test_openapi/test_responses.py
+++ b/tests/unit/test_openapi/test_responses.py
@@ -28,7 +28,7 @@
from litestar.handlers import HTTPRouteHandler
from litestar.openapi.config import OpenAPIConfig
from litestar.openapi.datastructures import ResponseSpec
-from litestar.openapi.spec import OpenAPIHeader, OpenAPIMediaType, Reference, Schema
+from litestar.openapi.spec import Example, OpenAPIHeader, OpenAPIMediaType, Reference, Schema
from litestar.openapi.spec.enums import OpenAPIType
from litestar.response import File, Redirect, Stream, Template
from litestar.response.base import T
@@ -416,6 +416,28 @@ def handler() -> DataclassPerson:
assert responses["400"].description == "Overwritten response"
+def test_additional_responses_with_custom_examples(create_factory: CreateFactoryFixture) -> None:
+ @get(responses={200: ResponseSpec(DataclassPerson, examples=[Example(value={"string": "example", "number": 1})])})
+ def handler() -> DataclassPerson:
+ return DataclassPersonFactory.build()
+
+ factory = create_factory(handler)
+ responses = factory.create_additional_responses()
+ status_code, response = next(responses)
+ assert response.content
+ assert response.content["application/json"].examples == {
+ "dataclassperson-example-1": Example(
+ value={
+ "string": "example",
+ "number": 1,
+ }
+ ),
+ }
+
+ with pytest.raises(StopIteration):
+ next(responses)
+
+
def test_create_response_for_response_subclass(create_factory: CreateFactoryFixture) -> None:
class CustomResponse(Response[T]):
pass
| Enhancement: Add `ResponseSpec(examples=...)`
### Summary
Support adding custom examples for the response body.
The inbound parameters can be defined via `Parameter`, but the same does not work for the response body. `ResponseSpec` only has `generate_examples`, but it should support also `examples` for custom ones.
### Basic Example
_No response_
### Drawbacks and Impact
_No response_
### Unresolved questions
_No response_
<!-- POLAR PLEDGE BADGE START -->
---
> [!NOTE]
> While we are open for sponsoring on [GitHub Sponsors](https://github.com/sponsors/litestar-org/) and
> [OpenCollective](https://opencollective.com/litestar), we also utilize [Polar.sh](https://polar.sh/) to engage in pledge-based sponsorship.
>
> Check out all issues funded or available for funding [on our Polar.sh dashboard](https://polar.sh/litestar-org)
> * If you would like to see an issue prioritized, make a pledge towards it!
> * We receive the pledge once the issue is completed & verified
> * This, along with engagement in the community, helps us know which features are a priority to our users.
<a href="https://polar.sh/litestar-org/litestar/issues/3068">
<picture>
<source media="(prefers-color-scheme: dark)" srcset="https://polar.sh/api/github/litestar-org/litestar/issues/3068/pledge.svg?darkmode=1">
<img alt="Fund with Polar" src="https://polar.sh/api/github/litestar-org/litestar/issues/3068/pledge.svg">
</picture>
</a>
<!-- POLAR PLEDGE BADGE END -->
| Related, any thoughts about supporting `Parameter` (-like) construct for response annotation?
```py
@get()
def endpoint() -> Annotated[ResponseModel, ResponseSpec(examples=[...])]: ...
```
> Related, any thoughts about supporting `Parameter` (-like) construct for response annotation?
>
> ```python
> @get()
> def endpoint() -> Annotated[ResponseModel, ResponseSpec(examples=[...])]: ...
> ```
This feels reasonable to me. | 2024-02-13T07:13:19 |
litestar-org/litestar | 3,106 | litestar-org__litestar-3106 | [
"3082"
] | b266f5ad17ee87e456f712b70e4c966ae4444068 | diff --git a/litestar/app.py b/litestar/app.py
--- a/litestar/app.py
+++ b/litestar/app.py
@@ -421,6 +421,15 @@ def __init__(
if self.pdb_on_exception:
warn_pdb_on_exception()
+ try:
+ from starlette.exceptions import HTTPException as StarletteHTTPException
+
+ from litestar.middleware.exceptions.middleware import _starlette_exception_handler
+
+ config.exception_handlers.setdefault(StarletteHTTPException, _starlette_exception_handler)
+ except ImportError:
+ pass
+
super().__init__(
after_request=config.after_request,
after_response=config.after_response,
diff --git a/litestar/middleware/exceptions/middleware.py b/litestar/middleware/exceptions/middleware.py
--- a/litestar/middleware/exceptions/middleware.py
+++ b/litestar/middleware/exceptions/middleware.py
@@ -9,7 +9,7 @@
from litestar.datastructures import Headers
from litestar.enums import MediaType, ScopeType
-from litestar.exceptions import WebSocketException
+from litestar.exceptions import HTTPException, LitestarException, WebSocketException
from litestar.middleware.cors import CORSMiddleware
from litestar.middleware.exceptions._debug_response import _get_type_encoders_for_request, create_debug_response
from litestar.serialization import encode_json
@@ -20,6 +20,8 @@
if TYPE_CHECKING:
+ from starlette.exceptions import HTTPException as StarletteHTTPException
+
from litestar import Response
from litestar.app import Litestar
from litestar.connection import Request
@@ -58,15 +60,16 @@ def get_exception_handler(exception_handlers: ExceptionHandlersMap, exc: Excepti
if not exception_handlers:
return None
- status_code: int | None = getattr(exc, "status_code", None)
- if status_code and (exception_handler := exception_handlers.get(status_code)):
- return exception_handler
+ default_handler: ExceptionHandler | None = None
+ if isinstance(exc, HTTPException):
+ if exception_handler := exception_handlers.get(exc.status_code):
+ return exception_handler
+ else:
+ default_handler = exception_handlers.get(HTTP_500_INTERNAL_SERVER_ERROR)
return next(
(exception_handlers[cast("Type[Exception]", cls)] for cls in getmro(type(exc)) if cls in exception_handlers),
- exception_handlers[HTTP_500_INTERNAL_SERVER_ERROR]
- if not hasattr(exc, "status_code") and HTTP_500_INTERNAL_SERVER_ERROR in exception_handlers
- else None,
+ default_handler,
)
@@ -107,6 +110,17 @@ def to_response(self, request: Request | None = None) -> Response:
)
+def _starlette_exception_handler(request: Request[Any, Any, Any], exc: StarletteHTTPException) -> Response:
+ return create_exception_response(
+ request=request,
+ exc=HTTPException(
+ detail=exc.detail,
+ status_code=exc.status_code,
+ headers=exc.headers,
+ ),
+ )
+
+
def create_exception_response(request: Request[Any, Any, Any], exc: Exception) -> Response:
"""Construct a response from an exception.
@@ -122,11 +136,23 @@ def create_exception_response(request: Request[Any, Any, Any], exc: Exception) -
Returns:
Response: HTTP response constructed from exception details.
"""
- status_code = getattr(exc, "status_code", HTTP_500_INTERNAL_SERVER_ERROR)
- if status_code == HTTP_500_INTERNAL_SERVER_ERROR:
- detail = "Internal Server Error"
+ headers: dict[str, Any] | None
+ extra: dict[str, Any] | list | None
+
+ if isinstance(exc, HTTPException):
+ status_code = exc.status_code
+ headers = exc.headers
+ extra = exc.extra
else:
- detail = getattr(exc, "detail", repr(exc))
+ status_code = HTTP_500_INTERNAL_SERVER_ERROR
+ headers = None
+ extra = None
+
+ detail = (
+ exc.detail
+ if isinstance(exc, LitestarException) and status_code != HTTP_500_INTERNAL_SERVER_ERROR
+ else "Internal Server Error"
+ )
try:
media_type = request.route_handler.media_type
@@ -136,8 +162,8 @@ def create_exception_response(request: Request[Any, Any, Any], exc: Exception) -
content = ExceptionResponseContent(
status_code=status_code,
detail=detail,
- headers=getattr(exc, "headers", None),
- extra=getattr(exc, "extra", None),
+ headers=headers,
+ extra=extra,
media_type=media_type,
)
return content.to_response(request=request)
@@ -246,12 +272,13 @@ async def handle_websocket_exception(send: Send, exc: Exception) -> None:
Returns:
None.
"""
+ code = 4000 + HTTP_500_INTERNAL_SERVER_ERROR
+ reason = "Internal Server Error"
if isinstance(exc, WebSocketException):
code = exc.code
reason = exc.detail
- else:
- code = 4000 + getattr(exc, "status_code", HTTP_500_INTERNAL_SERVER_ERROR)
- reason = getattr(exc, "detail", repr(exc))
+ elif isinstance(exc, LitestarException):
+ reason = exc.detail
event: WebSocketCloseEvent = {"type": "websocket.close", "code": code, "reason": reason}
await send(event)
@@ -266,7 +293,7 @@ def default_http_exception_handler(self, request: Request, exc: Exception) -> Re
Returns:
An HTTP response.
"""
- status_code = getattr(exc, "status_code", HTTP_500_INTERNAL_SERVER_ERROR)
+ status_code = exc.status_code if isinstance(exc, HTTPException) else HTTP_500_INTERNAL_SERVER_ERROR
if status_code == HTTP_500_INTERNAL_SERVER_ERROR and self._get_debug_scope(request.scope):
return create_debug_response(request=request, exc=exc)
return create_exception_response(request=request, exc=exc)
| diff --git a/tests/unit/test_app.py b/tests/unit/test_app.py
--- a/tests/unit/test_app.py
+++ b/tests/unit/test_app.py
@@ -164,15 +164,7 @@ def test_app_config_object_used(app_config_object: AppConfig, monkeypatch: pytes
# have been accessed during app instantiation.
property_mocks: List[Tuple[str, Mock]] = []
for field in fields(AppConfig):
- if field.name == "response_cache_config":
- property_mock = PropertyMock(return_value=ResponseCacheConfig())
- if field.name in ["event_emitter_backend", "response_cache_config"]:
- property_mock = PropertyMock(return_value=Mock())
- else:
- # default iterable return value allows the mock properties that need to be iterated over in
- # `Litestar.__init__()` to not blow up, for other properties it shouldn't matter what the value is for the
- # sake of this test.
- property_mock = PropertyMock(return_value=[])
+ property_mock = PropertyMock()
property_mocks.append((field.name, property_mock))
monkeypatch.setattr(type(app_config_object), field.name, property_mock, raising=False)
diff --git a/tests/unit/test_exceptions.py b/tests/unit/test_exceptions.py
--- a/tests/unit/test_exceptions.py
+++ b/tests/unit/test_exceptions.py
@@ -111,15 +111,13 @@ def test_create_exception_response_utility_litestar_http_exception(media_type: M
@pytest.mark.parametrize("media_type", [MediaType.JSON, MediaType.TEXT])
def test_create_exception_response_utility_starlette_http_exception(media_type: MediaType) -> None:
- exc = StarletteHTTPException(detail="starlette http exception", status_code=HTTP_400_BAD_REQUEST)
- request = RequestFactory(handler_kwargs={"media_type": media_type}).get()
- response = create_exception_response(request=request, exc=exc)
- assert response.status_code == HTTP_400_BAD_REQUEST
- assert response.media_type == media_type
- if media_type == MediaType.JSON:
- assert response.content == {"status_code": 400, "detail": "starlette http exception"}
- else:
- assert response.content == b'{"status_code":400,"detail":"starlette http exception"}'
+ @get("/", media_type=media_type)
+ def handler() -> str:
+ raise StarletteHTTPException(status_code=400)
+
+ with create_test_client(handler) as client:
+ response = client.get("/", headers={"Accept": media_type})
+ assert response.json() == {"status_code": 400, "detail": "Bad Request"}
@pytest.mark.parametrize("media_type", [MediaType.JSON, MediaType.TEXT])
@@ -171,3 +169,30 @@ def handler() -> None:
assert response.json().get("details").startswith("Traceback (most recent call last")
else:
assert response.text.startswith("Traceback (most recent call last")
+
+
+def test_non_litestar_exception_with_status_code_is_500() -> None:
+ # https://github.com/litestar-org/litestar/issues/3082
+ class MyException(Exception):
+ status_code: int = 400
+
+ @get("/")
+ def handler() -> None:
+ raise MyException("hello")
+
+ with create_test_client([handler]) as client:
+ assert client.get("/").status_code == 500
+
+
+def test_non_litestar_exception_with_detail_is_not_included() -> None:
+ # https://github.com/litestar-org/litestar/issues/3082
+ class MyException(Exception):
+ status_code: int = 400
+ detail: str = "hello"
+
+ @get("/")
+ def handler() -> None:
+ raise MyException()
+
+ with create_test_client([handler], debug=False) as client:
+ assert client.get("/", headers={"Accept": MediaType.JSON}).json().get("detail") == "Internal Server Error"
diff --git a/tests/unit/test_middleware/test_exception_handler_middleware.py b/tests/unit/test_middleware/test_exception_handler_middleware.py
--- a/tests/unit/test_middleware/test_exception_handler_middleware.py
+++ b/tests/unit/test_middleware/test_exception_handler_middleware.py
@@ -12,7 +12,7 @@
from litestar.logging.config import LoggingConfig, StructLoggingConfig
from litestar.middleware.exceptions import ExceptionHandlerMiddleware
from litestar.middleware.exceptions._debug_response import get_symbol_name
-from litestar.middleware.exceptions.middleware import get_exception_handler
+from litestar.middleware.exceptions.middleware import _starlette_exception_handler, get_exception_handler
from litestar.status_codes import HTTP_400_BAD_REQUEST, HTTP_500_INTERNAL_SERVER_ERROR
from litestar.testing import TestClient, create_test_client
from litestar.types import ExceptionHandlersMap
@@ -124,7 +124,8 @@ def exception_handler(request: Request, exc: Exception) -> Response:
app = Litestar(route_handlers=[handler], exception_handlers={Exception: exception_handler}, openapi_config=None)
assert app.asgi_router.root_route_map_node.children["/"].asgi_handlers["GET"][0].exception_handlers == { # type: ignore
- Exception: exception_handler
+ Exception: exception_handler,
+ StarletteHTTPException: _starlette_exception_handler,
}
| Bug: Exception handler leaks internal exceptions
### Discussed in https://github.com/orgs/litestar-org/discussions/3060
<div type='discussions-op-text'>
<sup>Originally posted by **floxay** February 1, 2024</sup>
I have posted this [on Discord](https://discord.com/channels/919193495116337154/919193495690936353/1197749178399465542) before but it did not really go anywhere.
As I am unsure whether this should be a bug or enhancement I post it now on Github as a discussion and then can be converted to either if needed. (This is basically the same message as the one on Discord, nothing new has been added just structured slightly differently.)
The [documented behavior of exception handling](https://docs.litestar.dev/latest/usage/exceptions.html#exception-handling) mentions:
> If the errors are ***instances of [HTTPException](https://docs.litestar.dev/latest/reference/exceptions.html#litestar.exceptions.HTTPException)***, the responses will include the appropriate status_code.
Which is incorrect; what actually is happening is that Litestar tries to retrieve the value of the `status_code` field/property, regardless whether the object is an instance of `HTTPException` or not.
Code; https://github.com/litestar-org/litestar/blob/24095740e7c12220a8ca5cc14766cf0ec95764ca/litestar/middleware/exceptions/middleware.py#L125-L127 Notes from the same function above:
https://github.com/litestar-org/litestar/blob/24095740e7c12220a8ca5cc14766cf0ec95764ca/litestar/middleware/exceptions/middleware.py#L113-L116 One of these is definitely incorrect, and I would guess it is the documentation that is incomplete.
Before the documentation just gets changed to reflect what actually is happening I would also like to point out that this behavior is problematic.
I only noticed this issue because I got an unhandled Elasticsearch error back from the backend stringified in the exception details:

Related code: https://github.com/elastic/elasticsearch-py/blob/5014ce5337594f66040c81a2610220b1e8c0527e/elasticsearch/exceptions.py#L43-L46
This error also contains other information such as how (e.g. IP:port) the client is connected to elastic and headers which expose versions of used modules.
I do not think this kind of stuff generally should make it out to the client by default, debug mode was also not enabled.
For now I have added a custom exception handler for Elastic errors but on the long term this does not feel right, I would essentially need check out all external modules whether they have a `status_code` filed/property or not and if it would pose a problem.
I am not really sure what the right(?, better?, proper?) way to handle this would be, I've mentioned an exception class whitelist mechanic for the exception handler in the Discord comment as a potential solution but I imagine it would also affect the mounting of other ASGI apps, like Starlette apps and would be *very* breaking.</div>
<!-- POLAR PLEDGE BADGE START -->
---
> [!NOTE]
> While we are open for sponsoring on [GitHub Sponsors](https://github.com/sponsors/litestar-org/) and
> [OpenCollective](https://opencollective.com/litestar), we also utilize [Polar.sh](https://polar.sh/) to engage in pledge-based sponsorship.
>
> Check out all issues funded or available for funding [on our Polar.sh dashboard](https://polar.sh/litestar-org)
> * If you would like to see an issue prioritized, make a pledge towards it!
> * We receive the pledge once the issue is completed & verified
> * This, along with engagement in the community, helps us know which features are a priority to our users.
<a href="https://polar.sh/litestar-org/litestar/issues/3082">
<picture>
<source media="(prefers-color-scheme: dark)" srcset="https://polar.sh/api/github/litestar-org/litestar/issues/3082/pledge.svg?darkmode=1">
<img alt="Fund with Polar" src="https://polar.sh/api/github/litestar-org/litestar/issues/3082/pledge.svg">
</picture>
</a>
<!-- POLAR PLEDGE BADGE END -->
| 2024-02-13T14:33:55 |
|
litestar-org/litestar | 3,109 | litestar-org__litestar-3109 | [
"3063"
] | 67ab44dc9379c323fd2a3f7f223091a83f9eb0a9 | diff --git a/litestar/data_extractors.py b/litestar/data_extractors.py
--- a/litestar/data_extractors.py
+++ b/litestar/data_extractors.py
@@ -1,6 +1,7 @@
from __future__ import annotations
-from typing import TYPE_CHECKING, Any, Callable, Coroutine, Literal, TypedDict, cast
+import inspect
+from typing import TYPE_CHECKING, Any, Callable, Coroutine, Iterable, Literal, TypedDict, cast
from litestar._parsers import parse_cookie_string
from litestar.connection.request import Request
@@ -70,6 +71,7 @@ class ConnectionDataExtractor:
"parse_query",
"obfuscate_headers",
"obfuscate_cookies",
+ "skip_parse_malformed_body",
)
def __init__(
@@ -88,6 +90,7 @@ def __init__(
obfuscate_headers: set[str] | None = None,
parse_body: bool = False,
parse_query: bool = False,
+ skip_parse_malformed_body: bool = False,
) -> None:
"""Initialize ``ConnectionDataExtractor``
@@ -106,9 +109,11 @@ def __init__(
obfuscate_cookies: cookie keys to obfuscate. Obfuscated values are replaced with '*****'.
parse_body: Whether to parse the body value or return the raw byte string, (for requests only).
parse_query: Whether to parse query parameters or return the raw byte string.
+ skip_parse_malformed_body: Whether to skip parsing the body if it is malformed
"""
self.parse_body = parse_body
self.parse_query = parse_query
+ self.skip_parse_malformed_body = skip_parse_malformed_body
self.obfuscate_headers = {h.lower() for h in (obfuscate_headers or set())}
self.obfuscate_cookies = {c.lower() for c in (obfuscate_cookies or set())}
self.connection_extractors: dict[str, Callable[[ASGIConnection[Any, Any, Any, Any]], Any]] = {}
@@ -153,6 +158,25 @@ def __call__(self, connection: ASGIConnection[Any, Any, Any, Any]) -> ExtractedR
)
return cast("ExtractedRequestData", {key: extractor(connection) for key, extractor in extractors.items()})
+ async def extract(
+ self, connection: ASGIConnection[Any, Any, Any, Any], fields: Iterable[str]
+ ) -> ExtractedRequestData:
+ extractors = (
+ {**self.connection_extractors, **self.request_extractors} # type: ignore
+ if isinstance(connection, Request)
+ else self.connection_extractors
+ )
+ data = {}
+ for key, extractor in extractors.items():
+ if key not in fields:
+ continue
+ if inspect.iscoroutinefunction(extractor):
+ value = await extractor(connection)
+ else:
+ value = extractor(connection)
+ data[key] = value
+ return cast("ExtractedRequestData", data)
+
@staticmethod
def extract_scheme(connection: ASGIConnection[Any, Any, Any, Any]) -> str:
"""Extract the scheme from an ``ASGIConnection``
@@ -272,13 +296,20 @@ async def extract_body(self, request: Request[Any, Any, Any]) -> Any:
return None
if not self.parse_body:
return await request.body()
- request_encoding_type = request.content_type[0]
- if request_encoding_type == RequestEncodingType.JSON:
- return await request.json()
- form_data = await request.form()
- if request_encoding_type == RequestEncodingType.URL_ENCODED:
- return dict(form_data)
- return {key: repr(value) if isinstance(value, UploadFile) else value for key, value in form_data.multi_items()}
+ try:
+ request_encoding_type = request.content_type[0]
+ if request_encoding_type == RequestEncodingType.JSON:
+ return await request.json()
+ form_data = await request.form()
+ if request_encoding_type == RequestEncodingType.URL_ENCODED:
+ return dict(form_data)
+ return {
+ key: repr(value) if isinstance(value, UploadFile) else value for key, value in form_data.multi_items()
+ }
+ except Exception as exc:
+ if self.skip_parse_malformed_body:
+ return await request.body()
+ raise exc
class ExtractedResponseData(TypedDict, total=False):
diff --git a/litestar/middleware/logging.py b/litestar/middleware/logging.py
--- a/litestar/middleware/logging.py
+++ b/litestar/middleware/logging.py
@@ -1,7 +1,6 @@
from __future__ import annotations
from dataclasses import dataclass, field
-from inspect import isawaitable
from typing import TYPE_CHECKING, Any, Iterable
from litestar.constants import (
@@ -81,6 +80,7 @@ def __init__(self, app: ASGIApp, config: LoggingMiddlewareConfig) -> None:
obfuscate_headers=self.config.request_headers_to_obfuscate,
parse_body=self.is_struct_logger,
parse_query=self.is_struct_logger,
+ skip_parse_malformed_body=True,
)
self.response_extractor = ResponseDataExtractor(
extract_body="body" in self.config.response_log_fields,
@@ -172,12 +172,11 @@ async def extract_request_data(self, request: Request) -> dict[str, Any]:
data: dict[str, Any] = {"message": self.config.request_log_message}
serializer = get_serializer_from_scope(request.scope)
- extracted_data = self.request_extractor(connection=request)
+
+ extracted_data = await self.request_extractor.extract(connection=request, fields=self.config.request_log_fields)
+
for key in self.config.request_log_fields:
- value = extracted_data.get(key)
- if isawaitable(value):
- value = await value
- data[key] = self._serialize_value(serializer, value)
+ data[key] = self._serialize_value(serializer, extracted_data.get(key))
return data
def extract_response_data(self, scope: Scope) -> dict[str, Any]:
| diff --git a/tests/unit/test_data_extractors.py b/tests/unit/test_data_extractors.py
--- a/tests/unit/test_data_extractors.py
+++ b/tests/unit/test_data_extractors.py
@@ -1,6 +1,8 @@
from typing import Any, List
+from unittest.mock import AsyncMock
import pytest
+from pytest_mock import MockFixture
from litestar import Request
from litestar.connection.base import empty_receive
@@ -108,3 +110,18 @@ async def send(message: "Any") -> None:
assert extracted_data.get("body") == b'{"hello":"world"}'
assert extracted_data.get("headers") == {**headers, "content-length": "17"}
assert extracted_data.get("cookies") == {"Path": "/", "SameSite": "lax", "auth": "", "regular": ""}
+
+
+async def test_request_data_extractor_skip_keys() -> None:
+ req = factory.get()
+ extractor = ConnectionDataExtractor()
+ assert (await extractor.extract(req, {"body"})).keys() == {"body"}
+
+
+async def test_skip_parse_malformed_body_false_raises(mocker: MockFixture) -> None:
+ mocker.patch("litestar.testing.request_factory.Request.json", new=AsyncMock(side_effect=ValueError()))
+ req = factory.post(headers={"Content-Type": "application/json"})
+ extractor = ConnectionDataExtractor(parse_body=True, skip_parse_malformed_body=False)
+
+ with pytest.raises(ValueError):
+ await extractor.extract(req, {"body"})
diff --git a/tests/unit/test_middleware/test_logging_middleware.py b/tests/unit/test_middleware/test_logging_middleware.py
--- a/tests/unit/test_middleware/test_logging_middleware.py
+++ b/tests/unit/test_middleware/test_logging_middleware.py
@@ -1,5 +1,5 @@
from logging import INFO
-from typing import TYPE_CHECKING, Dict
+from typing import TYPE_CHECKING, Any, Dict
import pytest
from structlog.testing import capture_logs
@@ -286,3 +286,17 @@ async def get_session() -> None:
assert response.status_code == HTTP_200_OK
assert "session" in client.cookies
assert client.cookies["session"] == session_id
+
+
+def test_structlog_invalid_request_body_handled() -> None:
+ # https://github.com/litestar-org/litestar/issues/3063
+ @post("/")
+ async def hello_world(data: Dict[str, Any]) -> Dict[str, Any]:
+ return data
+
+ with create_test_client(
+ route_handlers=[hello_world],
+ logging_config=StructLoggingConfig(log_exceptions="always"),
+ middleware=[LoggingMiddlewareConfig().middleware],
+ ) as client:
+ assert client.post("/", headers={"Content-Type": "application/json"}, content=b'{"a": "b",}').status_code == 400
| Bug: Logging middleware with structlog causes application to return HTTP 500 when request body is malformed
### Description
It looks to me that the logging middleware is attempting to parse the body of the incoming requests and if the parsing fails this results in an HTTP 500 Internal Error instead of an HTTP 400 Bad Request. If I don't use the logging middleware, the behavior is what I would normally expect to happen.
I've tested this with standard logging and structlog (structlog being what I use in my project) and the behavior is the same.
### URL to code causing the issue
_No response_
### MCVE
```python
from typing import Any
from litestar import Litestar, post
from litestar.logging.config import LoggingConfig, StructLoggingConfig
from litestar.middleware.logging import LoggingMiddlewareConfig
@post("/")
async def hello_world(data: dict[str, Any]) -> dict[str, Any]:
"""Route Handler that outputs hello world."""
return data
app = Litestar(
route_handlers=[hello_world],
# logging_config=LoggingConfig(log_exceptions="always"), # Also happens with standard logging
logging_config=StructLoggingConfig(log_exceptions="always"),
middleware=[LoggingMiddlewareConfig().middleware],
)
```
### Steps to reproduce
```bash
1. Run example, e.g.: `litestar --app bug:app run`
2. Post invalid json: `curl --header "Content-Type: application/json" --request POST --data '{"a": "b",}' http://localhost:8000`
3. Examine the logs.
```
### Screenshots
_No response_
### Logs
```bash
!!! I'm using the standard logging version output as it formats the exception in a readable manner
!!! but the outcome is essentially the same regardless if using structlog or standard logging.
┌─┤10:26:41│mihai@mobilews:~/bug
└──────────╼ litestar --app bug:app run
Using Litestar app from env: 'bug:app'
Starting server process ───────────────────────────────────────────────────────────────────────────────────────────────────────
┌──────────────────────────────┬──────────────────────┐
│ Litestar version │ 2.5.4 │
│ Debug mode │ Disabled │
│ Python Debugger on exception │ Disabled │
│ CORS │ Disabled │
│ CSRF │ Disabled │
│ OpenAPI │ Enabled path=/schema │
│ Compression │ Disabled │
│ Middlewares │ LoggingMiddleware │
└──────────────────────────────┴──────────────────────┘
INFO: Started server process [110144]
INFO: Waiting for application startup.
INFO: Application startup complete.
INFO: Uvicorn running on http://127.0.0.1:8000 (Press CTRL+C to quit)
INFO: 127.0.0.1:35020 - "POST / HTTP/1.1" 500 Internal Server Error
ERROR - 2024-02-02 10:26:45,152 - litestar - config - exception raised on http connection to route /
Traceback (most recent call last):
File "/home/mihai/.virtualenvs/litestar/lib/python3.11/site-packages/litestar/serialization/msgspec_hooks.py", line 186, in decode_json
return _msgspec_json_decoder.decode(value)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
msgspec.DecodeError: JSON is malformed: trailing comma in object (byte 10)
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/home/mihai/.virtualenvs/litestar/lib/python3.11/site-packages/litestar/middleware/exceptions/middleware.py", line 192, in __call__
await self.app(scope, receive, send)
File "/home/mihai/.virtualenvs/litestar/lib/python3.11/site-packages/litestar/middleware/base.py", line 129, in wrapped_call
await original__call__(self, scope, receive, send) # pyright: ignore
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/mihai/.virtualenvs/litestar/lib/python3.11/site-packages/litestar/middleware/logging.py", line 112, in __call__
await self.log_request(scope=scope, receive=receive)
File "/home/mihai/.virtualenvs/litestar/lib/python3.11/site-packages/litestar/middleware/logging.py", line 126, in log_request
extracted_data = await self.extract_request_data(request=scope["app"].request_class(scope, receive))
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/mihai/.virtualenvs/litestar/lib/python3.11/site-packages/litestar/middleware/logging.py", line 179, in extract_request_data
value = await value
^^^^^^^^^^^
File "/home/mihai/.virtualenvs/litestar/lib/python3.11/site-packages/litestar/data_extractors.py", line 277, in extract_body
return await request.json()
^^^^^^^^^^^^^^^^^^^^
File "/home/mihai/.virtualenvs/litestar/lib/python3.11/site-packages/litestar/connection/request.py", line 134, in json
self._json = self._connection_state.json = decode_json(
^^^^^^^^^^^^
File "/home/mihai/.virtualenvs/litestar/lib/python3.11/site-packages/litestar/serialization/msgspec_hooks.py", line 191, in decode_json
raise SerializationException(str(msgspec_error)) from msgspec_error
litestar.exceptions.base_exceptions.SerializationException: JSON is malformed: trailing comma in object (byte 10)
Traceback (most recent call last):
File "/home/mihai/.virtualenvs/litestar/lib/python3.11/site-packages/litestar/serialization/msgspec_hooks.py", line 186, in decode_json
return _msgspec_json_decoder.decode(value)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
msgspec.DecodeError: JSON is malformed: trailing comma in object (byte 10)
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/home/mihai/.virtualenvs/litestar/lib/python3.11/site-packages/litestar/middleware/exceptions/middleware.py", line 192, in __call__
await self.app(scope, receive, send)
File "/home/mihai/.virtualenvs/litestar/lib/python3.11/site-packages/litestar/middleware/base.py", line 129, in wrapped_call
await original__call__(self, scope, receive, send) # pyright: ignore
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/mihai/.virtualenvs/litestar/lib/python3.11/site-packages/litestar/middleware/logging.py", line 112, in __call__
await self.log_request(scope=scope, receive=receive)
File "/home/mihai/.virtualenvs/litestar/lib/python3.11/site-packages/litestar/middleware/logging.py", line 126, in log_request
extracted_data = await self.extract_request_data(request=scope["app"].request_class(scope, receive))
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/mihai/.virtualenvs/litestar/lib/python3.11/site-packages/litestar/middleware/logging.py", line 179, in extract_request_data
value = await value
^^^^^^^^^^^
File "/home/mihai/.virtualenvs/litestar/lib/python3.11/site-packages/litestar/data_extractors.py", line 277, in extract_body
return await request.json()
^^^^^^^^^^^^^^^^^^^^
File "/home/mihai/.virtualenvs/litestar/lib/python3.11/site-packages/litestar/connection/request.py", line 134, in json
self._json = self._connection_state.json = decode_json(
^^^^^^^^^^^^
File "/home/mihai/.virtualenvs/litestar/lib/python3.11/site-packages/litestar/serialization/msgspec_hooks.py", line 191, in decode_json
raise SerializationException(str(msgspec_error)) from msgspec_error
litestar.exceptions.base_exceptions.SerializationException: JSON is malformed: trailing comma in object (byte 10)
```
### Litestar Version
litestar[standard]==2.5.4
structlog==24.1.0
### Platform
- [x] Linux
- [ ] Mac
- [ ] Windows
- [ ] Other (Please specify in the description above)
<!-- POLAR PLEDGE BADGE START -->
---
> [!NOTE]
> While we are open for sponsoring on [GitHub Sponsors](https://github.com/sponsors/litestar-org/) and
> [OpenCollective](https://opencollective.com/litestar), we also utilize [Polar.sh](https://polar.sh/) to engage in pledge-based sponsorship.
>
> Check out all issues funded or available for funding [on our Polar.sh dashboard](https://polar.sh/litestar-org)
> * If you would like to see an issue prioritized, make a pledge towards it!
> * We receive the pledge once the issue is completed & verified
> * This, along with engagement in the community, helps us know which features are a priority to our users.
<a href="https://polar.sh/litestar-org/litestar/issues/3063">
<picture>
<source media="(prefers-color-scheme: dark)" srcset="https://polar.sh/api/github/litestar-org/litestar/issues/3063/pledge.svg?darkmode=1">
<img alt="Fund with Polar" src="https://polar.sh/api/github/litestar-org/litestar/issues/3063/pledge.svg">
</picture>
</a>
<!-- POLAR PLEDGE BADGE END -->
| I think this has been resolved in the development branch. @zoopp are you able to give that branch a try to confirm if the problem remains?
@cofin I gave it a try now but the issue is still reproducible with the MCVE above:
```
!!! I'm using the standard logging version output as it formats the exception in a readable manner
!!! but the outcome is essentially the same regardless if using structlog or standard logging.
(litestar)
┌─┤10:11:07│mihai@mobilews:~/bug
└──────────╼ litestar --app bug:app run --reload
Using Litestar app from env: 'bug:app'
Starting server process ───────────────────────────────────────────────────────────────────────────────────────────────────────
┌──────────────────────────────┬──────────────────────┐
│ Litestar version │ 2.6.0 │
│ Debug mode │ Disabled │
│ Python Debugger on exception │ Disabled │
│ CORS │ Disabled │
│ CSRF │ Disabled │
│ OpenAPI │ Enabled path=/schema │
│ Compression │ Disabled │
│ Middlewares │ LoggingMiddleware │
└──────────────────────────────┴──────────────────────┘
INFO: Will watch for changes in these directories: ['/home/mihai/bug']
INFO: Uvicorn running on http://127.0.0.1:8000 (Press CTRL+C to quit)
INFO: Started reloader process [150319] using WatchFiles
INFO: Started server process [150321]
INFO: Waiting for application startup.
INFO: Application startup complete.
INFO: 127.0.0.1:58246 - "POST / HTTP/1.1" 500 Internal Server Error
ERROR - 2024-02-05 10:11:12,310 - litestar - config - exception raised on http connection to route /
Traceback (most recent call last):
File "/home/mihai/.virtualenvs/litestar/lib/python3.11/site-packages/litestar/serialization/msgspec_hooks.py", line 186, in decode_json
return _msgspec_json_decoder.decode(value)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
msgspec.DecodeError: JSON is malformed: trailing comma in object (byte 10)
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/home/mihai/.virtualenvs/litestar/lib/python3.11/site-packages/litestar/middleware/exceptions/middleware.py", line 192, in __call__
await self.app(scope, receive, send)
File "/home/mihai/.virtualenvs/litestar/lib/python3.11/site-packages/litestar/middleware/base.py", line 129, in wrapped_call
await original__call__(self, scope, receive, send) # pyright: ignore
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/mihai/.virtualenvs/litestar/lib/python3.11/site-packages/litestar/middleware/logging.py", line 112, in __call__
await self.log_request(scope=scope, receive=receive)
File "/home/mihai/.virtualenvs/litestar/lib/python3.11/site-packages/litestar/middleware/logging.py", line 126, in log_request
extracted_data = await self.extract_request_data(request=scope["app"].request_class(scope, receive))
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/mihai/.virtualenvs/litestar/lib/python3.11/site-packages/litestar/middleware/logging.py", line 179, in extract_request_data
value = await value
^^^^^^^^^^^
File "/home/mihai/.virtualenvs/litestar/lib/python3.11/site-packages/litestar/data_extractors.py", line 277, in extract_body
return await request.json()
^^^^^^^^^^^^^^^^^^^^
File "/home/mihai/.virtualenvs/litestar/lib/python3.11/site-packages/litestar/connection/request.py", line 134, in json
self._json = self._connection_state.json = decode_json(
^^^^^^^^^^^^
File "/home/mihai/.virtualenvs/litestar/lib/python3.11/site-packages/litestar/serialization/msgspec_hooks.py", line 191, in decode_json
raise SerializationException(str(msgspec_error)) from msgspec_error
litestar.exceptions.base_exceptions.SerializationException: JSON is malformed: trailing comma in object (byte 10)
Traceback (most recent call last):
File "/home/mihai/.virtualenvs/litestar/lib/python3.11/site-packages/litestar/serialization/msgspec_hooks.py", line 186, in decode_json
return _msgspec_json_decoder.decode(value)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
msgspec.DecodeError: JSON is malformed: trailing comma in object (byte 10)
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/home/mihai/.virtualenvs/litestar/lib/python3.11/site-packages/litestar/middleware/exceptions/middleware.py", line 192, in __call__
await self.app(scope, receive, send)
File "/home/mihai/.virtualenvs/litestar/lib/python3.11/site-packages/litestar/middleware/base.py", line 129, in wrapped_call
await original__call__(self, scope, receive, send) # pyright: ignore
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/mihai/.virtualenvs/litestar/lib/python3.11/site-packages/litestar/middleware/logging.py", line 112, in __call__
await self.log_request(scope=scope, receive=receive)
File "/home/mihai/.virtualenvs/litestar/lib/python3.11/site-packages/litestar/middleware/logging.py", line 126, in log_request
extracted_data = await self.extract_request_data(request=scope["app"].request_class(scope, receive))
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/mihai/.virtualenvs/litestar/lib/python3.11/site-packages/litestar/middleware/logging.py", line 179, in extract_request_data
value = await value
^^^^^^^^^^^
File "/home/mihai/.virtualenvs/litestar/lib/python3.11/site-packages/litestar/data_extractors.py", line 277, in extract_body
return await request.json()
^^^^^^^^^^^^^^^^^^^^
File "/home/mihai/.virtualenvs/litestar/lib/python3.11/site-packages/litestar/connection/request.py", line 134, in json
self._json = self._connection_state.json = decode_json(
^^^^^^^^^^^^
File "/home/mihai/.virtualenvs/litestar/lib/python3.11/site-packages/litestar/serialization/msgspec_hooks.py", line 191, in decode_json
raise SerializationException(str(msgspec_error)) from msgspec_error
litestar.exceptions.base_exceptions.SerializationException: JSON is malformed: trailing comma in object (byte 10)
``` | 2024-02-13T19:06:51 |
litestar-org/litestar | 3,111 | litestar-org__litestar-3111 | [
"3083"
] | 1ff7f1e1b59948fa5e70009b40e5c135ff6226ac | diff --git a/litestar/app.py b/litestar/app.py
--- a/litestar/app.py
+++ b/litestar/app.py
@@ -388,7 +388,12 @@ def __init__(
self._openapi_schema: OpenAPI | None = None
self._debug: bool = True
+ self.stores: StoreRegistry = (
+ config.stores if isinstance(config.stores, StoreRegistry) else StoreRegistry(config.stores)
+ )
self._lifespan_managers = config.lifespan
+ for store in self.stores._stores.values():
+ self._lifespan_managers.append(store)
self._server_lifespan_managers = [p.server_lifespan for p in config.plugins or [] if isinstance(p, CLIPlugin)]
self.experimental_features = frozenset(config.experimental_features or [])
self.get_logger: GetLogger = get_logger_placeholder
@@ -471,10 +476,6 @@ def __init__(
self.asgi_handler = self._create_asgi_handler()
- self.stores: StoreRegistry = (
- config.stores if isinstance(config.stores, StoreRegistry) else StoreRegistry(config.stores)
- )
-
@property
@deprecated(version="2.6.0", kind="property", info="Use create_static_files router instead")
def static_files_config(self) -> list[StaticFilesConfig]:
diff --git a/litestar/stores/base.py b/litestar/stores/base.py
--- a/litestar/stores/base.py
+++ b/litestar/stores/base.py
@@ -9,6 +9,8 @@
from msgspec.msgpack import encode as msgpack_encode
if TYPE_CHECKING:
+ from types import TracebackType
+
from typing_extensions import Self
@@ -76,6 +78,17 @@ async def expires_in(self, key: str) -> int | None:
"""
raise NotImplementedError
+ async def __aenter__(self) -> None: # noqa: B027
+ pass
+
+ async def __aexit__( # noqa: B027
+ self,
+ exc_type: type[BaseException] | None,
+ exc_val: BaseException | None,
+ exc_tb: TracebackType | None,
+ ) -> None:
+ pass
+
class NamespacedStore(Store):
"""A subclass of :class:`Store`, offering hierarchical namespacing.
diff --git a/litestar/stores/redis.py b/litestar/stores/redis.py
--- a/litestar/stores/redis.py
+++ b/litestar/stores/redis.py
@@ -1,7 +1,7 @@
from __future__ import annotations
from datetime import timedelta
-from typing import cast
+from typing import TYPE_CHECKING, cast
from redis.asyncio import Redis
from redis.asyncio.connection import ConnectionPool
@@ -14,13 +14,18 @@
__all__ = ("RedisStore",)
+if TYPE_CHECKING:
+ from types import TracebackType
+
class RedisStore(NamespacedStore):
"""Redis based, thread and process safe asynchronous key/value store."""
__slots__ = ("_redis",)
- def __init__(self, redis: Redis, namespace: str | None | EmptyType = Empty) -> None:
+ def __init__(
+ self, redis: Redis, namespace: str | None | EmptyType = Empty, handle_client_shutdown: bool = False
+ ) -> None:
"""Initialize :class:`RedisStore`
Args:
@@ -28,9 +33,11 @@ def __init__(self, redis: Redis, namespace: str | None | EmptyType = Empty) -> N
namespace: A key prefix to simulate a namespace in redis. If not given,
defaults to ``LITESTAR``. Namespacing can be explicitly disabled by passing
``None``. This will make :meth:`.delete_all` unavailable.
+ handle_client_shutdown: If ``True``, handle the shutdown of the `redis` instance automatically during the store's lifespan. Should be set to `True` unless the shutdown is handled externally
"""
self._redis = redis
self.namespace: str | None = value_or_default(namespace, "LITESTAR")
+ self.handle_client_shutdown = handle_client_shutdown
# script to get and renew a key in one atomic step
self._get_and_renew_script = self._redis.register_script(
@@ -64,6 +71,18 @@ def __init__(self, redis: Redis, namespace: str | None | EmptyType = Empty) -> N
"""
)
+ async def _shutdown(self) -> None:
+ if self.handle_client_shutdown:
+ await self._redis.aclose(close_connection_pool=True) # type: ignore[attr-defined]
+
+ async def __aexit__(
+ self,
+ exc_type: type[BaseException] | None,
+ exc_val: BaseException | None,
+ exc_tb: TracebackType | None,
+ ) -> None:
+ await self._shutdown()
+
@classmethod
def with_client(
cls,
@@ -93,14 +112,22 @@ def with_client(
username=username,
password=password,
)
- return cls(redis=Redis(connection_pool=pool), namespace=namespace)
+ return cls(
+ redis=Redis(connection_pool=pool),
+ namespace=namespace,
+ handle_client_shutdown=True,
+ )
def with_namespace(self, namespace: str) -> RedisStore:
"""Return a new :class:`RedisStore` with a nested virtual key namespace.
The current instances namespace will serve as a prefix for the namespace, so it
can be considered the parent namespace.
"""
- return type(self)(redis=self._redis, namespace=f"{self.namespace}_{namespace}" if self.namespace else namespace)
+ return type(self)(
+ redis=self._redis,
+ namespace=f"{self.namespace}_{namespace}" if self.namespace else namespace,
+ handle_client_shutdown=self.handle_client_shutdown,
+ )
def _make_key(self, key: str) -> str:
prefix = f"{self.namespace}:" if self.namespace else ""
| diff --git a/tests/unit/test_stores.py b/tests/unit/test_stores.py
--- a/tests/unit/test_stores.py
+++ b/tests/unit/test_stores.py
@@ -366,3 +366,16 @@ async def test_file_store_handle_rename_fail(file_store: FileStore, mocker: Mock
await file_store.set("foo", "bar")
mock_unlink.assert_called_once()
assert Path(mock_unlink.call_args_list[0].args[0]).with_suffix("") == file_store.path.joinpath("foo")
+
+
+async def test_redis_store_with_client_shutdown() -> None:
+ redis_store = RedisStore.with_client(url="redis://localhost:6397")
+ assert await redis_store._redis.ping()
+ # remove the private shutdown and the assert below fails
+ # the check on connection is a mimic of https://github.com/redis/redis-py/blob/d529c2ad8d2cf4dcfb41bfd93ea68cfefd81aa66/tests/test_asyncio/test_connection_pool.py#L35-L39
+ await redis_store._shutdown()
+ assert not any(
+ x.is_connected
+ for x in redis_store._redis.connection_pool._available_connections
+ + list(redis_store._redis.connection_pool._in_use_connections)
+ )
| Bug: declaring a RedisStore in the app triggers a 500 error in the 2nd set of parameters of a parametrized test
### Description
If you have a parametrized test that uses a function-scoped fixture that uses itself a session-scoped fixture and you declare a RedisStore in the app, the 2nd test fails with `RuntimeError: Event loop is closed`
The 2nd test passes in those cases:
1. all fixtures are session-based (just comment the client fixture)
2. there is no RedisStore in the app (or replace with a MemoryStore)
### URL to code causing the issue
_No response_
### MCVE
```python
import msgspec
import pytest
from litestar import Litestar, get
from litestar.middleware.session.server_side import ServerSideSessionConfig
from litestar.stores.redis import RedisStore
from litestar.testing import AsyncTestClient
@get()
async def hget() -> int:
return 1
class AppSettings(msgspec.Struct):
debug: bool
redis_url: str = "redis://localhost:6379"
def get_app(app_settings: AppSettings) -> Litestar:
# setting up stores
session_store = RedisStore.with_client(url=app_settings.redis_url)
app = Litestar(route_handlers=[hget],
middleware=[
ServerSideSessionConfig().middleware,
],
stores={"sessions": session_store, }, # comment this and 2nd test pass
debug=app_settings.debug
)
return app
@pytest.fixture(scope="session")
def anyio_backend() -> str:
return "asyncio"
@pytest.fixture(scope="session")
def app_settings_test():
return AppSettings(debug=True)
@pytest.fixture(scope="session")
def app_test(app_settings_test: AppSettings):
app = get_app(app_settings_test)
yield app
@pytest.fixture # add scope="session" and 2nd test pass
async def client(app_test: Litestar):
async with AsyncTestClient(app=app_test) as c:
yield c
@pytest.mark.anyio
@pytest.mark.parametrize("p1, p2",
[(1, 2), (3, 4)])
async def test_param(client: AsyncTestClient, p1: int, p2: int):
response = await client.get("/")
assert response.status_code == 200
```
### Steps to reproduce
```bash
1. Go to '...'
2. Click on '....'
3. Scroll down to '....'
4. See error
```
### Screenshots
```bash
""
```
### Logs
_No response_
### Litestar Version
2.5.5
### Platform
- [X] Linux
- [ ] Mac
- [ ] Windows
- [ ] Other (Please specify in the description above)
<!-- POLAR PLEDGE BADGE START -->
---
> [!NOTE]
> While we are open for sponsoring on [GitHub Sponsors](https://github.com/sponsors/litestar-org/) and
> [OpenCollective](https://opencollective.com/litestar), we also utilize [Polar.sh](https://polar.sh/) to engage in pledge-based sponsorship.
>
> Check out all issues funded or available for funding [on our Polar.sh dashboard](https://polar.sh/litestar-org)
> * If you would like to see an issue prioritized, make a pledge towards it!
> * We receive the pledge once the issue is completed & verified
> * This, along with engagement in the community, helps us know which features are a priority to our users.
<a href="https://polar.sh/litestar-org/litestar/issues/3083">
<picture>
<source media="(prefers-color-scheme: dark)" srcset="https://polar.sh/api/github/litestar-org/litestar/issues/3083/pledge.svg?darkmode=1">
<img alt="Fund with Polar" src="https://polar.sh/api/github/litestar-org/litestar/issues/3083/pledge.svg">
</picture>
</a>
<!-- POLAR PLEDGE BADGE END -->
| thinking out loud after I read [this](https://redis.readthedocs.io/en/stable/examples/asyncio_examples.html#Connecting-and-Disconnecting), shouldn't there be some close/aclose methods for stores, or at least one for the RedisStore ?
this can be circumvented in the get_app factory replacing the `RedisStore` with a `MemoryStore` only for tests but it feels like a hack
> thinking out loud after I read [this](https://redis.readthedocs.io/en/stable/examples/asyncio_examples.html#Connecting-and-Disconnecting), shouldn't there be some close/aclose methods for stores, or at least one for the RedisStore ?
I agree that we should add this to the stores. Then Litestar could close the stores on app shutdown (make it configurable as to whether the store should be closed or not).
> > thinking out loud after I read [this](https://redis.readthedocs.io/en/stable/examples/asyncio_examples.html#Connecting-and-Disconnecting), shouldn't there be some close/aclose methods for stores, or at least one for the RedisStore ?
>
> I agree that we should add this to the stores. Then Litestar could close the stores on app shutdown (make it configurable as to whether the store should be closed or not).
I greped all python Redis store in github and nobody seems to have methods to close stores weirdly enough, so we're not alone :)
I'm not sure this is related to the issue at hand though, but from experience tests and unclosed ressources usually dont make good friends.
> thinking out loud after I read [this](https://redis.readthedocs.io/en/stable/examples/asyncio_examples.html#Connecting-and-Disconnecting), shouldn't there be some close/aclose methods for stores, or at least one for the RedisStore ?
The reason this does not exist is that the store can receive an "externally managed" `Redis` instance. The idea here was that whoever passes it in is responsible for managing its lifetime as well. While I still think this is the right thing to do, I can see how it could be an issue when the `with_client` classmethod is used, since then it's the store that's responsible for creating the client instance.
Maybe the solution here is to add a flag to the `RedisStore` that indicates whether we should handle the client's lifetime?
> I'm not sure this is related to the issue at hand though, but from experience tests and unclosed ressources usually dont make good friends.
There are/were quite a few other issues surrounding the async Redis client and lifetimes, e.g. it trying to perform some action in `__del__` that relied on an active async context, so I wouldn't entirely dismiss the possibility that this is also just the redis client being weird / buggy.
You could try though to manage its lifetime explicitly and see if that fixes the issue.
> You could try though to manage its lifetime explicitly and see if that fixes the issue.
something like this does work indeed, I'm not too sure this is not overly complicated though it does the job, and my app factory needs to be async now, and I need to wrap it in an async context manager that creates / closes the redis client :
```python
from contextlib import asynccontextmanager
from typing import Iterator
import msgspec
import pytest
from redis.asyncio import Redis, ConnectionPool
from litestar import Litestar, get
from litestar.middleware.session.server_side import ServerSideSessionConfig
from litestar.testing import AsyncTestClient
@get()
async def hget() -> int:
return 1
class AppSettings(msgspec.Struct):
debug: bool
redis_url: str = "redis://localhost:6379"
@asynccontextmanager
async def make_session_store(app_settings: AppSettings) -> Iterator[None]:
pool = ConnectionPool.from_url(app_settings.redis_url)
client = Redis.from_pool(pool)
try:
yield
finally:
await client.aclose()
async def get_app(app_settings: AppSettings) -> Litestar:
# setting up stores
# session_store = RedisStore.with_client(url=app_settings.redis_url)
# session_store = RedisStore(redis=Redis(), namespace="sessions")
# session_store = MemoryStore()
session_config = ServerSideSessionConfig()
async with make_session_store(app_settings) as session_store:
app = Litestar(route_handlers=[hget],
middleware=[
session_config.middleware,
],
stores={"sessions": session_store}, # comment this and 2nd test pass
debug=app_settings.debug,
# lifespan=[partial(lifespan, app_settings=app_settings)]
)
return app
@pytest.fixture(scope="session")
def anyio_backend() -> str:
return "asyncio"
@pytest.fixture(scope="session")
def app_settings_test():
return AppSettings(debug=True)
@pytest.fixture(scope="session")
async def app_test(app_settings_test: AppSettings):
app = await get_app(app_settings_test)
yield app
@pytest.fixture # add scope="session" and 2nd test pass
async def client(app_test: Litestar):
async with AsyncTestClient(app=app_test) as c:
yield c
@pytest.mark.anyio(scope="session")
@pytest.mark.parametrize("p1, p2",
[(1, 2), (3, 4)])
async def test_param(client: AsyncTestClient, p1: int, p2: int):
response = await client.get("/")
assert response.status_code == 200
```
actually maybe if there is a way to lazily load the store, or use a lifespan-created redis client into the store that would be maybe cleaner, not sure...
> actually maybe if there is a way to lazily load the store, or use a lifespan-created redis client into the store that would be maybe cleaner, not sure...
I think you should be able to create the store in a sync function and then have a lifespan context manager on the app instance that takes care of its shutdown.
> > actually maybe if there is a way to lazily load the store, or use a lifespan-created redis client into the store that would be maybe cleaner, not sure...
>
> I think you should be able to create the store in a sync function and then have a lifespan context manager on the app instance that takes care of its shutdown.
alright yeah this is much better this way indeed, I just have to access the private redis attribute from the store in the lifespan in order to close it but that solves everything and looks not ugly like the above :)
thanks for the patience )
> While I still think this is the right thing to do, I can see how it could be an issue when the with_client classmethod is used, since then it's the store that's responsible for creating the client instance.
Do you think I should maybe try "solve" this adding a note in the docs ? This is not trivial and as you said maybe unexpected at first. lmk
```
from contextlib import asynccontextmanager
from functools import partial
from typing import Iterator
import msgspec
import pytest
from redis.asyncio import Redis
from litestar import Litestar, get
from litestar.middleware.session.server_side import ServerSideSessionConfig
from litestar.stores.redis import RedisStore
from litestar.testing import AsyncTestClient
@get()
async def hget() -> int:
return 1
class AppSettings(msgspec.Struct):
debug: bool
redis_url: str = "redis://localhost:6379"
@asynccontextmanager
async def lifespan(app: Litestar) -> Iterator[None]:
try:
yield
finally:
print("lifespan cleanup")
await app.stores.get("sessions")._redis.aclose()
def get_app(app_settings: AppSettings) -> Litestar:
# setting up stores
# session_store = RedisStore.with_client(url=app_settings.redis_url)
session_store = RedisStore(redis=Redis(), namespace="sessions")
# session_store = MemoryStore()
session_config = ServerSideSessionConfig()
app = Litestar(route_handlers=[hget],
middleware=[
session_config.middleware,
],
stores={"sessions": session_store}, # comment this and 2nd test pass
debug=app_settings.debug,
lifespan=[partial(lifespan, app_settings=app_settings)]
)
return app
@pytest.fixture(scope="session")
def anyio_backend() -> str:
return "asyncio"
@pytest.fixture(scope="session")
def app_settings_test():
return AppSettings(debug=True)
@pytest.fixture(scope="session")
def app_test(app_settings_test: AppSettings):
app = get_app(app_settings_test)
yield app
@pytest.fixture # add scope="session" and 2nd test pass
async def client(app_test: Litestar):
async with AsyncTestClient(app=app_test) as c:
yield c
@pytest.mark.anyio(scope="session")
@pytest.mark.parametrize("p1, p2",
[(1, 2), (3, 4)])
async def test_param(client: AsyncTestClient, p1: int, p2: int):
response = await client.get("/")
assert response.status_code == 200
```
> Do you think I should maybe try "solve" this adding a note in the docs ? This is not trivial and as you said maybe unexpected at first. lmk
I think there are two things that need to be done:
1. Add a note to the docs about passing in a `Redis` instance and that you're expected to handle its lifetime
2. Properly handle the lifetime of internally created `Redis` instances. Adding `lifespan` or `on_startup`/`on_shutdown` methods to the stores seems to be the most sensible option here, with a flag for the Redis store to know if we should handle its shutdown.
sounds good to me, I'm keen on tackling both, 1st one will be simpler and faster obviously
I've assigned you here. I'll leave it up to you if you want to address both in a single PR or two separate ones, both are fine with me. | 2024-02-14T12:26:51 |
litestar-org/litestar | 3,113 | litestar-org__litestar-3113 | [
"3026"
] | 11d79ce4935c1d4fcdc455f98bb94adcb07cf14a | diff --git a/docs/examples/contrib/piccolo/app.py b/docs/examples/contrib/piccolo/app.py
--- a/docs/examples/contrib/piccolo/app.py
+++ b/docs/examples/contrib/piccolo/app.py
@@ -77,7 +77,4 @@ async def on_startup():
await create_db_tables(Task, if_not_exists=True)
-app = Litestar(
- route_handlers=[tasks, create_task, delete_task, update_task],
- on_startup=[on_startup],
-)
+app = Litestar(route_handlers=[tasks, create_task, delete_task, update_task], on_startup=[on_startup], debug=True)
diff --git a/litestar/contrib/piccolo.py b/litestar/contrib/piccolo.py
--- a/litestar/contrib/piccolo.py
+++ b/litestar/contrib/piccolo.py
@@ -1,5 +1,6 @@
from __future__ import annotations
+import warnings
from dataclasses import replace
from decimal import Decimal
from typing import Any, Generator, Generic, List, Optional, TypeVar
@@ -9,7 +10,7 @@
from litestar.dto import AbstractDTO, DTOField, Mark
from litestar.dto.data_structures import DTOFieldDefinition
-from litestar.exceptions import MissingDependencyException
+from litestar.exceptions import LitestarWarning, MissingDependencyException
from litestar.types import Empty
from litestar.typing import FieldDefinition
from litestar.utils import warn_deprecation
@@ -38,12 +39,22 @@ def __getattr__(name: str) -> Any:
def _parse_piccolo_type(column: Column, extra: dict[str, Any]) -> FieldDefinition:
+ is_optional = not column._meta.required
+
if isinstance(column, (column_types.Decimal, column_types.Numeric)):
column_type: Any = Decimal
meta = Meta(extra=extra)
elif isinstance(column, (column_types.Email, column_types.Varchar)):
column_type = str
- meta = Meta(max_length=column.length, extra=extra)
+ if is_optional:
+ meta = Meta(extra=extra)
+ warnings.warn(
+ f"Dropping max_length constraint for column {column!r} because the " "column is optional",
+ category=LitestarWarning,
+ stacklevel=2,
+ )
+ else:
+ meta = Meta(max_length=column.length, extra=extra)
elif isinstance(column, column_types.Array):
column_type = List[column.base_column.value_type] # type: ignore
meta = Meta(extra=extra)
@@ -57,7 +68,7 @@ def _parse_piccolo_type(column: Column, extra: dict[str, Any]) -> FieldDefinitio
column_type = column.value_type
meta = Meta(extra=extra)
- if not column._meta.required:
+ if is_optional:
column_type = Optional[column_type]
return FieldDefinition.from_annotation(Annotated[column_type, meta])
diff --git a/litestar/dto/_backend.py b/litestar/dto/_backend.py
--- a/litestar/dto/_backend.py
+++ b/litestar/dto/_backend.py
@@ -17,7 +17,9 @@
cast,
)
+import msgspec
from msgspec import UNSET, Struct, UnsetType, convert, defstruct, field
+from typing_extensions import Annotated
from litestar.dto._types import (
CollectionType,
@@ -33,6 +35,7 @@
from litestar.dto.data_structures import DTOData, DTOFieldDefinition
from litestar.dto.field import Mark
from litestar.enums import RequestEncodingType
+from litestar.params import KwargDefinition
from litestar.serialization import decode_json, decode_msgpack
from litestar.types import Empty
from litestar.typing import FieldDefinition
@@ -740,6 +743,24 @@ def _create_msgspec_field(field_definition: TransferDTOFieldDefinition) -> Any:
return field(**kwargs)
+def _create_struct_field_meta_for_field_definition(field_definition: TransferDTOFieldDefinition) -> msgspec.Meta | None:
+ if (kwarg_definition := field_definition.kwarg_definition) is None or not isinstance(
+ kwarg_definition, KwargDefinition
+ ):
+ return None
+
+ return msgspec.Meta(
+ gt=kwarg_definition.gt,
+ ge=kwarg_definition.ge,
+ lt=kwarg_definition.lt,
+ le=kwarg_definition.le,
+ multiple_of=kwarg_definition.multiple_of,
+ min_length=kwarg_definition.min_length if not field_definition.is_partial else None,
+ max_length=kwarg_definition.max_length if not field_definition.is_partial else None,
+ pattern=kwarg_definition.pattern,
+ )
+
+
def _create_struct_for_field_definitions(
model_name: str,
field_definitions: tuple[TransferDTOFieldDefinition, ...],
@@ -755,6 +776,9 @@ def _create_struct_for_field_definitions(
if field_definition.is_partial:
field_type = Union[field_type, UnsetType]
+ if (field_meta := _create_struct_field_meta_for_field_definition(field_definition)) is not None:
+ field_type = Annotated[field_type, field_meta]
+
struct_fields.append(
(
field_definition.name,
| diff --git a/tests/unit/test_dto/test_factory/test_integration.py b/tests/unit/test_dto/test_factory/test_integration.py
--- a/tests/unit/test_dto/test_factory/test_integration.py
+++ b/tests/unit/test_dto/test_factory/test_integration.py
@@ -848,3 +848,53 @@ def test(data: Optional[Foo] = None) -> dict:
with create_test_client([test]) as client:
response = client.post("/")
assert response.json() == {"foo": None}
+
+
[email protected](
+ "field_type, constraint_name, constraint_value, request_data",
+ [
+ (int, "gt", 2, 2),
+ (int, "ge", 2, 1),
+ (int, "lt", 2, 2),
+ (int, "le", 2, 3),
+ (int, "multiple_of", 2, 3),
+ (str, "min_length", 2, "1"),
+ (str, "max_length", 1, "12"),
+ (str, "pattern", r"\d", "a"),
+ ],
+)
+def test_msgspec_dto_copies_constraints(
+ field_type: Any, constraint_name: str, constraint_value: Any, request_data: Any, use_experimental_dto_backend: bool
+) -> None:
+ # https://github.com/litestar-org/litestar/issues/3026
+ struct = msgspec.defstruct(
+ "Foo",
+ fields=[("bar", Annotated[field_type, msgspec.Meta(**{constraint_name: constraint_value})])], # type: ignore[list-item]
+ )
+
+ @post(
+ "/",
+ dto=Annotated[MsgspecDTO[struct], DTOConfig(experimental_codegen_backend=use_experimental_dto_backend)], # type: ignore[arg-type, valid-type]
+ signature_namespace={"struct": struct},
+ )
+ def handler(data: struct) -> None: # type: ignore[valid-type]
+ pass
+
+ with create_test_client([handler]) as client:
+ assert client.post("/", json={"bar": request_data}).status_code == 400
+
+
+def test_msgspec_dto_dont_copy_length_constraint_for_partial_dto() -> None:
+ class Foo(msgspec.Struct):
+ bar: Annotated[str, msgspec.Meta(min_length=2)]
+ baz: Annotated[str, msgspec.Meta(max_length=2)]
+
+ class FooDTO(MsgspecDTO[Foo]):
+ config = DTOConfig(partial=True)
+
+ @post("/", dto=FooDTO, signature_types={Foo})
+ def handler(data: Foo) -> None:
+ pass
+
+ with create_test_client([handler]) as client:
+ assert client.post("/", json={"bar": "1", "baz": "123"}).status_code == 201
| Bug: Msgspec constraints ignored in MsgspecDTO
### Discussed in https://github.com/orgs/litestar-org/discussions/3025
<div type='discussions-op-text'>
<sup>Originally posted by **9128305** January 25, 2024</sup>
Msgpec constraint here is ignored when sending `{"foo": "t"}`. What am I doing wrong?
```python
from typing import Annotated
import msgspec
from litestar import post, Litestar
from litestar.dto import MsgspecDTO
class Request(msgspec.Struct):
foo: Annotated[str, msgspec.Meta(min_length=3,)]
@post("/example/", dto=MsgspecDTO[Request],sync_to_thread=False)
def example(data: Request) -> Request:
return data
app = Litestar([example])
| I checked code and DTOFieldDefinition skips Msgpec.Meta(...) for annotation, so new struct field has only str type
Thanks @9128305 - we should be copying these constraints onto the transfer models.
Should be Annotated type annotation splitted to unwrapped and metadata? Or is viable only for msgspec? https://github.com/litestar-org/litestar/blob/6e4e530445eadbc1fd2f65bebca3bc68cf12f29a/litestar/typing.py#L495
> Should be Annotated type annotation splitted to unwrapped and metadata? Or is viable only for msgspec?
>
> https://github.com/litestar-org/litestar/blob/6e4e530445eadbc1fd2f65bebca3bc68cf12f29a/litestar/typing.py#L495
Sorry, I don't understand the question, can you rephrase or give an example? | 2024-02-14T17:28:51 |
litestar-org/litestar | 3,118 | litestar-org__litestar-3118 | [
"2967"
] | 323e23e51508cf142074447098b48ddcc2a67ad0 | diff --git a/litestar/_openapi/schema_generation/schema.py b/litestar/_openapi/schema_generation/schema.py
--- a/litestar/_openapi/schema_generation/schema.py
+++ b/litestar/_openapi/schema_generation/schema.py
@@ -117,7 +117,7 @@
Sequence: Schema(type=OpenAPIType.ARRAY),
Set: Schema(type=OpenAPIType.ARRAY),
Tuple: Schema(type=OpenAPIType.ARRAY),
- UUID: Schema(type=OpenAPIType.STRING, format=OpenAPIFormat.UUID, description="Any UUID string"),
+ UUID: Schema(type=OpenAPIType.STRING, format=OpenAPIFormat.UUID),
bool: Schema(type=OpenAPIType.BOOLEAN),
bytearray: Schema(type=OpenAPIType.STRING),
bytes: Schema(type=OpenAPIType.STRING),
| diff --git a/tests/examples/test_openapi.py b/tests/examples/test_openapi.py
--- a/tests/examples/test_openapi.py
+++ b/tests/examples/test_openapi.py
@@ -28,7 +28,7 @@ def test_schema_generation() -> None:
"components": {
"schemas": {
"IdModel": {
- "properties": {"id": {"type": "string", "format": "uuid", "description": "Any UUID string"}},
+ "properties": {"id": {"type": "string", "format": "uuid"}},
"type": "object",
"required": ["id"],
"title": "IdContainer",
diff --git a/tests/unit/test_openapi/test_parameters.py b/tests/unit/test_openapi/test_parameters.py
--- a/tests/unit/test_openapi/test_parameters.py
+++ b/tests/unit/test_openapi/test_parameters.py
@@ -1,4 +1,5 @@
from typing import TYPE_CHECKING, List, Optional, Type, cast
+from uuid import UUID
import pytest
from typing_extensions import Annotated
@@ -324,3 +325,21 @@ async def index(
assert response.json()["paths"]["/"]["get"]["parameters"][0]["examples"] == {
"text-example-1": {"summary": "example summary", "value": "example value"}
}
+
+
+def test_uuid_path_description_generation() -> None:
+ # https://github.com/litestar-org/litestar/issues/2967
+ @get("str/{id:str}")
+ async def str_path(id: Annotated[str, Parameter(description="String ID")]) -> str:
+ return id
+
+ @get("uuid/{id:uuid}")
+ async def uuid_path(id: Annotated[UUID, Parameter(description="UUID ID")]) -> UUID:
+ return id
+
+ with create_test_client(
+ [str_path, uuid_path], openapi_config=OpenAPIConfig(title="Test API", version="1.0.0")
+ ) as client:
+ response = client.get("/schema/openapi.json")
+ assert response.json()["paths"]["/str/{id}"]["get"]["parameters"][0]["description"] == "String ID"
+ assert response.json()["paths"]["/uuid/{id}"]["get"]["parameters"][0]["description"] == "UUID ID"
| Bug: Description not set for UUID based path parameters in OpenAPI
### Description
The value set in `Parameter(description="UUID ID")` is not used for the actual description in the OpenAPI schema if the path parameter is of type `UUID`. I did some digging and found out this is due to the presence of the following code. https://github.com/litestar-org/litestar/blob/44809182b1b62a8da5b7c5bec3b22987581779d5/litestar/_openapi/schema_generation/schema.py#L525-L532
Since description for UUID is already set https://github.com/litestar-org/litestar/blob/44809182b1b62a8da5b7c5bec3b22987581779d5/litestar/_openapi/schema_generation/schema.py#L120 the code above makes it so that description from the user defined `Parameter` is not set again.
#### Proposed Fix 1 (Breaking / debatably breaking?)
Remove "description" in https://github.com/litestar-org/litestar/blob/44809182b1b62a8da5b7c5bec3b22987581779d5/litestar/_openapi/schema_generation/schema.py#L120 such that it appears like so
```suggestion py
UUID: Schema(type=OpenAPIType.STRING, format=OpenAPIFormat.UUID)
```
#### Proposed Fix 2 (Special Casing until next major release then replace with Proposed Fix 1)
Change https://github.com/litestar-org/litestar/blob/44809182b1b62a8da5b7c5bec3b22987581779d5/litestar/_openapi/schema_generation/schema.py#L531 such that it appears like so
```suggestion py
if schema_key == "description" or getattr(schema, schema_key, None) is None:
```
@peterschutt suggested special casing with comments explaining the behavior would be ideal. Thoughts?
### URL to code causing the issue
_No response_
### MCVE
```python
from uuid import UUID
from typing_extensions import Annotated
from litestar.params import Parameter
from litestar import get
from litestar.testing import create_test_client
@get("str/{id:str}")
async def str_path(id: Annotated[str, Parameter(description="String ID")]) -> str:
return id
@get("uuid/{id:uuid}")
async def uuid_path(id: Annotated[UUID, Parameter(description="UUID ID")]) -> UUID:
return id
with create_test_client([str_path, uuid_path]) as client:
# correct, currently passes, should pass
assert client.app.openapi_schema.paths["/str/{id}"].get.parameters[0].description == "String ID"
# wrong, currently passes, should fail
assert client.app.openapi_schema.paths["/uuid/{id}"].get.parameters[0].description == "Any UUID string"
# expected, currently fails, should pass
assert client.app.openapi_schema.paths["/uuid/{id}"].get.parameters[0].description == "UUID ID"
```
### Steps to reproduce
```bash
1. Save as "mcve.py"
2. Run `python mcve.py`
3. Second assert that currently passes, should fail.
4. Third assert that currently fails, should pass.
```
### Screenshots
```bash
""
```
### Logs
_No response_
### Litestar Version
cc45c1132584210250dd725595612c9c95c4bf68 (main as of issue creation)
### Platform
- [ ] Linux
- [X] Mac
- [ ] Windows
- [ ] Other (Please specify in the description above)
<!-- POLAR PLEDGE BADGE START -->
---
> [!NOTE]
> While we are open for sponsoring on [GitHub Sponsors](https://github.com/sponsors/litestar-org/) and
> [OpenCollective](https://opencollective.com/litestar), we also utilize [Polar.sh](https://polar.sh/) to engage in pledge-based sponsorship.
>
> Check out all issues funded or available for funding [on our Polar.sh dashboard](https://polar.sh/litestar-org)
> * If you would like to see an issue prioritized, make a pledge towards it!
> * We receive the pledge once the issue is completed & verified
> * This, along with engagement in the community, helps us know which features are a priority to our users.
<a href="https://polar.sh/litestar-org/litestar/issues/2967">
<picture>
<source media="(prefers-color-scheme: dark)" srcset="https://polar.sh/api/github/litestar-org/litestar/issues/2967/pledge.svg?darkmode=1">
<img alt="Fund with Polar" src="https://polar.sh/api/github/litestar-org/litestar/issues/2967/pledge.svg">
</picture>
</a>
<!-- POLAR PLEDGE BADGE END -->
| > Proposed Fix 1 (Breaking / debatably breaking?)
If we treat this as a bug and include the fix in a patch release, removing the default description will cause schema generation to change behavior between patch releases - this is why I'm in favor of option 2.
@Alc-Alc would you want to go ahead and implement a fix for this? | 2024-02-15T16:13:27 |
litestar-org/litestar | 3,131 | litestar-org__litestar-3131 | [
"3129"
] | 19f4f04342937d623b14e86fd4b87266bd30a18e | diff --git a/litestar/static_files/base.py b/litestar/static_files/base.py
--- a/litestar/static_files/base.py
+++ b/litestar/static_files/base.py
@@ -22,7 +22,7 @@
class StaticFiles:
"""ASGI App that handles file sending."""
- __slots__ = ("is_html_mode", "directories", "adapter", "send_as_attachment")
+ __slots__ = ("is_html_mode", "directories", "adapter", "send_as_attachment", "headers")
def __init__(
self,
@@ -31,6 +31,7 @@ def __init__(
file_system: FileSystemProtocol,
send_as_attachment: bool = False,
resolve_symlinks: bool = True,
+ headers: dict[str, str] | None = None,
) -> None:
"""Initialize the Application.
@@ -41,11 +42,13 @@ def __init__(
send_as_attachment: Whether to send the file with a ``content-disposition`` header of
``attachment`` or ``inline``
resolve_symlinks: Resolve symlinks to the directories
+ headers: Headers that will be sent with every response.
"""
self.adapter = FileSystemAdapter(file_system)
self.directories = tuple(Path(p).resolve() if resolve_symlinks else Path(p) for p in directories)
self.is_html_mode = is_html_mode
self.send_as_attachment = send_as_attachment
+ self.headers = headers
async def get_fs_info(
self, directories: Sequence[PathType], file_path: PathType
@@ -111,6 +114,7 @@ async def handle(self, path: str, is_head_response: bool) -> ASGIFileResponse:
filename=filename,
content_disposition_type=content_disposition_type,
is_head_response=is_head_response,
+ headers=self.headers,
)
if self.is_html_mode:
@@ -129,6 +133,7 @@ async def handle(self, path: str, is_head_response: bool) -> ASGIFileResponse:
status_code=HTTP_404_NOT_FOUND,
content_disposition_type=content_disposition_type,
is_head_response=is_head_response,
+ headers=self.headers,
)
raise NotFoundException(
diff --git a/litestar/static_files/config.py b/litestar/static_files/config.py
--- a/litestar/static_files/config.py
+++ b/litestar/static_files/config.py
@@ -162,12 +162,17 @@ def create_static_files_router(
_validate_config(path=path, directories=directories, file_system=file_system)
path = normalize_path(path)
+ headers = None
+ if cache_control:
+ headers = {cache_control.HEADER_NAME: cache_control.to_header()}
+
static_files = StaticFiles(
is_html_mode=html_mode,
directories=directories,
file_system=file_system,
send_as_attachment=send_as_attachment,
resolve_symlinks=resolve_symlinks,
+ headers=headers,
)
@get("{file_path:path}", name=name)
| diff --git a/tests/unit/test_static_files/test_create_static_router.py b/tests/unit/test_static_files/test_create_static_router.py
--- a/tests/unit/test_static_files/test_create_static_router.py
+++ b/tests/unit/test_static_files/test_create_static_router.py
@@ -1,4 +1,7 @@
-from typing import Any
+from pathlib import Path
+from typing import Any, Optional
+
+import pytest
from litestar import Litestar, Request, Response, Router
from litestar.connection import ASGIConnection
@@ -6,6 +9,8 @@
from litestar.exceptions import ValidationException
from litestar.handlers import BaseRouteHandler
from litestar.static_files import create_static_files_router
+from litestar.status_codes import HTTP_200_OK
+from litestar.testing.helpers import create_test_client
def test_route_reverse() -> None:
@@ -71,3 +76,21 @@ class MyRouter(Router):
router = create_static_files_router("/", directories=["some"], router_class=MyRouter)
assert isinstance(router, MyRouter)
+
+
[email protected]("cache_control", (None, CacheControlHeader(max_age=3600)))
+def test_cache_control(tmp_path: Path, cache_control: Optional[CacheControlHeader]) -> None:
+ static_dir = tmp_path / "foo"
+ static_dir.mkdir()
+ static_dir.joinpath("test.txt").write_text("hello")
+
+ router = create_static_files_router("/static", [static_dir], name="static", cache_control=cache_control)
+
+ with create_test_client([router]) as client:
+ response = client.get("static/test.txt")
+
+ assert response.status_code == HTTP_200_OK
+ if cache_control is not None:
+ assert response.headers["cache-control"] == cache_control.to_header()
+ else:
+ assert "cache-control" not in response.headers
| Bug: CacheControlHeader not applying on create_static_files_router
### Description
When applying a `CacheControlHeader` to the [static files router](https://docs.litestar.dev/latest/reference/static_files.html#litestar.static_files.create_static_files_router), the cache headers are not applied to requests served from this router.
`CacheControlHeader` works correctly on regular routers.
### URL to code causing the issue
NA
### MCVE
```python
from litestar import Litestar, Router, get
from litestar.response import File
from litestar.static_files import create_static_files_router
from litestar.datastructures.headers import CacheControlHeader
import uvicorn
def create_static_router() -> Router:
return create_static_files_router(
path="/static",
directories=["./static"],
name="static",
cache_control=CacheControlHeader(max_age=3600)
)
def create_direct_route_handler() -> Router:
@get("/example.txt")
async def example_txt() -> File:
return File("static/example.txt", content_disposition_type="inline", media_type="text/plain", filename="example.txt")
return Router(
path="/static",
cache_control=CacheControlHeader(max_age=3600),
route_handlers=[example_txt],
)
app = Litestar(route_handlers=[create_static_router()]) # <- this does not work as expected
#app = Litestar(route_handlers=[create_direct_route_handler()]) # <- this works as expected
if __name__ == "__main__":
uvicorn.run(
app,
)
# Expects a "static" folder in the same directory as this file and an example.txt file in the "static" folder
```
### Steps to reproduce
```bash
1. Put the MVCE beside a folder called static with example.txt inside it
2. Run the server and make a request to /static/example.txt
3. Observe that there is no cache header applied to the request
4. Change the code to the create_direct_route_handler() version and repeat
5. Observe the cache header applied as expected
```
### Screenshots
```bash
NA
```
### Logs
_No response_
### Litestar Version
2.6.1
### Platform
- [ ] Linux
- [ ] Mac
- [X] Windows
- [ ] Other (Please specify in the description above)
<!-- POLAR PLEDGE BADGE START -->
---
> [!NOTE]
> While we are open for sponsoring on [GitHub Sponsors](https://github.com/sponsors/litestar-org/) and
> [OpenCollective](https://opencollective.com/litestar), we also utilize [Polar.sh](https://polar.sh/) to engage in pledge-based sponsorship.
>
> Check out all issues funded or available for funding [on our Polar.sh dashboard](https://polar.sh/litestar-org)
> * If you would like to see an issue prioritized, make a pledge towards it!
> * We receive the pledge once the issue is completed & verified
> * This, along with engagement in the community, helps us know which features are a priority to our users.
<a href="https://polar.sh/litestar-org/litestar/issues/3129">
<picture>
<source media="(prefers-color-scheme: dark)" srcset="https://polar.sh/api/github/litestar-org/litestar/issues/3129/pledge.svg?darkmode=1">
<img alt="Fund with Polar" src="https://polar.sh/api/github/litestar-org/litestar/issues/3129/pledge.svg">
</picture>
</a>
<!-- POLAR PLEDGE BADGE END -->
| 2024-02-23T02:45:24 |
|
litestar-org/litestar | 3,136 | litestar-org__litestar-3136 | [
"3134"
] | 23ec59032f1dae3e2951d1c289478409b9478371 | diff --git a/litestar/dto/_backend.py b/litestar/dto/_backend.py
--- a/litestar/dto/_backend.py
+++ b/litestar/dto/_backend.py
@@ -406,6 +406,8 @@ def _create_transfer_type(
if nested_depth == self.dto_factory.config.max_nested_depth:
raise RecursionError
+ unique_name = f"{unique_name}{field_definition.raw.__name__}"
+
nested_field_definitions = self.parse_model(
model_type=field_definition.annotation,
exclude=exclude,
| diff --git a/tests/unit/test_dto/test_factory/test_backends/test_backends.py b/tests/unit/test_dto/test_factory/test_backends/test_backends.py
--- a/tests/unit/test_dto/test_factory/test_backends/test_backends.py
+++ b/tests/unit/test_dto/test_factory/test_backends/test_backends.py
@@ -196,6 +196,7 @@ def handler(data: DC) -> DC:
schemas = creator.schema_registry.generate_components_schemas()
assert isinstance(ref, Reference)
schema = schemas[ref.value]
+ assert schema.title == "HandlerDCResponseBody"
assert schema.properties is not None
a, b, c = schema.properties["a"], schema.properties["b"], schema.properties["c"]
assert isinstance(a, Schema)
@@ -208,6 +209,7 @@ def handler(data: DC) -> DC:
assert c.items.type == "integer"
assert isinstance(nested := schema.properties["nested"], Reference) # noqa: RUF018
nested_schema = schemas[nested.value]
+ assert nested_schema.title == "HandlerDCNestedDCResponseBody"
assert nested_schema.properties is not None
nested_a, nested_b = nested_schema.properties["a"], nested_schema.properties["b"]
assert isinstance(nested_a, Schema)
| Bug: Duplicate Response Schema name when using ManyToOne relationships
### Description
I am creating log file models that have a relationship to a user model.
The models are similar to the following.
```py
class User(UUIDBase):
name: Mapped[str]
class Logfile(UUIDAuditBase):
name: Mapped[str]
user_id: Mapped[UUID] = mapped_column(ForeignKey("user.id"))
user: Mapped[User] = relationship(lazy="joined", innerjoin=True, viewonly=True)
class LogfileDTO(SQLAlchemyDTO[Logfile]):
config = SQLAlchemyDTOConfig()
@get(path="/logs", dto=LogfileDTO)
async def get_logs(db_session: "AsyncSession") -> list[Logfile]:
return list(await db_session.scalars(select(Logfile)))
```
The expected outcome would be two response body schemas, with different names, the Logfile and the nested User.
Actual outcome is two schemas with the same name are generated, so one gets the long format name.
From Swagger of the MCVE, showing the two ResponseBody schemas

I thought it was an issue with SQLAlchemyDTO, but after simplifying the MCVE further, it presents itself with even DataclassDTO.
### URL to code causing the issue
_No response_
### MCVE
```python
from dataclasses import dataclass
from litestar import Litestar, get
from litestar.dto import DataclassDTO, DTOConfig
@dataclass
class User:
name: str
logs: list["Logfile"]
@dataclass
class Logfile:
name: str
user: User
class UserDTO(DataclassDTO[User]):
config = DTOConfig()
class LogfileDTO(DataclassDTO[Logfile]):
config = DTOConfig()
@get(path="/logs", dto=LogfileDTO)
async def get_logs() -> list[Logfile]:
user = User(name="test", logs=[])
logfiles = [Logfile(name="test", user=user)]
user.logs = logfiles
return logfiles
@get(path="/users", dto=UserDTO)
async def get_users() -> list[User]:
user = User(name="test", logs=[])
logfiles = [Logfile(name="test", user=user)]
user.logs = logfiles
return [user]
app = Litestar(
route_handlers=[get_logs, get_users],
)
```
### Steps to reproduce
```bash
1. Run the MCVE
2. Go to the Swagger interface ex. http://127.0.0.1:8000/schema/swagger/
3. Scroll to the bottom
4. See error
```
### Screenshots
```bash
"
```
### Logs
_No response_
### Litestar Version
2.6.1
### Platform
- [X] Linux
- [ ] Mac
- [ ] Windows
- [ ] Other (Please specify in the description above)
<!-- POLAR PLEDGE BADGE START -->
---
> [!NOTE]
> While we are open for sponsoring on [GitHub Sponsors](https://github.com/sponsors/litestar-org/) and
> [OpenCollective](https://opencollective.com/litestar), we also utilize [Polar.sh](https://polar.sh/) to engage in pledge-based sponsorship.
>
> Check out all issues funded or available for funding [on our Polar.sh dashboard](https://polar.sh/litestar-org)
> * If you would like to see an issue prioritized, make a pledge towards it!
> * We receive the pledge once the issue is completed & verified
> * This, along with engagement in the community, helps us know which features are a priority to our users.
<a href="https://polar.sh/litestar-org/litestar/issues/3134">
<picture>
<source media="(prefers-color-scheme: dark)" srcset="https://polar.sh/api/github/litestar-org/litestar/issues/3134/pledge.svg?darkmode=1">
<img alt="Fund with Polar" src="https://polar.sh/api/github/litestar-org/litestar/issues/3134/pledge.svg">
</picture>
</a>
<!-- POLAR PLEDGE BADGE END -->
| 2024-02-26T03:48:54 |
|
litestar-org/litestar | 3,149 | litestar-org__litestar-3149 | [
"3148"
] | fe7214378553a88701222e4895746fe15f6844ab | diff --git a/litestar/contrib/pydantic/pydantic_schema_plugin.py b/litestar/contrib/pydantic/pydantic_schema_plugin.py
--- a/litestar/contrib/pydantic/pydantic_schema_plugin.py
+++ b/litestar/contrib/pydantic/pydantic_schema_plugin.py
@@ -142,6 +142,8 @@
if pydantic_v2 is not None: # pragma: no cover
PYDANTIC_TYPE_MAP.update(
{
+ pydantic_v2.SecretStr: Schema(type=OpenAPIType.STRING),
+ pydantic_v2.SecretBytes: Schema(type=OpenAPIType.STRING),
pydantic_v2.ByteSize: Schema(type=OpenAPIType.INTEGER),
pydantic_v2.EmailStr: Schema(type=OpenAPIType.STRING, format=OpenAPIFormat.EMAIL),
pydantic_v2.IPvAnyAddress: Schema(
| diff --git a/tests/unit/test_contrib/test_pydantic/test_schema_plugin.py b/tests/unit/test_contrib/test_pydantic/test_schema_plugin.py
--- a/tests/unit/test_contrib/test_pydantic/test_schema_plugin.py
+++ b/tests/unit/test_contrib/test_pydantic/test_schema_plugin.py
@@ -8,6 +8,7 @@
from pydantic.v1.generics import GenericModel
from typing_extensions import Annotated
+from litestar._openapi.schema_generation import SchemaCreator
from litestar.contrib.pydantic.pydantic_schema_plugin import PydanticSchemaPlugin
from litestar.openapi.spec import OpenAPIType
from litestar.openapi.spec.schema import Schema
@@ -65,3 +66,17 @@ def test_schema_generation_with_generic_classes(model: Type[Union[PydanticV1Gene
)
def test_is_pydantic_constrained_field(constrained: Any) -> None:
PydanticSchemaPlugin.is_constrained_field(FieldDefinition.from_annotation(constrained))
+
+
+def test_v2_constrained_secrets() -> None:
+ # https://github.com/litestar-org/litestar/issues/3148
+ class Model(pydantic_v2.BaseModel):
+ string: pydantic_v2.SecretStr = pydantic_v2.Field(min_length=1)
+ bytes_: pydantic_v2.SecretBytes = pydantic_v2.Field(min_length=1)
+
+ schema = PydanticSchemaPlugin.for_pydantic_model(
+ FieldDefinition.from_annotation(Model), schema_creator=SchemaCreator(plugins=[PydanticSchemaPlugin()])
+ )
+ assert schema.properties
+ assert schema.properties["string"] == Schema(min_length=1, type=OpenAPIType.STRING)
+ assert schema.properties["bytes_"] == Schema(min_length=1, type=OpenAPIType.STRING)
| Bug: OpenAPI schema error when using `pydantic.SecretStr` with a `pydantic.Field`
Reported by seels in Discord: <#1212255742954971207>:
Hey, all. I'm getting the following error when using a `SecretStr` attribute (from Pydantic) and accessing `/schema/openapi.json`: `type object 'SecretStr' has no attribute 'item_type'`. I've got `SecretStr: str` in my `type_encoders`, but that doesn't seem to help.
<!-- POLAR PLEDGE BADGE START -->
---
> [!NOTE]
> While we are open for sponsoring on [GitHub Sponsors](https://github.com/sponsors/litestar-org/) and
> [OpenCollective](https://opencollective.com/litestar), we also utilize [Polar.sh](https://polar.sh/) to engage in pledge-based sponsorship.
>
> Check out all issues funded or available for funding [on our Polar.sh dashboard](https://polar.sh/litestar-org)
> * If you would like to see an issue prioritized, make a pledge towards it!
> * We receive the pledge once the issue is completed & verified
> * This, along with engagement in the community, helps us know which features are a priority to our users.
<a href="https://polar.sh/litestar-org/litestar/issues/3148">
<picture>
<source media="(prefers-color-scheme: dark)" srcset="https://polar.sh/api/github/litestar-org/litestar/issues/3148/pledge.svg?darkmode=1">
<img alt="Fund with Polar" src="https://polar.sh/api/github/litestar-org/litestar/issues/3148/pledge.svg">
</picture>
</a>
<!-- POLAR PLEDGE BADGE END -->
| MCVE:
```python
from litestar import post
from pydantic import BaseModel, Field, SecretStr
class Something(BaseModel):
password: SecretStr = Field(min_length=8)
@post()
async def something(data: Something) -> Something:
return data
from litestar.testing import create_test_client
with create_test_client([something]) as client:
print(client.get('schema/openapi.json').json())
```
This only happens when `password` is set to a `Field`. It works fine when it's just `password: SecretStr`. | 2024-03-01T18:11:02 |
litestar-org/litestar | 3,151 | litestar-org__litestar-3151 | [
"3150"
] | 1332983e09e13571c9a7490540a21f5cf8121f77 | diff --git a/litestar/contrib/pydantic/pydantic_schema_plugin.py b/litestar/contrib/pydantic/pydantic_schema_plugin.py
--- a/litestar/contrib/pydantic/pydantic_schema_plugin.py
+++ b/litestar/contrib/pydantic/pydantic_schema_plugin.py
@@ -1,6 +1,6 @@
from __future__ import annotations
-from typing import TYPE_CHECKING, Any
+from typing import TYPE_CHECKING, Any, Optional
from typing_extensions import Annotated
@@ -11,14 +11,15 @@
is_pydantic_constrained_field,
is_pydantic_model_class,
is_pydantic_undefined,
- pydantic_get_unwrapped_annotation_and_type_hints,
+ pydantic_get_type_hints_with_generics_resolved,
+ pydantic_unwrap_and_get_origin,
)
from litestar.exceptions import MissingDependencyException
from litestar.openapi.spec import Example, OpenAPIFormat, OpenAPIType, Schema
from litestar.plugins import OpenAPISchemaPlugin
from litestar.types import Empty
from litestar.typing import FieldDefinition
-from litestar.utils import is_class_and_subclass
+from litestar.utils import is_class_and_subclass, is_generic
try:
# check if we have pydantic v2 installed, and try to import both versions
@@ -249,17 +250,22 @@ def for_pydantic_model(cls, field_definition: FieldDefinition, schema_creator: S
"""
annotation = field_definition.annotation
- unwrapped_annotation, annotation_hints = pydantic_get_unwrapped_annotation_and_type_hints(annotation)
+ if is_generic(annotation):
+ is_generic_model = True
+ model = pydantic_unwrap_and_get_origin(annotation) or annotation
+ else:
+ is_generic_model = False
+ model = annotation
- if is_pydantic_2_model(annotation):
- model_config = annotation.model_config
- model_field_info = unwrapped_annotation.model_fields
+ if is_pydantic_2_model(model):
+ model_config = model.model_config
+ model_field_info = model.model_fields
title = model_config.get("title")
example = model_config.get("example")
is_v2_model = True
else:
model_config = annotation.__config__
- model_field_info = unwrapped_annotation.__fields__
+ model_field_info = model.__fields__
title = getattr(model_config, "title", None)
example = getattr(model_config, "example", None)
is_v2_model = False
@@ -268,15 +274,34 @@ def for_pydantic_model(cls, field_definition: FieldDefinition, schema_creator: S
k: getattr(f, "field_info", f) for k, f in model_field_info.items()
}
+ if is_v2_model:
+ # extract the annotations from the FieldInfo. This allows us to skip fields
+ # which have been marked as private
+ model_annotations = {k: field_info.annotation for k, field_info in model_fields.items()} # type: ignore[union-attr]
+
+ else:
+ # pydantic v1 requires some workarounds here
+ model_annotations = {
+ k: f.outer_type_ if f.required else Optional[f.outer_type_] for k, f in model.__fields__.items()
+ }
+
+ if is_generic_model:
+ # if the model is generic, resolve the type variables. We pass in the
+ # already extracted annotations, to keep the logic of respecting private
+ # fields consistent with the above
+ model_annotations = pydantic_get_type_hints_with_generics_resolved(
+ annotation, model_annotations=model_annotations, include_extras=True
+ )
+
property_fields = {
- f.alias if f.alias and schema_creator.prefer_alias else k: FieldDefinition.from_kwarg(
- annotation=Annotated[annotation_hints[k], f, f.metadata] # type: ignore[union-attr]
+ field_info.alias if field_info.alias and schema_creator.prefer_alias else k: FieldDefinition.from_kwarg(
+ annotation=Annotated[model_annotations[k], field_info, field_info.metadata] # type: ignore[union-attr]
if is_v2_model
- else Annotated[annotation_hints[k], f], # pyright: ignore
- name=f.alias if f.alias and schema_creator.prefer_alias else k,
- default=Empty if schema_creator.is_undefined(f.default) else f.default,
+ else Annotated[model_annotations[k], field_info], # pyright: ignore
+ name=field_info.alias if field_info.alias and schema_creator.prefer_alias else k,
+ default=Empty if schema_creator.is_undefined(field_info.default) else field_info.default,
)
- for k, f in model_fields.items()
+ for k, field_info in model_fields.items()
}
computed_field_definitions = create_field_definitions_for_computed_fields(
diff --git a/litestar/contrib/pydantic/utils.py b/litestar/contrib/pydantic/utils.py
--- a/litestar/contrib/pydantic/utils.py
+++ b/litestar/contrib/pydantic/utils.py
@@ -8,7 +8,7 @@
from litestar.params import KwargDefinition
from litestar.types import Empty
from litestar.typing import FieldDefinition
-from litestar.utils import is_class_and_subclass
+from litestar.utils import deprecated, is_class_and_subclass
from litestar.utils.predicates import is_generic
from litestar.utils.typing import (
_substitute_typevars,
@@ -129,24 +129,32 @@ def pydantic_get_type_hints_with_generics_resolved(
globalns: dict[str, Any] | None = None,
localns: dict[str, Any] | None = None,
include_extras: bool = False,
+ model_annotations: dict[str, Any] | None = None,
) -> dict[str, Any]:
if pydantic_v2 is Empty or (pydantic_v1 is not Empty and is_class_and_subclass(annotation, pydantic_v1.BaseModel)):
return get_type_hints_with_generics_resolved(annotation)
origin = pydantic_unwrap_and_get_origin(annotation)
if origin is None:
- type_hints = get_type_hints(annotation, globalns=globalns, localns=localns, include_extras=include_extras)
+ if model_annotations is None: # pragma: no cover
+ model_annotations = get_type_hints(
+ annotation, globalns=globalns, localns=localns, include_extras=include_extras
+ )
typevar_map = {p: p for p in annotation.__pydantic_generic_metadata__["parameters"]}
else:
- type_hints = get_type_hints(origin, globalns=globalns, localns=localns, include_extras=include_extras)
+ if model_annotations is None:
+ model_annotations = get_type_hints(
+ origin, globalns=globalns, localns=localns, include_extras=include_extras
+ )
args = annotation.__pydantic_generic_metadata__["args"]
parameters = origin.__pydantic_generic_metadata__["parameters"]
typevar_map = dict(zip(parameters, args))
- return {n: _substitute_typevars(type_, typevar_map) for n, type_ in type_hints.items()}
+ return {n: _substitute_typevars(type_, typevar_map) for n, type_ in model_annotations.items()}
-def pydantic_get_unwrapped_annotation_and_type_hints(annotation: Any) -> tuple[Any, dict[str, Any]]:
+@deprecated(version="2.6.2")
+def pydantic_get_unwrapped_annotation_and_type_hints(annotation: Any) -> tuple[Any, dict[str, Any]]: # pragma: pver
"""Get the unwrapped annotation and the type hints after resolving generics.
Args:
| diff --git a/tests/unit/test_contrib/test_pydantic/models.py b/tests/unit/test_contrib/test_pydantic/models.py
--- a/tests/unit/test_contrib/test_pydantic/models.py
+++ b/tests/unit/test_contrib/test_pydantic/models.py
@@ -1,4 +1,4 @@
-from typing import Dict, List, Optional
+from typing import Dict, List, Optional, Union
from pydantic import BaseModel
from pydantic.dataclasses import dataclass as pydantic_dataclass
@@ -15,6 +15,7 @@ class PydanticDataclassPerson:
id: str
optional: Optional[str]
complex: Dict[str, List[Dict[str, str]]]
+ union: Union[int, List[str]]
pets: Optional[List[DataclassPet]] = None
@@ -24,6 +25,7 @@ class PydanticPerson(BaseModel):
id: str
optional: Optional[str]
complex: Dict[str, List[Dict[str, str]]]
+ union: Union[int, List[str]]
pets: Optional[List[DataclassPet]] = None
@@ -33,6 +35,7 @@ class PydanticV1Person(BaseModelV1):
id: str
optional: Optional[str]
complex: Dict[str, List[Dict[str, str]]]
+ union: Union[int, List[str]]
pets: Optional[List[DataclassPet]] = None
@@ -43,4 +46,5 @@ class PydanticV1DataclassPerson:
id: str
optional: Optional[str]
complex: Dict[str, List[Dict[str, str]]]
+ union: Union[int, List[str]]
pets: Optional[List[DataclassPet]] = None
diff --git a/tests/unit/test_contrib/test_pydantic/test_integration.py b/tests/unit/test_contrib/test_pydantic/test_integration.py
--- a/tests/unit/test_contrib/test_pydantic/test_integration.py
+++ b/tests/unit/test_contrib/test_pydantic/test_integration.py
@@ -98,7 +98,7 @@ def my_route_handler(param: int, data: PydanticPerson) -> None:
response = client.post("/123", json={"first_name": "moishe"})
extra = response.json().get("extra")
assert extra is not None
- assert len(extra) == 4
+ assert len(extra) == 5
def test_default_error_handling_v1() -> None:
@@ -110,7 +110,7 @@ def my_route_handler(param: int, data: PydanticV1Person) -> None:
response = client.post("/123", json={"first_name": "moishe"})
extra = response.json().get("extra")
assert extra is not None
- assert len(extra) == 3
+ assert len(extra) == 4
def test_signature_model_invalid_input(
@@ -172,3 +172,35 @@ def test(
"key": "other_child.val.1",
},
]
+
+
+class V1ModelWithPrivateFields(pydantic_v1.BaseModel):
+ class Config:
+ underscore_fields_are_private = True
+
+ _field: str = pydantic_v1.PrivateAttr()
+ # include an invalid annotation here to ensure we never touch those fields
+ _underscore_field: "foo" # type: ignore[name-defined] # noqa: F821
+ bar: str
+
+
+class V2ModelWithPrivateFields(pydantic_v2.BaseModel):
+ class Config:
+ underscore_fields_are_private = True
+
+ _field: str = pydantic_v2.PrivateAttr()
+ # include an invalid annotation here to ensure we never touch those fields
+ _underscore_field: "foo" # type: ignore[name-defined] # noqa: F821
+ bar: str
+
+
[email protected]("model_type", [V1ModelWithPrivateFields, V2ModelWithPrivateFields])
+def test_private_fields(model_type: Type[Union[pydantic_v1.BaseModel, pydantic_v2.BaseModel]]) -> None:
+ @post("/")
+ async def handler(data: V2ModelWithPrivateFields) -> V2ModelWithPrivateFields:
+ return data
+
+ with create_test_client([handler]) as client:
+ res = client.post("/", json={"bar": "value"})
+ assert res.status_code == 201
+ assert res.json() == {"bar": "value"}
diff --git a/tests/unit/test_contrib/test_pydantic/test_openapi.py b/tests/unit/test_contrib/test_pydantic/test_openapi.py
--- a/tests/unit/test_contrib/test_pydantic/test_openapi.py
+++ b/tests/unit/test_contrib/test_pydantic/test_openapi.py
@@ -346,6 +346,7 @@ def handler(data: cls) -> cls:
"items": {"type": "object", "additionalProperties": {"type": "string"}},
},
},
+ "union": {"oneOf": [{"type": "integer"}, {"items": {"type": "string"}, "type": "array"}]},
"pets": {
"oneOf": [
{"type": "null"},
@@ -357,7 +358,7 @@ def handler(data: cls) -> cls:
},
},
"type": "object",
- "required": ["complex", "first_name", "id", "last_name"],
+ "required": ["complex", "first_name", "id", "last_name", "union"],
"title": f"{cls.__name__}",
}
@@ -561,7 +562,7 @@ def test_create_schema_for_pydantic_model_with_annotated_model_attribute(
f"""
{'from __future__ import annotations' if with_future_annotations else ''}
from typing_extensions import Annotated
-{'from pydantic import BaseModel' if pydantic_version == 'v1' else 'from pydantic.v1 import BaseModel'}
+{'from pydantic import BaseModel' if pydantic_version == 'v2' else 'from pydantic.v1 import BaseModel'}
class Foo(BaseModel):
foo: Annotated[int, "Foo description"]
diff --git a/tests/unit/test_contrib/test_pydantic/test_schema_plugin.py b/tests/unit/test_contrib/test_pydantic/test_schema_plugin.py
--- a/tests/unit/test_contrib/test_pydantic/test_schema_plugin.py
+++ b/tests/unit/test_contrib/test_pydantic/test_schema_plugin.py
@@ -80,3 +80,30 @@ class Model(pydantic_v2.BaseModel):
assert schema.properties
assert schema.properties["string"] == Schema(min_length=1, type=OpenAPIType.STRING)
assert schema.properties["bytes_"] == Schema(min_length=1, type=OpenAPIType.STRING)
+
+
+class V1ModelWithPrivateFields(pydantic_v1.BaseModel):
+ class Config:
+ underscore_fields_are_private = True
+
+ _field: str = pydantic_v1.PrivateAttr()
+ # include an invalid annotation here to ensure we never touch those fields
+ _underscore_field: "foo" # type: ignore[name-defined] # noqa: F821
+
+
+class V2ModelWithPrivateFields(pydantic_v2.BaseModel):
+ class Config:
+ underscore_fields_are_private = True
+
+ _field: str = pydantic_v2.PrivateAttr()
+ # include an invalid annotation here to ensure we never touch those fields
+ _underscore_field: "foo" # type: ignore[name-defined] # noqa: F821
+
+
[email protected]("model_class", [V1ModelWithPrivateFields, V2ModelWithPrivateFields])
+def test_exclude_private_fields(model_class: Type[Union[pydantic_v1.BaseModel, pydantic_v2.BaseModel]]) -> None:
+ # https://github.com/litestar-org/litestar/issues/3150
+ schema = PydanticSchemaPlugin.for_pydantic_model(
+ FieldDefinition.from_annotation(model_class), schema_creator=SchemaCreator(plugins=[PydanticSchemaPlugin()])
+ )
+ assert not schema.properties
| Bug: pydantic PrivateAttr fields are inspected for type hints
### Description
If a private field is annotated with unresolvable ForwardRef, openapi schema build fails (see example)
### URL to code causing the issue
_No response_
### MCVE
```python
from typing import TYPE_CHECKING
from litestar import Litestar, post
from litestar._openapi.plugin import OpenAPIPlugin
from pydantic import BaseModel
if TYPE_CHECKING:
from typing import Any
class Model(BaseModel):
class Config:
underscore_attrs_are_private = True
_value: "Any"
@post("/")
def hello_world(data: Model) -> dict[str, str]:
"""Keeping the tradition alive with hello world."""
return {"hello": "world"}
def main():
app = Litestar(route_handlers=[hello_world], )
app.plugins.get(OpenAPIPlugin).provide_openapi()
if __name__ == '__main__':
main()
```
### Steps to reproduce
```bash
run the example
```
### Screenshots
```bash
no screenshots
```
### Logs
```bash
.../litestar_schema/app.py:21: LitestarWarning: Use of a synchronous callable <function hello_world at 0x102eafeb0> without setting sync_to_thread is discouraged since synchronous callables can block the main thread if they perform blocking operations. If the callable is guaranteed to be non-blocking, you can set sync_to_thread=False to skip this warning, or set the environmentvariable LITESTAR_WARN_IMPLICIT_SYNC_TO_THREAD=0 to disable warnings of this type entirely.
def hello_world(data: Model) -> dict[str, str]:
Traceback (most recent call last):
File ".../litestar_schema/app.py", line 33, in <module>
main()
File ".../litestar_schema/app.py", line 29, in main
app.plugins.get(OpenAPIPlugin).provide_openapi()
File "/Users/mike0sv/mambaforge/envs/evidently/lib/python3.10/site-packages/litestar/_openapi/plugin.py", line 46, in provide_openapi
self._openapi_schema = self._build_openapi_schema()
File "/Users/mike0sv/mambaforge/envs/evidently/lib/python3.10/site-packages/litestar/_openapi/plugin.py", line 37, in _build_openapi_schema
openapi.paths = {
File "/Users/mike0sv/mambaforge/envs/evidently/lib/python3.10/site-packages/litestar/_openapi/plugin.py", line 38, in <dictcomp>
route.path_format or "/": create_path_item_for_route(context, route)
File "/Users/mike0sv/mambaforge/envs/evidently/lib/python3.10/site-packages/litestar/_openapi/path_item.py", line 137, in create_path_item_for_route
return path_item_factory.create_path_item()
File "/Users/mike0sv/mambaforge/envs/evidently/lib/python3.10/site-packages/litestar/_openapi/path_item.py", line 42, in create_path_item
operation = self.create_operation_for_handler_method(route_handler, HttpMethod(http_method))
File "/Users/mike0sv/mambaforge/envs/evidently/lib/python3.10/site-packages/litestar/_openapi/path_item.py", line 66, in create_operation_for_handler_method
request_body = create_request_body(
File "/Users/mike0sv/mambaforge/envs/evidently/lib/python3.10/site-packages/litestar/_openapi/request_body.py", line 49, in create_request_body
schema = schema_creator.for_field_definition(data_field)
File "/Users/mike0sv/mambaforge/envs/evidently/lib/python3.10/site-packages/litestar/_openapi/schema_generation/schema.py", line 327, in for_field_definition
result = self.for_plugin(field_definition, plugin_for_annotation)
File "/Users/mike0sv/mambaforge/envs/evidently/lib/python3.10/site-packages/litestar/_openapi/schema_generation/schema.py", line 487, in for_plugin
schema = plugin.to_openapi_schema(field_definition=field_definition, schema_creator=self)
File "/Users/mike0sv/mambaforge/envs/evidently/lib/python3.10/site-packages/litestar/contrib/pydantic/pydantic_schema_plugin.py", line 234, in to_openapi_schema
return self.for_pydantic_model(field_definition=field_definition, schema_creator=schema_creator)
File "/Users/mike0sv/mambaforge/envs/evidently/lib/python3.10/site-packages/litestar/contrib/pydantic/pydantic_schema_plugin.py", line 250, in for_pydantic_model
unwrapped_annotation, annotation_hints = pydantic_get_unwrapped_annotation_and_type_hints(annotation)
File "/Users/mike0sv/mambaforge/envs/evidently/lib/python3.10/site-packages/litestar/contrib/pydantic/utils.py", line 162, in pydantic_get_unwrapped_annotation_and_type_hints
return annotation, get_type_hints(annotation, include_extras=True)
File "/Users/mike0sv/mambaforge/envs/evidently/lib/python3.10/site-packages/typing_extensions.py", line 1104, in get_type_hints
hint = typing.get_type_hints(
File "/Users/mike0sv/mambaforge/envs/evidently/lib/python3.10/typing.py", line 1836, in get_type_hints
value = _eval_type(value, base_globals, base_locals)
File "/Users/mike0sv/mambaforge/envs/evidently/lib/python3.10/typing.py", line 327, in _eval_type
return t._evaluate(globalns, localns, recursive_guard)
File "/Users/mike0sv/mambaforge/envs/evidently/lib/python3.10/typing.py", line 695, in _evaluate
eval(self.__forward_code__, globalns, localns),
File "<string>", line 1, in <module>
NameError: name 'Any' is not defined. Did you mean: 'any'?
```
### Litestar Version
litestar==2.5.1
### Platform
- [ ] Linux
- [X] Mac
- [ ] Windows
- [ ] Other (Please specify in the description above)
<!-- POLAR PLEDGE BADGE START -->
---
> [!NOTE]
> While we are open for sponsoring on [GitHub Sponsors](https://github.com/sponsors/litestar-org/) and
> [OpenCollective](https://opencollective.com/litestar), we also utilize [Polar.sh](https://polar.sh/) to engage in pledge-based sponsorship.
>
> Check out all issues funded or available for funding [on our Polar.sh dashboard](https://polar.sh/litestar-org)
> * If you would like to see an issue prioritized, make a pledge towards it!
> * We receive the pledge once the issue is completed & verified
> * This, along with engagement in the community, helps us know which features are a priority to our users.
<a href="https://polar.sh/litestar-org/litestar/issues/3150">
<picture>
<source media="(prefers-color-scheme: dark)" srcset="https://polar.sh/api/github/litestar-org/litestar/issues/3150/pledge.svg?darkmode=1">
<img alt="Fund with Polar" src="https://polar.sh/api/github/litestar-org/litestar/issues/3150/pledge.svg">
</picture>
</a>
<!-- POLAR PLEDGE BADGE END -->
| ~~Okay so I think the issue here is that we don't respect the `Config.underscore_attrs_are_private`. IMO we simply shouldn't touch the underscore fields at all if this is set. @litestar-org/maintainers?~~
We actually do. This is a different issue.
Issue here is that while we do exclude those fields from the schema, we still try to extract their type annotations. | 2024-03-02T10:39:01 |
litestar-org/litestar | 3,161 | litestar-org__litestar-3161 | [
"3150"
] | c02cc2d9b99abdf5afeb85d88ca159da091fae7a | diff --git a/litestar/contrib/pydantic/utils.py b/litestar/contrib/pydantic/utils.py
--- a/litestar/contrib/pydantic/utils.py
+++ b/litestar/contrib/pydantic/utils.py
@@ -132,7 +132,7 @@ def pydantic_get_type_hints_with_generics_resolved(
model_annotations: dict[str, Any] | None = None,
) -> dict[str, Any]:
if pydantic_v2 is Empty or (pydantic_v1 is not Empty and is_class_and_subclass(annotation, pydantic_v1.BaseModel)):
- return get_type_hints_with_generics_resolved(annotation)
+ return get_type_hints_with_generics_resolved(annotation, type_hints=model_annotations)
origin = pydantic_unwrap_and_get_origin(annotation)
if origin is None:
diff --git a/litestar/utils/typing.py b/litestar/utils/typing.py
--- a/litestar/utils/typing.py
+++ b/litestar/utils/typing.py
@@ -235,6 +235,7 @@ def get_type_hints_with_generics_resolved(
globalns: dict[str, Any] | None = None,
localns: dict[str, Any] | None = None,
include_extras: bool = False,
+ type_hints: dict[str, Any] | None = None,
) -> dict[str, Any]:
"""Get the type hints for the given object after resolving the generic types as much as possible.
@@ -243,15 +244,18 @@ def get_type_hints_with_generics_resolved(
globalns: The global namespace.
localns: The local namespace.
include_extras: A flag indicating whether to include the ``Annotated[T, ...]`` or not.
+ type_hints: Already resolved type hints
"""
origin = get_origin(annotation)
if origin is None:
# Implies the generic types have not been specified in the annotation
- type_hints = get_type_hints(annotation, globalns=globalns, localns=localns, include_extras=include_extras)
+ if type_hints is None: # pragma: no cover
+ type_hints = get_type_hints(annotation, globalns=globalns, localns=localns, include_extras=include_extras)
typevar_map = {p: p for p in annotation.__parameters__}
else:
- type_hints = get_type_hints(origin, globalns=globalns, localns=localns, include_extras=include_extras)
+ if type_hints is None: # pragma: no cover
+ type_hints = get_type_hints(origin, globalns=globalns, localns=localns, include_extras=include_extras)
# the __parameters__ is only available on the origin itself and not the annotation
typevar_map = dict(zip(origin.__parameters__, get_args(annotation)))
| diff --git a/tests/unit/test_contrib/test_pydantic/test_schema_plugin.py b/tests/unit/test_contrib/test_pydantic/test_schema_plugin.py
--- a/tests/unit/test_contrib/test_pydantic/test_schema_plugin.py
+++ b/tests/unit/test_contrib/test_pydantic/test_schema_plugin.py
@@ -91,16 +91,36 @@ class Config:
_underscore_field: "foo" # type: ignore[name-defined] # noqa: F821
-class V2ModelWithPrivateFields(pydantic_v2.BaseModel):
+class V1GenericModelWithPrivateFields(pydantic_v1.generics.GenericModel, Generic[T]): # pyright: ignore
class Config:
underscore_fields_are_private = True
+ _field: str = pydantic_v1.PrivateAttr()
+ # include an invalid annotation here to ensure we never touch those fields
+ _underscore_field: "foo" # type: ignore[name-defined] # noqa: F821
+
+
+class V2ModelWithPrivateFields(pydantic_v2.BaseModel):
+ _field: str = pydantic_v2.PrivateAttr()
+ # include an invalid annotation here to ensure we never touch those fields
+ _underscore_field: "foo" # type: ignore[name-defined] # noqa: F821
+
+
+class V2GenericModelWithPrivateFields(pydantic_v2.BaseModel, Generic[T]):
_field: str = pydantic_v2.PrivateAttr()
# include an invalid annotation here to ensure we never touch those fields
_underscore_field: "foo" # type: ignore[name-defined] # noqa: F821
[email protected]("model_class", [V1ModelWithPrivateFields, V2ModelWithPrivateFields])
[email protected](
+ "model_class",
+ [
+ V1ModelWithPrivateFields,
+ V1GenericModelWithPrivateFields,
+ V2ModelWithPrivateFields,
+ V2GenericModelWithPrivateFields,
+ ],
+)
def test_exclude_private_fields(model_class: Type[Union[pydantic_v1.BaseModel, pydantic_v2.BaseModel]]) -> None:
# https://github.com/litestar-org/litestar/issues/3150
schema = PydanticSchemaPlugin.for_pydantic_model(
| Bug: pydantic PrivateAttr fields are inspected for type hints
### Description
If a private field is annotated with unresolvable ForwardRef, openapi schema build fails (see example)
### URL to code causing the issue
_No response_
### MCVE
```python
from typing import TYPE_CHECKING
from litestar import Litestar, post
from litestar._openapi.plugin import OpenAPIPlugin
from pydantic import BaseModel
if TYPE_CHECKING:
from typing import Any
class Model(BaseModel):
class Config:
underscore_attrs_are_private = True
_value: "Any"
@post("/")
def hello_world(data: Model) -> dict[str, str]:
"""Keeping the tradition alive with hello world."""
return {"hello": "world"}
def main():
app = Litestar(route_handlers=[hello_world], )
app.plugins.get(OpenAPIPlugin).provide_openapi()
if __name__ == '__main__':
main()
```
### Steps to reproduce
```bash
run the example
```
### Screenshots
```bash
no screenshots
```
### Logs
```bash
.../litestar_schema/app.py:21: LitestarWarning: Use of a synchronous callable <function hello_world at 0x102eafeb0> without setting sync_to_thread is discouraged since synchronous callables can block the main thread if they perform blocking operations. If the callable is guaranteed to be non-blocking, you can set sync_to_thread=False to skip this warning, or set the environmentvariable LITESTAR_WARN_IMPLICIT_SYNC_TO_THREAD=0 to disable warnings of this type entirely.
def hello_world(data: Model) -> dict[str, str]:
Traceback (most recent call last):
File ".../litestar_schema/app.py", line 33, in <module>
main()
File ".../litestar_schema/app.py", line 29, in main
app.plugins.get(OpenAPIPlugin).provide_openapi()
File "/Users/mike0sv/mambaforge/envs/evidently/lib/python3.10/site-packages/litestar/_openapi/plugin.py", line 46, in provide_openapi
self._openapi_schema = self._build_openapi_schema()
File "/Users/mike0sv/mambaforge/envs/evidently/lib/python3.10/site-packages/litestar/_openapi/plugin.py", line 37, in _build_openapi_schema
openapi.paths = {
File "/Users/mike0sv/mambaforge/envs/evidently/lib/python3.10/site-packages/litestar/_openapi/plugin.py", line 38, in <dictcomp>
route.path_format or "/": create_path_item_for_route(context, route)
File "/Users/mike0sv/mambaforge/envs/evidently/lib/python3.10/site-packages/litestar/_openapi/path_item.py", line 137, in create_path_item_for_route
return path_item_factory.create_path_item()
File "/Users/mike0sv/mambaforge/envs/evidently/lib/python3.10/site-packages/litestar/_openapi/path_item.py", line 42, in create_path_item
operation = self.create_operation_for_handler_method(route_handler, HttpMethod(http_method))
File "/Users/mike0sv/mambaforge/envs/evidently/lib/python3.10/site-packages/litestar/_openapi/path_item.py", line 66, in create_operation_for_handler_method
request_body = create_request_body(
File "/Users/mike0sv/mambaforge/envs/evidently/lib/python3.10/site-packages/litestar/_openapi/request_body.py", line 49, in create_request_body
schema = schema_creator.for_field_definition(data_field)
File "/Users/mike0sv/mambaforge/envs/evidently/lib/python3.10/site-packages/litestar/_openapi/schema_generation/schema.py", line 327, in for_field_definition
result = self.for_plugin(field_definition, plugin_for_annotation)
File "/Users/mike0sv/mambaforge/envs/evidently/lib/python3.10/site-packages/litestar/_openapi/schema_generation/schema.py", line 487, in for_plugin
schema = plugin.to_openapi_schema(field_definition=field_definition, schema_creator=self)
File "/Users/mike0sv/mambaforge/envs/evidently/lib/python3.10/site-packages/litestar/contrib/pydantic/pydantic_schema_plugin.py", line 234, in to_openapi_schema
return self.for_pydantic_model(field_definition=field_definition, schema_creator=schema_creator)
File "/Users/mike0sv/mambaforge/envs/evidently/lib/python3.10/site-packages/litestar/contrib/pydantic/pydantic_schema_plugin.py", line 250, in for_pydantic_model
unwrapped_annotation, annotation_hints = pydantic_get_unwrapped_annotation_and_type_hints(annotation)
File "/Users/mike0sv/mambaforge/envs/evidently/lib/python3.10/site-packages/litestar/contrib/pydantic/utils.py", line 162, in pydantic_get_unwrapped_annotation_and_type_hints
return annotation, get_type_hints(annotation, include_extras=True)
File "/Users/mike0sv/mambaforge/envs/evidently/lib/python3.10/site-packages/typing_extensions.py", line 1104, in get_type_hints
hint = typing.get_type_hints(
File "/Users/mike0sv/mambaforge/envs/evidently/lib/python3.10/typing.py", line 1836, in get_type_hints
value = _eval_type(value, base_globals, base_locals)
File "/Users/mike0sv/mambaforge/envs/evidently/lib/python3.10/typing.py", line 327, in _eval_type
return t._evaluate(globalns, localns, recursive_guard)
File "/Users/mike0sv/mambaforge/envs/evidently/lib/python3.10/typing.py", line 695, in _evaluate
eval(self.__forward_code__, globalns, localns),
File "<string>", line 1, in <module>
NameError: name 'Any' is not defined. Did you mean: 'any'?
```
### Litestar Version
litestar==2.5.1
### Platform
- [ ] Linux
- [X] Mac
- [ ] Windows
- [ ] Other (Please specify in the description above)
<!-- POLAR PLEDGE BADGE START -->
---
> [!NOTE]
> While we are open for sponsoring on [GitHub Sponsors](https://github.com/sponsors/litestar-org/) and
> [OpenCollective](https://opencollective.com/litestar), we also utilize [Polar.sh](https://polar.sh/) to engage in pledge-based sponsorship.
>
> Check out all issues funded or available for funding [on our Polar.sh dashboard](https://polar.sh/litestar-org)
> * If you would like to see an issue prioritized, make a pledge towards it!
> * We receive the pledge once the issue is completed & verified
> * This, along with engagement in the community, helps us know which features are a priority to our users.
<a href="https://polar.sh/litestar-org/litestar/issues/3150">
<picture>
<source media="(prefers-color-scheme: dark)" srcset="https://polar.sh/api/github/litestar-org/litestar/issues/3150/pledge.svg?darkmode=1">
<img alt="Fund with Polar" src="https://polar.sh/api/github/litestar-org/litestar/issues/3150/pledge.svg">
</picture>
</a>
<!-- POLAR PLEDGE BADGE END -->
| ~~Okay so I think the issue here is that we don't respect the `Config.underscore_attrs_are_private`. IMO we simply shouldn't touch the underscore fields at all if this is set. @litestar-org/maintainers?~~
We actually do. This is a different issue.
Issue here is that while we do exclude those fields from the schema, we still try to extract their type annotations.
@mike0sv There's a fix in #3151. Can you give it a try and confirm that it solve the issue for you?
<!--closing-comment-->
This issue has been closed in #3151. The change will be included in the upcoming patch release.
I tested it and it works for MCVE, but it still fails for evidently codebase. I narrowed it down to the fact that our "Model" class is actually a generic. Here is the new MCVE
```py
from typing import Generic, Optional, TYPE_CHECKING, TypeVar
from litestar import Litestar, post
from litestar._openapi.plugin import OpenAPIPlugin
from pydantic import BaseModel
if TYPE_CHECKING:
from typing import Any
T = TypeVar("T")
class Model(BaseModel, Generic[T]):
class Config:
underscore_attrs_are_private = True
_value: Optional["Any"]
@post("/")
def hello_world(data: Model) -> dict[str, str]:
"""Keeping the tradition alive with hello world."""
return {"hello": "world"}
def main():
app = Litestar(route_handlers=[hello_world], )
app.plugins.get(OpenAPIPlugin).provide_openapi()
if __name__ == '__main__':
main()
```
This seems to be exclusive to Pydantic V1 though; I couldn't reproduce with V2. | 2024-03-03T10:14:13 |
litestar-org/litestar | 3,172 | litestar-org__litestar-3172 | [
"3059"
] | 054ac9e135cbfc6704ff58828e2fb73b1f97624e | diff --git a/litestar/_openapi/datastructures.py b/litestar/_openapi/datastructures.py
--- a/litestar/_openapi/datastructures.py
+++ b/litestar/_openapi/datastructures.py
@@ -150,7 +150,8 @@ def generate_components_schemas(self) -> dict[str, Schema]:
self.set_reference_paths(name_, registered_schema)
components_schemas[name_] = registered_schema.schema
- return components_schemas
+ # Sort them by name to ensure they're always generated in the same order.
+ return {name: components_schemas[name] for name in sorted(components_schemas.keys())}
class OpenAPIContext:
| diff --git a/tests/unit/test_openapi/test_integration.py b/tests/unit/test_openapi/test_integration.py
--- a/tests/unit/test_openapi/test_integration.py
+++ b/tests/unit/test_openapi/test_integration.py
@@ -9,7 +9,7 @@
import yaml
from typing_extensions import Annotated
-from litestar import Controller, Litestar, get, post
+from litestar import Controller, Litestar, delete, get, patch, post
from litestar._openapi.plugin import OpenAPIPlugin
from litestar.app import DEFAULT_OPENAPI_CONFIG
from litestar.enums import MediaType, OpenAPIMediaType, ParamType
@@ -399,3 +399,47 @@ def get_handler(q: str) -> None:
assert openapi_one == openapi_two
else:
assert openapi_one != openapi_two
+
+
+def test_components_schemas_in_alphabetical_order() -> None:
+ # https://github.com/litestar-org/litestar/issues/3059
+
+ @dataclass
+ class A:
+ ...
+
+ @dataclass
+ class B:
+ ...
+
+ @dataclass
+ class C:
+ ...
+
+ class TestController(Controller):
+ @post("/", sync_to_thread=False)
+ def post_handler(self, data: B) -> None:
+ ...
+
+ @get("/", sync_to_thread=False)
+ def get_handler(self) -> A: # type: ignore[empty-body]
+ ...
+
+ @patch("/", sync_to_thread=False)
+ def patch_handler(self, data: C) -> A: # type: ignore[empty-body]
+ ...
+
+ @delete("/", sync_to_thread=False)
+ def delete_handler(self, data: B) -> None:
+ ...
+
+ app = Litestar([TestController], signature_types=[A, B, C])
+ openapi_plugin = app.plugins.get(OpenAPIPlugin)
+ openapi = openapi_plugin.provide_openapi()
+
+ expected_keys = [
+ "test_components_schemas_in_alphabetical_order.A",
+ "test_components_schemas_in_alphabetical_order.B",
+ "test_components_schemas_in_alphabetical_order.C",
+ ]
+ assert list(openapi.components.schemas.keys()) == expected_keys
| Enhancement: Generate OpenAPI schema always in the same order
### Summary
Update: Turned out the seed is already static, but that elements might be generated in random order. Let's fix that.
---
The `generate_examples` is nice but the examples are re-generated for each application execution.
This makes it impossible to store the generated schema in git, for change tracking, as there will be always a diff.
Example generation utilizes `polyfactory` and `Faker`, which both allow setting the seed, I assume. Tried googling it up, tried something like `Faker.seed(123)` but no luck.
This should be made tuneable, and maybe default to a fixed seed, too.
Related:
- The order of `components.schemas` entries seems to also change over executions. Could this also retain the ordering?
### Basic Example
See https://github.com/litestar-org/litestar/issues/3058 for example.
### Drawbacks and Impact
_No response_
### Unresolved questions
_No response_
<!-- POLAR PLEDGE BADGE START -->
---
> [!NOTE]
> While we are open for sponsoring on [GitHub Sponsors](https://github.com/sponsors/litestar-org/) and
> [OpenCollective](https://opencollective.com/litestar), we also utilize [Polar.sh](https://polar.sh/) to engage in pledge-based sponsorship.
>
> Check out all issues funded or available for funding [on our Polar.sh dashboard](https://polar.sh/litestar-org)
> * If you would like to see an issue prioritized, make a pledge towards it!
> * We receive the pledge once the issue is completed & verified
> * This, along with engagement in the community, helps us know which features are a priority to our users.
<a href="https://polar.sh/litestar-org/litestar/issues/3059">
<picture>
<source media="(prefers-color-scheme: dark)" srcset="https://polar.sh/api/github/litestar-org/litestar/issues/3059/pledge.svg?darkmode=1">
<img alt="Fund with Polar" src="https://polar.sh/api/github/litestar-org/litestar/issues/3059/pledge.svg">
</picture>
</a>
<!-- POLAR PLEDGE BADGE END -->
| @guacs
This seeding is already being done though it is not exposed as a configurable value as seen [here](https://github.com/litestar-org/litestar/blob/f8d03194b8f541a4459c27fe51599526a73ee2ba/litestar/_openapi/schema_generation/examples.py#L27). I think it's a good idea to make this configurable as well.
If the examples being generated are still different, then it may be that the schemas are not being created in the same order each time which seems to be the case as @tuukkamustonen pointed out regarding the `components.schemas`. So, ensuring the schemas are generated in the same order for a given Litestar app should fix this as well.
@guacs Were you planning to work on this?
I started wondering _how_ the order of the elements could be random as since Python 3.7+ dicts retain insertion order and generating the schema should be deterministic?
But yeah, it does seem to generate some elements (in my long spec) in a bit different order each time.
@tuukkamustonen if you'd like to work on this, then feel free to do so.
I haven't looked into it yet, so I'm not sure just where the randomness is coming from.
I'm not sure whether this is what this issue is about, since it seems to be related to deterministic ordering (rather than defined ordering through the `Controller`), but it might be related, so I'll avoid opening a new one.
From a DX perspective I personally find it very helpful to have ordered routes in the Controller, that follow a certain logic:
- Progressive nesting (`GET /` comes before `GET /:id` and before `GET /:id/nested`)
- Logical actions ordering (`GET`, `POST`, `GET id`, `PATCH id`, `DELETE id`)
Example:
```python
class MyController(Controller):
tags = ["..."]
path = "/"
dependencies = {...}
@routes.get()
async def get_many(self):
...
@routes.post()
async def create(self, data: ...):
...
@routes.get("/{resource_id:uuid}")
async def get(self, resource_id: UUID):
...
@routes.patch("/{resource_id:uuid}")
async def update(self, resource_id: UUID):
...
@routes.delete("/{resource_id:uuid}")
async def delete(self, resource_id: UUID):
...
@routes.get("/{resource_id:uuid}/nested")
async def get_nested(self, resource_id: UUID):
...
```
Currently the ordering of the route definition at the Controller is not respected by the docs, so I end up having a Swagger that looks like this:
```
- GET /:id/nested/:nest_id/another
- POST /:id/nested
- GET /
- DELETE /:id
- POST /
```
Which I personally find very confusing since:
(1) It doesn't seem to follow a pre-defined logic (couldn't find any pattern for that when looking at the docs)
(2) It doesn't respect the logic that was defined on the controller
Not sure what might be causing it, I think this is generated on `_openapi/schema_generation/schema.py:SchemaCreator` maybe? Will be happy to help with some guidance, since it seems this has been refactored not long ago here: https://github.com/litestar-org/litestar/pull/2805.
@ccrvlh I'd say that's a different feature to this one so it'd be great if you could create an issue for that. I think it does follow a certain order which is GET, PUT, POST, DELETE etc. (I'm guessing the same order as seen [here](https://github.com/litestar-org/litestar/blob/3e5c179e714bb074bae13e02d10e2f3f51e24d5c/litestar/openapi/spec/path_item.py#L44)). However, this considers `/:id/nested` to have no relation to `/:id` or any of the other routes. The logic of this starts [here](https://github.com/litestar-org/litestar/blob/3e5c179e714bb074bae13e02d10e2f3f51e24d5c/litestar/_openapi/plugin.py#L37).
The difficulty here is that by the time the route reaches the OpenAPI logic, we don't know whether this was defined as part of a controller or not. So, if that order is to be maintained then we'd need to figure out a way to let the OpenAPIPlugin figure out the order the paths should be in. This would have to be done at the time we parse the Controller to create the individual route handlers. Even then a single path would consist of all the GET, PUT, POST etc. in that path but it should be possible to define that they should come in the following order from your example: `/`, `/:resource_id`, `/:resource_id/nested`.
@guacs thank you, opening a new issue [here](https://github.com/litestar-org/litestar/issues/3130#issue-2150259041)
@tuukkamustonen I'm not able to reproduce the issue actually. I tried with the following and the schema generated is always the same:
```python
from __future__ import annotations
from dataclasses import dataclass
from rich import print
from litestar import get, post
from litestar.openapi.config import OpenAPIConfig
from litestar.testing import create_test_client
@dataclass
class Foo:
foo: int
@dataclass
class Bar:
bar: str
@get("/foo")
async def get_foo() -> Foo:
return Foo(9)
@post("/foo")
async def post_foo(foo: Foo) -> None:
...
@get("/bar")
async def get_bar() -> Bar:
return Bar("bar")
with create_test_client([get_foo, get_bar, post_foo], openapi_config=OpenAPIConfig("foo", "voo", True)) as client:
resp = client.get("/schema/openapi.json")
print(resp.json())
```
@guacs Thanks for looking into it. Maybe it's something more exotic, or maybe I'm just wrong. I'll investigate and ping back (a bit later).
Okay, I'm able to reproduce it in _a really weird_ setup.
The random ordering occurs in `components` section of OpenAPI spec. Elements elsewhere (e.g. `paths`) seem to remain in same order.
It's related to use of `Controllers`, where the order of HTTP methods gets randomized by a use of `set()` in https://github.com/litestar-org/litestar/blob/main/litestar/utils/sequence.py#L22
Sets are not ordered, so that `unique` should return the handlers in whatever order, which leads to model registration in whatever order.
To reproduce, I had to separate the handlers _into a separate file_.
```py
# handlers.py
from dataclasses import dataclass
from litestar import get, post, Controller
from pydantic import BaseModel
@dataclass
class Payload:
foo: int
@dataclass
class Response(BaseModel):
bar: str
class FooController(Controller):
@post()
async def post(self, data: Payload) -> None: ...
@get()
async def get(self) -> Response: ...
# app.py
import json
from litestar import Litestar
from handlers import FooController
app = Litestar(route_handlers=[FooController])
print(json.dumps(app.openapi_schema.to_schema()["components"], indent=4))
```
(If I dump it all into a single file, the issue will not reproduce.)
When I run that once, the first time, it outputs the schema components in `Response`, `Payload` order. Run it again, and the order reverses. Run it again, and the order remains the same as the second time.
If you flush the `.pyc` cache (in `__pycache__`) and re-run it, the order is `Response`, `Payload` again. And then following invocations, with the cache have it reversed again.
I don't understand how exactly that bytecode caching impacts this, but it does, at least on my computer 🤯 Maybe the `.pyc` file is ordered somehow, and re-loading it up from there somehow leads to the `set()` always giving same ordering _in this case_. As only when running without cache the order is reversed. I don't know.
In any case, I believe that `set` messes it up. And there are other `set`s around, which might have an impact, too.
Does that sound plausible?
@tuukkamustonen thanks! I was able to reproduce it and I believe you're correct in that the use of `unique` is what's causing the difference in the order of the components and that this only happens when you use a `Controller` with multiple HTTP methods.
The use of `unique` to find the unique HTTP handlers results in `route.route_handlers` possibly having a different order of handlers each time which results in a different insertion order to `route.route_handler_map`. For creating the paths portion of the OpenAPI schema, we iterate over the `route_handler_map` which in turn causes a difference in the order of insertion of the generated schema into the `SchemaRegistry`.
I was thinking that we force the schemas to be generated in alphabetical order so that we should be able to get the same OpenAPI schema every time regardless of the order the paths (and their individual HTTP methods) are processed. WDYT?
The OpenAPI JSON spec is intended for machines and not humans, so whatever order is fine as long as it's consistent 🤔
Alphabetical ordering would make sense, as it would keep the spec unchanged when you refactor and move controllers/handlers around... on the other hand, if you instead refactor by renaming models in-place, then the spec would change 😄
I guess I would just go with what is simplest and fastest, though I don't see exact issues with alphabetical ordering, either 🤷🏻♂️ | 2024-03-05T13:46:59 |
litestar-org/litestar | 3,179 | litestar-org__litestar-3179 | [
"3178"
] | 1fb981da4b6171cd3fa348c9ffe1c575c5bc862f | diff --git a/litestar/middleware/cors.py b/litestar/middleware/cors.py
--- a/litestar/middleware/cors.py
+++ b/litestar/middleware/cors.py
@@ -70,6 +70,15 @@ async def wrapped_send(message: Message) -> None:
headers["Access-Control-Allow-Origin"] = origin
headers["Vary"] = "Origin"
+ # We don't want to overwrite this for preflight requests.
+ allow_headers = headers.get("Access-Control-Allow-Headers")
+ if not allow_headers and self.config.allow_headers:
+ headers["Access-Control-Allow-Headers"] = ", ".join(sorted(set(self.config.allow_headers)))
+
+ allow_methods = headers.get("Access-Control-Allow-Methods")
+ if not allow_methods and self.config.allow_methods:
+ headers["Access-Control-Allow-Methods"] = ", ".join(sorted(set(self.config.allow_methods)))
+
await send(message)
return wrapped_send
| diff --git a/tests/unit/test_middleware/test_cors_middleware.py b/tests/unit/test_middleware/test_cors_middleware.py
--- a/tests/unit/test_middleware/test_cors_middleware.py
+++ b/tests/unit/test_middleware/test_cors_middleware.py
@@ -1,4 +1,4 @@
-from typing import Any, Dict, List, Mapping, Optional, cast
+from typing import Any, Dict, List, Literal, Mapping, Optional, Union, cast
import pytest
@@ -7,6 +7,7 @@
from litestar.middleware.cors import CORSMiddleware
from litestar.status_codes import HTTP_200_OK, HTTP_404_NOT_FOUND
from litestar.testing import create_test_client
+from litestar.types.asgi_types import Method
def test_setting_cors_middleware() -> None:
@@ -38,16 +39,31 @@ def test_setting_cors_middleware() -> None:
@pytest.mark.parametrize("origin", [None, "http://www.example.com", "https://moishe.zuchmir.com"])
@pytest.mark.parametrize("allow_origins", ["*", "http://www.example.com", "https://moishe.zuchmir.com"])
@pytest.mark.parametrize("allow_credentials", [True, False])
[email protected]("expose_headers", ["X-First-Header", "SomeOtherHeader", "X-Second-Header"])
[email protected](
+ "expose_headers", [["x-first-header", "x-second-header", "x-third-header"], ["*"], ["x-first-header"]]
+)
[email protected](
+ "allow_headers", [["x-first-header", "x-second-header", "x-third-header"], ["*"], ["x-first-header"]]
+)
[email protected]("allow_methods", [["GET", "POST", "PUT", "DELETE"], ["GET", "POST"], ["GET"]])
def test_cors_simple_response(
- origin: Optional[str], allow_origins: List[str], allow_credentials: bool, expose_headers: List[str]
+ origin: Optional[str],
+ allow_origins: List[str],
+ allow_credentials: bool,
+ expose_headers: List[str],
+ allow_headers: List[str],
+ allow_methods: List[Union[Literal["*"], "Method"]],
) -> None:
@get("/")
def handler() -> Dict[str, str]:
return {"hello": "world"}
cors_config = CORSConfig(
- allow_origins=allow_origins, allow_credentials=allow_credentials, expose_headers=expose_headers
+ allow_origins=allow_origins,
+ allow_credentials=allow_credentials,
+ expose_headers=expose_headers,
+ allow_headers=allow_headers,
+ allow_methods=allow_methods,
)
with create_test_client(handler, cors_config=cors_config) as client:
@@ -58,6 +74,8 @@ def handler() -> Dict[str, str]:
assert cors_config.expose_headers == expose_headers
assert cors_config.allow_origins == allow_origins
assert cors_config.allow_credentials == allow_credentials
+ assert cors_config.allow_headers == allow_headers
+ assert cors_config.allow_methods == allow_methods
if origin:
if cors_config.is_allow_all_origins:
@@ -68,10 +86,20 @@ def handler() -> Dict[str, str]:
assert response.headers.get("Access-Control-Expose-Headers") == ", ".join(
sorted(set(cors_config.expose_headers))
)
+ if cors_config.allow_headers:
+ assert response.headers.get("Access-Control-Allow-Headers") == ", ".join(
+ sorted(set(cors_config.allow_headers))
+ )
+ if cors_config.allow_methods:
+ assert response.headers.get("Access-Control-Allow-Methods") == ", ".join(
+ sorted(set(cors_config.allow_methods))
+ )
else:
assert "Access-Control-Allow-Origin" not in response.headers
assert "Access-Control-Allow-Credentials" not in response.headers
assert "Access-Control-Expose-Headers" not in response.headers
+ assert "Access-Control-Allow-Headers" not in response.headers
+ assert "Access-Control-Allow-Methods" not in response.headers
@pytest.mark.parametrize("origin, should_apply_cors", (("http://www.example.com", True), (None, False)))
| Bug: CORS Middleware not setting all headers as per spec
### Description
Right now, there's only a handful of headers that are only being set for the preflight request. They must be set for both the preflight and actual request.
https://fetch.spec.whatwg.org/#http-responses
Only `Access-Control-Allow-Origin` is being set here.
https://github.com/litestar-org/litestar/blob/1fb981da4b6171cd3fa348c9ffe1c575c5bc862f/litestar/middleware/cors.py#L61-L73
Only `Access-Control-Allow-Credentials` and `Access-Control-Expose-Headers` get set here, and this is what the above code uses to update headers
https://github.com/litestar-org/litestar/blob/1fb981da4b6171cd3fa348c9ffe1c575c5bc862f/litestar/config/cors.py#L123-L136
This still doesn't account for:
- Access-Control-Allow-Methods
- Access-Control-Allow-Headers
which are only set on preflight, but should also be set to the actual request.
### Litestar Version
2.2.1
### Platform
- [X] Linux
- [ ] Mac
- [ ] Windows
- [ ] Other (Please specify in the description above)
<!-- POLAR PLEDGE BADGE START -->
---
> [!NOTE]
> While we are open for sponsoring on [GitHub Sponsors](https://github.com/sponsors/litestar-org/) and
> [OpenCollective](https://opencollective.com/litestar), we also utilize [Polar.sh](https://polar.sh/) to engage in pledge-based sponsorship.
>
> Check out all issues funded or available for funding [on our Polar.sh dashboard](https://polar.sh/litestar-org)
> * If you would like to see an issue prioritized, make a pledge towards it!
> * We receive the pledge once the issue is completed & verified
> * This, along with engagement in the community, helps us know which features are a priority to our users.
<a href="https://polar.sh/litestar-org/litestar/issues/3178">
<picture>
<source media="(prefers-color-scheme: dark)" srcset="https://polar.sh/api/github/litestar-org/litestar/issues/3178/pledge.svg?darkmode=1">
<img alt="Fund with Polar" src="https://polar.sh/api/github/litestar-org/litestar/issues/3178/pledge.svg">
</picture>
</a>
<!-- POLAR PLEDGE BADGE END -->
| 2024-03-07T18:52:32 |
|
litestar-org/litestar | 3,185 | litestar-org__litestar-3185 | [
"2954"
] | e763cea5e8a259695a2827c5ac3e568832c2445d | diff --git a/litestar/logging/config.py b/litestar/logging/config.py
--- a/litestar/logging/config.py
+++ b/litestar/logging/config.py
@@ -2,13 +2,12 @@
import sys
from abc import ABC, abstractmethod
-from dataclasses import asdict, dataclass, field
+from dataclasses import asdict, dataclass, field, fields
from importlib.util import find_spec
from logging import INFO
from typing import TYPE_CHECKING, Any, Callable, Literal, cast
from litestar.exceptions import ImproperlyConfiguredException, MissingDependencyException
-from litestar.serialization import encode_json
from litestar.serialization.msgspec_hooks import _msgspec_json_encoder
from litestar.utils.deprecation import deprecated
@@ -49,7 +48,20 @@
}
if sys.version_info >= (3, 12, 0):
- default_handlers["queue_listener"]["handlers"] = ["console"]
+ default_handlers["queue_listener"].update(
+ {
+ "class": "logging.handlers.QueueHandler",
+ "queue": {
+ "()": "queue.Queue",
+ "maxsize": -1,
+ },
+ "listener": "litestar.logging.standard.LoggingQueueListener",
+ "handlers": ["console"],
+ }
+ )
+
+ # do not format twice, the console handler will do the job
+ del default_handlers["queue_listener"]["formatter"]
default_picologging_handlers: dict[str, dict[str, Any]] = {
@@ -228,21 +240,25 @@ def configure(self) -> GetLogger:
A 'logging.getLogger' like function.
"""
- if "picologging" in str(encode_json(self.handlers)):
+ excluded_fields: tuple[str, ...]
+ if "picologging" in " ".join([handler["class"] for handler in self.handlers.values()]):
try:
from picologging import config, getLogger
except ImportError as e:
raise MissingDependencyException("picologging") from e
- values = {
- k: v
- for k, v in asdict(self).items()
- if v is not None and k not in ("incremental", "configure_root_logger")
- }
+ excluded_fields = ("incremental", "configure_root_logger")
else:
from logging import config, getLogger # type: ignore[no-redef, assignment]
- values = {k: v for k, v in asdict(self).items() if v is not None and k not in ("configure_root_logger",)}
+ excluded_fields = ("configure_root_logger",)
+
+ values = {
+ _field.name: getattr(self, _field.name)
+ for _field in fields(self)
+ if getattr(self, _field.name) is not None and _field.name not in excluded_fields
+ }
+
if not self.configure_root_logger:
values.pop("root")
config.dictConfig(values)
diff --git a/litestar/logging/standard.py b/litestar/logging/standard.py
--- a/litestar/logging/standard.py
+++ b/litestar/logging/standard.py
@@ -1,34 +1,47 @@
from __future__ import annotations
import atexit
-import sys
-from logging import StreamHandler
+from logging import Handler, LogRecord, StreamHandler
from logging.handlers import QueueHandler, QueueListener
from queue import Queue
from typing import Any
from litestar.logging._utils import resolve_handlers
-__all__ = ("QueueListenerHandler",)
+__all__ = ("LoggingQueueListener", "QueueListenerHandler")
-if sys.version_info < (3, 12):
+class LoggingQueueListener(QueueListener):
+ """Custom ``QueueListener`` which starts and stops the listening process."""
- class QueueListenerHandler(QueueHandler):
- """Configure queue listener and handler to support non-blocking logging configuration."""
+ def __init__(self, queue: Queue[LogRecord], *handlers: Handler, respect_handler_level: bool = False) -> None:
+ """Initialize ``LoggingQueueListener``.
- def __init__(self, handlers: list[Any] | None = None) -> None:
- """Initialize `?QueueListenerHandler`.
+ Args:
+ queue: The queue to send messages to
+ *handlers: A list of handlers which will handle entries placed on the queue
+ respect_handler_level: If ``respect_handler_level`` is ``True``, a handler's level is respected (compared with the level for the message) when deciding whether to pass messages to that handler
+ """
+ super().__init__(queue, *handlers, respect_handler_level=respect_handler_level)
+ self.start()
+ atexit.register(self.stop)
- Args:
- handlers: Optional 'ConvertingList'
- """
- super().__init__(Queue(-1))
- handlers = resolve_handlers(handlers) if handlers else [StreamHandler()]
- self.listener = QueueListener(self.queue, *handlers)
- self.listener.start()
- atexit.register(self.listener.stop)
+class QueueListenerHandler(QueueHandler):
+ """Configure queue listener and handler to support non-blocking logging configuration.
-else:
- QueueListenerHandler = QueueHandler
+ .. caution::
+
+ This handler doesn't work with Python >= 3.12 and ``logging.config.dictConfig``. It might
+ be deprecated in the future. Please use ``logging.QueueHandler`` instead.
+ """
+
+ def __init__(self, handlers: list[Any] | None = None) -> None:
+ """Initialize ``QueueListenerHandler``.
+
+ Args:
+ handlers: Optional 'ConvertingList'
+ """
+ super().__init__(Queue(-1))
+ handlers = resolve_handlers(handlers) if handlers else [StreamHandler()]
+ self.listener = LoggingQueueListener(self.queue, *handlers) # type: ignore[arg-type]
| diff --git a/tests/helpers.py b/tests/helpers.py
--- a/tests/helpers.py
+++ b/tests/helpers.py
@@ -1,10 +1,15 @@
from __future__ import annotations
+import atexit
import inspect
+import logging
import random
import sys
-from contextlib import AbstractContextManager
-from typing import Any, AsyncContextManager, Awaitable, ContextManager, TypeVar, cast, overload
+from contextlib import AbstractContextManager, contextmanager
+from typing import Any, AsyncContextManager, Awaitable, ContextManager, Generator, TypeVar, cast, overload
+
+import picologging
+from _pytest.logging import LogCaptureHandler, _LiveLoggingNullHandler
from litestar._openapi.schema_generation import SchemaCreator
from litestar._openapi.schema_generation.plugins import openapi_schema_plugins
@@ -28,6 +33,15 @@ def randbytes(n: int) -> bytes:
randbytes = RANDOM.randbytes
+if sys.version_info >= (3, 12):
+ getHandlerByName = logging.getHandlerByName
+else:
+ from logging import _handlers # type: ignore[attr-defined]
+
+ def getHandlerByName(name: str) -> Any:
+ return _handlers.get(name)
+
+
@overload
async def maybe_async(obj: Awaitable[T]) -> T: ...
@@ -66,3 +80,29 @@ def get_schema_for_field_definition(
if isinstance(result, Schema):
return result
return creator.schema_registry.from_reference(result).schema
+
+
+@contextmanager
+def cleanup_logging_impl() -> Generator:
+ # Reset root logger (`logging` module)
+ std_root_logger: logging.Logger = logging.getLogger()
+ for std_handler in std_root_logger.handlers:
+ # Don't interfere with PyTest handler config
+ if not isinstance(std_handler, (_LiveLoggingNullHandler, LogCaptureHandler)):
+ std_root_logger.removeHandler(std_handler)
+
+ # Reset root logger (`picologging` module)
+ pico_root_logger: picologging.Logger = picologging.getLogger()
+ for pico_handler in pico_root_logger.handlers:
+ pico_root_logger.removeHandler(pico_handler)
+
+ yield
+
+ # Stop queue_listener listener (mandatory for the 'logging' module with Python 3.12,
+ # else the test suite would hang on at the end of the tests and some tests would fail)
+ queue_listener_handler = getHandlerByName("queue_listener")
+ if queue_listener_handler and hasattr(queue_listener_handler, "listener"):
+ atexit.unregister(queue_listener_handler.listener.stop)
+ queue_listener_handler.listener.stop()
+ queue_listener_handler.close()
+ del queue_listener_handler
diff --git a/tests/unit/test_logging/test_logging_config.py b/tests/unit/test_logging/test_logging_config.py
--- a/tests/unit/test_logging/test_logging_config.py
+++ b/tests/unit/test_logging/test_logging_config.py
@@ -1,9 +1,12 @@
import logging
import sys
-from typing import TYPE_CHECKING, Any, Dict
+import time
+from logging.handlers import QueueHandler
+from typing import TYPE_CHECKING, Any, Dict, Generator, Optional
from unittest.mock import Mock, patch
import pytest
+from _pytest.logging import LogCaptureHandler, _LiveLoggingNullHandler
from litestar import Request, get
from litestar.exceptions import ImproperlyConfiguredException
@@ -12,9 +15,16 @@
from litestar.logging.standard import QueueListenerHandler as StandardQueueListenerHandler
from litestar.status_codes import HTTP_200_OK
from litestar.testing import create_test_client
+from tests.helpers import cleanup_logging_impl
if TYPE_CHECKING:
- from _pytest.logging import LogCaptureFixture
+ from _pytest.capture import CaptureFixture
+
+
[email protected](autouse=True)
+def cleanup_logging() -> Generator:
+ with cleanup_logging_impl():
+ yield
@pytest.mark.parametrize(
@@ -66,14 +76,45 @@ def test_dictconfig_startup(dict_config_class: str, handlers: Any) -> None:
assert dict_config_mock.called
-def test_standard_queue_listener_logger(caplog: "LogCaptureFixture") -> None:
- with caplog.at_level("INFO", logger="test_logger"):
- logger = logging.getLogger("test_logger")
- logger.info("Testing now!")
- assert "Testing now!" in caplog.text
- var = "test_var"
- logger.info("%s", var)
- assert var in caplog.text
+def test_standard_queue_listener_logger(capsys: "CaptureFixture[str]") -> None:
+ def wait_log_queue(queue: Any, sleep_time: float = 0.1, max_retries: int = 5) -> None:
+ retry = 0
+ while queue.qsize() > 0 and retry < max_retries:
+ retry += 1
+ time.sleep(sleep_time)
+
+ def assert_log(queue: Any, expected: str, count: Optional[int] = None) -> None:
+ wait_log_queue(queue)
+ log_output = capsys.readouterr().err.strip()
+ if count is not None:
+ assert len(log_output.split("\n")) == count
+ assert log_output == expected
+
+ with patch("litestar.logging.config.find_spec") as find_spec_mock:
+ find_spec_mock.return_value = False
+ get_logger = LoggingConfig(
+ formatters={"standard": {"format": "%(levelname)s :: %(name)s :: %(message)s"}},
+ loggers={
+ "test_logger": {
+ "level": "INFO",
+ "handlers": ["queue_listener"],
+ "propagate": False,
+ },
+ },
+ ).configure()
+
+ logger = get_logger("test_logger")
+ assert isinstance(logger, logging.Logger) # type: ignore[unreachable]
+
+ handler = logger.handlers[0] # type: ignore[unreachable]
+ assert isinstance(handler, QueueHandler if sys.version_info >= (3, 12, 0) else StandardQueueListenerHandler)
+
+ logger.info("Testing now!")
+ assert_log(handler.queue, expected="INFO :: test_logger :: Testing now!", count=1)
+
+ var = "test_var"
+ logger.info("%s", var)
+ assert_log(handler.queue, expected="INFO :: test_logger :: test_var", count=1)
@patch("picologging.config.dictConfig")
@@ -94,9 +135,10 @@ def test_get_logger_without_logging_config() -> None:
def test_get_default_logger() -> None:
with create_test_client(logging_config=LoggingConfig(handlers=default_handlers)) as client:
- assert isinstance(client.app.logger.handlers[0], StandardQueueListenerHandler)
+ expected_handler_class = QueueHandler if sys.version_info >= (3, 12, 0) else StandardQueueListenerHandler
+ assert isinstance(client.app.logger.handlers[0], expected_handler_class)
new_logger = client.app.get_logger()
- assert isinstance(new_logger.handlers[0], StandardQueueListenerHandler)
+ assert isinstance(new_logger.handlers[0], expected_handler_class)
def test_get_picologging_logger() -> None:
@@ -109,7 +151,7 @@ def test_get_picologging_logger() -> None:
@pytest.mark.parametrize(
"handlers, listener",
[
- [default_handlers, StandardQueueListenerHandler],
+ [default_handlers, QueueHandler if sys.version_info >= (3, 12, 0) else StandardQueueListenerHandler],
[default_picologging_handlers, PicologgingQueueListenerHandler],
],
)
@@ -133,7 +175,7 @@ def test_validation() -> None:
@pytest.mark.parametrize(
"handlers, listener",
[
- [default_handlers, StandardQueueListenerHandler],
+ [default_handlers, QueueHandler if sys.version_info >= (3, 12, 0) else StandardQueueListenerHandler],
[default_picologging_handlers, PicologgingQueueListenerHandler],
],
)
@@ -144,43 +186,36 @@ def test_root_logger(handlers: Any, listener: Any) -> None:
assert isinstance(root_logger.handlers[0], listener) # type: ignore[attr-defined]
[email protected](
- "handlers, listener",
- [
- [default_handlers, StandardQueueListenerHandler],
- [default_picologging_handlers, PicologgingQueueListenerHandler],
- ],
-)
-def test_root_logger_no_config(handlers: Any, listener: Any) -> None:
[email protected]("handlers", [default_handlers, default_picologging_handlers])
+def test_root_logger_no_config(handlers: Any) -> None:
logging_config = LoggingConfig(handlers=handlers, configure_root_logger=False)
get_logger = logging_config.configure()
root_logger = get_logger()
- for handler in root_logger.handlers: # type: ignore[attr-defined]
- root_logger.removeHandler(handler) # type: ignore[attr-defined]
- get_logger = logging_config.configure()
- root_logger = get_logger()
- if handlers["console"]["class"] == "logging.StreamHandler":
- assert not isinstance(root_logger.handlers[0], listener) # type: ignore[attr-defined]
+ if isinstance(root_logger, logging.Logger): # type: ignore[unreachable]
+ # pytest automatically configures some handlers
+ for handler in root_logger.handlers: # type: ignore[unreachable]
+ assert isinstance(handler, (_LiveLoggingNullHandler, LogCaptureHandler))
else:
- assert len(root_logger.handlers) < 1 # type: ignore[attr-defined]
+ assert len(root_logger.handlers) == 0 # type: ignore[attr-defined]
@pytest.mark.parametrize(
- "handlers, listener",
+ "handlers, expected_handler_class",
[
- pytest.param(
- default_handlers,
- StandardQueueListenerHandler,
- marks=pytest.mark.xfail(
- condition=sys.version_info >= (3, 12), reason="change to QueueHandler/QueueListener config in 3.12"
- ),
- ),
+ [default_handlers, QueueHandler if sys.version_info >= (3, 12, 0) else StandardQueueListenerHandler],
[default_picologging_handlers, PicologgingQueueListenerHandler],
],
)
-def test_customizing_handler(handlers: Any, listener: Any, monkeypatch: pytest.MonkeyPatch) -> None:
- monkeypatch.setitem(handlers["queue_listener"], "handlers", ["cfg://handlers.console"])
- logging_config = LoggingConfig(handlers=handlers)
+def test_customizing_handler(handlers: Any, expected_handler_class: Any, monkeypatch: pytest.MonkeyPatch) -> None:
+ log_format = "%(levelname)s :: %(name)s :: %(message)s"
+ formatters = {"standard": {"format": log_format}}
+ logging_config = LoggingConfig(formatters=formatters, handlers=handlers)
get_logger = logging_config.configure()
root_logger = get_logger()
- assert isinstance(root_logger.handlers[0], listener) # type: ignore[attr-defined]
+ root_logger_handler = root_logger.handlers[0] # type: ignore[attr-defined]
+ assert isinstance(root_logger_handler, expected_handler_class)
+ if type(root_logger_handler) is QueueHandler:
+ formatter = root_logger_handler.listener.handlers[0].formatter # type: ignore[attr-defined]
+ else:
+ formatter = root_logger_handler.formatter
+ assert formatter._fmt == log_format
diff --git a/tests/unit/test_middleware/test_exception_handler_middleware.py b/tests/unit/test_middleware/test_exception_handler_middleware.py
--- a/tests/unit/test_middleware/test_exception_handler_middleware.py
+++ b/tests/unit/test_middleware/test_exception_handler_middleware.py
@@ -1,5 +1,5 @@
from inspect import getinnerframes
-from typing import TYPE_CHECKING, Any, Callable, Optional
+from typing import TYPE_CHECKING, Any, Callable, Generator, Optional
import pytest
from _pytest.capture import CaptureFixture
@@ -17,6 +17,7 @@
from litestar.testing import TestClient, create_test_client
from litestar.types import ExceptionHandlersMap
from litestar.types.asgi_types import HTTPScope
+from tests.helpers import cleanup_logging_impl
if TYPE_CHECKING:
from _pytest.logging import LogCaptureFixture
@@ -29,6 +30,12 @@ async def dummy_app(scope: Any, receive: Any, send: Any) -> None:
return None
[email protected](autouse=True)
+def cleanup_logging() -> Generator:
+ with cleanup_logging_impl():
+ yield
+
+
@pytest.fixture()
def app() -> Litestar:
return Litestar()
diff --git a/tests/unit/test_middleware/test_logging_middleware.py b/tests/unit/test_middleware/test_logging_middleware.py
--- a/tests/unit/test_middleware/test_logging_middleware.py
+++ b/tests/unit/test_middleware/test_logging_middleware.py
@@ -1,5 +1,5 @@
from logging import INFO
-from typing import TYPE_CHECKING, Any, Dict
+from typing import TYPE_CHECKING, Any, Dict, Generator
import pytest
from structlog.testing import capture_logs
@@ -18,6 +18,7 @@
from litestar.params import Body
from litestar.status_codes import HTTP_200_OK, HTTP_201_CREATED
from litestar.testing import create_test_client
+from tests.helpers import cleanup_logging_impl
if TYPE_CHECKING:
from _pytest.logging import LogCaptureFixture
@@ -30,6 +31,12 @@
pytestmark = pytest.mark.usefixtures("reset_httpx_logging")
[email protected](autouse=True)
+def cleanup_logging() -> Generator:
+ with cleanup_logging_impl():
+ yield
+
+
@pytest.fixture
def handler() -> HTTPRouteHandler:
@get("/")
| Bug: Logs and Stack traces are not visible OOTB in Python3.12
### Description
Logging (on routes) and also stack trace (when using `litestar.testing.create_test_client` in debug mode) don't appear to be printed on the terminal when executed in Python 3.12. Running the minimal example in Python 3.8, I can see contents of the "Logs" section in my terminal; both a `WARNING` and an `ERROR` log, along with a stack trace. However, neither is visible when run on Python 3.12. There is one workaround (that I am aware of) shared by @cofin in the Discord server but (IMO) these functionalities should simply "just work".
> @cofin's example modified to suit the example code (the actual MCVE is under the MCVE section)
This at least makes one of the log ("WARNING" log) show up on 3.12, but no "ERROR" log and stack trace.
```python
import logging
from litestar import get, Request
from litestar.logging.config import LoggingConfig
from litestar.testing import create_test_client
@get()
async def evil_route(request: Request) -> str:
request.logger.warning("you can't see me")
1 / 0
return "so, you will never catch me"
with create_test_client(
[evil_route],
logging_config=LoggingConfig(
root={"level": logging.getLevelName(logging.INFO), "handlers": ["console"]},
formatters={
"standard": {
"format": "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
}
},
),
) as client:
response = client.get("")
assert response.status_code == 500
```
PS: This may probably be a duplicate issue, so feel free to close as one.
### URL to code causing the issue
_No response_
### MCVE
```python
from litestar import get, Request
from litestar.testing import create_test_client
@get()
async def evil_route(request: Request) -> str:
request.logger.warning("you can't see me")
1 / 0
return "so, you will never catch me"
with create_test_client([evil_route]) as client:
response = client.get("")
assert response.status_code == 500
```
### Steps to reproduce
```bash
1. Save as "some_python_file.py"
2. Run `python some_python_file.py`
4. See error (actually you can't :p that is the error)
```
### Screenshots
```bash
""
```
### Logs
```bash
WARNING - ,-555869153 - root - http - you can't see me
ERROR - ,-555869152 - litestar - middleware - exception raised on http connection to route /
Traceback (most recent call last):
File "litestar/litestar/middleware/exceptions/middleware.py", line 192, in __call__
await self.app(scope, receive, send)
File "litestar/litestar/routes/http.py", line 82, in handle
response = await self._get_response_for_request(
File "litestar/litestar/routes/http.py", line 134, in _get_response_for_request
return await self._call_handler_function(
File "litestar/litestar/routes/http.py", line 154, in _call_handler_function
response_data, cleanup_group = await self._get_response_data(
File "litestar/litestar/routes/http.py", line 207, in _get_response_data
data = await route_handler.fn(**parsed_kwargs)
File "litestar/app.py", line 7, in evil_route
1 / 0
ZeroDivisionError: division by zero
Traceback (most recent call last):
File "litestar/litestar/middleware/exceptions/middleware.py", line 192, in __call__
await self.app(scope, receive, send)
File "litestar/litestar/routes/http.py", line 82, in handle
response = await self._get_response_for_request(
File "litestar/litestar/routes/http.py", line 134, in _get_response_for_request
return await self._call_handler_function(
File "litestar/litestar/routes/http.py", line 154, in _call_handler_function
response_data, cleanup_group = await self._get_response_data(
File "litestar/litestar/routes/http.py", line 207, in _get_response_data
data = await route_handler.fn(**parsed_kwargs)
File "litestar/app.py", line 7, in evil_route
1 / 0
ZeroDivisionError: division by zero
```
### Litestar Version
804f0151fc9b03e81f6cb6cbb38a9dfd395ead79 (main as of issue creation)
### Platform
- [] Linux
- [X] Mac (sorry, I forgot I had to check this)
- [ ] Windows
- [ ] Other (Please specify in the description above)
<!-- POLAR PLEDGE BADGE START -->
---
> [!NOTE]
> While we are open for sponsoring on [GitHub Sponsors](https://github.com/sponsors/litestar-org/) and
> [OpenCollective](https://opencollective.com/litestar), we also utilize [Polar.sh](https://polar.sh/) to engage in pledge-based sponsorship.
>
> Check out all issues funded or available for funding [on our Polar.sh dashboard](https://polar.sh/litestar-org)
> * If you would like to see an issue prioritized, make a pledge towards it!
> * We receive the pledge once the issue is completed & verified
> * This, along with engagement in the community, helps us know which features are a priority to our users.
<a href="https://polar.sh/litestar-org/litestar/issues/2954">
<picture>
<source media="(prefers-color-scheme: dark)" srcset="https://polar.sh/api/github/litestar-org/litestar/issues/2954/pledge.svg?darkmode=1">
<img alt="Fund with Polar" src="https://polar.sh/api/github/litestar-org/litestar/issues/2954/pledge.svg">
</picture>
</a>
<!-- POLAR PLEDGE BADGE END -->
| Hi @Alc-Alc I've tried reproducing using the MCVE on `804f015`, but I'm still somehow able to get the entirety of the logs. I've tried using Python 3.12.0 and 3.12.1.
```
(.venv) ➜ litestar git:(v2.5.0) ✗ python -V
Python 3.12.1
(.venv) ➜ litestar git:(v2.5.0) ✗ python some_python_file.py
WARNING - �3
g�U,-144329509 - root - http - you can't see me
ERROR - �3
g�U,-144329507 - litestar - middleware - exception raised on http connection to route /
Traceback (most recent call last):
...
```
> Hi @Alc-Alc I've tried reproducing using the MCVE on `804f015`, but I'm still somehow able to get the entirety of the logs. I've tried using Python 3.12.0 and 3.12.1.
>
> ```
> (.venv) ➜ litestar git:(v2.5.0) ✗ python -V
> Python 3.12.1
> (.venv) ➜ litestar git:(v2.5.0) ✗ python some_python_file.py
> WARNING - �3
> g�U,-144329509 - root - http - you can't see me
> ERROR - �3
> g�U,-144329507 - litestar - middleware - exception raised on http connection to route /
>
> Traceback (most recent call last):
> ...
> ```
@FergusMok thanks for checking, I created a new venv and did `pip install git+https://github.com/litestar-org/litestar.git@804f0151fc9b03e81f6cb6cbb38a9dfd395ead79` and I can still observe the same issue I have described (no logs, nothing). I am on 3.12.0
on 3.12.1
```
❯ python a.py
2024-01-11 09:39:41,301 - root - WARNING - you can't see me 3.12.1 at 09:39:37
2024-01-11 09:39:41,304 - httpx - INFO - HTTP Request: GET http://testserver.local "HTTP/1.1 500 Internal Server Error"
```
FWIW, I am on a Mac (edited the issue description, my apologies)
The problem seems to be with the `queue_listener` handler. Using the `console` one works.
Tested with Python 3.12.2. Former Python 3.12 versions won't work due to https://github.com/python/cpython/issues/111615.
I'm not familiar with the queue handlers, but I'll try to dig a little bit more.
> The problem seems to be with the `queue_listener` handler. Using the `console` one works.
>
> Tested with Python 3.12.2. Former Python 3.12 versions won't work due to [python/cpython#111615](https://github.com/python/cpython/issues/111615).
>
> I'm not familiar with the queue handlers, but I'll try to dig a little bit more.
thanks for taking a look at this, it indeed works with `console` as shown in the "workaround" code of the issue description | 2024-03-09T21:15:52 |
litestar-org/litestar | 3,196 | litestar-org__litestar-3196 | [
"3047"
] | 5bbe16803092636fd7548fb7df4273a3896e05ba | diff --git a/litestar/openapi/controller.py b/litestar/openapi/controller.py
--- a/litestar/openapi/controller.py
+++ b/litestar/openapi/controller.py
@@ -1,7 +1,7 @@
from __future__ import annotations
from functools import cached_property
-from typing import TYPE_CHECKING, Any, Callable, Literal
+from typing import TYPE_CHECKING, Any, Callable, Final, Literal
from yaml import dump as dump_yaml
@@ -22,6 +22,8 @@
from litestar.connection.request import Request
from litestar.openapi.spec.open_api import OpenAPI
+_OPENAPI_JSON_ROUTER_NAME: Final = "__litestar_openapi_json"
+
class OpenAPIController(Controller):
"""Controller for OpenAPI endpoints."""
@@ -171,7 +173,13 @@ def retrieve_schema_yaml(self, request: Request[Any, Any, Any]) -> ASGIResponse:
return ASGIResponse(body=self._dumped_yaml_schema, media_type=OpenAPIMediaType.OPENAPI_YAML)
return ASGIResponse(body=b"", status_code=HTTP_404_NOT_FOUND, media_type=MediaType.HTML)
- @get(path="/openapi.json", media_type=OpenAPIMediaType.OPENAPI_JSON, include_in_schema=False, sync_to_thread=False)
+ @get(
+ path="/openapi.json",
+ media_type=OpenAPIMediaType.OPENAPI_JSON,
+ include_in_schema=False,
+ sync_to_thread=False,
+ name=_OPENAPI_JSON_ROUTER_NAME,
+ )
def retrieve_schema_json(self, request: Request[Any, Any, Any]) -> ASGIResponse:
"""Return the OpenAPI schema as JSON with an ``application/vnd.oai.openapi+json`` Content-Type header.
@@ -457,10 +465,10 @@ def render_stoplight_elements(self, request: Request[Any, Any, Any]) -> bytes:
</head>
"""
- body = """
+ body = f"""
<body>
<elements-api
- apiDescriptionUrl="openapi.json"
+ apiDescriptionUrl="{request.app.route_reverse(_OPENAPI_JSON_ROUTER_NAME)}"
router="hash"
layout="sidebar"
/>
@@ -489,9 +497,9 @@ def render_rapidoc(self, request: Request[Any, Any, Any]) -> bytes: # pragma: n
</head>
"""
- body = """
+ body = f"""
<body>
- <rapi-doc spec-url="openapi.json" />
+ <rapi-doc spec-url="{request.app.route_reverse(_OPENAPI_JSON_ROUTER_NAME)}" />
</body>
"""
| Bug: RapiDoc and Stoplight Elements fail when used as root schema
### Description
Setting `root_schema_site="rapidoc"` or `root_schema_site="elements"` in the `OpenAPIConfig` results in an error when visiting `/schema`. Both `/schema/rapidoc` and `/schema/elements` work as expected.
My guess is that this might be related to the fact that both RapiDoc and StopLight elements [are configured with `spec-url`](https://github.com/litestar-org/litestar/blob/747fb90772721e7867f2e1ff2da6636f8442a6fb/litestar/openapi/controller.py#L494) whereas Swagger UI and Redoc HTML has the OpenAPI JSON inlined via a call to `OpenAPIController._get_schema_as_json`.
### MCVE
```python
from litestar import Litestar, get
from litestar.openapi import OpenAPIConfig
@get("/")
async def hello_world() -> str:
return "Hello world!"
app = Litestar(
route_handlers=[hello_world],
openapi_config=OpenAPIConfig(
title="Test app", version="0.0.1", root_schema_site="rapidoc"
),
)
```
### Steps to reproduce
```text
1. Run app with `litestar run`
2. Visit `localhost:8000/schema` in your browser.
```
### Screenshots
<img width="859" alt="image" src="https://github.com/litestar-org/litestar/assets/15220906/c962e9b0-acba-40bd-bf29-e0603756d91c">
### Litestar Version
2.5.0
### Platform
- [X] Linux
- [X] Mac
- [ ] Windows
- [ ] Other (Please specify in the description above)
| Can you update to the latest version (`pip install -U litestar==2.5.3`) and confirm this issue persists?
Upgraded, and unfortunately the issue persists with `2.5.3` installed.
Using the provided MCVE I don't seem to be able to reproduce :\
```
~ via 🐋 colima via pyenv
➜ ntp -v 3047
Directory /tmp/testing/3047 created and switched to.
Virtual environment created and activated.
➜ vim mcve.py
➜ pip install litestar[standard] && litestar run --reload --reload-dir . --debug
➜ browse 127.0.0.1/schema
```
result:
<img width="1201" alt="image" src="https://github.com/litestar-org/litestar/assets/45884264/a39060ad-0e03-4d95-8172-791564cc71bd">
and same for Stoplight.
tested on release in 2.5 - 2.6.
Am i doing something dumb here?
Wow, that's really strange... I just tested again at my end with a clean virtual environment and the above example and saw the same issue.
```
pip install -U "litestar[standard]"
litestar run --reload --reload-dir . --debug
open http://127.0.0.1:8000/schema
```
The request is being received by the backend as expected
```
INFO: 127.0.0.1:55586 - "GET /schema HTTP/1.1" 200 OK
INFO: 127.0.0.1:55586 - "GET /openapi.json HTTP/1.1" 404 Not Found
ERROR - 2024-02-11 10:46:25,461 - litestar - config - exception raised on http connection to route /openapi.json
```
Issue does appear to be `/openapi.json` route as if I visit `/schema/rapidoc` instead I get
```
INFO: 127.0.0.1:55628 - "GET /schema/rapidoc HTTP/1.1" 200 OK
INFO: 127.0.0.1:55628 - "GET /schema/openapi.json HTTP/1.1" 200 OK
```
I tried private windows and force refreshing the page but always the same result
<img width="864" alt="image" src="https://github.com/litestar-org/litestar/assets/15220906/6aae1341-07b9-453c-9883-46ed004bc1be">
In case it helps I tested just now with Python 3.11.4 and this is the result of `pip freeze`.
[pip-freeze.txt](https://github.com/litestar-org/litestar/files/14232018/pip-freeze.txt)
No idea what's going on but I'm also very open to the possibility that I'm doing something dumb on this end 😅
@tcbegley I also can't reproduce this. Are you sure that you're running the correct application file?
Hey @provinzkraut, thanks for also looking into this.
I noticed from @JacobCoffee's screenshot that he has `/schema/` in his address bar whereas I have `/schema`. Sure enough if I visit `/schema/` then I don't get the error. By contrast `/schema/rapidoc` works but `/schema/rapidoc/` does not. I guess an issue with relative paths resolving to the wrong place? Ditto for StopLight Elements. Are you able to reproduce by visiting `/schema`?
To answer your original question, definitely running the right application file. To double check I added
```python
if __name__ == "__main__":
import uvicorn
uvicorn.run(app)
```
to the bottom of the above example and ran `python app.py` and observed the same result.
Yes, I was able to reproduce this with the help of @Alc-Alc who came to the same conclusion as you.
I'm not yet sure if this is an upstream issue or a bug on our side, but he provided some additional insight:
```
INFO: 127.0.0.1:49743 - "GET /schema HTTP/1.1" 200 OK
INFO: 127.0.0.1:49743 - "GET /openapi.json HTTP/1.1" 404 Not Found
INFO: 127.0.0.1:49743 - "GET /schema HTTP/1.1" 200 OK
INFO: 127.0.0.1:49743 - "GET /schema/openapi.json HTTP/1.1" 200 OK
```
these are the paths requested with/without the trailing slash, so something in there is not building them correctly. | 2024-03-13T11:54:25 |
|
litestar-org/litestar | 3,204 | litestar-org__litestar-3204 | [
"3022"
] | c1c9f53ca9d5786468a50642ff38524709f5c380 | diff --git a/litestar/_openapi/schema_generation/schema.py b/litestar/_openapi/schema_generation/schema.py
--- a/litestar/_openapi/schema_generation/schema.py
+++ b/litestar/_openapi/schema_generation/schema.py
@@ -50,7 +50,7 @@
from litestar.exceptions import ImproperlyConfiguredException
from litestar.openapi.spec.enums import OpenAPIFormat, OpenAPIType
from litestar.openapi.spec.schema import Schema, SchemaDataContainer
-from litestar.params import BodyKwarg, ParameterKwarg
+from litestar.params import BodyKwarg, KwargDefinition, ParameterKwarg
from litestar.plugins import OpenAPISchemaPlugin
from litestar.types import Empty
from litestar.types.builtin_types import NoneType
@@ -569,6 +569,14 @@ def process_schema_result(self, field: FieldDefinition, schema: Schema) -> Schem
if getattr(schema, schema_key, None) is None:
setattr(schema, schema_key, value)
+ if isinstance(field.kwarg_definition, KwargDefinition) and (extra := field.kwarg_definition.schema_extra):
+ for schema_key, value in extra.items():
+ if not hasattr(schema, schema_key):
+ raise ValueError(
+ f"`schema_extra` declares key `{schema_key}` which does not exist in `Schema` object"
+ )
+ setattr(schema, schema_key, value)
+
if not schema.examples and self.generate_examples:
from litestar._openapi.schema_generation.examples import create_examples_for_field
diff --git a/litestar/params.py b/litestar/params.py
--- a/litestar/params.py
+++ b/litestar/params.py
@@ -112,6 +112,13 @@ class KwargDefinition:
"""A sequence of valid values."""
read_only: bool | None = field(default=None)
"""A boolean flag dictating whether this parameter is read only."""
+ schema_extra: dict[str, Any] | None = field(default=None)
+ """Extensions to the generated schema.
+
+ If set, will overwrite the matching fields in the generated schema.
+
+ .. versionadded:: 2.8.0
+ """
@property
def is_constrained(self) -> bool:
@@ -187,6 +194,7 @@ def Parameter(
query: str | None = None,
required: bool | None = None,
title: str | None = None,
+ schema_extra: dict[str, Any] | None = None,
) -> Any:
"""Create an extended parameter kwarg definition.
@@ -227,6 +235,10 @@ def Parameter(
required: A boolean flag dictating whether this parameter is required.
If set to False, None values will be allowed. Defaults to True.
title: String value used in the title section of the OpenAPI schema for the given parameter.
+ schema_extra: Extensions to the generated schema. If set, will overwrite the matching fields in the generated
+ schema.
+
+ .. versionadded:: 2.8.0
"""
return ParameterKwarg(
annotation=annotation,
@@ -251,6 +263,7 @@ def Parameter(
min_length=min_length,
max_length=max_length,
pattern=pattern,
+ schema_extra=schema_extra,
)
@@ -294,6 +307,7 @@ def Body(
multiple_of: float | None = None,
pattern: str | None = None,
title: str | None = None,
+ schema_extra: dict[str, Any] | None = None,
) -> Any:
"""Create an extended request body kwarg definition.
@@ -331,6 +345,10 @@ def Body(
pattern: A string representing a regex against which the given string will be matched.
Equivalent to pattern in the OpenAPI specification.
title: String value used in the title section of the OpenAPI schema for the given parameter.
+ schema_extra: Extensions to the generated schema. If set, will overwrite the matching fields in the generated
+ schema.
+
+ .. versionadded:: 2.8.0
"""
return BodyKwarg(
media_type=media_type,
@@ -352,6 +370,7 @@ def Body(
max_length=max_length,
pattern=pattern,
multipart_form_part_limit=multipart_form_part_limit,
+ schema_extra=schema_extra,
)
| diff --git a/tests/unit/test_openapi/test_parameters.py b/tests/unit/test_openapi/test_parameters.py
--- a/tests/unit/test_openapi/test_parameters.py
+++ b/tests/unit/test_openapi/test_parameters.py
@@ -1,4 +1,4 @@
-from typing import TYPE_CHECKING, List, Optional, Type, cast
+from typing import TYPE_CHECKING, Any, List, Optional, Type, cast
from uuid import UUID
import pytest
@@ -324,6 +324,46 @@ async def index(
}
+def test_parameter_schema_extra() -> None:
+ @get()
+ async def handler(
+ query1: Annotated[
+ str,
+ Parameter(
+ schema_extra={
+ "schema_not": Schema(
+ any_of=[
+ Schema(type=OpenAPIType.STRING, pattern=r"^somePrefix:.*$"),
+ Schema(type=OpenAPIType.STRING, enum=["denied", "values"]),
+ ]
+ ),
+ }
+ ),
+ ],
+ ) -> Any:
+ return query1
+
+ @get()
+ async def error_handler(query1: Annotated[str, Parameter(schema_extra={"invalid": "dummy"})]) -> Any:
+ return query1
+
+ # Success
+ app = Litestar([handler])
+ schema = app.openapi_schema.to_schema()
+ assert schema["paths"]["/"]["get"]["parameters"][0]["schema"]["not"] == {
+ "anyOf": [
+ {"type": "string", "pattern": r"^somePrefix:.*$"},
+ {"type": "string", "enum": ["denied", "values"]},
+ ]
+ }
+
+ # Attempt to pass invalid key
+ app = Litestar([error_handler])
+ with pytest.raises(ValueError) as e:
+ app.openapi_schema
+ assert str(e.value).startswith("`schema_extra` declares key")
+
+
def test_uuid_path_description_generation() -> None:
# https://github.com/litestar-org/litestar/issues/2967
@get("str/{id:str}")
diff --git a/tests/unit/test_openapi/test_request_body.py b/tests/unit/test_openapi/test_request_body.py
--- a/tests/unit/test_openapi/test_request_body.py
+++ b/tests/unit/test_openapi/test_request_body.py
@@ -3,8 +3,9 @@
from unittest.mock import ANY, MagicMock
import pytest
+from typing_extensions import Annotated
-from litestar import Controller, Litestar, post
+from litestar import Controller, Litestar, get, post
from litestar._openapi.datastructures import OpenAPIContext
from litestar._openapi.request_body import create_request_body
from litestar.datastructures.upload_file import UploadFile
@@ -56,6 +57,31 @@ def test_create_request_body(person_controller: Type[Controller], create_request
assert request_body
+def test_request_body_schema_extra() -> None:
+ @dataclass
+ class RequestBody:
+ foo: str
+
+ @get()
+ async def handler(
+ body1: Annotated[
+ RequestBody,
+ Body(
+ title="Default title",
+ schema_extra={
+ "title": "Overridden title",
+ },
+ ),
+ ],
+ ) -> Any:
+ return body1
+
+ app = Litestar([handler])
+ schema = app.openapi_schema.to_schema()
+ resp = next(iter(schema["components"]["schemas"].values()))
+ assert resp["title"] == "Overridden title"
+
+
def test_upload_single_file_schema_generation() -> None:
@post(path="/file-upload")
async def handle_file_upload(
| Enhancement: Allow `Parameter(schema_extra=...)` / `ResponseSpec(schema_extra=...)`
### Summary
Pydantic provides `Field(json_schema_extra=...)` which allows to do something like:
```py
def forbid_internal_labels(value):
if re.match(r"^reserved-for-internal-use:.*$", value):
raise ValueError(...)
class Foo(BaseModel):
label: Annotated[str, AfterValidator(forbid_internal_labels), Field(json_schema_extra={
"not": {
{
"type": "string",
"pattern": "^reserved-for-internal-use:.*$"
},
}
})]
```
This would generate:
```json
"type": "string",
"not": {
{
"type": "string",
"pattern": "^reserved-for-internal-use:.*$"
},
}
```
It doesn't actually _add/do any validation_, it merely impacts the generated JSON schema.
Similar mechanism is needed for `Parameter`, to inject _discriminators_ (e.g. `anyOf`, `not`) into the OpenAPI spec (although the validation is decoupled from the schema declaration).
The builtin generation uses `oneOf` for union types (IIRC Pydantic uses `anyOf` for those). But I don't think it's possible to declare something like above.
Workarounds at this point would be:
- To adjust via custom `Operation` class (did not try but I assume it could work)
- To adjust the OpenAPI generation on a more global level (by extending `OpenAPIPlugin`?)
Both are quite ugly hacks, while something like this should be supported out-of-the-box.
### Basic Example
_No response_
### Drawbacks and Impact
_No response_
### Unresolved questions
_No response_
<!-- POLAR PLEDGE BADGE START -->
---
> [!NOTE]
> While we are open for sponsoring on [GitHub Sponsors](https://github.com/sponsors/litestar-org/) and
> [OpenCollective](https://opencollective.com/litestar), we also utilize [Polar.sh](https://polar.sh/) to engage in pledge-based sponsorship.
>
> Check out all issues funded or available for funding [on our Polar.sh dashboard](https://polar.sh/litestar-org)
> * If you would like to see an issue prioritized, make a pledge towards it!
> * We receive the pledge once the issue is completed & verified
> * This, along with engagement in the community, helps us know which features are a priority to our users.
<a href="https://polar.sh/litestar-org/litestar/issues/3022">
<picture>
<source media="(prefers-color-scheme: dark)" srcset="https://polar.sh/api/github/litestar-org/litestar/issues/3022/pledge.svg?darkmode=1">
<img alt="Fund with Polar" src="https://polar.sh/api/github/litestar-org/litestar/issues/3022/pledge.svg">
</picture>
</a>
<!-- POLAR PLEDGE BADGE END -->
| As mentioned in https://github.com/litestar-org/litestar/issues/3018#issuecomment-1924002760 `Parameter(schema_extra=...)` could be supported, and that's what this ticket is about.
> Having an independent schema_extra for Parameter seems fine though.
I guess `ResponseSpec(schema_extra=...)` should exist, too. | 2024-03-14T17:07:12 |
litestar-org/litestar | 3,237 | litestar-org__litestar-3237 | [
"3232"
] | 25c3fc4f7a1ddc7f405c86440a984e3d4818cb49 | diff --git a/litestar/dto/_backend.py b/litestar/dto/_backend.py
--- a/litestar/dto/_backend.py
+++ b/litestar/dto/_backend.py
@@ -750,14 +750,17 @@ def _create_struct_field_meta_for_field_definition(field_definition: TransferDTO
return None
return msgspec.Meta(
- gt=kwarg_definition.gt,
+ description=kwarg_definition.description,
+ examples=[e.value for e in kwarg_definition.examples or []],
ge=kwarg_definition.ge,
- lt=kwarg_definition.lt,
+ gt=kwarg_definition.gt,
le=kwarg_definition.le,
- multiple_of=kwarg_definition.multiple_of,
- min_length=kwarg_definition.min_length if not field_definition.is_partial else None,
+ lt=kwarg_definition.lt,
max_length=kwarg_definition.max_length if not field_definition.is_partial else None,
+ min_length=kwarg_definition.min_length if not field_definition.is_partial else None,
+ multiple_of=kwarg_definition.multiple_of,
pattern=kwarg_definition.pattern,
+ title=kwarg_definition.title,
)
| diff --git a/tests/unit/test_contrib/test_pydantic/test_dto.py b/tests/unit/test_contrib/test_pydantic/test_dto.py
--- a/tests/unit/test_contrib/test_pydantic/test_dto.py
+++ b/tests/unit/test_contrib/test_pydantic/test_dto.py
@@ -1,6 +1,6 @@
from __future__ import annotations
-from typing import TYPE_CHECKING, Optional
+from typing import TYPE_CHECKING, Optional, cast
import pytest
from pydantic import v1 as pydantic_v1
@@ -13,8 +13,13 @@
from litestar.typing import FieldDefinition
if TYPE_CHECKING:
+ from collections.abc import Callable
+ from types import ModuleType
+
from pydantic import BaseModel
+ from litestar import Litestar
+
def test_schema_required_fields_with_pydantic_dto(
use_experimental_dto_backend: bool, base_model: type[BaseModel]
@@ -62,3 +67,36 @@ class Model(pydantic_v1.BaseModel):
dto_type = PydanticDTO[Model]
assert dto_type.detect_nested_field(FieldDefinition.from_annotation(Model)) is True
assert dto_type.detect_nested_field(FieldDefinition.from_annotation(int)) is False
+
+
+def test_pydantic_field_descriptions(create_module: Callable[[str], ModuleType]) -> None:
+ module = create_module(
+ """
+from litestar import Litestar, get
+from litestar.contrib.pydantic import PydanticDTO
+from litestar.dto import DTOConfig
+from pydantic import BaseModel, Field
+from typing_extensions import Annotated
+
+class User(BaseModel):
+ id: Annotated[
+ int,
+ Field(description="This is a test (id description)."),
+ ]
+
+class DataCollectionDTO(PydanticDTO[User]):
+ config = DTOConfig(rename_strategy="camel")
+
+@get("/user", return_dto=DataCollectionDTO, sync_to_thread=False)
+def get_user() -> User:
+ return User(id=user_id)
+
+app = Litestar(route_handlers=[get_user])
+ """
+ )
+ app = cast("Litestar", module.app)
+ schema = app.openapi_schema
+ assert schema.components.schemas is not None
+ component_schema = schema.components.schemas["GetUserUserResponseBody"]
+ assert component_schema.properties is not None
+ assert component_schema.properties["id"].description == "This is a test (id description)."
diff --git a/tests/unit/test_dto/test_factory/test_backends/test_backends.py b/tests/unit/test_dto/test_factory/test_backends/test_backends.py
--- a/tests/unit/test_dto/test_factory/test_backends/test_backends.py
+++ b/tests/unit/test_dto/test_factory/test_backends/test_backends.py
@@ -7,18 +7,20 @@
from unittest.mock import MagicMock
import pytest
-from msgspec import Struct, to_builtins
+from msgspec import Meta, Struct, to_builtins
from litestar import Litestar, Request, get, post
from litestar._openapi.schema_generation import SchemaCreator
from litestar.dto import DataclassDTO, DTOConfig, DTOField
-from litestar.dto._backend import DTOBackend
+from litestar.dto._backend import DTOBackend, _create_struct_field_meta_for_field_definition
from litestar.dto._types import CollectionType, SimpleType, TransferDTOFieldDefinition
from litestar.dto.data_structures import DTOFieldDefinition
from litestar.enums import MediaType
from litestar.exceptions import SerializationException
+from litestar.openapi.spec.example import Example
from litestar.openapi.spec.reference import Reference
from litestar.openapi.spec.schema import Schema
+from litestar.params import KwargDefinition
from litestar.serialization import encode_json
from litestar.testing import RequestFactory
from litestar.typing import FieldDefinition
@@ -448,3 +450,29 @@ class Factory(DataclassDTO):
assert b_d_nested_info is not None
assert not next(f for f in b_d_nested_info.field_definitions if f.name == "e").is_excluded
assert b_d_nested_info.field_definitions[1].name == "f"
+
+
[email protected](
+ ("constraint_kwargs",),
+ (
+ ({},),
+ ({"gt": 0, "lt": 2},),
+ ({"ge": 0, "le": 2},),
+ ({"min_length": 1, "max_length": 2},),
+ ({"pattern": "test"},),
+ ),
+)
+def test_create_struct_field_meta_for_field_definition(constraint_kwargs: Any) -> None:
+ mock_field = MagicMock(spec=TransferDTOFieldDefinition, is_partial=False)
+ mock_field.kwarg_definition = KwargDefinition(
+ description="test",
+ examples=[Example(value=1)],
+ title="test",
+ **constraint_kwargs,
+ )
+ assert _create_struct_field_meta_for_field_definition(mock_field) == Meta(
+ description="test",
+ examples=[1],
+ title="test",
+ **constraint_kwargs,
+ )
| Bug: return DTO causes field descriptions in Pydantic model to disappear
### Description
When using a Pydantic model without a return DTO, the field descriptions show up correctly in /schema documentation. With a return_dto, field descriptions disappear.
## No DTO - Field Descriptions Present
_See bottom portion of image below data types for `id` and `user_name` ._

## With return DTO - Field Descriptions now missing

### URL to code causing the issue
_No response_
### MCVE
```python
from typing import Annotated
from litestar import Litestar, get
from litestar.contrib.pydantic import PydanticDTO
from litestar.dto import DTOConfig
from pydantic import BaseModel, Field
class User(BaseModel):
id: Annotated[
int,
Field(description="This is a test (id description)."),
]
user_name: str = Field(description="Not annotated test (name description)")
class DataCollectionDTO(PydanticDTO[User]):
config = DTOConfig(rename_strategy="camel")
USER_DB = {1: {"id": 1, "user_name": "Stella"}}
# #1 - NO DTO: field descriptions will be present in /schema
@get("/user/{user_id:int}", sync_to_thread=False)
# #2 - With return_DTO: field descriptions are missing from schema
# @get("/user/{user_id:int}", return_dto=DataCollectionDTO, sync_to_thread=False)
async def get_user(user_id: int) -> User:
return USER_DB[user_id]
app = Litestar(route_handlers=[get_user])
```
### Steps to reproduce
```bash
1. Build and run a litestar application using the code above for app.py.
2. View the /schema for the API (likely at http://127.0.0.1:8000/schema).
3. See the field descriptions for id and user_name as shown in the top image above. Field descriptions should be present
4. Edit app.py by commenting out the first decorator for get_user() and removing the comment from the second decorator line.
5. Again run the litestar application and view the /schema for the API.
6. Now the field descriptions are missing as in the second image above.
```
### Screenshots
_No response_
### Logs
_No response_
### Litestar Version
Litestar 2.4.2 (running on Ubuntu in WSL2)
### Platform
- [X] Linux
- [ ] Mac
- [ ] Windows
- [ ] Other (Please specify in the description above)
<!-- POLAR PLEDGE BADGE START -->
---
> [!NOTE]
> While we are open for sponsoring on [GitHub Sponsors](https://github.com/sponsors/litestar-org/) and
> [OpenCollective](https://opencollective.com/litestar), we also utilize [Polar.sh](https://polar.sh/) to engage in pledge-based sponsorship.
>
> Check out all issues funded or available for funding [on our Polar.sh dashboard](https://polar.sh/litestar-org)
> * If you would like to see an issue prioritized, make a pledge towards it!
> * We receive the pledge once the issue is completed & verified
> * This, along with engagement in the community, helps us know which features are a priority to our users.
<a href="https://polar.sh/litestar-org/litestar/issues/3232">
<picture>
<source media="(prefers-color-scheme: dark)" srcset="https://polar.sh/api/github/litestar-org/litestar/issues/3232/pledge.svg?darkmode=1">
<img alt="Fund with Polar" src="https://polar.sh/api/github/litestar-org/litestar/issues/3232/pledge.svg">
</picture>
</a>
<!-- POLAR PLEDGE BADGE END -->
| 2024-03-22T06:52:52 |
|
litestar-org/litestar | 3,280 | litestar-org__litestar-3280 | [
"3278"
] | 043a044f3fd25fa7f38e223eb071b9390c35bc71 | diff --git a/litestar/_openapi/schema_generation/schema.py b/litestar/_openapi/schema_generation/schema.py
--- a/litestar/_openapi/schema_generation/schema.py
+++ b/litestar/_openapi/schema_generation/schema.py
@@ -569,6 +569,9 @@ def process_schema_result(self, field: FieldDefinition, schema: Schema) -> Schem
if getattr(schema, schema_key, None) is None:
setattr(schema, schema_key, value)
+ if schema.default is None and field.default is not Empty:
+ schema.default = field.default
+
if not schema.examples and self.generate_examples:
from litestar._openapi.schema_generation.examples import create_examples_for_field
| diff --git a/tests/unit/test_openapi/test_schema.py b/tests/unit/test_openapi/test_schema.py
--- a/tests/unit/test_openapi/test_schema.py
+++ b/tests/unit/test_openapi/test_schema.py
@@ -37,7 +37,7 @@
from litestar.openapi.spec.example import Example
from litestar.openapi.spec.schema import Schema
from litestar.pagination import ClassicPagination, CursorPagination, OffsetPagination
-from litestar.params import Parameter, ParameterKwarg
+from litestar.params import KwargDefinition, Parameter, ParameterKwarg
from litestar.testing import create_test_client
from litestar.types.builtin_types import NoneType
from litestar.typing import FieldDefinition
@@ -555,3 +555,18 @@ class ModelB(base_type): # type: ignore[no-redef, misc]
Reference(ref="#/components/schemas/tests_unit_test_openapi_test_schema_test_type_union_with_none.ModelA"),
Reference("#/components/schemas/tests_unit_test_openapi_test_schema_test_type_union_with_none.ModelB"),
]
+
+
+def test_default_only_on_field_definition() -> None:
+ field_definition = FieldDefinition.from_annotation(int, default=10)
+ assert field_definition.kwarg_definition is None
+
+ schema = get_schema_for_field_definition(field_definition)
+ assert schema.default == 10
+
+
+def test_default_not_provided_for_kwarg_but_for_field() -> None:
+ field_definition = FieldDefinition.from_annotation(int, default=10, kwarg_definition=KwargDefinition())
+ schema = get_schema_for_field_definition(field_definition)
+
+ assert schema.default == 10
| Bug: default not generated for query parameter in openapi spec.
### Description
Hi,
The default of a query parameter is not properly generated when creating the openapi specification. See MVCE down below.
The generated specification looks like this:
```json
...
"paths": {
"/query_default": {
"get": {
"summary": "QueryDefault",
"operationId": "QueryDefaultQueryDefault",
"parameters": [
{
"name": "foo",
"in": "query",
"schema": {
"type": "integer"
},
"required": false,
"deprecated": false,
"allowEmptyValue": false,
"allowReserved": false
}
],
...
```
where the default is missing in the schema property.
### URL to code causing the issue
_No response_
### MCVE
```python
@get(path="query_default")
def query_default(foo: int = 12) -> None:
return
app = Litestar(route_handlers=[query_default])
```
### Steps to reproduce
_No response_
### Screenshots
_No response_
### Logs
_No response_
### Litestar Version
Version 2.7.1
### Platform
- [X] Linux
- [ ] Mac
- [ ] Windows
- [ ] Other (Please specify in the description above)
<!-- POLAR PLEDGE BADGE START -->
---
> [!NOTE]
> While we are open for sponsoring on [GitHub Sponsors](https://github.com/sponsors/litestar-org/) and
> [OpenCollective](https://opencollective.com/litestar), we also utilize [Polar.sh](https://polar.sh/) to engage in pledge-based sponsorship.
>
> Check out all issues funded or available for funding [on our Polar.sh dashboard](https://polar.sh/litestar-org)
> * If you would like to see an issue prioritized, make a pledge towards it!
> * We receive the pledge once the issue is completed & verified
> * This, along with engagement in the community, helps us know which features are a priority to our users.
<a href="https://polar.sh/litestar-org/litestar/issues/3278">
<picture>
<source media="(prefers-color-scheme: dark)" srcset="https://polar.sh/api/github/litestar-org/litestar/issues/3278/pledge.svg?darkmode=1">
<img alt="Fund with Polar" src="https://polar.sh/api/github/litestar-org/litestar/issues/3278/pledge.svg">
</picture>
</a>
<!-- POLAR PLEDGE BADGE END -->
| Could you confirm that this is also true in 2.7.0?
Same result with fixed version 2.7.0
```toml
dependencies = [
"litestar[standard]==2.7.0"
]
```
Thanks!
I don't think this is a bug. The [specification](https://spec.openapis.org/oas/latest.html#parameter-object) doesn't have a field to indicate a default value for parameters.
Sorry about that. I was completely wrong. While it's correct that the parameter object as per the spec doesn't allow for defaults, it does allow for a `schema` which *can* have a default value.
Us not parsing the default properly is a bug.
Just leaving this here in case you need a workaround. You can use `Annotated[int, Parameter(default=12)]` to get the behavior you expect.
```py
from typing_extensions import Annotated
from litestar import Litestar, get
from litestar.params import Parameter
@get(path="query_default")
async def query_default(foo: Annotated[int, Parameter(default=12)]) -> None:
return
app = Litestar(route_handlers=[query_default])
``` | 2024-03-29T08:32:13 |
litestar-org/litestar | 3,281 | litestar-org__litestar-3281 | [
"3277"
] | d2cb891fcc5264620674aa0010f0a0aab682aa85 | diff --git a/litestar/typing.py b/litestar/typing.py
--- a/litestar/typing.py
+++ b/litestar/typing.py
@@ -96,7 +96,7 @@ def _parse_metadata(value: Any, is_sequence_container: bool, extra: dict[str, An
example_list: list[Any] | None
if example := extra.pop("example", None):
example_list = [Example(value=example)]
- elif examples := getattr(value, "examples", None):
+ elif examples := (extra.pop("examples", None) or getattr(value, "examples", None)):
example_list = [Example(value=example) for example in cast("list[str]", examples)]
else:
example_list = None
| diff --git a/tests/unit/test_contrib/test_pydantic/test_openapi.py b/tests/unit/test_contrib/test_pydantic/test_openapi.py
--- a/tests/unit/test_contrib/test_pydantic/test_openapi.py
+++ b/tests/unit/test_contrib/test_pydantic/test_openapi.py
@@ -553,6 +553,26 @@ class Model(pydantic_v2.BaseModel):
assert value.examples == ["example"]
+def test_create_schema_for_field_v2__examples() -> None:
+ class Model(pydantic_v2.BaseModel):
+ value: str = pydantic_v2.Field(
+ title="title", description="description", max_length=16, json_schema_extra={"examples": ["example"]}
+ )
+
+ schema = get_schema_for_field_definition(
+ FieldDefinition.from_kwarg(name="Model", annotation=Model), plugins=[PydanticSchemaPlugin()]
+ )
+
+ assert schema.properties
+
+ value = schema.properties["value"]
+
+ assert isinstance(value, Schema)
+ assert value.description == "description"
+ assert value.title == "title"
+ assert value.examples == ["example"]
+
+
@pytest.mark.parametrize("with_future_annotations", [True, False])
def test_create_schema_for_pydantic_model_with_annotated_model_attribute(
with_future_annotations: bool, create_module: "Callable[[str], ModuleType]", pydantic_version: PydanticVersion
| Bug: schema generation crashes in 2.7.1
### Description
Using 2.7.0, I code view the "/schema" page of my app without problem, but with 2.7.1, I get internal server error 500 back. I ran it with debug and got this:
`'str' object has no attribute 'value'`
and a trace pointing to this line: https://github.com/litestar-org/litestar/blob/main/litestar/_openapi/schema_generation/utils.py#L114
as called from [litestar/_openapi/schema_generation/schema.py in SchemaCreator.process_schema_result at line 561](https://github.com/litestar-org/litestar/blob/main/litestar/_openapi/schema_generation/schema.py#L561)
I believe the error comes from this PR: https://github.com/litestar-org/litestar/pull/3224/files
The problem is triggered by _pydantic models with examples_ in the `json_schema_extra`. See MCVE. The examples being strings trigger this problem, but I am sure that other types would trigger similar errors.
### URL to code causing the issue
_No response_
### MCVE
```python
import litestar
import pydantic # v2
from litestar.testing import create_test_client
class Foo(pydantic.BaseModel):
bar: str = pydantic.Field(
json_schema_extra={
"examples": ["this-is-a-string"]
}
)
@litestar.get()
async def get_foo() -> Foo:
...
with create_test_client([get_foo]) as client:
resp = client.get("/schema/openapi.json")
print(resp.status_code) # 500
```
### Steps to reproduce
Run the above MCVE. It prints 500. Remove the examples from `json_schema_extra` and it prints 200.
### Screenshots
```bash
""
```
### Logs
_No response_
### Litestar Version
2.7.1
### Platform
- [X] Linux
- [ ] Mac
- [ ] Windows
- [ ] Other (Please specify in the description above)
<!-- POLAR PLEDGE BADGE START -->
---
> [!NOTE]
> While we are open for sponsoring on [GitHub Sponsors](https://github.com/sponsors/litestar-org/) and
> [OpenCollective](https://opencollective.com/litestar), we also utilize [Polar.sh](https://polar.sh/) to engage in pledge-based sponsorship.
>
> Check out all issues funded or available for funding [on our Polar.sh dashboard](https://polar.sh/litestar-org)
> * If you would like to see an issue prioritized, make a pledge towards it!
> * We receive the pledge once the issue is completed & verified
> * This, along with engagement in the community, helps us know which features are a priority to our users.
<a href="https://polar.sh/litestar-org/litestar/issues/3277">
<picture>
<source media="(prefers-color-scheme: dark)" srcset="https://polar.sh/api/github/litestar-org/litestar/issues/3277/pledge.svg?darkmode=1">
<img alt="Fund with Polar" src="https://polar.sh/api/github/litestar-org/litestar/issues/3277/pledge.svg">
</picture>
</a>
<!-- POLAR PLEDGE BADGE END -->
| I might have a fix. You can assign this to me for now. | 2024-03-29T08:35:53 |
litestar-org/litestar | 3,285 | litestar-org__litestar-3285 | [
"3201"
] | 62f2ceebee3eaa92234f55a123fed86cb922040d | diff --git a/litestar/_openapi/schema_generation/plugins/dataclass.py b/litestar/_openapi/schema_generation/plugins/dataclass.py
--- a/litestar/_openapi/schema_generation/plugins/dataclass.py
+++ b/litestar/_openapi/schema_generation/plugins/dataclass.py
@@ -1,9 +1,11 @@
from __future__ import annotations
+import dataclasses
from dataclasses import MISSING, fields
from typing import TYPE_CHECKING
from litestar.plugins import OpenAPISchemaPlugin
+from litestar.types import Empty
from litestar.typing import FieldDefinition
from litestar.utils.predicates import is_optional_union
@@ -31,6 +33,11 @@ def to_openapi_schema(self, field_definition: FieldDefinition, schema_creator: S
)
),
property_fields={
- field.name: FieldDefinition.from_kwarg(type_hints[field.name], field.name) for field in dataclass_fields
+ field.name: FieldDefinition.from_kwarg(
+ annotation=type_hints[field.name],
+ name=field.name,
+ default=field.default if field.default is not dataclasses.MISSING else Empty,
+ )
+ for field in dataclass_fields
},
)
diff --git a/litestar/_openapi/schema_generation/plugins/struct.py b/litestar/_openapi/schema_generation/plugins/struct.py
--- a/litestar/_openapi/schema_generation/plugins/struct.py
+++ b/litestar/_openapi/schema_generation/plugins/struct.py
@@ -2,6 +2,7 @@
from typing import TYPE_CHECKING
+import msgspec
from msgspec import Struct
from msgspec.structs import fields
@@ -38,7 +39,11 @@ def is_field_required(field: FieldInfo) -> bool:
]
),
property_fields={
- field.encode_name: FieldDefinition.from_kwarg(type_hints[field.name], field.encode_name)
+ field.encode_name: FieldDefinition.from_kwarg(
+ type_hints[field.name],
+ field.encode_name,
+ default=field.default if field.default not in {msgspec.NODEFAULT, msgspec.UNSET} else Empty,
+ )
for field in struct_fields
},
)
| diff --git a/tests/unit/test_openapi/test_integration.py b/tests/unit/test_openapi/test_integration.py
--- a/tests/unit/test_openapi/test_integration.py
+++ b/tests/unit/test_openapi/test_integration.py
@@ -1,5 +1,6 @@
from __future__ import annotations
+import dataclasses
from dataclasses import dataclass
from types import ModuleType
from typing import Callable, Generic, Optional, TypeVar, cast
@@ -184,6 +185,45 @@ async def example_route() -> Lookup:
}
+def test_dataclass_field_default() -> None:
+ # https://github.com/litestar-org/litestar/issues/3201
+ @dataclass
+ class SomeModel:
+ field_a: str = "default_a"
+ field_b: str = dataclasses.field(default="default_b")
+ field_c: str = dataclasses.field(default_factory=lambda: "default_c")
+
+ @get("/")
+ async def handler() -> SomeModel:
+ return SomeModel()
+
+ app = Litestar(route_handlers=[handler], signature_types=[SomeModel])
+ schema = app.openapi_schema.components.schemas["test_dataclass_field_default.SomeModel"]
+ assert schema
+ assert schema.properties["field_a"].default == "default_a" # type: ignore[union-attr, index]
+ assert schema.properties["field_b"].default == "default_b" # type: ignore[union-attr, index]
+ assert schema.properties["field_c"].default is None # type: ignore[union-attr, index]
+
+
+def test_struct_field_default() -> None:
+ # https://github.com/litestar-org/litestar/issues/3201
+ class SomeModel(msgspec.Struct, kw_only=True):
+ field_a: str = "default_a"
+ field_b: str = msgspec.field(default="default_b")
+ field_c: str = msgspec.field(default_factory=lambda: "default_c")
+
+ @get("/")
+ async def handler() -> SomeModel:
+ return SomeModel()
+
+ app = Litestar(route_handlers=[handler], signature_types=[SomeModel])
+ schema = app.openapi_schema.components.schemas["test_struct_field_default.SomeModel"]
+ assert schema
+ assert schema.properties["field_a"].default == "default_a" # type: ignore[union-attr, index]
+ assert schema.properties["field_b"].default == "default_b" # type: ignore[union-attr, index]
+ assert schema.properties["field_c"].default is None # type: ignore[union-attr, index]
+
+
def test_schema_for_optional_path_parameter() -> None:
@get(path=["/", "/{test_message:str}"], media_type=MediaType.TEXT, sync_to_thread=False)
def handler(test_message: Optional[str]) -> str: # noqa: UP007
| Bug: `default` not generated in OpenAPI schema for body as models
### Description
Declaring default fields (various ways) generates OpenAPI spec with missing `default` declarations.
Also, when/if fields are marked into `required` varies a bit.
Didn't test what happens when requests are sent - are the defaults actually picked up runtime. But at least the OpenAPI schema generation fails.
(The docs don't mention _how_ these are supported so a bit difficult to know how it's _intended_.)
### MCVE
```python
import dataclasses
import json
from dataclasses import dataclass
from typing import Annotated
import msgspec
from litestar import post
from litestar.app import Litestar
from litestar.params import Parameter
from msgspec import Struct
from pydantic import BaseModel, Field
class PydanticBody(BaseModel):
field1: Annotated[str, Field(default="dummy")]
field2: str = Field(default="dummy")
field3: str = "dummy"
field4: Annotated[str, Parameter(default="dummy")]
@dataclass
class DataclassBody:
field1: Annotated[str, Parameter(default="dummy")] # default generated, but declared as required
field3: str = Parameter(default="dummy")
field4: str = "dummy"
field5: str = dataclasses.field(default="dummy")
class MsgspecBody(Struct):
field1: Annotated[str, Parameter(default="dummy")] # default generated, but declared as required
field2: Annotated[str, msgspec.field(default="dummy")] # no default, marked as required
field3: str = Parameter(default="dummy")
field4: str = "dummy"
field5: str = msgspec.field(default="dummy")
@post("/1")
async def pydantic_handler(data: PydanticBody) -> None: ...
@post("/2")
async def dataclass_handler(data: DataclassBody) -> None: ...
@post("/3")
async def msgspec_handler(data: MsgspecBody) -> None: ...
app = Litestar([pydantic_handler, dataclass_handler, msgspec_handler])
print(json.dumps(app.openapi_schema.to_schema(), indent=4))
```
### Steps to reproduce
This generates:
```json
"components": {
"schemas": {
"DataclassBody": {
"properties": {
"field1": {
"type": "string",
"default": "dummy"
},
"field3": {
"type": "string"
},
"field4": {
"type": "string"
},
"field5": {
"type": "string"
}
},
"type": "object",
"required": [
"field1"
],
"title": "DataclassBody"
},
"MsgspecBody": {
"properties": {
"field1": {
"type": "string",
"default": "dummy"
},
"field2": {
"type": "string"
},
"field3": {
"type": "string"
},
"field4": {
"type": "string"
},
"field5": {
"type": "string"
}
},
"type": "object",
"required": [
"field1",
"field2"
],
"title": "MsgspecBody"
},
"PydanticBody": {
"properties": {
"field1": {
"type": "string"
},
"field2": {
"type": "string"
},
"field3": {
"type": "string"
},
"field4": {
"type": "string"
}
},
"type": "object",
"required": [
"field4"
],
"title": "PydanticBody"
}
}
```
### Litestar Version
2.7.0
### Platform
- [X] Linux
- [ ] Mac
- [ ] Windows
- [ ] Other (Please specify in the description above)
<!-- POLAR PLEDGE BADGE START -->
---
> [!NOTE]
> While we are open for sponsoring on [GitHub Sponsors](https://github.com/sponsors/litestar-org/) and
> [OpenCollective](https://opencollective.com/litestar), we also utilize [Polar.sh](https://polar.sh/) to engage in pledge-based sponsorship.
>
> Check out all issues funded or available for funding [on our Polar.sh dashboard](https://polar.sh/litestar-org)
> * If you would like to see an issue prioritized, make a pledge towards it!
> * We receive the pledge once the issue is completed & verified
> * This, along with engagement in the community, helps us know which features are a priority to our users.
<a href="https://polar.sh/litestar-org/litestar/issues/3201">
<picture>
<source media="(prefers-color-scheme: dark)" srcset="https://polar.sh/api/github/litestar-org/litestar/issues/3201/pledge.svg?darkmode=1">
<img alt="Fund with Polar" src="https://polar.sh/api/github/litestar-org/litestar/issues/3201/pledge.svg">
</picture>
</a>
<!-- POLAR PLEDGE BADGE END -->
| We use the APIs provided by `dataclasses`, `msgspec`, and `pydantic` to parse the details of a model such as the default. Both, `msgspec` and `dataclasses`, do not support specifying the `field` within the annotation but rather, you have to assign it whereas pydantic does (I think it does).
Also, I don't think `Parameter` is meant to be used for the request/response bodies. They're meant to be used to give extra details for parameters like query params, path params etc.
So _how_ should I specify defaults and other OpenAPI data, then, for those cases? Quite puzzled at this point.
It seems I was mistaken since the default isn't being generated as part of the schema even when specifying the default value correctly.
```python
from dataclasses import dataclass
from litestar import post
from litestar.app import Litestar
from litestar.openapi.spec import Schema
@dataclass
class DataclassBody:
field4: str = "dummy"
@post("/2")
async def dataclass_handler(data: DataclassBody) -> None:
...
app = Litestar([dataclass_handler])
schema = app.openapi_schema.components.schemas["DataclassBody"]
assert schema.properties
schema = schema.properties["field4"]
assert isinstance(schema, Schema)
assert schema.default == "dummy" # This fails.
```
There's a few things going on here (some of which have also been discussed elsewhere I think):
```python
@dataclass
class DataclassBody:
field1: Annotated[str, Parameter(default="dummy")] # default generated, but declared as required
field3: str = Parameter(default="dummy")
field4: str = "dummy"
field5: str = dataclasses.field(default="dummy")
```
- `field1` is simply an invalid way of defining a default for a dataclass. You are essentially "lying" to Litestar by telling it "even though I have not defined a default for this field, I want you to say that it has a default value of `"dummy"`".
- `field3` is setting a default value for the field, but, according to dataclass semantics, the default value is `Parameter(default="dummy")`. Since this is neither an annotation, nor the default value you meant to specify, it's not surprising this isn't working
- `field4` and `field5` should work, and it not working is a bug (and I think a regression, will have to check this out)
```python
class MsgspecBody(Struct):
field1: Annotated[str, Parameter(default="dummy")] # default generated, but declared as required
field2: Annotated[str, msgspec.field(default="dummy")] # no default, marked as required
field3: str = Parameter(default="dummy")
field4: str = "dummy"
field5: str = msgspec.field(default="dummy")
```
- `field1`/`field2`/`field3`: Same as with the dataclass, this is not a valid way to define a default way for a msgspec.Struct
- `field4`/`field5`: Also same as with the dataclass, this should work
It's important to keep in mind that while Litestar does allow to *extend* modelling capabilities of the chosen libraries, it does not *alter* them. You still have to adhere to the semantics and patterns these libraries use and produce a valid model for Litestar to use. | 2024-03-29T14:33:45 |
litestar-org/litestar | 3,286 | litestar-org__litestar-3286 | [
"2365"
] | 2ab5bf432669e4590de78457de6ead668df4c39a | diff --git a/litestar/contrib/pydantic/pydantic_dto_factory.py b/litestar/contrib/pydantic/pydantic_dto_factory.py
--- a/litestar/contrib/pydantic/pydantic_dto_factory.py
+++ b/litestar/contrib/pydantic/pydantic_dto_factory.py
@@ -47,6 +47,14 @@
__all__ = ("PydanticDTO",)
+def convert_validation_error(validation_error: ValidationErrorV1 | ValidationErrorV2) -> list[dict[str, Any]]:
+ error_list = validation_error.errors()
+ for error in error_list:
+ if isinstance(exception := error.get("ctx", {}).get("error"), Exception):
+ error["ctx"]["error"] = type(exception).__name__
+ return error_list # type: ignore[return-value]
+
+
class PydanticDTO(AbstractDTO[T], Generic[T]):
"""Support for domain modelling with Pydantic."""
@@ -55,14 +63,14 @@ def decode_builtins(self, value: dict[str, Any]) -> Any:
try:
return super().decode_builtins(value)
except (ValidationErrorV2, ValidationErrorV1) as ex:
- raise ValidationException(extra=ex.errors()) from ex
+ raise ValidationException(extra=convert_validation_error(ex)) from ex
@override
def decode_bytes(self, value: bytes) -> Any:
try:
return super().decode_bytes(value)
except (ValidationErrorV2, ValidationErrorV1) as ex:
- raise ValidationException(extra=ex.errors()) from ex
+ raise ValidationException(extra=convert_validation_error(ex)) from ex
@classmethod
def generate_field_definitions(
| diff --git a/tests/unit/test_contrib/test_pydantic/test_integration.py b/tests/unit/test_contrib/test_pydantic/test_integration.py
--- a/tests/unit/test_contrib/test_pydantic/test_integration.py
+++ b/tests/unit/test_contrib/test_pydantic/test_integration.py
@@ -111,6 +111,35 @@ def my_route_handler(param: int, data: PydanticV1Person) -> None: ...
assert len(extra) == 4
+def test_serialize_raw_errors_v2() -> None:
+ # https://github.com/litestar-org/litestar/issues/2365
+ class User(pydantic_v2.BaseModel):
+ user_id: int
+
+ @pydantic_v2.field_validator("user_id")
+ @classmethod
+ def validate_user_id(cls, user_id: int) -> None:
+ raise ValueError("user id must be greater than 0")
+
+ @post("/", dto=PydanticDTO[User])
+ async def create_user(data: User) -> User:
+ return data
+
+ with create_test_client(create_user) as client:
+ response = client.post("/", json={"user_id": -1})
+ extra = response.json().get("extra")
+ assert extra == [
+ {
+ "type": "value_error",
+ "loc": ["user_id"],
+ "msg": "Value error, user id must be greater than 0",
+ "input": -1,
+ "ctx": {"error": "ValueError"},
+ "url": "https://errors.pydantic.dev/2.6/v/value_error",
+ }
+ ]
+
+
def test_signature_model_invalid_input(
base_model: Type[Union[pydantic_v2.BaseModel, pydantic_v1.BaseModel]], pydantic_version: PydanticVersion
) -> None:
| Use of pydantic custom field validator returns HTTP 500 instead of HTTP 400
### Discussed in https://github.com/orgs/litestar-org/discussions/2363
<div type='discussions-op-text'>
<sup>Originally posted by **trcw** September 26, 2023</sup>
I have a pydantic model with a custom validator that throws a ```ValueError``` as described in https://docs.pydantic.dev/latest/errors/errors/#custom-errors.
The behavior is different in version 1 and 2 when I try to do an invalid post request.
* In Starlite 1.51.14 the request returns ```HTTP 400``` with details of the validation failure.
* In Litestar 2.1.0 the request returns ```HTTP 500``` with an internal server error instead.
I was expecting a ```HTTP 400```. Is this a regression or do I need to do something different?
Here is a minimal example for Litestar v2.1.0:
```python
from litestar import Litestar, post
from litestar.contrib.pydantic import PydanticDTO
from litestar.testing import TestClient
from pydantic import BaseModel, field_validator
class User(BaseModel):
user_id: int
@field_validator('user_id')
@classmethod
def user_id_must_be_greater_than_zero(cls, user_id):
if user_id < 1:
raise ValueError('user id must be greater than 0')
return user_id
UserDTO = PydanticDTO[User]
@post("/user", dto=UserDTO, sync_to_thread=False)
def create_user(data: User) -> User:
return data
with TestClient(Litestar([create_user], debug=True)) as client:
response = client.post("/user", json={"user_id": 0})
print(response.text)
print(f"Status code: {response.status_code}")
assert response.status_code == 400
```
```
Traceback (most recent call last):
File "/home/vscode/.local/lib/python3.11/site-packages/litestar/contrib/pydantic/pydantic_dto_factory.py", line 49, in decode_bytes
return super().decode_bytes(value)
^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/vscode/.local/lib/python3.11/site-packages/litestar/dto/base_dto.py", line 96, in decode_bytes
return backend.populate_data_from_raw(value, self.asgi_connection)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/vscode/.local/lib/python3.11/site-packages/litestar/dto/_backend.py", line 297, in populate_data_from_raw
return _transfer_data(
^^^^^^^^^^^^^^^
File "/home/vscode/.local/lib/python3.11/site-packages/litestar/dto/_backend.py", line 557, in _transfer_data
return _transfer_instance_data(
^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/vscode/.local/lib/python3.11/site-packages/litestar/dto/_backend.py", line 621, in _transfer_instance_data
return destination_type(**unstructured_data)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/vscode/.local/lib/python3.11/site-packages/pydantic/main.py", line 165, in __init__
__pydantic_self__.__pydantic_validator__.validate_python(data, self_instance=__pydantic_self__)
pydantic_core._pydantic_core.ValidationError: 1 validation error for User
user_id
Value error, user id must be greater than 0 [type=value_error, input_value=0, input_type=int]
For further information visit https://errors.pydantic.dev/2.3/v/value_error
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/home/vscode/.local/lib/python3.11/site-packages/litestar/middleware/exceptions/middleware.py", line 191, in __call__
await self.app(scope, receive, send)
File "/home/vscode/.local/lib/python3.11/site-packages/litestar/routes/http.py", line 79, in handle
response = await self._get_response_for_request(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/vscode/.local/lib/python3.11/site-packages/litestar/routes/http.py", line 131, in _get_response_for_request
response = await self._call_handler_function(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/vscode/.local/lib/python3.11/site-packages/litestar/routes/http.py", line 160, in _call_handler_function
response_data, cleanup_group = await self._get_response_data(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/vscode/.local/lib/python3.11/site-packages/litestar/routes/http.py", line 184, in _get_response_data
kwargs["data"] = await kwargs["data"]
^^^^^^^^^^^^^^^^^^^^
File "/home/vscode/.local/lib/python3.11/site-packages/litestar/_kwargs/extractors.py", line 427, in dto_extractor
return data_dto(connection).decode_bytes(body)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/vscode/.local/lib/python3.11/site-packages/litestar/contrib/pydantic/pydantic_dto_factory.py", line 51, in decode_bytes
raise ValidationException(extra=ex.errors()) from ex
litestar.exceptions.http_exceptions.ValidationException: 400: Bad Request
Traceback (most recent call last):
File "/home/vscode/.local/lib/python3.11/site-packages/litestar/contrib/pydantic/pydantic_dto_factory.py", line 49, in decode_bytes
return super().decode_bytes(value)
^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/vscode/.local/lib/python3.11/site-packages/litestar/dto/base_dto.py", line 96, in decode_bytes
return backend.populate_data_from_raw(value, self.asgi_connection)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/vscode/.local/lib/python3.11/site-packages/litestar/dto/_backend.py", line 297, in populate_data_from_raw
return _transfer_data(
^^^^^^^^^^^^^^^
File "/home/vscode/.local/lib/python3.11/site-packages/litestar/dto/_backend.py", line 557, in _transfer_data
return _transfer_instance_data(
^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/vscode/.local/lib/python3.11/site-packages/litestar/dto/_backend.py", line 621, in _transfer_instance_data
return destination_type(**unstructured_data)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/vscode/.local/lib/python3.11/site-packages/pydantic/main.py", line 165, in __init__
__pydantic_self__.__pydantic_validator__.validate_python(data, self_instance=__pydantic_self__)
pydantic_core._pydantic_core.ValidationError: 1 validation error for User
user_id
Value error, user id must be greater than 0 [type=value_error, input_value=0, input_type=int]
For further information visit https://errors.pydantic.dev/2.3/v/value_error
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/home/vscode/.local/lib/python3.11/site-packages/litestar/middleware/exceptions/middleware.py", line 191, in __call__
await self.app(scope, receive, send)
File "/home/vscode/.local/lib/python3.11/site-packages/litestar/routes/http.py", line 79, in handle
response = await self._get_response_for_request(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/vscode/.local/lib/python3.11/site-packages/litestar/routes/http.py", line 131, in _get_response_for_request
response = await self._call_handler_function(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/vscode/.local/lib/python3.11/site-packages/litestar/routes/http.py", line 160, in _call_handler_function
response_data, cleanup_group = await self._get_response_data(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/vscode/.local/lib/python3.11/site-packages/litestar/routes/http.py", line 184, in _get_response_data
kwargs["data"] = await kwargs["data"]
^^^^^^^^^^^^^^^^^^^^
File "/home/vscode/.local/lib/python3.11/site-packages/litestar/_kwargs/extractors.py", line 427, in dto_extractor
return data_dto(connection).decode_bytes(body)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/vscode/.local/lib/python3.11/site-packages/litestar/contrib/pydantic/pydantic_dto_factory.py", line 51, in decode_bytes
raise ValidationException(extra=ex.errors()) from ex
litestar.exceptions.http_exceptions.ValidationException: 400: Bad Request
ERROR - 2023-09-25 15:57:01,967 - litestar - config - exception raised on http connection to route /user
Traceback (most recent call last):
File "/home/vscode/.local/lib/python3.11/site-packages/litestar/routes/http.py", line 131, in _get_response_for_request
response = await self._call_handler_function(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/vscode/.local/lib/python3.11/site-packages/litestar/routes/http.py", line 160, in _call_handler_function
response_data, cleanup_group = await self._get_response_data(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/vscode/.local/lib/python3.11/site-packages/litestar/routes/http.py", line 184, in _get_response_data
kwargs["data"] = await kwargs["data"]
^^^^^^^^^^^^^^^^^^^^
File "/home/vscode/.local/lib/python3.11/site-packages/litestar/_kwargs/extractors.py", line 427, in dto_extractor
return data_dto(connection).decode_bytes(body)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/vscode/.local/lib/python3.11/site-packages/litestar/contrib/pydantic/pydantic_dto_factory.py", line 51, in decode_bytes
raise ValidationException(extra=ex.errors()) from ex
litestar.exceptions.http_exceptions.ValidationException: 400: Bad Request
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/vscode/.local/lib/python3.11/site-packages/litestar/serialization/msgspec_hooks.py", line 141, in encode_json
return msgspec.json.encode(value, enc_hook=serializer) if serializer else _msgspec_json_encoder.encode(value)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/vscode/.local/lib/python3.11/site-packages/litestar/serialization/msgspec_hooks.py", line 88, in default_serializer
raise TypeError(f"Unsupported type: {type(value)!r}")
TypeError: Unsupported type: <class 'ValueError'>
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/home/vscode/.local/lib/python3.11/site-packages/litestar/middleware/exceptions/middleware.py", line 191, in __call__
await self.app(scope, receive, send)
File "/home/vscode/.local/lib/python3.11/site-packages/litestar/middleware/exceptions/middleware.py", line 205, in __call__
await self.handle_request_exception(
File "/home/vscode/.local/lib/python3.11/site-packages/litestar/middleware/exceptions/middleware.py", line 235, in handle_request_exception
await response.to_asgi_response(app=None, request=request)(scope=scope, receive=receive, send=send)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/vscode/.local/lib/python3.11/site-packages/litestar/response/base.py", line 444, in to_asgi_response
body=self.render(self.content, media_type, get_serializer(type_encoders)),
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/vscode/.local/lib/python3.11/site-packages/litestar/response/base.py", line 385, in render
return encode_json(content, enc_hook)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/vscode/.local/lib/python3.11/site-packages/litestar/serialization/msgspec_hooks.py", line 143, in encode_json
raise SerializationException(str(msgspec_error)) from msgspec_error
litestar.exceptions.base_exceptions.SerializationException: Unsupported type: <class 'ValueError'>
Traceback (most recent call last):
File "/home/vscode/.local/lib/python3.11/site-packages/litestar/contrib/pydantic/pydantic_dto_factory.py", line 49, in decode_bytes
return super().decode_bytes(value)
^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/vscode/.local/lib/python3.11/site-packages/litestar/dto/base_dto.py", line 96, in decode_bytes
return backend.populate_data_from_raw(value, self.asgi_connection)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/vscode/.local/lib/python3.11/site-packages/litestar/dto/_backend.py", line 297, in populate_data_from_raw
return _transfer_data(
^^^^^^^^^^^^^^^
File "/home/vscode/.local/lib/python3.11/site-packages/litestar/dto/_backend.py", line 557, in _transfer_data
return _transfer_instance_data(
^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/vscode/.local/lib/python3.11/site-packages/litestar/dto/_backend.py", line 621, in _transfer_instance_data
return destination_type(**unstructured_data)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/vscode/.local/lib/python3.11/site-packages/pydantic/main.py", line 165, in __init__
__pydantic_self__.__pydantic_validator__.validate_python(data, self_instance=__pydantic_self__)
pydantic_core._pydantic_core.ValidationError: 1 validation error for User
user_id
Value error, user id must be greater than 0 [type=value_error, input_value=0, input_type=int]
For further information visit https://errors.pydantic.dev/2.3/v/value_error
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/home/vscode/.local/lib/python3.11/site-packages/litestar/middleware/exceptions/middleware.py", line 191, in __call__
await self.app(scope, receive, send)
File "/home/vscode/.local/lib/python3.11/site-packages/litestar/routes/http.py", line 79, in handle
response = await self._get_response_for_request(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/vscode/.local/lib/python3.11/site-packages/litestar/routes/http.py", line 131, in _get_response_for_request
response = await self._call_handler_function(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/vscode/.local/lib/python3.11/site-packages/litestar/routes/http.py", line 160, in _call_handler_function
response_data, cleanup_group = await self._get_response_data(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/vscode/.local/lib/python3.11/site-packages/litestar/routes/http.py", line 184, in _get_response_data
kwargs["data"] = await kwargs["data"]
^^^^^^^^^^^^^^^^^^^^
File "/home/vscode/.local/lib/python3.11/site-packages/litestar/_kwargs/extractors.py", line 427, in dto_extractor
return data_dto(connection).decode_bytes(body)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/vscode/.local/lib/python3.11/site-packages/litestar/contrib/pydantic/pydantic_dto_factory.py", line 51, in decode_bytes
raise ValidationException(extra=ex.errors()) from ex
litestar.exceptions.http_exceptions.ValidationException: 400: Bad Request
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/vscode/.local/lib/python3.11/site-packages/litestar/serialization/msgspec_hooks.py", line 141, in encode_json
return msgspec.json.encode(value, enc_hook=serializer) if serializer else _msgspec_json_encoder.encode(value)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/vscode/.local/lib/python3.11/site-packages/litestar/serialization/msgspec_hooks.py", line 88, in default_serializer
raise TypeError(f"Unsupported type: {type(value)!r}")
TypeError: Unsupported type: <class 'ValueError'>
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/home/vscode/.local/lib/python3.11/site-packages/litestar/middleware/exceptions/middleware.py", line 191, in __call__
await self.app(scope, receive, send)
File "/home/vscode/.local/lib/python3.11/site-packages/litestar/middleware/exceptions/middleware.py", line 205, in __call__
await self.handle_request_exception(
File "/home/vscode/.local/lib/python3.11/site-packages/litestar/middleware/exceptions/middleware.py", line 235, in handle_request_exception
await response.to_asgi_response(app=None, request=request)(scope=scope, receive=receive, send=send)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/vscode/.local/lib/python3.11/site-packages/litestar/response/base.py", line 444, in to_asgi_response
body=self.render(self.content, media_type, get_serializer(type_encoders)),
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/vscode/.local/lib/python3.11/site-packages/litestar/response/base.py", line 385, in render
return encode_json(content, enc_hook)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/vscode/.local/lib/python3.11/site-packages/litestar/serialization/msgspec_hooks.py", line 143, in encode_json
raise SerializationException(str(msgspec_error)) from msgspec_error
litestar.exceptions.base_exceptions.SerializationException: Unsupported type: <class 'ValueError'>
Traceback (most recent call last):
File "/home/vscode/.local/lib/python3.11/site-packages/litestar/contrib/pydantic/pydantic_dto_factory.py", line 49, in decode_bytes
return super().decode_bytes(value)
^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/vscode/.local/lib/python3.11/site-packages/litestar/dto/base_dto.py", line 96, in decode_bytes
return backend.populate_data_from_raw(value, self.asgi_connection)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/vscode/.local/lib/python3.11/site-packages/litestar/dto/_backend.py", line 297, in populate_data_from_raw
return _transfer_data(
^^^^^^^^^^^^^^^
File "/home/vscode/.local/lib/python3.11/site-packages/litestar/dto/_backend.py", line 557, in _transfer_data
return _transfer_instance_data(
^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/vscode/.local/lib/python3.11/site-packages/litestar/dto/_backend.py", line 621, in _transfer_instance_data
return destination_type(**unstructured_data)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/vscode/.local/lib/python3.11/site-packages/pydantic/main.py", line 165, in __init__
__pydantic_self__.__pydantic_validator__.validate_python(data, self_instance=__pydantic_self__)
pydantic_core._pydantic_core.ValidationError: 1 validation error for User
user_id
Value error, user id must be greater than 0 [type=value_error, input_value=0, input_type=int]
For further information visit https://errors.pydantic.dev/2.3/v/value_error
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/home/vscode/.local/lib/python3.11/site-packages/litestar/middleware/exceptions/middleware.py", line 191, in __call__
await self.app(scope, receive, send)
File "/home/vscode/.local/lib/python3.11/site-packages/litestar/routes/http.py", line 79, in handle
response = await self._get_response_for_request(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/vscode/.local/lib/python3.11/site-packages/litestar/routes/http.py", line 131, in _get_response_for_request
response = await self._call_handler_function(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/vscode/.local/lib/python3.11/site-packages/litestar/routes/http.py", line 160, in _call_handler_function
response_data, cleanup_group = await self._get_response_data(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/vscode/.local/lib/python3.11/site-packages/litestar/routes/http.py", line 184, in _get_response_data
kwargs["data"] = await kwargs["data"]
^^^^^^^^^^^^^^^^^^^^
File "/home/vscode/.local/lib/python3.11/site-packages/litestar/_kwargs/extractors.py", line 427, in dto_extractor
return data_dto(connection).decode_bytes(body)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/vscode/.local/lib/python3.11/site-packages/litestar/contrib/pydantic/pydantic_dto_factory.py", line 51, in decode_bytes
raise ValidationException(extra=ex.errors()) from ex
litestar.exceptions.http_exceptions.ValidationException: 400: Bad Request
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/vscode/.local/lib/python3.11/site-packages/litestar/serialization/msgspec_hooks.py", line 141, in encode_json
return msgspec.json.encode(value, enc_hook=serializer) if serializer else _msgspec_json_encoder.encode(value)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/vscode/.local/lib/python3.11/site-packages/litestar/serialization/msgspec_hooks.py", line 88, in default_serializer
raise TypeError(f"Unsupported type: {type(value)!r}")
TypeError: Unsupported type: <class 'ValueError'>
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/home/vscode/.local/lib/python3.11/site-packages/litestar/middleware/exceptions/middleware.py", line 191, in __call__
await self.app(scope, receive, send)
File "/home/vscode/.local/lib/python3.11/site-packages/litestar/middleware/exceptions/middleware.py", line 205, in __call__
await self.handle_request_exception(
File "/home/vscode/.local/lib/python3.11/site-packages/litestar/middleware/exceptions/middleware.py", line 235, in handle_request_exception
await response.to_asgi_response(app=None, request=request)(scope=scope, receive=receive, send=send)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/vscode/.local/lib/python3.11/site-packages/litestar/response/base.py", line 444, in to_asgi_response
body=self.render(self.content, media_type, get_serializer(type_encoders)),
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/vscode/.local/lib/python3.11/site-packages/litestar/response/base.py", line 385, in render
return encode_json(content, enc_hook)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/vscode/.local/lib/python3.11/site-packages/litestar/serialization/msgspec_hooks.py", line 143, in encode_json
raise SerializationException(str(msgspec_error)) from msgspec_error
litestar.exceptions.base_exceptions.SerializationException: Unsupported type: <class 'ValueError'>
INFO - 2023-09-25 15:57:01,972 - httpx - _client - HTTP Request: POST http://testserver.local/user "HTTP/1.1 500 Internal Server Error"
Status code: 500
Traceback (most recent call last):
File "/workspaces/busshark.controller.trafficstore-api/main.py", line 28, in <module>
assert response.status_code == 400
^^^^^^^^^^^^^^^^^^^^^^^^^^^
AssertionError
```
</div>
<!-- POLAR PLEDGE BADGE START -->
---
## Funding
* If you would like to see an issue prioritized, make a pledge towards it!
* We receive the pledge once the issue is completed & verified
<a href="https://polar.sh/litestar-org/litestar/issues/2365">
<picture>
<source media="(prefers-color-scheme: dark)" srcset="https://polar.sh/api/github/litestar-org/litestar/issues/2365/pledge.svg?darkmode=1">
<img alt="Fund with Polar" src="https://polar.sh/api/github/litestar-org/litestar/issues/2365/pledge.svg">
</picture>
</a>
<!-- POLAR PLEDGE BADGE END -->
| Reproduces on `litestar==2.5.1` / `pydantic==2.5.3`.
Turns out this isn't really a bug as such and has nothing to do with the `field_validator` in particular. It's just that Pydantic v2 return the original `ValueError` raised as part of its own exception when formatted as a dict. Since we in turn pass this to our `ValidationException`, and later on try to serialize it, we're hitting a serialization error because we can't actually serialize `ValueError`.
The reason why this only affects DTOs is because they take a very different error handling path; This also results in slightly different error responses when DTOs are used.
@peterschutt I think we should tackle this for v3 as it can only be done with breaking changes. | 2024-03-29T18:49:54 |
litestar-org/litestar | 3,293 | litestar-org__litestar-3293 | [
"2700"
] | aae9c9385797d3b30bcd7a2780cc89a2383df6a6 | diff --git a/litestar/_openapi/path_item.py b/litestar/_openapi/path_item.py
--- a/litestar/_openapi/path_item.py
+++ b/litestar/_openapi/path_item.py
@@ -1,5 +1,6 @@
from __future__ import annotations
+import dataclasses
from inspect import cleandoc
from typing import TYPE_CHECKING
@@ -8,6 +9,7 @@
from litestar._openapi.responses import create_responses_for_handler
from litestar._openapi.utils import SEPARATORS_CLEANUP_PATTERN
from litestar.enums import HttpMethod
+from litestar.exceptions import ImproperlyConfiguredException
from litestar.openapi.spec import Operation, PathItem
from litestar.utils.helpers import unwrap_partial
@@ -16,7 +18,7 @@
from litestar.handlers.http_handlers import HTTPRouteHandler
from litestar.routes import HTTPRoute
-__all__ = ("create_path_item_for_route",)
+__all__ = ("create_path_item_for_route", "merge_path_item_operations")
class PathItemFactory:
@@ -135,3 +137,32 @@ def create_path_item_for_route(openapi_context: OpenAPIContext, route: HTTPRoute
"""
path_item_factory = PathItemFactory(openapi_context, route)
return path_item_factory.create_path_item()
+
+
+def merge_path_item_operations(source: PathItem, other: PathItem, for_path: str) -> PathItem:
+ """Merge operations from path items, creating a new path item that includes
+ operations from both.
+ """
+ attrs_to_merge = {"get", "put", "post", "delete", "options", "head", "patch", "trace"}
+ fields = {f.name for f in dataclasses.fields(PathItem)} - attrs_to_merge
+ if any(getattr(source, attr) and getattr(other, attr) for attr in attrs_to_merge):
+ raise ValueError("Cannot merge operation for PathItem if operation is set on both items")
+
+ if differing_values := [
+ (value_a, value_b) for attr in fields if (value_a := getattr(source, attr)) != (value_b := getattr(other, attr))
+ ]:
+ raise ImproperlyConfiguredException(
+ f"Conflicting OpenAPI path configuration for {for_path!r}. "
+ f"{', '.join(f'{a} != {b}' for a, b in differing_values)}"
+ )
+
+ return dataclasses.replace(
+ source,
+ get=source.get or other.get,
+ post=source.post or other.post,
+ patch=source.patch or other.patch,
+ put=source.put or other.put,
+ delete=source.delete or other.delete,
+ options=source.options or other.options,
+ trace=source.trace or other.trace,
+ )
diff --git a/litestar/_openapi/plugin.py b/litestar/_openapi/plugin.py
--- a/litestar/_openapi/plugin.py
+++ b/litestar/_openapi/plugin.py
@@ -3,7 +3,7 @@
from typing import TYPE_CHECKING
from litestar._openapi.datastructures import OpenAPIContext
-from litestar._openapi.path_item import create_path_item_for_route
+from litestar._openapi.path_item import create_path_item_for_route, merge_path_item_operations
from litestar.exceptions import ImproperlyConfiguredException
from litestar.plugins import InitPluginProtocol
from litestar.plugins.base import ReceiveRoutePlugin
@@ -13,7 +13,7 @@
from litestar.app import Litestar
from litestar.config.app import AppConfig
from litestar.openapi.config import OpenAPIConfig
- from litestar.openapi.spec import OpenAPI
+ from litestar.openapi.spec import OpenAPI, PathItem
from litestar.routes import BaseRoute
@@ -41,10 +41,15 @@ def _build_openapi_schema(self) -> OpenAPI:
openapi = openapi_config.to_openapi_schema()
context = OpenAPIContext(openapi_config=openapi_config, plugins=self.app.plugins.openapi)
- openapi.paths = {
- route.path_format or "/": create_path_item_for_route(context, route)
- for route in self.included_routes.values()
- }
+ path_items: dict[str, PathItem] = {}
+ for route in self.included_routes.values():
+ path = route.path_format or "/"
+ path_item = create_path_item_for_route(context, route)
+ if existing_path_item := path_items.get(path):
+ path_item = merge_path_item_operations(existing_path_item, path_item, for_path=path)
+ path_items[path] = path_item
+
+ openapi.paths = path_items
openapi.components.schemas = context.schema_registry.generate_components_schemas()
return openapi
| diff --git a/tests/unit/test_openapi/test_path_item.py b/tests/unit/test_openapi/test_path_item.py
--- a/tests/unit/test_openapi/test_path_item.py
+++ b/tests/unit/test_openapi/test_path_item.py
@@ -1,19 +1,21 @@
from __future__ import annotations
+import dataclasses
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, Callable, cast
+from unittest.mock import MagicMock
import pytest
from typing_extensions import TypeAlias
-from litestar import Controller, Litestar, Request, Router, delete, get
+from litestar import Controller, HttpMethod, Litestar, Request, Router, delete, get
from litestar._openapi.datastructures import OpenAPIContext
-from litestar._openapi.path_item import PathItemFactory
+from litestar._openapi.path_item import PathItemFactory, merge_path_item_operations
from litestar._openapi.utils import default_operation_id_creator
from litestar.exceptions import ImproperlyConfiguredException
from litestar.handlers.http_handlers import HTTPRouteHandler
from litestar.openapi.config import OpenAPIConfig
-from litestar.openapi.spec import Operation
+from litestar.openapi.spec import Operation, PathItem
from litestar.utils import find_index
if TYPE_CHECKING:
@@ -215,3 +217,30 @@ def handler_2() -> None: ...
schema = factory.create_path_item()
assert schema.get
assert schema.delete is None
+
+
[email protected]("method", HttpMethod)
+def test_merge_path_item_operations_operation_set_on_both_raises(method: HttpMethod) -> None:
+ with pytest.raises(ValueError, match="Cannot merge operation"):
+ merge_path_item_operations(
+ PathItem(**{method.value.lower(): MagicMock()}),
+ PathItem(**{method.value.lower(): MagicMock()}),
+ for_path="/",
+ )
+
+
[email protected](
+ "attr",
+ [
+ f.name
+ for f in dataclasses.fields(PathItem)
+ if f.name.upper()
+ not in [
+ *HttpMethod,
+ "TRACE", # remove once https://github.com/litestar-org/litestar/pull/3294 is merged
+ ]
+ ],
+)
+def test_merge_path_item_operation_differing_values_raises(attr: str) -> None:
+ with pytest.raises(ImproperlyConfiguredException, match="Conflicting OpenAPI path configuration for '/'"):
+ merge_path_item_operations(PathItem(), PathItem(**{attr: MagicMock()}), for_path="/")
diff --git a/tests/unit/test_openapi/test_schema.py b/tests/unit/test_openapi/test_schema.py
--- a/tests/unit/test_openapi/test_schema.py
+++ b/tests/unit/test_openapi/test_schema.py
@@ -23,14 +23,14 @@
from msgspec import Struct
from typing_extensions import Annotated, TypeAlias
-from litestar import Controller, MediaType, get
+from litestar import Controller, MediaType, get, post
from litestar._openapi.schema_generation.plugins import openapi_schema_plugins
from litestar._openapi.schema_generation.schema import (
KWARG_DEFINITION_ATTRIBUTE_TO_OPENAPI_PROPERTY_MAP,
SchemaCreator,
)
from litestar._openapi.schema_generation.utils import _get_normalized_schema_key, _type_or_first_not_none_inner_type
-from litestar.app import DEFAULT_OPENAPI_CONFIG
+from litestar.app import DEFAULT_OPENAPI_CONFIG, Litestar
from litestar.di import Provide
from litestar.enums import ParamType
from litestar.openapi.spec import ExternalDocumentation, OpenAPIType, Reference
@@ -570,3 +570,19 @@ def test_default_not_provided_for_kwarg_but_for_field() -> None:
schema = get_schema_for_field_definition(field_definition)
assert schema.default == 10
+
+
+def test_routes_with_different_path_param_types_get_merged() -> None:
+ @get("/{param:int}")
+ async def get_handler(param: int) -> None:
+ pass
+
+ @post("/{param:str}")
+ async def post_handler(param: str) -> None:
+ pass
+
+ app = Litestar([get_handler, post_handler])
+ assert app.openapi_schema.paths
+ paths = app.openapi_schema.paths["/{param}"]
+ assert paths.get is not None
+ assert paths.post is not None
| Bug: route not recognized in schemas for specific types
### Description
Currently some parameters types don't show in the docs. It seems `int` works fine, while `str` and `uuid` don't.
This is silent, no logs are shown, running `debug` mode. Same behavior for both `/schema/elements` and `/schema/swagger`.
### URL to code causing the issue
_No response_
### MCVE
```python
(Uncomment one at a time)
class BugRoutes(Controller):
tags = ["Bugs"]
path = "/bugs"
dependencies = dict(context=Provide(route_context))
@routes.get()
def get_bugs(self, context: AppContext) -> Response:
return Response({})
@routes.post()
def create_bug(self, context: AppContext, data: Any) -> Response:
return Response({})
# This works
# @routes.get("/{param:int}")
# def get_bug(self, context: AppContext, param: int) -> Response:
# return Response({})
# This doesn't work (not showing on docs)
# @routes.get("/{param:str}")
# def get_bug_str(self, context: AppContext, param: str) -> Response:
# return Response({})
# This doesn't work (not showing on docs)
# @routes.get("/{param:uuid}")
# def get_bug_uuid(self, context: AppContext, param: UUID) -> Response:
# return Response({})
@routes.patch("/{param:int}")
def update_bug(self, context: AppContext, param: int) -> Response:
return Response({})
@routes.delete("/{param:int}")
def delete_bug(self, context: AppContext, param: int) -> None:
return Response({})
```
### Steps to reproduce
_No response_
### Screenshots
_No response_
### Logs
_No response_
### Litestar Version
2.3.2
### Platform
- [X] Mac
- [ ] Linux
- [ ] Windows
- [ ] Other (Please specify in the description above)
<!-- POLAR PLEDGE BADGE START -->
---
> [!NOTE]
> While we are open for sponsoring on [GitHub Sponsors](https://github.com/sponsors/litestar-org/) and
> [OpenCollective](https://opencollective.com/litestar), we also utilize [Polar.sh](https://polar.sh/) to engage in pledge-based sponsorship.
>
> Check out all issues funded or available for funding [on our Polar.sh Litestar dashboard](https://polar.sh/litestar-org)
> * If you would like to see an issue prioritized, make a pledge towards it!
> * We receive the pledge once the issue is completed & verified
> * This, along with engagement in the community, helps us know which features are a priority to our users.
<a href="https://polar.sh/litestar-org/litestar/issues/2700">
<picture>
<source media="(prefers-color-scheme: dark)" srcset="https://polar.sh/api/github/litestar-org/litestar/issues/2700/pledge.svg?darkmode=1">
<img alt="Fund with Polar" src="https://polar.sh/api/github/litestar-org/litestar/issues/2700/pledge.svg">
</picture>
</a>
<!-- POLAR PLEDGE BADGE END -->
| The behavior is weird, if I have one route with `{param:int}` then all the routes using `{param:uuid}` stop showing and only the `{param:int}` ones show. However if all my routes are `{param:uuid}` then all of them appear.
This happened during development (it's not a common scenario in my case to have routes with multiple param types), so it might the case where the conversion from route definition to schema is not "atomic" and one route definition affects the other? If that was the case, there is some sort of implicit priority rule when building the schema, which is also unexpected.
It does appear to be the case where certain routes are not shown in the schema but are indeed accessible (can serve the request to their path).
Posting a slightly modified, runnable version of their code that demonstrates this
```python
from uuid import UUID
from litestar import Controller, Litestar, get, post, patch, delete
class BugRoutes(Controller):
path = "/bugs"
@get()
async def get_bugs(self) -> None:
return None
@post()
async def create_bug(self, data: dict[str, str]) -> None:
return None
# This works
# @get("/{param:int}")
# async def get_bug(self, param: int) -> None:
# return None
# This doesn't work (not showing on docs)
# @get("/{param:str}")
# async def get_bug_str(self, param: str) -> None:
# return None
# This doesn't work (not showing on docs)
# @get("/{param:uuid}")
# async def get_bug_uuid(self, param: UUID) -> None:
# return None
@patch("/{param:int}")
async def update_bug(self, param: int) -> None:
return None
@delete("/{param:int}")
async def delete_bug(self, param: int) -> None:
return None
app = Litestar([BugRoutes], debug=True)
```
@Alc-Alc did you make any progress here?
> @Alc-Alc did you make any progress here?
sorry, missed this notification. Sadly no progress since then. I will try to give it a second look today
@Alc-Alc this seems to be related to #2967?
> @Alc-Alc this seems to be related to #2967?
I just ran this on that branch where I fixed that and this is still an issue
@Alc-Alc @ccrvlh I can currently not reproduce this behaviour, or I do not understand what exactly the bug is here. Running the examples provided and looking at the generated schema and UI, everything seems to be working as expected. Can you confirm that this bug still exists?
Okay, I've managed to reproduce with this:
```python
from litestar import get, post, Litestar
@get("/{param:int}")
async def get_handler() -> None:
pass
@post("/{param:int}")
async def post_handler() -> None:
pass
app = Litestar([get_handler, post_handler])
paths = app.openapi_schema.paths["/{param}"]
assert paths.get
assert paths.post
``` | 2024-03-30T15:14:33 |
litestar-org/litestar | 3,295 | litestar-org__litestar-3295 | [
"3290"
] | 576bae979cdcda2c77fb1c2bac93a9039d85b530 | diff --git a/litestar/_openapi/parameters.py b/litestar/_openapi/parameters.py
--- a/litestar/_openapi/parameters.py
+++ b/litestar/_openapi/parameters.py
@@ -96,7 +96,7 @@ def __init__(
self.parameters = ParameterCollection(route_handler)
self.dependency_providers = route_handler.resolve_dependencies()
self.layered_parameters = route_handler.resolve_layered_parameters()
- self.path_parameters_names = {p.name for p in path_parameters}
+ self.path_parameters: dict[str, PathParameterDefinition] = {p.name: p for p in path_parameters}
def create_parameter(self, field_definition: FieldDefinition, parameter_name: str) -> Parameter:
"""Create an OpenAPI Parameter instance for a field definition.
@@ -111,7 +111,7 @@ def create_parameter(self, field_definition: FieldDefinition, parameter_name: st
field_definition.kwarg_definition if isinstance(field_definition.kwarg_definition, ParameterKwarg) else None
)
- if parameter_name in self.path_parameters_names:
+ if parameter_name in self.path_parameters:
param_in = ParamType.PATH
is_required = True
result = self.schema_creator.for_field_definition(field_definition)
@@ -215,6 +215,17 @@ def create_parameters_for_field_definitions(self, fields: dict[str, FieldDefinit
def create_parameters_for_handler(self) -> list[Parameter]:
"""Create a list of path/query/header Parameter models for the given PathHandler."""
handler_fields = self.route_handler.parsed_fn_signature.parameters
+ # not all path parameters have to be consumed by the handler. Because even not
+ # consumed path parameters must still be specified, we create stub parameters
+ # for the unconsumed ones so a correct OpenAPI schema can be generated
+ params_not_consumed_by_handler = set(self.path_parameters) - handler_fields.keys()
+ handler_fields.update(
+ {
+ param_name: FieldDefinition.from_kwarg(self.path_parameters[param_name].type, name=param_name)
+ for param_name in params_not_consumed_by_handler
+ }
+ )
+
self.create_parameters_for_field_definitions(handler_fields)
return self.parameters.list()
| diff --git a/tests/unit/test_openapi/test_schema.py b/tests/unit/test_openapi/test_schema.py
--- a/tests/unit/test_openapi/test_schema.py
+++ b/tests/unit/test_openapi/test_schema.py
@@ -35,6 +35,7 @@
from litestar.enums import ParamType
from litestar.openapi.spec import ExternalDocumentation, OpenAPIType, Reference
from litestar.openapi.spec.example import Example
+from litestar.openapi.spec.parameter import Parameter as OpenAPIParameter
from litestar.openapi.spec.schema import Schema
from litestar.pagination import ClassicPagination, CursorPagination, OffsetPagination
from litestar.params import KwargDefinition, Parameter, ParameterKwarg
@@ -573,6 +574,7 @@ def test_default_not_provided_for_kwarg_but_for_field() -> None:
def test_routes_with_different_path_param_types_get_merged() -> None:
+ # https://github.com/litestar-org/litestar/issues/2700
@get("/{param:int}")
async def get_handler(param: int) -> None:
pass
@@ -586,3 +588,20 @@ async def post_handler(param: str) -> None:
paths = app.openapi_schema.paths["/{param}"]
assert paths.get is not None
assert paths.post is not None
+
+
+def test_unconsumed_path_parameters_are_documented() -> None:
+ # https://github.com/litestar-org/litestar/issues/3290
+ @get("/{param:str}")
+ async def handler() -> None:
+ pass
+
+ app = Litestar([handler])
+ params = app.openapi_schema.paths["/{param}"].get.parameters # type: ignore[index, union-attr]
+ assert params
+ assert len(params) == 1
+ param = params[0]
+ assert isinstance(param, OpenAPIParameter)
+ assert param.name == "param"
+ assert param.required is True
+ assert param.param_in is ParamType.PATH
diff --git a/tests/unit/test_openapi/test_typescript_converter/test_converter.py b/tests/unit/test_openapi/test_typescript_converter/test_converter.py
--- a/tests/unit/test_openapi/test_typescript_converter/test_converter.py
+++ b/tests/unit/test_openapi/test_typescript_converter/test_converter.py
@@ -15,442 +15,475 @@ def test_openapi_to_typescript_converter(person_controller: Type[Controller], pe
result = convert_openapi_to_typescript(openapi_schema=app.openapi_schema)
assert (
- result.write().replace("\t", " ") == "export namespace API {\n"
- "\texport namespace PetOwnerOrPetGetPetsOrOwners {\n"
- "\texport namespace Http200 {\n"
- "\texport type ResponseBody = ({\n"
- "\tage: number;\n"
- "\tname: string;\n"
- '\tspecies?: "Cat" | "Dog" | "Monkey" | "Pig";\n'
- "} | {\n"
- "\tcomplex: {\n"
- "\t\n"
- "};\n"
- "\tfirst_name: string;\n"
- "\tid: string;\n"
- "\tlast_name: string;\n"
- "\toptional?: null | string;\n"
- "\tpets?: null | {\n"
- "\tage: number;\n"
- "\tname: string;\n"
- '\tspecies?: "Cat" | "Dog" | "Monkey" | "Pig";\n'
- "}[];\n"
- "})[];\n"
- "\n"
- "\texport interface ResponseHeaders {\n"
- '\t"x-my-tag"?: string;\n'
- "};\n"
- "};\n"
- "\n"
- "\texport namespace Http406 {\n"
- "\texport type ResponseBody = {\n"
- "\tdetail: string;\n"
- "\textra?: Record<string, unknown> | null | unknown[];\n"
- "\tstatus_code: number;\n"
- "};\n"
- "};\n"
- "};\n"
- "\n"
- "\texport namespace PetPets {\n"
- "\texport namespace Http200 {\n"
- "\texport type ResponseBody = {\n"
- "\tage: number;\n"
- "\tname: string;\n"
- '\tspecies?: "Cat" | "Dog" | "Monkey" | "Pig";\n'
- "}[];\n"
- "};\n"
- "};\n"
- "\n"
- "\texport namespace ServiceIdPersonBulkBulkCreatePerson {\n"
- "\texport interface HeaderParameters {\n"
- "\tsecret: string;\n"
- "};\n"
- "\n"
- "\texport namespace Http201 {\n"
- "\texport type ResponseBody = {\n"
- "\tcomplex: {\n"
- "\t\n"
- "};\n"
- "\tfirst_name: string;\n"
- "\tid: string;\n"
- "\tlast_name: string;\n"
- "\toptional: null | string;\n"
- "\tpets: null | {\n"
- "\tage: number;\n"
- "\tname: string;\n"
- '\tspecies: "Cat" | "Dog" | "Monkey" | "Pig";\n'
- "}[];\n"
- "}[];\n"
- "};\n"
- "\n"
- "\texport namespace Http400 {\n"
- "\texport type ResponseBody = {\n"
- "\tdetail: string;\n"
- "\textra?: Record<string, unknown> | null | unknown[];\n"
- "\tstatus_code: number;\n"
- "};\n"
- "};\n"
- "\n"
- "\texport type RequestBody = {\n"
- "\tcomplex: {\n"
- "\t\n"
- "};\n"
- "\tfirst_name: string;\n"
- "\tid: string;\n"
- "\tlast_name: string;\n"
- "\toptional: null | string;\n"
- "\tpets: null | {\n"
- "\tage: number;\n"
- "\tname: string;\n"
- '\tspecies: "Cat" | "Dog" | "Monkey" | "Pig";\n'
- "}[];\n"
- "}[];\n"
- "};\n"
- "\n"
- "\texport namespace ServiceIdPersonBulkBulkPartialUpdatePerson {\n"
- "\texport interface HeaderParameters {\n"
- "\tsecret: string;\n"
- "};\n"
- "\n"
- "\texport namespace Http200 {\n"
- "\texport type ResponseBody = {\n"
- "\tcomplex: {\n"
- "\t\n"
- "};\n"
- "\tfirst_name: string;\n"
- "\tid: string;\n"
- "\tlast_name: string;\n"
- "\toptional: null | string;\n"
- "\tpets: null | {\n"
- "\tage: number;\n"
- "\tname: string;\n"
- '\tspecies: "Cat" | "Dog" | "Monkey" | "Pig";\n'
- "}[];\n"
- "}[];\n"
- "};\n"
- "\n"
- "\texport namespace Http400 {\n"
- "\texport type ResponseBody = {\n"
- "\tdetail: string;\n"
- "\textra?: Record<string, unknown> | null | unknown[];\n"
- "\tstatus_code: number;\n"
- "};\n"
- "};\n"
- "\n"
- "\texport type RequestBody = {\n"
- "\tcomplex: {\n"
- "\t\n"
- "};\n"
- "\tfirst_name: string;\n"
- "\tid: string;\n"
- "\tlast_name: string;\n"
- "\toptional: null | string;\n"
- "\tpets: null | {\n"
- "\tage: number;\n"
- "\tname: string;\n"
- '\tspecies: "Cat" | "Dog" | "Monkey" | "Pig";\n'
- "}[];\n"
- "}[];\n"
- "};\n"
- "\n"
- "\texport namespace ServiceIdPersonBulkBulkUpdatePerson {\n"
- "\texport interface HeaderParameters {\n"
- "\tsecret: string;\n"
- "};\n"
- "\n"
- "\texport namespace Http200 {\n"
- "\texport type ResponseBody = {\n"
- "\tcomplex: {\n"
- "\t\n"
- "};\n"
- "\tfirst_name: string;\n"
- "\tid: string;\n"
- "\tlast_name: string;\n"
- "\toptional?: null | string;\n"
- "\tpets?: null | {\n"
- "\tage: number;\n"
- "\tname: string;\n"
- '\tspecies?: "Cat" | "Dog" | "Monkey" | "Pig";\n'
- "}[];\n"
- "}[];\n"
- "};\n"
- "\n"
- "\texport namespace Http400 {\n"
- "\texport type ResponseBody = {\n"
- "\tdetail: string;\n"
- "\textra?: Record<string, unknown> | null | unknown[];\n"
- "\tstatus_code: number;\n"
- "};\n"
- "};\n"
- "\n"
- "\texport type RequestBody = {\n"
- "\tcomplex: {\n"
- "\t\n"
- "};\n"
- "\tfirst_name: string;\n"
- "\tid: string;\n"
- "\tlast_name: string;\n"
- "\toptional?: null | string;\n"
- "\tpets?: null | {\n"
- "\tage: number;\n"
- "\tname: string;\n"
- '\tspecies?: "Cat" | "Dog" | "Monkey" | "Pig";\n'
- "}[];\n"
- "}[];\n"
- "};\n"
- "\n"
- "\texport namespace ServiceIdPersonCreatePerson {\n"
- "\texport interface HeaderParameters {\n"
- "\tsecret: string;\n"
- "};\n"
- "\n"
- "\texport namespace Http201 {\n"
- "\texport type ResponseBody = {\n"
- "\tcomplex: {\n"
- "\t\n"
- "};\n"
- "\tfirst_name: string;\n"
- "\tid: string;\n"
- "\tlast_name: string;\n"
- "\toptional?: null | string;\n"
- "\tpets?: null | {\n"
- "\tage: number;\n"
- "\tname: string;\n"
- '\tspecies?: "Cat" | "Dog" | "Monkey" | "Pig";\n'
- "}[];\n"
- "};\n"
- "};\n"
- "\n"
- "\texport namespace Http400 {\n"
- "\texport type ResponseBody = {\n"
- "\tdetail: string;\n"
- "\textra?: Record<string, unknown> | null | unknown[];\n"
- "\tstatus_code: number;\n"
- "};\n"
- "};\n"
- "\n"
- "\texport type RequestBody = {\n"
- "\tcomplex: {\n"
- "\t\n"
- "};\n"
- "\tfirst_name: string;\n"
- "\tid: string;\n"
- "\tlast_name: string;\n"
- "\toptional?: null | string;\n"
- "\tpets?: null | {\n"
- "\tage: number;\n"
- "\tname: string;\n"
- '\tspecies?: "Cat" | "Dog" | "Monkey" | "Pig";\n'
- "}[];\n"
- "};\n"
- "};\n"
- "\n"
- "\texport namespace ServiceIdPersonDataclassGetPersonDataclass {\n"
- "\texport namespace Http200 {\n"
- "\texport type ResponseBody = {\n"
- "\tcomplex: {\n"
- "\t\n"
- "};\n"
- "\tfirst_name: string;\n"
- "\tid: string;\n"
- "\tlast_name: string;\n"
- "\toptional?: null | string;\n"
- "\tpets?: null | {\n"
- "\tage: number;\n"
- "\tname: string;\n"
- '\tspecies?: "Cat" | "Dog" | "Monkey" | "Pig";\n'
- "}[];\n"
- "};\n"
- "};\n"
- "};\n"
- "\n"
- "\texport namespace ServiceIdPersonGetPersons {\n"
- "\texport interface CookieParameters {\n"
- "\tvalue: number;\n"
- "};\n"
- "\n"
- "\texport interface HeaderParameters {\n"
- "\tsecret: string;\n"
- "};\n"
- "\n"
- "\texport namespace Http200 {\n"
- "\texport type ResponseBody = {\n"
- "\tcomplex: {\n"
- "\t\n"
- "};\n"
- "\tfirst_name: string;\n"
- "\tid: string;\n"
- "\tlast_name: string;\n"
- "\toptional?: null | string;\n"
- "\tpets?: null | {\n"
- "\tage: number;\n"
- "\tname: string;\n"
- '\tspecies?: "Cat" | "Dog" | "Monkey" | "Pig";\n'
- "}[];\n"
- "}[];\n"
- "};\n"
- "\n"
- "\texport namespace Http400 {\n"
- "\texport type ResponseBody = {\n"
- "\tdetail: string;\n"
- "\textra?: Record<string, unknown> | null | unknown[];\n"
- "\tstatus_code: number;\n"
- "};\n"
- "};\n"
- "\n"
- "\texport interface PathParameters {\n"
- "\tservice_id: number;\n"
- "};\n"
- "\n"
- "\texport interface QueryParameters {\n"
- "\tfrom_date?: null | number | string | string;\n"
- '\tgender?: "A" | "F" | "M" | "O" | ("A" | "F" | "M" | "O")[] | null;\n'
- "\tname?: null | string | string[];\n"
- "\tpage: number;\n"
- "\tpageSize: number;\n"
- "\tto_date?: null | number | string | string;\n"
- "};\n"
- "};\n"
- "\n"
- "\texport namespace ServiceIdPersonPersonIdDeletePerson {\n"
- "\texport namespace Http204 {\n"
- "\texport type ResponseBody = undefined;\n"
- "};\n"
- "\n"
- "\texport namespace Http400 {\n"
- "\texport type ResponseBody = {\n"
- "\tdetail: string;\n"
- "\textra?: Record<string, unknown> | null | unknown[];\n"
- "\tstatus_code: number;\n"
- "};\n"
- "};\n"
- "\n"
- "\texport interface PathParameters {\n"
- "\tperson_id: string;\n"
- "};\n"
- "};\n"
- "\n"
- "\texport namespace ServiceIdPersonPersonIdGetPersonById {\n"
- "\texport namespace Http200 {\n"
- "\texport type ResponseBody = {\n"
- "\tcomplex: {\n"
- "\t\n"
- "};\n"
- "\tfirst_name: string;\n"
- "\tid: string;\n"
- "\tlast_name: string;\n"
- "\toptional?: null | string;\n"
- "\tpets?: null | {\n"
- "\tage: number;\n"
- "\tname: string;\n"
- '\tspecies?: "Cat" | "Dog" | "Monkey" | "Pig";\n'
- "}[];\n"
- "};\n"
- "};\n"
- "\n"
- "\texport namespace Http400 {\n"
- "\texport type ResponseBody = {\n"
- "\tdetail: string;\n"
- "\textra?: Record<string, unknown> | null | unknown[];\n"
- "\tstatus_code: number;\n"
- "};\n"
- "};\n"
- "\n"
- "\texport interface PathParameters {\n"
- "\tperson_id: string;\n"
- "};\n"
- "};\n"
- "\n"
- "\texport namespace ServiceIdPersonPersonIdPartialUpdatePerson {\n"
- "\texport namespace Http200 {\n"
- "\texport type ResponseBody = {\n"
- "\tcomplex: {\n"
- "\t\n"
- "};\n"
- "\tfirst_name: string;\n"
- "\tid: string;\n"
- "\tlast_name: string;\n"
- "\toptional: null | string;\n"
- "\tpets: null | {\n"
- "\tage: number;\n"
- "\tname: string;\n"
- '\tspecies: "Cat" | "Dog" | "Monkey" | "Pig";\n'
- "}[];\n"
- "};\n"
- "};\n"
- "\n"
- "\texport namespace Http400 {\n"
- "\texport type ResponseBody = {\n"
- "\tdetail: string;\n"
- "\textra?: Record<string, unknown> | null | unknown[];\n"
- "\tstatus_code: number;\n"
- "};\n"
- "};\n"
- "\n"
- "\texport interface PathParameters {\n"
- "\tperson_id: string;\n"
- "};\n"
- "\n"
- "\texport type RequestBody = {\n"
- "\tcomplex: {\n"
- "\t\n"
- "};\n"
- "\tfirst_name: string;\n"
- "\tid: string;\n"
- "\tlast_name: string;\n"
- "\toptional: null | string;\n"
- "\tpets: null | {\n"
- "\tage: number;\n"
- "\tname: string;\n"
- '\tspecies: "Cat" | "Dog" | "Monkey" | "Pig";\n'
- "}[];\n"
- "};\n"
- "};\n"
- "\n"
- "\texport namespace ServiceIdPersonPersonIdUpdatePerson {\n"
- "\texport namespace Http200 {\n"
- "\texport type ResponseBody = {\n"
- "\tcomplex: {\n"
- "\t\n"
- "};\n"
- "\tfirst_name: string;\n"
- "\tid: string;\n"
- "\tlast_name: string;\n"
- "\toptional?: null | string;\n"
- "\tpets?: null | {\n"
- "\tage: number;\n"
- "\tname: string;\n"
- '\tspecies?: "Cat" | "Dog" | "Monkey" | "Pig";\n'
- "}[];\n"
- "};\n"
- "};\n"
- "\n"
- "\texport namespace Http400 {\n"
- "\texport type ResponseBody = {\n"
- "\tdetail: string;\n"
- "\textra?: Record<string, unknown> | null | unknown[];\n"
- "\tstatus_code: number;\n"
- "};\n"
- "};\n"
- "\n"
- "\texport interface PathParameters {\n"
- "\tperson_id: string;\n"
- "};\n"
- "\n"
- "\texport type RequestBody = {\n"
- "\tcomplex: {\n"
- "\t\n"
- "};\n"
- "\tfirst_name: string;\n"
- "\tid: string;\n"
- "\tlast_name: string;\n"
- "\toptional?: null | string;\n"
- "\tpets?: null | {\n"
- "\tage: number;\n"
- "\tname: string;\n"
- '\tspecies?: "Cat" | "Dog" | "Monkey" | "Pig";\n'
- "}[];\n"
- "};\n"
- "};\n"
- "};"
+ result.write()
+ == """export namespace API {
+ export namespace PetOwnerOrPetGetPetsOrOwners {
+ export namespace Http200 {
+ export type ResponseBody = ({
+ age: number;
+ name: string;
+ species?: "Cat" | "Dog" | "Monkey" | "Pig";
+} | {
+ complex: {
+
+};
+ first_name: string;
+ id: string;
+ last_name: string;
+ optional?: null | string;
+ pets?: null | {
+ age: number;
+ name: string;
+ species?: "Cat" | "Dog" | "Monkey" | "Pig";
+}[];
+})[];
+
+ export interface ResponseHeaders {
+ "x-my-tag"?: string;
+};
+};
+
+ export namespace Http406 {
+ export type ResponseBody = {
+ detail: string;
+ extra?: Record<string, unknown> | null | unknown[];
+ status_code: number;
+};
+};
+};
+
+ export namespace PetPets {
+ export namespace Http200 {
+ export type ResponseBody = {
+ age: number;
+ name: string;
+ species?: "Cat" | "Dog" | "Monkey" | "Pig";
+}[];
+};
+};
+
+ export namespace ServiceIdPersonBulkBulkCreatePerson {
+ export interface HeaderParameters {
+ secret: string;
+};
+
+ export namespace Http201 {
+ export type ResponseBody = {
+ complex: {
+
+};
+ first_name: string;
+ id: string;
+ last_name: string;
+ optional: null | string;
+ pets: null | {
+ age: number;
+ name: string;
+ species: "Cat" | "Dog" | "Monkey" | "Pig";
+}[];
+}[];
+};
+
+ export namespace Http400 {
+ export type ResponseBody = {
+ detail: string;
+ extra?: Record<string, unknown> | null | unknown[];
+ status_code: number;
+};
+};
+
+ export interface PathParameters {
+ service_id: number;
+};
+
+ export type RequestBody = {
+ complex: {
+
+};
+ first_name: string;
+ id: string;
+ last_name: string;
+ optional: null | string;
+ pets: null | {
+ age: number;
+ name: string;
+ species: "Cat" | "Dog" | "Monkey" | "Pig";
+}[];
+}[];
+};
+
+ export namespace ServiceIdPersonBulkBulkPartialUpdatePerson {
+ export interface HeaderParameters {
+ secret: string;
+};
+
+ export namespace Http200 {
+ export type ResponseBody = {
+ complex: {
+
+};
+ first_name: string;
+ id: string;
+ last_name: string;
+ optional: null | string;
+ pets: null | {
+ age: number;
+ name: string;
+ species: "Cat" | "Dog" | "Monkey" | "Pig";
+}[];
+}[];
+};
+
+ export namespace Http400 {
+ export type ResponseBody = {
+ detail: string;
+ extra?: Record<string, unknown> | null | unknown[];
+ status_code: number;
+};
+};
+
+ export interface PathParameters {
+ service_id: number;
+};
+
+ export type RequestBody = {
+ complex: {
+
+};
+ first_name: string;
+ id: string;
+ last_name: string;
+ optional: null | string;
+ pets: null | {
+ age: number;
+ name: string;
+ species: "Cat" | "Dog" | "Monkey" | "Pig";
+}[];
+}[];
+};
+
+ export namespace ServiceIdPersonBulkBulkUpdatePerson {
+ export interface HeaderParameters {
+ secret: string;
+};
+
+ export namespace Http200 {
+ export type ResponseBody = {
+ complex: {
+
+};
+ first_name: string;
+ id: string;
+ last_name: string;
+ optional?: null | string;
+ pets?: null | {
+ age: number;
+ name: string;
+ species?: "Cat" | "Dog" | "Monkey" | "Pig";
+}[];
+}[];
+};
+
+ export namespace Http400 {
+ export type ResponseBody = {
+ detail: string;
+ extra?: Record<string, unknown> | null | unknown[];
+ status_code: number;
+};
+};
+
+ export interface PathParameters {
+ service_id: number;
+};
+
+ export type RequestBody = {
+ complex: {
+
+};
+ first_name: string;
+ id: string;
+ last_name: string;
+ optional?: null | string;
+ pets?: null | {
+ age: number;
+ name: string;
+ species?: "Cat" | "Dog" | "Monkey" | "Pig";
+}[];
+}[];
+};
+
+ export namespace ServiceIdPersonCreatePerson {
+ export interface HeaderParameters {
+ secret: string;
+};
+
+ export namespace Http201 {
+ export type ResponseBody = {
+ complex: {
+
+};
+ first_name: string;
+ id: string;
+ last_name: string;
+ optional?: null | string;
+ pets?: null | {
+ age: number;
+ name: string;
+ species?: "Cat" | "Dog" | "Monkey" | "Pig";
+}[];
+};
+};
+
+ export namespace Http400 {
+ export type ResponseBody = {
+ detail: string;
+ extra?: Record<string, unknown> | null | unknown[];
+ status_code: number;
+};
+};
+
+ export interface PathParameters {
+ service_id: number;
+};
+
+ export type RequestBody = {
+ complex: {
+
+};
+ first_name: string;
+ id: string;
+ last_name: string;
+ optional?: null | string;
+ pets?: null | {
+ age: number;
+ name: string;
+ species?: "Cat" | "Dog" | "Monkey" | "Pig";
+}[];
+};
+};
+
+ export namespace ServiceIdPersonDataclassGetPersonDataclass {
+ export namespace Http200 {
+ export type ResponseBody = {
+ complex: {
+
+};
+ first_name: string;
+ id: string;
+ last_name: string;
+ optional?: null | string;
+ pets?: null | {
+ age: number;
+ name: string;
+ species?: "Cat" | "Dog" | "Monkey" | "Pig";
+}[];
+};
+};
+
+ export namespace Http400 {
+ export type ResponseBody = {
+ detail: string;
+ extra?: Record<string, unknown> | null | unknown[];
+ status_code: number;
+};
+};
+
+ export interface PathParameters {
+ service_id: number;
+};
+};
+
+ export namespace ServiceIdPersonGetPersons {
+ export interface CookieParameters {
+ value: number;
+};
+
+ export interface HeaderParameters {
+ secret: string;
+};
+
+ export namespace Http200 {
+ export type ResponseBody = {
+ complex: {
+
+};
+ first_name: string;
+ id: string;
+ last_name: string;
+ optional?: null | string;
+ pets?: null | {
+ age: number;
+ name: string;
+ species?: "Cat" | "Dog" | "Monkey" | "Pig";
+}[];
+}[];
+};
+
+ export namespace Http400 {
+ export type ResponseBody = {
+ detail: string;
+ extra?: Record<string, unknown> | null | unknown[];
+ status_code: number;
+};
+};
+
+ export interface PathParameters {
+ service_id: number;
+};
+
+ export interface QueryParameters {
+ from_date?: null | number | string | string;
+ gender?: "A" | "F" | "M" | "O" | ("A" | "F" | "M" | "O")[] | null;
+ name?: null | string | string[];
+ page: number;
+ pageSize: number;
+ to_date?: null | number | string | string;
+};
+};
+
+ export namespace ServiceIdPersonPersonIdDeletePerson {
+ export namespace Http204 {
+ export type ResponseBody = undefined;
+};
+
+ export namespace Http400 {
+ export type ResponseBody = {
+ detail: string;
+ extra?: Record<string, unknown> | null | unknown[];
+ status_code: number;
+};
+};
+
+ export interface PathParameters {
+ person_id: string;
+ service_id: number;
+};
+};
+
+ export namespace ServiceIdPersonPersonIdGetPersonById {
+ export namespace Http200 {
+ export type ResponseBody = {
+ complex: {
+
+};
+ first_name: string;
+ id: string;
+ last_name: string;
+ optional?: null | string;
+ pets?: null | {
+ age: number;
+ name: string;
+ species?: "Cat" | "Dog" | "Monkey" | "Pig";
+}[];
+};
+};
+
+ export namespace Http400 {
+ export type ResponseBody = {
+ detail: string;
+ extra?: Record<string, unknown> | null | unknown[];
+ status_code: number;
+};
+};
+
+ export interface PathParameters {
+ person_id: string;
+ service_id: number;
+};
+};
+
+ export namespace ServiceIdPersonPersonIdPartialUpdatePerson {
+ export namespace Http200 {
+ export type ResponseBody = {
+ complex: {
+
+};
+ first_name: string;
+ id: string;
+ last_name: string;
+ optional: null | string;
+ pets: null | {
+ age: number;
+ name: string;
+ species: "Cat" | "Dog" | "Monkey" | "Pig";
+}[];
+};
+};
+
+ export namespace Http400 {
+ export type ResponseBody = {
+ detail: string;
+ extra?: Record<string, unknown> | null | unknown[];
+ status_code: number;
+};
+};
+
+ export interface PathParameters {
+ person_id: string;
+ service_id: number;
+};
+
+ export type RequestBody = {
+ complex: {
+
+};
+ first_name: string;
+ id: string;
+ last_name: string;
+ optional: null | string;
+ pets: null | {
+ age: number;
+ name: string;
+ species: "Cat" | "Dog" | "Monkey" | "Pig";
+}[];
+};
+};
+
+ export namespace ServiceIdPersonPersonIdUpdatePerson {
+ export namespace Http200 {
+ export type ResponseBody = {
+ complex: {
+
+};
+ first_name: string;
+ id: string;
+ last_name: string;
+ optional?: null | string;
+ pets?: null | {
+ age: number;
+ name: string;
+ species?: "Cat" | "Dog" | "Monkey" | "Pig";
+}[];
+};
+};
+
+ export namespace Http400 {
+ export type ResponseBody = {
+ detail: string;
+ extra?: Record<string, unknown> | null | unknown[];
+ status_code: number;
+};
+};
+
+ export interface PathParameters {
+ person_id: string;
+ service_id: number;
+};
+
+ export type RequestBody = {
+ complex: {
+
+};
+ first_name: string;
+ id: string;
+ last_name: string;
+ optional?: null | string;
+ pets?: null | {
+ age: number;
+ name: string;
+ species?: "Cat" | "Dog" | "Monkey" | "Pig";
+}[];
+};
+};
+};"""
)
| Bug: Path parameters missing from OpenAPI schema when not included in handler signature
### Description
When defining a path parameter, but not using it in the handler signature, the path parameter is not documented in the OpenAPI schema. This is likely due to how we handle their extraction, which is based on the handler.
This is an issue though because, even if the parameter value is not used in the handler itself, it is still required to specify and should therefore be documented.
### URL to code causing the issue
_No response_
### MCVE
```python
@get("/{param:str}")
async def handler() -> None:
...
app = Litestar([handler])
assert app.openapi_schema.paths["/{param}"].get.parameters
```
### Steps to reproduce
_No response_
### Screenshots
_No response_
### Logs
_No response_
### Litestar Version
2.7.1
### Platform
- [X] Linux
- [ ] Mac
- [ ] Windows
- [ ] Other (Please specify in the description above)
<!-- POLAR PLEDGE BADGE START -->
---
> [!NOTE]
> While we are open for sponsoring on [GitHub Sponsors](https://github.com/sponsors/litestar-org/) and
> [OpenCollective](https://opencollective.com/litestar), we also utilize [Polar.sh](https://polar.sh/) to engage in pledge-based sponsorship.
>
> Check out all issues funded or available for funding [on our Polar.sh dashboard](https://polar.sh/litestar-org)
> * If you would like to see an issue prioritized, make a pledge towards it!
> * We receive the pledge once the issue is completed & verified
> * This, along with engagement in the community, helps us know which features are a priority to our users.
<a href="https://polar.sh/litestar-org/litestar/issues/3290">
<picture>
<source media="(prefers-color-scheme: dark)" srcset="https://polar.sh/api/github/litestar-org/litestar/issues/3290/pledge.svg?darkmode=1">
<img alt="Fund with Polar" src="https://polar.sh/api/github/litestar-org/litestar/issues/3290/pledge.svg">
</picture>
</a>
<!-- POLAR PLEDGE BADGE END -->
| 2024-03-30T17:51:39 |
|
litestar-org/litestar | 3,314 | litestar-org__litestar-3314 | [
"2334",
"3047"
] | 61971371792d727c0682617c421ffbd06bcb715e | diff --git a/litestar/app.py b/litestar/app.py
--- a/litestar/app.py
+++ b/litestar/app.py
@@ -198,6 +198,7 @@ def __init__(
openapi_config: OpenAPIConfig | None = DEFAULT_OPENAPI_CONFIG,
opt: Mapping[str, Any] | None = None,
parameters: ParametersMap | None = None,
+ path: str | None = None,
plugins: Sequence[PluginProtocol] | None = None,
request_class: type[Request] | None = None,
response_cache_config: ResponseCacheConfig | None = None,
@@ -277,6 +278,10 @@ def __init__(
:class:`ASGI Scope <.types.Scope>`.
parameters: A mapping of :class:`Parameter <.params.Parameter>` definitions available to all application
paths.
+ path: A path fragment that is prefixed to all route handlers, controllers and routers associated
+ with the application instance.
+
+ .. versionadded:: 2.8.0
pdb_on_exception: Drop into the PDB when an exception occurs.
plugins: Sequence of plugins.
request_class: An optional subclass of :class:`Request <.connection.Request>` to use for http connections.
@@ -350,6 +355,7 @@ def __init__(
on_startup=list(on_startup or []),
openapi_config=openapi_config,
opt=dict(opt or {}),
+ path=path or "",
parameters=parameters or {},
pdb_on_exception=pdb_on_exception,
plugins=self._get_default_plugins(list(plugins or [])),
@@ -455,7 +461,7 @@ def __init__(
middleware=config.middleware,
opt=config.opt,
parameters=config.parameters,
- path="",
+ path=config.path,
request_class=self.request_class,
response_class=config.response_class,
response_cookies=config.response_cookies,
diff --git a/litestar/config/app.py b/litestar/config/app.py
--- a/litestar/config/app.py
+++ b/litestar/config/app.py
@@ -151,6 +151,12 @@ class AppConfig:
"""
parameters: ParametersMap = field(default_factory=dict)
"""A mapping of :class:`Parameter <.params.Parameter>` definitions available to all application paths."""
+ path: str = field(default="")
+ """A base path that prefixed to all route handlers, controllers and routers associated with the
+ application instance.
+
+ .. versionadded:: 2.8.0
+ """
pdb_on_exception: bool = field(default=False)
"""Drop into the PDB on an exception"""
plugins: list[PluginProtocol] = field(default_factory=list)
| diff --git a/litestar/testing/helpers.py b/litestar/testing/helpers.py
--- a/litestar/testing/helpers.py
+++ b/litestar/testing/helpers.py
@@ -86,6 +86,7 @@ def create_test_client(
openapi_config: OpenAPIConfig | None = DEFAULT_OPENAPI_CONFIG,
opt: Mapping[str, Any] | None = None,
parameters: ParametersMap | None = None,
+ path: str | None = None,
plugins: Sequence[PluginProtocol] | None = None,
lifespan: list[Callable[[Litestar], AbstractAsyncContextManager] | AbstractAsyncContextManager] | None = None,
raise_server_exceptions: bool = True,
@@ -201,6 +202,10 @@ def test_my_handler() -> None:
:class:`ASGI Scope <.types.Scope>`.
parameters: A mapping of :class:`Parameter <.params.Parameter>` definitions available to all application
paths.
+ path: A path fragment that is prefixed to all route handlers, controllers and routers associated
+ with the application instance.
+
+ .. versionadded:: 2.8.0
pdb_on_exception: Drop into the PDB when an exception occurs.
plugins: Sequence of plugins.
request_class: An optional subclass of :class:`Request <.connection.Request>` to use for http connections.
@@ -273,6 +278,7 @@ def test_my_handler() -> None:
openapi_config=openapi_config,
opt=opt,
parameters=parameters,
+ path=path,
pdb_on_exception=pdb_on_exception,
plugins=plugins,
request_class=request_class,
@@ -343,6 +349,7 @@ def create_async_test_client(
opt: Mapping[str, Any] | None = None,
parameters: ParametersMap | None = None,
pdb_on_exception: bool | None = None,
+ path: str | None = None,
plugins: Sequence[PluginProtocol] | None = None,
raise_server_exceptions: bool = True,
request_class: type[Request] | None = None,
@@ -456,6 +463,10 @@ async def test_my_handler() -> None:
:class:`ASGI Scope <.types.Scope>`.
parameters: A mapping of :class:`Parameter <.params.Parameter>` definitions available to all application
paths.
+ path: A path fragment that is prefixed to all route handlers, controllers and routers associated
+ with the application instance.
+
+ .. versionadded:: 2.8.0
pdb_on_exception: Drop into the PDB when an exception occurs.
plugins: Sequence of plugins.
request_class: An optional subclass of :class:`Request <.connection.Request>` to use for http connections.
@@ -527,6 +538,7 @@ async def test_my_handler() -> None:
openapi_config=openapi_config,
opt=opt,
parameters=parameters,
+ path=path,
pdb_on_exception=pdb_on_exception,
plugins=plugins,
request_class=request_class,
diff --git a/tests/unit/test_app.py b/tests/unit/test_app.py
--- a/tests/unit/test_app.py
+++ b/tests/unit/test_app.py
@@ -450,3 +450,12 @@ async def hook_b(app: Litestar) -> None:
def test_use_dto_codegen_feature_flag_warns() -> None:
with pytest.warns(LitestarWarning, match="Use of redundant experimental feature flag DTO_CODEGEN"):
Litestar(experimental_features=[ExperimentalFeatures.DTO_CODEGEN])
+
+
+def test_using_custom_path_parameter() -> None:
+ @get()
+ def my_route_handler() -> None: ...
+
+ with create_test_client(my_route_handler, path="/abc") as client:
+ response = client.get("/abc")
+ assert response.status_code == HTTP_200_OK
| Bug: Pydantic types that cannot be instantiated - deserialization
### Description
When deserializing object, Litestar checks if a given `value` matches a given `target_type`. Additional `type_decoders` can be provided to convert a type given after deserialization to the expected type.
The problem is - when using Pydantic models and PydanticDTO's - Pydantic has some types that cannot be instantiated and are really not supposed to be used elsewhere other than model declaration - for example `EmailStr`.
When deserializing an object like:
```
{
"email": "[email protected]"
}
```
using a DTO given in the MCVE -> the expected type of `email` during runtime is `EmailStr` (see screenshot). Msgspec after deserialization returns a `str` so a decoder is needed. However, [there is no way to instantiate `EmailStr`](https://github.com/pydantic/pydantic/blob/main/pydantic/networks.py#L171) - it is only meant to be used as a type annotation for a Pydantic model.
`cast` doesn't change the type at runtime it is only useful for type checkers. So such decoder is impossible.
The solution should be changing the `target_type` to `str` in the `default_deserializer` function - [line 91 of `litestar/serialization/msgspec_hooks.py`](https://github.com/litestar-org/litestar/blob/main/litestar/serialization/msgspec_hooks.py#L91) and consequently in the call to [`msgspec.json.decode` at line 187 of the same file](https://github.com/litestar-org/litestar/blob/main/litestar/serialization/msgspec_hooks.py#L187). Internally by Pydantic, they're treated as a `str` after validation is performed.
Which should be handled by the PydanticDTO. Also there's more types in Pydantic that cannot be instantiated like `ImportStr` and maybe others. The target type for these should always be they're naive counterpart.
### URL to code causing the issue
_No response_
### MCVE
```python
import logging
from litestar import Litestar, post
from litestar.contrib.pydantic import PydanticDTO
from litestar.dto import DTOConfig
from pydantic import BaseModel, EmailStr
from typing import cast
class EmailModel(BaseModel):
email: EmailStr
class EmailDTO(PydanticDTO[EmailModel]):
config = DTOConfig()
@post("/email", dto=EmailDTO)
async def accept_email(data: EmailModel) -> None:
logging.info(data)
def is_emailstr_type(obj_type: type[EmailStr]) -> bool:
return obj_type is EmailStr
def emailstr_decoder(obj_type: type[EmailStr], value: str) -> EmailStr:
# How?
return cast(EmailStr, value)
app = Litestar(
route_handlers=[accept_email], type_decoders=[(is_emailstr_type, emailstr_decoder)]
)
```
### Steps to reproduce
1. Run the Litestar app from the MCVE
2. Send POST request with the body of `{"email": "[email protected]"}` to `/email`
3. `msgspec.ValidationError` occurs
4. Also when removing `type_decoders`, a similar error appears
### Screenshots

### Logs
<details>
<summary>Full traceback</summary>
```
INFO: 127.0.0.1:58390 - "POST /email HTTP/1.1" 400 Bad Request
ERROR - 2023-09-22 21:00:43,846 - litestar - config - exception raised on http connection to route /email
Traceback (most recent call last):
Traceback (most recent call last):
File "/home/geeshta/prog/litestar/pydbug/.venv/lib/python3.11/site-packages/litestar/serialization/msgspec_hooks.py", line 187, in decode_json
return msgspec.json.decode(
^^^^^^^^^^^^^^^^^^^^
msgspec.ValidationError: decoding to str: need a bytes-like object, type found - at `$.email`
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/home/geeshta/prog/litestar/pydbug/.venv/lib/python3.11/site-packages/litestar/routes/http.py", line 184, in _get_response_data
kwargs["data"] = await kwargs["data"]
^^^^^^^^^^^^^^^^^^^^
File "/home/geeshta/prog/litestar/pydbug/.venv/lib/python3.11/site-packages/litestar/_kwargs/extractors.py", line 427, in dto_extractor
return data_dto(connection).decode_bytes(body)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/geeshta/prog/litestar/pydbug/.venv/lib/python3.11/site-packages/litestar/dto/base_dto.py", line 96, in decode_bytes
return backend.populate_data_from_raw(value, self.asgi_connection)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/geeshta/prog/litestar/pydbug/.venv/lib/python3.11/site-packages/litestar/dto/_backend.py", line 299, in populate_data_from_raw
source_data=self.parse_raw(raw, asgi_connection),
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/geeshta/prog/litestar/pydbug/.venv/lib/python3.11/site-packages/litestar/dto/_backend.py", line 206, in parse_raw
result = decode_json(value=raw, target_type=self.annotation, type_decoders=type_decoders)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/geeshta/prog/litestar/pydbug/.venv/lib/python3.11/site-packages/litestar/serialization/msgspec_hooks.py", line 191, in decode_json
raise SerializationException(str(msgspec_error)) from msgspec_error
litestar.exceptions.base_exceptions.SerializationException: decoding to str: need a bytes-like object, type found - at `$.email`
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/home/geeshta/prog/litestar/pydbug/.venv/lib/python3.11/site-packages/litestar/middleware/exceptions/middleware.py", line 191, in __call__
await self.app(scope, receive, send)
File "/home/geeshta/prog/litestar/pydbug/.venv/lib/python3.11/site-packages/litestar/routes/http.py", line 79, in handle
response = await self._get_response_for_request(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/geeshta/prog/litestar/pydbug/.venv/lib/python3.11/site-packages/litestar/routes/http.py", line 131, in _get_response_for_request
response = await self._call_handler_function(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/geeshta/prog/litestar/pydbug/.venv/lib/python3.11/site-packages/litestar/routes/http.py", line 160, in _call_handler_function
response_data, cleanup_group = await self._get_response_data(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/geeshta/prog/litestar/pydbug/.venv/lib/python3.11/site-packages/litestar/routes/http.py", line 186, in _get_response_data
raise ClientException(str(e)) from e
litestar.exceptions.http_exceptions.ClientException: 400: decoding to str: need a bytes-like object, type found - at `$.email`
Traceback (most recent call last):
File "/home/geeshta/prog/litestar/pydbug/.venv/lib/python3.11/site-packages/litestar/serialization/msgspec_hooks.py", line 113, in default_deserializer
return decoder(target_type, value)
^^^^^^^^^^^^^^^^^^^^^^^^^^^
TypeError: decoding to str: need a bytes-like object, type found
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/home/geeshta/prog/litestar/pydbug/.venv/lib/python3.11/site-packages/litestar/serialization/msgspec_hooks.py", line 187, in decode_json
return msgspec.json.decode(
^^^^^^^^^^^^^^^^^^^^
msgspec.ValidationError: decoding to str: need a bytes-like object, type found - at `$.email`
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/home/geeshta/prog/litestar/pydbug/.venv/lib/python3.11/site-packages/litestar/routes/http.py", line 184, in _get_response_data
kwargs["data"] = await kwargs["data"]
^^^^^^^^^^^^^^^^^^^^
File "/home/geeshta/prog/litestar/pydbug/.venv/lib/python3.11/site-packages/litestar/_kwargs/extractors.py", line 427, in dto_extractor
return data_dto(connection).decode_bytes(body)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/geeshta/prog/litestar/pydbug/.venv/lib/python3.11/site-packages/litestar/dto/base_dto.py", line 96, in decode_bytes
return backend.populate_data_from_raw(value, self.asgi_connection)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/geeshta/prog/litestar/pydbug/.venv/lib/python3.11/site-packages/litestar/dto/_backend.py", line 299, in populate_data_from_raw
source_data=self.parse_raw(raw, asgi_connection),
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/geeshta/prog/litestar/pydbug/.venv/lib/python3.11/site-packages/litestar/dto/_backend.py", line 206, in parse_raw
result = decode_json(value=raw, target_type=self.annotation, type_decoders=type_decoders)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/geeshta/prog/litestar/pydbug/.venv/lib/python3.11/site-packages/litestar/serialization/msgspec_hooks.py", line 191, in decode_json
raise SerializationException(str(msgspec_error)) from msgspec_error
litestar.exceptions.base_exceptions.SerializationException: decoding to str: need a bytes-like object, type found - at `$.email`
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/home/geeshta/prog/litestar/pydbug/.venv/lib/python3.11/site-packages/litestar/middleware/exceptions/middleware.py", line 191, in __call__
await self.app(scope, receive, send)
File "/home/geeshta/prog/litestar/pydbug/.venv/lib/python3.11/site-packages/litestar/routes/http.py", line 79, in handle
response = await self._get_response_for_request(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/geeshta/prog/litestar/pydbug/.venv/lib/python3.11/site-packages/litestar/routes/http.py", line 131, in _get_response_for_request
response = await self._call_handler_function(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/geeshta/prog/litestar/pydbug/.venv/lib/python3.11/site-packages/litestar/routes/http.py", line 160, in _call_handler_function
response_data, cleanup_group = await self._get_response_data(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/geeshta/prog/litestar/pydbug/.venv/lib/python3.11/site-packages/litestar/routes/http.py", line 186, in _get_response_data
raise ClientException(str(e)) from e
litestar.exceptions.http_exceptions.ClientException: 400: decoding to str: need a bytes-like object, type found - at `$.email`
```
</details>
### Litestar Version
2.0.1
### Platform
- [X] Linux
- [ ] Mac
- [ ] Windows
- [ ] Other (Please specify in the description above)
<!-- POLAR PLEDGE BADGE START -->
---
## Funding
* If you would like to see an issue prioritized, make a pledge towards it!
* We receive the pledge once the issue is completed & verified
<a href="https://polar.sh/litestar-org/litestar/issues/2334">
<picture>
<source media="(prefers-color-scheme: dark)" srcset="https://polar.sh/api/github/litestar-org/litestar/issues/2334/pledge.svg?darkmode=1">
<img alt="Fund with Polar" src="https://polar.sh/api/github/litestar-org/litestar/issues/2334/pledge.svg">
</picture>
</a>
<!-- POLAR PLEDGE BADGE END -->
Bug: RapiDoc and Stoplight Elements fail when used as root schema
### Description
Setting `root_schema_site="rapidoc"` or `root_schema_site="elements"` in the `OpenAPIConfig` results in an error when visiting `/schema`. Both `/schema/rapidoc` and `/schema/elements` work as expected.
My guess is that this might be related to the fact that both RapiDoc and StopLight elements [are configured with `spec-url`](https://github.com/litestar-org/litestar/blob/747fb90772721e7867f2e1ff2da6636f8442a6fb/litestar/openapi/controller.py#L494) whereas Swagger UI and Redoc HTML has the OpenAPI JSON inlined via a call to `OpenAPIController._get_schema_as_json`.
### MCVE
```python
from litestar import Litestar, get
from litestar.openapi import OpenAPIConfig
@get("/")
async def hello_world() -> str:
return "Hello world!"
app = Litestar(
route_handlers=[hello_world],
openapi_config=OpenAPIConfig(
title="Test app", version="0.0.1", root_schema_site="rapidoc"
),
)
```
### Steps to reproduce
```text
1. Run app with `litestar run`
2. Visit `localhost:8000/schema` in your browser.
```
### Screenshots
<img width="859" alt="image" src="https://github.com/litestar-org/litestar/assets/15220906/c962e9b0-acba-40bd-bf29-e0603756d91c">
### Litestar Version
2.5.0
### Platform
- [X] Linux
- [X] Mac
- [ ] Windows
- [ ] Other (Please specify in the description above)
| Schema validation of Pydantic and msgspec can't be used transparently/interchangeably: like pointed out by @geeshta, the type ```EmailStr``` is used by Pydantic for validation, effectively processing ```str.``` However, this type cannot be used in the schema validation of msgspec, as it expects an instance of ```EmailStr```, which is not feasible.
As far as I understand, currently in Litestar msgspec always does schema validation, irrespective if there's a validation later on by a plugin, e.g. Pydantic?
The problem is that when a PydanticDTO is used, BOTH Pydantic and msgspec do a schema validation, which poses a problem for types like ```EmailStr```. In this case, msgspec shouldn't do any schema validation. Schema validation by msgspec can be disabled by removing the ```type``` argument from ```msgspec.json.decode``` in ```msgspec_hooks.py```.
I think the behaviour should be that msgspec only does schema validation if there's no other plugin doing that instead. Would you agree?
When the msgspec transfer models are created for the pydantic type, the type on the transfer model should be annotated `str`, and the validation to email string should only happen when the pydantic model is instantiated with the data that has been decoded and validated by msgspec.
E.g., pydantic model:
```py
class WithEmail(BaseModel):
email: EmailStr
```
Transfer model produced by DTO should be:
```py
class WithEmailTransfer(msgspec.Struct):
email: str
```
Given:
- `{"email": 1}` - msgspec should fail this on decoding
- `{"email": "abc"}` - pydantic should fail this on model instantiation
- `{"email": "[email protected]"}` - should pass both
The down-typing from `EmailStr` to `str` should be able to be handled in https://github.com/litestar-org/litestar/blob/7c3d24f9b70731df372b29bff9eb5cf359055b3a/litestar/contrib/pydantic/pydantic_dto_factory.py#L54-L56
@peterschutt thanks for the hints!
I just made a pull-request
Just ran into this, any update or workaround?
EDIT: Not a general fix but at least for EmailStr this workaround seems to work for me:
```python
from pydantic import AfterValidator, validate_email
class EmailModel(BaseModel):
email: Annotated[str, AfterValidator(lambda v: validate_email(v)[1])]
```
Can you update to the latest version (`pip install -U litestar==2.5.3`) and confirm this issue persists?
Upgraded, and unfortunately the issue persists with `2.5.3` installed.
Using the provided MCVE I don't seem to be able to reproduce :\
```
~ via 🐋 colima via pyenv
➜ ntp -v 3047
Directory /tmp/testing/3047 created and switched to.
Virtual environment created and activated.
➜ vim mcve.py
➜ pip install litestar[standard] && litestar run --reload --reload-dir . --debug
➜ browse 127.0.0.1/schema
```
result:
<img width="1201" alt="image" src="https://github.com/litestar-org/litestar/assets/45884264/a39060ad-0e03-4d95-8172-791564cc71bd">
and same for Stoplight.
tested on release in 2.5 - 2.6.
Am i doing something dumb here?
Wow, that's really strange... I just tested again at my end with a clean virtual environment and the above example and saw the same issue.
```
pip install -U "litestar[standard]"
litestar run --reload --reload-dir . --debug
open http://127.0.0.1:8000/schema
```
The request is being received by the backend as expected
```
INFO: 127.0.0.1:55586 - "GET /schema HTTP/1.1" 200 OK
INFO: 127.0.0.1:55586 - "GET /openapi.json HTTP/1.1" 404 Not Found
ERROR - 2024-02-11 10:46:25,461 - litestar - config - exception raised on http connection to route /openapi.json
```
Issue does appear to be `/openapi.json` route as if I visit `/schema/rapidoc` instead I get
```
INFO: 127.0.0.1:55628 - "GET /schema/rapidoc HTTP/1.1" 200 OK
INFO: 127.0.0.1:55628 - "GET /schema/openapi.json HTTP/1.1" 200 OK
```
I tried private windows and force refreshing the page but always the same result
<img width="864" alt="image" src="https://github.com/litestar-org/litestar/assets/15220906/6aae1341-07b9-453c-9883-46ed004bc1be">
In case it helps I tested just now with Python 3.11.4 and this is the result of `pip freeze`.
[pip-freeze.txt](https://github.com/litestar-org/litestar/files/14232018/pip-freeze.txt)
No idea what's going on but I'm also very open to the possibility that I'm doing something dumb on this end 😅
@tcbegley I also can't reproduce this. Are you sure that you're running the correct application file?
Hey @provinzkraut, thanks for also looking into this.
I noticed from @JacobCoffee's screenshot that he has `/schema/` in his address bar whereas I have `/schema`. Sure enough if I visit `/schema/` then I don't get the error. By contrast `/schema/rapidoc` works but `/schema/rapidoc/` does not. I guess an issue with relative paths resolving to the wrong place? Ditto for StopLight Elements. Are you able to reproduce by visiting `/schema`?
To answer your original question, definitely running the right application file. To double check I added
```python
if __name__ == "__main__":
import uvicorn
uvicorn.run(app)
```
to the bottom of the above example and ran `python app.py` and observed the same result.
Yes, I was able to reproduce this with the help of @Alc-Alc who came to the same conclusion as you.
I'm not yet sure if this is an upstream issue or a bug on our side, but he provided some additional insight:
```
INFO: 127.0.0.1:49743 - "GET /schema HTTP/1.1" 200 OK
INFO: 127.0.0.1:49743 - "GET /openapi.json HTTP/1.1" 404 Not Found
INFO: 127.0.0.1:49743 - "GET /schema HTTP/1.1" 200 OK
INFO: 127.0.0.1:49743 - "GET /schema/openapi.json HTTP/1.1" 200 OK
```
these are the paths requested with/without the trailing slash, so something in there is not building them correctly. | 2024-04-04T18:47:42 |
litestar-org/litestar | 3,335 | litestar-org__litestar-3335 | [
"3334"
] | 8d6782db0a176e3d327b42f3ed36e728e199a229 | diff --git a/litestar/contrib/pydantic/pydantic_dto_factory.py b/litestar/contrib/pydantic/pydantic_dto_factory.py
--- a/litestar/contrib/pydantic/pydantic_dto_factory.py
+++ b/litestar/contrib/pydantic/pydantic_dto_factory.py
@@ -46,18 +46,24 @@
__all__ = ("PydanticDTO",)
-_down_types = {
- pydantic_v2.JsonValue: Any,
+_down_types: dict[Any, Any] = {
pydantic_v1.EmailStr: str,
- pydantic_v2.EmailStr: str,
pydantic_v1.IPvAnyAddress: str,
- pydantic_v2.IPvAnyAddress: str,
pydantic_v1.IPvAnyInterface: str,
- pydantic_v2.IPvAnyInterface: str,
pydantic_v1.IPvAnyNetwork: str,
- pydantic_v2.IPvAnyNetwork: str,
}
+if pydantic_v2 is not Empty: # type: ignore[comparison-overlap] # pragma: no cover
+ _down_types.update(
+ {
+ pydantic_v2.JsonValue: Any,
+ pydantic_v2.EmailStr: str,
+ pydantic_v2.IPvAnyAddress: str,
+ pydantic_v2.IPvAnyInterface: str,
+ pydantic_v2.IPvAnyNetwork: str,
+ }
+ )
+
def convert_validation_error(validation_error: ValidationErrorV1 | ValidationErrorV2) -> list[dict[str, Any]]:
error_list = validation_error.errors()
| Bug: App launching fails because of Pydantic version handling problem
### Description
Launching a Litestar app fails because of a variable that has a value of type `_EmptyEnum` and is expected to have an attribute `JsonValue`.
This is [a Pydantic version handling mechanism](https://github.com/litestar-org/litestar/blob/8d6782db0a176e3d327b42f3ed36e728e199a229/litestar/contrib/pydantic/pydantic_dto_factory.py#L50) that assigns a value of `Empty` ([here](https://github.com/litestar-org/litestar/blob/8d6782db0a176e3d327b42f3ed36e728e199a229/litestar/types/empty.py#L16)) to `pydantic_v2`.
### URL to code causing the issue
_No response_
### MCVE
```python
from litestar import Litestar, get
@get("/")
async def index() -> str:
return "Hello, world!"
@get("/books/{book_id:int}")
async def get_book(book_id: int) -> dict[str, int]:
return {"book_id": book_id}
app = Litestar([index, get_book])
```
### Steps to reproduce
```bash
1. Write the MCVE code in a file and name it `app.py`.
2. Run the app with `litestar --app app:app run --host 0.0.0.0 --port 8000`
```
### Screenshots
```bash
""
```
### Logs
```bash
Using Litestar app from env: 'sample_app:app'
Traceback (most recent call last):
File "/home/zahran/miniconda3/envs/sample_env/bin/litestar", line 8, in <module>
sys.exit(run_cli())
File "/home/zahran/miniconda3/envs/sample_env/lib/python3.10/site-packages/litestar/__main__.py", line 6, in run_cli
litestar_group()
File "/home/zahran/miniconda3/envs/sample_env/lib/python3.10/site-packages/click/core.py", line 1157, in __call__
return self.main(*args, **kwargs)
File "/home/zahran/miniconda3/envs/sample_env/lib/python3.10/site-packages/rich_click/rich_command.py", line 125, in main
with self.make_context(prog_name, args, **extra) as ctx:
File "/home/zahran/miniconda3/envs/sample_env/lib/python3.10/site-packages/litestar/cli/_utils.py", line 218, in make_context
self._prepare(ctx)
File "/home/zahran/miniconda3/envs/sample_env/lib/python3.10/site-packages/litestar/cli/_utils.py", line 200, in _prepare
env = ctx.obj = LitestarEnv.from_env(ctx.params.get("app_path"), ctx.params.get("app_dir"))
File "/home/zahran/miniconda3/envs/sample_env/lib/python3.10/site-packages/litestar/cli/_utils.py", line 106, in from_env
loaded_app = _load_app_from_path(app_path)
File "/home/zahran/miniconda3/envs/sample_env/lib/python3.10/site-packages/litestar/cli/_utils.py", line 271, in _load_app_from_path
module = importlib.import_module(module_path)
File "/home/zahran/miniconda3/envs/sample_env/lib/python3.10/importlib/__init__.py", line 126, in import_module
return _bootstrap._gcd_import(name[level:], package, level)
File "<frozen importlib._bootstrap>", line 1050, in _gcd_import
File "<frozen importlib._bootstrap>", line 1027, in _find_and_load
File "<frozen importlib._bootstrap>", line 1006, in _find_and_load_unlocked
File "<frozen importlib._bootstrap>", line 688, in _load_unlocked
File "<frozen importlib._bootstrap_external>", line 883, in exec_module
File "<frozen importlib._bootstrap>", line 241, in _call_with_frames_removed
File "/home/ahmedzahran/projects/repos/agolo/neural-qbs/sample_app.py", line 14, in <module>
app = Litestar([index, get_book])
File "/home/zahran/miniconda3/envs/sample_env/lib/python3.10/site-packages/litestar/app.py", line 361, in __init__
plugins=self._get_default_plugins(list(plugins or [])),
File "/home/zahran/miniconda3/envs/sample_env/lib/python3.10/site-packages/litestar/app.py", line 521, in _get_default_plugins
from litestar.contrib.pydantic import (
File "/home/zahran/miniconda3/envs/sample_env/lib/python3.10/site-packages/litestar/contrib/pydantic/__init__.py", line 8, in <module>
from .pydantic_dto_factory import PydanticDTO
File "/home/zahran/miniconda3/envs/sample_env/lib/python3.10/site-packages/litestar/contrib/pydantic/pydantic_dto_factory.py", line 50, in <module>
pydantic_v2.JsonValue: Any,
AttributeError: '_EmptyEnum' object has no attribute 'JsonValue'
```
### Litestar Version
2.8.0
### Platform
- [X] Linux
- [ ] Mac
- [ ] Windows
- [ ] Other (Please specify in the description above)
<!-- POLAR PLEDGE BADGE START -->
---
> [!NOTE]
> While we are open for sponsoring on [GitHub Sponsors](https://github.com/sponsors/litestar-org/) and
> [OpenCollective](https://opencollective.com/litestar), we also utilize [Polar.sh](https://polar.sh/) to engage in pledge-based sponsorship.
>
> Check out all issues funded or available for funding [on our Polar.sh dashboard](https://polar.sh/litestar-org)
> * If you would like to see an issue prioritized, make a pledge towards it!
> * We receive the pledge once the issue is completed & verified
> * This, along with engagement in the community, helps us know which features are a priority to our users.
<a href="https://polar.sh/litestar-org/litestar/issues/3334">
<picture>
<source media="(prefers-color-scheme: dark)" srcset="https://polar.sh/api/github/litestar-org/litestar/issues/3334/pledge.svg?darkmode=1">
<img alt="Fund with Polar" src="https://polar.sh/api/github/litestar-org/litestar/issues/3334/pledge.svg">
</picture>
</a>
<!-- POLAR PLEDGE BADGE END -->
| ```
(.venv) peter@pop-os:~/PycharmProjects/litestar-import-test$ pip freeze
anyio==4.3.0
certifi==2024.2.2
click==8.1.7
Faker==24.7.1
h11==0.14.0
httpcore==1.0.5
httpx==0.27.0
idna==3.6
litestar==2.8.0
markdown-it-py==3.0.0
mdurl==0.1.2
msgspec==0.18.6
multidict==6.0.5
polyfactory==2.15.0
Pygments==2.17.2
python-dateutil==2.9.0.post0
PyYAML==6.0.1
rich==13.7.1
rich-click==1.7.4
six==1.16.0
sniffio==1.3.1
typing_extensions==4.11.0
uvicorn==0.29.0
```
```
(.venv) peter@pop-os:~/PycharmProjects/litestar-import-test$ litestar --app app:app run
Using Litestar app from env: 'app:app'
Starting server process ─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────
┌──────────────────────────────┬──────────────────────┐
│ Litestar version │ 2.8.0 │
│ Debug mode │ Disabled │
│ Python Debugger on exception │ Disabled │
│ CORS │ Disabled │
│ CSRF │ Disabled │
│ OpenAPI │ Enabled path=/schema │
│ Compression │ Disabled │
└──────────────────────────────┴──────────────────────┘
INFO: Started server process [138512]
INFO: Waiting for application startup.
INFO: Application startup complete.
INFO: Uvicorn running on http://127.0.0.1:8000 (Press CTRL+C to quit)
```
```
(.venv) peter@pop-os:~/PycharmProjects/litestar-import-test$ curl localhost:8000/
Hello, world!(.venv) peter@pop-os:~/PycharmProjects/litestar-import-test$
```
Can you provide output of `pip freeze` or similar to show us your environment? Although, I assume this is triggered by having pydantic v1 in there.
Thanks for reporting.
> Although, I assume this is triggered by having pydantic v1 in there.
```
(.venv) peter@pop-os:~/PycharmProjects/litestar-import-test$ python -m pip install pydantic==1.10
Collecting pydantic==1.10
Downloading pydantic-1.10.0-py3-none-any.whl.metadata (138 kB)
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 138.4/138.4 kB 1.7 MB/s eta 0:00:00
Requirement already satisfied: typing-extensions>=4.1.0 in ./.venv/lib/python3.12/site-packages (from pydantic==1.10) (4.11.0)
Downloading pydantic-1.10.0-py3-none-any.whl (153 kB)
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 153.5/153.5 kB 5.0 MB/s eta 0:00:00
Installing collected packages: pydantic
Successfully installed pydantic-1.10.0
(.venv) peter@pop-os:~/PycharmProjects/litestar-import-test$ litestar --app app:app run
Using Litestar app from env: 'app:app'
Traceback (most recent call last):
File "/home/peter/PycharmProjects/litestar-import-test/.venv/bin/litestar", line 8, in <module>
sys.exit(run_cli())
^^^^^^^^^
File "/home/peter/PycharmProjects/litestar-import-test/.venv/lib/python3.12/site-packages/litestar/__main__.py", line 6, in run_cli
litestar_group()
File "/home/peter/PycharmProjects/litestar-import-test/.venv/lib/python3.12/site-packages/click/core.py", line 1157, in __call__
return self.main(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/peter/PycharmProjects/litestar-import-test/.venv/lib/python3.12/site-packages/rich_click/rich_command.py", line 125, in main
with self.make_context(prog_name, args, **extra) as ctx:
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/peter/PycharmProjects/litestar-import-test/.venv/lib/python3.12/site-packages/litestar/cli/_utils.py", line 218, in make_context
self._prepare(ctx)
File "/home/peter/PycharmProjects/litestar-import-test/.venv/lib/python3.12/site-packages/litestar/cli/_utils.py", line 200, in _prepare
env = ctx.obj = LitestarEnv.from_env(ctx.params.get("app_path"), ctx.params.get("app_dir"))
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/peter/PycharmProjects/litestar-import-test/.venv/lib/python3.12/site-packages/litestar/cli/_utils.py", line 106, in from_env
loaded_app = _load_app_from_path(app_path)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/peter/PycharmProjects/litestar-import-test/.venv/lib/python3.12/site-packages/litestar/cli/_utils.py", line 271, in _load_app_from_path
module = importlib.import_module(module_path)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/peter/.pyenv/versions/3.12.2/lib/python3.12/importlib/__init__.py", line 90, in import_module
return _bootstrap._gcd_import(name[level:], package, level)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "<frozen importlib._bootstrap>", line 1387, in _gcd_import
File "<frozen importlib._bootstrap>", line 1360, in _find_and_load
File "<frozen importlib._bootstrap>", line 1331, in _find_and_load_unlocked
File "<frozen importlib._bootstrap>", line 935, in _load_unlocked
File "<frozen importlib._bootstrap_external>", line 995, in exec_module
File "<frozen importlib._bootstrap>", line 488, in _call_with_frames_removed
File "/home/peter/PycharmProjects/litestar-import-test/app.py", line 14, in <module>
app = Litestar([index, get_book])
^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/peter/PycharmProjects/litestar-import-test/.venv/lib/python3.12/site-packages/litestar/app.py", line 361, in __init__
plugins=self._get_default_plugins(list(plugins or [])),
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/peter/PycharmProjects/litestar-import-test/.venv/lib/python3.12/site-packages/litestar/app.py", line 521, in _get_default_plugins
from litestar.contrib.pydantic import (
File "/home/peter/PycharmProjects/litestar-import-test/.venv/lib/python3.12/site-packages/litestar/contrib/pydantic/__init__.py", line 8, in <module>
from .pydantic_dto_factory import PydanticDTO
File "/home/peter/PycharmProjects/litestar-import-test/.venv/lib/python3.12/site-packages/litestar/contrib/pydantic/pydantic_dto_factory.py", line 50, in <module>
pydantic_v2.JsonValue: Any,
^^^^^^^^^^^^^^^^^^^^^
AttributeError: '_EmptyEnum' object has no attribute 'JsonValue'
```
Nvm, confirmed. | 2024-04-08T02:15:56 |
|
litestar-org/litestar | 3,338 | litestar-org__litestar-3338 | [
"3337"
] | 32f0ffaf7703ed11441319663385f49b8ab26f37 | diff --git a/litestar/openapi/controller.py b/litestar/openapi/controller.py
--- a/litestar/openapi/controller.py
+++ b/litestar/openapi/controller.py
@@ -1,9 +1,10 @@
from __future__ import annotations
from functools import cached_property
-from typing import TYPE_CHECKING, Any, Callable, Literal
+from typing import TYPE_CHECKING, Any, Callable, Final, Literal
+from uuid import uuid4
-from litestar.constants import OPENAPI_JSON_HANDLER_NAME, OPENAPI_NOT_INITIALIZED
+from litestar.constants import OPENAPI_NOT_INITIALIZED
from litestar.controller import Controller
from litestar.enums import MediaType, OpenAPIMediaType
from litestar.exceptions import ImproperlyConfiguredException
@@ -14,13 +15,17 @@
from litestar.serialization.msgspec_hooks import decode_json
from litestar.status_codes import HTTP_404_NOT_FOUND
-__all__ = ("OpenAPIController",)
-
-
if TYPE_CHECKING:
from litestar.connection.request import Request
from litestar.openapi.spec.open_api import OpenAPI
+__all__ = ("OpenAPIController",)
+
+# NOTE: We are explicitly using a different name to the one defined in litestar.constants so that an openapi
+# controller can be added to a router on the same application as the openapi router.
+# See: https://github.com/litestar-org/litestar/issues/3337
+OPENAPI_JSON_HANDLER_NAME: Final = f"{uuid4().hex}_litestar_openapi_json"
+
class OpenAPIController(Controller):
"""Controller for OpenAPI endpoints."""
| diff --git a/tests/unit/test_openapi/test_integration.py b/tests/unit/test_openapi/test_integration.py
--- a/tests/unit/test_openapi/test_integration.py
+++ b/tests/unit/test_openapi/test_integration.py
@@ -10,7 +10,7 @@
import yaml
from typing_extensions import Annotated
-from litestar import Controller, Litestar, delete, get, patch, post
+from litestar import Controller, Litestar, Router, delete, get, patch, post
from litestar._openapi.plugin import OpenAPIPlugin
from litestar.enums import MediaType, OpenAPIMediaType, ParamType
from litestar.openapi import OpenAPIConfig, OpenAPIController
@@ -503,3 +503,40 @@ def delete_handler(self, data: B) -> None: ...
"test_components_schemas_in_alphabetical_order.C",
]
assert list(openapi.components.schemas.keys()) == expected_keys
+
+
+def test_openapi_controller_and_openapi_router_on_same_app() -> None:
+ """Test that OpenAPIController and OpenAPIRouter can coexist on the same app.
+
+ As part of backward compatibility with new plugin-based OpenAPI router approach, we did not consider
+ the case where an OpenAPIController is registered on the application by means other than via the
+ OpenAPIConfig object. This is an approach that has been used to serve the openapi both under the
+ `/schema` and `/some-prefix/schema` paths. This test ensures that the OpenAPIController and OpenAPIRouter
+ can coexist on the same app.
+
+ See: https://github.com/litestar-org/litestar/issues/3337
+ """
+ router = Router(path="/abc", route_handlers=[OpenAPIController])
+ openapi_config = OpenAPIConfig("Litestar", "v0.0.1") # no openapi_controller specified means we use the router
+ app = Litestar([router], openapi_config=openapi_config)
+ assert sorted(r.path for r in app.routes) == [
+ "/abc/schema",
+ "/abc/schema/elements",
+ "/abc/schema/oauth2-redirect.html",
+ "/abc/schema/openapi.json",
+ "/abc/schema/openapi.yaml",
+ "/abc/schema/openapi.yml",
+ "/abc/schema/rapidoc",
+ "/abc/schema/redoc",
+ "/abc/schema/swagger",
+ "/schema",
+ "/schema/elements",
+ "/schema/oauth2-redirect.html",
+ "/schema/openapi.json",
+ "/schema/openapi.yaml",
+ "/schema/openapi.yml",
+ "/schema/rapidoc",
+ "/schema/redoc",
+ "/schema/swagger",
+ "/schema/{path:str}",
+ ]
| Bug: In 2.8.0, openapi triggers an ImproperlyConfiguredException
### Description
[#route-handler-indexing](https://docs.litestar.dev/2/usage/routing/handlers.html#route-handler-indexing) states that an 'ImproperlyConfiguredException' will be raised if not all route have unique names.
After upgrading from 2.7.0 to 2.8.0, an ImproperlyConfiguredException is raised when launching my Litestar app.
The final lines of the output seems to suggest the exception is being triggered by a route handler name clash with litestar_openapi_json. The program runs fine in version 2.7.0 with no changes to the code.
### /app/app.py
```python
import logging
from litestar import get, Litestar, MediaType, Response, Router
from litestar.logging import LoggingConfig
from litestar.openapi import OpenAPIConfig, OpenAPIController
from litestar.response import Redirect
from app.settings import URL_PATH_PREFIX
from app.auth import auth_middleware
LOGGER = logging.getLogger()
logging_config = LoggingConfig(
root={"level": logging.getLevelName(logging.DEBUG), "handlers": ["console"]},
formatters={"standard": {"format": "%(asctime)s - %(name)s - %(levelname)s - %(message)s"}},
)
@get(["", "/index"], exclude_from_auth=True, include_in_schema=False)
async def index() -> Redirect:
return Redirect(f"/{URL_PATH_PREFIX}/schema/redoc")
@get("/health", exclude_from_auth=True, include_in_schema=False)
async def health() -> Response:
return Response(media_type=MediaType.TEXT, content="I'm feeling healthy!\n")
class MyOpenAPIController(OpenAPIController):
path = "/schema"
internal_router = Router(path="/", route_handlers=[health, index])
external_router = Router(path=f"/{URL_PATH_PREFIX}", route_handlers=[index, MyOpenAPIController])
app = Litestar(
[external_router, internal_router],
middleware=[auth_middleware],
openapi_config=OpenAPIConfig("api", version="0.9.0"),
)
```
### Steps to reproduce
```bash
uvicorn app.app:app --host 0.0.0.0 --port 7000 --reload
```
### Logs
```bash
PS C:\Users\user\GitLab\api> & 'c:\Users\user\GitLab\api\.venv\Scripts\python.exe' 'c:\Users\user\.vscode\extensions\ms-python.debugpy-2024.4.0-win32-x64\bundled\libs\debugpy\adapter/../..\debugpy\launcher' '63609' '--' '-m' 'uvicorn' 'app.app:app' '--host' '0.0.0.0' '--port' '7000' '--reload'
INFO: Will watch for changes in these directories: ['C:\\Users\\user\\GitLab\\api']
INFO: Uvicorn running on http://0.0.0.0:7000 (Press CTRL+C to quit)
INFO: Started reloader process [26988] using WatchFiles
Process SpawnProcess-1:
Traceback (most recent call last):
File "C:\Program Files\Python312\Lib\multiprocessing\process.py", line 314, in _bootstrap
self.run()
File "C:\Program Files\Python312\Lib\multiprocessing\process.py", line 108, in run
self._target(*self._args, **self._kwargs)
File "c:\Users\user\GitLab\api\.venv\Lib\site-packages\uvicorn\_subprocess.py", line 78, in subprocess_started
target(sockets=sockets)
File "c:\Users\user\GitLab\api\.venv\Lib\site-packages\uvicorn\server.py", line 65, in run
return asyncio.run(self.serve(sockets=sockets))
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Program Files\Python312\Lib\asyncio\runners.py", line 194, in run
return runner.run(main)
^^^^^^^^^^^^^^^^
File "C:\Program Files\Python312\Lib\asyncio\runners.py", line 118, in run
return self._loop.run_until_complete(task)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Program Files\Python312\Lib\asyncio\base_events.py", line 685, in run_until_complete
return future.result()
^^^^^^^^^^^^^^^
File "c:\Users\user\GitLab\api\.venv\Lib\site-packages\uvicorn\server.py", line 69, in serve
await self._serve(sockets)
File "c:\Users\user\GitLab\api\.venv\Lib\site-packages\uvicorn\server.py", line 76, in _serve
config.load()
File "c:\Users\user\GitLab\api\.venv\Lib\site-packages\uvicorn\config.py", line 433, in load
self.loaded_app = import_from_string(self.app)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "c:\Users\user\GitLab\api\.venv\Lib\site-packages\uvicorn\importer.py", line 19, in import_from_string
module = importlib.import_module(module_str)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Program Files\Python312\Lib\importlib\__init__.py", line 90, in import_module
return _bootstrap._gcd_import(name[level:], package, level)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "<frozen importlib._bootstrap>", line 1387, in _gcd_import
File "<frozen importlib._bootstrap>", line 1360, in _find_and_load
File "<frozen importlib._bootstrap>", line 1331, in _find_and_load_unlocked
File "<frozen importlib._bootstrap>", line 935, in _load_unlocked
File "<frozen importlib._bootstrap_external>", line 995, in exec_module
File "<frozen importlib._bootstrap>", line 488, in _call_with_frames_removed
File "C:\Users\user\GitLab\api\app\app.py", line 32, in <module>
app = Litestar(
^^^^^^^^^
File "c:\Users\user\GitLab\api\.venv\Lib\site-packages\litestar\app.py", line 483, in __init__
self.register(route_handler)
File "c:\Users\user\GitLab\api\.venv\Lib\site-packages\litestar\app.py", line 678, in register
self.asgi_router.construct_routing_trie()
File "c:\Users\user\GitLab\api\.venv\Lib\site-packages\litestar\_asgi\asgi_router.py", line 144, in construct_routing_trie
self._store_handler_to_route_mapping(route)
File "c:\Users\user\GitLab\api\.venv\Lib\site-packages\litestar\_asgi\asgi_router.py", line 123, in _store_handler_to_route_mapping
raise ImproperlyConfiguredException(
litestar.exceptions.http_exceptions.ImproperlyConfiguredException: 500: route handler names must be unique - 097bebbb88884f068c1c19a750884fee_litestar_openapi_json is not unique.
```
### Litestar Version
2.8.0
### Platform
- [ ] Linux
- [ ] Mac
- [X] Windows
| Usually, the openapi controller should be registered on the `OpenAPIConfig` object, e.g.,:
```py
class MyOpenAPIController(OpenAPIController):
path = "/schema"
internal_router = Router(path="/", route_handlers=[health, index])
external_router = Router(path="/prefix", route_handlers=[index])
app = Litestar(
[internal_router, external_router],
openapi_config=OpenAPIConfig("api", version="0.9.0", openapi_controller=MyOpenAPIController),
)
```
Is there a reason for registering it directly on that router?
> Is there a reason for registering it directly on that router?
The schema end point needed to be reachable via both "/{URL_PATH_PREFIX}/schema" and "/schema".
Disabling its registration with the external_router resolves the issue, but removes the desired route.
I guess I need a solution that works with 2.8.0 that allows me multiple schema end points.
2.8.0 has deprecated the use of the `OpenAPIController` in favor of a plugin system where plugins are registered on the `OpenAPIConfig` object.
If an `OpenAPIController` is not registered on the `OpenAPIConfig` object, it implicitly opts into the new behavior which does not use the controller at all, and instead registers handlers for the openapi routes on a router. This is combined with a patch that was released in 2.7.1 that started using an explicit name for the openapi json route to facilitate rendering absolute urls to openapi.json into the javascript for the OpenAPI UI rendering.
To get the 2.7 behavior, also declare ~your controller~ `OpenAPIController` on the `OpenAPIConfig`, e.g.:
```py
class MyOpenAPIController(OpenAPIController):
path = "/schema"
internal_router = Router(path="/", route_handlers=[health, index])
external_router = Router(path="/prefix", route_handlers=[index, MyOpenAPIController])
app = Litestar(
[internal_router, external_router],
openapi_config=OpenAPIConfig("api", version="0.9.0", openapi_controller=OpenAPIController),
)
```
```
(.venv) peter@pop-os:~/PycharmProjects/litestar-import-test$ litestar routes --schema
Using Litestar app from app:app
/ (HTTP)
├── options_handler sync OPTIONS
├── / index async GET
└── /index index async GET
/health (HTTP)
├── health async GET
└── options_handler sync OPTIONS
/index (HTTP)
├── options_handler sync OPTIONS
├── / index async GET
└── /index index async GET
/prefix (HTTP)
├── options_handler sync OPTIONS
├── / index async GET
└── /index index async GET
/prefix/index (HTTP)
├── options_handler sync OPTIONS
├── / index async GET
└── /index index async GET
/prefix/schema (HTTP)
├── root sync GET
└── options_handler sync OPTIONS
/prefix/schema/elements (HTTP)
├── options_handler sync OPTIONS
└── stoplight_elements sync GET
/prefix/schema/oauth2-redirect.html (HTTP)
├── options_handler sync OPTIONS
└── swagger_ui_oauth2_redirect sync GET
/prefix/schema/openapi.json (HTTP)
├── options_handler sync OPTIONS
└── 020d496bb2644584b776daded6a21182_litestar_openapi_json sync GET
/prefix/schema/openapi.yaml (HTTP)
├── /openapi.yaml retrieve_schema_yaml sync GET
├── /openapi.yml retrieve_schema_yaml sync GET
└── options_handler sync OPTIONS
/prefix/schema/openapi.yml (HTTP)
├── /openapi.yaml retrieve_schema_yaml sync GET
├── /openapi.yml retrieve_schema_yaml sync GET
└── options_handler sync OPTIONS
/prefix/schema/rapidoc (HTTP)
├── rapidoc sync GET
└── options_handler sync OPTIONS
/prefix/schema/redoc (HTTP)
├── redoc sync GET
└── options_handler sync OPTIONS
/prefix/schema/swagger (HTTP)
├── swagger_ui sync GET
└── options_handler sync OPTIONS
/schema (HTTP)
├── root sync GET
└── options_handler sync OPTIONS
/schema/elements (HTTP)
├── stoplight_elements sync GET
└── options_handler sync OPTIONS
/schema/oauth2-redirect.html (HTTP)
├── swagger_ui_oauth2_redirect sync GET
└── options_handler sync OPTIONS
/schema/openapi.json (HTTP)
├── 020d496bb2644584b776daded6a21182_litestar_openapi_json sync GET
└── options_handler sync OPTIONS
/schema/openapi.yaml (HTTP)
├── /openapi.yaml retrieve_schema_yaml sync GET
├── /openapi.yml retrieve_schema_yaml sync GET
└── options_handler sync OPTIONS
/schema/openapi.yml (HTTP)
├── /openapi.yaml retrieve_schema_yaml sync GET
├── /openapi.yml retrieve_schema_yaml sync GET
└── options_handler sync OPTIONS
/schema/rapidoc (HTTP)
├── rapidoc sync GET
└── options_handler sync OPTIONS
/schema/redoc (HTTP)
├── redoc sync GET
└── options_handler sync OPTIONS
/schema/swagger (HTTP)
├── swagger_ui sync GET
└── options_handler sync OPTIONS
```
At this point, I'm not sure how I would implement your pattern in v3.0 when there is no OpenAPIController. This will need some thought, and is probably a feature request for that branch.
> At this point, I'm not sure how I would implement your pattern in v3.0 when there is no OpenAPIController. This will need some thought, and is probably a feature request for that branch.
Perhaps allowing a sequence of paths or routers on the config object, e.g., `OpenAPIConfig(path=["/schema", "/prefix/schema"])`.
Well your solution of assigning the OpenAPIController to the OpenAPIConfig works. Much thanks for the prompt and detailed reply. I'd be happy to remove the OpenAPIController if it is now deprecated, once a solution in >2.8.0 exists.
It might be better to give it a list of routers and a end point to OpenAPIConfig.
```py
app = Litestar(
[external_router, internal_router],
openapi_config=OpenAPIConfig(
...,
openapi_endpoint="/schema",
openapi_routers=[external_router, internal_router],
),
)
``` | 2024-04-08T08:16:49 |
litestar-org/litestar | 3,347 | litestar-org__litestar-3347 | [
"3348"
] | 13008fcfd54fcd15a33eb3a3d804506a3d2ce092 | diff --git a/litestar/contrib/pydantic/pydantic_dto_factory.py b/litestar/contrib/pydantic/pydantic_dto_factory.py
--- a/litestar/contrib/pydantic/pydantic_dto_factory.py
+++ b/litestar/contrib/pydantic/pydantic_dto_factory.py
@@ -25,13 +25,16 @@
try:
import pydantic as pydantic_v2
+
+ assert pydantic_v2.__version__.startswith("2.") # noqa: S101
+
from pydantic import ValidationError as ValidationErrorV2
from pydantic import v1 as pydantic_v1
from pydantic.v1 import ValidationError as ValidationErrorV1
ModelType: TypeAlias = "pydantic_v1.BaseModel | pydantic_v2.BaseModel"
-except ImportError:
+except AssertionError:
import pydantic as pydantic_v1 # type: ignore[no-redef]
pydantic_v2 = Empty # type: ignore[assignment]
diff --git a/litestar/contrib/pydantic/pydantic_init_plugin.py b/litestar/contrib/pydantic/pydantic_init_plugin.py
--- a/litestar/contrib/pydantic/pydantic_init_plugin.py
+++ b/litestar/contrib/pydantic/pydantic_init_plugin.py
@@ -14,18 +14,23 @@
from litestar.typing import _KWARG_META_EXTRACTORS
from litestar.utils import is_class_and_subclass
+try:
+ import pydantic as _ # noqa: F401
+except ImportError as e:
+ raise MissingDependencyException("pydantic") from e
+
try:
# check if we have pydantic v2 installed, and try to import both versions
import pydantic as pydantic_v2
+
+ assert pydantic_v2.__version__.startswith("2.") # noqa: S101
+
from pydantic import v1 as pydantic_v1
-except ImportError:
+except AssertionError:
# check if pydantic 1 is installed and import it
- try:
- import pydantic as pydantic_v1 # type: ignore[no-redef]
+ import pydantic as pydantic_v1 # type: ignore[no-redef]
- pydantic_v2 = None # type: ignore[assignment]
- except ImportError as e:
- raise MissingDependencyException("pydantic") from e
+ pydantic_v2 = None # type: ignore[assignment]
if TYPE_CHECKING:
diff --git a/litestar/contrib/pydantic/pydantic_schema_plugin.py b/litestar/contrib/pydantic/pydantic_schema_plugin.py
--- a/litestar/contrib/pydantic/pydantic_schema_plugin.py
+++ b/litestar/contrib/pydantic/pydantic_schema_plugin.py
@@ -20,18 +20,23 @@
from litestar.typing import FieldDefinition
from litestar.utils import is_class_and_subclass, is_generic
+try:
+ import pydantic as _ # noqa: F401
+except ImportError as e:
+ raise MissingDependencyException("pydantic") from e
+
try:
# check if we have pydantic v2 installed, and try to import both versions
import pydantic as pydantic_v2
+
+ assert pydantic_v2.__version__.startswith("2.") # noqa: S101
+
from pydantic import v1 as pydantic_v1
-except ImportError:
+except AssertionError:
# check if pydantic 1 is installed and import it
- try:
- import pydantic as pydantic_v1 # type: ignore[no-redef]
+ import pydantic as pydantic_v1 # type: ignore[no-redef]
- pydantic_v2 = None # type: ignore[assignment]
- except ImportError as e:
- raise MissingDependencyException("pydantic") from e
+ pydantic_v2 = None # type: ignore[assignment]
if TYPE_CHECKING:
from litestar._openapi.schema_generation.schema import SchemaCreator
| diff --git a/test_apps/pydantic_1_app.py b/test_apps/pydantic_1_app.py
--- a/test_apps/pydantic_1_app.py
+++ b/test_apps/pydantic_1_app.py
@@ -19,6 +19,8 @@ async def handler(data: Foo) -> Foo:
class TestApp(unittest.TestCase):
def test_app(self) -> None:
+ assert pydantic.__version__.startswith("1."), pydantic.__version__
+
with create_test_client([handler]) as client:
data = {"bar": "baz", "baz": ["a", "b", "c"]}
res = client.post("/", json=data)
| Bug: pydantic version logic breaks with latest pydantic
### Description
In [v1.10.15](https://github.com/pydantic/pydantic/releases/tag/v1.10.15) pydantic has added the `v1` namespace to the v1 line.
We rely on that not existing in the namespace to determine if we have v1 or v2 installed:
https://github.com/litestar-org/litestar/blob/13008fcfd54fcd15a33eb3a3d804506a3d2ce092/litestar/contrib/pydantic/pydantic_dto_factory.py#L26-L37
### URL to code causing the issue
_No response_
### MCVE
```python
peter@pop-os:~/litestar-test/litestar$ python -m venv .venv
peter@pop-os:~/litestar-test/litestar$ . .venv/bin/activate
(.venv) peter@pop-os:~/litestar-test/litestar$ python -m pip install litestar pydantic==1.*
Collecting litestar
Using cached litestar-2.8.1-py3-none-any.whl.metadata (97 kB)
Collecting pydantic==1.*
Downloading pydantic-1.10.15-py3-none-any.whl.metadata (150 kB)
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 150.6/150.6 kB 3.6 MB/s eta 0:00:00
... snipped ...
Installing collected packages: typing-extensions, sniffio, six, pyyaml, pygments, multidict, msgspec, mdurl, idna, h11, click, certifi, python-dateutil, pydantic, markdown-it-py, httpcore, anyio, rich, httpx, faker, rich-click, polyfactory, litestar
Successfully installed anyio-4.3.0 certifi-2024.2.2 click-8.1.7 faker-24.7.1 h11-0.14.0 httpcore-1.0.5 httpx-0.27.0 idna-3.6 litestar-2.8.1 markdown-it-py-3.0.0 mdurl-0.1.2 msgspec-0.18.6 multidict-6.0.5 polyfactory-2.15.0 pydantic-1.10.15 pygments-2.17.2 python-dateutil-2.9.0.post0 pyyaml-6.0.1 rich-13.7.1 rich-click-1.7.4 six-1.16.0 sniffio-1.3.1 typing-extensions-4.11.0
(.venv) peter@pop-os:~/litestar-test/litestar$ python -c "from litestar.contrib.pydantic import pydantic_dto_factory"
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "/home/peter/litestar-test/litestar/litestar/contrib/pydantic/__init__.py", line 8, in <module>
from .pydantic_dto_factory import PydanticDTO
File "/home/peter/litestar-test/litestar/litestar/contrib/pydantic/pydantic_dto_factory.py", line 59, in <module>
pydantic_v2.JsonValue: Any,
^^^^^^^^^^^^^^^^^^^^^
AttributeError: module 'pydantic' has no attribute 'JsonValue'
(.venv) peter@pop-os:~/litestar-test/litestar$
```
### Steps to reproduce
```bash
1. Go to '...'
2. Click on '....'
3. Scroll down to '....'
4. See error
```
### Screenshots
```bash
""
```
### Logs
_No response_
### Litestar Version
v2.8.1
### Platform
- [X] Linux
- [ ] Mac
- [ ] Windows
- [ ] Other (Please specify in the description above)
<!-- POLAR PLEDGE BADGE START -->
---
> [!NOTE]
> While we are open for sponsoring on [GitHub Sponsors](https://github.com/sponsors/litestar-org/) and
> [OpenCollective](https://opencollective.com/litestar), we also utilize [Polar.sh](https://polar.sh/) to engage in pledge-based sponsorship.
>
> Check out all issues funded or available for funding [on our Polar.sh dashboard](https://polar.sh/litestar-org)
> * If you would like to see an issue prioritized, make a pledge towards it!
> * We receive the pledge once the issue is completed & verified
> * This, along with engagement in the community, helps us know which features are a priority to our users.
<a href="https://polar.sh/litestar-org/litestar/issues/3348">
<picture>
<source media="(prefers-color-scheme: dark)" srcset="https://polar.sh/api/github/litestar-org/litestar/issues/3348/pledge.svg?darkmode=1">
<img alt="Fund with Polar" src="https://polar.sh/api/github/litestar-org/litestar/issues/3348/pledge.svg">
</picture>
</a>
<!-- POLAR PLEDGE BADGE END -->
| 2024-04-09T02:19:24 |
|
litestar-org/litestar | 3,371 | litestar-org__litestar-3371 | [
"2929"
] | dd9c401115c83d503288414fc96cd46e7a16cb7d | diff --git a/litestar/dto/base_dto.py b/litestar/dto/base_dto.py
--- a/litestar/dto/base_dto.py
+++ b/litestar/dto/base_dto.py
@@ -195,11 +195,31 @@ def create_openapi_schema(
) -> Reference | Schema:
"""Create an OpenAPI request body.
+ Args:
+ field_definition: A parsed type annotation that represents the annotation used on the handler.
+ handler_id: ID of the route handler for which to create a DTO instance.
+ schema_creator: A factory for creating schemas. Has a ``for_field_definition()`` method that accepts a
+ :class:`~litestar.typing.FieldDefinition` instance.
+
Returns:
OpenAPI request body.
"""
key = "data_backend" if field_definition.name == "data" else "return_backend"
backend = cls._dto_backends[handler_id][key] # type: ignore[literal-required]
+
+ if backend.wrapper_attribute_name:
+ # The DTO has been built for a handler that has a DTO supported type wrapped in a generic type.
+ #
+ # The backend doesn't receive the full annotation, only the type of the attribute on the outer type that
+ # holds the DTO supported type.
+ #
+ # This special casing rebuilds the outer generic type annotation with the original model replaced by the DTO
+ # generated transfer model type in the type arguments.
+ transfer_model = backend.transfer_model_type
+ generic_args = tuple(transfer_model if a is cls.model_type else a for a in field_definition.args)
+ return schema_creator.for_field_definition(
+ FieldDefinition.from_annotation(field_definition.origin[generic_args])
+ )
return schema_creator.for_field_definition(FieldDefinition.from_annotation(backend.annotation))
@classmethod
| diff --git a/tests/conftest.py b/tests/conftest.py
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -9,7 +9,7 @@
from datetime import datetime
from os import urandom
from pathlib import Path
-from typing import TYPE_CHECKING, Any, AsyncGenerator, Callable, Generator, TypeVar, Union, cast
+from typing import TYPE_CHECKING, Any, AsyncGenerator, Callable, Generator, Union, cast
from unittest.mock import AsyncMock, MagicMock
import pytest
@@ -30,6 +30,7 @@
from litestar.stores.memory import MemoryStore
from litestar.stores.redis import RedisStore
from litestar.testing import RequestFactory
+from tests.helpers import not_none
if TYPE_CHECKING:
from types import ModuleType
@@ -228,11 +229,6 @@ def wrapped(source: str) -> ModuleType:
Returns:
An imported module.
"""
- T = TypeVar("T")
-
- def not_none(val: T | T | None) -> T:
- assert val is not None
- return val
def module_name_generator() -> str:
letters = string.ascii_lowercase
diff --git a/tests/helpers.py b/tests/helpers.py
--- a/tests/helpers.py
+++ b/tests/helpers.py
@@ -106,3 +106,8 @@ def cleanup_logging_impl() -> Generator:
queue_listener_handler.listener.stop()
queue_listener_handler.close()
del queue_listener_handler
+
+
+def not_none(val: T | None) -> T:
+ assert val is not None
+ return val
diff --git a/tests/unit/test_dto/test_factory/test_integration.py b/tests/unit/test_dto/test_factory/test_integration.py
--- a/tests/unit/test_dto/test_factory/test_integration.py
+++ b/tests/unit/test_dto/test_factory/test_integration.py
@@ -18,14 +18,19 @@
from litestar.dto import DataclassDTO, DTOConfig, DTOData, MsgspecDTO, dto_field
from litestar.dto.types import RenameStrategy
from litestar.enums import MediaType, RequestEncodingType
+from litestar.openapi.spec.response import OpenAPIResponse
+from litestar.openapi.spec.schema import Schema
from litestar.pagination import ClassicPagination, CursorPagination, OffsetPagination
from litestar.params import Body
from litestar.serialization import encode_json
from litestar.testing import create_test_client
+from tests.helpers import not_none
if TYPE_CHECKING:
from typing import Any
+ from litestar import Litestar
+
def test_url_encoded_form_data(use_experimental_dto_backend: bool) -> None:
@dataclass()
@@ -898,3 +903,103 @@ def handler(data: Foo) -> None:
with create_test_client([handler]) as client:
assert client.post("/", json={"bar": "1", "baz": "123"}).status_code == 201
+
+
+def test_openapi_schema_for_type_with_generic_pagination_type(
+ create_module: Callable[[str], ModuleType], use_experimental_dto_backend: bool
+) -> None:
+ module = create_module(
+ """
+from dataclasses import dataclass
+
+from litestar import Litestar, get
+from litestar.dto import DataclassDTO
+from litestar.pagination import ClassicPagination
+
+@dataclass
+class Test:
+ name: str
+ age: int
+
+@get("/without-dto", sync_to_thread=False)
+def without_dto() -> ClassicPagination[Test]:
+ return ClassicPagination(
+ items=[Test("John", 25), Test("Jane", 30)],
+ page_size=1,
+ current_page=2,
+ total_pages=2,
+ )
+
+@get("/with-dto", return_dto=DataclassDTO[Test], sync_to_thread=False)
+def with_dto() -> ClassicPagination[Test]:
+ return ClassicPagination(
+ items=[Test("John", 25), Test("Jane", 30)],
+ page_size=1,
+ current_page=2,
+ total_pages=2,
+ )
+
+app = Litestar([without_dto, with_dto])
+"""
+ )
+ openapi = cast("Litestar", module.app).openapi_schema
+ paths = not_none(openapi.paths)
+ without_dto_response = not_none(not_none(paths["/without-dto"].get).responses)["200"]
+ with_dto_response = not_none(not_none(paths["/with-dto"].get).responses)["200"]
+ assert isinstance(without_dto_response, OpenAPIResponse)
+ assert isinstance(with_dto_response, OpenAPIResponse)
+ without_dto_schema = not_none(without_dto_response.content)["application/json"].schema
+ with_dto_schema = not_none(with_dto_response.content)["application/json"].schema
+ assert isinstance(without_dto_schema, Schema)
+ assert isinstance(with_dto_schema, Schema)
+ assert not_none(without_dto_schema.properties).keys() == not_none(with_dto_schema.properties).keys()
+
+
+def test_openapi_schema_for_type_with_custom_generic_type(
+ create_module: Callable[[str], ModuleType], use_experimental_dto_backend: bool
+) -> None:
+ module = create_module(
+ """
+from dataclasses import dataclass
+from datetime import datetime
+from typing import Generic, List, TypeVar
+
+from sqlalchemy.orm import DeclarativeBase, Mapped, mapped_column
+
+from litestar import Litestar, get
+from litestar.contrib.sqlalchemy.dto import SQLAlchemyDTO
+from litestar.dto import DTOConfig
+
+T = TypeVar("T")
+
+@dataclass
+class WithCount(Generic[T]):
+ count: int
+ data: List[T]
+
+class Base(DeclarativeBase): ...
+
+class User(Base):
+ __tablename__ = "user"
+ id: Mapped[int] = mapped_column(primary_key=True)
+ name: Mapped[str]
+ password: Mapped[str]
+ created_at: Mapped[datetime]
+
+class UserDTO(SQLAlchemyDTO[User]):
+ config = DTOConfig(exclude={"password", "created_at"})
+
+@get("/users", dto=UserDTO, sync_to_thread=False)
+def get_users() -> WithCount[User]:
+ return WithCount(
+ count=1, data=[User(id=1, name="Litestar User", password="xyz", created_at=datetime.now())]
+ )
+
+app = Litestar(route_handlers=[get_users])
+"""
+ )
+ openapi = cast("Litestar", module.app).openapi_schema
+ schema = openapi.components.schemas["WithCount[litestar.dto._backend.GetUsersUserResponseBody]"]
+ assert not_none(schema.properties).keys() == {"count", "data"}
+ model_schema = openapi.components.schemas["GetUsersUserResponseBody"]
+ assert not_none(model_schema.properties).keys() == {"id", "name"}
| bug: openapi schema generation for DTO with generic wrapper
Handler such as:
```py
@get('/all-not-working', return_dto=QuotesSQLAlchemyDTO)
async def get_all_quotes_not_working(self, sqlite_session: AsyncSession) -> GetAllQuotesResponse[QuotesTable]:
res = GetAllQuotesResponse(quotes=await get_all_quotes(sqlite_session))
LOGGER.critical(f"sherlock {res}")
return res
```
Renders such as (only generates schema for the model type, not the wrapper type):

Issue is that we don't pass the full annotation from the DTO to the backend, we only pass the model type that is extracted from the annotation and the name of the attribute that it exists upon on the wrapper class. So, when the DTO is called upon to generate the openapi schema for the type, it doesn't incorporate the wrapper type, only the model.
<!-- POLAR PLEDGE BADGE START -->
---
> [!NOTE]
> While we are open for sponsoring on [GitHub Sponsors](https://github.com/sponsors/litestar-org/) and
> [OpenCollective](https://opencollective.com/litestar), we also utilize [Polar.sh](https://polar.sh/) to engage in pledge-based sponsorship.
>
> Check out all issues funded or available for funding [on our Polar.sh dashboard](https://polar.sh/litestar-org)
> * If you would like to see an issue prioritized, make a pledge towards it!
> * We receive the pledge once the issue is completed & verified
> * This, along with engagement in the community, helps us know which features are a priority to our users.
<a href="https://polar.sh/litestar-org/litestar/issues/2929">
<picture>
<source media="(prefers-color-scheme: dark)" srcset="https://polar.sh/api/github/litestar-org/litestar/issues/2929/pledge.svg?darkmode=1">
<img alt="Fund with Polar" src="https://polar.sh/api/github/litestar-org/litestar/issues/2929/pledge.svg">
</picture>
</a>
<!-- POLAR PLEDGE BADGE END -->
| 2024-04-10T22:36:47 |
|
litestar-org/litestar | 3,378 | litestar-org__litestar-3378 | [
"3372"
] | 40a56854008ce0c19e119417f7a3d4ec09aacfa2 | diff --git a/litestar/typing.py b/litestar/typing.py
--- a/litestar/typing.py
+++ b/litestar/typing.py
@@ -512,13 +512,11 @@ def from_annotation(cls, annotation: Any, **kwargs: Any) -> FieldDefinition:
if not kwargs.get("kwarg_definition"):
if isinstance(kwargs.get("default"), (KwargDefinition, DependencyKwarg)):
kwargs["kwarg_definition"] = kwargs.pop("default")
- elif any(isinstance(v, (KwargDefinition, DependencyKwarg)) for v in metadata):
- kwarg_definition = kwargs["kwarg_definition"] = next( # pragma: no cover
- # see https://github.com/nedbat/coveragepy/issues/475
- v
- for v in metadata
- if isinstance(v, (KwargDefinition, DependencyKwarg))
- )
+ elif kwarg_definition := next(
+ (v for v in metadata if isinstance(v, (KwargDefinition, DependencyKwarg))), None
+ ):
+ kwargs["kwarg_definition"] = kwarg_definition
+
if kwarg_definition.default is not Empty:
warnings.warn(
f"Deprecated default value specification for annotation '{annotation}'. Setting defaults "
@@ -529,7 +527,7 @@ def from_annotation(cls, annotation: Any, **kwargs: Any) -> FieldDefinition:
category=DeprecationWarning,
stacklevel=2,
)
- if "default" in kwargs and kwarg_definition.default != kwargs["default"]:
+ if kwargs.get("default", Empty) is not Empty and kwarg_definition.default != kwargs["default"]:
warnings.warn(
f"Ambiguous default values for annotation '{annotation}'. The default value "
f"'{kwarg_definition.default!r}' set inside the parameter annotation differs from the "
| diff --git a/tests/unit/test_typing.py b/tests/unit/test_typing.py
--- a/tests/unit/test_typing.py
+++ b/tests/unit/test_typing.py
@@ -9,6 +9,7 @@
import pytest
from typing_extensions import Annotated, NotRequired, Required, TypedDict, get_type_hints
+from litestar import get
from litestar.exceptions import LitestarWarning
from litestar.params import DependencyKwarg, KwargDefinition, Parameter, ParameterKwarg
from litestar.typing import FieldDefinition, _unpack_predicate
@@ -461,3 +462,17 @@ def test_warn_ambiguous_default_values() -> None:
def test_warn_defaults_inside_parameter_definition() -> None:
with pytest.warns(DeprecationWarning, match="Deprecated default value specification"):
FieldDefinition.from_annotation(Annotated[int, Parameter(default=1)], default=1)
+
+
+def test_warn_default_inside_kwarg_definition_and_default_empty() -> None:
+ with pytest.warns() as warnings:
+
+ @get(sync_to_thread=False)
+ def handler(foo: Annotated[int, Parameter(default=1)]) -> None:
+ pass
+
+ _ = handler.parsed_fn_signature
+
+ (record,) = warnings
+ assert record.category == DeprecationWarning
+ assert "Deprecated default value specification" in str(record.message)
| Bug: Improve warning message for ambiguous defaults
### Description
We should enhance the warning messages generated from #3280 in 2.8.
As a user just now upgrading and hitting this, across probably ~40 instances of that warning, this isn't terribly helpful:
```py
.venv/lib/python3.12/site-packages/litestar/typing.py:597: LitestarWarning: Ambiguous default values for annotation 'typing.Annotated[str, ParameterKwarg(examples=None, external_docs=None, content_encoding=None, default='+a', title=None, description='A string expression to filter something.', const=None, gt=None, ge=None, lt=None, le=None, multiple_of=None, min_items=None, max_items=None, min_length=None, max_length=None, pattern=None, lower_case=None, upper_case=None, format=None, enum=None, read_only=None, schema_extra=None, annotation=<_EmptyEnum.EMPTY: 0>, header=None, cookie=None, query=None, required=True)]'. The default value ''+a'' set inside the parameter annotation differs from the parameter default value '<_EmptyEnum.EMPTY: 0>'
return cls.from_annotation(
.venv/lib/python3.12/site-packages/litestar/typing.py:597: LitestarWarning: Ambiguous default values for annotation 'typing.Annotated[bool, ParameterKwarg(examples=None, external_docs=None, content_encoding=None, default=False, title=None, description='Return something', const=None, gt=None, ge=None, lt=None, le=None, multiple_of=None, min_items=None, max_items=None, min_length=None, max_length=None, pattern=None, lower_case=None, upper_case=None, format=None, enum=None, read_only=None, schema_extra=None, annotation=<_EmptyEnum.EMPTY: 0>, header=None, cookie=None, query=None, required=None)]'. The default value 'False' set inside the parameter annotation differs from the parameter default value '<_EmptyEnum.EMPTY: 0>'
return cls.from_annotation(
```
### URL to code causing the issue
_No response_
### MCVE
_No response_
### Steps to reproduce
```bash
1. Go to '...'
2. Click on '....'
3. Scroll down to '....'
4. See error
```
### Screenshots
```bash
""
```
### Logs
_No response_
### Litestar Version
2.8
### Platform
- [X] Linux
- [X] Mac
- [X] Windows
- [ ] Other (Please specify in the description above)
<!-- POLAR PLEDGE BADGE START -->
---
> [!NOTE]
> While we are open for sponsoring on [GitHub Sponsors](https://github.com/sponsors/litestar-org/) and
> [OpenCollective](https://opencollective.com/litestar), we also utilize [Polar.sh](https://polar.sh/) to engage in pledge-based sponsorship.
>
> Check out all issues funded or available for funding [on our Polar.sh dashboard](https://polar.sh/litestar-org)
> * If you would like to see an issue prioritized, make a pledge towards it!
> * We receive the pledge once the issue is completed & verified
> * This, along with engagement in the community, helps us know which features are a priority to our users.
<a href="https://polar.sh/litestar-org/litestar/issues/3372">
<picture>
<source media="(prefers-color-scheme: dark)" srcset="https://polar.sh/api/github/litestar-org/litestar/issues/3372/pledge.svg?darkmode=1">
<img alt="Fund with Polar" src="https://polar.sh/api/github/litestar-org/litestar/issues/3372/pledge.svg">
</picture>
</a>
<!-- POLAR PLEDGE BADGE END -->
| 2024-04-12T00:15:22 |
|
litestar-org/litestar | 3,380 | litestar-org__litestar-3380 | [
"3369"
] | 7b93d97a743ff28de6f90c112b8a273c845de430 | diff --git a/litestar/_openapi/parameters.py b/litestar/_openapi/parameters.py
--- a/litestar/_openapi/parameters.py
+++ b/litestar/_openapi/parameters.py
@@ -218,11 +218,15 @@ def create_parameters_for_handler(self) -> list[Parameter]:
# not all path parameters have to be consumed by the handler. Because even not
# consumed path parameters must still be specified, we create stub parameters
# for the unconsumed ones so a correct OpenAPI schema can be generated
+ dependency_fields = {
+ name for dep in self.dependency_providers.values() for name in dep.parsed_fn_signature.parameters
+ }
params_not_consumed_by_handler = set(self.path_parameters) - handler_fields.keys()
+ unconsumed_path_parameters = params_not_consumed_by_handler - dependency_fields
handler_fields.update(
{
param_name: FieldDefinition.from_kwarg(self.path_parameters[param_name].type, name=param_name)
- for param_name in params_not_consumed_by_handler
+ for param_name in unconsumed_path_parameters
}
)
| diff --git a/tests/unit/test_openapi/test_schema.py b/tests/unit/test_openapi/test_schema.py
--- a/tests/unit/test_openapi/test_schema.py
+++ b/tests/unit/test_openapi/test_schema.py
@@ -592,16 +592,24 @@ async def post_handler(param: str) -> None:
def test_unconsumed_path_parameters_are_documented() -> None:
# https://github.com/litestar-org/litestar/issues/3290
- @get("/{param:str}")
- async def handler() -> None:
+ # https://github.com/litestar-org/litestar/issues/3369
+
+ async def dd(param3: Annotated[str, Parameter(description="123")]) -> str:
+ return param3
+
+ async def d(dep_dep: str, param2: Annotated[str, Parameter(description="abc")]) -> str:
+ return f"{dep_dep}_{param2}"
+
+ @get("/{param1:str}/{param2:str}/{param3:str}", dependencies={"dep": d, "dep_dep": dd})
+ async def handler(dep: str) -> None:
pass
app = Litestar([handler])
- params = app.openapi_schema.paths["/{param}"].get.parameters # type: ignore[index, union-attr]
+ params = app.openapi_schema.paths["/{param1}/{param2}/{param3}"].get.parameters # type: ignore[index, union-attr]
assert params
- assert len(params) == 1
- param = params[0]
- assert isinstance(param, OpenAPIParameter)
- assert param.name == "param"
- assert param.required is True
- assert param.param_in is ParamType.PATH
+ assert len(params) == 3
+ for i, param in enumerate(sorted(params, key=lambda p: p.name), 1): # pyright: ignore
+ assert isinstance(param, OpenAPIParameter)
+ assert param.name == f"param{i}"
+ assert param.required is True
+ assert param.param_in is ParamType.PATH
| Bug: Path parameter consumed only by dependency treated as unconsumed (ref: #3295)
### Description
The unconsumed path parameter logic of #3295 doesn't consider path parameters consumed by dependencies.
If the type in path and the type in the dependency match exactly, this doesn't cause an issue.
If the dependency uses an Annotated type or type coercion (like int to datetime), this will raise an error while generating the OpenAPI schema.
In the case of the MCVE, the exact error follows:
```
litestar.exceptions.http_exceptions.ImproperlyConfiguredException: 500: OpenAPI schema generation for handler `app.my_route` detected multiple parameters named 'path_param' with different types.
```
### URL to code causing the issue
_No response_
### MCVE
```python
from typing import Annotated
from litestar import Litestar, get
from litestar.params import Parameter
from litestar.di import Provide
def my_depfn(
path_param: Annotated[str, Parameter(description="An annotated path parameter")],
) -> str:
return path_param
@get(path="/{path_param:str}", dependencies={"my_dep": Provide(my_depfn)})
def my_route(my_dep: str) -> str:
return my_dep
app = Litestar(
route_handlers=[my_route],
)
```
### Steps to reproduce
```bash
1. Write MCVE to app.py
2. Run `litestar schema openapi`
3. See error
```
### Screenshots
_No response_
### Logs
```bash
Using Litestar app from app:app
Traceback (most recent call last):
File "/dupe_path_param/.venv/bin/litestar", line 8, in <module>
sys.exit(run_cli())
^^^^^^^^^
File "/dupe_path_param/.venv/lib64/python3.11/site-packages/litestar/__main__.py", line 6, in run_cli
litestar_group()
File "/dupe_path_param/.venv/lib64/python3.11/site-packages/click/core.py", line 1157, in __call__
return self.main(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/dupe_path_param/.venv/lib64/python3.11/site-packages/rich_click/rich_command.py", line 126, in main
rv = self.invoke(ctx)
^^^^^^^^^^^^^^^^
File "/dupe_path_param/.venv/lib64/python3.11/site-packages/click/core.py", line 1688, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/dupe_path_param/.venv/lib64/python3.11/site-packages/click/core.py", line 1688, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/dupe_path_param/.venv/lib64/python3.11/site-packages/click/core.py", line 1434, in invoke
return ctx.invoke(self.callback, **ctx.params)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/dupe_path_param/.venv/lib64/python3.11/site-packages/click/core.py", line 783, in invoke
return __callback(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/dupe_path_param/.venv/lib64/python3.11/site-packages/click/decorators.py", line 33, in new_func
return f(get_current_context(), *args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/dupe_path_param/.venv/lib64/python3.11/site-packages/litestar/cli/_utils.py", line 248, in wrapped
return func(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^
File "/dupe_path_param/.venv/lib64/python3.11/site-packages/click/decorators.py", line 33, in new_func
return f(get_current_context(), *args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/dupe_path_param/.venv/lib64/python3.11/site-packages/litestar/cli/_utils.py", line 248, in wrapped
return func(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^
File "/dupe_path_param/.venv/lib64/python3.11/site-packages/litestar/cli/commands/schema.py", line 54, in generate_openapi_schema
_generate_openapi_schema(app, output)
File "/dupe_path_param/.venv/lib64/python3.11/site-packages/litestar/cli/commands/schema.py", line 34, in _generate_openapi_schema
encode_json(app.openapi_schema.to_schema(), serializer=serializer),
^^^^^^^^^^^^^^^^^^
File "/dupe_path_param/.venv/lib64/python3.11/site-packages/litestar/app.py", line 635, in openapi_schema
return self.plugins.get(OpenAPIPlugin).provide_openapi()
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/dupe_path_param/.venv/lib64/python3.11/site-packages/litestar/_openapi/plugin.py", line 94, in provide_openapi
self._openapi = self._build_openapi()
^^^^^^^^^^^^^^^^^^^^^
File "/dupe_path_param/.venv/lib64/python3.11/site-packages/litestar/_openapi/plugin.py", line 83, in _build_openapi
path_item = create_path_item_for_route(context, route)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/dupe_path_param/.venv/lib64/python3.11/site-packages/litestar/_openapi/path_item.py", line 139, in create_path_item_for_route
return path_item_factory.create_path_item()
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/dupe_path_param/.venv/lib64/python3.11/site-packages/litestar/_openapi/path_item.py", line 44, in create_path_item
operation = self.create_operation_for_handler_method(route_handler, HttpMethod(http_method))
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/dupe_path_param/.venv/lib64/python3.11/site-packages/litestar/_openapi/path_item.py", line 63, in create_operation_for_handler_method
parameters = create_parameters_for_handler(self.context, route_handler, self.route.path_parameters)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/dupe_path_param/.venv/lib64/python3.11/site-packages/litestar/_openapi/parameters.py", line 244, in create_parameters_for_handler
return factory.create_parameters_for_handler()
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/dupe_path_param/.venv/lib64/python3.11/site-packages/litestar/_openapi/parameters.py", line 229, in create_parameters_for_handler
self.create_parameters_for_field_definitions(handler_fields)
File "/dupe_path_param/.venv/lib64/python3.11/site-packages/litestar/_openapi/parameters.py", line 207, in create_parameters_for_field_definitions
self.parameters.add(self.create_parameter(field_definition=field_definition, parameter_name=field_name))
File "/dupe_path_param/.venv/lib64/python3.11/site-packages/litestar/_openapi/parameters.py", line 67, in add
raise ImproperlyConfiguredException(
litestar.exceptions.http_exceptions.ImproperlyConfiguredException: 500: OpenAPI schema generation for handler `app.my_route` detected multiple parameters named 'path_param' with different types.
```
### Litestar Version
2.8.0 and 2.8.1
### Platform
- [X] Linux
- [ ] Mac
- [ ] Windows
- [ ] Other (Please specify in the description above)
<!-- POLAR PLEDGE BADGE START -->
---
> [!NOTE]
> While we are open for sponsoring on [GitHub Sponsors](https://github.com/sponsors/litestar-org/) and
> [OpenCollective](https://opencollective.com/litestar), we also utilize [Polar.sh](https://polar.sh/) to engage in pledge-based sponsorship.
>
> Check out all issues funded or available for funding [on our Polar.sh dashboard](https://polar.sh/litestar-org)
> * If you would like to see an issue prioritized, make a pledge towards it!
> * We receive the pledge once the issue is completed & verified
> * This, along with engagement in the community, helps us know which features are a priority to our users.
<a href="https://polar.sh/litestar-org/litestar/issues/3369">
<picture>
<source media="(prefers-color-scheme: dark)" srcset="https://polar.sh/api/github/litestar-org/litestar/issues/3369/pledge.svg?darkmode=1">
<img alt="Fund with Polar" src="https://polar.sh/api/github/litestar-org/litestar/issues/3369/pledge.svg">
</picture>
</a>
<!-- POLAR PLEDGE BADGE END -->
| 2024-04-12T07:38:29 |
|
litestar-org/litestar | 3,401 | litestar-org__litestar-3401 | [
"3402"
] | f80a13bcf76c146a5bd8d0e1bf46b2b1a300ca21 | diff --git a/litestar/middleware/_internal.py b/litestar/middleware/_internal.py
--- a/litestar/middleware/_internal.py
+++ b/litestar/middleware/_internal.py
@@ -2,14 +2,19 @@
from typing import TYPE_CHECKING
+from litestar.constants import DEFAULT_ALLOWED_CORS_HEADERS
from litestar.datastructures import Headers, MutableScopeHeaders
-from litestar.enums import ScopeType
+from litestar.enums import HttpMethod, MediaType, ScopeType
from litestar.middleware.base import AbstractMiddleware
+from litestar.response import Response
+from litestar.status_codes import HTTP_204_NO_CONTENT, HTTP_400_BAD_REQUEST
if TYPE_CHECKING:
from litestar.config.cors import CORSConfig
from litestar.types import ASGIApp, Message, Receive, Scope, Send
+__all__ = ("CORSMiddleware",)
+
class CORSMiddleware(AbstractMiddleware):
"""CORS Middleware."""
@@ -36,7 +41,15 @@ async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:
None
"""
headers = Headers.from_scope(scope=scope)
- if origin := headers.get("origin"):
+ origin = headers.get("origin")
+
+ if scope["type"] == ScopeType.HTTP and scope["method"] == HttpMethod.OPTIONS and origin:
+ request = scope["app"].request_class(scope=scope, receive=receive, send=send)
+ asgi_response = self._create_preflight_response(origin=origin, request_headers=headers).to_asgi_response(
+ app=None, request=request
+ )
+ await asgi_response(scope, receive, send)
+ elif origin:
await self.app(scope, receive, self.send_wrapper(send=send, origin=origin, has_cookie="cookie" in headers))
else:
await self.app(scope, receive, send)
@@ -65,15 +78,55 @@ async def wrapped_send(message: Message) -> None:
headers["Access-Control-Allow-Origin"] = origin
headers["Vary"] = "Origin"
- # We don't want to overwrite this for preflight requests.
- allow_headers = headers.get("Access-Control-Allow-Headers")
- if not allow_headers and self.config.allow_headers:
- headers["Access-Control-Allow-Headers"] = ", ".join(sorted(set(self.config.allow_headers)))
+ headers["Access-Control-Allow-Headers"] = ", ".join(sorted(set(self.config.allow_headers)))
- allow_methods = headers.get("Access-Control-Allow-Methods")
- if not allow_methods and self.config.allow_methods:
- headers["Access-Control-Allow-Methods"] = ", ".join(sorted(set(self.config.allow_methods)))
+ headers["Access-Control-Allow-Methods"] = ", ".join(sorted(set(self.config.allow_methods)))
await send(message)
return wrapped_send
+
+ def _create_preflight_response(self, origin: str, request_headers: Headers) -> Response[str | None]:
+ pre_flight_method = request_headers.get("Access-Control-Request-Method")
+ failures = []
+
+ if not self.config.is_allow_all_methods and (
+ pre_flight_method and pre_flight_method not in self.config.allow_methods
+ ):
+ failures.append("method")
+
+ response_headers = self.config.preflight_headers.copy()
+
+ if not self.config.is_origin_allowed(origin):
+ failures.append("Origin")
+ elif response_headers.get("Access-Control-Allow-Origin") != "*":
+ response_headers["Access-Control-Allow-Origin"] = origin
+
+ pre_flight_requested_headers = [
+ header.strip()
+ for header in request_headers.get("Access-Control-Request-Headers", "").split(",")
+ if header.strip()
+ ]
+
+ if pre_flight_requested_headers:
+ if self.config.is_allow_all_headers:
+ response_headers["Access-Control-Allow-Headers"] = ", ".join(
+ sorted(set(pre_flight_requested_headers) | DEFAULT_ALLOWED_CORS_HEADERS) # pyright: ignore
+ )
+ elif any(header.lower() not in self.config.allow_headers for header in pre_flight_requested_headers):
+ failures.append("headers")
+
+ return (
+ Response(
+ content=f"Disallowed CORS {', '.join(failures)}",
+ status_code=HTTP_400_BAD_REQUEST,
+ media_type=MediaType.TEXT,
+ )
+ if failures
+ else Response(
+ content=None,
+ status_code=HTTP_204_NO_CONTENT,
+ media_type=MediaType.TEXT,
+ headers=response_headers,
+ )
+ )
diff --git a/litestar/routes/http.py b/litestar/routes/http.py
--- a/litestar/routes/http.py
+++ b/litestar/routes/http.py
@@ -5,15 +5,13 @@
from msgspec.msgpack import decode as _decode_msgpack_plain
-from litestar.constants import DEFAULT_ALLOWED_CORS_HEADERS
-from litestar.datastructures.headers import Headers
from litestar.datastructures.upload_file import UploadFile
from litestar.enums import HttpMethod, MediaType, ScopeType
from litestar.exceptions import ClientException, ImproperlyConfiguredException, SerializationException
from litestar.handlers.http_handlers import HTTPRouteHandler
from litestar.response import Response
from litestar.routes.base import BaseRoute
-from litestar.status_codes import HTTP_204_NO_CONTENT, HTTP_400_BAD_REQUEST
+from litestar.status_codes import HTTP_204_NO_CONTENT
from litestar.types.empty import Empty
from litestar.utils.scope.state import ScopeState
@@ -255,57 +253,6 @@ def options_handler(scope: Scope) -> Response:
Returns:
Response
"""
- cors_config = scope["app"].cors_config
- request_headers = Headers.from_scope(scope=scope)
- origin = request_headers.get("origin")
-
- if cors_config and origin:
- pre_flight_method = request_headers.get("Access-Control-Request-Method")
- failures = []
-
- if not cors_config.is_allow_all_methods and (
- pre_flight_method and pre_flight_method not in cors_config.allow_methods
- ):
- failures.append("method")
-
- response_headers = cors_config.preflight_headers.copy()
-
- if not cors_config.is_origin_allowed(origin):
- failures.append("Origin")
- elif response_headers.get("Access-Control-Allow-Origin") != "*":
- response_headers["Access-Control-Allow-Origin"] = origin
-
- pre_flight_requested_headers = [
- header.strip()
- for header in request_headers.get("Access-Control-Request-Headers", "").split(",")
- if header.strip()
- ]
-
- if pre_flight_requested_headers:
- if cors_config.is_allow_all_headers:
- response_headers["Access-Control-Allow-Headers"] = ", ".join(
- sorted(set(pre_flight_requested_headers) | DEFAULT_ALLOWED_CORS_HEADERS) # pyright: ignore
- )
- elif any(
- header.lower() not in cors_config.allow_headers for header in pre_flight_requested_headers
- ):
- failures.append("headers")
-
- return (
- Response(
- content=f"Disallowed CORS {', '.join(failures)}",
- status_code=HTTP_400_BAD_REQUEST,
- media_type=MediaType.TEXT,
- )
- if failures
- else Response(
- content=None,
- status_code=HTTP_204_NO_CONTENT,
- media_type=MediaType.TEXT,
- headers=response_headers,
- )
- )
-
return Response(
content=None,
status_code=HTTP_204_NO_CONTENT,
| diff --git a/tests/e2e/test_cors/__init__.py b/tests/e2e/test_cors/__init__.py
new file mode 100644
diff --git a/tests/e2e/test_cors/test_cors_allowed_headers.py b/tests/e2e/test_cors/test_cors_allowed_headers.py
new file mode 100644
--- /dev/null
+++ b/tests/e2e/test_cors/test_cors_allowed_headers.py
@@ -0,0 +1,49 @@
+from litestar import Litestar, get
+from litestar.config.cors import CORSConfig
+from litestar.status_codes import HTTP_204_NO_CONTENT, HTTP_400_BAD_REQUEST
+from litestar.testing import TestClient
+
+
+@get("/headers-test")
+async def headers_handler() -> str:
+ return "Test Successful!"
+
+
+cors_config = CORSConfig(
+ allow_methods=["GET"],
+ allow_origins=["https://allowed-origin.com"],
+ allow_headers=["X-Custom-Header", "Content-Type"],
+)
+app = Litestar(route_handlers=[headers_handler], cors_config=cors_config)
+
+
+def test_cors_with_specific_allowed_headers() -> None:
+ with TestClient(app) as client:
+ response = client.options(
+ "/endpoint",
+ headers={
+ "Origin": "https://allowed-origin.com",
+ "Access-Control-Request-Method": "GET",
+ "Access-Control-Request-Headers": "X-Custom-Header, Content-Type",
+ },
+ )
+ assert response.status_code == HTTP_204_NO_CONTENT
+ assert "x-custom-header" in response.headers["access-control-allow-headers"]
+ assert "content-type" in response.headers["access-control-allow-headers"]
+
+
+def test_cors_with_unauthorized_headers() -> None:
+ with TestClient(app) as client:
+ response = client.options(
+ "/endpoint",
+ headers={
+ "Origin": "https://allowed-origin.com",
+ "Access-Control-Request-Method": "GET",
+ "Access-Control-Request-Headers": "X-Not-Allowed-Header",
+ },
+ )
+ assert response.status_code == HTTP_400_BAD_REQUEST
+ assert (
+ "access-control-allow-headers" not in response.headers
+ or "x-not-allowed-header" not in response.headers.get("access-control-allow-headers", "")
+ )
diff --git a/tests/e2e/test_cors/test_cors_allowed_methods.py b/tests/e2e/test_cors/test_cors_allowed_methods.py
new file mode 100644
--- /dev/null
+++ b/tests/e2e/test_cors/test_cors_allowed_methods.py
@@ -0,0 +1,39 @@
+from http import HTTPStatus
+
+from litestar import Litestar, get
+from litestar.config.cors import CORSConfig
+from litestar.testing import TestClient
+
+
+@get("/method-test")
+async def method_handler() -> str:
+ return "Method Test Successful!"
+
+
+cors_config = CORSConfig(allow_methods=["GET", "POST"], allow_origins=["https://allowed-origin.com"])
+app = Litestar(route_handlers=[method_handler], cors_config=cors_config)
+
+
+def test_cors_allowed_methods() -> None:
+ with TestClient(app) as client:
+ response = client.options(
+ "/method-test", headers={"Origin": "https://allowed-origin.com", "Access-Control-Request-Method": "GET"}
+ )
+ assert response.status_code == HTTPStatus.NO_CONTENT
+ assert response.headers["access-control-allow-origin"] == "https://allowed-origin.com"
+ assert "GET" in response.headers["access-control-allow-methods"]
+
+ response = client.options(
+ "/method-test", headers={"Origin": "https://allowed-origin.com", "Access-Control-Request-Method": "POST"}
+ )
+ assert response.status_code == HTTPStatus.NO_CONTENT
+ assert "POST" in response.headers["access-control-allow-methods"]
+
+
+def test_cors_disallowed_methods() -> None:
+ with TestClient(app) as client:
+ response = client.options(
+ "/method-test", headers={"Origin": "https://allowed-origin.com", "Access-Control-Request-Method": "PUT"}
+ )
+ assert response.status_code == HTTPStatus.BAD_REQUEST
+ assert "PUT" not in response.headers.get("access-control-allow-methods", "")
diff --git a/tests/e2e/test_cors/test_cors_credentials.py b/tests/e2e/test_cors/test_cors_credentials.py
new file mode 100644
--- /dev/null
+++ b/tests/e2e/test_cors/test_cors_credentials.py
@@ -0,0 +1,39 @@
+from litestar import Litestar, get
+from litestar.config.cors import CORSConfig
+from litestar.status_codes import HTTP_204_NO_CONTENT
+from litestar.testing import TestClient
+
+
+@get("/credentials-test")
+async def credentials_handler() -> str:
+ return "Test Successful!"
+
+
+def test_cors_with_credentials_allowed() -> None:
+ cors_config = CORSConfig(
+ allow_methods=["GET"], allow_origins=["https://allowed-origin.com"], allow_credentials=True
+ )
+ app = Litestar(route_handlers=[credentials_handler], cors_config=cors_config)
+
+ with TestClient(app) as client:
+ response = client.options(
+ "/endpoint", headers={"Origin": "https://allowed-origin.com", "Access-Control-Request-Method": "GET"}
+ )
+ assert response.status_code == HTTP_204_NO_CONTENT
+ assert response.headers["access-control-allow-credentials"] == "true"
+
+
+def test_cors_with_credentials_disallowed() -> None:
+ cors_config = CORSConfig(
+ allow_methods=["GET"],
+ allow_origins=["https://allowed-origin.com"],
+ allow_credentials=False, # Credentials should not be allowed
+ )
+ app = Litestar(route_handlers=[credentials_handler], cors_config=cors_config)
+
+ with TestClient(app) as client:
+ response = client.options(
+ "/endpoint", headers={"Origin": "https://allowed-origin.com", "Access-Control-Request-Method": "GET"}
+ )
+ assert response.status_code == HTTP_204_NO_CONTENT
+ assert "access-control-allow-credentials" not in response.headers
diff --git a/tests/e2e/test_cors/test_cors_for_middleware_exception.py b/tests/e2e/test_cors/test_cors_for_middleware_exception.py
new file mode 100644
--- /dev/null
+++ b/tests/e2e/test_cors/test_cors_for_middleware_exception.py
@@ -0,0 +1,30 @@
+from http import HTTPStatus
+
+from litestar import Litestar, get
+from litestar.config.cors import CORSConfig
+from litestar.exceptions import HTTPException
+from litestar.middleware import AbstractMiddleware
+from litestar.testing import TestClient
+from litestar.types.asgi_types import Receive, Scope, Send
+
+
+class ExceptionMiddleware(AbstractMiddleware):
+ async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:
+ raise HTTPException(status_code=HTTPStatus.INTERNAL_SERVER_ERROR, detail="Intentional Error")
+
+
+@get("/test")
+async def handler() -> str:
+ return "Should not reach this"
+
+
+cors_config = CORSConfig(allow_methods=["GET"], allow_origins=["https://allowed-origin.com"], allow_credentials=True)
+app = Litestar(route_handlers=[handler], cors_config=cors_config, middleware=[ExceptionMiddleware])
+
+
+def test_cors_on_middleware_exception() -> None:
+ with TestClient(app) as client:
+ response = client.get("/test", headers={"Origin": "https://allowed-origin.com"})
+ assert response.status_code == HTTPStatus.INTERNAL_SERVER_ERROR
+ assert response.headers["access-control-allow-origin"] == "https://allowed-origin.com"
+ assert response.headers["access-control-allow-credentials"] == "true"
diff --git a/tests/e2e/test_cors/test_cors_for_mount.py b/tests/e2e/test_cors/test_cors_for_mount.py
new file mode 100644
--- /dev/null
+++ b/tests/e2e/test_cors/test_cors_for_mount.py
@@ -0,0 +1,100 @@
+from __future__ import annotations
+
+from http import HTTPStatus
+from unittest.mock import MagicMock
+
+import pytest
+
+from litestar import Litestar, asgi
+from litestar.config.cors import CORSConfig
+from litestar.enums import ScopeType
+from litestar.testing import TestClient
+from litestar.types.asgi_types import ASGIApp, Receive, Scope, Send
+
+
[email protected](name="asgi_mock")
+def asgi_mock_fixture() -> MagicMock:
+ return MagicMock()
+
+
[email protected](name="asgi_app")
+def asgi_app_fixture(asgi_mock: MagicMock) -> ASGIApp:
+ async def asgi_app(scope: Scope, receive: Receive, send: Send) -> None:
+ asgi_mock()
+
+ assert scope["type"] == ScopeType.HTTP
+
+ while True:
+ event = await receive()
+ if event["type"] == "http.request" and not event.get("more_body", False):
+ break
+
+ await send(
+ {
+ "type": "http.response.start",
+ "status": 200,
+ "headers": [
+ (b"content-type", b"text/plain"),
+ ],
+ }
+ )
+
+ await send(
+ {
+ "type": "http.response.body",
+ "body": b"Hello, world!",
+ "more_body": False,
+ }
+ )
+
+ return asgi_app
+
+
+def test_cors_middleware_for_mount(asgi_app: ASGIApp, asgi_mock: MagicMock) -> None:
+ cors_config = CORSConfig(allow_methods=["*"], allow_origins=["https://some-domain.com"])
+ app = Litestar(
+ cors_config=cors_config,
+ route_handlers=[
+ asgi("/app", is_mount=True)(asgi_app),
+ ],
+ openapi_config=None,
+ )
+
+ with TestClient(app) as client:
+ response = client.options(
+ "http://127.0.0.1:8000/app",
+ headers={"origin": "https://some-domain.com"},
+ )
+ assert response.status_code == HTTPStatus.NO_CONTENT
+ assert response.headers["access-control-allow-origin"] == "https://some-domain.com"
+ asgi_mock.assert_not_called()
+
+
+def test_asgi_app_no_origin_header(asgi_app: ASGIApp, asgi_mock: MagicMock) -> None:
+ cors_config = CORSConfig(allow_methods=["*"], allow_origins=["https://some-domain.com"])
+ app = Litestar(
+ cors_config=cors_config,
+ route_handlers=[
+ asgi("/app", is_mount=True)(asgi_app),
+ ],
+ openapi_config=None,
+ )
+
+ with TestClient(app) as client:
+ response = client.options("http://127.0.0.1/app")
+ assert response.status_code == HTTPStatus.OK
+ assert response.headers["content-type"] == "text/plain"
+ asgi_mock.assert_called()
+
+
+def test_asgi_app_without_cors_configuration(asgi_app: ASGIApp, asgi_mock: MagicMock) -> None:
+ non_cors_app = Litestar(
+ route_handlers=[asgi("/app", is_mount=True)(asgi_app)],
+ openapi_config=None,
+ )
+
+ with TestClient(non_cors_app) as client:
+ response = client.options("http://127.0.0.1:8000/app")
+ assert response.status_code == HTTPStatus.OK
+ assert response.headers["content-type"] == "text/plain"
+ asgi_mock.assert_called()
diff --git a/tests/e2e/test_cors/test_cors_for_routing_exception.py b/tests/e2e/test_cors/test_cors_for_routing_exception.py
new file mode 100644
--- /dev/null
+++ b/tests/e2e/test_cors/test_cors_for_routing_exception.py
@@ -0,0 +1,22 @@
+from http import HTTPStatus
+
+from litestar import Litestar, get
+from litestar.config.cors import CORSConfig
+from litestar.testing import TestClient
+
+
+@get("/test")
+async def handler() -> str:
+ return "Should not reach this"
+
+
+cors_config = CORSConfig(allow_methods=["GET"], allow_origins=["https://allowed-origin.com"], allow_credentials=True)
+app = Litestar(route_handlers=[handler], cors_config=cors_config)
+
+
+def test_cors_on_middleware_exception_with_origin_header() -> None:
+ with TestClient(app) as client:
+ response = client.get("/testing", headers={"Origin": "https://allowed-origin.com"})
+ assert response.status_code == HTTPStatus.NOT_FOUND
+ assert response.headers["access-control-allow-origin"] == "https://allowed-origin.com"
+ assert response.headers["access-control-allow-credentials"] == "true"
diff --git a/tests/e2e/test_cors/test_cors_origins.py b/tests/e2e/test_cors/test_cors_origins.py
new file mode 100644
--- /dev/null
+++ b/tests/e2e/test_cors/test_cors_origins.py
@@ -0,0 +1,42 @@
+from http import HTTPStatus
+
+from litestar import Litestar, get
+from litestar.config.cors import CORSConfig
+from litestar.testing import TestClient
+
+
+@get("/endpoint")
+async def handler() -> str:
+ return "Hello, world!"
+
+
+cors_config = CORSConfig(
+ allow_methods=["GET"], allow_origins=["https://allowed-origin.com", "https://another-allowed-origin.com"]
+)
+app = Litestar(route_handlers=[handler], cors_config=cors_config)
+
+
+def test_cors_with_allowed_origins() -> None:
+ with TestClient(app) as client:
+ response = client.options(
+ "/custom-options", headers={"Origin": "https://allowed-origin.com", "Access-Control-Request-Method": "GET"}
+ )
+ assert response.status_code == HTTPStatus.NO_CONTENT
+ assert response.headers["access-control-allow-origin"] == "https://allowed-origin.com"
+
+ response = client.options(
+ "/custom-options",
+ headers={"Origin": "https://another-allowed-origin.com", "Access-Control-Request-Method": "GET"},
+ )
+ assert response.status_code == HTTPStatus.NO_CONTENT
+ assert response.headers["access-control-allow-origin"] == "https://another-allowed-origin.com"
+
+
+def test_cors_with_disallowed_origin() -> None:
+ with TestClient(app) as client:
+ response = client.options(
+ "/custom-options",
+ headers={"Origin": "https://disallowed-origin.com", "Access-Control-Request-Method": "GET"},
+ )
+ assert response.status_code == HTTPStatus.BAD_REQUEST
+ assert "access-control-allow-origin" not in response.headers
diff --git a/tests/e2e/test_cors/test_custom_options_handlers.py b/tests/e2e/test_cors/test_custom_options_handlers.py
new file mode 100644
--- /dev/null
+++ b/tests/e2e/test_cors/test_custom_options_handlers.py
@@ -0,0 +1,38 @@
+from http import HTTPStatus
+
+from litestar import Litestar, route
+from litestar.config.cors import CORSConfig
+from litestar.enums import HttpMethod
+from litestar.response import Response
+from litestar.testing import TestClient
+
+
+@route("/custom-options", http_method=HttpMethod.OPTIONS)
+async def custom_options_handler() -> Response[str]:
+ return Response(
+ status_code=200,
+ headers={"Custom-Handler": "Active"},
+ content="Handled by Custom Options",
+ )
+
+
+cors_config = CORSConfig(allow_methods=["GET", "OPTIONS"], allow_origins=["https://allowed-origin.com"])
+app = Litestar(route_handlers=[custom_options_handler], cors_config=cors_config)
+
+
+def test_custom_options_handler_cors_pre_flight_request() -> None:
+ with TestClient(app) as client:
+ response = client.options(
+ "/custom-options", headers={"Origin": "https://allowed-origin.com", "Access-Control-Request-Method": "GET"}
+ )
+ assert response.status_code == HTTPStatus.NO_CONTENT
+ assert "access-control-allow-origin" in response.headers
+ assert "Custom-Handler" not in response.headers
+
+
+def test_custom_options_handler_non_cors_request() -> None:
+ with TestClient(app) as client:
+ response = client.options("/custom-options")
+ assert response.status_code == 200
+ assert response.headers.get("Custom-Handler") == "Active"
+ assert response.text == "Handled by Custom Options"
diff --git a/tests/e2e/test_cors/test_non_cors_options.py b/tests/e2e/test_cors/test_non_cors_options.py
new file mode 100644
--- /dev/null
+++ b/tests/e2e/test_cors/test_non_cors_options.py
@@ -0,0 +1,33 @@
+from litestar import Litestar, get
+from litestar.config.cors import CORSConfig
+from litestar.status_codes import HTTP_204_NO_CONTENT
+from litestar.testing import TestClient
+
+
+@get("/handler")
+async def handler() -> str:
+ return "Handler"
+
+
+def test_non_cors_options_request_no_origin_header() -> None:
+ cors_config = CORSConfig(
+ allow_methods=["PUT"],
+ allow_origins=["https://specific-domain.com"],
+ )
+ app = Litestar(route_handlers=[handler], cors_config=cors_config)
+
+ with TestClient(app) as client:
+ # Request without an 'Origin' header
+ response = client.options("/handler")
+ assert response.status_code == HTTP_204_NO_CONTENT
+ assert response.headers["allow"] == "GET, OPTIONS"
+
+
+def test_non_cors_options_no_config() -> None:
+ app = Litestar(route_handlers=[handler])
+
+ with TestClient(app) as client:
+ # Request with an origin that does not require CORS handling
+ response = client.options("/handler", headers={"Origin": "https://not-configured-origin.com"})
+ assert response.status_code == HTTP_204_NO_CONTENT
+ assert response.headers["allow"] == "GET, OPTIONS"
| bug: CORS middleware not working with mounted applications
### Reported by
[__peter__](https://discord.com/users/947242363439448155) in Discord: CORS middleware not working with mounted applications
### Description
CORS pre-flight requests are passed to the mounted application, and not handled by the framework's CORS implementation.
### MCVE
```py
from http import HTTPStatus
import uvicorn
from litestar import Litestar, get, Response, asgi
from litestar.config.cors import CORSConfig
@get("/route")
async def route() -> Response[str]:
return Response(content="Content", status_code=HTTPStatus.METHOD_NOT_ALLOWED)
cors_config = CORSConfig(allow_methods=["*"], allow_origins=["https://some-domain.com"])
app = Litestar(
cors_config=cors_config,
route_handlers=[
asgi("/app", is_mount=True)(Litestar()),
route,
]
)
if __name__ == "__main__":
uvicorn.run("main:app", reload=True)
```
```py
import httpx
for path in ["/app", "/route"]:
response = httpx.options(
f"http://127.0.0.1:8000{path}",
headers={"origin": "https://some-domain.com"},
)
print(response.status_code)
print(response.content)
print(response.headers)
```
### Logs
```
/^\/^\
_|__| O|
\/ /~ \_/ \
\____|__________/ \
\_______ \
`\ \ \
| | \
/ / \
/ / \\
/ / \ \
/ / \ \
/ / _----_ \ \
/ / _-~ ~-_ | |
( ( _-~ _--_ ~-_ _/ |
\ ~-____-~ _-~ ~-_ ~-_-~ /
~-_ _-~ ~-_ _-~
~--______-~ ~-___-~
```
### Litestar Version
2.8.2
<!-- POLAR PLEDGE BADGE START -->
---
> [!NOTE]
> While we are open for sponsoring on [GitHub Sponsors](https://github.com/sponsors/litestar-org/) and
> [OpenCollective](https://opencollective.com/litestar), we also utilize [Polar.sh](https://polar.sh/) to engage in pledge-based sponsorship.
>
> Check out all issues funded or available for funding [on our Polar.sh dashboard](https://polar.sh/litestar-org)
> * If you would like to see an issue prioritized, make a pledge towards it!
> * We receive the pledge once the issue is completed & verified
> * This, along with engagement in the community, helps us know which features are a priority to our users.
<a href="https://polar.sh/litestar-org/litestar/issues/3402">
<picture>
<source media="(prefers-color-scheme: dark)" srcset="https://polar.sh/api/github/litestar-org/litestar/issues/3402/pledge.svg?darkmode=1">
<img alt="Fund with Polar" src="https://polar.sh/api/github/litestar-org/litestar/issues/3402/pledge.svg">
</picture>
</a>
<!-- POLAR PLEDGE BADGE END -->
| 2024-04-18T05:37:02 |
|
litestar-org/litestar | 3,404 | litestar-org__litestar-3404 | [
"3403"
] | fb5f7443468b1298582441128cd39594a32c8712 | diff --git a/litestar/app.py b/litestar/app.py
--- a/litestar/app.py
+++ b/litestar/app.py
@@ -32,7 +32,7 @@
NoRouteMatchFoundException,
)
from litestar.logging.config import LoggingConfig, get_logger_placeholder
-from litestar.middleware.cors import CORSMiddleware
+from litestar.middleware._internal import CORSMiddleware
from litestar.openapi.config import OpenAPIConfig
from litestar.plugins import (
CLIPluginProtocol,
@@ -245,7 +245,7 @@ def __init__(
this app. Can be overridden by route handlers.
compression_config: Configures compression behaviour of the application, this enabled a builtin or user
defined Compression middleware.
- cors_config: If set, configures :class:`CORSMiddleware <.middleware.cors.CORSMiddleware>`.
+ cors_config: If set, configures CORS handling for the application.
csrf_config: If set, configures :class:`CSRFMiddleware <.middleware.csrf.CSRFMiddleware>`.
debug: If ``True``, app errors rendered as HTML with a stack trace.
dependencies: A string keyed mapping of dependency :class:`Providers <.di.Provide>`.
diff --git a/litestar/middleware/_internal.py b/litestar/middleware/_internal.py
new file mode 100644
--- /dev/null
+++ b/litestar/middleware/_internal.py
@@ -0,0 +1,79 @@
+from __future__ import annotations
+
+from typing import TYPE_CHECKING
+
+from litestar.datastructures import Headers, MutableScopeHeaders
+from litestar.enums import ScopeType
+from litestar.middleware.base import AbstractMiddleware
+
+if TYPE_CHECKING:
+ from litestar.config.cors import CORSConfig
+ from litestar.types import ASGIApp, Message, Receive, Scope, Send
+
+
+class CORSMiddleware(AbstractMiddleware):
+ """CORS Middleware."""
+
+ def __init__(self, app: ASGIApp, config: CORSConfig) -> None:
+ """Middleware that adds CORS validation to the application.
+
+ Args:
+ app: The ``next`` ASGI app to call.
+ config: An instance of :class:`CORSConfig <litestar.config.cors.CORSConfig>`
+ """
+ super().__init__(app=app, scopes={ScopeType.HTTP})
+ self.config = config
+
+ async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:
+ """ASGI callable.
+
+ Args:
+ scope: The ASGI connection scope.
+ receive: The ASGI receive function.
+ send: The ASGI send function.
+
+ Returns:
+ None
+ """
+ headers = Headers.from_scope(scope=scope)
+ if origin := headers.get("origin"):
+ await self.app(scope, receive, self.send_wrapper(send=send, origin=origin, has_cookie="cookie" in headers))
+ else:
+ await self.app(scope, receive, send)
+
+ def send_wrapper(self, send: Send, origin: str, has_cookie: bool) -> Send:
+ """Wrap ``send`` to ensure that state is not disconnected.
+
+ Args:
+ has_cookie: Boolean flag dictating if the connection has a cookie set.
+ origin: The value of the ``Origin`` header.
+ send: The ASGI send function.
+
+ Returns:
+ An ASGI send function.
+ """
+
+ async def wrapped_send(message: Message) -> None:
+ if message["type"] == "http.response.start":
+ message.setdefault("headers", [])
+ headers = MutableScopeHeaders.from_message(message=message)
+ headers.update(self.config.simple_headers)
+
+ if (self.config.is_allow_all_origins and has_cookie) or (
+ not self.config.is_allow_all_origins and self.config.is_origin_allowed(origin=origin)
+ ):
+ headers["Access-Control-Allow-Origin"] = origin
+ headers["Vary"] = "Origin"
+
+ # We don't want to overwrite this for preflight requests.
+ allow_headers = headers.get("Access-Control-Allow-Headers")
+ if not allow_headers and self.config.allow_headers:
+ headers["Access-Control-Allow-Headers"] = ", ".join(sorted(set(self.config.allow_headers)))
+
+ allow_methods = headers.get("Access-Control-Allow-Methods")
+ if not allow_methods and self.config.allow_methods:
+ headers["Access-Control-Allow-Methods"] = ", ".join(sorted(set(self.config.allow_methods)))
+
+ await send(message)
+
+ return wrapped_send
diff --git a/litestar/middleware/cors.py b/litestar/middleware/cors.py
--- a/litestar/middleware/cors.py
+++ b/litestar/middleware/cors.py
@@ -1,82 +1,19 @@
from __future__ import annotations
-from typing import TYPE_CHECKING
-
-from litestar.datastructures import Headers, MutableScopeHeaders
-from litestar.enums import ScopeType
-from litestar.middleware.base import AbstractMiddleware
-
-__all__ = ("CORSMiddleware",)
-
-
-if TYPE_CHECKING:
- from litestar.config.cors import CORSConfig
- from litestar.types import ASGIApp, Message, Receive, Scope, Send
-
-
-class CORSMiddleware(AbstractMiddleware):
- """CORS Middleware."""
-
- def __init__(self, app: ASGIApp, config: CORSConfig) -> None:
- """Middleware that adds CORS validation to the application.
-
- Args:
- app: The ``next`` ASGI app to call.
- config: An instance of :class:`CORSConfig <litestar.config.cors.CORSConfig>`
- """
- super().__init__(app=app, scopes={ScopeType.HTTP})
- self.config = config
-
- async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:
- """ASGI callable.
-
- Args:
- scope: The ASGI connection scope.
- receive: The ASGI receive function.
- send: The ASGI send function.
-
- Returns:
- None
- """
- headers = Headers.from_scope(scope=scope)
- if origin := headers.get("origin"):
- await self.app(scope, receive, self.send_wrapper(send=send, origin=origin, has_cookie="cookie" in headers))
- else:
- await self.app(scope, receive, send)
-
- def send_wrapper(self, send: Send, origin: str, has_cookie: bool) -> Send:
- """Wrap ``send`` to ensure that state is not disconnected.
-
- Args:
- has_cookie: Boolean flag dictating if the connection has a cookie set.
- origin: The value of the ``Origin`` header.
- send: The ASGI send function.
-
- Returns:
- An ASGI send function.
- """
-
- async def wrapped_send(message: Message) -> None:
- if message["type"] == "http.response.start":
- message.setdefault("headers", [])
- headers = MutableScopeHeaders.from_message(message=message)
- headers.update(self.config.simple_headers)
-
- if (self.config.is_allow_all_origins and has_cookie) or (
- not self.config.is_allow_all_origins and self.config.is_origin_allowed(origin=origin)
- ):
- headers["Access-Control-Allow-Origin"] = origin
- headers["Vary"] = "Origin"
-
- # We don't want to overwrite this for preflight requests.
- allow_headers = headers.get("Access-Control-Allow-Headers")
- if not allow_headers and self.config.allow_headers:
- headers["Access-Control-Allow-Headers"] = ", ".join(sorted(set(self.config.allow_headers)))
-
- allow_methods = headers.get("Access-Control-Allow-Methods")
- if not allow_methods and self.config.allow_methods:
- headers["Access-Control-Allow-Methods"] = ", ".join(sorted(set(self.config.allow_methods)))
-
- await send(message)
-
- return wrapped_send
+from typing import Any
+
+from litestar.middleware import _internal
+from litestar.utils.deprecation import warn_deprecation
+
+
+def __getattr__(name: str) -> Any:
+ if name == "CORSMiddleware":
+ warn_deprecation(
+ version="2.9",
+ deprecated_name=name,
+ kind="class",
+ removal_in="3.0",
+ info="CORS middleware has been removed from the public API.",
+ )
+ return _internal.CORSMiddleware
+ raise AttributeError(f"module {__name__} has no attribute {name}")
| diff --git a/litestar/testing/helpers.py b/litestar/testing/helpers.py
--- a/litestar/testing/helpers.py
+++ b/litestar/testing/helpers.py
@@ -169,7 +169,7 @@ def test_my_handler() -> None:
this app. Can be overridden by route handlers.
compression_config: Configures compression behaviour of the application, this enabled a builtin or user
defined Compression middleware.
- cors_config: If set, configures :class:`CORSMiddleware <.middleware.cors.CORSMiddleware>`.
+ cors_config: If set, configures CORS handling for the application.
csrf_config: If set, configures :class:`CSRFMiddleware <.middleware.csrf.CSRFMiddleware>`.
debug: If ``True``, app errors rendered as HTML with a stack trace.
dependencies: A string keyed mapping of dependency :class:`Providers <.di.Provide>`.
@@ -430,7 +430,7 @@ async def test_my_handler() -> None:
this app. Can be overridden by route handlers.
compression_config: Configures compression behaviour of the application, this enabled a builtin or user
defined Compression middleware.
- cors_config: If set, configures :class:`CORSMiddleware <.middleware.cors.CORSMiddleware>`.
+ cors_config: If set, configures CORS handling for the application.
csrf_config: If set, configures :class:`CSRFMiddleware <.middleware.csrf.CSRFMiddleware>`.
debug: If ``True``, app errors rendered as HTML with a stack trace.
dependencies: A string keyed mapping of dependency :class:`Providers <.di.Provide>`.
diff --git a/tests/unit/test_deprecations.py b/tests/unit/test_deprecations.py
--- a/tests/unit/test_deprecations.py
+++ b/tests/unit/test_deprecations.py
@@ -168,3 +168,8 @@ def test_openapi_config_enabled_endpoints_deprecation() -> None:
with pytest.warns(DeprecationWarning):
OpenAPIConfig(title="API", version="1.0", enabled_endpoints={"redoc"})
+
+
+def test_cors_middleware_public_interface_deprecation() -> None:
+ with pytest.warns(DeprecationWarning):
+ from litestar.middleware.cors import CORSMiddleware # noqa: F401
diff --git a/tests/unit/test_middleware/test_cors_middleware.py b/tests/unit/test_middleware/test_cors_middleware.py
--- a/tests/unit/test_middleware/test_cors_middleware.py
+++ b/tests/unit/test_middleware/test_cors_middleware.py
@@ -4,7 +4,7 @@
from litestar import get
from litestar.config.cors import CORSConfig
-from litestar.middleware.cors import CORSMiddleware
+from litestar.middleware._internal import CORSMiddleware
from litestar.status_codes import HTTP_200_OK, HTTP_404_NOT_FOUND
from litestar.testing import create_test_client
from litestar.types.asgi_types import Method
| refactor: deprecate CORSMiddleware from public interface
We require a `CORSConfig` passed to the application object in order for pre-flight responses to be generated.
https://github.com/litestar-org/litestar/blob/a0d11092dae300af4025d619d8ac818790dede74/litestar/routes/http.py#L258-L262
There is no way to only supply the `CORSMiddleware` to a subset of routes via a `Router.middleware` or similar, because of the fact that the pre-flight response generation is conditional upon the application having access to the `CORSConfig` object. In other words, if a user didn't supply the `CORSConfig` object to the application, but did supply `DefineMiddleware(CORSMiddleware, config=CORSConfig(...))` to a router, then the pre-flight responses for that router wouldn't have worked anyway because there's no `CORSConfig` on the app. And if the `CORSConfig` is added to the app, then there is no point adding the middleware to a sub-router, because we'd then apply it to the whole application anyway.
Also, the `CORSMiddleware` class is hard coded as a wrapper around the `ASGIRouter`:
https://github.com/litestar-org/litestar/blob/a0d11092dae300af4025d619d8ac818790dede74/litestar/app.py#L842-L844
There is no way for a user to sub-class the middleware to alter its behavior, as there is no way to pass that sub-classed middleware to us.
So IMO, `CORSMiddleware` is implementation detail - the whole thing should be undocumented and we should probably deprecate it from public use.
_Originally posted by @peterschutt in https://github.com/litestar-org/litestar/pull/3395#discussion_r1567964790_
<!-- POLAR PLEDGE BADGE START -->
---
> [!NOTE]
> While we are open for sponsoring on [GitHub Sponsors](https://github.com/sponsors/litestar-org/) and
> [OpenCollective](https://opencollective.com/litestar), we also utilize [Polar.sh](https://polar.sh/) to engage in pledge-based sponsorship.
>
> Check out all issues funded or available for funding [on our Polar.sh dashboard](https://polar.sh/litestar-org)
> * If you would like to see an issue prioritized, make a pledge towards it!
> * We receive the pledge once the issue is completed & verified
> * This, along with engagement in the community, helps us know which features are a priority to our users.
<a href="https://polar.sh/litestar-org/litestar/issues/3403">
<picture>
<source media="(prefers-color-scheme: dark)" srcset="https://polar.sh/api/github/litestar-org/litestar/issues/3403/pledge.svg?darkmode=1">
<img alt="Fund with Polar" src="https://polar.sh/api/github/litestar-org/litestar/issues/3403/pledge.svg">
</picture>
</a>
<!-- POLAR PLEDGE BADGE END -->
| 2024-04-18T06:08:14 |
|
litestar-org/litestar | 3,408 | litestar-org__litestar-3408 | [
"3407"
] | c625ce7d13a7554b6fe2c25c7556e7a74362374e | diff --git a/litestar/_kwargs/extractors.py b/litestar/_kwargs/extractors.py
--- a/litestar/_kwargs/extractors.py
+++ b/litestar/_kwargs/extractors.py
@@ -16,7 +16,8 @@
from litestar.exceptions import ValidationException
from litestar.params import BodyKwarg
from litestar.types import Empty
-from litestar.utils.predicates import is_non_string_sequence
+from litestar.utils import make_non_optional_union
+from litestar.utils.predicates import is_non_string_sequence, is_optional_union
from litestar.utils.scope.state import ScopeState
if TYPE_CHECKING:
@@ -348,7 +349,10 @@ async def _extract_multipart(
if field_definition.is_non_string_sequence:
values = list(form_values.values())
- if field_definition.has_inner_subclass_of(UploadFile) and isinstance(values[0], list):
+ if isinstance(values[0], list) and (
+ field_definition.has_inner_subclass_of(UploadFile)
+ or (field_definition.is_optional and field_definition.inner_types[0].is_non_string_sequence)
+ ):
return values[0]
return values
@@ -364,7 +368,14 @@ async def _extract_multipart(
for name, tp in field_definition.get_type_hints().items():
value = form_values.get(name)
- if value is not None and is_non_string_sequence(tp) and not isinstance(value, list):
+ if (
+ value is not None
+ and not isinstance(value, list)
+ and (
+ is_non_string_sequence(tp)
+ or (is_optional_union(tp) and is_non_string_sequence(make_non_optional_union(tp)))
+ )
+ ):
form_values[name] = [value]
return form_values
| diff --git a/tests/unit/test_kwargs/test_multipart_data.py b/tests/unit/test_kwargs/test_multipart_data.py
--- a/tests/unit/test_kwargs/test_multipart_data.py
+++ b/tests/unit/test_kwargs/test_multipart_data.py
@@ -394,10 +394,15 @@ async def hello_world(data: UploadFile = Body(media_type=RequestEncodingType.MUL
assert response.status_code == HTTP_201_CREATED
[email protected]("optional", [True, False])
@pytest.mark.parametrize("file_count", (1, 2))
-def test_upload_multiple_files(file_count: int) -> None:
- @post("/")
- async def handler(data: List[UploadFile] = Body(media_type=RequestEncodingType.MULTI_PART)) -> None:
+def test_upload_multiple_files(file_count: int, optional: bool) -> None:
+ annotation = List[UploadFile]
+ if optional:
+ annotation = Optional[annotation] # type: ignore[misc, assignment]
+
+ @post("/", signature_namespace={"annotation": annotation})
+ async def handler(data: annotation = Body(media_type=RequestEncodingType.MULTI_PART)) -> None: # pyright: ignore[reportGeneralTypeIssues]
assert len(data) == file_count
for file in data:
@@ -415,13 +420,20 @@ class Files:
file_list: List[UploadFile]
+# https://github.com/litestar-org/litestar/issues/3407
+@dataclass
+class OptionalFiles:
+ file_list: Optional[List[UploadFile]]
+
+
[email protected]("file_model", (Files, OptionalFiles))
@pytest.mark.parametrize("file_count", (1, 2))
-def test_upload_multiple_files_in_model(file_count: int) -> None:
- @post("/")
- async def handler(data: Files = Body(media_type=RequestEncodingType.MULTI_PART)) -> None:
- assert len(data.file_list) == file_count
+def test_upload_multiple_files_in_model(file_count: int, file_model: type[Files | OptionalFiles]) -> None:
+ @post("/", signature_namespace={"file_model": file_model})
+ async def handler(data: file_model = Body(media_type=RequestEncodingType.MULTI_PART)) -> None: # type: ignore[valid-type]
+ assert len(data.file_list) == file_count # type: ignore[attr-defined]
- for file in data.file_list:
+ for file in data.file_list: # type: ignore[attr-defined]
assert await file.read() == b"1"
with create_test_client([handler]) as client:
| Bug: cant upload one file only if type is list[UploadFile] | None
### Description
I though I had the same issue as #2939 but it's slightly different, here is a test case that fails for one file and not for 2 files:
```
@dataclass
class OFiles:
file_list: Optional[List[UploadFile]]
@pytest.mark.parametrize("file_count", (1, 2))
def test_upload_multiple_ofiles_in_model(file_count: int) -> None:
@post("/")
async def handler(data: OFiles = Body(media_type=RequestEncodingType.MULTI_PART)) -> None:
assert len(data.file_list) == file_count
for file in data.file_list:
assert await file.read() == b"1"
with create_test_client([handler]) as client:
files_to_upload = [("file_list", b"1") for _ in range(file_count)]
response = client.post("/", files=files_to_upload)
assert response.status_code == HTTP_201_CREATED
```
### URL to code causing the issue
_No response_
### MCVE
```python
# Your MCVE code here
```
### Steps to reproduce
```bash
1. Go to '...'
2. Click on '....'
3. Scroll down to '....'
4. See error
```
### Screenshots
```bash
""
```
### Logs
_No response_
### Litestar Version
2.8.2final0
### Platform
- [X] Linux
- [ ] Mac
- [ ] Windows
- [ ] Other (Please specify in the description above)
<!-- POLAR PLEDGE BADGE START -->
---
> [!NOTE]
> While we are open for sponsoring on [GitHub Sponsors](https://github.com/sponsors/litestar-org/) and
> [OpenCollective](https://opencollective.com/litestar), we also utilize [Polar.sh](https://polar.sh/) to engage in pledge-based sponsorship.
>
> Check out all issues funded or available for funding [on our Polar.sh dashboard](https://polar.sh/litestar-org)
> * If you would like to see an issue prioritized, make a pledge towards it!
> * We receive the pledge once the issue is completed & verified
> * This, along with engagement in the community, helps us know which features are a priority to our users.
<a href="https://polar.sh/litestar-org/litestar/issues/3407">
<picture>
<source media="(prefers-color-scheme: dark)" srcset="https://polar.sh/api/github/litestar-org/litestar/issues/3407/pledge.svg?darkmode=1">
<img alt="Fund with Polar" src="https://polar.sh/api/github/litestar-org/litestar/issues/3407/pledge.svg">
</picture>
</a>
<!-- POLAR PLEDGE BADGE END -->
| May need to be a separate issue, but `| None` also removes the `Upload File` button in OpenAPI schema docs :(
```py
from litestar import Controller, Litestar, post
from litestar.datastructures import UploadFile
from litestar.enums import RequestEncodingType
from litestar.params import Body
class Files(Controller):
@post(path="/files/upload")
async def upload_files(
self,
data: UploadFile | None = Body(media_type=RequestEncodingType.MULTI_PART),
) -> str:
return data
app = Litestar(route_handlers=[Files])
```
Without `| None`
<img width="805" alt="image" src="https://github.com/litestar-org/litestar/assets/45884264/b6cb0694-aea9-4a82-929c-cdc79fb447fe">
With `| None`
<img width="1454" alt="image" src="https://github.com/litestar-org/litestar/assets/45884264/503e2f27-1319-4820-83bd-40438c66136d">
| 2024-04-20T10:11:02 |
litestar-org/litestar | 3,417 | litestar-org__litestar-3417 | [
"3416"
] | a31d1c6347714ae51cc72c08f0333cae414f7f22 | diff --git a/litestar/openapi/spec/base.py b/litestar/openapi/spec/base.py
--- a/litestar/openapi/spec/base.py
+++ b/litestar/openapi/spec/base.py
@@ -2,7 +2,11 @@
from dataclasses import asdict, dataclass, fields, is_dataclass
from enum import Enum
-from typing import Any
+from typing import TYPE_CHECKING, Any
+
+if TYPE_CHECKING:
+ from collections.abc import Iterator
+ from dataclasses import Field
__all__ = ("BaseSchemaObject",)
@@ -34,13 +38,16 @@ def _normalize_value(value: Any) -> Any:
class BaseSchemaObject:
"""Base class for schema spec objects"""
+ def _iter_fields(self) -> Iterator[Field[Any]]:
+ yield from fields(self)
+
def to_schema(self) -> dict[str, Any]:
"""Transform the spec dataclass object into a string keyed dictionary. This method traverses all nested values
recursively.
"""
result: dict[str, Any] = {}
- for field in fields(self):
+ for field in self._iter_fields():
value = _normalize_value(getattr(self, field.name, None))
if value is not None:
diff --git a/litestar/openapi/spec/header.py b/litestar/openapi/spec/header.py
--- a/litestar/openapi/spec/header.py
+++ b/litestar/openapi/spec/header.py
@@ -1,7 +1,9 @@
from __future__ import annotations
-from dataclasses import dataclass
-from typing import TYPE_CHECKING, Any, Literal
+from dataclasses import Field, dataclass
+from typing import TYPE_CHECKING, Any, Iterator, Literal
+
+from typing_extensions import override
from litestar.openapi.spec.base import BaseSchemaObject
@@ -119,3 +121,7 @@ class OpenAPIHeader(BaseSchemaObject):
The key is the media type and the value describes it. The map MUST only contain one entry.
"""
+
+ @override
+ def _iter_fields(self) -> Iterator[Field[Any]]:
+ yield from (f for f in super()._iter_fields() if f.name not in {"name", "param_in"})
| diff --git a/tests/e2e/test_openapi/__init__.py b/tests/e2e/test_openapi/__init__.py
new file mode 100644
diff --git a/tests/e2e/test_openapi/test_spec_headers.py b/tests/e2e/test_openapi/test_spec_headers.py
new file mode 100644
--- /dev/null
+++ b/tests/e2e/test_openapi/test_spec_headers.py
@@ -0,0 +1,29 @@
+from litestar import Litestar, Request, get
+from litestar.datastructures import ResponseHeader
+
+
+@get("/")
+async def hello_world1(request: Request) -> None:
+ request.logger.info("inside request")
+ return
+
+
+app1 = Litestar(
+ route_handlers=[hello_world1],
+ response_headers=[ResponseHeader(name="X-Version", value="ABCD", description="Test")],
+)
+
+
+def test_included_header_fields() -> None:
+ # https://github.com/litestar-org/litestar/issues/3416
+
+ assert app1.openapi_schema.to_schema()["paths"]["/"]["get"]["responses"]["200"]["headers"] == {
+ "X-Version": {
+ "allowEmptyValue": False,
+ "allowReserved": False,
+ "deprecated": False,
+ "description": "Test",
+ "required": False,
+ "schema": {"type": "string"},
+ }
+ }
diff --git a/tests/unit/test_openapi/test_config.py b/tests/unit/test_openapi/test_config.py
--- a/tests/unit/test_openapi/test_config.py
+++ b/tests/unit/test_openapi/test_config.py
@@ -28,16 +28,12 @@ def test_merged_components_correct() -> None:
"examples": {"example-one": {"summary": "an example"}},
"headers": {
"one": {
- "name": "",
- "in": "header",
"required": False,
"deprecated": False,
"allowEmptyValue": False,
"allowReserved": False,
},
"two": {
- "name": "",
- "in": "header",
"required": False,
"deprecated": False,
"allowEmptyValue": False,
| Bug: ResponseHeaders results in invalid OpenAPI schema
### Description
Use of `response_headers` on the Litestar object seems to result in invalid OpenAPI 3.1 schema generation.
The Header object on the response is invalid aiui according to the spec (see https://spec.openapis.org/oas/v3.1.0#header-object) and fails validation when using `redocly lint`, as it includes the `name` and `in` fields.
The schema is as follows,
```json
{
"info": {
"title": "Litestar API",
"version": "1.0.0"
},
"openapi": "3.1.0",
"servers": [
{
"url": "/"
}
],
"paths": {
"/": {
"get": {
"summary": "HelloWorld1",
"operationId": "HelloWorld1",
"responses": {
"200": {
"description": "Request fulfilled, document follows",
"headers": {
"X-Version": {
"schema": {
"type": "string"
},
"name": "X-Version",
"in": "header",
"description": "Test",
"required": false,
"deprecated": false,
"allowEmptyValue": false,
"allowReserved": false
}
}
}
},
"deprecated": false
}
}
},
"components": {
"schemas": {}
}
}
```
### URL to code causing the issue
_No response_
### MCVE
```python
from litestar import Litestar, Request, get
from litestar.datastructures import ResponseHeader
@get("/")
async def hello_world1(request: Request) -> None:
request.logger.info("inside request")
return None
app1 = Litestar(
route_handlers=[hello_world1],
response_headers=[ResponseHeader(name="X-Version", value="ABCD", description="Test")],
)
```
This endpoint works, but the resulting schema fails validation.
```bash
$ litestar schema openapi --output openapi_schema.json
$ npx @redocly/cli lint openapi_schema.json
...
[1] openapi_schema.json:25:33 at #/paths/~1/get/responses/200/headers/X-Version/name
Property `name` is not expected here.
23 | "type": "string"
24 | },
25 | "name": "X-Version",
26 | "in": "header",
27 | "description": "Test",
Error was generated by the spec rule.
[2] openapi_schema.json:26:33 at #/paths/~1/get/responses/200/headers/X-Version/in
Property `in` is not expected here.
24 | },
25 | "name": "X-Version",
26 | "in": "header",
27 | "description": "Test",
28 | "required": false,
...
```
### Steps to reproduce
_No response_
### Screenshots
_No response_
### Logs
_No response_
### Litestar Version
2.8.2, using Python 3.12.2
### Platform
- [ ] Linux
- [X] Mac
- [ ] Windows
- [ ] Other (Please specify in the description above)
<!-- POLAR PLEDGE BADGE START -->
---
> [!NOTE]
> While we are open for sponsoring on [GitHub Sponsors](https://github.com/sponsors/litestar-org/) and
> [OpenCollective](https://opencollective.com/litestar), we also utilize [Polar.sh](https://polar.sh/) to engage in pledge-based sponsorship.
>
> Check out all issues funded or available for funding [on our Polar.sh dashboard](https://polar.sh/litestar-org)
> * If you would like to see an issue prioritized, make a pledge towards it!
> * We receive the pledge once the issue is completed & verified
> * This, along with engagement in the community, helps us know which features are a priority to our users.
<a href="https://polar.sh/litestar-org/litestar/issues/3416">
<picture>
<source media="(prefers-color-scheme: dark)" srcset="https://polar.sh/api/github/litestar-org/litestar/issues/3416/pledge.svg?darkmode=1">
<img alt="Fund with Polar" src="https://polar.sh/api/github/litestar-org/litestar/issues/3416/pledge.svg">
</picture>
</a>
<!-- POLAR PLEDGE BADGE END -->
| Thanks for reporting!
We go to a lot of effort to document that these shouldn't be included also:
https://github.com/litestar-org/litestar/blob/a31d1c6347714ae51cc72c08f0333cae414f7f22/litestar/openapi/spec/header.py#L18-L36 | 2024-04-23T03:34:59 |
litestar-org/litestar | 3,418 | litestar-org__litestar-3418 | [
"3415"
] | c372633ee52e714c7554867412d69cebc76c9f6f | diff --git a/litestar/plugins/flash.py b/litestar/plugins/flash.py
--- a/litestar/plugins/flash.py
+++ b/litestar/plugins/flash.py
@@ -1,16 +1,22 @@
"""Plugin for creating and retrieving flash messages."""
+from __future__ import annotations
+
from dataclasses import dataclass
-from typing import Any, Mapping
+from typing import TYPE_CHECKING, Any, Mapping
-from litestar.config.app import AppConfig
-from litestar.connection import ASGIConnection
-from litestar.contrib.minijinja import MiniJinjaTemplateEngine
+from litestar.exceptions import MissingDependencyException
from litestar.plugins import InitPluginProtocol
-from litestar.template import TemplateConfig
from litestar.template.base import _get_request_from_context
from litestar.utils.scope.state import ScopeState
+if TYPE_CHECKING:
+ from collections.abc import Callable
+
+ from litestar.config.app import AppConfig
+ from litestar.connection import ASGIConnection
+ from litestar.template import TemplateConfig
+
@dataclass
class FlashConfig:
@@ -39,14 +45,18 @@ def on_app_init(self, app_config: AppConfig) -> AppConfig:
Returns:
The application configuration with the message callable registered.
"""
- if isinstance(self.config.template_config.engine_instance, MiniJinjaTemplateEngine):
- from litestar.contrib.minijinja import _transform_state
-
- self.config.template_config.engine_instance.register_template_callable(
- "get_flashes", _transform_state(get_flashes)
- )
+ template_callable: Callable[[Any], Any]
+ try:
+ from litestar.contrib.minijinja import MiniJinjaTemplateEngine, _transform_state
+ except MissingDependencyException: # pragma: no cover
+ template_callable = get_flashes
else:
- self.config.template_config.engine_instance.register_template_callable("get_flashes", get_flashes)
+ if isinstance(self.config.template_config.engine_instance, MiniJinjaTemplateEngine):
+ template_callable = _transform_state(get_flashes)
+ else:
+ template_callable = get_flashes
+
+ self.config.template_config.engine_instance.register_template_callable("get_flashes", template_callable) # pyright: ignore[reportGeneralTypeIssues]
return app_config
| Bug: minijinja import too early on FlashPlugin
### Description
error is:
```
❯ python main.py
Traceback (most recent call last):
File "/home/lotso/.cache/pypoetry/virtualenvs/tt-n-gxmD69-py3.12/lib/python3.12/site-packages/litestar/contrib/minijinja.py", line 21, in <module>
from minijinja import Environment # type:ignore[import-untyped]
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
ModuleNotFoundError: No module named 'minijinja'
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/home/lotso/toto/tt/main.py", line 2, in <module>
from litestar.plugins.flash import FlashPlugin
File "/home/lotso/.cache/pypoetry/virtualenvs/tt-n-gxmD69-py3.12/lib/python3.12/site-packages/litestar/plugins/flash.py", line 8, in <module>
from litestar.contrib.minijinja import MiniJinjaTemplateEngine
File "/home/lotso/.cache/pypoetry/virtualenvs/tt-n-gxmD69-py3.12/lib/python3.12/site-packages/litestar/contrib/minijinja.py", line 24, in <module>
raise MissingDependencyException("minijinja") from e
litestar.exceptions.base_exceptions.MissingDependencyException: Package 'minijinja' is not installed but required. You can install it by running 'pip install litestar[minijinja]' to install litestar with the required extra or 'pip install minijinja' to install the package separately
```
the imports for minijinja seems to be too early in the game,
### URL to code causing the issue
_No response_
### MCVE
```python
from litestar import Litestar
from litestar.plugins.flash import FlashPlugin
app = Litestar(route_handlers=[], plugins=[FlashPlugin])
```
### Steps to reproduce
```bash
1. Go to '...'
2. Click on '....'
3. Scroll down to '....'
4. See error
```
### Screenshots
```bash
""
```
### Logs
_No response_
### Litestar Version
2.8.2
### Platform
- [X] Linux
- [ ] Mac
- [ ] Windows
- [ ] Other (Please specify in the description above)
<!-- POLAR PLEDGE BADGE START -->
---
> [!NOTE]
> While we are open for sponsoring on [GitHub Sponsors](https://github.com/sponsors/litestar-org/) and
> [OpenCollective](https://opencollective.com/litestar), we also utilize [Polar.sh](https://polar.sh/) to engage in pledge-based sponsorship.
>
> Check out all issues funded or available for funding [on our Polar.sh dashboard](https://polar.sh/litestar-org)
> * If you would like to see an issue prioritized, make a pledge towards it!
> * We receive the pledge once the issue is completed & verified
> * This, along with engagement in the community, helps us know which features are a priority to our users.
<a href="https://polar.sh/litestar-org/litestar/issues/3415">
<picture>
<source media="(prefers-color-scheme: dark)" srcset="https://polar.sh/api/github/litestar-org/litestar/issues/3415/pledge.svg?darkmode=1">
<img alt="Fund with Polar" src="https://polar.sh/api/github/litestar-org/litestar/issues/3415/pledge.svg">
</picture>
</a>
<!-- POLAR PLEDGE BADGE END -->
| 2024-04-23T05:03:39 |
||
litestar-org/litestar | 3,420 | litestar-org__litestar-3420 | [
"3325"
] | 0ee3125c71c15603b76037c6aa3590c4519fb63a | diff --git a/litestar/plugins/flash.py b/litestar/plugins/flash.py
--- a/litestar/plugins/flash.py
+++ b/litestar/plugins/flash.py
@@ -6,16 +6,20 @@
from dataclasses import dataclass
from typing import TYPE_CHECKING, Any, Mapping
+import litestar.exceptions
+from litestar import Request
from litestar.exceptions import MissingDependencyException
+from litestar.middleware import DefineMiddleware
+from litestar.middleware.session import SessionMiddleware
from litestar.plugins import InitPluginProtocol
+from litestar.security.session_auth.middleware import MiddlewareWrapper
from litestar.template.base import _get_request_from_context
-from litestar.utils.scope.state import ScopeState
+from litestar.utils.predicates import is_class_and_subclass
if TYPE_CHECKING:
from collections.abc import Callable
from litestar.config.app import AppConfig
- from litestar.connection import ASGIConnection
from litestar.template import TemplateConfig
@@ -46,6 +50,13 @@ def on_app_init(self, app_config: AppConfig) -> AppConfig:
Returns:
The application configuration with the message callable registered.
"""
+ for mw in app_config.middleware:
+ if isinstance(mw, DefineMiddleware) and is_class_and_subclass(
+ mw.middleware, (MiddlewareWrapper, SessionMiddleware)
+ ):
+ break
+ else:
+ raise litestar.exceptions.ImproperlyConfiguredException("Flash messages require a session middleware.")
template_callable: Callable[[Any], Any] = get_flashes
with suppress(MissingDependencyException):
from litestar.contrib.minijinja import MiniJinjaTemplateEngine, _transform_state
@@ -57,26 +68,13 @@ def on_app_init(self, app_config: AppConfig) -> AppConfig:
return app_config
-def flash(connection: ASGIConnection, message: str, category: str) -> None:
- """Add a flash message to the request scope.
-
- Args:
- connection: The connection instance.
- message: The message to flash.
- category: The category of the message.
- """
- scope_state = ScopeState.from_scope(connection.scope)
- scope_state.flash_messages.append({"message": message, "category": category})
+def flash(
+ request: Request,
+ message: Any,
+ category: str,
+) -> None:
+ request.session.setdefault("_messages", []).append({"message": message, "category": category})
def get_flashes(context: Mapping[str, Any]) -> Any:
- """Get flash messages from the request scope, if any.
-
- Args:
- context: The context dictionary.
-
- Returns:
- The flash messages, if any.
- """
- scope_state = ScopeState.from_scope(_get_request_from_context(context).scope)
- return scope_state.flash_messages
+ return _get_request_from_context(context).session.pop("_messages", [])
| diff --git a/tests/unit/test_plugins/test_flash.py b/tests/unit/test_plugins/test_flash.py
--- a/tests/unit/test_plugins/test_flash.py
+++ b/tests/unit/test_plugins/test_flash.py
@@ -5,12 +5,15 @@
import pytest
-from litestar import Request, get
+from litestar import Litestar, Request, get, post
from litestar.contrib.jinja import JinjaTemplateEngine
from litestar.contrib.mako import MakoTemplateEngine
from litestar.contrib.minijinja import MiniJinjaTemplateEngine
+from litestar.exceptions import ImproperlyConfiguredException
+from litestar.middleware.rate_limit import RateLimitConfig
+from litestar.middleware.session.server_side import ServerSideSessionConfig
from litestar.plugins.flash import FlashConfig, FlashPlugin, flash
-from litestar.response import Template
+from litestar.response import Redirect, Template
from litestar.template import TemplateConfig, TemplateEngineProtocol
from litestar.testing import create_test_client
@@ -56,25 +59,55 @@ def test_flash_plugin(
category_enum: Enum,
) -> None:
Path(tmp_path / "flash.html").write_text(template_str)
- text_expected = "".join(
- [f'<span class="{category.value}">message {category.value}</span>' for category in category_enum] # type: ignore[attr-defined]
- )
- @get("/flash")
- def flash_handler(request: Request) -> Template:
- for category in category_enum: # type: ignore[attr-defined]
- flash(request, f"message {category.value}", category=category.value)
+ @get("/")
+ async def index() -> Redirect:
+ return Redirect("/login")
+
+ @get("/login")
+ async def login(request: Request) -> Template:
+ flash(request, "Flash Test!", category="info")
return Template("flash.html")
+ @post("/check")
+ async def check(request: Request) -> Redirect:
+ flash(request, "User not Found!", category="warning")
+ return Redirect("/login")
+
template_config: TemplateConfig = TemplateConfig(
directory=Path(tmp_path),
engine=engine,
)
+ session_config = ServerSideSessionConfig()
+ flash_config = FlashConfig(template_config=template_config)
with create_test_client(
- [flash_handler],
+ plugins=[FlashPlugin(config=flash_config)],
+ route_handlers=[index, login, check],
template_config=template_config,
- plugins=[FlashPlugin(config=FlashConfig(template_config=template_config))],
+ middleware=[session_config.middleware],
) as client:
- r = client.get("/flash")
+ r = client.get("/")
+ assert r.status_code == 200
+ assert "Flash Test!" in r.text
+ r = client.get("/login")
+ assert r.status_code == 200
+ assert "Flash Test!" in r.text
+ r = client.post("/check")
assert r.status_code == 200
- assert r.text == text_expected
+ assert "User not Found!" in r.text
+ assert "Flash Test!" in r.text
+
+
+def test_flash_config_doesnt_have_session() -> None:
+ template_config = TemplateConfig(directory=Path("tests/templates"), engine=JinjaTemplateEngine)
+ flash_config = FlashConfig(template_config=template_config)
+ with pytest.raises(ImproperlyConfiguredException):
+ Litestar(plugins=[FlashPlugin(config=flash_config)])
+
+
+def test_flash_config_has_wrong_middleware_type() -> None:
+ template_config = TemplateConfig(directory=Path("tests/templates"), engine=JinjaTemplateEngine)
+ flash_config = FlashConfig(template_config=template_config)
+ rate_limit_config = RateLimitConfig(rate_limit=("minute", 1), exclude=["/schema"])
+ with pytest.raises(ImproperlyConfiguredException):
+ Litestar(plugins=[FlashPlugin(config=flash_config)], middleware=[rate_limit_config.middleware])
| bug: Plugin "Flash Messages" not working with "Redirect" Response
### Litestar (2.8.0)
```python
from litestar import Litestar, Request, get, post
from litestar.contrib.jinja import JinjaTemplateEngine
from litestar.plugins.flash import FlashConfig, FlashPlugin, flash
from litestar.response import Template, Redirect
from litestar.template.config import TemplateConfig
template_config = TemplateConfig(engine=JinjaTemplateEngine, directory="templates")
flash_plugin = FlashPlugin(config=FlashConfig(template_config=template_config))
@get('/')
async def index() -> Redirect:
return Redirect('/login')
@get('/login')
async def login(request: Request) -> Template:
flash(request, "Flash Test!", category="info")
return Template(template_str='{% for f in get_flashes() %} <p>{{ f.message }}</p>{% endfor %}'
'<form action="/check" method="post"><button type="submit">login</button></form>')
@post('/check')
async def check(request: Request) -> Redirect:
flash(request, "User not Found!", category="warning")
return Redirect('/login')
app = Litestar(plugins=[flash_plugin], route_handlers=[index,login, check], template_config=template_config)
if __name__ == '__main__':
import uvicorn
uvicorn.run('1:app')
```
```
INFO: Started server process [13056]
INFO: Waiting for application startup.
INFO: Application startup complete.
INFO: Uvicorn running on http://127.0.0.1:8000 (Press CTRL+C to quit)
INFO: 127.0.0.1:50961 - "GET / HTTP/1.1" 302 Found
INFO: 127.0.0.1:50961 - "GET /login HTTP/1.1" 200 OK
INFO: 127.0.0.1:50961 - "GET /login HTTP/1.1" 200 OK
INFO: 127.0.0.1:50961 - "POST /check HTTP/1.1" 302 Found
INFO: 127.0.0.1:50961 - "GET /login HTTP/1.1" 200 OK
INFO: Shutting down
INFO: Waiting for application shutdown.
INFO: Application shutdown complete.
INFO: Finished server process [13056]
Process finished with exit code 0
```

### Problem: Flash Message from "POST /check" is missing at final page (screenshot above)
---
### Flask (3.0.2)
```python
from flask import Flask, flash, render_template_string, redirect
app = Flask(__name__)
app.secret_key = b'123'
@app.get('/')
def index() -> redirect:
return redirect('/login')
@app.get('/login')
def login():
flash('Flash Test!')
return render_template_string('{% for m in get_flashed_messages() %} <p>{{ m }}</p>{% endfor %}'
'<form action="/check" method="post"><button type="submit">login</button></form>')
@app.post('/check')
def check():
flash("User not Found!")
return redirect('/login')
if __name__ == '__main__':
app.run()
```
```
* Serving Flask app '2'
* Debug mode: off
WARNING: This is a development server. Do not use it in a production deployment. Use a production WSGI server instead.
* Running on http://127.0.0.1:5000
Press CTRL+C to quit
127.0.0.1 - - [06/Apr/2024 14:50:47] "GET / HTTP/1.1" 302 -
127.0.0.1 - - [06/Apr/2024 14:50:47] "GET /login HTTP/1.1" 200 -
127.0.0.1 - - [06/Apr/2024 14:50:48] "POST /check HTTP/1.1" 302 -
127.0.0.1 - - [06/Apr/2024 14:50:48] "GET /login HTTP/1.1" 200 -
```

<!-- POLAR PLEDGE BADGE START -->
---
> [!NOTE]
> While we are open for sponsoring on [GitHub Sponsors](https://github.com/sponsors/litestar-org/) and
> [OpenCollective](https://opencollective.com/litestar), we also utilize [Polar.sh](https://polar.sh/) to engage in pledge-based sponsorship.
>
> Check out all issues funded or available for funding [on our Polar.sh dashboard](https://polar.sh/litestar-org)
> * If you would like to see an issue prioritized, make a pledge towards it!
> * We receive the pledge once the issue is completed & verified
> * This, along with engagement in the community, helps us know which features are a priority to our users.
<a href="https://polar.sh/litestar-org/litestar/issues/3325">
<picture>
<source media="(prefers-color-scheme: dark)" srcset="https://polar.sh/api/github/litestar-org/litestar/issues/3325/pledge.svg?darkmode=1">
<img alt="Fund with Polar" src="https://polar.sh/api/github/litestar-org/litestar/issues/3325/pledge.svg">
</picture>
</a>
<!-- POLAR PLEDGE BADGE END -->
| @euri10? :eyes:
### If it helps, this is how we are doing it manually in Litestar
```python
from litestar import Litestar, Request, get, post
from litestar.contrib.jinja import JinjaTemplateEngine
from litestar.middleware.session.server_side import ServerSideSessionConfig
from litestar.response import Template, Redirect
from litestar.template.config import TemplateConfig
template_config = TemplateConfig(engine=JinjaTemplateEngine, directory="templates")
def flash(request: Request, message: str, category: str = "primary") -> None:
if "_messages" not in request.session:
request.session["_messages"] = []
request.session["_messages"].append({"message": message, "category": category})
def get_flashed_messages(request: Request) -> list[str]:
return request.session.pop("_messages") if "_messages" in request.session else []
template_config.engine_instance.engine.globals["get_flashed_messages"] = get_flashed_messages
@get('/')
async def index() -> Redirect:
return Redirect('/login')
@get('/login')
async def login(request: Request) -> Template:
flash(request, "Flash Test!", category="info")
return Template(template_str='{% for f in get_flashed_messages(request) %} <p>{{ f.message }}</p>{% endfor %}'
'<form action="/check" method="post"><button type="submit">login</button></form>')
@post('/check')
async def check(request: Request) -> Redirect:
flash(request, "User not Found!", category="warning")
return Redirect('/login')
app = Litestar(route_handlers=[index, login, check], template_config=template_config,
middleware=[ServerSideSessionConfig().middleware])
if __name__ == '__main__':
import uvicorn
uvicorn.run('1:app')
```
```
INFO: Started server process [8416]
INFO: Waiting for application startup.
INFO: Application startup complete.
INFO: Uvicorn running on http://127.0.0.1:8000 (Press CTRL+C to quit)
INFO: 127.0.0.1:51887 - "GET / HTTP/1.1" 302 Found
INFO: 127.0.0.1:51887 - "GET /login HTTP/1.1" 200 OK
INFO: 127.0.0.1:51887 - "POST /check HTTP/1.1" 302 Found
INFO: 127.0.0.1:51887 - "GET /login HTTP/1.1" 200 OK
INFO: Shutting down
INFO: Waiting for application shutdown.
INFO: Application shutdown complete.
INFO: Finished server process [8416]
```

We hoped 'flash Messages' would be integrated into the core functionality rather than as a plugin, which can often get abandoned. It's a feature present in Flask and Django, so it's likely quite important. Nonetheless, appreciate the work done so far. Litestar is at its best and continually improving. That's wonderful. Thank you!
Configuring Litestar via plugins is an explicit architectural decision - we also use them extensively for internal things. This plugin is in the main code base and is maintained alongside the rest of the library code so there should be no need to be concerned about commitment to its maintenance any more than any other part of the library.
ok I have the same issue now that I'm trying to switch from my implenetation (which is exactly the same as @saltcable ) to the official plugin:)
I'm not sure why I went passing the messages in the request state rather than in the session in the plugin though.
This said, this could be either fixed using the same method for passing flash messages around ie in a session (but that means the plugin requires to have the session middleware installed, we could check for that in the app_init and / or install a default config for it, I'd prefer just raise a misconfiguration error rather than providing a default config)
Or there is a way for the Redirect response to be aware of the scope state from the handler that issued the flash message, but I'm not sure that is possible.
| 2024-04-23T07:59:15 |
litestar-org/litestar | 3,430 | litestar-org__litestar-3430 | [
"3429"
] | c6dd4f3d37ec7c88a20b9e3256ce313f2085cc48 | diff --git a/litestar/_asgi/routing_trie/traversal.py b/litestar/_asgi/routing_trie/traversal.py
--- a/litestar/_asgi/routing_trie/traversal.py
+++ b/litestar/_asgi/routing_trie/traversal.py
@@ -142,8 +142,12 @@ def parse_path_to_route(
remaining_path = path[match.end() :]
# since we allow regular handlers under static paths, we must validate that the request does not match
# any such handler.
- children = [sub_route for sub_route in mount_node.children or [] if sub_route != mount_path]
- if not children or all(sub_route not in path for sub_route in children): # type: ignore[operator]
+ children = (
+ normalize_path(sub_route)
+ for sub_route in mount_node.children or []
+ if sub_route != mount_path and isinstance(sub_route, str)
+ )
+ if not any(remaining_path.startswith(f"{sub_route}/") for sub_route in children):
asgi_app, handler = parse_node_handlers(node=mount_node, method=method)
remaining_path = remaining_path or "/"
if not mount_node.is_static:
| diff --git a/tests/e2e/test_regular_handler_under_asgi_mount_path.py b/tests/e2e/test_regular_handler_under_asgi_mount_path.py
new file mode 100644
--- /dev/null
+++ b/tests/e2e/test_regular_handler_under_asgi_mount_path.py
@@ -0,0 +1,53 @@
+from __future__ import annotations
+
+from typing import TYPE_CHECKING
+
+from litestar import Litestar, asgi, get
+from litestar.enums import ScopeType
+from litestar.testing import TestClient
+
+if TYPE_CHECKING:
+ from litestar.types.asgi_types import Receive, Scope, Send
+
+
+async def asgi_app(scope: Scope, receive: Receive, send: Send) -> None:
+ assert scope["type"] == ScopeType.HTTP
+ await send(
+ {
+ "type": "http.response.start",
+ "status": 200,
+ "headers": [
+ (b"content-type", b"text/plain"),
+ (b"content-length", b"%d" % len(scope["raw_path"])),
+ ],
+ }
+ )
+ await send(
+ {
+ "type": "http.response.body",
+ "body": scope["raw_path"],
+ "more_body": False,
+ }
+ )
+
+
+asgi_handler = asgi("/", is_mount=True)(asgi_app)
+
+
+@get("/path")
+async def get_handler() -> str:
+ return "Hello, world!"
+
+
+app = Litestar(
+ route_handlers=[asgi_handler, get_handler],
+ openapi_config=None,
+ debug=True,
+)
+
+
+def test_regular_handler_under_mounted_asgi_app() -> None:
+ # https://github.com/litestar-org/litestar/issues/3429
+ with TestClient(app) as client:
+ resp = client.get("/some/path")
+ assert resp.content == b"/some/path"
| Bug: regular handler under asgi mount path conflicts with routing to asgi app
### Description
If we have a regular handler mounted under an asgi mounted path, and the path of the regular handler is a non-prefix sub-string of the a request path the request will not get routed to the asgi app.
I.e., if we have an asgi mounted on `"/"`, and a regular handler at `"/path"`, a request to `"/some/path"` does not get routed to the asgi app.
### URL to code causing the issue
_No response_
### MCVE
```python
from __future__ import annotations
from typing import TYPE_CHECKING
from litestar import Litestar, asgi, get
from litestar.testing import TestClient
if TYPE_CHECKING:
from litestar.types.asgi_types import Receive, Scope, Send
async def asgi_app(scope: Scope, receive: Receive, send: Send) -> None:
assert scope["type"] == "http"
await send({
"type": "http.response.start",
"status": 200,
"headers": [
(b"content-type", b"text/plain"),
(b"content-length", b"%d" % len(scope["raw_path"])),
],
})
await send({
"type": "http.response.body",
"body": scope["raw_path"],
})
asgi_handler = asgi("/", is_mount=True)(asgi_app)
@get("/path")
def get_handler() -> str:
return "Hello, world!"
def test_regular_handler_under_mounted_asgi_app() -> None:
app = Litestar(
route_handlers=[asgi("/", is_mount=True)(asgi_app), get_handler],
openapi_config=None,
debug=True,
)
with TestClient(app) as client:
resp = client.get("/some/path") # currently this is a 404
assert resp.content == b"/some/path"
```
### Steps to reproduce
```bash
1. Go to '...'
2. Click on '....'
3. Scroll down to '....'
4. See error
```
### Screenshots
```bash
""
```
### Logs
_No response_
### Litestar Version
main
### Platform
- [X] Linux
- [ ] Mac
- [ ] Windows
- [ ] Other (Please specify in the description above)
<!-- POLAR PLEDGE BADGE START -->
---
> [!NOTE]
> While we are open for sponsoring on [GitHub Sponsors](https://github.com/sponsors/litestar-org/) and
> [OpenCollective](https://opencollective.com/litestar), we also utilize [Polar.sh](https://polar.sh/) to engage in pledge-based sponsorship.
>
> Check out all issues funded or available for funding [on our Polar.sh dashboard](https://polar.sh/litestar-org)
> * If you would like to see an issue prioritized, make a pledge towards it!
> * We receive the pledge once the issue is completed & verified
> * This, along with engagement in the community, helps us know which features are a priority to our users.
<a href="https://polar.sh/litestar-org/litestar/issues/3429">
<picture>
<source media="(prefers-color-scheme: dark)" srcset="https://polar.sh/api/github/litestar-org/litestar/issues/3429/pledge.svg?darkmode=1">
<img alt="Fund with Polar" src="https://polar.sh/api/github/litestar-org/litestar/issues/3429/pledge.svg">
</picture>
</a>
<!-- POLAR PLEDGE BADGE END -->
| Issue looks to come from here: https://github.com/litestar-org/litestar/blob/7814d597afadb4793e6b947a5644e36dfbab22ba/litestar/_asgi/routing_trie/traversal.py#L145-L146
Where `path == "/some/path"` and `children == ["/path"]`, the request isn't directed to the mounted application because the path contains the child path. I think instead of testing whether the sub-route is a substring of the path, we should check `path.startswith(f"{sub_route}/")`. | 2024-04-26T09:50:47 |
litestar-org/litestar | 3,446 | litestar-org__litestar-3446 | [
"3441"
] | f06e951d18ca0b30f8a4309c9453a604b22e43c4 | diff --git a/litestar/middleware/session/client_side.py b/litestar/middleware/session/client_side.py
--- a/litestar/middleware/session/client_side.py
+++ b/litestar/middleware/session/client_side.py
@@ -5,9 +5,9 @@
import re
import time
from base64 import b64decode, b64encode
-from dataclasses import dataclass, field
+from dataclasses import dataclass, field, fields
from os import urandom
-from typing import TYPE_CHECKING, Any, Literal
+from typing import TYPE_CHECKING, Any, Final, Literal, Mapping
from litestar.datastructures import MutableScopeHeaders
from litestar.datastructures.cookie import Cookie
@@ -38,12 +38,19 @@
NONCE_SIZE = 12
CHUNK_SIZE = 4096 - 64
AAD = b"additional_authenticated_data="
+SET_COOKIE_INCLUDE = {f.name for f in fields(Cookie) if f.name not in {"key", "secret"}}
+CLEAR_COOKIE_INCLUDE = {f.name for f in fields(Cookie) if f.name not in {"key", "secret", "max_age"}}
class ClientSideSessionBackend(BaseSessionBackend["CookieBackendConfig"]):
"""Cookie backend for SessionMiddleware."""
- __slots__ = ("aesgcm", "cookie_re")
+ __slots__ = (
+ "_clear_cookie_params",
+ "_set_cookie_params",
+ "aesgcm",
+ "cookie_re",
+ )
def __init__(self, config: CookieBackendConfig) -> None:
"""Initialize ``ClientSideSessionBackend``.
@@ -54,6 +61,12 @@ def __init__(self, config: CookieBackendConfig) -> None:
super().__init__(config)
self.aesgcm = AESGCM(config.secret)
self.cookie_re = re.compile(rf"{self.config.key}(?:-\d+)?")
+ self._set_cookie_params: Final[Mapping[str, Any]] = dict(
+ extract_dataclass_items(config, exclude_none=True, include=SET_COOKIE_INCLUDE)
+ )
+ self._clear_cookie_params: Final[Mapping[str, Any]] = dict(
+ extract_dataclass_items(config, exclude_none=True, include=CLEAR_COOKIE_INCLUDE)
+ )
def dump_data(self, data: Any, scope: Scope | None = None) -> list[bytes]:
"""Given serializable data, including pydantic models and numpy types, dump it into a bytes string, encrypt,
@@ -107,19 +120,25 @@ def get_cookie_keys(self, connection: ASGIConnection) -> list[str]:
"""
return sorted(key for key in connection.cookies if self.cookie_re.fullmatch(key))
- def _create_session_cookies(self, data: list[bytes], cookie_params: dict[str, Any] | None = None) -> list[Cookie]:
+ def get_cookie_key_set(self, connection: ASGIConnection) -> set[str]:
+ """Return a set of cookie-keys from the connection if they match the session-cookie pattern.
+
+ .. versionadded:: 2.8.3
+
+ Args:
+ connection: An ASGIConnection instance
+
+ Returns:
+ A set of session-cookie keys
+ """
+ return {key for key in connection.cookies if self.cookie_re.fullmatch(key)}
+
+ def _create_session_cookies(self, data: list[bytes]) -> list[Cookie]:
"""Create a list of cookies containing the session data.
If the data is split into multiple cookies, the key will be of the format ``session-{segment number}``,
however if only one cookie is needed, the key will be ``session``.
"""
- if cookie_params is None:
- cookie_params = dict(
- extract_dataclass_items(
- self.config,
- exclude_none=True,
- include={f for f in Cookie.__dict__ if f not in ("key", "secret")},
- )
- )
+ cookie_params = self._set_cookie_params
if len(data) == 1:
return [
@@ -156,38 +175,23 @@ async def store_in_message(self, scope_session: ScopeSession, message: Message,
scope = connection.scope
headers = MutableScopeHeaders.from_message(message)
- cookie_keys = self.get_cookie_keys(connection)
+ connection_cookies = self.get_cookie_key_set(connection)
+ response_cookies: set[str] = set()
if scope_session and scope_session is not Empty:
data = self.dump_data(scope_session, scope=scope)
- cookie_params = dict(
- extract_dataclass_items(
- self.config,
- exclude_none=True,
- include={f for f in Cookie.__dict__ if f not in ("key", "secret")},
- )
- )
- for cookie in self._create_session_cookies(data, cookie_params):
+ for cookie in self._create_session_cookies(data):
headers.add("Set-Cookie", cookie.to_header(header=""))
- # Cookies with the same key overwrite the earlier cookie with that key. To expire earlier session
- # cookies, first check how many session cookies will not be overwritten in this upcoming response.
- # If leftover cookies are greater than or equal to 1, that means older session cookies have to be
- # expired and their names are in cookie_keys.
- cookies_to_clear = cookie_keys[len(data) :] if len(cookie_keys) - len(data) > 0 else []
+ response_cookies.add(cookie.key)
+
+ cookies_to_clear = connection_cookies - response_cookies
else:
- cookies_to_clear = cookie_keys
+ cookies_to_clear = connection_cookies
for cookie_key in cookies_to_clear:
- cookie_params = dict(
- extract_dataclass_items(
- self.config,
- exclude_none=True,
- include={f for f in Cookie.__dict__ if f not in ("key", "secret", "max_age")},
- )
- )
headers.add(
"Set-Cookie",
- Cookie(value="null", key=cookie_key, expires=0, **cookie_params).to_header(header=""),
+ Cookie(value="null", key=cookie_key, expires=0, **self._clear_cookie_params).to_header(header=""),
)
async def load_from_connection(self, connection: ASGIConnection) -> dict[str, Any]:
| diff --git a/tests/unit/test_middleware/test_session/test_client_side_backend.py b/tests/unit/test_middleware/test_session/test_client_side_backend.py
--- a/tests/unit/test_middleware/test_session/test_client_side_backend.py
+++ b/tests/unit/test_middleware/test_session/test_client_side_backend.py
@@ -9,6 +9,7 @@
from cryptography.exceptions import InvalidTag
from litestar import Request, get, post
+from litestar.datastructures.headers import MutableScopeHeaders
from litestar.exceptions import ImproperlyConfiguredException
from litestar.middleware.session import SessionMiddleware
from litestar.middleware.session.client_side import (
@@ -18,7 +19,8 @@
CookieBackendConfig,
)
from litestar.serialization import encode_json
-from litestar.testing import create_test_client
+from litestar.testing import RequestFactory, create_test_client
+from litestar.types.asgi_types import HTTPResponseStartEvent
from tests.helpers import randbytes
@@ -220,3 +222,25 @@ def test_load_data_should_raise_invalid_tag_if_tampered_aad(cookie_session_backe
with pytest.raises(InvalidTag):
cookie_session_backend.load_data(encoded)
+
+
+async def test_store_in_message_clears_cookies_when_session_grows_gt_chunk_size(
+ cookie_session_backend: ClientSideSessionBackend,
+) -> None:
+ """Should clear the cookies when the session grows larger than the chunk size."""
+ # we have a connection that already contains a cookie header with the "session" key in it
+ connection = RequestFactory().get("/", headers={"Cookie": "session=foo"})
+ # we want to persist a new session that is larger than the chunk size
+ # by the time the encrypted data, nonce and associated data are b64 encoded, the size of
+ # this session will be > 2x larger than the chunk size
+ session = create_session(size=CHUNK_SIZE)
+ message: HTTPResponseStartEvent = {"type": "http.response.start", "status": 200, "headers": []}
+ await cookie_session_backend.store_in_message(session, message, connection)
+ # due to the large session stored in multiple chunks, we now enumerate the name of the cookies
+ # e.g., session-0, session-1, session-2, etc. This means we need to have a cookie with the name
+ # "session" in the response headers that is set to null to clear the original cookie.
+ headers = MutableScopeHeaders.from_message(message)
+ assert len(headers.headers) > 1
+ header_name, header_content = headers.headers[-1]
+ assert header_name == b"set-cookie"
+ assert header_content.startswith(b"session=null;")
| Bug: Some session data got lost when trying to set session with big payload
### Description
I'm trying to store acess and refresh tokens in request.session with some other user data, and got an error. In middleware `AuthRequiredMiddleware` i have no user data, but it should be. If I remove two long fields with tokens, it works normaly.
Additionaly, i got a second issue - `SerializationException` when it trying to decode session with big payload, but can't reproduce it
### URL to code causing the issue
_No response_
### MCVE
```python
import secrets
import time
from typing import Any
from litestar import Litestar, Request, Router, get
from litestar.config.cors import CORSConfig
from litestar.middleware.base import DefineMiddleware, MiddlewareProtocol
from litestar.middleware.session.client_side import CookieBackendConfig
from litestar.openapi import OpenAPIConfig
from litestar.openapi.plugins import SwaggerRenderPlugin
from litestar.response import Redirect
from litestar.response.redirect import ASGIRedirectResponse
from litestar.types import ASGIApp, Receive, Scope, Send
from pydantic import BaseModel, Field
USER_SESSION_KEY = "user"
AUTH_REDIRECT_KEY = "auth_redirect_from"
class UserSchema(BaseModel):
username: str
fullname: str | None = Field(default=None)
firstname: str | None = Field(default=None)
lastname: str | None = Field(default=None)
email: str | None = Field(default=None)
position: str | None = Field(default=None)
class UserSessionSchema(BaseModel):
user_info: UserSchema | None = Field(default=None)
access_token: str | None = Field(default=None)
refresh_token: str | None = Field(default=None)
expire_at: int | None = Field(default=None)
class AuthRequiredMiddleware(MiddlewareProtocol):
def __init__(
self,
app: ASGIApp,
api_path: str,
auth_controller_path: str,
login_endpoint_path: str = "/login",
**_: Any,
) -> None:
self.app = app
self.api_path = api_path
self.auth_controller_path = auth_controller_path
self.login_endpoint_path = login_endpoint_path
self.login_url = self.api_path + self.auth_controller_path + self.login_endpoint_path
async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:
request = Request(scope)
print("In middleware session:", request.session)
user_session = UserSessionSchema.model_validate(request.session.get(USER_SESSION_KEY, {}))
def _redirect_login():
response = ASGIRedirectResponse(path=self.login_url)
request.session[USER_SESSION_KEY] = user_session.model_dump()
request.session[AUTH_REDIRECT_KEY] = str(request.url)
return response(scope, receive, send)
if self.auth_controller_path not in request.url.path:
if not user_session.user_info:
return await _redirect_login()
if AUTH_REDIRECT_KEY in request.session:
del request.session[AUTH_REDIRECT_KEY]
await self.app(scope, receive, send)
@get("/protected_resource")
async def test(request: Request) -> str:
user_session = UserSessionSchema.model_validate(request.session[USER_SESSION_KEY])
assert user_session.user_info
return f"Hello, {user_session.user_info.username}!"
@get("/auth/login")
async def login(request: Request) -> Redirect:
request.session[USER_SESSION_KEY] = UserSessionSchema(
user_info=UserSchema(
username="some_username",
fullname="Иванов Ivan Jhon",
firstname="Ivan",
lastname="Иванов",
email="[email protected]",
position="Sernior999lvl",
),
expire_at=int(time.time()) + 900,
# XXX: Uncoment to reproduce
# access_token="a" * 2078,
# refresh_token="b" * 1074,
)
print("In login session:", request.session)
if AUTH_REDIRECT_KEY in request.session:
return Redirect(request.session.pop(AUTH_REDIRECT_KEY))
return Redirect("/")
router = Router("/", route_handlers=[test, login])
app = Litestar(
route_handlers=[router],
openapi_config=OpenAPIConfig(
title="Insourcing API",
version="0.1.0",
render_plugins=[SwaggerRenderPlugin()],
path="/api/v1/docs/",
),
cors_config=CORSConfig(
allow_origins=["http://localhost:8000"],
allow_credentials=True,
),
middleware=[
CookieBackendConfig(secret=secrets.token_urlsafe(24).encode()).middleware,
DefineMiddleware(
middleware=AuthRequiredMiddleware,
api_path="",
auth_controller_path="/auth",
),
],
debug=True,
)
```
### Steps to reproduce
```bash
1. Install litestar + pydantic2 + uvicorn + uvloop
2. Do not uncomment lines and run with uvicorn FILE_NAME:app
3. open localhost:8000/protected_resource. it should work without errors and you'll see - Hello, some_username!
4. Uncomment lines and run again. You will fall into redirect cycle
```
### Screenshots
_No response_
### Logs
_No response_
### Litestar Version
2.8.2
### Platform
WSL 2.1.5.0 + Ubuntu 22.04
- [ ] Linux
- [ ] Mac
- [ ] Windows
- [X] Other (Please specify in the description above)
<!-- POLAR PLEDGE BADGE START -->
---
> [!NOTE]
> While we are open for sponsoring on [GitHub Sponsors](https://github.com/sponsors/litestar-org/) and
> [OpenCollective](https://opencollective.com/litestar), we also utilize [Polar.sh](https://polar.sh/) to engage in pledge-based sponsorship.
>
> Check out all issues funded or available for funding [on our Polar.sh dashboard](https://polar.sh/litestar-org)
> * If you would like to see an issue prioritized, make a pledge towards it!
> * We receive the pledge once the issue is completed & verified
> * This, along with engagement in the community, helps us know which features are a priority to our users.
<a href="https://polar.sh/litestar-org/litestar/issues/3441">
<picture>
<source media="(prefers-color-scheme: dark)" srcset="https://polar.sh/api/github/litestar-org/litestar/issues/3441/pledge.svg?darkmode=1">
<img alt="Fund with Polar" src="https://polar.sh/api/github/litestar-org/litestar/issues/3441/pledge.svg">
</picture>
</a>
<!-- POLAR PLEDGE BADGE END -->
| Thanks @wallseat - good find!
Once cookies get above a certain size, we chunk them and store across multiple cookies. When this happens, cookies get stored with an enumeration, e.g., `session-0`, `session-1` etc.
In this case, the session is persisted in the headers for the first time when the request to `/protected_resource` is redirected to the auth route. At this time, the session cookie is not greater than the chunk size and so it gets stored in the cookie under the name `session`.
After authentication, when the size of the session is much larger due to the presence of the tokens, the serialized session is greater than the chunk size, so the session cookie gets chunked and stored under `session-0`, `session-1`.
There is an issue with the algorithm that detects cookies that should be cleared when a cookie grows larger than a single chunk, and that is what we're hitting here. The original `session` cookie was not being cleared when it was superseded by a cookie called `session-0`.
litestar-org/litestar | 3,454 | litestar-org__litestar-3454 | [
"3011"
] | b9e501fba6ff2ab06ab3afb53d9071a0a8699c55 | diff --git a/docs/examples/responses/sse_responses.py b/docs/examples/responses/sse_responses.py
--- a/docs/examples/responses/sse_responses.py
+++ b/docs/examples/responses/sse_responses.py
@@ -2,15 +2,28 @@
from typing import AsyncGenerator
from litestar import Litestar, get
-from litestar.response import ServerSentEvent
+from litestar.response import ServerSentEvent, ServerSentEventMessage
+from litestar.types import SSEData
-async def my_generator() -> AsyncGenerator[bytes, None]:
+async def my_generator() -> AsyncGenerator[SSEData, None]:
count = 0
while count < 10:
await sleep(0.01)
count += 1
+ # In the generator you can yield integers, strings, bytes, dictionaries, or ServerSentEventMessage objects
+ # dicts can have the following keys: data, event, id, retry, comment
+
+ # here we yield an integer
+ yield count
+ # here a string
yield str(count)
+ # here bytes
+ yield str(count).encode("utf-8")
+ # here a dictionary
+ yield {"data": 2 * count, "event": "event2", "retry": 10}
+ # here a ServerSentEventMessage object
+ yield ServerSentEventMessage(event="something-with-comment", retry=1000, comment="some comment")
@get(path="/count", sync_to_thread=False)
| diff --git a/tests/examples/test_responses/test_sse_responses.py b/tests/examples/test_responses/test_sse_responses.py
--- a/tests/examples/test_responses/test_sse_responses.py
+++ b/tests/examples/test_responses/test_sse_responses.py
@@ -8,6 +8,4 @@ async def test_sse_responses_example() -> None:
async with AsyncTestClient(app=app) as client:
async with aconnect_sse(client, "GET", f"{client.base_url}/count") as event_source:
events = [sse async for sse in event_source.aiter_sse()]
- assert len(events) == 10
- assert all(e.event == "message" for e in events)
- assert all(e.data == str(i) for i, e in enumerate(events, 1))
+ assert len(events) == 50
| Docs: Document SSE
### Summary
The SSE documentation is currently lacking:
- Docs for `ServerSentEventMessage`
- Sending messages a dicts
<!-- POLAR PLEDGE BADGE START -->
---
> [!NOTE]
> While we are open for sponsoring on [GitHub Sponsors](https://github.com/sponsors/litestar-org/) and
> [OpenCollective](https://opencollective.com/litestar), we also utilize [Polar.sh](https://polar.sh/) to engage in pledge-based sponsorship.
>
> Check out all issues funded or available for funding [on our Polar.sh dashboard](https://polar.sh/litestar-org)
> * If you would like to see an issue prioritized, make a pledge towards it!
> * We receive the pledge once the issue is completed & verified
> * This, along with engagement in the community, helps us know which features are a priority to our users.
<a href="https://polar.sh/litestar-org/litestar/issues/3011">
<picture>
<source media="(prefers-color-scheme: dark)" srcset="https://polar.sh/api/github/litestar-org/litestar/issues/3011/pledge.svg?darkmode=1">
<img alt="Fund with Polar" src="https://polar.sh/api/github/litestar-org/litestar/issues/3011/pledge.svg">
</picture>
</a>
<!-- POLAR PLEDGE BADGE END -->
| Any suggestions here @euri10?
ok just reading the current docs here:
https://docs.litestar.dev/latest/usage/responses.html#server-sent-event-responses
I think everything @provinzkraut mentions is currently in the docs:
ServerSentEventMessage is mentioned, and the dict option as well.
That said, I see room for improvement in the example given; it currently only yields a str, and we should probably also yield a ServerSentEventMessage to demonstrate all the other possibilities we test.
If you agree I can tweak the example — does that make sense to you?
litestar-org/litestar | 3,475 | litestar-org__litestar-3475 | [
"3464"
] | d75efa7e79be5a252920aa29f204eb0b310d62a4 | diff --git a/docs/examples/contrib/sqlalchemy/sqlalchemy_declarative_models.py b/docs/examples/contrib/sqlalchemy/sqlalchemy_declarative_models.py
--- a/docs/examples/contrib/sqlalchemy/sqlalchemy_declarative_models.py
+++ b/docs/examples/contrib/sqlalchemy/sqlalchemy_declarative_models.py
@@ -1,16 +1,17 @@
+from __future__ import annotations
+
+import uuid
from datetime import date
-from typing import TYPE_CHECKING
+from typing import List
from uuid import UUID
-from sqlalchemy import ForeignKey, select
+from sqlalchemy import ForeignKey, func, select
+from sqlalchemy.ext.asyncio import AsyncEngine, AsyncSession
from sqlalchemy.orm import Mapped, mapped_column, relationship
from litestar import Litestar, get
from litestar.contrib.sqlalchemy.base import UUIDAuditBase, UUIDBase
-from litestar.contrib.sqlalchemy.plugins import AsyncSessionConfig, SQLAlchemyAsyncConfig, SQLAlchemyInitPlugin
-
-if TYPE_CHECKING:
- from sqlalchemy.ext.asyncio import AsyncEngine, AsyncSession
+from litestar.contrib.sqlalchemy.plugins import AsyncSessionConfig, SQLAlchemyAsyncConfig, SQLAlchemyPlugin
# the SQLAlchemy base includes a declarative model for you to use in your models.
@@ -18,7 +19,7 @@
class Author(UUIDBase):
name: Mapped[str]
dob: Mapped[date]
- books: Mapped[list["Book"]] = relationship(back_populates="author", lazy="selectin")
+ books: Mapped[List[Book]] = relationship(back_populates="author", lazy="selectin")
# The `AuditBase` class includes the same UUID` based primary key (`id`) and 2
@@ -32,19 +33,24 @@ class Book(UUIDAuditBase):
session_config = AsyncSessionConfig(expire_on_commit=False)
sqlalchemy_config = SQLAlchemyAsyncConfig(
- connection_string="sqlite+aiosqlite:///test.sqlite", session_config=session_config
+ connection_string="sqlite+aiosqlite:///test.sqlite", session_config=session_config, create_all=True
) # Create 'async_session' dependency.
-sqlalchemy_plugin = SQLAlchemyInitPlugin(config=sqlalchemy_config)
async def on_startup() -> None:
- """Initializes the database."""
- async with sqlalchemy_config.get_engine().begin() as conn:
- await conn.run_sync(UUIDBase.metadata.create_all)
+ """Adds some dummy data if no data is present."""
+ async with sqlalchemy_config.get_session() as session:
+ statement = select(func.count()).select_from(Author)
+ count = await session.execute(statement)
+ if not count.scalar():
+ author_id = uuid.uuid4()
+ session.add(Author(name="Stephen King", dob=date(1954, 9, 21), id=author_id))
+ session.add(Book(title="It", author_id=author_id))
+ await session.commit()
@get(path="/authors")
-async def get_authors(db_session: "AsyncSession", db_engine: "AsyncEngine") -> list[Author]:
+async def get_authors(db_session: AsyncSession, db_engine: AsyncEngine) -> List[Author]:
"""Interact with SQLAlchemy engine and session."""
return list(await db_session.scalars(select(Author)))
@@ -52,5 +58,6 @@ async def get_authors(db_session: "AsyncSession", db_engine: "AsyncEngine") -> l
app = Litestar(
route_handlers=[get_authors],
on_startup=[on_startup],
- plugins=[SQLAlchemyInitPlugin(config=sqlalchemy_config)],
+ debug=True,
+ plugins=[SQLAlchemyPlugin(config=sqlalchemy_config)],
)
| diff --git a/tests/examples/test_contrib/test_sqlalchemy/test_sqlalchemy_examples.py b/tests/examples/test_contrib/test_sqlalchemy/test_sqlalchemy_examples.py
new file mode 100644
--- /dev/null
+++ b/tests/examples/test_contrib/test_sqlalchemy/test_sqlalchemy_examples.py
@@ -0,0 +1,14 @@
+import pytest
+
+from litestar.testing import TestClient
+
+pytestmark = pytest.mark.xdist_group("sqlalchemy_examples")
+
+
+def test_sqlalchemy_declarative_models() -> None:
+ from docs.examples.contrib.sqlalchemy.sqlalchemy_declarative_models import app
+
+ with TestClient(app) as client:
+ response = client.get("/authors")
+ assert response.status_code == 200
+ assert len(response.json()) > 0
| Bug: SerializationException when running modeling-and-features demo from docs
### Description
Hi,
First of all thanks for developing Litestar, it proves to be a very useful piece of software here. Unfortunately I ran into an issue.
I ran into an `msgspec_error` when requesting a page backed by sqlalchemy models which are connected via relationships. It seems that the database is correctly queried and a list of objects is returned, but then an exception is thrown when converting the objects to JSON.
I ran into this issue in my production code, but when isolating an MCVE I noticed that the provided example in the documentation also shows the same unexpected behaviour, as tested on two different machines. One crucial change to the code, however, is adding an author to the database.
Since this is quite a show-stopper for me: Thanks in advance for having a look at this!
### URL to code causing the issue
https://docs.litestar.dev/2/tutorials/repository-tutorial/01-modeling-and-features.html
### MCVE
```python
from datetime import date
from typing import TYPE_CHECKING
from uuid import UUID
from sqlalchemy import ForeignKey, select
from sqlalchemy.orm import Mapped, mapped_column, relationship
from litestar import Litestar, get
from litestar.contrib.sqlalchemy.base import UUIDAuditBase, UUIDBase
from litestar.contrib.sqlalchemy.plugins import AsyncSessionConfig, SQLAlchemyAsyncConfig, SQLAlchemyInitPlugin
if TYPE_CHECKING:
from sqlalchemy.ext.asyncio import AsyncEngine, AsyncSession
# the SQLAlchemy base includes a declarative model for you to use in your models.
# The `Base` class includes a `UUID` based primary key (`id`)
class Author(UUIDBase):
name: Mapped[str]
dob: Mapped[date]
books: Mapped[list["Book"]] = relationship(back_populates="author", lazy="selectin")
# The `AuditBase` class includes the same UUID` based primary key (`id`) and 2
# additional columns: `created_at` and `updated_at`. `created_at` is a timestamp of when the
# record created, and `updated_at` is the last time the record was modified.
class Book(UUIDAuditBase):
title: Mapped[str]
author_id: Mapped[UUID] = mapped_column(ForeignKey("author.id"))
author: Mapped[Author] = relationship(lazy="joined", innerjoin=True, viewonly=True)
session_config = AsyncSessionConfig(expire_on_commit=False)
sqlalchemy_config = SQLAlchemyAsyncConfig(
connection_string="sqlite+aiosqlite:///test.sqlite", session_config=session_config
) # Create 'async_session' dependency.
sqlalchemy_plugin = SQLAlchemyInitPlugin(config=sqlalchemy_config)
async def on_startup() -> None:
"""Initializes the database."""
async with sqlalchemy_config.get_engine().begin() as conn:
await conn.run_sync(UUIDBase.metadata.create_all)
#crucially there needs to be an author in the table for the error to appear
await conn.execute(Author.__table__.insert().values(name="F. Scott Fitzgerald"))
@get(path="/authors")
async def get_authors(db_session: "AsyncSession", db_engine: "AsyncEngine") -> list[Author]:
"""Interact with SQLAlchemy engine and session."""
return list(await db_session.scalars(select(Author)))
app = Litestar(
route_handlers=[get_authors],
on_startup=[on_startup],
plugins=[SQLAlchemyInitPlugin(config=sqlalchemy_config)],
debug=True
)
```
### Steps to reproduce
```bash
1. Go to the https://docs.litestar.dev/2/tutorials/repository-tutorial/01-modeling-and-features.html page
2. Download the code
3. Run the demo with minimal requirements installed and go to http://localhost:8000/authors
4. See the error
```
### Screenshots
_No response_
### Logs
```bash
File "/usr/local/lib/python3.12/site-packages/litestar/serialization/msgspec_hooks.py", line 143, in encode_json
raise SerializationException(str(msgspec_error)) from msgspec_error
litestar.exceptions.base_exceptions.SerializationException: Unsupported type: <class '__main__.Author'>
Traceback (most recent call last):
File "/usr/local/lib/python3.12/site-packages/litestar/serialization/msgspec_hooks.py", line 141, in encode_json
return msgspec.json.encode(value, enc_hook=serializer) if serializer else _msgspec_json_encoder.encode(value)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.12/site-packages/litestar/serialization/msgspec_hooks.py", line 88, in default_serializer
raise TypeError(f"Unsupported type: {type(value)!r}")
TypeError: Unsupported type: <class '__main__.Author'>
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/lib/python3.12/site-packages/litestar/middleware/exceptions/middleware.py", line 219, in __call__
await self.app(scope, receive, send)
File "/usr/local/lib/python3.12/site-packages/litestar/routes/http.py", line 82, in handle
response = await self._get_response_for_request(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.12/site-packages/litestar/routes/http.py", line 134, in _get_response_for_request
return await self._call_handler_function(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.12/site-packages/litestar/routes/http.py", line 158, in _call_handler_function
response: ASGIApp = await route_handler.to_response(app=scope["app"], data=response_data, request=request)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.12/site-packages/litestar/handlers/http_handlers/base.py", line 557, in to_response
return await response_handler(app=app, data=data, request=request) # type: ignore[call-arg]
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.12/site-packages/litestar/handlers/http_handlers/_utils.py", line 79, in handler
return response.to_asgi_response(app=None, request=request, headers=normalize_headers(headers), cookies=cookies) # pyright: ignore
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.12/site-packages/litestar/response/base.py", line 451, in to_asgi_response
body=self.render(self.content, media_type, get_serializer(type_encoders)),
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.12/site-packages/litestar/response/base.py", line 392, in render
return encode_json(content, enc_hook)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.12/site-packages/litestar/serialization/msgspec_hooks.py", line 143, in encode_json
raise SerializationException(str(msgspec_error)) from msgspec_error
litestar.exceptions.base_exceptions.SerializationException: Unsupported type: <class '__main__.Author'>
INFO: 127.0.0.1:44906 - "GET /authors HTTP/1.1" 500 Internal Server Error
```
### Litestar Version
2.8.2
### Platform
- [X] Linux
- [X] Mac
- [ ] Windows
- [ ] Other (Please specify in the description above)
<!-- POLAR PLEDGE BADGE START -->
---
> [!NOTE]
> While we are open for sponsoring on [GitHub Sponsors](https://github.com/sponsors/litestar-org/) and
> [OpenCollective](https://opencollective.com/litestar), we also utilize [Polar.sh](https://polar.sh/) to engage in pledge-based sponsorship.
>
> Check out all issues funded or available for funding [on our Polar.sh dashboard](https://polar.sh/litestar-org)
> * If you would like to see an issue prioritized, make a pledge towards it!
> * We receive the pledge once the issue is completed & verified
> * This, along with engagement in the community, helps us know which features are a priority to our users.
<a href="https://polar.sh/litestar-org/litestar/issues/3464">
<picture>
<source media="(prefers-color-scheme: dark)" srcset="https://polar.sh/api/github/litestar-org/litestar/issues/3464/pledge.svg?darkmode=1">
<img alt="Fund with Polar" src="https://polar.sh/api/github/litestar-org/litestar/issues/3464/pledge.svg">
</picture>
</a>
<!-- POLAR PLEDGE BADGE END -->
| Can you try `SQLAlchemyPlugin` instead of `SQLAlchemyInitPlugin`?
Alright, the following code, using SQLAlchemyPlugin, works:
```python
from datetime import date
from typing import TYPE_CHECKING
from uuid import UUID
from sqlalchemy import ForeignKey, select
from sqlalchemy.orm import Mapped, mapped_column, relationship
from litestar import Litestar, get
from litestar.contrib.sqlalchemy.base import UUIDAuditBase, UUIDBase
from litestar.contrib.sqlalchemy.plugins import AsyncSessionConfig, SQLAlchemyAsyncConfig, SQLAlchemyPlugin
from litestar.testing import create_test_client
if TYPE_CHECKING:
from sqlalchemy.ext.asyncio import AsyncEngine, AsyncSession
# the SQLAlchemy base includes a declarative model for you to use in your models.
# The `Base` class includes a `UUID` based primary key (`id`)
class Author(UUIDBase):
name: Mapped[str]
dob: Mapped[date]
books: Mapped[list["Book"]] = relationship(back_populates="author", lazy="selectin")
# The `AuditBase` class includes the same UUID` based primary key (`id`) and 2
# additional columns: `created_at` and `updated_at`. `created_at` is a timestamp of when the
# record created, and `updated_at` is the last time the record was modified.
class Book(UUIDAuditBase):
title: Mapped[str]
author_id: Mapped[UUID] = mapped_column(ForeignKey("author.id"))
author: Mapped[Author] = relationship(lazy="joined", innerjoin=True, viewonly=True)
session_config = AsyncSessionConfig(expire_on_commit=False)
sqlalchemy_config = SQLAlchemyAsyncConfig(
connection_string="sqlite+aiosqlite:///testb.sqlite", session_config=session_config
) # Create 'async_session' dependency.
sqlalchemy_plugin = SQLAlchemyPlugin(config=sqlalchemy_config)
async def on_startup() -> None:
"""Initializes the database."""
async with sqlalchemy_config.get_engine().begin() as conn:
await conn.run_sync(UUIDBase.metadata.create_all)
async with sqlalchemy_config.get_session() as session:
session.add(Author(name="author1",dob=date(1990,1,1),id=UUID("00000000-0000-0000-0000-000000000001") ))
session.add(Book(title="blaat",author_id=UUID("00000000-0000-0000-0000-000000000001")))
await session.commit()
@get(path="/authors")
async def get_authors(db_session: "AsyncSession", db_engine: "AsyncEngine") -> list[Author]:
"""Interact with SQLAlchemy engine and session."""
return list(await db_session.scalars(select(Author)))
app = Litestar(
route_handlers=[get_authors],
on_startup=[on_startup],
plugins=[SQLAlchemyPlugin(config=sqlalchemy_config)],
debug=True
)
if __name__ == "__main__":
import uvicorn
uvicorn.run(app, host="0.0.0.0", port=8001, log_level="debug")
`````
A change in the documentation would be nice
@JorenSix Feel free to PR if you want to, if not someone else will pick this up as per their availability | 2024-05-06T08:16:15 |
litestar-org/litestar | 3,476 | litestar-org__litestar-3476 | [
"3471"
] | 9579c15f8dd01c68b80850fae4e8e0b8fd202bf5 | diff --git a/litestar/contrib/pydantic/pydantic_schema_plugin.py b/litestar/contrib/pydantic/pydantic_schema_plugin.py
--- a/litestar/contrib/pydantic/pydantic_schema_plugin.py
+++ b/litestar/contrib/pydantic/pydantic_schema_plugin.py
@@ -286,7 +286,8 @@ def for_pydantic_model(cls, field_definition: FieldDefinition, schema_creator: S
else:
# pydantic v1 requires some workarounds here
model_annotations = {
- k: f.outer_type_ if f.required else Optional[f.outer_type_] for k, f in model.__fields__.items()
+ k: f.outer_type_ if f.required or f.default else Optional[f.outer_type_]
+ for k, f in model.__fields__.items()
}
if is_generic_model:
| diff --git a/tests/e2e/test_pydantic.py b/tests/e2e/test_pydantic.py
--- a/tests/e2e/test_pydantic.py
+++ b/tests/e2e/test_pydantic.py
@@ -1,6 +1,7 @@
import pydantic
+from pydantic import v1 as pydantic_v1
-from litestar import get
+from litestar import get, post
from litestar.testing import create_test_client
@@ -22,3 +23,80 @@ def handler_v2() -> ModelV2:
with create_test_client([handler_v1, handler_v2]) as client:
assert client.get("/v1").json() == {"foo": "bar"}
assert client.get("/v2").json() == {"foo": "bar"}
+
+
+def test_pydantic_v1_model_with_field_default() -> None:
+ # https://github.com/litestar-org/litestar/issues/3471
+
+ class TestDto(pydantic_v1.BaseModel):
+ test_str: str = pydantic_v1.Field(default="some_default", max_length=100)
+
+ @post(path="/test")
+ async def test(data: TestDto) -> str:
+ return "success"
+
+ with create_test_client(route_handlers=[test]) as client:
+ response = client.get("/schema/openapi.json")
+ assert response.status_code == 200
+ assert response.json() == {
+ "components": {
+ "schemas": {
+ "test_pydantic_v1_model_with_field_default.TestDto": {
+ "properties": {"test_str": {"default": "some_default", "maxLength": 100, "type": "string"}},
+ "required": [],
+ "title": "TestDto",
+ "type": "object",
+ }
+ }
+ },
+ "info": {"title": "Litestar API", "version": "1.0.0"},
+ "openapi": "3.1.0",
+ "paths": {
+ "/test": {
+ "post": {
+ "deprecated": False,
+ "operationId": "TestTest",
+ "requestBody": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/test_pydantic_v1_model_with_field_default.TestDto"
+ }
+ }
+ },
+ "required": True,
+ },
+ "responses": {
+ "201": {
+ "content": {"text/plain": {"schema": {"type": "string"}}},
+ "description": "Document " "created, " "URL " "follows",
+ "headers": {},
+ },
+ "400": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "description": "Validation " "Exception",
+ "examples": [{"detail": "Bad " "Request", "extra": {}, "status_code": 400}],
+ "properties": {
+ "detail": {"type": "string"},
+ "extra": {
+ "additionalProperties": {},
+ "type": ["null", "object", "array"],
+ },
+ "status_code": {"type": "integer"},
+ },
+ "required": ["detail", "status_code"],
+ "type": "object",
+ }
+ }
+ },
+ "description": "Bad " "request " "syntax or " "unsupported " "method",
+ },
+ },
+ "summary": "Test",
+ }
+ }
+ },
+ "servers": [{"url": "/"}],
+ }
| Bug: OpenAPI docs fail to generate with certain validators and pydantic v1
### Description
OpenAPI docs fail to generate when a request payload is a pydantic v1 model that has a str field combining a default (or default_factory) with max_length. Removing either of the constraints resolves the issue; it is only reproducible with their combination.
Reproducible with:
Python: 3.11.6
Litestar: 2.8.2
Pydantic: 1.10.15
### URL to code causing the issue
_No response_
### MCVE
```python
from litestar import Controller, post
from pydantic.fields import Field
from pydantic.main import BaseModel
OPENAPI_CONTROLLER_TAG = "OpenApiTest"
class TestDto(BaseModel):
test_str: str = Field(default="some_default", max_length=100)
class OpenApiTestController(Controller):
path = "/test-openapi"
@post(
path="/test",
tags=[OPENAPI_CONTROLLER_TAG],
summary="Test",
)
async def test(self, data: TestDto) -> str:
return "success"
```
### Steps to reproduce
```bash
1. Add the above controller to your litestar app.
2. Load any variation of OpenAPI UI(I reproduced with swagger, redoc, rapiddoc).
```
### Screenshots
```bash
""
```
### Logs
```bash
ERROR - 2024-05-05 09:36:30,092 - litestar - config - exception raised on http connection to route /schema/redoc
Traceback (most recent call last):
File "{project_parent_dir}/litestar-playground/.venv/lib/python3.11/site-packages/litestar/serialization/msgspec_hooks.py", line 141, in encode_json
return msgspec.json.encode(value, enc_hook=serializer) if serializer else _msgspec_json_encoder.encode(value)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "{project_parent_dir}/litestar-playground/.venv/lib/python3.11/site-packages/litestar/serialization/msgspec_hooks.py", line 88, in default_serializer
raise TypeError(f"Unsupported type: {type(value)!r}")
TypeError: Unsupported type: <class 'method_descriptor'>
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "{project_parent_dir}/litestar-playground/.venv/lib/python3.11/site-packages/litestar/middleware/exceptions/middleware.py", line 219, in __call__
await self.app(scope, receive, send)
File "{project_parent_dir}/litestar-playground/.venv/lib/python3.11/site-packages/litestar/routes/http.py", line 82, in handle
response = await self._get_response_for_request(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "{project_parent_dir}/litestar-playground/.venv/lib/python3.11/site-packages/litestar/routes/http.py", line 134, in _get_response_for_request
return await self._call_handler_function(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "{project_parent_dir}/litestar-playground/.venv/lib/python3.11/site-packages/litestar/routes/http.py", line 154, in _call_handler_function
response_data, cleanup_group = await self._get_response_data(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "{project_parent_dir}/litestar-playground/.venv/lib/python3.11/site-packages/litestar/routes/http.py", line 205, in _get_response_data
data = route_handler.fn(**parsed_kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "{project_parent_dir}/litestar-playground/.venv/lib/python3.11/site-packages/litestar/_openapi/plugin.py", line 161, in _handler
return plugin_.render(request, self.provide_openapi_schema())
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "{project_parent_dir}/litestar-playground/.venv/lib/python3.11/site-packages/litestar/openapi/plugins.py", line 310, in render
self.render_json(request, openapi_schema),
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "{project_parent_dir}/litestar-playground/.venv/lib/python3.11/site-packages/litestar/openapi/plugins.py", line 70, in render_json
return encode_json(openapi_schema, serializer=get_serializer(request.route_handler.resolve_type_encoders()))
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "{project_parent_dir}/litestar-playground/.venv/lib/python3.11/site-packages/litestar/serialization/msgspec_hooks.py", line 143, in encode_json
raise SerializationException(str(msgspec_error)) from msgspec_error
litestar.exceptions.base_exceptions.SerializationException: Unsupported type: <class 'method_descriptor'>
Traceback (most recent call last):
File "{project_parent_dir}/litestar-playground/.venv/lib/python3.11/site-packages/litestar/serialization/msgspec_hooks.py", line 141, in encode_json
return msgspec.json.encode(value, enc_hook=serializer) if serializer else _msgspec_json_encoder.encode(value)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "{project_parent_dir}/litestar-playground/.venv/lib/python3.11/site-packages/litestar/serialization/msgspec_hooks.py", line 88, in default_serializer
raise TypeError(f"Unsupported type: {type(value)!r}")
TypeError: Unsupported type: <class 'method_descriptor'>
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "{project_parent_dir}/litestar-playground/.venv/lib/python3.11/site-packages/litestar/middleware/exceptions/middleware.py", line 219, in __call__
await self.app(scope, receive, send)
File "{project_parent_dir}/litestar-playground/.venv/lib/python3.11/site-packages/litestar/routes/http.py", line 82, in handle
response = await self._get_response_for_request(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "{project_parent_dir}/litestar-playground/.venv/lib/python3.11/site-packages/litestar/routes/http.py", line 134, in _get_response_for_request
return await self._call_handler_function(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "{project_parent_dir}/litestar-playground/.venv/lib/python3.11/site-packages/litestar/routes/http.py", line 154, in _call_handler_function
response_data, cleanup_group = await self._get_response_data(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "{project_parent_dir}/litestar-playground/.venv/lib/python3.11/site-packages/litestar/routes/http.py", line 205, in _get_response_data
data = route_handler.fn(**parsed_kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "{project_parent_dir}/litestar-playground/.venv/lib/python3.11/site-packages/litestar/_openapi/plugin.py", line 161, in _handler
return plugin_.render(request, self.provide_openapi_schema())
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "{project_parent_dir}/litestar-playground/.venv/lib/python3.11/site-packages/litestar/openapi/plugins.py", line 310, in render
self.render_json(request, openapi_schema),
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "{project_parent_dir}/litestar-playground/.venv/lib/python3.11/site-packages/litestar/openapi/plugins.py", line 70, in render_json
return encode_json(openapi_schema, serializer=get_serializer(request.route_handler.resolve_type_encoders()))
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "{project_parent_dir}/litestar-playground/.venv/lib/python3.11/site-packages/litestar/serialization/msgspec_hooks.py", line 143, in encode_json
raise SerializationException(str(msgspec_error)) from msgspec_error
litestar.exceptions.base_exceptions.SerializationException: Unsupported type: <class 'method_descriptor'>
INFO: 127.0.0.1:61936 - "GET /schema/redoc HTTP/1.1" 500 Internal Server Error
INFO: 127.0.0.1:61936 - "GET /favicon.ico HTTP/1.1" 404 Not Found
ERROR - 2024-05-05 09:36:30,309 - litestar - config - exception raised on http connection to route /favicon.ico
Traceback (most recent call last):
File "{project_parent_dir}/litestar-playground/.venv/lib/python3.11/site-packages/litestar/_asgi/routing_trie/traversal.py", line 153, in parse_path_to_route
node, path_parameters, path = traverse_route_map(
^^^^^^^^^^^^^^^^^^^
File "{project_parent_dir}/litestar-playground/.venv/lib/python3.11/site-packages/litestar/_asgi/routing_trie/traversal.py", line 54, in traverse_route_map
raise NotFoundException()
litestar.exceptions.http_exceptions.NotFoundException: 404: Not Found
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "{project_parent_dir}/litestar-playground/.venv/lib/python3.11/site-packages/litestar/middleware/exceptions/middleware.py", line 219, in __call__
await self.app(scope, receive, send)
File "{project_parent_dir}/litestar-playground/.venv/lib/python3.11/site-packages/litestar/_asgi/asgi_router.py", line 86, in __call__
asgi_app, scope["route_handler"], scope["path"], scope["path_params"] = self.handle_routing(
^^^^^^^^^^^^^^^^^^^^
File "{project_parent_dir}/litestar-playground/.venv/lib/python3.11/site-packages/litestar/_asgi/asgi_router.py", line 102, in handle_routing
return parse_path_to_route(
^^^^^^^^^^^^^^^^^^^^
File "{project_parent_dir}/litestar-playground/.venv/lib/python3.11/site-packages/litestar/_asgi/routing_trie/traversal.py", line 170, in parse_path_to_route
raise NotFoundException() from e
litestar.exceptions.http_exceptions.NotFoundException: 404: Not Found
Traceback (most recent call last):
File "{project_parent_dir}/litestar-playground/.venv/lib/python3.11/site-packages/litestar/_asgi/routing_trie/traversal.py", line 153, in parse_path_to_route
node, path_parameters, path = traverse_route_map(
^^^^^^^^^^^^^^^^^^^
File "{project_parent_dir}/litestar-playground/.venv/lib/python3.11/site-packages/litestar/_asgi/routing_trie/traversal.py", line 54, in traverse_route_map
raise NotFoundException()
litestar.exceptions.http_exceptions.NotFoundException: 404: Not Found
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "{project_parent_dir}/litestar-playground/.venv/lib/python3.11/site-packages/litestar/middleware/exceptions/middleware.py", line 219, in __call__
await self.app(scope, receive, send)
File "{project_parent_dir}/litestar-playground/.venv/lib/python3.11/site-packages/litestar/_asgi/asgi_router.py", line 86, in __call__
asgi_app, scope["route_handler"], scope["path"], scope["path_params"] = self.handle_routing(
^^^^^^^^^^^^^^^^^^^^
File "{project_parent_dir}/litestar-playground/.venv/lib/python3.11/site-packages/litestar/_asgi/asgi_router.py", line 102, in handle_routing
return parse_path_to_route(
^^^^^^^^^^^^^^^^^^^^
File "{project_parent_dir}/litestar-playground/.venv/lib/python3.11/site-packages/litestar/_asgi/routing_trie/traversal.py", line 170, in parse_path_to_route
raise NotFoundException() from e
litestar.exceptions.http_exceptions.NotFoundException: 404: Not Found
```
### Litestar Version
2.8.2
### Platform
- [X] Linux
- [X] Mac
- [ ] Windows
- [ ] Other (Please specify in the description above)
<!-- POLAR PLEDGE BADGE START -->
---
> [!NOTE]
> While we are open for sponsoring on [GitHub Sponsors](https://github.com/sponsors/litestar-org/) and
> [OpenCollective](https://opencollective.com/litestar), we also utilize [Polar.sh](https://polar.sh/) to engage in pledge-based sponsorship.
>
> Check out all issues funded or available for funding [on our Polar.sh dashboard](https://polar.sh/litestar-org)
> * If you would like to see an issue prioritized, make a pledge towards it!
> * We receive the pledge once the issue is completed & verified
> * This, along with engagement in the community, helps us know which features are a priority to our users.
<a href="https://polar.sh/litestar-org/litestar/issues/3471">
<picture>
<source media="(prefers-color-scheme: dark)" srcset="https://polar.sh/api/github/litestar-org/litestar/issues/3471/pledge.svg?darkmode=1">
<img alt="Fund with Polar" src="https://polar.sh/api/github/litestar-org/litestar/issues/3471/pledge.svg">
</picture>
</a>
<!-- POLAR PLEDGE BADGE END -->
| 2024-05-07T01:22:17 |
|
litestar-org/litestar | 3,478 | litestar-org__litestar-3478 | [
"3477"
] | 9fd80e23958ce82eca62d499f98248fdfae8eea0 | diff --git a/litestar/middleware/logging.py b/litestar/middleware/logging.py
--- a/litestar/middleware/logging.py
+++ b/litestar/middleware/logging.py
@@ -191,7 +191,9 @@ def extract_response_data(self, scope: Scope) -> dict[str, Any]:
connection_state = ScopeState.from_scope(scope)
extracted_data = self.response_extractor(
messages=(
- connection_state.log_context.pop(HTTP_RESPONSE_START),
+ # NOTE: we don't pop the start message from the logging context in case
+ # there are multiple body messages to be logged
+ connection_state.log_context[HTTP_RESPONSE_START],
connection_state.log_context.pop(HTTP_RESPONSE_BODY),
),
)
@@ -224,6 +226,10 @@ async def send_wrapper(message: Message) -> None:
elif message["type"] == HTTP_RESPONSE_BODY:
connection_state.log_context[HTTP_RESPONSE_BODY] = message
self.log_response(scope=scope)
+
+ if not message["more_body"]:
+ connection_state.log_context.clear()
+
await send(message)
return send_wrapper
| diff --git a/tests/e2e/test_middleware/test_logging_middleware_with_multi_body_response.py b/tests/e2e/test_middleware/test_logging_middleware_with_multi_body_response.py
new file mode 100644
--- /dev/null
+++ b/tests/e2e/test_middleware/test_logging_middleware_with_multi_body_response.py
@@ -0,0 +1,30 @@
+from litestar import asgi
+from litestar.middleware.logging import LoggingMiddlewareConfig
+from litestar.testing import create_async_test_client
+from litestar.types.asgi_types import Receive, Scope, Send
+
+
+@asgi("/")
+async def asgi_app(scope: Scope, receive: Receive, send: Send) -> None:
+ await send(
+ {
+ "type": "http.response.start",
+ "status": 200,
+ "headers": [
+ (b"content-type", b"text/event-stream"),
+ (b"cache-control", b"no-cache"),
+ (b"connection", b"keep-alive"),
+ ],
+ }
+ )
+
+ # send two bodies
+ await send({"type": "http.response.body", "body": b"data: 1\n", "more_body": True})
+ await send({"type": "http.response.body", "body": b"data: 2\n", "more_body": False})
+
+
+async def test_app() -> None:
+ async with create_async_test_client(asgi_app, middleware=[LoggingMiddlewareConfig().middleware]) as client:
+ response = await client.get("/")
+ assert response.status_code == 200
+ assert response.text == "data: 1\ndata: 2\n"
| Bug: Multi-body response incompatible with LoggingMiddleware
### Description
When using `ServerSentEvent` Responses with `StructlogPlugin`, the application errors.
Preliminary research led me to https://github.com/litestar-org/litestar/blob/main/litestar/middleware/logging.py#L180, where `scope.state._ls_connection_state.log_context` does not have any values during second push of an `ServerSentEventMessage`.
### URL to code causing the issue
_No response_
### MCVE
```python
from asyncio import sleep
from collections.abc import AsyncGenerator
from litestar import Litestar, get
from litestar.plugins.structlog import StructlogPlugin
from litestar.response import ServerSentEvent, ServerSentEventMessage
from litestar.types import SSEData
async def my_generator() -> AsyncGenerator[SSEData, None]:
count = 0
while count < 10:
await sleep(0.01)
count += 1
yield ServerSentEventMessage(event="something-with-comment", retry=1000, comment="some comment")
@get(path="/count", sync_to_thread=False)
def sse_handler() -> ServerSentEvent:
return ServerSentEvent(my_generator())
app = Litestar(route_handlers=[sse_handler],
plugins=[StructlogPlugin()])
```
### Steps to reproduce
1. Run the server above
```bash
litestar --app main:app run
```
2. Send a request
```bash
curl localhost:8000/count
```
3. Observe error
### Screenshots
_No response_
### Logs
```bash
File "/.venv/lib/python3.11/site-packages/litestar/middleware/logging.py", line 194, in extract_response_data
connection_state.log_context.pop(HTTP_RESPONSE_START),
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
KeyError: 'http.response.start'
```
### Litestar Version
2.8.3
### Platform
- [ ] Linux
- [X] Mac
- [ ] Windows
- [ ] Other (Please specify in the description above)
<!-- POLAR PLEDGE BADGE START -->
---
> [!NOTE]
> While we are open for sponsoring on [GitHub Sponsors](https://github.com/sponsors/litestar-org/) and
> [OpenCollective](https://opencollective.com/litestar), we also utilize [Polar.sh](https://polar.sh/) to engage in pledge-based sponsorship.
>
> Check out all issues funded or available for funding [on our Polar.sh dashboard](https://polar.sh/litestar-org)
> * If you would like to see an issue prioritized, make a pledge towards it!
> * We receive the pledge once the issue is completed & verified
> * This, along with engagement in the community, helps us know which features are a priority to our users.
<a href="https://polar.sh/litestar-org/litestar/issues/3477">
<picture>
<source media="(prefers-color-scheme: dark)" srcset="https://polar.sh/api/github/litestar-org/litestar/issues/3477/pledge.svg?darkmode=1">
<img alt="Fund with Polar" src="https://polar.sh/api/github/litestar-org/litestar/issues/3477/pledge.svg">
</picture>
</a>
<!-- POLAR PLEDGE BADGE END -->
| This generalizes down to an incompatibility between logging middleware and any multi-body response:
```py
from litestar import asgi
from litestar.middleware.logging import LoggingMiddlewareConfig
from litestar.testing import create_async_test_client
from litestar.types.asgi_types import Receive, Scope, Send
@asgi("/")
async def asgi_app(scope: Scope, receive: Receive, send: Send) -> None:
await send(
{
"type": "http.response.start",
"status": 200,
"headers": [
(b"content-type", b"text/event-stream"),
(b"cache-control", b"no-cache"),
(b"connection", b"keep-alive"),
],
}
)
# send two bodies
await send({"type": "http.response.body", "body": b"data: 1\n", "more_body": True})
await send({"type": "http.response.body", "body": b"data: 2\n", "more_body": False})
async def test_app() -> None:
async with create_async_test_client(asgi_app, middleware=[LoggingMiddlewareConfig().middleware]) as client:
response = await client.get("/")
assert response.status_code == 200
assert response.text == "data: 1\ndata: 2\n"
``` | 2024-05-08T03:33:48 |
litestar-org/litestar | 3,486 | litestar-org__litestar-3486 | [
"3463"
] | 3a1fd7589bee8e4590636a765118dbe33cdda6ab | diff --git a/litestar/dto/_backend.py b/litestar/dto/_backend.py
--- a/litestar/dto/_backend.py
+++ b/litestar/dto/_backend.py
@@ -701,6 +701,24 @@ def _transfer_type_data(
)
return transfer_type.field_definition.instantiable_origin(source_value)
+
+ if isinstance(transfer_type, MappingType):
+ if transfer_type.has_nested:
+ return transfer_type.field_definition.instantiable_origin(
+ (
+ key,
+ _transfer_type_data(
+ source_value=value,
+ transfer_type=transfer_type.value_type,
+ nested_as_dict=False,
+ is_data_field=is_data_field,
+ ),
+ )
+ for key, value in source_value.items()
+ )
+
+ return transfer_type.field_definition.instantiable_origin(source_value)
+
return source_value
diff --git a/litestar/dto/_codegen_backend.py b/litestar/dto/_codegen_backend.py
--- a/litestar/dto/_codegen_backend.py
+++ b/litestar/dto/_codegen_backend.py
@@ -24,6 +24,7 @@
from litestar.dto._types import (
CollectionType,
CompositeType,
+ MappingType,
SimpleType,
TransferDTOFieldDefinition,
TransferType,
@@ -506,6 +507,21 @@ def _create_transfer_type_data_body(
self._add_stmt(f"{assignment_target} = {origin_name}({source_value_name})")
return
+ if isinstance(transfer_type, MappingType):
+ origin_name = self._add_to_fn_globals("origin", transfer_type.field_definition.instantiable_origin)
+ if transfer_type.has_nested:
+ transfer_type_data_fn = TransferFunctionFactory.create_transfer_type_data(
+ is_data_field=self.is_data_field, transfer_type=transfer_type.value_type
+ )
+ transfer_type_data_name = self._add_to_fn_globals("transfer_type_data", transfer_type_data_fn)
+ self._add_stmt(
+ f"{assignment_target} = {origin_name}((key, {transfer_type_data_name}(item)) for key, item in {source_value_name}.items())"
+ )
+ return
+
+ self._add_stmt(f"{assignment_target} = {origin_name}({source_value_name})")
+ return
+
self._add_stmt(f"{assignment_target} = {source_value_name}")
def _create_transfer_nested_union_type_data(
| diff --git a/tests/unit/test_dto/test_factory/test_backends/test_backends.py b/tests/unit/test_dto/test_factory/test_backends/test_backends.py
--- a/tests/unit/test_dto/test_factory/test_backends/test_backends.py
+++ b/tests/unit/test_dto/test_factory/test_backends/test_backends.py
@@ -3,7 +3,7 @@
from dataclasses import dataclass, field
from types import ModuleType
-from typing import TYPE_CHECKING, Callable, List, Optional
+from typing import TYPE_CHECKING, Callable, Dict, List, Optional
from unittest.mock import MagicMock
import pytest
@@ -40,6 +40,7 @@ class DC:
a: int
nested: NestedDC
nested_list: List[NestedDC]
+ nested_mapping: Dict[str, NestedDC]
b: str = field(default="b")
c: List[int] = field(default_factory=list)
optional: Optional[str] = None
@@ -51,13 +52,20 @@ class DC:
"c": [],
"nested": {"a": 1, "b": "two"},
"nested_list": [{"a": 1, "b": "two"}],
+ "nested_mapping": {"a": {"a": 1, "b": "two"}},
"optional": None,
}
-RAW = b'{"a":1,"nested":{"a":1,"b":"two"},"nested_list":[{"a":1,"b":"two"}],"b":"b","c":[],"optional":null}'
-COLLECTION_RAW = (
- b'[{"a":1,"nested":{"a":1,"b":"two"},"nested_list":[{"a":1,"b":"two"}],"b":"b","c":[],"optional":null}]'
+RAW = b'{"a":1,"nested":{"a":1,"b":"two"},"nested_list":[{"a":1,"b":"two"}],"nested_mapping":{"a":{"a":1,"b":"two"}},"b":"b","c":[],"optional":null}'
+COLLECTION_RAW = b'[{"a":1,"nested":{"a":1,"b":"two"},"nested_list":[{"a":1,"b":"two"}],"nested_mapping":{"a":{"a":1,"b":"two"}},"b":"b","c":[],"optional":null}]'
+STRUCTURED = DC(
+ a=1,
+ b="b",
+ c=[],
+ nested=NestedDC(a=1, b="two"),
+ nested_list=[NestedDC(a=1, b="two")],
+ nested_mapping={"a": NestedDC(a=1, b="two")},
+ optional=None,
)
-STRUCTURED = DC(a=1, b="b", c=[], nested=NestedDC(a=1, b="two"), nested_list=[NestedDC(a=1, b="two")], optional=None)
@pytest.fixture(name="dto_factory")
@@ -89,7 +97,10 @@ def test_backend_parse_raw_json(
wrapper_attribute_name=None,
is_data_field=True,
handler_id="test",
- ).parse_raw(b'{"a":1,"nested":{"a":1,"b":"two"},"nested_list":[{"a":1,"b":"two"}]}', asgi_connection)
+ ).parse_raw(
+ b'{"a":1,"nested":{"a":1,"b":"two"},"nested_list":[{"a":1,"b":"two"}],"nested_mapping":{"a":{"a":1,"b":"two"}}}',
+ asgi_connection,
+ )
)
== DESTRUCTURED
)
@@ -112,7 +123,7 @@ def _handler() -> None: ...
is_data_field=True,
handler_id="test",
).parse_raw(
- b"\x83\xa1a\x01\xa6nested\x82\xa1a\x01\xa1b\xa3two\xabnested_list\x91\x82\xa1a\x01\xa1b\xa3two",
+ b"\x87\xa1a\x01\xa6nested\x82\xa1a\x01\xa1b\xa3two\xabnested_list\x91\x82\xa1a\x01\xa1b\xa3two\xaenested_mapping\x81\xa1a\x82\xa1a\x01\xa1b\xa3two\xa1b\xa1b\xa1c\x90\xa8optional\xc0",
asgi_connection,
)
)
| DTO backend does not transferring mappings with nested models
### Reported by
[Alc](https://discord.com/users/314787529100361748) in Discord: DTO Unexpected Parsing Error
### Description
Route with DTO does not parse the input data, route without DTO does well, the tests below currently pass, but the one that raises 400 should ideally be 201
### MCVE
```py
from litestar import post
from litestar.testing import create_test_client
from litestar.contrib.pydantic import PydanticDTO
from pydantic import BaseModel
class Inner(BaseModel):
i1: int
i2: int
class Outer(BaseModel):
inner: dict[str, Inner]
@post(dto=PydanticDTO[Outer])
async def something(data: Outer) -> Outer:
return data
@post('no_dto')
async def something_no_dto(data: Outer) -> Outer:
return data
with create_test_client([something, something_no_dto]) as client:
json = {
"inner": {
"additionalProp1": {"i1": 0, "i2": 0},
"additionalProp2": {"i1": 0, "i2": 0},
"additionalProp3": {"i1": 0, "i2": 0},
}
}
response = client.post("", json=json)
assert response.status_code == 400
response = client.post("no_dto", json=json)
assert response.status_code == 201
```
### Logs
~~no one reads logs~~
### Litestar Version
main
<!-- POLAR PLEDGE BADGE START -->
---
> [!NOTE]
> While we are open for sponsoring on [GitHub Sponsors](https://github.com/sponsors/litestar-org/) and
> [OpenCollective](https://opencollective.com/litestar), we also utilize [Polar.sh](https://polar.sh/) to engage in pledge-based sponsorship.
>
> Check out all issues funded or available for funding [on our Polar.sh dashboard](https://polar.sh/litestar-org)
> * If you would like to see an issue prioritized, make a pledge towards it!
> * We receive the pledge once the issue is completed & verified
> * This, along with engagement in the community, helps us know which features are a priority to our users.
<a href="https://polar.sh/litestar-org/litestar/issues/3463">
<picture>
<source media="(prefers-color-scheme: dark)" srcset="https://polar.sh/api/github/litestar-org/litestar/issues/3463/pledge.svg?darkmode=1">
<img alt="Fund with Polar" src="https://polar.sh/api/github/litestar-org/litestar/issues/3463/pledge.svg">
</picture>
</a>
<!-- POLAR PLEDGE BADGE END -->
| @peterschutt FYI, I opened the original issue in discord. If you want me to test a build, please let me know here.
@JacobCoffee is it possible for byte to link to the message in discord that these originate from?
> @JacobCoffee is it possible for byte to link to the message in discord that these originate from?
I believe @Alc-Alc resolved the issue I'd created in discord after opening this issue, I can't find it anymore. Let me know if you need more information. | 2024-05-10T23:39:32 |
litestar-org/litestar | 3,509 | litestar-org__litestar-3509 | [
"3374"
] | 9a3bd38222b63ff4c5eaf4d23542c06cd2fd2a25 | diff --git a/litestar/static_files/config.py b/litestar/static_files/config.py
--- a/litestar/static_files/config.py
+++ b/litestar/static_files/config.py
@@ -10,7 +10,6 @@
from litestar.response.file import ASGIFileResponse # noqa: TCH001
from litestar.router import Router
from litestar.static_files.base import StaticFiles
-from litestar.types import Empty
from litestar.utils import normalize_path, warn_deprecation
__all__ = ("StaticFilesConfig",)
@@ -119,7 +118,7 @@ def create_static_files_router(
cache_control: CacheControlHeader | None = None,
exception_handlers: ExceptionHandlersMap | None = None,
guards: list[Guard] | None = None,
- include_in_schema: bool | EmptyType = Empty,
+ include_in_schema: bool | EmptyType = False,
middleware: Sequence[Middleware] | None = None,
opt: dict[str, Any] | None = None,
security: Sequence[SecurityRequirement] | None = None,
| Bug: New `create_static_files_router` automatically adds routes to schema docs
### Description
I found this suprising when using the new static files feature that
1: It auto adds these to the schema
2: It adds them under a sort've "ugly" router header `default`
We should fix one or both of those
### URL to code causing the issue
_No response_
### MCVE
```python
from litestar import Litestar
from litestar.static_files import create_static_files_router
app = Litestar(
route_handlers=[
create_static_files_router(directories=["assets"], path="/static"),
],
)
```
### Steps to reproduce
```bash
1. run mcve
2. visit /schema
3. See schema for default
```
### Screenshots
<img width="508" alt="image" src="https://github.com/litestar-org/litestar/assets/45884264/a6145e1a-370f-4105-bfec-cf73bddc90cd">
### Logs
_No response_
### Litestar Version
2.7/2.8
### Platform
- [X] Linux
- [X] Mac
- [X] Windows
- [ ] Other (Please specify in the description above)
| Why wouldn't it add them to the schema? It's just regular route handlers, which, by default, are added to the schema. You can use the regular `include_in_schema=False` if you don't want them included.
Not sure about the name; We do include a `name` param which defaults to `static`. No idea where the `default` comes from, that might be a bug.
We didn't include them before, but I feel like - by default - they should be disabled.
The `default` is a tagless section iirc.
I'm fine with whatever here, but at the very least I hope we would tag this as something more not-`default`-ey
> The `default` is a tagless section iirc.
If you pass `tags=["static"]` (or whatever you want to call it) to the `create_static_files_router()` function, does that replace the "default" section? Maybe we should have a default that is not `None` for that?
I, for one, chose to stick with StaticFilesConfig as much as possible just because it does NOT add the routes to the schema docs. Since I'll never use them, I'd rather not see/have them there. So, for what it's worth, I'd also classify this as a bug (especially if you take into consideration that create_static_files_router is presented as the replacement for StaticFilesConfig, but it does not replace/mimic its behavior exactly). | 2024-05-17T18:14:07 |
|
litestar-org/litestar | 3,510 | litestar-org__litestar-3510 | [
"3505"
] | 60cfcf82cd590456288a3d82d91c967cb9136255 | diff --git a/litestar/dto/base_dto.py b/litestar/dto/base_dto.py
--- a/litestar/dto/base_dto.py
+++ b/litestar/dto/base_dto.py
@@ -217,10 +217,13 @@ def create_openapi_schema(
# generated transfer model type in the type arguments.
transfer_model = backend.transfer_model_type
generic_args = tuple(transfer_model if a is cls.model_type else a for a in field_definition.args)
- return schema_creator.for_field_definition(
- FieldDefinition.from_annotation(field_definition.origin[generic_args])
- )
- return schema_creator.for_field_definition(FieldDefinition.from_annotation(backend.annotation))
+ annotation = field_definition.safe_generic_origin[generic_args]
+ else:
+ annotation = backend.annotation
+
+ return schema_creator.for_field_definition(
+ FieldDefinition.from_annotation(annotation, kwarg_definition=field_definition.kwarg_definition)
+ )
@classmethod
def resolve_generic_wrapper_type(
| diff --git a/tests/unit/test_dto/test_factory/test_integration.py b/tests/unit/test_dto/test_factory/test_integration.py
--- a/tests/unit/test_dto/test_factory/test_integration.py
+++ b/tests/unit/test_dto/test_factory/test_integration.py
@@ -1003,3 +1003,58 @@ def get_users() -> WithCount[User]:
assert not_none(schema.properties).keys() == {"count", "data"}
model_schema = openapi.components.schemas["GetUsersUserResponseBody"]
assert not_none(model_schema.properties).keys() == {"id", "name"}
+
+
+def test_openapi_schema_for_dto_includes_body_examples(create_module: Callable[[str], ModuleType]) -> None:
+ module = create_module(
+ """
+from dataclasses import dataclass
+from uuid import UUID
+
+from typing_extensions import Annotated
+
+from litestar import Litestar, post
+from litestar.dto import DataclassDTO
+from litestar.openapi.spec import Example
+from litestar.params import Body
+
+
+@dataclass
+class Item:
+ id: UUID
+ name: str
+
+
+body = Body(
+ title="Create item",
+ description="Create a new item.",
+ examples=[
+ Example(
+ summary="Post is Ok",
+ value={
+ "id": "3fa85f64-5717-4562-b3fc-2c963f66afa6",
+ "name": "Swatch",
+ },
+ )
+ ],
+)
+
+
+@post()
+async def create_item(data: Annotated[Item, body]) -> Item:
+ return data
+
+
+@post("dto", dto=DataclassDTO[Item])
+async def create_item_with_dto(data: Annotated[Item, body]) -> Item:
+ return data
+
+
+app = Litestar(route_handlers=[create_item, create_item_with_dto])
+"""
+ )
+
+ openapi_schema = module.app.openapi_schema
+ item_schema = openapi_schema.components.schemas["Item"]
+ item_with_dto_schema = openapi_schema.components.schemas["CreateItemWithDtoItemRequestBody"]
+ assert item_schema.examples == item_with_dto_schema.examples
| Examples not shown when using DTO
### Reported by
[Alc](https://discord.com/users/314787529100361748) in Discord: [#is Post request using OPENAPI body example working](https://discord.com/channels/919193495116337154/1240662108979597342/1240851853210554370)
### Description
When DTOs are used the example set in `Body` does not show up.
### MCVE
```py
from dataclasses import dataclass
from typing import Annotated
from litestar import Litestar, post
from litestar.dto import DataclassDTO
from litestar.openapi.spec import Example
from litestar.params import Body
@dataclass
class Item:
id: int
name: str
body = Body(
title="Create item",
description="Create a new item.",
examples=[
Example(
summary="Post is Ok",
value={
"id": "3fa85f64-5717-4562-b3fc-2c963f66afa6",
"name": "Swatch",
},
)
],
)
@post()
async def create_item(data: Annotated[Item, body]) -> Item:
return data
@post("dto", dto=DataclassDTO[Item])
async def create_item_with_dto(data: Annotated[Item, body]) -> Item:
return data
app = Litestar(route_handlers=[create_item, create_item_with_dto])
```
### Logs
no logs of interest
### Litestar Version
main
<!-- POLAR PLEDGE BADGE START -->
---
> [!NOTE]
> While we are open for sponsoring on [GitHub Sponsors](https://github.com/sponsors/litestar-org/) and
> [OpenCollective](https://opencollective.com/litestar), we also utilize [Polar.sh](https://polar.sh/) to engage in pledge-based sponsorship.
>
> Check out all issues funded or available for funding [on our Polar.sh dashboard](https://polar.sh/litestar-org)
> * If you would like to see an issue prioritized, make a pledge towards it!
> * We receive the pledge once the issue is completed & verified
> * This, along with engagement in the community, helps us know which features are a priority to our users.
<a href="https://polar.sh/litestar-org/litestar/issues/3505">
<picture>
<source media="(prefers-color-scheme: dark)" srcset="https://polar.sh/api/github/litestar-org/litestar/issues/3505/pledge.svg?darkmode=1">
<img alt="Fund with Polar" src="https://polar.sh/api/github/litestar-org/litestar/issues/3505/pledge.svg">
</picture>
</a>
<!-- POLAR PLEDGE BADGE END -->
| 2024-05-17T22:17:40 |
|
litestar-org/litestar | 3,511 | litestar-org__litestar-3511 | [
"3501"
] | 17db66b2ff17d36a89c949c55b07ec6c318acde6 | diff --git a/litestar/_asgi/routing_trie/traversal.py b/litestar/_asgi/routing_trie/traversal.py
--- a/litestar/_asgi/routing_trie/traversal.py
+++ b/litestar/_asgi/routing_trie/traversal.py
@@ -136,8 +136,8 @@ def parse_path_to_route(
asgi_app, handler = parse_node_handlers(node=root_node.children[path], method=method)
return asgi_app, handler, path, {}
- if mount_paths_regex and (match := mount_paths_regex.search(path)):
- mount_path = path[match.start() : match.end()]
+ if mount_paths_regex and (match := mount_paths_regex.match(path)):
+ mount_path = path[: match.end()]
mount_node = mount_routes[mount_path]
remaining_path = path[match.end() :]
# since we allow regular handlers under static paths, we must validate that the request does not match
| diff --git a/tests/unit/test_asgi/test_routing_trie/test_traversal.py b/tests/unit/test_asgi/test_routing_trie/test_traversal.py
new file mode 100644
--- /dev/null
+++ b/tests/unit/test_asgi/test_routing_trie/test_traversal.py
@@ -0,0 +1,93 @@
+from typing import Any
+
+from litestar import Router, asgi, get
+from litestar.response.base import ASGIResponse
+from litestar.status_codes import HTTP_404_NOT_FOUND
+from litestar.testing import create_test_client
+
+
+def test_parse_path_to_route_mounted_app_path_root() -> None:
+ # test that paths are correctly dispatched to handlers when mounting an app
+ # and other handlers to root path /
+
+ @asgi("/foobar", is_mount=True)
+ async def mounted_handler(scope: Any, receive: Any, send: Any) -> None:
+ response = ASGIResponse(body="mounted")
+ await response(scope, receive, send)
+
+ @get("/{number:int}/foobar/")
+ async def parametrized_handler() -> str:
+ return "parametrized"
+
+ @get("/static/foobar/")
+ async def static_handler() -> str:
+ return "static"
+
+ with create_test_client(
+ [
+ mounted_handler,
+ parametrized_handler,
+ static_handler,
+ ]
+ ) as client:
+ response = client.get("/foobar")
+ assert response.text == "mounted"
+
+ response = client.get("/foobar/123/")
+ assert response.text == "mounted"
+
+ response = client.get("/123/foobar/")
+ assert response.text == "parametrized"
+
+ response = client.get("/static/foobar/")
+ assert response.text == "static"
+
+ response = client.get("/unknown/foobar/")
+ assert response.status_code == HTTP_404_NOT_FOUND
+
+
+def test_parse_path_to_route_mounted_app_path_router() -> None:
+ # test that paths are correctly dispatched to handlers when mounting an app
+ # and other handlers inside subrouter
+
+ @asgi("/foobar", is_mount=True)
+ async def mounted_handler(scope: Any, receive: Any, send: Any) -> None:
+ response = ASGIResponse(body="mounted")
+ await response(scope, receive, send)
+
+ @get("/{number:int}/foobar/")
+ async def parametrized_handler() -> str:
+ return "parametrized"
+
+ @get("/static/foobar/")
+ async def static_handler() -> str:
+ return "static"
+
+ sub_router = Router(
+ path="/sub",
+ route_handlers=[
+ mounted_handler,
+ parametrized_handler,
+ static_handler,
+ ],
+ )
+ base_router = Router(path="/base", route_handlers=[sub_router])
+
+ with create_test_client([base_router]) as client:
+ response = client.get("/foobar")
+ assert response.status_code == HTTP_404_NOT_FOUND
+
+ response = client.get("/base/sub/foobar")
+ assert response.text == "mounted"
+
+ response = client.get("/base/sub/foobar/123/")
+ assert response.text == "mounted"
+
+ response = client.get("/base/sub/123/foobar/")
+ assert response.text == "parametrized"
+
+ response = client.get("/base/sub/static/foobar/")
+ assert response.text == "static"
+
+ response = client.get("/base/sub/unknown/foobar/")
+ assert response.status_code == HTTP_404_NOT_FOUND
| Bug: mounted app path interferes with regular paths
### Description
According to ["Mounting ASGI apps"](https://docs.litestar.dev/latest/usage/routing/overview.html#mounting-asgi-apps) documentation section, Litestar can mount ASGI apps in **sub-paths**. So it is expected that if ASGI app is mounted with `path='/magic'`, every route _starting with `/magic`_ will be handled by the ASGI app, and any other route will be handled by other handlers. However, it is not true.
Imagine this setup:
```python
@asgi("/magic", is_mount=True)
async def xxx(...):
print('Mounted')
...
@get("/{number:int}/magic/")
async def yyy() -> str:
print('Parametrized')
@get("/static/magic/")
async def zzz() -> str:
print('Static')
```
Here's "expectations VS reality" table:
| Request path | Expected output | Real output |
| -------------------- | ------------------------ | ------------------|
| `/magic` | `Mounted` | `Mounted` |
|`/123/magic/` | `Parametrized` | `Mounted` |
|`/static/magic/`| `Static` | `Static` |
|`/non-existent/magic/` | 404 error | `Mounted` |
## Why this happens?
`litestar/_asgi/routing_trie/traversal.py:parse_path_to_route` method has [this line](https://github.com/litestar-org/litestar/blob/main/litestar/_asgi/routing_trie/traversal.py#L139):
```
if mount_paths_regex and (match := mount_paths_regex.search(path)):
```
So instead of **matching** `/magic` to `path`, `re.search` is used which searches for occurrence of `/magic` anywhere in `path`, thus resulting in "false positives" for strings such as `/123/magic/`, `/non-existent/magic/` and `/non-existent/magic/something`.
## Possible solution
This cannot be solved by simply using regex:
```python
@asgi("^/magic", is_mount=True)
```
since `mount_paths_regex` becomes `re.compile('/^/magic')`, so it not only doesn't solve the problem, but the `/magic` endpoint itself stops working.
I believe it may be solved by replacing `mount_paths_regex.search(path)` with `mount_paths_regex.match(path)` - I did manual tests and it solved the problem completely, but ofc such a change requires tests to ensure nothing else is broken.
I am ready to create a full-fledged pull request with tests once the issue is approved :)
### URL to code causing the issue
_No response_
### MCVE
```python
from typing import Any
from litestar import Litestar, asgi, get
from litestar.response.base import ASGIResponse
@asgi("/magic", is_mount=True)
async def mounted_handler(scope: Any, receive: Any, send: Any) -> None:
body = 'mounted!'
response = ASGIResponse(body=body.encode("utf-8"))
await response(scope, receive, send)
@get("/{number:int}/magic/")
async def parametrized_handler() -> str:
return 'parametrized'
@get("/static/magic/")
async def static_handler() -> str:
return 'static'
app = Litestar(route_handlers=[
mounted_handler,
parametrized_handler,
static_handler,
])
```
### Steps to reproduce
```bash
1. Use the source code from MCVE to run Litestar app
2. Run curl to see wrong handler invoked for parametrized path:
> curl http://127.0.0.1:8000/123/magic
mounted!
3. Run curl to see wrong handler invoked for non-existent path:
> curl http://127.0.0.1:8000/whatever/magic
mounted!
```
### Screenshots
_No response_
### Logs
_No response_
### Litestar Version
2.8.3
### Platform
- [X] Linux
- [ ] Mac
- [ ] Windows
- [ ] Other (Please specify in the description above)
| Issue confirmed @0xE111 - I've assigned the issue to you. Thanks!
@all-contributors add @0xE111 for bug
@peterschutt
I've put up [a pull request](https://github.com/litestar-org/litestar/pull/3503) to add @0xE111! :tada:
Thanks @0xE111 for the very detailed report!
> I believe it may be solved by replacing `mount_paths_regex.search(path)` with `mount_paths_regex.match(path)` - I did manual tests and it solved the problem completely, but ofc such a change requires tests to ensure nothing else is broken.
This sounds like the correct fix here to get the intended behaviour. A PR would be very welcome :) | 2024-05-18T08:47:30 |
larq/larq | 14 | larq__larq-14 | [
"7"
] | 21fb16ba60d42f08b03537b65af29e10381bd93c | diff --git a/xquant/layers.py b/xquant/layers.py
--- a/xquant/layers.py
+++ b/xquant/layers.py
@@ -4,17 +4,24 @@
class QuantizerBase(tf.keras.layers.Layer):
- def __init__(self, *args, kernel_quantizer=None, input_quantizer=None, **kwargs):
+ """Base class for defining quantized layers
+
+ `input_quantizer` and `kernel_quantizer` are the element-wise quantization
+ functions to use. If both quantization functions are `None` this layer is
+ equivalent to `Layer`.
+ """
+
+ def __init__(self, *args, input_quantizer=None, kernel_quantizer=None, **kwargs):
super().__init__(*args, **kwargs)
- self.kernel_quantizer = quantizers.get(kernel_quantizer)
self.input_quantizer = quantizers.get(input_quantizer)
+ self.kernel_quantizer = quantizers.get(kernel_quantizer)
def call(self, inputs):
+ if self.input_quantizer:
+ inputs = self.input_quantizer(inputs)
if self.kernel_quantizer:
full_precision_kernel = self.kernel
self.kernel = self.kernel_quantizer(self.kernel)
- if self.input_quantizer:
- inputs = self.input_quantizer(inputs)
output = super().call(inputs)
if self.kernel_quantizer:
@@ -25,25 +32,34 @@ def call(self, inputs):
def get_config(self):
config = {
- "kernel_quantizer": quantizers.serialize(self.kernel_quantizer),
"input_quantizer": quantizers.serialize(self.input_quantizer),
+ "kernel_quantizer": quantizers.serialize(self.kernel_quantizer),
}
return {**super().get_config(), **config}
class QuantizerSeparableBase(tf.keras.layers.Layer):
+ """Base class for defining separable quantized layers
+
+ `input_quantizer`, `depthwise_quantizer` and `pointwise_quantizer` are the
+ element-wise quantization functions to use. If all quantization functions are `None`
+ this layer is equivalent to `SeparableConv1D`. If `use_bias` is True and
+ a bias initializer is provided, it adds a bias vector to the output.
+ It then optionally applies an activation function to produce the final output.
+ """
+
def __init__(
self,
*args,
+ input_quantizer=None,
depthwise_quantizer=None,
pointwise_quantizer=None,
- input_quantizer=None,
**kwargs
):
super().__init__(*args, **kwargs)
+ self.input_quantizer = quantizers.get(input_quantizer)
self.depthwise_quantizer = quantizers.get(depthwise_quantizer)
self.pointwise_quantizer = quantizers.get(pointwise_quantizer)
- self.input_quantizer = quantizers.get(input_quantizer)
def call(self, inputs):
if self.input_quantizer:
@@ -66,58 +82,797 @@ def call(self, inputs):
def get_config(self):
config = {
+ "input_quantizer": quantizers.serialize(self.input_quantizer),
"depthwise_quantizer": quantizers.serialize(self.depthwise_quantizer),
"pointwise_quantizer": quantizers.serialize(self.pointwise_quantizer),
- "input_quantizer": quantizers.serialize(self.input_quantizer),
}
return {**super().get_config(), **config}
@utils.register_keras_custom_object
class QuantConv1D(QuantizerBase, tf.keras.layers.Conv1D):
+ """1D quantized convolution layer (e.g. temporal convolution).
+
+ This layer creates a convolution kernel that is convolved with the layer input
+ over a single spatial (or temporal) dimension to produce a tensor of outputs.
+ `input_quantizer` and `kernel_quantizer` are the element-wise quantization
+ functions to use. If both quantization functions are `None` this layer is
+ equivalent to `Conv1D`.
+ If `use_bias` is True, a bias vector is created and added to the outputs.
+ Finally, if `activation` is not `None`, it is applied to the outputs as well.
+
+ When using this layer as the first layer in a model, provide an `input_shape`
+ argument (tuple of integers or `None`, e.g. `(10, 128)` for sequences of
+ 10 vectors of 128-dimensional vectors, or `(None, 128)` for variable-length
+ sequences of 128-dimensional vectors.
+
+ Arguments:
+ filters: Integer, the dimensionality of the output space
+ (i.e. the number of output filters in the convolution).
+ kernel_size: An integer or tuple/list of a single integer,
+ specifying the length of the 1D convolution window.
+ input_quantizer: Quantization function applied to the input of the layer.
+ kernel_quantizer: Quantization function applied to the `kernel` weights matrix.
+ strides: An integer or tuple/list of a single integer, specifying the stride
+ length of the convolution. Specifying any stride value != 1 is incompatible
+ with specifying any `dilation_rate` value != 1.
+ padding: One of `"valid"`, `"causal"` or `"same"` (case-insensitive).
+ `"causal"` results in causal (dilated) convolutions, e.g. output[t]
+ does not depend on input[t+1:]. Useful when modeling temporal data
+ where the model should not violate the temporal order.
+ See [WaveNet: A Generative Model for Raw Audio, section
+ 2.1](https://arxiv.org/abs/1609.03499).
+ data_format: A string, one of `channels_last` (default) or `channels_first`.
+ dilation_rate: an integer or tuple/list of a single integer, specifying
+ the dilation rate to use for dilated convolution.
+ Currently, specifying any `dilation_rate` value != 1 is
+ incompatible with specifying any `strides` value != 1.
+ activation: Activation function to use.
+ If you don't specify anything, no activation is applied
+ (ie. "linear" activation: `a(x) = x`).
+ use_bias: Boolean, whether the layer uses a bias vector.
+ kernel_initializer: Initializer for the `kernel` weights matrix.
+ bias_initializer: Initializer for the bias vector.
+ kernel_regularizer: Regularizer function applied to the `kernel` weights matrix.
+ bias_regularizer: Regularizer function applied to the bias vector.
+ activity_regularizer: Regularizer function applied to
+ the output of the layer (its "activation")..
+ kernel_constraint: Constraint function applied to the kernel matrix.
+ bias_constraint: Constraint function applied to the bias vector.
+
+ Input shape:
+ 3D tensor with shape: `(batch_size, steps, input_dim)`
+
+ Output shape:
+ 3D tensor with shape: `(batch_size, new_steps, filters)`
+ `steps` value might have changed due to padding or strides.
+ """
+
pass
@utils.register_keras_custom_object
class QuantConv2D(QuantizerBase, tf.keras.layers.Conv2D):
+ """2D quantized convolution layer (e.g. spatial convolution over images).
+
+ This layer creates a convolution kernel that is convolved
+ with the layer input to produce a tensor of outputs.
+ `input_quantizer` and `kernel_quantizer` are the element-wise quantization
+ functions to use. If both quantization functions are `None` this layer is
+ equivalent to `Conv2D`. If `use_bias` is True, a bias vector is created
+ and added to the outputs. Finally, if `activation` is not `None`,
+ it is applied to the outputs as well.
+
+ When using this layer as the first layer in a model, provide the keyword argument
+ `input_shape` (tuple of integers, does not include the sample axis),
+ e.g. `input_shape=(128, 128, 3)` for 128x128 RGB pictures in
+ `data_format="channels_last"`.
+
+ Arguments:
+ filters: Integer, the dimensionality of the output space
+ (i.e. the number of output filters in the convolution).
+ kernel_size: An integer or tuple/list of 2 integers, specifying the
+ height and width of the 2D convolution window. Can be a single integer
+ to specify the same value for all spatial dimensions.
+ input_quantizer: Quantization function applied to the input of the layer.
+ kernel_quantizer: Quantization function applied to the `kernel` weights matrix.
+ strides: An integer or tuple/list of 2 integers, specifying the strides of
+ the convolution along the height and width. Can be a single integer to
+ specify the same value for all spatial dimensions. Specifying any stride
+ value != 1 is incompatible with specifying any `dilation_rate` value != 1.
+ padding: one of `"valid"` or `"same"` (case-insensitive).
+ data_format: A string, one of `channels_last` (default) or `channels_first`.
+ The ordering of the dimensions in the inputs. `channels_last` corresponds
+ to inputs with shape `(batch, height, width, channels)`
+ while `channels_first` corresponds to inputs with shape
+ `(batch, channels, height, width)`.
+ It defaults to the `image_data_format` value found in your
+ Keras config file at `~/.keras/keras.json`.
+ If you never set it, then it will be "channels_last".
+ dilation_rate: an integer or tuple/list of 2 integers, specifying
+ the dilation rate to use for dilated convolution. Can be a single integer
+ to specify the same value for all spatial dimensions.
+ Currently, specifying any `dilation_rate` value != 1 is
+ incompatible with specifying any stride value != 1.
+ activation: Activation function to use.
+ If you don't specify anything, no activation is applied
+ (ie. "linear" activation: `a(x) = x`).
+ use_bias: Boolean, whether the layer uses a bias vector.
+ kernel_initializer: Initializer for the `kernel` weights matrix.
+ bias_initializer: Initializer for the bias vector.
+ kernel_regularizer: Regularizer function applied to the `kernel` weights matrix.
+ bias_regularizer: Regularizer function applied to the bias vector.
+ activity_regularizer: Regularizer function applied to
+ the output of the layer (its "activation").
+ kernel_constraint: Constraint function applied to the kernel matrix.
+ bias_constraint: Constraint function applied to the bias vector.
+
+ Input shape:
+ 4D tensor with shape:
+ `(samples, channels, rows, cols)` if data_format='channels_first'
+ or 4D tensor with shape:
+ `(samples, rows, cols, channels)` if data_format='channels_last'.
+
+ Output shape:
+ 4D tensor with shape:
+ `(samples, filters, new_rows, new_cols)` if data_format='channels_first'
+ or 4D tensor with shape:
+ `(samples, new_rows, new_cols, filters)` if data_format='channels_last'.
+ `rows` and `cols` values might have changed due to padding.
+ """
+
pass
@utils.register_keras_custom_object
class QuantConv3D(QuantizerBase, tf.keras.layers.Conv3D):
+ """3D convolution layer (e.g. spatial convolution over volumes).
+
+ This layer creates a convolution kernel that is convolved
+ with the layer input to produce a tensor of
+ outputs. `input_quantizer` and `kernel_quantizer` are the element-wise quantization
+ functions to use. If both quantization functions are `None` this layer is
+ equivalent to `Conv3D`. If `use_bias` is True, a bias vector is created and
+ added to the outputs. Finally, if `activation` is not `None`,
+ it is applied to the outputs as well.
+
+ When using this layer as the first layer in a model, provide the keyword argument
+ `input_shape` (tuple of integers, does not include the sample axis),
+ e.g. `input_shape=(128, 128, 128, 1)` for 128x128x128 volumes
+ with a single channel, in `data_format="channels_last"`.
+
+ Arguments:
+ filters: Integer, the dimensionality of the output space
+ (i.e. the number of output filters in the convolution).
+ kernel_size: An integer or tuple/list of 3 integers, specifying the
+ depth, height and width of the 3D convolution window. Can be a single
+ integer to specify the same value for all spatial dimensions.
+ input_quantizer: Quantization function applied to the input of the layer.
+ kernel_quantizer: Quantization function applied to the `kernel` weights matrix.
+ strides: An integer or tuple/list of 3 integers, specifying the strides of
+ the convolution along each spatial dimension. Can be a single integer
+ to specify the same value for all spatial dimensions.
+ Specifying any stride value != 1 is incompatible with specifying
+ any `dilation_rate` value != 1.
+ padding: one of `"valid"` or `"same"` (case-insensitive).
+ data_format: A string, one of `channels_last` (default) or `channels_first`.
+ The ordering of the dimensions in the inputs.
+ `channels_last` corresponds to inputs with shape
+ `(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
+ while `channels_first` corresponds to inputs with shape
+ `(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.
+ It defaults to the `image_data_format` value found in your
+ Keras config file at `~/.keras/keras.json`.
+ If you never set it, then it will be "channels_last".
+ dilation_rate: an integer or tuple/list of 3 integers, specifying
+ the dilation rate to use for dilated convolution.
+ Can be a single integer to specify the same value for
+ all spatial dimensions.
+ Currently, specifying any `dilation_rate` value != 1 is
+ incompatible with specifying any stride value != 1.
+ activation: Activation function to use. If you don't specify anything,
+ no activation is applied (ie. "linear" activation: `a(x) = x`).
+ use_bias: Boolean, whether the layer uses a bias vector.
+ kernel_initializer: Initializer for the `kernel` weights matrix.
+ bias_initializer: Initializer for the bias vector.
+ kernel_regularizer: Regularizer function applied to the `kernel` weights matrix.
+ bias_regularizer: Regularizer function applied to the bias vector.
+ activity_regularizer: Regularizer function applied to
+ the output of the layer (its "activation").
+ kernel_constraint: Constraint function applied to the kernel matrix.
+ bias_constraint: Constraint function applied to the bias vector.
+
+ Input shape:
+ 5D tensor with shape:
+ `(samples, channels, conv_dim1, conv_dim2, conv_dim3)` if
+ data_format='channels_first'
+ or 5D tensor with shape:
+ `(samples, conv_dim1, conv_dim2, conv_dim3, channels)` if
+ data_format='channels_last'.
+
+ Output shape:
+ 5D tensor with shape:
+ `(samples, filters, new_conv_dim1, new_conv_dim2, new_conv_dim3)` if
+ data_format='channels_first'
+ or 5D tensor with shape:
+ `(samples, new_conv_dim1, new_conv_dim2, new_conv_dim3, filters)` if
+ data_format='channels_last'.
+ `new_conv_dim1`, `new_conv_dim2` and `new_conv_dim3` values might have
+ changed due to padding.
+ """
+
pass
@utils.register_keras_custom_object
class QuantConv2DTranspose(QuantizerBase, tf.keras.layers.Conv2DTranspose):
+ """Transposed quantized convolution layer (sometimes called Deconvolution).
+
+ The need for transposed convolutions generally arises from the desire to use a
+ transformation going in the opposite direction of a normal convolution, i.e.,
+ from something that has the shape of the output of some convolution to something
+ that has the shape of its input while maintaining a connectivity pattern
+ that is compatible with said convolution. `input_quantizer` and `kernel_quantizer`
+ are the element-wise quantization functions to use. If both quantization functions
+ are `None` this layer is equivalent to `Conv2DTranspose`.
+
+ When using this layer as the first layer in a model, provide the keyword argument
+ `input_shape` (tuple of integers, does not include the sample axis), e.g.
+ `input_shape=(128, 128, 3)` for 128x128 RGB pictures in
+ `data_format="channels_last"`.
+
+ Arguments:
+ filters: Integer, the dimensionality of the output space
+ (i.e. the number of output filters in the convolution).
+ kernel_size: An integer or tuple/list of 2 integers, specifying the
+ height and width of the 2D convolution window. Can be a single integer
+ to specify the same value for all spatial dimensions.
+ input_quantizer: Quantization function applied to the input of the layer.
+ kernel_quantizer: Quantization function applied to the `kernel` weights matrix.
+ strides: An integer or tuple/list of 2 integers, specifying the strides of
+ the convolution along the height and width. Can be a single integer to
+ specify the same value for all spatial dimensions. Specifying any stride
+ value != 1 is incompatible with specifying any `dilation_rate` value != 1.
+ padding: one of `"valid"` or `"same"` (case-insensitive).
+ output_padding: An integer or tuple/list of 2 integers, specifying the amount
+ of padding along the height and width of the output tensor. Can be a single
+ integer to specify the same value for all spatial dimensions. The amount of
+ output padding along a given dimension must be lower than the stride along
+ that same dimension.
+ If set to `None` (default), the output shape is inferred.
+ data_format: A string, one of `channels_last` (default) or `channels_first`.
+ The ordering of the dimensions in the inputs. `channels_last` corresponds
+ to inputs with shape `(batch, height, width, channels)` while
+ `channels_first` corresponds to inputs with shape
+ `(batch, channels, height, width)`. It defaults to the `image_data_format`
+ value found in your Keras config file at `~/.keras/keras.json`.
+ If you never set it, then it will be "channels_last".
+ dilation_rate: an integer or tuple/list of 2 integers, specifying
+ the dilation rate to use for dilated convolution. Can be a single integer
+ to specify the same value for all spatial dimensions. Currently, specifying
+ any `dilation_rate` value != 1 is incompatible with specifying any
+ stride value != 1.
+ activation: Activation function to use. If you don't specify anything,
+ no activation is applied (ie. "linear" activation: `a(x) = x`).
+ use_bias: Boolean, whether the layer uses a bias vector.
+ kernel_initializer: Initializer for the `kernel` weights matrix.
+ bias_initializer: Initializer for the bias vector.
+ kernel_regularizer: Regularizer function applied to the `kernel` weights matrix.
+ bias_regularizer: Regularizer function applied to the bias vector.
+ activity_regularizer: Regularizer function applied to
+ the output of the layer (its "activation").
+ kernel_constraint: Constraint function applied to the kernel matrix.
+ bias_constraint: Constraint function applied to the bias vector.
+
+ Input shape:
+ 4D tensor with shape:
+ `(batch, channels, rows, cols)` if data_format='channels_first'
+ or 4D tensor with shape:
+ `(batch, rows, cols, channels)` if data_format='channels_last'.
+
+ Output shape:
+ 4D tensor with shape:
+ `(batch, filters, new_rows, new_cols)` if data_format='channels_first'
+ or 4D tensor with shape:
+ `(batch, new_rows, new_cols, filters)` if data_format='channels_last'.
+ `rows` and `cols` values might have changed due to padding.
+
+ References:
+ - [A guide to convolution arithmetic for deep
+ learning](https://arxiv.org/abs/1603.07285v1)
+ - [Deconvolutional
+ Networks](https://www.matthewzeiler.com/mattzeiler/deconvolutionalnetworks.pdf)
+ """
+
pass
@utils.register_keras_custom_object
class QuantConv3DTranspose(QuantizerBase, tf.keras.layers.Conv3DTranspose):
+ """Transposed quantized convolution layer (sometimes called Deconvolution).
+
+ The need for transposed convolutions generally arises
+ from the desire to use a transformation going in the opposite direction
+ of a normal convolution, i.e., from something that has the shape of the
+ output of some convolution to something that has the shape of its input
+ while maintaining a connectivity pattern that is compatible with
+ said convolution. `input_quantizer` and `kernel_quantizer`
+ are the element-wise quantization functions to use. If both quantization functions
+ are `None` this layer is equivalent to `Conv3DTranspose`.
+
+ When using this layer as the first layer in a model, provide the keyword argument
+ `input_shape` (tuple of integers, does not include the sample axis),
+ e.g. `input_shape=(128, 128, 128, 3)` for a 128x128x128 volume with 3 channels
+ if `data_format="channels_last"`.
+
+ Arguments:
+ filters: Integer, the dimensionality of the output space
+ (i.e. the number of output filters in the convolution).
+ kernel_size: An integer or tuple/list of 3 integers, specifying the
+ depth, height and width of the 3D convolution window.
+ Can be a single integer to specify the same value for
+ all spatial dimensions.
+ input_quantizer: Quantization function applied to the input of the layer.
+ kernel_quantizer: Quantization function applied to the `kernel` weights matrix.
+ strides: An integer or tuple/list of 3 integers, specifying the strides of the
+ convolution along the depth, height and width. Can be a single integer to
+ specify the same value for all spatial dimensions. Specifying any stride
+ value != 1 is incompatible with specifying any `dilation_rate` value != 1.
+ padding: one of `"valid"` or `"same"` (case-insensitive).
+ output_padding: An integer or tuple/list of 3 integers, specifying the amount
+ of padding along the depth, height, and width. Can be a single integer to
+ specify the same value for all spatial dimensions. The amount of output
+ padding along a given dimension must be lower than the stride along that
+ same dimension. If set to `None` (default), the output shape is inferred.
+ data_format: A string, one of `channels_last` (default) or `channels_first`.
+ The ordering of the dimensions in the inputs. `channels_last` corresponds
+ to inputs with shape `(batch, depth, height, width, channels)` while
+ `channels_first` corresponds to inputs with shape
+ `(batch, channels, depth, height, width)`. It defaults to the
+ `image_data_format` value found in your Keras config file at
+ `~/.keras/keras.json`. If you never set it, then it will be "channels_last".
+ dilation_rate: an integer or tuple/list of 3 integers, specifying the dilation
+ rate to use for dilated convolution. Can be a single integer to specify the
+ same value for all spatial dimensions. Currently, specifying any
+ `dilation_rate` value != 1 is incompatible with specifying any
+ stride value != 1.
+ activation: Activation function to use. If you don't specify anything,
+ no activation is applied (ie. "linear" activation: `a(x) = x`).
+ use_bias: Boolean, whether the layer uses a bias vector.
+ kernel_initializer: Initializer for the `kernel` weights matrix.
+ bias_initializer: Initializer for the bias vector.
+ kernel_regularizer: Regularizer function applied to the `kernel` weights matrix.
+ bias_regularizer: Regularizer function applied to the bias vector.
+ activity_regularizer: Regularizer function applied to
+ the output of the layer (its "activation").
+ kernel_constraint: Constraint function applied to the kernel matrix.
+ bias_constraint: Constraint function applied to the bias vector.
+
+ Input shape:
+ 5D tensor with shape:
+ `(batch, channels, depth, rows, cols)` if data_format='channels_first'
+ or 5D tensor with shape:
+ `(batch, depth, rows, cols, channels)` if data_format='channels_last'.
+
+ Output shape:
+ 5D tensor with shape:
+ `(batch, filters, new_depth, new_rows, new_cols)` if
+ data_format='channels_first'
+ or 5D tensor with shape:
+ `(batch, new_depth, new_rows, new_cols, filters)` if
+ data_format='channels_last'.
+ `depth` and `rows` and `cols` values might have changed due to padding.
+
+ References:
+ - [A guide to convolution arithmetic for deep
+ learning](https://arxiv.org/abs/1603.07285v1)
+ - [Deconvolutional
+ Networks](https://www.matthewzeiler.com/mattzeiler/deconvolutionalnetworks.pdf)
+ """
+
pass
@utils.register_keras_custom_object
class QuantLocallyConnected1D(QuantizerBase, tf.keras.layers.LocallyConnected1D):
+ """Locally-connected quantized layer for 1D inputs.
+
+ The `QuantLocallyConnected1D` layer works similarly to the `QuantConv1D` layer,
+ except that weights are unshared, that is, a different set of filters is applied
+ at each different patch of the input. `input_quantizer` and `kernel_quantizer`
+ are the element-wise quantization functions to use. If both quantization functions
+ are `None` this layer is equivalent to `LocallyConnected1D`.
+
+ Example:
+ ```python
+ # apply a unshared weight convolution 1d of length 3 to a sequence with
+ # 10 timesteps, with 64 output filters
+ model = Sequential()
+ model.add(QuantLocallyConnected1D(64, 3, input_shape=(10, 32)))
+ # now model.output_shape == (None, 8, 64)
+ # add a new conv1d on top
+ model.add(QuantLocallyConnected1D(32, 3))
+ # now model.output_shape == (None, 6, 32)
+ ```
+
+ Arguments:
+ filters: Integer, the dimensionality of the output space
+ (i.e. the number of output filters in the convolution).
+ kernel_size: An integer or tuple/list of a single integer,
+ specifying the length of the 1D convolution window.
+ input_quantizer: Quantization function applied to the input of the layer.
+ kernel_quantizer: Quantization function applied to the `kernel` weights matrix.
+ strides: An integer or tuple/list of a single integer, specifying the stride
+ length of the convolution. Specifying any stride value != 1 is incompatible
+ with specifying any `dilation_rate` value != 1.
+ padding: Currently only supports `"valid"` (case-insensitive).
+ `"same"` may be supported in the future.
+ data_format: A string, one of `channels_last` (default) or `channels_first`.
+ The ordering of the dimensions in the inputs. `channels_last` corresponds
+ to inputs with shape `(batch, length, channels)` while `channels_first`
+ corresponds to inputs with shape `(batch, channels, length)`. It defaults
+ to the `image_data_format` value found in your Keras config file at
+ `~/.keras/keras.json`. If you never set it, then it will be "channels_last".
+ activation: Activation function to use. If you don't specify anything,
+ no activation is applied (ie. "linear" activation: `a(x) = x`).
+ use_bias: Boolean, whether the layer uses a bias vector.
+ kernel_initializer: Initializer for the `kernel` weights matrix.
+ bias_initializer: Initializer for the bias vector.
+ kernel_regularizer: Regularizer function applied to the `kernel` weights matrix.
+ bias_regularizer: Regularizer function applied to the bias vector.
+ activity_regularizer: Regularizer function applied to
+ the output of the layer (its "activation").
+ kernel_constraint: Constraint function applied to the kernel matrix.
+ bias_constraint: Constraint function applied to the bias vector.
+ implementation: implementation mode, either `1` or `2`.
+ `1` loops over input spatial locations to perform the forward pass.
+ It is memory-efficient but performs a lot of (small) ops.
+
+ `2` stores layer weights in a dense but sparsely-populated 2D matrix
+ and implements the forward pass as a single matrix-multiply. It uses
+ a lot of RAM but performs few (large) ops.
+
+ Depending on the inputs, layer parameters, hardware, and
+ `tf.executing_eagerly()` one implementation can be dramatically faster
+ (e.g. 50X) than another.
+
+ It is recommended to benchmark both in the setting of interest to pick
+ the most efficient one (in terms of speed and memory usage).
+
+ Following scenarios could benefit from setting `implementation=2`:
+ - eager execution;
+ - inference;
+ - running on CPU;
+ - large amount of RAM available;
+ - small models (few filters, small kernel);
+ - using `padding=same` (only possible with `implementation=2`).
+
+ Input shape:
+ 3D tensor with shape: `(batch_size, steps, input_dim)`
+
+ Output shape:
+ 3D tensor with shape: `(batch_size, new_steps, filters)`
+ `steps` value might have changed due to padding or strides.
+ """
+
pass
@utils.register_keras_custom_object
class QuantLocallyConnected2D(QuantizerBase, tf.keras.layers.LocallyConnected2D):
+ """Locally-connected quantized layer for 2D inputs.
+
+ The `QuantLocallyConnected2D` layer works similarly to the `QuantConv2D` layer,
+ except that weights are unshared, that is, a different set of filters is applied
+ at each different patch of the input. `input_quantizer` and `kernel_quantizer`
+ are the element-wise quantization functions to use. If both quantization functions
+ are `None` this layer is equivalent to `LocallyConnected2D`.
+
+ Examples:
+ ```python
+ # apply a 3x3 unshared weights convolution with 64 output filters on a
+ 32x32 image
+ # with `data_format="channels_last"`:
+ model = Sequential()
+ model.add(QuantLocallyConnected2D(64, (3, 3), input_shape=(32, 32, 3)))
+ # now model.output_shape == (None, 30, 30, 64)
+ # notice that this layer will consume (30*30)*(3*3*3*64) + (30*30)*64
+ parameters
+
+ # add a 3x3 unshared weights convolution on top, with 32 output filters:
+ model.add(QuantLocallyConnected2D(32, (3, 3)))
+ # now model.output_shape == (None, 28, 28, 32)
+ ```
+
+ Arguments:
+ filters: Integer, the dimensionality of the output space
+ (i.e. the number of output filters in the convolution).
+ kernel_size: An integer or tuple/list of 2 integers, specifying the
+ width and height of the 2D convolution window. Can be a single integer to
+ specify the same value for all spatial dimensions.
+ input_quantizer: Quantization function applied to the input of the layer.
+ kernel_quantizer: Quantization function applied to the `kernel` weights matrix.
+ strides: An integer or tuple/list of 2 integers, specifying the strides of the
+ convolution along the width and height. Can be a single integer to specify
+ the same value for all spatial dimensions.
+ padding: Currently only support `"valid"` (case-insensitive).
+ `"same"` will be supported in future.
+ data_format: A string, one of `channels_last` (default) or `channels_first`.
+ The ordering of the dimensions in the inputs. `channels_last` corresponds to
+ inputs with shape `(batch, height, width, channels)` while `channels_first`
+ corresponds to inputs with shape `(batch, channels, height, width)`. It
+ defaults to the `image_data_format` value found in your Keras config file at
+ `~/.keras/keras.json`. If you never set it, then it will be "channels_last".
+ activation: Activation function to use. If you don't specify anything,
+ no activation is applied (ie. "linear" activation: `a(x) = x`).
+ use_bias: Boolean, whether the layer uses a bias vector.
+ kernel_initializer: Initializer for the `kernel` weights matrix.
+ bias_initializer: Initializer for the bias vector.
+ kernel_regularizer: Regularizer function applied to the `kernel` weights matrix.
+ bias_regularizer: Regularizer function applied to the bias vector.
+ activity_regularizer: Regularizer function applied to
+ the output of the layer (its "activation").
+ kernel_constraint: Constraint function applied to the kernel matrix.
+ bias_constraint: Constraint function applied to the bias vector.
+ implementation: implementation mode, either `1` or `2`.
+ `1` loops over input spatial locations to perform the forward pass.
+ It is memory-efficient but performs a lot of (small) ops.
+
+ `2` stores layer weights in a dense but sparsely-populated 2D matrix
+ and implements the forward pass as a single matrix-multiply. It uses
+ a lot of RAM but performs few (large) ops.
+
+ Depending on the inputs, layer parameters, hardware, and
+ `tf.executing_eagerly()` one implementation can be dramatically faster
+ (e.g. 50X) than another.
+
+ It is recommended to benchmark both in the setting of interest to pick
+ the most efficient one (in terms of speed and memory usage).
+
+ Following scenarios could benefit from setting `implementation=2`:
+ - eager execution;
+ - inference;
+ - running on CPU;
+ - large amount of RAM available;
+ - small models (few filters, small kernel);
+ - using `padding=same` (only possible with `implementation=2`).
+
+ Input shape:
+ 4D tensor with shape:
+ `(samples, channels, rows, cols)` if data_format='channels_first'
+ or 4D tensor with shape:
+ `(samples, rows, cols, channels)` if data_format='channels_last'.
+
+ Output shape:
+ 4D tensor with shape:
+ `(samples, filters, new_rows, new_cols)` if data_format='channels_first'
+ or 4D tensor with shape:
+ `(samples, new_rows, new_cols, filters)` if data_format='channels_last'.
+ `rows` and `cols` values might have changed due to padding.
+ """
+
pass
@utils.register_keras_custom_object
class QuantDense(QuantizerBase, tf.keras.layers.Dense):
+ """Just your regular densely-connected quantized NN layer.
+
+ `QuantDense` implements the operation:
+ `output = activation(dot(input_quantizer(input), kernel_quantizer(kernel)) + bias)`
+ where `activation` is the element-wise activation function passed as the
+ `activation` argument, `kernel` is a weights matrix created by the layer, and `bias`
+ is a bias vector created by the layer (only applicable if `use_bias` is `True`).
+ `input_quantizer` and `kernel_quantizer` are the element-wise quantization
+ functions to use. If both quantization functions are `None` this layer is
+ equivalent to `Dense`.
+
+ Note: If the input to the layer has a rank greater than 2, then it is flattened
+ prior to the initial dot product with `kernel`.
+
+ Example:
+
+ ```python
+ # as first layer in a sequential model:
+ model = Sequential()
+ model.add(
+ QuantDense(
+ 32,
+ input_quantizer="sign_clip_ste",
+ kernel_quantizer="sign_clip_ste",
+ input_shape=(16,),
+ )
+ )
+ # now the model will take as input arrays of shape (*, 16)
+ # and output arrays of shape (*, 32)
+
+ # after the first layer, you don't need to specify
+ # the size of the input anymore:
+ model.add(
+ QuantDense(
+ 32,
+ input_quantizer="sign_clip_ste",
+ kernel_quantizer="sign_clip_ste",
+ )
+ )
+ ```
+
+ Arguments:
+ units: Positive integer, dimensionality of the output space.
+ input_quantizer: Quantization function applied to the input of the layer.
+ kernel_quantizer: Quantization function applied to the `kernel` weights matrix.
+ activation: Activation function to use.
+ If you don't specify anything, no activation is applied
+ (ie. "linear" activation: `a(x) = x`).
+ use_bias: Boolean, whether the layer uses a bias vector.
+ kernel_initializer: Initializer for the `kernel` weights matrix.
+ bias_initializer: Initializer for the bias vector.
+ kernel_regularizer: Regularizer function applied to
+ the `kernel` weights matrix.
+ bias_regularizer: Regularizer function applied to the bias vector.
+ activity_regularizer: Regularizer function applied to
+ the output of the layer (its "activation")..
+ kernel_constraint: Constraint function applied to the `kernel` weights matrix.
+ bias_constraint: Constraint function applied to the bias vector.
+
+ Input shape:
+ N-D tensor with shape: `(batch_size, ..., input_dim)`.
+ The most common situation would be
+ a 2D input with shape `(batch_size, input_dim)`.
+
+ Output shape:
+ N-D tensor with shape: `(batch_size, ..., units)`.
+ For instance, for a 2D input with shape `(batch_size, input_dim)`,
+ the output would have shape `(batch_size, units)`.
+ """
+
pass
@utils.register_keras_custom_object
class QuantSeparableConv1D(QuantizerSeparableBase, tf.keras.layers.SeparableConv1D):
+ """Depthwise separable 1D quantized convolution.
+
+ This layer performs a depthwise convolution that acts separately on channels,
+ followed by a pointwise convolution that mixes channels.
+ `input_quantizer`, `depthwise_quantizer` and `pointwise_quantizer` are the
+ element-wise quantization functions to use. If all quantization functions are `None`
+ this layer is equivalent to `SeparableConv1D`. If `use_bias` is True and
+ a bias initializer is provided, it adds a bias vector to the output.
+ It then optionally applies an activation function to produce the final output.
+
+ Arguments:
+ filters: Integer, the dimensionality of the output space (i.e. the number
+ of filters in the convolution).
+ kernel_size: A single integer specifying the spatial dimensions of the filters.
+ input_quantizer: Quantization function applied to the input of the layer.
+ depthwise_quantizer: Quantization function applied to the
+ depthwise convolution kernel.
+ pointwise_quantizer: Quantization function applied to the
+ pointwise convolution kernel.
+ strides: A single integer specifying the strides of the convolution.
+ Specifying any `stride` value != 1 is incompatible with specifying
+ any `dilation_rate` value != 1.
+ padding: One of `"valid"`, `"same"`, or `"causal"` (case-insensitive).
+ data_format: A string, one of `channels_last` (default) or `channels_first`.
+ The ordering of the dimensions in the inputs. `channels_last` corresponds
+ to inputs with shape `(batch, length, channels)` while `channels_first`
+ corresponds to inputs with shape `(batch, channels, length)`.
+ dilation_rate: A single integer, specifying the dilation rate to use for dilated
+ convolution. Currently, specifying any `dilation_rate` value != 1 is
+ incompatible with specifying any stride value != 1.
+ depth_multiplier: The number of depthwise convolution output channels for
+ each input channel. The total number of depthwise convolution output
+ channels will be equal to `num_filters_in * depth_multiplier`.
+ activation: Activation function. Set it to None to maintain a linear activation.
+ use_bias: Boolean, whether the layer uses a bias.
+ depthwise_initializer: An initializer for the depthwise convolution kernel.
+ pointwise_initializer: An initializer for the pointwise convolution kernel.
+ bias_initializer: An initializer for the bias vector. If None, the default
+ initializer will be used.
+ depthwise_regularizer: Optional regularizer for the
+ depthwise convolution kernel.
+ pointwise_regularizer: Optional regularizer for the
+ pointwise convolution kernel.
+ bias_regularizer: Optional regularizer for the bias vector.
+ activity_regularizer: Optional regularizer function for the output.
+ depthwise_constraint: Optional projection function to be applied to the
+ depthwise kernel after being updated by an `Optimizer`
+ (e.g. used for norm constraints or value constraints for layer weights).
+ The function must take as input the unprojected variable and must return
+ the projected variable (which must have the same shape). Constraints are
+ not safe to use when doing asynchronous distributed training.
+ pointwise_constraint: Optional projection function to be applied to the
+ pointwise kernel after being updated by an `Optimizer`.
+ bias_constraint: Optional projection function to be applied to the
+ bias after being updated by an `Optimizer`.
+ trainable: Boolean, if `True` the weights of this layer will be marked as
+ trainable (and listed in `layer.trainable_weights`).
+ name: A string, the name of the layer.
+ """
+
pass
@utils.register_keras_custom_object
class QuantSeparableConv2D(QuantizerSeparableBase, tf.keras.layers.SeparableConv2D):
+ """Depthwise separable 2D convolution.
+
+ Separable convolutions consist in first performing a depthwise spatial convolution
+ (which acts on each input channel separately) followed by a pointwise convolution
+ which mixes together the resulting output channels. The `depth_multiplier` argument
+ controls how many output channels are generated per input channel
+ in the depthwise step.
+ `input_quantizer`, `depthwise_quantizer` and `pointwise_quantizer` are the
+ element-wise quantization functions to use. If all quantization functions are `None`
+ this layer is equivalent to `SeparableConv1D`. If `use_bias` is True and
+ a bias initializer is provided, it adds a bias vector to the output.
+ It then optionally applies an activation function to produce the final output.
+
+ Intuitively, separable convolutions can be understood as a way to factorize a
+ convolution kernel into two smaller kernels,
+ or as an extreme version of an Inception block.
+
+ Arguments:
+ filters: Integer, the dimensionality of the output space
+ (i.e. the number of output filters in the convolution).
+ kernel_size: An integer or tuple/list of 2 integers, specifying the height and
+ width of the 2D convolution window. Can be a single integer to specify the
+ same value for all spatial dimensions.
+ input_quantizer: Quantization function applied to the input of the layer.
+ depthwise_quantizer: Quantization function applied to the
+ depthwise convolution kernel matrix.
+ pointwise_quantizer: Quantization function applied to the
+ pointwise convolution kernel matrix.
+ strides: An integer or tuple/list of 2 integers, specifying the strides of the
+ convolution along the height and width. Can be a single integer to specify
+ the same value for all spatial dimensions. Specifying any stride value != 1
+ is incompatible with specifying any `dilation_rate` value != 1.
+ padding: one of `"valid"` or `"same"` (case-insensitive).
+ data_format: A string, one of `channels_last` (default) or `channels_first`.
+ The ordering of the dimensions in the inputs. `channels_last` corresponds to
+ inputs with shape `(batch, height, width, channels)` while `channels_first`
+ corresponds to inputs with shape `(batch, channels, height, width)`. It
+ defaults to the `image_data_format` value found in your Keras config file at
+ `~/.keras/keras.json`. If you never set it, then it will be "channels_last".
+ dilation_rate: An integer or tuple/list of 2 integers, specifying the dilation
+ rate to use for dilated convolution. Currently, specifying any
+ `dilation_rate` value != 1 is incompatible with specifying any
+ `strides` value != 1.
+ depth_multiplier: The number of depthwise convolution output channels for each
+ input channel. The total number of depthwise convolution output channels
+ will be equal to `filters_in * depth_multiplier`.
+ activation: Activation function to use. If you don't specify anything,
+ no activation is applied (ie. "linear" activation: `a(x) = x`).
+ use_bias: Boolean, whether the layer uses a bias vector.
+ depthwise_initializer: Initializer for the depthwise kernel matrix.
+ pointwise_initializer: Initializer for the pointwise kernel matrix.
+ bias_initializer: Initializer for the bias vector.
+ depthwise_regularizer: Regularizer function applied to
+ the depthwise kernel matrix.
+ pointwise_regularizer: Regularizer function applied to
+ the pointwise kernel matrix.
+ bias_regularizer: Regularizer function applied to the bias vector.
+ activity_regularizer: Regularizer function applied to
+ the output of the layer (its "activation").
+ depthwise_constraint: Constraint function applied to
+ the depthwise kernel matrix.
+ pointwise_constraint: Constraint function applied to
+ the pointwise kernel matrix.
+ bias_constraint: Constraint function applied to the bias vector.
+
+ Input shape:
+ 4D tensor with shape:
+ `(batch, channels, rows, cols)` if data_format='channels_first'
+ or 4D tensor with shape:
+ `(batch, rows, cols, channels)` if data_format='channels_last'.
+
+ Output shape:
+ 4D tensor with shape:
+ `(batch, filters, new_rows, new_cols)` if data_format='channels_first'
+ or 4D tensor with shape:
+ `(batch, new_rows, new_cols, filters)` if data_format='channels_last'.
+ `rows` and `cols` values might have changed due to padding.
+ """
+
pass
| Add docstrings to layer
This will mainly be copy paste from the existing `tf.keras` layers documentation.
It would be great if there is a nice way to inherit them from parent class.
| 2019-03-22T15:34:05 |
||
larq/larq | 34 | larq__larq-34 | [
"32"
] | e082bcdb0c3d7c6e3649d7d45b88cdf1b8b0906b | diff --git a/xquant/quantizers.py b/xquant/quantizers.py
--- a/xquant/quantizers.py
+++ b/xquant/quantizers.py
@@ -2,12 +2,22 @@
from xquant import utils
+def sign(x):
+ """A sign function that will never be zero"""
+ return tf.sign(tf.sign(x) + 1e-10)
+
+
@utils.register_keras_custom_object
@tf.custom_gradient
def ste_sign(x):
r"""
Sign binarization function.
- \\[q(x) = \mathrm{Sign}(x)\\]
+ \\[
+ q(x) = \begin{cases}
+ -1 & x < 0 \\\
+ 1 & x \geq 0
+ \end{cases}
+ \\]
The gradient is estimated using the Straight-Through Estimator.
\\[\frac{\partial q(x)}{\partial x} = x\\]
@@ -26,7 +36,7 @@ def ste_sign(x):
def grad(dy):
return dy
- return tf.sign(x), grad
+ return sign(x), grad
@utils.register_keras_custom_object
@@ -34,7 +44,12 @@ def grad(dy):
def approx_sign(x):
r"""
Sign binarization function.
- \\[q(x) = \mathrm{Sign}(x)\\]
+ \\[
+ q(x) = \begin{cases}
+ -1 & x < 0 \\\
+ 1 & x \geq 0
+ \end{cases}
+ \\]
The gradient is estimated using the ApproxSign method.
\\[\frac{\partial q(x)}{\partial x} = (2 - 2 \left|x\right|))\\]
@@ -54,7 +69,7 @@ def approx_sign(x):
def grad(dy):
return (1 - tf.abs(x)) * 2 * dy
- return tf.sign(x), grad
+ return sign(x), grad
def serialize(initializer):
| tf.sign(0) = 0
| 2019-03-26T18:01:20 |
||
larq/larq | 39 | larq__larq-39 | [
"38"
] | 8e44f3990b209c638258056bb638b6768669e55f | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -21,7 +21,7 @@ def readme():
extras_require={
"tensorflow": ["tensorflow>=1.13.1"],
"tensorflow_gpu": ["tensorflow-gpu>=1.13.1"],
- "test": ["absl-py>=0.7.0", "pytest>=4.3.1"],
+ "test": ["absl-py>=0.7.0", "pytest>=4.3.1", "pytest-cov>=2.6.1"],
"docs": [
"pydoc-markdown@https://github.com/lgeiger/pydoc-markdown/archive/master.zip",
"mkdocs-material>=4.1.0",
| Add test coverage report to Azure Pipelines
https://docs.microsoft.com/en-us/azure/devops/pipelines/languages/python?view=azure-devops#test-with-pytest-and-collect-coverage-metrics-with-pytest-cov
| 2019-04-02T18:17:42 |
||
larq/larq | 53 | larq__larq-53 | [
"26"
] | 2e4e472b42714a9c72244970a4720bfc97bcb8cd | diff --git a/xquant/layers.py b/xquant/layers.py
--- a/xquant/layers.py
+++ b/xquant/layers.py
@@ -174,11 +174,11 @@ class QuantDense(QuantizerBase, tf.keras.layers.Dense):
# Arguments
units: Positive integer, dimensionality of the output space.
- input_quantizer: Quantization function applied to the input of the layer.
- kernel_quantizer: Quantization function applied to the `kernel` weights matrix.
activation: Activation function to use. If you don't specify anything,
no activation is applied (`a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
+ input_quantizer: Quantization function applied to the input of the layer.
+ kernel_quantizer: Quantization function applied to the `kernel` weights matrix.
kernel_initializer: Initializer for the `kernel` weights matrix.
bias_initializer: Initializer for the bias vector.
kernel_regularizer: Regularizer function applied to the `kernel` weights matrix.
@@ -197,7 +197,37 @@ class QuantDense(QuantizerBase, tf.keras.layers.Dense):
shape `(batch_size, input_dim)`, the output would have shape `(batch_size, units)`.
"""
- pass
+ def __init__(
+ self,
+ units,
+ activation=None,
+ use_bias=True,
+ input_quantizer=None,
+ kernel_quantizer=None,
+ kernel_initializer="glorot_uniform",
+ bias_initializer="zeros",
+ kernel_regularizer=None,
+ bias_regularizer=None,
+ activity_regularizer=None,
+ kernel_constraint=None,
+ bias_constraint=None,
+ **kwargs
+ ):
+ super().__init__(
+ units,
+ activation=activation,
+ use_bias=use_bias,
+ input_quantizer=input_quantizer,
+ kernel_quantizer=kernel_quantizer,
+ kernel_initializer=kernel_initializer,
+ bias_initializer=bias_initializer,
+ kernel_regularizer=kernel_regularizer,
+ bias_regularizer=bias_regularizer,
+ activity_regularizer=activity_regularizer,
+ kernel_constraint=kernel_constraint,
+ bias_constraint=bias_constraint,
+ **kwargs
+ )
@utils.register_keras_custom_object
@@ -222,8 +252,6 @@ class QuantConv1D(QuantizerBase, tf.keras.layers.Conv1D):
(i.e. the number of output filters in the convolution).
kernel_size: An integer or tuple/list of a single integer,
specifying the length of the 1D convolution window.
- input_quantizer: Quantization function applied to the input of the layer.
- kernel_quantizer: Quantization function applied to the `kernel` weights matrix.
strides: An integer or tuple/list of a single integer, specifying the stride
length of the convolution. Specifying any stride value != 1 is incompatible
with specifying any `dilation_rate` value != 1.
@@ -239,6 +267,8 @@ class QuantConv1D(QuantizerBase, tf.keras.layers.Conv1D):
activation: Activation function to use. If you don't specify anything, no activation
is applied (`a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
+ input_quantizer: Quantization function applied to the input of the layer.
+ kernel_quantizer: Quantization function applied to the `kernel` weights matrix.
kernel_initializer: Initializer for the `kernel` weights matrix.
bias_initializer: Initializer for the bias vector.
kernel_regularizer: Regularizer function applied to the `kernel` weights matrix.
@@ -256,7 +286,47 @@ class QuantConv1D(QuantizerBase, tf.keras.layers.Conv1D):
`steps` value might have changed due to padding or strides.
"""
- pass
+ def __init__(
+ self,
+ filters,
+ kernel_size,
+ strides=1,
+ padding="valid",
+ data_format="channels_last",
+ dilation_rate=1,
+ activation=None,
+ use_bias=True,
+ input_quantizer=None,
+ kernel_quantizer=None,
+ kernel_initializer="glorot_uniform",
+ bias_initializer="zeros",
+ kernel_regularizer=None,
+ bias_regularizer=None,
+ activity_regularizer=None,
+ kernel_constraint=None,
+ bias_constraint=None,
+ **kwargs
+ ):
+ super().__init__(
+ filters,
+ kernel_size,
+ strides=strides,
+ padding=padding,
+ data_format=data_format,
+ dilation_rate=dilation_rate,
+ activation=activation,
+ use_bias=use_bias,
+ input_quantizer=input_quantizer,
+ kernel_quantizer=kernel_quantizer,
+ kernel_initializer=kernel_initializer,
+ bias_initializer=bias_initializer,
+ kernel_regularizer=kernel_regularizer,
+ bias_regularizer=bias_regularizer,
+ activity_regularizer=activity_regularizer,
+ kernel_constraint=kernel_constraint,
+ bias_constraint=bias_constraint,
+ **kwargs
+ )
@utils.register_keras_custom_object
@@ -282,8 +352,6 @@ class QuantConv2D(QuantizerBase, tf.keras.layers.Conv2D):
kernel_size: An integer or tuple/list of 2 integers, specifying the
height and width of the 2D convolution window. Can be a single integer
to specify the same value for all spatial dimensions.
- input_quantizer: Quantization function applied to the input of the layer.
- kernel_quantizer: Quantization function applied to the `kernel` weights matrix.
strides: An integer or tuple/list of 2 integers, specifying the strides of
the convolution along the height and width. Can be a single integer to
specify the same value for all spatial dimensions. Specifying any stride
@@ -302,6 +370,8 @@ class QuantConv2D(QuantizerBase, tf.keras.layers.Conv2D):
activation: Activation function to use. If you don't specify anything,
no activation is applied (`a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
+ input_quantizer: Quantization function applied to the input of the layer.
+ kernel_quantizer: Quantization function applied to the `kernel` weights matrix.
kernel_initializer: Initializer for the `kernel` weights matrix.
bias_initializer: Initializer for the bias vector.
kernel_regularizer: Regularizer function applied to the `kernel` weights matrix.
@@ -325,7 +395,47 @@ class QuantConv2D(QuantizerBase, tf.keras.layers.Conv2D):
`rows` and `cols` values might have changed due to padding.
"""
- pass
+ def __init__(
+ self,
+ filters,
+ kernel_size,
+ strides=(1, 1),
+ padding="valid",
+ data_format=None,
+ dilation_rate=(1, 1),
+ activation=None,
+ use_bias=True,
+ input_quantizer=None,
+ kernel_quantizer=None,
+ kernel_initializer="glorot_uniform",
+ bias_initializer="zeros",
+ kernel_regularizer=None,
+ bias_regularizer=None,
+ activity_regularizer=None,
+ kernel_constraint=None,
+ bias_constraint=None,
+ **kwargs
+ ):
+ super().__init__(
+ filters,
+ kernel_size,
+ strides=strides,
+ padding=padding,
+ data_format=data_format,
+ dilation_rate=dilation_rate,
+ activation=activation,
+ use_bias=use_bias,
+ input_quantizer=input_quantizer,
+ kernel_quantizer=kernel_quantizer,
+ kernel_initializer=kernel_initializer,
+ bias_initializer=bias_initializer,
+ kernel_regularizer=kernel_regularizer,
+ bias_regularizer=bias_regularizer,
+ activity_regularizer=activity_regularizer,
+ kernel_constraint=kernel_constraint,
+ bias_constraint=bias_constraint,
+ **kwargs
+ )
@utils.register_keras_custom_object
@@ -351,8 +461,6 @@ class QuantConv3D(QuantizerBase, tf.keras.layers.Conv3D):
kernel_size: An integer or tuple/list of 3 integers, specifying the
depth, height and width of the 3D convolution window. Can be a single
integer to specify the same value for all spatial dimensions.
- input_quantizer: Quantization function applied to the input of the layer.
- kernel_quantizer: Quantization function applied to the `kernel` weights matrix.
strides: An integer or tuple/list of 3 integers, specifying the strides of the
convolution along each spatial dimension. Can be a single integer to specify the
same value for all spatial dimensions. Specifying any stride value != 1 is
@@ -372,6 +480,8 @@ class QuantConv3D(QuantizerBase, tf.keras.layers.Conv3D):
activation: Activation function to use. If you don't specify anything,
no activation is applied (`a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
+ input_quantizer: Quantization function applied to the input of the layer.
+ kernel_quantizer: Quantization function applied to the `kernel` weights matrix.
kernel_initializer: Initializer for the `kernel` weights matrix.
bias_initializer: Initializer for the bias vector.
kernel_regularizer: Regularizer function applied to the `kernel` weights matrix.
@@ -400,7 +510,47 @@ class QuantConv3D(QuantizerBase, tf.keras.layers.Conv3D):
changed due to padding.
"""
- pass
+ def __init__(
+ self,
+ filters,
+ kernel_size,
+ strides=(1, 1, 1),
+ padding="valid",
+ data_format=None,
+ dilation_rate=(1, 1, 1),
+ activation=None,
+ use_bias=True,
+ input_quantizer=None,
+ kernel_quantizer=None,
+ kernel_initializer="glorot_uniform",
+ bias_initializer="zeros",
+ kernel_regularizer=None,
+ bias_regularizer=None,
+ activity_regularizer=None,
+ kernel_constraint=None,
+ bias_constraint=None,
+ **kwargs
+ ):
+ super().__init__(
+ filters,
+ kernel_size,
+ strides=strides,
+ padding=padding,
+ data_format=data_format,
+ dilation_rate=dilation_rate,
+ activation=activation,
+ use_bias=use_bias,
+ input_quantizer=input_quantizer,
+ kernel_quantizer=kernel_quantizer,
+ kernel_initializer=kernel_initializer,
+ bias_initializer=bias_initializer,
+ kernel_regularizer=kernel_regularizer,
+ bias_regularizer=bias_regularizer,
+ activity_regularizer=activity_regularizer,
+ kernel_constraint=kernel_constraint,
+ bias_constraint=bias_constraint,
+ **kwargs
+ )
@utils.register_keras_custom_object
@@ -419,9 +569,6 @@ class QuantSeparableConv1D(QuantizerSeparableBase, tf.keras.layers.SeparableConv
filters: Integer, the dimensionality of the output space (i.e. the number
of filters in the convolution).
kernel_size: A single integer specifying the spatial dimensions of the filters.
- input_quantizer: Quantization function applied to the input of the layer.
- depthwise_quantizer: Quantization function applied to the depthwise kernel.
- pointwise_quantizer: Quantization function applied to the pointwise kernel.
strides: A single integer specifying the strides of the convolution.
Specifying any `stride` value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
@@ -438,6 +585,9 @@ class QuantSeparableConv1D(QuantizerSeparableBase, tf.keras.layers.SeparableConv
channels will be equal to `num_filters_in * depth_multiplier`.
activation: Activation function. Set it to None to maintain a linear activation.
use_bias: Boolean, whether the layer uses a bias.
+ input_quantizer: Quantization function applied to the input of the layer.
+ depthwise_quantizer: Quantization function applied to the depthwise kernel.
+ pointwise_quantizer: Quantization function applied to the pointwise kernel.
depthwise_initializer: An initializer for the depthwise convolution kernel.
pointwise_initializer: An initializer for the pointwise convolution kernel.
bias_initializer: An initializer for the bias vector. If None, the default
@@ -461,7 +611,57 @@ class QuantSeparableConv1D(QuantizerSeparableBase, tf.keras.layers.SeparableConv
name: A string, the name of the layer.
"""
- pass
+ def __init__(
+ self,
+ filters,
+ kernel_size,
+ strides=1,
+ padding="valid",
+ data_format=None,
+ dilation_rate=1,
+ depth_multiplier=1,
+ activation=None,
+ use_bias=True,
+ input_quantizer=None,
+ depthwise_quantizer=None,
+ pointwise_quantizer=None,
+ depthwise_initializer="glorot_uniform",
+ pointwise_initializer="glorot_uniform",
+ bias_initializer="zeros",
+ depthwise_regularizer=None,
+ pointwise_regularizer=None,
+ bias_regularizer=None,
+ activity_regularizer=None,
+ depthwise_constraint=None,
+ pointwise_constraint=None,
+ bias_constraint=None,
+ **kwargs
+ ):
+ super().__init__(
+ filters,
+ kernel_size,
+ strides=strides,
+ padding=padding,
+ data_format=data_format,
+ dilation_rate=dilation_rate,
+ depth_multiplier=depth_multiplier,
+ activation=activation,
+ use_bias=use_bias,
+ input_quantizer=input_quantizer,
+ depthwise_quantizer=depthwise_quantizer,
+ pointwise_quantizer=pointwise_quantizer,
+ depthwise_initializer=depthwise_initializer,
+ pointwise_initializer=pointwise_initializer,
+ bias_initializer=bias_initializer,
+ depthwise_regularizer=depthwise_regularizer,
+ pointwise_regularizer=pointwise_regularizer,
+ bias_regularizer=bias_regularizer,
+ activity_regularizer=activity_regularizer,
+ depthwise_constraint=depthwise_constraint,
+ pointwise_constraint=pointwise_constraint,
+ bias_constraint=bias_constraint,
+ **kwargs
+ )
@utils.register_keras_custom_object
@@ -489,9 +689,6 @@ class QuantSeparableConv2D(QuantizerSeparableBase, tf.keras.layers.SeparableConv
kernel_size: An integer or tuple/list of 2 integers, specifying the height and
width of the 2D convolution window. Can be a single integer to specify the
same value for all spatial dimensions.
- input_quantizer: Quantization function applied to the input of the layer.
- depthwise_quantizer: Quantization function applied to the depthwise kernel matrix.
- pointwise_quantizer: Quantization function applied to the pointwise kernel matrix.
strides: An integer or tuple/list of 2 integers, specifying the strides of the
convolution along the height and width. Can be a single integer to specify
the same value for all spatial dimensions. Specifying any stride value != 1
@@ -512,6 +709,9 @@ class QuantSeparableConv2D(QuantizerSeparableBase, tf.keras.layers.SeparableConv
activation: Activation function to use. If you don't specify anything,
no activation is applied (`a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
+ input_quantizer: Quantization function applied to the input of the layer.
+ depthwise_quantizer: Quantization function applied to the depthwise kernel matrix.
+ pointwise_quantizer: Quantization function applied to the pointwise kernel matrix.
depthwise_initializer: Initializer for the depthwise kernel matrix.
pointwise_initializer: Initializer for the pointwise kernel matrix.
bias_initializer: Initializer for the bias vector.
@@ -538,7 +738,57 @@ class QuantSeparableConv2D(QuantizerSeparableBase, tf.keras.layers.SeparableConv
`rows` and `cols` values might have changed due to padding.
"""
- pass
+ def __init__(
+ self,
+ filters,
+ kernel_size,
+ strides=(1, 1),
+ padding="valid",
+ data_format=None,
+ dilation_rate=(1, 1),
+ depth_multiplier=1,
+ activation=None,
+ use_bias=True,
+ input_quantizer=None,
+ depthwise_quantizer=None,
+ pointwise_quantizer=None,
+ depthwise_initializer="glorot_uniform",
+ pointwise_initializer="glorot_uniform",
+ bias_initializer="zeros",
+ depthwise_regularizer=None,
+ pointwise_regularizer=None,
+ bias_regularizer=None,
+ activity_regularizer=None,
+ depthwise_constraint=None,
+ pointwise_constraint=None,
+ bias_constraint=None,
+ **kwargs
+ ):
+ super().__init__(
+ filters,
+ kernel_size,
+ strides=strides,
+ padding=padding,
+ data_format=data_format,
+ dilation_rate=dilation_rate,
+ depth_multiplier=depth_multiplier,
+ activation=activation,
+ use_bias=use_bias,
+ input_quantizer=input_quantizer,
+ depthwise_quantizer=depthwise_quantizer,
+ pointwise_quantizer=pointwise_quantizer,
+ depthwise_initializer=depthwise_initializer,
+ pointwise_initializer=pointwise_initializer,
+ bias_initializer=bias_initializer,
+ depthwise_regularizer=depthwise_regularizer,
+ pointwise_regularizer=pointwise_regularizer,
+ bias_regularizer=bias_regularizer,
+ activity_regularizer=activity_regularizer,
+ depthwise_constraint=depthwise_constraint,
+ pointwise_constraint=pointwise_constraint,
+ bias_constraint=bias_constraint,
+ **kwargs
+ )
@utils.register_keras_custom_object
@@ -564,8 +814,6 @@ class QuantConv2DTranspose(QuantizerBase, tf.keras.layers.Conv2DTranspose):
kernel_size: An integer or tuple/list of 2 integers, specifying the
height and width of the 2D convolution window. Can be a single integer
to specify the same value for all spatial dimensions.
- input_quantizer: Quantization function applied to the input of the layer.
- kernel_quantizer: Quantization function applied to the `kernel` weights matrix.
strides: An integer or tuple/list of 2 integers, specifying the strides of
the convolution along the height and width. Can be a single integer to
specify the same value for all spatial dimensions. Specifying any stride
@@ -590,6 +838,8 @@ class QuantConv2DTranspose(QuantizerBase, tf.keras.layers.Conv2DTranspose):
activation: Activation function to use. If you don't specify anything,
no activation is applied (`a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
+ input_quantizer: Quantization function applied to the input of the layer.
+ kernel_quantizer: Quantization function applied to the `kernel` weights matrix.
kernel_initializer: Initializer for the `kernel` weights matrix.
bias_initializer: Initializer for the bias vector.
kernel_regularizer: Regularizer function applied to the `kernel` weights matrix.
@@ -619,7 +869,48 @@ class QuantConv2DTranspose(QuantizerBase, tf.keras.layers.Conv2DTranspose):
Networks](https://www.matthewzeiler.com/mattzeiler/deconvolutionalnetworks.pdf)
"""
- pass
+ def __init__(
+ self,
+ filters,
+ kernel_size,
+ strides=(1, 1),
+ padding="valid",
+ output_padding=None,
+ data_format=None,
+ dilation_rate=(1, 1),
+ activation=None,
+ use_bias=True,
+ input_quantizer=None,
+ kernel_quantizer=None,
+ kernel_initializer="glorot_uniform",
+ bias_initializer="zeros",
+ kernel_regularizer=None,
+ bias_regularizer=None,
+ activity_regularizer=None,
+ kernel_constraint=None,
+ bias_constraint=None,
+ **kwargs
+ ):
+ super().__init__(
+ filters,
+ kernel_size,
+ strides=strides,
+ padding=padding,
+ data_format=data_format,
+ dilation_rate=dilation_rate,
+ activation=activation,
+ use_bias=use_bias,
+ input_quantizer=input_quantizer,
+ kernel_quantizer=kernel_quantizer,
+ kernel_initializer=kernel_initializer,
+ bias_initializer=bias_initializer,
+ kernel_regularizer=kernel_regularizer,
+ bias_regularizer=bias_regularizer,
+ activity_regularizer=activity_regularizer,
+ kernel_constraint=kernel_constraint,
+ bias_constraint=bias_constraint,
+ **kwargs
+ )
@utils.register_keras_custom_object
@@ -646,8 +937,6 @@ class QuantConv3DTranspose(QuantizerBase, tf.keras.layers.Conv3DTranspose):
kernel_size: An integer or tuple/list of 3 integers, specifying the depth, height
and width of the 3D convolution window. Can be a single integer to specify the
same value for all spatial dimensions.
- input_quantizer: Quantization function applied to the input of the layer.
- kernel_quantizer: Quantization function applied to the `kernel` weights matrix.
strides: An integer or tuple/list of 3 integers, specifying the strides of the
convolution along the depth, height and width. Can be a single integer to
specify the same value for all spatial dimensions. Specifying any stride
@@ -671,6 +960,8 @@ class QuantConv3DTranspose(QuantizerBase, tf.keras.layers.Conv3DTranspose):
activation: Activation function to use. If you don't specify anything,
no activation is applied (`a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
+ input_quantizer: Quantization function applied to the input of the layer.
+ kernel_quantizer: Quantization function applied to the `kernel` weights matrix.
kernel_initializer: Initializer for the `kernel` weights matrix.
bias_initializer: Initializer for the bias vector.
kernel_regularizer: Regularizer function applied to the `kernel` weights matrix.
@@ -700,7 +991,46 @@ class QuantConv3DTranspose(QuantizerBase, tf.keras.layers.Conv3DTranspose):
Networks](https://www.matthewzeiler.com/mattzeiler/deconvolutionalnetworks.pdf)
"""
- pass
+ def __init__(
+ self,
+ filters,
+ kernel_size,
+ strides=(1, 1, 1),
+ padding="valid",
+ output_padding=None,
+ data_format=None,
+ activation=None,
+ use_bias=True,
+ input_quantizer=None,
+ kernel_quantizer=None,
+ kernel_initializer="glorot_uniform",
+ bias_initializer="zeros",
+ kernel_regularizer=None,
+ bias_regularizer=None,
+ activity_regularizer=None,
+ kernel_constraint=None,
+ bias_constraint=None,
+ **kwargs
+ ):
+ super().__init__(
+ filters,
+ kernel_size,
+ strides=strides,
+ padding=padding,
+ data_format=data_format,
+ activation=activation,
+ use_bias=use_bias,
+ input_quantizer=input_quantizer,
+ kernel_quantizer=kernel_quantizer,
+ kernel_initializer=kernel_initializer,
+ bias_initializer=bias_initializer,
+ kernel_regularizer=kernel_regularizer,
+ bias_regularizer=bias_regularizer,
+ activity_regularizer=activity_regularizer,
+ kernel_constraint=kernel_constraint,
+ bias_constraint=bias_constraint,
+ **kwargs
+ )
@utils.register_keras_custom_object
@@ -730,8 +1060,6 @@ class QuantLocallyConnected1D(QuantizerBase, tf.keras.layers.LocallyConnected1D)
(i.e. the number of output filters in the convolution).
kernel_size: An integer or tuple/list of a single integer,
specifying the length of the 1D convolution window.
- input_quantizer: Quantization function applied to the input of the layer.
- kernel_quantizer: Quantization function applied to the `kernel` weights matrix.
strides: An integer or tuple/list of a single integer, specifying the stride
length of the convolution. Specifying any stride value != 1 is incompatible
with specifying any `dilation_rate` value != 1.
@@ -746,6 +1074,8 @@ class QuantLocallyConnected1D(QuantizerBase, tf.keras.layers.LocallyConnected1D)
activation: Activation function to use. If you don't specify anything,
no activation is applied (`a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
+ input_quantizer: Quantization function applied to the input of the layer.
+ kernel_quantizer: Quantization function applied to the `kernel` weights matrix.
kernel_initializer: Initializer for the `kernel` weights matrix.
bias_initializer: Initializer for the bias vector.
kernel_regularizer: Regularizer function applied to the `kernel` weights matrix.
@@ -786,7 +1116,47 @@ class QuantLocallyConnected1D(QuantizerBase, tf.keras.layers.LocallyConnected1D)
`steps` value might have changed due to padding or strides.
"""
- pass
+ def __init__(
+ self,
+ filters,
+ kernel_size,
+ strides=1,
+ padding="valid",
+ data_format=None,
+ activation=None,
+ use_bias=True,
+ input_quantizer=None,
+ kernel_quantizer=None,
+ kernel_initializer="glorot_uniform",
+ bias_initializer="zeros",
+ kernel_regularizer=None,
+ bias_regularizer=None,
+ activity_regularizer=None,
+ kernel_constraint=None,
+ bias_constraint=None,
+ implementation=1,
+ **kwargs
+ ):
+ super().__init__(
+ filters,
+ kernel_size,
+ strides=strides,
+ padding=padding,
+ data_format=data_format,
+ activation=activation,
+ use_bias=use_bias,
+ input_quantizer=input_quantizer,
+ kernel_quantizer=kernel_quantizer,
+ kernel_initializer=kernel_initializer,
+ bias_initializer=bias_initializer,
+ kernel_regularizer=kernel_regularizer,
+ bias_regularizer=bias_regularizer,
+ activity_regularizer=activity_regularizer,
+ kernel_constraint=kernel_constraint,
+ bias_constraint=bias_constraint,
+ implementation=implementation,
+ **kwargs
+ )
@utils.register_keras_custom_object
@@ -821,8 +1191,6 @@ class QuantLocallyConnected2D(QuantizerBase, tf.keras.layers.LocallyConnected2D)
kernel_size: An integer or tuple/list of 2 integers, specifying the
width and height of the 2D convolution window. Can be a single integer to
specify the same value for all spatial dimensions.
- input_quantizer: Quantization function applied to the input of the layer.
- kernel_quantizer: Quantization function applied to the `kernel` weights matrix.
strides: An integer or tuple/list of 2 integers, specifying the strides of the
convolution along the width and height. Can be a single integer to specify
the same value for all spatial dimensions.
@@ -837,6 +1205,8 @@ class QuantLocallyConnected2D(QuantizerBase, tf.keras.layers.LocallyConnected2D)
activation: Activation function to use. If you don't specify anything,
no activation is applied (`a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
+ input_quantizer: Quantization function applied to the input of the layer.
+ kernel_quantizer: Quantization function applied to the `kernel` weights matrix.
kernel_initializer: Initializer for the `kernel` weights matrix.
bias_initializer: Initializer for the bias vector.
kernel_regularizer: Regularizer function applied to the `kernel` weights matrix.
@@ -883,4 +1253,44 @@ class QuantLocallyConnected2D(QuantizerBase, tf.keras.layers.LocallyConnected2D)
`rows` and `cols` values might have changed due to padding.
"""
- pass
+ def __init__(
+ self,
+ filters,
+ kernel_size,
+ strides=(1, 1),
+ padding="valid",
+ data_format=None,
+ activation=None,
+ use_bias=True,
+ input_quantizer=None,
+ kernel_quantizer=None,
+ kernel_initializer="glorot_uniform",
+ bias_initializer="zeros",
+ kernel_regularizer=None,
+ bias_regularizer=None,
+ activity_regularizer=None,
+ kernel_constraint=None,
+ bias_constraint=None,
+ implementation=1,
+ **kwargs
+ ):
+ super().__init__(
+ filters,
+ kernel_size,
+ strides=strides,
+ padding=padding,
+ data_format=data_format,
+ activation=activation,
+ use_bias=use_bias,
+ input_quantizer=input_quantizer,
+ kernel_quantizer=kernel_quantizer,
+ kernel_initializer=kernel_initializer,
+ bias_initializer=bias_initializer,
+ kernel_regularizer=kernel_regularizer,
+ bias_regularizer=bias_regularizer,
+ activity_regularizer=activity_regularizer,
+ kernel_constraint=kernel_constraint,
+ bias_constraint=bias_constraint,
+ implementation=implementation,
+ **kwargs
+ )
| diff --git a/xquant/layers_test.py b/xquant/layers_test.py
--- a/xquant/layers_test.py
+++ b/xquant/layers_test.py
@@ -2,6 +2,8 @@
import numpy as np
from absl.testing import parameterized
import xquant as xq
+import pytest
+import inspect
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
@@ -183,3 +185,39 @@ def test_separable_layer_does_not_warn(caplog):
pointwise_constraint="weight_clip",
)
assert caplog.records == []
+
+
[email protected](
+ "quant_layer,layer",
+ [
+ (xq.layers.QuantDense, tf.keras.layers.Dense),
+ (xq.layers.QuantConv1D, tf.keras.layers.Conv1D),
+ (xq.layers.QuantConv2D, tf.keras.layers.Conv2D),
+ (xq.layers.QuantConv3D, tf.keras.layers.Conv3D),
+ (xq.layers.QuantConv2DTranspose, tf.keras.layers.Conv2DTranspose),
+ (xq.layers.QuantConv3DTranspose, tf.keras.layers.Conv3DTranspose),
+ (xq.layers.QuantLocallyConnected1D, tf.keras.layers.LocallyConnected1D),
+ (xq.layers.QuantLocallyConnected2D, tf.keras.layers.LocallyConnected2D),
+ ],
+)
+def test_layer_kwargs(quant_layer, layer):
+ quant_params = inspect.signature(quant_layer).parameters
+ params = inspect.signature(layer).parameters
+
+ quant_params_list = list(quant_params.keys())
+ params_list = list(params.keys())
+
+ for p in (
+ "input_quantizer",
+ "kernel_quantizer",
+ "depthwise_quantizer",
+ "pointwise_quantizer",
+ ):
+ try:
+ quant_params_list.remove(p)
+ except:
+ pass
+ assert quant_params_list == params_list
+
+ for param in params_list:
+ assert quant_params.get(param).default == params.get(param).default
| Reorder Layers arguments
Currently layers have the following signature:
```python
QuantLayer(*args, input_quantizer=None, kernel_quantizer=None, **kwargs)
```
This makes the implementation trivial since we don't need to forward the `args` and `kwargs` explicitly. On the other hand it probably makes sense to not have the `quantizer` keywords as the fist keywords, which would bring it more inline with the signature of the original keras layers:
## Currently
```python
QuantConv2D(
filters,
kernel_size,
input_quantizer=None,
kernel_quantizer=None,
strides=(1, 1),
padding="valid",
data_format=None,
dilation_rate=(1, 1),
activation=None,
use_bias=True,
kernel_initializer="glorot_uniform",
bias_initializer="zeros",
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
)
```
## Is this better?
```python
QuantConv2D(
filters,
kernel_size,
strides=(1, 1),
padding="valid",
data_format=None,
dilation_rate=(1, 1),
activation=None,
use_bias=True,
input_quantizer=None,
kernel_quantizer=None,
kernel_initializer="glorot_uniform",
bias_initializer="zeros",
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
)
```
To implement this we would need to explicitly forward `kwargs` (only a bit of copy paste), with the advantage that all keyword arguments will show up in our docs. Should we change it?
| 2019-04-08T11:34:51 |
|
larq/larq | 63 | larq__larq-63 | [
"31"
] | 29f7df5eda00a7fafc03445690bb6d02a62c013f | diff --git a/larq/__init__.py b/larq/__init__.py
--- a/larq/__init__.py
+++ b/larq/__init__.py
@@ -2,6 +2,7 @@
import larq.activations as activations
import larq.callbacks as callbacks
import larq.constraints as constraints
+import larq.models as models
import larq.quantizers as quantizers
import larq.optimizers as optimizers
@@ -10,6 +11,7 @@
"activations",
"callbacks",
"constraints",
+ "models",
"quantizers",
"optimizers",
]
diff --git a/larq/models.py b/larq/models.py
new file mode 100644
--- /dev/null
+++ b/larq/models.py
@@ -0,0 +1,93 @@
+import numpy as np
+from tabulate import tabulate
+
+
+def _count_params(weights):
+ """Count the total number of scalars composing the weights.
+
+ # Arguments
+ weights: An iterable containing the weights on which to compute params
+
+ # Returns
+ The total number of scalars composing the weights
+ """
+ return int(sum(np.prod(w.shape.as_list()) for w in weights))
+
+
+def _get_output_shape(layer):
+ try:
+ return tuple(dim if dim else -1 for dim in layer.output_shape)
+ except AttributeError:
+ return "multiple"
+ except RuntimeError: # output_shape unknown in Eager mode.
+ return "?"
+
+
+def _count_binarized_weights(layer):
+ if hasattr(layer, "quantized_latent_weights"):
+ return _count_params(layer.quantized_latent_weights)
+ return 0
+
+
+def _count_fp_weights(layer):
+ if hasattr(layer, "quantized_latent_weights"):
+ return int(
+ sum(
+ np.prod(w.shape.as_list())
+ for w in layer.weights
+ if w not in layer.quantized_latent_weights
+ )
+ )
+ return layer.count_params()
+
+
+def summary(model, tablefmt="simple", print_fn=None):
+ """Prints a string summary of the network.
+
+ # Arguments
+ model: `tf.keras` model instance.
+ tablefmt: Supported table formats are: `fancy_grid`, `github`, `grid`, `html`,
+ `jira`, `latex`, `latex_booktabs`, `latex_raw`, `mediawiki`, `moinmoin`,
+ `orgtbl`, `pipe`, `plain`, `presto`, `psql`, `rst`, `simple`, `textile`,
+ `tsv`, `youtrac`.
+ print_fn: Print function to use. Defaults to `print`. You can set it to a custom
+ function in order to capture the string summary.
+
+ # Raises
+ ValueError: if called before the model is built.
+ """
+
+ if not model.built:
+ raise ValueError(
+ "This model has not yet been built. Build the model first by calling "
+ "`model.build()` or calling `model.fit()` with some data, or specify an "
+ "`input_shape` argument in the first layer(s) for automatic build."
+ )
+
+ header = ("Layer", "Outputs", "# 1-bit", "# 32-bit")
+ table = [
+ [
+ layer.name,
+ _get_output_shape(layer),
+ _count_binarized_weights(layer),
+ _count_fp_weights(layer),
+ ]
+ for layer in model.layers
+ ]
+ table.append(["Total", None, sum(r[2] for r in table), sum(r[3] for r in table)])
+
+ model._check_trainable_weights_consistency()
+ if hasattr(model, "_collected_trainable_weights"):
+ trainable_count = _count_params(model._collected_trainable_weights)
+ else:
+ trainable_count = _count_params(model.trainable_weights)
+ non_trainable_count = _count_params(model.non_trainable_weights)
+
+ if print_fn is None:
+ print_fn = print
+
+ print_fn(tabulate(table, headers=header, tablefmt=tablefmt))
+ print_fn()
+ print_fn(f"Total params: {trainable_count + non_trainable_count}")
+ print_fn(f"Trainable params: {trainable_count}")
+ print_fn(f"Non-trainable params: {non_trainable_count}")
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -17,7 +17,7 @@ def readme():
url="https://github.com/plumerai/larq",
packages=find_packages(),
license="Apache 2.0",
- install_requires=["numpy >= 1.15.4, < 2.0"],
+ install_requires=["numpy >= 1.15.4, < 2.0", "tabulate >= 0.8.3"],
extras_require={
"tensorflow": ["tensorflow>=1.13.1"],
"tensorflow_gpu": ["tensorflow-gpu>=1.13.1"],
| diff --git a/larq/models_test.py b/larq/models_test.py
new file mode 100644
--- /dev/null
+++ b/larq/models_test.py
@@ -0,0 +1,21 @@
+import pytest
+import tensorflow as tf
+import larq as lq
+
+
+def test_summary():
+ model = tf.keras.models.Sequential(
+ [
+ lq.layers.QuantConv2D(
+ 32, (3, 3), kernel_quantizer="ste_sign", input_shape=(28, 28, 1)
+ ),
+ tf.keras.layers.MaxPooling2D((2, 2)),
+ tf.keras.layers.BatchNormalization(),
+ ]
+ )
+ lq.models.summary(model)
+
+
+def test_summary_invalid_model():
+ with pytest.raises(ValueError):
+ lq.models.summary(tf.keras.Model())
| HW specific profiling
Ideally, this would include precision-aware memory footprints and details on parallel branches/shortcut connections.
| 2019-04-10T14:51:18 |
|
larq/larq | 80 | larq__larq-80 | [
"69"
] | 93004ce3b1541dc1d3304e1e755b1fdac8950732 | diff --git a/larq/quantizers.py b/larq/quantizers.py
--- a/larq/quantizers.py
+++ b/larq/quantizers.py
@@ -64,7 +64,7 @@ def ste_sign(x):
@utils.register_keras_custom_object
def magnitude_aware_sign(x):
r"""
- Magnitude-aware sign for birealnet.
+ Magnitude-aware sign for Bi-Real Net.
# Arguments
| Add docs on how to define your own quantizer
| 2019-04-23T14:37:08 |
||
larq/larq | 93 | larq__larq-93 | [
"88"
] | 69c4033040643c0419154d268fc2c86b99a51c00 | diff --git a/generate_api_docs.py b/generate_api_docs.py
--- a/generate_api_docs.py
+++ b/generate_api_docs.py
@@ -1,5 +1,6 @@
"""https://github.com/NiklasRosenstein/pydoc-markdown/blob/master/pydocmd/__main__.py"""
+import inspect
import os
import sys
import yaml
@@ -10,6 +11,23 @@
from pydocmd.preprocessor import Preprocessor
+def callable_to_source_link(obj, scope):
+ path = scope.__file__.lstrip(".")
+ source = inspect.getsourcelines(obj)
+ line = source[-1] + 1 if source[0][0].startswith("@") else source[-1]
+ link = f"https://github.com/plumerai/larq/blob/master{path}#L{line}"
+ return f'<a class="headerlink code-link" style="float:right;" href="{link}" title="Source Code"></a>'
+
+
+class PythonLoaderWithSource(PythonLoader):
+ def load_section(self, section):
+ super().load_section(section)
+ obj = section.loader_context["obj"]
+ if callable(obj):
+ scope = section.loader_context["scope"]
+ section.title += callable_to_source_link(obj, scope)
+
+
with open("apidocs.yml", "r") as stream:
api_structure = yaml.safe_load(stream)
@@ -54,7 +72,7 @@ def create_sections(name, level):
doc = index.new_document(fname)
add_sections(doc, object_names)
-loader = PythonLoader({})
+loader = PythonLoaderWithSource({})
preproc = Preprocessor({})
preproc.link_lookup = {}
| Docs: Add links to source code
This is really handy if people want to understand what's going on behind the scenes or want to implement more advanced stuff
| 2019-05-10T14:03:42 |
||
larq/larq | 97 | larq__larq-97 | [
"87"
] | 6865472adddcb07827956bdef39b4e7e41ec5f56 | diff --git a/larq/callbacks.py b/larq/callbacks.py
--- a/larq/callbacks.py
+++ b/larq/callbacks.py
@@ -11,10 +11,7 @@ class QuantizationLogger(tf.keras.callbacks.Callback):
!!! example
```python
- callbacks = [
- QuantizationLogger(update_freq=100),
- tf.keras.callbacks.TensorBoard(update_freq=100),
- ]
+ callbacks = [QuantizationLogger(), tf.keras.callbacks.TensorBoard()]
model.fit(X_train, Y_train, callbacks=callbacks)
```
@@ -23,19 +20,18 @@ class QuantizationLogger(tf.keras.callbacks.Callback):
changed during the weight update.
# Arguments
- update_freq: `'batch'` or integer. When using `'batch'`, computes the metrics after
- each batch. If using an integer the callback will compute the metrics every
- `update_freq` batches. Note that computing too frequently can slow down training.
+ update_freq: `'batch'` or `'epoch'` or integer. When using `'batch'`, computes the
+ metrics after each batch. The same applies for `'epoch'`. If using an integer
+ the callback will compute the metrics every `update_freq` batches.
+ Note that computing too frequently can slow down training.
"""
- def __init__(self, update_freq="batch"):
- self.previous_weights = {}
+ def __init__(self, update_freq="epoch"):
+ self.batch_previous_weights = {}
+ self.epoch_previous_weights = {}
self.update_freq = update_freq if update_freq != "batch" else 1
- def on_batch_end(self, batch, logs=None):
- should_log = batch > 0 and (batch + 1) % self.update_freq == 0
- should_store = (batch + 2) % self.update_freq == 0
-
+ def _maybe_log_and_store(self, storage, logs, should_log=True, should_store=True):
if should_log or should_store:
ops = []
op_names = []
@@ -46,14 +42,29 @@ def on_batch_end(self, batch, logs=None):
op_names.append(layer.name if i == 0 else f"{layer.name}_{i}")
for key, value in zip(op_names, tf.keras.backend.batch_get_value(ops)):
+ value = value.astype(np.int8)
if should_log:
logs[f"changed_quantization_ration/{key.replace(':', '_')}"] = 1 - (
- np.count_nonzero(value == self.previous_weights[key])
- / value.size
+ np.count_nonzero(value == storage[key]) / value.size
)
if should_store:
- self.previous_weights[key] = value
+ storage[key] = value
if should_log and not should_store:
# We don't need it in the next batch anymore
- self.previous_weights = {}
+ storage = {}
+
+ def on_batch_end(self, batch, logs=None):
+ if self.update_freq != "epoch":
+ self._maybe_log_and_store(
+ self.batch_previous_weights,
+ logs,
+ should_log=batch > 0 and (batch + 1) % self.update_freq == 0,
+ should_store=(batch + 2) % self.update_freq == 0,
+ )
+
+ def on_train_begin(self, logs=None):
+ self._maybe_log_and_store(self.epoch_previous_weights, logs, should_log=False)
+
+ def on_epoch_end(self, epoch, logs=None):
+ self._maybe_log_and_store(self.epoch_previous_weights, logs)
| diff --git a/larq/callbacks_test.py b/larq/callbacks_test.py
--- a/larq/callbacks_test.py
+++ b/larq/callbacks_test.py
@@ -6,11 +6,18 @@
class LogHistory(tf.keras.callbacks.Callback):
def on_train_begin(self, logs={}):
- self.logs = []
+ self.batches = []
+ self.epochs = []
- def on_batch_end(self, batch, logs={}):
+ def _store_logs(self, storage, batch_or_epoch, logs={}):
if [key for key in logs if "changed_quantization_ration" in key]:
- self.logs.append(batch)
+ storage.append(batch_or_epoch)
+
+ def on_batch_end(self, batch, logs={}):
+ self._store_logs(self.batches, batch, logs)
+
+ def on_epoch_end(self, epoch, logs={}):
+ self._store_logs(self.epochs, epoch, logs)
@keras_parameterized.run_all_keras_modes
@@ -34,7 +41,8 @@ def test_quantization_logger(self):
logger = lq.callbacks.QuantizationLogger(update_freq=5)
history = LogHistory()
- x = np.ones((25, 3, 3))
- y = np.zeros((25, 2))
- model.fit(x, y, batch_size=1, epochs=1, callbacks=[logger, history])
- assert history.logs == [4, 9, 14, 19, 24]
+ x = np.ones((20, 3, 3))
+ y = np.zeros((20, 2))
+ model.fit(x, y, batch_size=1, epochs=3, callbacks=[logger, history])
+ assert history.batches == [4, 9, 14, 19] * 3
+ assert history.epochs == [0, 1, 2]
| Support epoch level logging for QuantizedLogger
Currently this only work on a per batch bases and not with tensorboard using `update_freq="epoch"`
| 2019-05-31T17:06:02 |
|
larq/larq | 111 | larq__larq-111 | [
"104"
] | f713c6ba741dc1c0267a3c0c4a0cc5c964ff4dda | diff --git a/larq/callbacks.py b/larq/callbacks.py
--- a/larq/callbacks.py
+++ b/larq/callbacks.py
@@ -74,3 +74,45 @@ def on_train_begin(self, logs=None):
def on_epoch_end(self, epoch, logs=None):
self._maybe_log_and_store(self.epoch_previous_weights, logs)
+
+
+class HyperparameterScheduler(tf.keras.callbacks.Callback):
+ """Generic hyperparameter scheduler.
+
+ # Arguments:
+ schedule: a function that takes an epoch index as input
+ (integer, indexed from 0) and returns a new hyperparameter as output.
+ hyperparameter: str. the name of the hyperparameter to be scheduled.
+ verbose: int. 0: quiet, 1: update messages.
+ """
+
+ def __init__(self, schedule, hyperparameter, verbose=0):
+ super(HyperparameterScheduler, self).__init__()
+ self.schedule = schedule
+ self.hyperparameter = hyperparameter
+ self.verbose = verbose
+
+ def on_epoch_begin(self, epoch, logs=None):
+ if not hasattr(self.model.optimizer, self.hyperparameter):
+ raise ValueError(
+ f'Optimizer must have a "{self.hyperparameter}" attribute.'
+ )
+
+ hp = getattr(self.model.optimizer, self.hyperparameter)
+ try: # new API
+ hyperparameter_val = tf.keras.backend.get_value(hp)
+ hyperparameter_val = self.schedule(epoch, hyperparameter_val)
+ except TypeError: # Support for old API for backward compatibility
+ hyperparameter_val = self.schedule(epoch)
+
+ tf.keras.backend.set_value(hp, hyperparameter_val)
+
+ if self.verbose > 0:
+ print(
+ f"Epoch {epoch + 1}: {self.hyperparameter} changning to {tf.keras.backend.get_value(hp)}."
+ )
+
+ def on_epoch_end(self, epoch, logs=None):
+ logs = logs or {}
+ hp = getattr(self.model.optimizer, self.hyperparameter)
+ logs[self.hyperparameter] = tf.keras.backend.get_value(hp)
diff --git a/larq/optimizers_v1.py b/larq/optimizers_v1.py
--- a/larq/optimizers_v1.py
+++ b/larq/optimizers_v1.py
@@ -137,8 +137,8 @@ def __init__(self, fp_optimizer, threshold=1e-5, gamma=1e-2, name="Bop", **kwarg
with tf.keras.backend.name_scope(self.__class__.__name__):
self.fp_optimizer = fp_optimizer
- self.threshold = threshold
- self.gamma = gamma
+ self.threshold = tf.keras.backend.variable(threshold, name="threshold")
+ self.gamma = tf.keras.backend.variable(gamma, name="gamma")
def get_updates(self, loss, params):
grads = self.get_gradients(loss, params)
@@ -181,8 +181,8 @@ def __getattr__(self, name):
def get_config(self):
fp_optimizer_config = self.fp_optimizer.get_config()
config = {
- "threshold": self.threshold,
- "gamma": self.gamma,
+ "threshold": float(tf.keras.backend.get_value(self.threshold)),
+ "gamma": float(tf.keras.backend.get_value(self.gamma)),
"fp_optimizer": {
"class_name": self.fp_optimizer.__class__.__name__,
"config": fp_optimizer_config,
| diff --git a/larq/callbacks_test.py b/larq/callbacks_test.py
--- a/larq/callbacks_test.py
+++ b/larq/callbacks_test.py
@@ -1,6 +1,12 @@
import numpy as np
import tensorflow as tf
import larq as lq
+
+from larq import testing_utils as lq_testing_utils
+from larq.callbacks import HyperparameterScheduler
+
+from tensorflow import keras
+from tensorflow.python.keras import testing_utils
from tensorflow.python.keras import keras_parameterized
@@ -46,3 +52,64 @@ def test_quantization_logger(self):
model.fit(x, y, batch_size=1, epochs=3, callbacks=[logger, history])
assert history.batches == [4, 9, 14, 19] * 3
assert history.epochs == [0, 1, 2]
+
+
+class TestHyperparameterScheduler:
+ def test_hyper_parameter_scheduler(self):
+ np.random.seed(1337)
+ (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
+ train_samples=1000, test_samples=0, input_shape=(10,), num_classes=2
+ )
+
+ y_train = keras.utils.to_categorical(y_train)
+
+ model = lq_testing_utils.get_small_bnn_model(
+ x_train.shape[1], 20, y_train.shape[1]
+ )
+ bop_optimizer = lq.optimizers.Bop(fp_optimizer=tf.keras.optimizers.Adam(0.01))
+ model.compile(
+ loss="categorical_crossentropy",
+ optimizer=bop_optimizer,
+ metrics=["accuracy"],
+ )
+
+ def scheduler(x):
+ return 1.0 / (1.0 + x)
+
+ cbk_gamma_scheduler = HyperparameterScheduler(
+ scheduler, hyperparameter="gamma", verbose=1
+ )
+ cbk_threshold_scheduler = HyperparameterScheduler(
+ scheduler, hyperparameter="threshold", verbose=1
+ )
+ cbk_lr_scheduler = HyperparameterScheduler(
+ scheduler, hyperparameter="lr", verbose=1
+ )
+
+ num_epochs = 10
+ model.fit(
+ x_train,
+ y_train,
+ epochs=num_epochs,
+ batch_size=16,
+ callbacks=[cbk_gamma_scheduler, cbk_lr_scheduler, cbk_threshold_scheduler],
+ verbose=0,
+ )
+
+ np.testing.assert_almost_equal(
+ keras.backend.get_value(model.optimizer.gamma),
+ scheduler(num_epochs - 1),
+ decimal=8,
+ )
+
+ np.testing.assert_almost_equal(
+ keras.backend.get_value(model.optimizer.threshold),
+ scheduler(num_epochs - 1),
+ decimal=8,
+ )
+
+ np.testing.assert_almost_equal(
+ keras.backend.get_value(model.optimizer.lr),
+ scheduler(num_epochs - 1),
+ decimal=8,
+ )
| Generic hyperparameter scheduler
The Idea is to have a TF scheduler which is optimizer/hyperparameter agnostic.
As an example, it can be used for gamma, threshold or learning rate of the floating point optimizer in Bop.
| 2019-06-25T21:32:28 |
|
larq/larq | 146 | larq__larq-146 | [
"92"
] | 81e84988f85de937d6e4cd9d251481f426d67907 | diff --git a/larq/quantizers.py b/larq/quantizers.py
--- a/larq/quantizers.py
+++ b/larq/quantizers.py
@@ -1,5 +1,39 @@
"""A Quantizer defines the way of transforming a full precision input to a
-quantized output and the pseudo-gradient method used for the backwards pass."""
+quantized output and the pseudo-gradient method used for the backwards pass.
+
+Quantizers can either be used through quantizer arguments that are supported
+for Larq layers, such as `input_quantizer` and `kernel_quantizer`; or they
+can be used similar to activations, i.e. either through an `Activation` layer,
+or through the `activation` argument supported by all forward layer:
+
+```python
+import tensorflow as tf
+import larq as lq
+...
+x = lq.layers.QuantDense(64, activation=None)(x)
+x = lq.layers.QuantDense(64, input_quantizer="ste_sign")(x)
+```
+
+is equivalent to:
+
+```python
+x = lq.layers.QuantDense(64)(x)
+x = tf.keras.layers.Activation("ste_sign")(x)
+x = lq.layers.QuantDense(64)(x)
+```
+
+as well as:
+
+```python
+x = lq.layers.QuantDense(64, activation="ste_sign")(x)
+x = lq.layers.QuantDense(64)(x)
+```
+
+We highly recommend using the first of these formulations: for the
+other two formulations, intermediate layers - like batch normalization or
+average pooling - and shortcut connections may result in non-binary input
+to the convolutions.
+"""
import tensorflow as tf
from larq import utils, math
| Document how to use a quantizer as an activation function
`tf.keras.layers.Activation("ste_sign")`
| 2019-07-15T21:03:11 |
||
larq/larq | 180 | larq__larq-180 | [
"179"
] | ed698187e8e4254afef5750403b3be946d98b8f3 | diff --git a/larq/models.py b/larq/models.py
--- a/larq/models.py
+++ b/larq/models.py
@@ -68,9 +68,10 @@ def _get_output_shape(layer):
class WeightProfile:
- def __init__(self, weight, bitwidth=32):
+ def __init__(self, weight, bitwidth=32, trainable=True):
self._weight = weight
self.bitwidth = bitwidth
+ self.trainable = trainable
@property
def count(self):
@@ -84,10 +85,6 @@ def memory(self):
def fp_equivalent_memory(self):
return 32 * self.count
- @property
- def trainable(self):
- return self._weight.trainable
-
def is_bias(self):
return "bias" in self._weight.name
@@ -103,7 +100,11 @@ class LayerProfile:
def __init__(self, layer):
self._layer = layer
self.weight_profiles = [
- WeightProfile(weight, self._get_bitwidth(weight))
+ WeightProfile(
+ weight,
+ self._get_bitwidth(weight),
+ trainable=weight in layer.trainable_weights,
+ )
for weight in layer.weights
]
| diff --git a/larq/models_test.py b/larq/models_test.py
--- a/larq/models_test.py
+++ b/larq/models_test.py
@@ -33,7 +33,7 @@ def get_profile_model():
padding="same",
),
tf.keras.layers.Flatten(),
- tf.keras.layers.Dense(10),
+ tf.keras.layers.Dense(10, trainable=False),
]
)
diff --git a/larq/snapshots/snap_models_test.py b/larq/snapshots/snap_models_test.py
--- a/larq/snapshots/snap_models_test.py
+++ b/larq/snapshots/snap_models_test.py
@@ -7,10 +7,7 @@
snapshots = Snapshot()
-snapshots[
- "test_summary 1"
-] = """\
-+sequential stats----------------------------------------------------------------------------------------------------------------+
+snapshots['test_summary 1'] = '''+sequential stats----------------------------------------------------------------------------------------------------------------+
| Layer Input prec. Outputs # 1-bit # 2-bit # 32-bit Memory 1-bit MACs 2-bit MACs 32-bit MACs |
| (bit) x 1 x 1 x 1 (kB) (kB) (kB) (kB) |
+--------------------------------------------------------------------------------------------------------------------------------+
@@ -25,8 +22,8 @@
+--------------------------------------------------------------------------------------------------------------------------------+
+sequential summary--------------------------+
| Total params 40682 |
-| Trainable params 40682 |
-| Non-trainable params 0 |
+| Trainable params 1952 |
+| Non-trainable params 38730 |
| Model size: 0.15 MB |
| Float-32 Equivalent 0.16 MB |
| Compression Ratio of Memory 0.96 |
@@ -34,4 +31,4 @@
| Ratio of MACs that are binarized 0.1124 |
| Ratio of MACs that are ternarized 0.0247 |
+--------------------------------------------+
-"""
+'''
| model summery trainable params
If you set a keras.layers.Conv2D(...trainable=False), the lq.models.summery(model) report a wrong non-trainable number(and claim the non-trainable params trainable.)
Using model.summery can get the right answer.
I should be right, but please check it to confirm it again by yourself.
| That's interesting, it looks like TensorFlow marks the weights as trainable even if they are listed as non trainable variables:
```python-console
In [19]: model = keras.Sequential([keras.layers.Conv2D(16, 3, input_shape=(32, 32, 3), trainable=False)])
In [20]: model.layers[0].non_trainable_weights[0].trainable
Out[20]: True
```
See also here: https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/keras/layers/core.py#L1014-L1030
I'll take a look to see if there is a quick fix. | 2019-08-13T09:33:27 |
larq/larq | 191 | larq__larq-191 | [
"105"
] | e3b19ac6fbd5ea34018f32a32794df7df4e760ed | diff --git a/larq/optimizers_v1.py b/larq/optimizers_v1.py
--- a/larq/optimizers_v1.py
+++ b/larq/optimizers_v1.py
@@ -108,12 +108,25 @@ def from_config(cls, config, custom_objects=None):
class Bop(tf.keras.optimizers.Optimizer):
"""Binary optimizer (Bop).
- Bop is a latent-free optimizer for Binarized Neural Networks (BNNs).
+ Bop is a latent-free optimizer for Binarized Neural Networks (BNNs) and
+ Binary Weight Networks (BWN).
+
+ Bop maintains an exponential moving average of the gradients controlled by
+ `gamma`. If this average exceeds the `threshold`, a weight is flipped.
+ Additionally, Bop accepts a regular optimizer that is applied to the
+ non-binary weights in the network.
+
+ The hyperparameter `gamma` is somewhat analogues to the learning rate in
+ SGD methods: a high `gamma` results in rapid convergence but also makes
+ training more noisy.
+
+ Note that the default `threshold` is not optimal for all situations.
+ Setting the threshold too high results in little learning, while setting it
+ too low results in overly noisy behaviour.
!!! example
```python
optimizer = lq.optimizers.Bop(fp_optimizer=tf.keras.optimizers.Adam(0.01))
-
```
# Arguments
@@ -126,7 +139,7 @@ class Bop(tf.keras.optimizers.Optimizer):
- [Latent Weights Do Not Exist: Rethinking Binarized Neural Network Optimization](https://arxiv.org/abs/1906.02107)
"""
- def __init__(self, fp_optimizer, threshold=1e-5, gamma=1e-2, name="Bop", **kwargs):
+ def __init__(self, fp_optimizer, threshold=1e-7, gamma=1e-2, name="Bop", **kwargs):
super().__init__(**kwargs)
if not isinstance(fp_optimizer, tf.keras.optimizers.Optimizer):
diff --git a/larq/optimizers_v2.py b/larq/optimizers_v2.py
--- a/larq/optimizers_v2.py
+++ b/larq/optimizers_v2.py
@@ -9,7 +9,21 @@
class Bop(tf.keras.optimizers.Optimizer):
"""Binary optimizer (Bop).
- Bop is a latent-free optimizer for Binarized Neural Networks (BNNs).
+ Bop is a latent-free optimizer for Binarized Neural Networks (BNNs) and
+ Binary Weight Networks (BWN).
+
+ Bop maintains an exponential moving average of the gradients controlled by
+ `gamma`. If this average exceeds the `threshold`, a weight is flipped.
+ Additionally, Bop accepts a regular optimizer that is applied to the
+ non-binary weights in the network.
+
+ The hyperparameter `gamma` is somewhat analogues to the learning rate in
+ SGD methods: a high `gamma` results in rapid convergence but also makes
+ training more noisy.
+
+ Note that the default `threshold` is not optimal for all situations.
+ Setting the threshold too high results in little learning, while setting it
+ too low results in overly noisy behaviour.
!!! example
```python
@@ -26,7 +40,7 @@ class Bop(tf.keras.optimizers.Optimizer):
- [Latent Weights Do Not Exist: Rethinking Binarized Neural Network Optimization](https://arxiv.org/abs/1906.02107)
"""
- def __init__(self, fp_optimizer, threshold=1e-5, gamma=1e-2, name="Bop", **kwargs):
+ def __init__(self, fp_optimizer, threshold=1e-7, gamma=1e-2, name="Bop", **kwargs):
super().__init__(name=name, **kwargs)
if not isinstance(fp_optimizer, tf.keras.optimizers.Optimizer):
| Adding binary optimizer (Bop) documentation
- [ ] enhancing Bop documentation in code
- [ ] adding Bop documentation/tutorial to [Larq official documentation page](https://plumerai.github.io/larq/)
| is it ok to pick this issue up ?
Hi @goodship1 - that would be great! Let us know if you need any help. | 2019-08-30T09:27:42 |
|
larq/larq | 193 | larq__larq-193 | [
"192"
] | 678369e7aed4483a2c288922893ed01f8dbda267 | diff --git a/larq/models.py b/larq/models.py
--- a/larq/models.py
+++ b/larq/models.py
@@ -48,6 +48,34 @@ def _bitsize_as_str(bitsize):
raise NotImplementedError()
+def _number_as_readable_str(num):
+ # The initial rounding here is necessary so that e.g. `999000` gets
+ # formatted as `1.000 M` rather than `1000 k`
+ num = float(f"{num:.3g}")
+
+ # For numbers less than 1000, output them directly, stripping any trailing
+ # zeros and decimal places.
+ if num < 1000:
+ return str(num).rstrip("0").rstrip(".")
+
+ # For numbers that are at least 1000 trillion (1 quadrillion) format with
+ # scientific notation (3 s.f. = 2 d.p. in scientific notation).
+ if num >= 1e15:
+ return f"{num:#.2E}"
+
+ # Count the magnitude.
+ magnitude = 0
+ while abs(num) >= 1000 and magnitude < 4:
+ magnitude += 1
+ num /= 1000.0
+
+ # ':#.3g' formats the number with 3 significant figures, without stripping
+ # trailing zeros.
+ num = f"{num:#.3g}".rstrip(".")
+ unit = ["", " k", " M", " B", " T"][magnitude]
+ return num + unit
+
+
def _format_table_entry(x, units=1):
try:
assert not np.isnan(x)
@@ -295,9 +323,15 @@ def generate_table(self, include_macs=True):
def generate_summary(self, include_macs=True):
summary = [
- ["Total params", self.weight_count()],
- ["Trainable params", self.weight_count(trainable=True)],
- ["Non-trainable params", self.weight_count(trainable=False)],
+ ["Total params", _number_as_readable_str(self.weight_count())],
+ [
+ "Trainable params",
+ _number_as_readable_str(self.weight_count(trainable=True)),
+ ],
+ [
+ "Non-trainable params",
+ _number_as_readable_str(self.weight_count(trainable=False)),
+ ],
["Model size:", f"{self.memory / (8*1024*1024):.2f} MB"],
[
"Float-32 Equivalent",
@@ -309,7 +343,12 @@ def generate_summary(self, include_macs=True):
if include_macs:
binarization_ratio = self.op_count("mac", 1) / self.op_count(op_type="mac")
ternarization_ratio = self.op_count("mac", 2) / self.op_count(op_type="mac")
- summary.append(["Number of MACs", self.op_count(op_type="mac")])
+ summary.append(
+ [
+ "Number of MACs",
+ _number_as_readable_str(self.op_count(op_type="mac")),
+ ]
+ )
if binarization_ratio > 0:
summary.append(
["Ratio of MACs that are binarized", f"{binarization_ratio:.4f}"]
| diff --git a/larq/snapshots/snap_models_test.py b/larq/snapshots/snap_models_test.py
--- a/larq/snapshots/snap_models_test.py
+++ b/larq/snapshots/snap_models_test.py
@@ -21,13 +21,13 @@
| Total 1600 288 38794 151.80 19.38 4.25 148.73 |
+--------------------------------------------------------------------------------------------------------------------------------+
+sequential summary--------------------------+
-| Total params 40682 |
-| Trainable params 1952 |
-| Non-trainable params 38730 |
+| Total params 40.7 k |
+| Trainable params 1.95 k |
+| Non-trainable params 38.7 k |
| Model size: 0.15 MB |
| Float-32 Equivalent 0.16 MB |
| Compression Ratio of Memory 0.96 |
-| Number of MACs 1411968 |
+| Number of MACs 1.41 M |
| Ratio of MACs that are binarized 0.1124 |
| Ratio of MACs that are ternarized 0.0247 |
+--------------------------------------------+
| Human readable large numbers in `models.summary()`
At the moment the summary table generated by `models.summary()` has lines such as: `Number of MACs: 13228896000` where the raw integer is printed. It would be a lot more readable if this number were instead printed as `13.229 B` or `13.229 Billion`, or at least if there was a thousands separator (space or comma).
| 2019-08-30T12:04:48 |
|
larq/larq | 209 | larq__larq-209 | [
"208"
] | 6222ac18ffcadd8183686f5e90bf7bea505ee4a9 | diff --git a/larq/models.py b/larq/models.py
--- a/larq/models.py
+++ b/larq/models.py
@@ -282,10 +282,7 @@ def _generate_table_header(self, table_config):
for i in table_config["param_bidtwidths"]
),
f"Memory\n({_bitsize_as_str(table_config['memory_units'])})",
- *(
- f"{i}-bit MACs\n({_bitsize_as_str(table_config['mac_units'])})"
- for i in table_config["mac_precisions"]
- ),
+ *(f"{i}-bit MACs" for i in table_config["mac_precisions"]),
]
def _generate_table_total(self, table_config):
@@ -307,7 +304,7 @@ def generate_table(self, include_macs=True):
"mac_precisions": self.unique_op_precisions if include_macs else [],
"param_units": 1,
"memory_units": 8 * 1024,
- "mac_units": 8 * 1024,
+ "mac_units": 1,
}
table = []
| diff --git a/larq/snapshots/snap_models_test.py b/larq/snapshots/snap_models_test.py
--- a/larq/snapshots/snap_models_test.py
+++ b/larq/snapshots/snap_models_test.py
@@ -9,16 +9,16 @@
snapshots['test_summary 1'] = '''+sequential stats----------------------------------------------------------------------------------------------------------------+
| Layer Input prec. Outputs # 1-bit # 2-bit # 32-bit Memory 1-bit MACs 2-bit MACs 32-bit MACs |
-| (bit) x 1 x 1 x 1 (kB) (kB) (kB) (kB) |
+| (bit) x 1 x 1 x 1 (kB) |
+--------------------------------------------------------------------------------------------------------------------------------+
-| quant_conv2d - (-1, 64, 64, 32) 288 0 32 0.16 0 0 144.00 |
+| quant_conv2d - (-1, 64, 64, 32) 288 0 32 0.16 0 0 1179648 |
| max_pooling2d - (-1, 32, 32, 32) 0 0 0 0 0 0 0 |
-| quant_depthwise_conv2d 2 (-1, 11, 11, 32) 0 288 0 0.07 0 4.25 0 |
-| quant_separable_conv2d 1 (-1, 11, 11, 32) 1312 0 32 0.29 19.38 0 0 |
+| quant_depthwise_conv2d 2 (-1, 11, 11, 32) 0 288 0 0.07 0 34848 0 |
+| quant_separable_conv2d 1 (-1, 11, 11, 32) 1312 0 32 0.29 158752 0 0 |
| flatten - (-1, 3872) 0 0 0 0 0 0 0 |
-| dense - (-1, 10) 0 0 38730 151.29 0 0 4.73 |
+| dense - (-1, 10) 0 0 38730 151.29 0 0 38720 |
+--------------------------------------------------------------------------------------------------------------------------------+
-| Total 1600 288 38794 151.80 19.38 4.25 148.73 |
+| Total 1600 288 38794 151.80 158752 34848 1218368 |
+--------------------------------------------------------------------------------------------------------------------------------+
+sequential summary--------------------------+
| Total params 40.7 k |
| MAC output strings are wrong
In the table, but not in the summary, we are incorrectly dividing by `8 * 1024` and displaying a count in 'kB'.
| 2019-09-06T09:55:04 |
|
larq/larq | 210 | larq__larq-210 | [
"195"
] | 8f4dc63a664ca4e2f4bb20ae16293f7b03c6eaf5 | diff --git a/larq/layers_base.py b/larq/layers_base.py
--- a/larq/layers_base.py
+++ b/larq/layers_base.py
@@ -48,8 +48,11 @@ def build(self, input_shape):
def non_trainable_weights(self):
weights = super().non_trainable_weights
if hasattr(self, "flip_ratio"):
- metrics_weights = self.flip_ratio.weights
- return [weight for weight in weights if weight not in metrics_weights]
+ return [
+ weight
+ for weight in weights
+ if not any(weight is metric_w for metric_w in self.flip_ratio.weights)
+ ]
return weights
def call(self, inputs):
@@ -115,8 +118,11 @@ def build(self, input_shape):
def non_trainable_weights(self):
weights = super().non_trainable_weights
if hasattr(self, "flip_ratio"):
- metrics_weights = self.flip_ratio.weights
- return [weight for weight in weights if weight not in metrics_weights]
+ return [
+ weight
+ for weight in weights
+ if not any(weight is metric_w for metric_w in self.flip_ratio.weights)
+ ]
return weights
def call(self, inputs):
@@ -206,7 +212,11 @@ def non_trainable_weights(self):
if hasattr(self, "pointwise_flip_ratio"):
metrics_weights.extend(self.pointwise_flip_ratio.weights)
if metrics_weights:
- return [weight for weight in weights if weight not in metrics_weights]
+ return [
+ weight
+ for weight in weights
+ if not any(weight is metric_w for metric_w in metrics_weights)
+ ]
return weights
def call(self, inputs):
diff --git a/larq/metrics.py b/larq/metrics.py
--- a/larq/metrics.py
+++ b/larq/metrics.py
@@ -118,7 +118,7 @@ def result(self):
def reset_states(self):
tf.keras.backend.batch_set_value(
- [(v, 0) for v in self.variables if v != self._previous_values]
+ [(v, 0) for v in self.variables if v is not self._previous_values]
)
def get_config(self):
diff --git a/larq/models.py b/larq/models.py
--- a/larq/models.py
+++ b/larq/models.py
@@ -131,7 +131,7 @@ def __init__(self, layer):
WeightProfile(
weight,
self._get_bitwidth(weight),
- trainable=weight in layer.trainable_weights,
+ trainable=any(weight is w for w in layer.trainable_weights),
)
for weight in layer.weights
]
@@ -228,18 +228,16 @@ def generate_table_row(self, table_config):
return row
- def _quantized_weights(self):
- try:
- return self._layer.quantized_latent_weights
- except:
- return []
-
def _get_bitwidth(self, weight):
try:
- quantizer = self._layer.quantizers[self._quantized_weights().index(weight)]
- return quantizer.precision
+ for quantizer, quantized_weight in zip(
+ self._layer.quantizers, self._layer.quantized_latent_weights
+ ):
+ if quantized_weight is weight:
+ return quantizer.precision
except:
- return 32
+ pass
+ return 32
class ModelProfile(LayerProfile):
| Test against TensorFlow 2.0.0-rc0
| This failure looks like it is related to to our training metrics. Though I am not sure if this is a problem with our code or with TF 2rc0.
I have a workaround and posted an issue on TensorFlow to make sure this behaviour change is intended and not a bug: https://github.com/tensorflow/tensorflow/issues/32210 | 2019-09-06T15:49:54 |
|
larq/larq | 287 | larq__larq-287 | [
"286"
] | c2d3f9f4b7e27499706572d74fdf8aba37f78646 | diff --git a/larq/optimizers_v2.py b/larq/optimizers_v2.py
--- a/larq/optimizers_v2.py
+++ b/larq/optimizers_v2.py
@@ -58,12 +58,16 @@ def _create_slots(self, var_list):
self.add_slot(var, "m")
def apply_gradients(self, grads_and_vars, name=None):
- bin_grads_and_vars = [(g, v) for g, v in grads_and_vars if self.is_binary(v)]
- fp_grads_and_vars = [(g, v) for g, v in grads_and_vars if not self.is_binary(v)]
+ bin_grads_and_vars, fp_grads_and_vars = [], []
+ for grad, var in grads_and_vars:
+ if self.is_binary(var):
+ bin_grads_and_vars.append((grad, var))
+ else:
+ fp_grads_and_vars.append((grad, var))
bin_train_op = super().apply_gradients(bin_grads_and_vars, name=name)
- fp_train_op = self.fp_optimizer.apply_gradients(fp_grads_and_vars, name=name)
+ fp_train_op = self.fp_optimizer.apply_gradients(fp_grads_and_vars, name=name)
return tf.group(bin_train_op, fp_train_op, name="train_with_bop")
def _resource_apply_sparse(self, grad, var, indices):
| diff --git a/larq/optimizers_test.py b/larq/optimizers_test.py
--- a/larq/optimizers_test.py
+++ b/larq/optimizers_test.py
@@ -23,17 +23,30 @@ def assert_weights(weights, expected):
np.testing.assert_allclose(np.squeeze(w), e)
-def _test_optimizer(optimizer, target=0.75, test_kernels_are_binary=True):
+def _test_optimizer(
+ optimizer, target=0.75, test_kernels_are_binary=True, trainable_bn=True
+):
np.random.seed(1337)
(x_train, y_train), _ = testing_utils.get_test_data(
train_samples=1000, test_samples=0, input_shape=(10,), num_classes=2
)
y_train = keras.utils.to_categorical(y_train)
- model = lq_testing_utils.get_small_bnn_model(x_train.shape[1], 20, y_train.shape[1])
+ model = lq_testing_utils.get_small_bnn_model(
+ x_train.shape[1], 20, y_train.shape[1], trainable_bn=trainable_bn
+ )
model.compile(loss="categorical_crossentropy", optimizer=optimizer, metrics=["acc"])
+
+ initial_vars = [tf.keras.backend.get_value(w) for w in model.trainable_weights]
+
history = model.fit(x_train, y_train, epochs=2, batch_size=16, verbose=0)
+ trained_vars = [tf.keras.backend.get_value(w) for w in model.trainable_weights]
+
+ # check all trainable variables have actually been updated
+ for v0, v1 in zip(initial_vars, trained_vars):
+ assert not np.all(v0 == v1)
+
# Note that when kernels are treated as latent weights they need not be
# binary (see https://arxiv.org/abs/1906.02107 for further discussion)
if test_kernels_are_binary:
@@ -103,6 +116,13 @@ def test_bop_accuracy(self):
lq.optimizers.Bop(fp_optimizer=tf.keras.optimizers.Adam(0.01)),
test_kernels_are_binary=True,
)
+ # test optimizer on model with only binary trainable vars (low accuracy)
+ _test_optimizer(
+ lq.optimizers.Bop(fp_optimizer=tf.keras.optimizers.Adam(0.01)),
+ test_kernels_are_binary=True,
+ trainable_bn=False,
+ target=0,
+ )
@pytest.mark.skipif(
utils.tf_1_14_or_newer() is False,
diff --git a/larq/testing_utils.py b/larq/testing_utils.py
--- a/larq/testing_utils.py
+++ b/larq/testing_utils.py
@@ -7,7 +7,7 @@
from tensorflow.python.keras.testing_utils import _thread_local_data, should_run_eagerly
-def get_small_bnn_model(input_dim, num_hidden, output_dim):
+def get_small_bnn_model(input_dim, num_hidden, output_dim, trainable_bn=True):
model = tf.keras.models.Sequential()
model.add(
lq.layers.QuantDense(
@@ -19,7 +19,7 @@ def get_small_bnn_model(input_dim, num_hidden, output_dim):
use_bias=False,
)
)
- model.add(tf.keras.layers.BatchNormalization())
+ model.add(tf.keras.layers.BatchNormalization(trainable=trainable_bn))
model.add(
lq.layers.QuantDense(
units=output_dim,
| Bop not working with TF2 on MultiGPU
### Describe the bug
In a multi-gpu environment, using Bop generates the following error:
```
2019-10-11 13:45:47 UTC -- Epoch 1/150
2019-10-11 13:45:50 UTC -- Traceback (most recent call last):
2019-10-11 13:45:50 UTC -- File "/usr/local/bin/nf", line 11, in <module>
2019-10-11 13:45:50 UTC -- load_entry_point('project-final', 'console_scripts', 'nf')()
2019-10-11 13:45:50 UTC -- File "/usr/local/lib/python3.7/dist-packages/click/core.py", line 764, in __call__
2019-10-11 13:45:50 UTC -- return self.main(*args, **kwargs)
2019-10-11 13:45:50 UTC -- File "/usr/local/lib/python3.7/dist-packages/click/core.py", line 717, in main
2019-10-11 13:45:50 UTC -- rv = self.invoke(ctx)
2019-10-11 13:45:50 UTC -- File "/usr/local/lib/python3.7/dist-packages/click/core.py", line 1137, in invoke
2019-10-11 13:45:50 UTC -- return _process_result(sub_ctx.command.invoke(sub_ctx))
2019-10-11 13:45:50 UTC -- File "/usr/local/lib/python3.7/dist-packages/click/core.py", line 956, in invoke
2019-10-11 13:45:50 UTC -- return ctx.invoke(self.callback, **ctx.params)
2019-10-11 13:45:50 UTC -- File "/usr/local/lib/python3.7/dist-packages/click/core.py", line 555, in invoke
2019-10-11 13:45:50 UTC -- return callback(*args, **kwargs)
2019-10-11 13:45:50 UTC -- File "/usr/local/lib/python3.7/dist-packages/zookeeper/cli.py", line 114, in train
2019-10-11 13:45:50 UTC -- function(build_model, dataset, hparams, output_dir, **kwargs)
2019-10-11 13:45:50 UTC -- File "/code/project_final/train.py", line 110, in train
2019-10-11 13:45:50 UTC -- callbacks=callbacks,
2019-10-11 13:45:50 UTC -- File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/keras/engine/training.py", line 728, in fit
2019-10-11 13:45:50 UTC -- use_multiprocessing=use_multiprocessing)
2019-10-11 13:45:50 UTC -- File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/keras/engine/training_v2.py", line 324, in fit
2019-10-11 13:45:50 UTC -- total_epochs=epochs)
2019-10-11 13:45:50 UTC -- File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/keras/engine/training_v2.py", line 123, in run_one_epoch
2019-10-11 13:45:50 UTC -- batch_outs = execution_function(iterator)
2019-10-11 13:45:50 UTC -- File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/keras/engine/training_v2_utils.py", line 86, in execution_function
2019-10-11 13:45:50 UTC -- distributed_function(input_fn))
2019-10-11 13:45:50 UTC -- File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/eager/def_function.py", line 457, in __call__
2019-10-11 13:45:50 UTC -- result = self._call(*args, **kwds)
2019-10-11 13:45:50 UTC -- File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/eager/def_function.py", line 503, in _call
2019-10-11 13:45:50 UTC -- self._initialize(args, kwds, add_initializers_to=initializer_map)
2019-10-11 13:45:50 UTC -- File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/eager/def_function.py", line 408, in _initialize
2019-10-11 13:45:50 UTC -- *args, **kwds))
2019-10-11 13:45:50 UTC -- File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/eager/function.py", line 1848, in _get_concrete_function_internal_garbage_collected
2019-10-11 13:45:50 UTC -- graph_function, _, _ = self._maybe_define_function(args, kwargs)
2019-10-11 13:45:50 UTC -- File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/eager/function.py", line 2150, in _maybe_define_function
2019-10-11 13:45:50 UTC -- graph_function = self._create_graph_function(args, kwargs)
2019-10-11 13:45:50 UTC -- File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/eager/function.py", line 2041, in _create_graph_function
2019-10-11 13:45:50 UTC -- capture_by_value=self._capture_by_value),
2019-10-11 13:45:50 UTC -- File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/framework/func_graph.py", line 915, in func_graph_from_py_func
2019-10-11 13:45:50 UTC -- func_outputs = python_func(*func_args, **func_kwargs)
2019-10-11 13:45:50 UTC -- File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/eager/def_function.py", line 358, in wrapped_fn
2019-10-11 13:45:50 UTC -- return weak_wrapped_fn().__wrapped__(*args, **kwds)
2019-10-11 13:45:50 UTC -- File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/keras/engine/training_v2_utils.py", line 73, in distributed_function
2019-10-11 13:45:50 UTC -- per_replica_function, args=(model, x, y, sample_weights))
2019-10-11 13:45:50 UTC -- File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/distribute/distribute_lib.py", line 760, in experimental_run_v2
2019-10-11 13:45:50 UTC -- return self._extended.call_for_each_replica(fn, args=args, kwargs=kwargs)
2019-10-11 13:45:50 UTC -- File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/distribute/distribute_lib.py", line 1787, in call_for_each_replica
2019-10-11 13:45:50 UTC -- return self._call_for_each_replica(fn, args, kwargs)
2019-10-11 13:45:50 UTC -- File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/distribute/mirrored_strategy.py", line 661, in _call_for_each_replica
2019-10-11 13:45:50 UTC -- fn, args, kwargs)
2019-10-11 13:45:50 UTC -- File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/distribute/mirrored_strategy.py", line 196, in _call_for_each_replica
2019-10-11 13:45:50 UTC -- coord.join(threads)
2019-10-11 13:45:50 UTC -- File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/training/coordinator.py", line 389, in join
2019-10-11 13:45:50 UTC -- six.reraise(*self._exc_info_to_raise)
2019-10-11 13:45:50 UTC -- File "/usr/local/lib/python3.7/dist-packages/six.py", line 693, in reraise
2019-10-11 13:45:50 UTC -- raise value
2019-10-11 13:45:50 UTC -- File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/training/coordinator.py", line 297, in stop_on_exception
2019-10-11 13:45:50 UTC -- yield
2019-10-11 13:45:50 UTC -- File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/distribute/mirrored_strategy.py", line 190, in _call_for_each_replica
2019-10-11 13:45:50 UTC -- **merge_kwargs)
2019-10-11 13:45:50 UTC -- File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/keras/optimizer_v2/optimizer_v2.py", line 446, in _distributed_apply
2019-10-11 13:45:50 UTC -- ds_reduce_util.ReduceOp.SUM, grads_and_vars)
2019-10-11 13:45:50 UTC -- File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/distribute/distribute_lib.py", line 1481, in batch_reduce_to
2019-10-11 13:45:50 UTC -- return self._batch_reduce_to(reduce_op, value_destination_pairs)
2019-10-11 13:45:50 UTC -- File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/distribute/mirrored_strategy.py", line 707, in _batch_reduce_to
2019-10-11 13:45:50 UTC -- value_destination_pairs)
2019-10-11 13:45:50 UTC -- File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/distribute/cross_device_ops.py", line 317, in batch_reduce
2019-10-11 13:45:50 UTC -- value_destination_pairs[0][0].values) == 1:
2019-10-11 13:45:50 UTC -- IndexError: list index out of range
```
The code runs fine if I use `tf.keras.optimizers.Adam()` instead of Bop, or if I run it on a single GPU.
### To Reproduce
```
with tf.distribute.MirroredStrategy().scope():
model = build_model()
model.compile(
optimizer=lq.optimizers.Bop(tf.keras.optimizers.Adam()),
loss="categorical_crossentropy",
metrics=["categorical_accuracy", "top_k_categorical_accuracy"],
)
model.fit(train_data)
```
### Environment
TensorFlow version: 2.0.0
Larq version: 0.7.3
| 2019-10-14T14:54:26 |
|
larq/larq | 319 | larq__larq-319 | [
"314"
] | e9222a645ce19ac409576cb25862ac435289a99c | diff --git a/larq/conftest.py b/larq/conftest.py
--- a/larq/conftest.py
+++ b/larq/conftest.py
@@ -1,4 +1,5 @@
import pytest
+import tensorflow as tf
from tensorflow.python.eager import context
@@ -21,3 +22,27 @@ def eager_and_graph_mode(request):
"""pytest fixture for running test in eager and graph mode"""
with getattr(context, f"{request.param}_mode")():
yield request.param
+
+
[email protected](params=["graph", "tf_eager", "tf_keras_eager"])
+def keras_should_run_eagerly(request):
+ """Fixture to run in graph and two eager modes.
+
+ The modes are:
+ - Graph mode
+ - TensorFlow eager and Keras eager
+ - TensorFlow eager and Keras not eager
+
+ The `tf.context` sets graph/eager mode for TensorFlow. The yield is True if Keras
+ should run eagerly.
+ """
+
+ if request.param == "graph":
+ if int(tf.__version__[0]) >= 2:
+ pytest.skip("Skipping graph mode for TensorFlow 2+.")
+
+ with context.graph_mode():
+ yield
+ else:
+ with context.eager_mode():
+ yield request.param == "tf_keras_eager"
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -27,7 +27,6 @@ def readme():
"tensorflow": ["tensorflow>=1.14.0"],
"tensorflow_gpu": ["tensorflow-gpu>=1.14.0"],
"test": [
- "absl-py==0.8.1",
"pytest==5.2.2",
"pytest-cov==2.8.1",
"pytest-xdist==1.30.0",
| diff --git a/larq/layers_test.py b/larq/layers_test.py
--- a/larq/layers_test.py
+++ b/larq/layers_test.py
@@ -1,79 +1,78 @@
import tensorflow as tf
import numpy as np
-from absl.testing import parameterized
import larq as lq
import pytest
import inspect
from larq import testing_utils
-from tensorflow.python.keras import keras_parameterized
-def random_input(shape):
- for i, dim in enumerate(shape):
- if dim is None:
- shape[i] = np.random.randint(1, 4)
- data = 10 * np.random.random(shape) - 0.5
- return data.astype("float32")
-
-
-parameterized_all_layers = parameterized.named_parameters(
- ("QuantDense", lq.layers.QuantDense, tf.keras.layers.Dense, (3, 2), dict(units=3)),
+PARAMS_ALL_LAYERS = [
+ (lq.layers.QuantDense, tf.keras.layers.Dense, (3, 2), dict(units=3)),
(
- "QuantConv1D",
lq.layers.QuantConv1D,
tf.keras.layers.Conv1D,
(2, 3, 7),
dict(filters=2, kernel_size=3),
),
(
- "QuantConv2D",
lq.layers.QuantConv2D,
tf.keras.layers.Conv2D,
(2, 3, 7, 6),
dict(filters=2, kernel_size=3),
),
(
- "QuantConv3D",
lq.layers.QuantConv3D,
tf.keras.layers.Conv3D,
(2, 3, 7, 6, 5),
dict(filters=2, kernel_size=3),
),
(
- "QuantConv2DTranspose",
lq.layers.QuantConv2DTranspose,
tf.keras.layers.Conv2DTranspose,
(2, 3, 7, 6),
dict(filters=2, kernel_size=3),
),
(
- "QuantConv3DTranspose",
lq.layers.QuantConv3DTranspose,
tf.keras.layers.Conv3DTranspose,
(2, 3, 7, 6, 5),
dict(filters=2, kernel_size=3),
),
(
- "QuantLocallyConnected1D",
lq.layers.QuantLocallyConnected1D,
tf.keras.layers.LocallyConnected1D,
(2, 8, 5),
dict(filters=4, kernel_size=3),
),
(
- "QuantLocallyConnected2D",
lq.layers.QuantLocallyConnected2D,
tf.keras.layers.LocallyConnected2D,
(8, 6, 10, 4),
dict(filters=3, kernel_size=3),
),
-)
+]
+
+PARAMS_SEP_LAYERS = [
+ (lq.layers.QuantSeparableConv1D, tf.keras.layers.SeparableConv1D, (2, 3, 7),),
+ (lq.layers.QuantSeparableConv2D, tf.keras.layers.SeparableConv2D, (2, 3, 7, 6),),
+]
-@keras_parameterized.run_all_keras_modes
-class LayersTest(keras_parameterized.TestCase):
- @parameterized_all_layers
- def test_binarization(self, quantized_layer, layer, input_shape, kwargs):
+def random_input(shape):
+ for i, dim in enumerate(shape):
+ if dim is None:
+ shape[i] = np.random.randint(1, 4)
+ data = 10 * np.random.random(shape) - 0.5
+ return data.astype("float32")
+
+
+class TestLayers:
+ @pytest.mark.parametrize(
+ "quantized_layer, layer, input_shape, kwargs", PARAMS_ALL_LAYERS
+ )
+ def test_binarization(
+ self, quantized_layer, layer, input_shape, kwargs, keras_should_run_eagerly
+ ):
input_data = random_input(input_shape)
random_weight = np.random.random() - 0.5
@@ -87,6 +86,7 @@ def test_binarization(self, quantized_layer, layer, input_shape, kwargs):
kernel_initializer=tf.keras.initializers.constant(random_weight),
),
input_data=input_data,
+ should_run_eagerly=keras_should_run_eagerly,
)
fp_output = testing_utils.layer_test(
@@ -98,54 +98,15 @@ def test_binarization(self, quantized_layer, layer, input_shape, kwargs):
),
),
input_data=np.sign(input_data),
+ should_run_eagerly=keras_should_run_eagerly,
)
- self.assertAllClose(quant_output, fp_output)
+ np.testing.assert_allclose(quant_output, fp_output)
- def test_depthwise_layers(self):
- input_data = random_input((2, 3, 7, 6))
- random_weight = np.random.random() - 0.5
-
- with lq.metrics.scope(["flip_ratio"]):
- quant_output = testing_utils.layer_test(
- lq.layers.QuantDepthwiseConv2D,
- kwargs=dict(
- kernel_size=3,
- depthwise_quantizer="ste_sign",
- input_quantizer="ste_sign",
- depthwise_initializer=tf.keras.initializers.constant(random_weight),
- ),
- input_data=input_data,
- )
-
- fp_output = testing_utils.layer_test(
- tf.keras.layers.DepthwiseConv2D,
- kwargs=dict(
- kernel_size=3,
- depthwise_initializer=tf.keras.initializers.constant(
- np.sign(random_weight)
- ),
- ),
- input_data=np.sign(input_data),
- )
-
- self.assertAllClose(quant_output, fp_output)
-
- @parameterized.named_parameters(
- (
- "QuantSeparableConv1D",
- lq.layers.QuantSeparableConv1D,
- tf.keras.layers.SeparableConv1D,
- (2, 3, 7),
- ),
- (
- "QuantSeparableConv2D",
- lq.layers.QuantSeparableConv2D,
- tf.keras.layers.SeparableConv2D,
- (2, 3, 7, 6),
- ),
- )
- def test_separable_layers(self, quantized_layer, layer, input_shape):
+ @pytest.mark.parametrize("quantized_layer, layer, input_shape", PARAMS_SEP_LAYERS)
+ def test_separable_layers(
+ self, quantized_layer, layer, input_shape, keras_should_run_eagerly
+ ):
input_data = random_input(input_shape)
random_d_kernel = np.random.random() - 0.5
random_p_kernel = np.random.random() - 0.5
@@ -167,6 +128,7 @@ def test_separable_layers(self, quantized_layer, layer, input_shape):
),
),
input_data=input_data,
+ should_run_eagerly=keras_should_run_eagerly,
)
fp_output = testing_utils.layer_test(
@@ -182,56 +144,84 @@ def test_separable_layers(self, quantized_layer, layer, input_shape):
),
),
input_data=np.sign(input_data),
+ should_run_eagerly=keras_should_run_eagerly,
)
- self.assertAllClose(quant_output, fp_output)
+ np.testing.assert_allclose(quant_output, fp_output)
+ def test_depthwise_layers(self, keras_should_run_eagerly):
+ input_data = random_input((2, 3, 7, 6))
+ random_weight = np.random.random() - 0.5
-def test_layer_warns(caplog):
- lq.layers.QuantDense(5, kernel_quantizer="ste_sign")
- assert len(caplog.records) >= 1
- assert "kernel_constraint" in caplog.text
+ with lq.metrics.scope(["flip_ratio"]):
+ quant_output = testing_utils.layer_test(
+ lq.layers.QuantDepthwiseConv2D,
+ kwargs=dict(
+ kernel_size=3,
+ depthwise_quantizer="ste_sign",
+ input_quantizer="ste_sign",
+ depthwise_initializer=tf.keras.initializers.constant(random_weight),
+ ),
+ input_data=input_data,
+ should_run_eagerly=keras_should_run_eagerly,
+ )
+ fp_output = testing_utils.layer_test(
+ tf.keras.layers.DepthwiseConv2D,
+ kwargs=dict(
+ kernel_size=3,
+ depthwise_initializer=tf.keras.initializers.constant(
+ np.sign(random_weight)
+ ),
+ ),
+ input_data=np.sign(input_data),
+ should_run_eagerly=keras_should_run_eagerly,
+ )
-def test_layer_does_not_warn(caplog):
- lq.layers.QuantDense(
- 5, kernel_quantizer="ste_sign", kernel_constraint="weight_clip"
- )
- assert caplog.records == []
+ np.testing.assert_allclose(quant_output, fp_output)
-def test_depthwise_layer_warns(caplog):
- lq.layers.QuantDepthwiseConv2D(5, depthwise_quantizer="ste_sign")
- assert len(caplog.records) >= 1
- assert "depthwise_constraint" in caplog.text
+class TestLayerWarns:
+ def test_layer_warns(self, caplog):
+ lq.layers.QuantDense(5, kernel_quantizer="ste_sign")
+ assert len(caplog.records) >= 1
+ assert "kernel_constraint" in caplog.text
+ def test_layer_does_not_warn(self, caplog):
+ lq.layers.QuantDense(
+ 5, kernel_quantizer="ste_sign", kernel_constraint="weight_clip"
+ )
+ assert caplog.records == []
-def test_depthwise_layer_does_not_warn(caplog):
- lq.layers.QuantDepthwiseConv2D(
- 5, depthwise_quantizer="ste_sign", depthwise_constraint="weight_clip"
- )
- assert caplog.records == []
+ def test_depthwise_layer_warns(self, caplog):
+ lq.layers.QuantDepthwiseConv2D(5, depthwise_quantizer="ste_sign")
+ assert len(caplog.records) >= 1
+ assert "depthwise_constraint" in caplog.text
+ def test_depthwise_layer_does_not_warn(self, caplog):
+ lq.layers.QuantDepthwiseConv2D(
+ 5, depthwise_quantizer="ste_sign", depthwise_constraint="weight_clip"
+ )
+ assert caplog.records == []
-def test_separable_layer_warns(caplog):
- lq.layers.QuantSeparableConv2D(
- 3, 3, depthwise_quantizer="ste_sign", pointwise_quantizer="ste_sign"
- )
- assert len(caplog.records) == 2
- assert "depthwise_constraint" in caplog.text
- assert "pointwise_constraint" in caplog.text
-
-
-def test_separable_layer_does_not_warn(caplog):
- lq.layers.QuantSeparableConv2D(
- 3,
- 3,
- depthwise_quantizer="ste_sign",
- pointwise_quantizer="ste_sign",
- depthwise_constraint="weight_clip",
- pointwise_constraint="weight_clip",
- )
- assert caplog.records == []
+ def test_separable_layer_warns(self, caplog):
+ lq.layers.QuantSeparableConv2D(
+ 3, 3, depthwise_quantizer="ste_sign", pointwise_quantizer="ste_sign"
+ )
+ assert len(caplog.records) == 2
+ assert "depthwise_constraint" in caplog.text
+ assert "pointwise_constraint" in caplog.text
+
+ def test_separable_layer_does_not_warn(self, caplog):
+ lq.layers.QuantSeparableConv2D(
+ 3,
+ 3,
+ depthwise_quantizer="ste_sign",
+ pointwise_quantizer="ste_sign",
+ depthwise_constraint="weight_clip",
+ pointwise_constraint="weight_clip",
+ )
+ assert caplog.records == []
def test_metrics():
diff --git a/larq/testing_utils.py b/larq/testing_utils.py
--- a/larq/testing_utils.py
+++ b/larq/testing_utils.py
@@ -2,9 +2,6 @@
import numpy as np
import tensorflow as tf
-# We should find a better solution without relying on private objects
-from tensorflow.python.keras.testing_utils import _thread_local_data, should_run_eagerly
-
def generate_real_values_with_zeros(low=-2, high=2, shape=(4, 10)):
real_values = np.random.uniform(low, high, shape)
@@ -48,6 +45,7 @@ def layer_test(
input_data=None,
expected_output=None,
expected_output_dtype=None,
+ should_run_eagerly=False,
):
"""Test routine for a layer with a single input and single output.
Arguments:
@@ -148,17 +146,10 @@ def layer_test(
# train(). This was causing some error for layer with Defun as it body.
# See b/120160788 for more details. This should be mitigated after 2.0.
model = tf.keras.models.Model(x, layer(x))
- if _thread_local_data.run_eagerly is not None:
- model.compile(
- "rmsprop",
- "mse",
- weighted_metrics=["acc"],
- run_eagerly=should_run_eagerly(),
- )
- model.train_on_batch(input_data, actual_output)
- else:
- model.compile("rmsprop", "mse", weighted_metrics=["acc"])
- model.train_on_batch(input_data, actual_output)
+ model.compile(
+ "rmsprop", "mse", weighted_metrics=["acc"], run_eagerly=should_run_eagerly,
+ )
+ model.train_on_batch(input_data, actual_output)
# test as first layer in Sequential API
layer_config = layer.get_config()
| Refactor layer test to use pytest parameterized test
Currently we use a mix of unittests written in `pytest` style and tests using `tf.test.TestCase` in larq. In #313 I added some simple `pytest` fixtures that allow to run tests in both eager and graph mode.
The only part of the test suite that uses `tf.test.TestCase` are the [layer tests](https://github.com/larq/larq/blob/f6f9277a006f6cb07b1e5f9bc591a087f2261152/larq/layers_test.py#L73-L187) which rely on [`keras_parameterized.run_all_keras_modes`](https://github.com/tensorflow/tensorflow/blob/669080135924d204a1c67dd556223d794efbf664/tensorflow/python/keras/keras_parameterized.py#L176-L303) and a forked/copied version of [`testing_utils. layer_test`](https://github.com/tensorflow/tensorflow/blob/669080135924d204a1c67dd556223d794efbf664/tensorflow/python/keras/testing_utils.py#L75-L261).
I think it would be a lot cleaner to write a simple [`pytest` fixture](https://pytest.org/en/latest/fixture.html#fixture) similar to #313 that allows us to run a normal [`pytest` parameterized test](https://pytest.org/en/latest/parametrize.html#pytest-mark-parametrize) in all Keras modes and adapt the layer tests to use it. That way we can get rid of `absl.testing` and some requirements on TensorFlow internals.
| 2019-11-12T17:21:22 |
|
larq/larq | 356 | larq__larq-356 | [
"347"
] | a13559182f42e805763cfd98f1772e2f2e335262 | diff --git a/larq/callbacks.py b/larq/callbacks.py
--- a/larq/callbacks.py
+++ b/larq/callbacks.py
@@ -4,26 +4,40 @@
class HyperparameterScheduler(tf.keras.callbacks.Callback):
"""Generic hyperparameter scheduler.
+ !!! example
+ ```python
+ bop = lq.optimizers.Bop(threshold=1e-6, gamma=1e-3)
+ adam = tf.keras.optimizers.Adam(0.01)
+ optimizer = lq.optimizers.CaseOptimizer(
+ (lq.optimizers.Bop.is_binary_variable, bop), default_optimizer=adam,
+ )
+ callbacks = [
+ HyperparameterScheduler(lambda x: 0.001 * (0.1 ** (x // 30)), "gamma", bop)
+ ]
+ ```
# Arguments
+ optimizer: the optimizer that contains the hyperparameter that will be scheduled.
+ Defaults to `self.model.optimizer` if `optimizer == None`.
schedule: a function that takes an epoch index as input
(integer, indexed from 0) and returns a new hyperparameter as output.
hyperparameter: str. the name of the hyperparameter to be scheduled.
verbose: int. 0: quiet, 1: update messages.
"""
- def __init__(self, schedule, hyperparameter, verbose=0):
+ def __init__(self, schedule, hyperparameter, optimizer=None, verbose=0):
super(HyperparameterScheduler, self).__init__()
+ self.optimizer = optimizer if optimizer else self.model.optimizer
self.schedule = schedule
self.hyperparameter = hyperparameter
self.verbose = verbose
def on_epoch_begin(self, epoch, logs=None):
- if not hasattr(self.model.optimizer, self.hyperparameter):
+ if not hasattr(self.optimizer, self.hyperparameter):
raise ValueError(
f'Optimizer must have a "{self.hyperparameter}" attribute.'
)
- hp = getattr(self.model.optimizer, self.hyperparameter)
+ hp = getattr(self.optimizer, self.hyperparameter)
try: # new API
hyperparameter_val = tf.keras.backend.get_value(hp)
hyperparameter_val = self.schedule(epoch, hyperparameter_val)
@@ -34,10 +48,10 @@ def on_epoch_begin(self, epoch, logs=None):
if self.verbose > 0:
print(
- f"Epoch {epoch + 1}: {self.hyperparameter} changning to {tf.keras.backend.get_value(hp)}."
+ f"Epoch {epoch + 1}: {self.hyperparameter} changing to {tf.keras.backend.get_value(hp)}."
)
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
- hp = getattr(self.model.optimizer, self.hyperparameter)
+ hp = getattr(self.optimizer, self.hyperparameter)
logs[self.hyperparameter] = tf.keras.backend.get_value(hp)
| diff --git a/larq/callbacks_test.py b/larq/callbacks_test.py
--- a/larq/callbacks_test.py
+++ b/larq/callbacks_test.py
@@ -1,4 +1,10 @@
+import numpy as np
import tensorflow as tf
+from tensorflow.python.keras import testing_utils
+
+import larq as lq
+from larq import testing_utils as lq_testing_utils
+from larq.callbacks import HyperparameterScheduler
class LogHistory(tf.keras.callbacks.Callback):
@@ -18,4 +24,76 @@ def on_epoch_end(self, epoch, logs={}):
class TestHyperparameterScheduler:
- pass
+ def test_hyper_parameter_scheduler(self):
+ np.random.seed(1337)
+ (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
+ train_samples=1000, test_samples=0, input_shape=(10,), num_classes=2
+ )
+
+ y_train = tf.keras.utils.to_categorical(y_train)
+
+ model = lq_testing_utils.get_small_bnn_model(
+ x_train.shape[1], 20, y_train.shape[1]
+ )
+
+ bop = lq.optimizers.Bop(threshold=1e-6, gamma=1e-3)
+ adam = tf.keras.optimizers.Adam(0.01)
+ case_optimizer = lq.optimizers.CaseOptimizer(
+ (lq.optimizers.Bop.is_binary_variable, bop), default_optimizer=adam,
+ )
+
+ model.compile(
+ loss="categorical_crossentropy",
+ optimizer=case_optimizer,
+ metrics=["accuracy"],
+ )
+
+ def scheduler(x):
+ return 1.0 / (1.0 + x)
+
+ cbk_gamma_scheduler = HyperparameterScheduler(
+ schedule=scheduler,
+ optimizer=model.optimizer.optimizers[0],
+ hyperparameter="gamma",
+ verbose=1,
+ )
+ cbk_threshold_scheduler = HyperparameterScheduler(
+ schedule=scheduler,
+ optimizer=model.optimizer.optimizers[0],
+ hyperparameter="threshold",
+ verbose=1,
+ )
+ cbk_lr_scheduler = HyperparameterScheduler(
+ schedule=scheduler,
+ optimizer=model.optimizer.optimizers[1],
+ hyperparameter="lr",
+ verbose=1,
+ )
+
+ num_epochs = 10
+ model.fit(
+ x_train,
+ y_train,
+ epochs=num_epochs,
+ batch_size=16,
+ callbacks=[cbk_gamma_scheduler, cbk_lr_scheduler, cbk_threshold_scheduler],
+ verbose=0,
+ )
+
+ np.testing.assert_almost_equal(
+ tf.keras.backend.get_value(model.optimizer.optimizers[0].gamma),
+ scheduler(num_epochs - 1),
+ decimal=8,
+ )
+
+ np.testing.assert_almost_equal(
+ tf.keras.backend.get_value(model.optimizer.optimizers[0].threshold),
+ scheduler(num_epochs - 1),
+ decimal=8,
+ )
+
+ np.testing.assert_almost_equal(
+ tf.keras.backend.get_value(model.optimizer.optimizers[1].lr),
+ scheduler(num_epochs - 1),
+ decimal=8,
+ )
| Make the HyperparameterScheduler compatible with the CaseOptimizer
### Feature motivation
The HyperparameterScheduler is not compatible with the CaseOptimizer since the hyperparameters are attributes of the optimizers inside the CaseOptimizer.
### Feature description
I propose one of the two possible solutions:
Either we could give HyperparameterScheduler the optimizer as an argument. It could be called via ``` HyperparameterScheduler(schedule, hyperparameter, optimizer, verbose=0) ``` and the right optimizer inside the CaseOptimizer can be addressed. (@koenhelwegen)
My second proposal would be to search the CaseOptimizer for optimizers that have the hyperparameter as attribute. Then the schedule can be applied to this optimizer only. The downside of this would be that in case there are two optimizers inside the CaseOptimizer that have a hyperparameter with the same name the schedule would be applied to both of them. I do not think this would happen very often but it could definitively be an issue. See code below for my second proposal.
### Feature implementation
``` python
class HyperparameterScheduler(tf.keras.callbacks.Callback):
"""Generic hyperparameter scheduler.
# Arguments
schedule: a function that takes an epoch index as input
(integer, indexed from 0) and returns a new hyperparameter as output.
hyperparameter: str. the name of the hyperparameter to be scheduled.
verbose: int. 0: quiet, 1: update messages.
"""
def __init__(self, schedule, hyperparameter, verbose=0):
super(HyperparameterScheduler, self).__init__()
self.schedule = schedule
self.hyperparameter = hyperparameter
self.verbose = verbose
def on_epoch_begin(self, epoch, logs=None):
for op in self.model.optimizer.optimizers:
if hasattr(op, self.hyperparameter):
hp = getattr(op, self.hyperparameter)
try: # new API
hyperparameter_val = tf.keras.backend.get_value(hp)
hyperparameter_val = self.schedule(epoch, hyperparameter_val)
except TypeError: # Support for old API for backward compatibility
hyperparameter_val = self.schedule(epoch)
tf.keras.backend.set_value(hp, hyperparameter_val)
if self.verbose > 0:
print(
f"Epoch {epoch + 1}: {self.hyperparameter} changning to {tf.keras.backend.get_value(hp)}."
)
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
for op in self.model.optimizer.optimizers:
if hasattr(op, self.hyperparameter):
hp = getattr(op, self.hyperparameter)
logs[self.hyperparameter] = tf.keras.backend.get_value(hp)
```
| I'm leaning towards the first option because it's a bit more explicit in what's happening; but one drawback could be that I'm not sure if/how it'd work when the optimizers get reloaded on an experiment restart/resume. So I'm also good with doing option two if we document it well.
Resuming and Restarting works fine with the first option. I am leaning to the first option as well since it gives more flexibility. I will make a PR. | 2019-12-16T11:41:07 |
larq/larq | 359 | larq__larq-359 | [
"358"
] | a13559182f42e805763cfd98f1772e2f2e335262 | diff --git a/larq/layers_base.py b/larq/layers_base.py
--- a/larq/layers_base.py
+++ b/larq/layers_base.py
@@ -2,8 +2,8 @@
import tensorflow as tf
-from larq import metrics as lq_metrics
-from larq import quantized_scope, quantized_variable, quantizers
+from larq import metrics as lq_metrics, quantized_scope, quantizers
+from larq.quantized_variable import QuantizedVariable
log = logging.getLogger(__name__)
@@ -27,7 +27,7 @@ def _add_variable_with_custom_getter(self, name, **kwargs):
# Wrap `getter` with a version that returns a `QuantizedVariable`.
def getter(*args, **kwargs):
variable = old_getter(*args, **kwargs)
- return quantized_variable.create_quantized_variable(variable, quantizer)
+ return QuantizedVariable.from_variable(variable, quantizer)
return super()._add_variable_with_custom_getter(name, getter=getter, **kwargs)
diff --git a/larq/quantized_variable.py b/larq/quantized_variable.py
--- a/larq/quantized_variable.py
+++ b/larq/quantized_variable.py
@@ -39,6 +39,63 @@ def __init__(self, variable, quantizer=None, precision=None):
self.quantizer = quantizer
self.precision = precision or getattr(quantizer, "precision", None)
+ @classmethod
+ def from_variable(cls, variable, quantizer=None, precision=None):
+ """Creates a QuantizedVariable that wraps another variable.
+
+ This typically just returns `QuantizedVariable(variable)`. But, if the variable
+ is a DistributedVariable or one of its subclasses, we instead dynamically
+ create a class that subclasses from both QuantizedVariable and
+ variable.__class__. This is so the returned variable will still pass
+ `isinstance(variable, variable.__class__)`, which is required for
+ DistributedVariables and its subclasses to work properly.
+
+ # Arguments
+ variable: A floating-point resource variable to wrap.
+ quantizer: An optional quantizer to transform the floating-point variable to a
+ fake quantized variable.
+ precision: An optional integer defining the precision of the quantized variable.
+ If `None`, `quantizer.precision` is used.
+
+ # Returns
+ A QuantizedVariable that wraps the variable.
+ """
+ if not isinstance(variable, DistributedVariable): # type: ignore
+ return cls(variable, quantizer, precision)
+
+ class QuantizedDistributedVariable(cls, variable.__class__):
+ """A QuantizedVariable that also subclasses from DistributedVariable."""
+
+ def get(self, *args, **kwargs):
+ # For some reason this is needed to make unit `x + x` pass on TF 1.14
+ return self._quantize(self.latent_variable.get(*args, **kwargs))
+
+ return QuantizedDistributedVariable(variable, quantizer, precision)
+
+ @staticmethod
+ def _maybe_wrap(variable, quantizer, precision, wrap=True):
+ """Creates an QuantizedVariable that wraps another variable if applicable.
+
+ This function is used to wrap the return value of QuantizedVariable.assign.
+ Unfortunately MirroredVariable.assign will (incorrectly) return a Mirrored
+ value instead of a MirroredVariable. So we cannot properly wrap it in an
+ AutoCastVariable. We return the original variable in that case.
+
+ # Arguments
+ variable: A tf.Variable or op.
+ quantizer: An optional quantizer to transform the floating-point variable to a
+ fake quantized variable.
+ precision: An optional integer defining the precision of the quantized variable.
+ If `None`, `quantizer.precision` is used.
+ wrap: A boolean to define whether to wrap the variable in an QuantizedVariable.
+
+ # Returns
+ An QuantizedVariable if wrap is True and variable is a resource variable.
+ """
+ if wrap and resource_variable_ops.is_resource_variable(variable):
+ return QuantizedVariable.from_variable(variable, quantizer, precision)
+ return variable
+
def _quantize(self, value):
if self.quantizer and quantized_scope.should_quantize():
return self.quantizer(value)
@@ -134,59 +191,59 @@ def constraint(self):
def assign(self, value, use_locking=None, name=None, read_value=True):
op = self.latent_variable.assign(value, use_locking, name, read_value)
- return _maybe_wrap(op, self.quantizer, self.precision, wrap=read_value)
+ return self._maybe_wrap(op, self.quantizer, self.precision, wrap=read_value)
def assign_add(self, delta, use_locking=None, name=None, read_value=True):
op = self.latent_variable.assign_add(delta, use_locking, name, read_value)
- return _maybe_wrap(op, self.quantizer, self.precision, wrap=read_value)
+ return self._maybe_wrap(op, self.quantizer, self.precision, wrap=read_value)
def assign_sub(self, delta, use_locking=None, name=None, read_value=True):
op = self.latent_variable.assign_sub(delta, use_locking, name, read_value)
- return _maybe_wrap(op, self.quantizer, self.precision, wrap=read_value)
+ return self._maybe_wrap(op, self.quantizer, self.precision, wrap=read_value)
def scatter_sub(self, *args, **kwargs):
var = self.latent_variable.scatter_sub(*args, **kwargs)
- return _maybe_wrap(var, self.quantizer, self.precision)
+ return self._maybe_wrap(var, self.quantizer, self.precision)
def scatter_add(self, *args, **kwargs):
var = self.latent_variable.scatter_add(*args, **kwargs)
- return _maybe_wrap(var, self.quantizer, self.precision)
+ return self._maybe_wrap(var, self.quantizer, self.precision)
def scatter_max(self, *args, **kwargs):
var = self.latent_variable.scatter_max(*args, **kwargs)
- return _maybe_wrap(var, self.quantizer, self.precision)
+ return self._maybe_wrap(var, self.quantizer, self.precision)
def scatter_min(self, *args, **kwargs):
var = self.latent_variable.scatter_min(*args, **kwargs)
- return _maybe_wrap(var, self.quantizer, self.precision)
+ return self._maybe_wrap(var, self.quantizer, self.precision)
def scatter_mul(self, *args, **kwargs):
var = self.latent_variable.scatter_mul(*args, **kwargs)
- return _maybe_wrap(var, self.quantizer, self.precision)
+ return self._maybe_wrap(var, self.quantizer, self.precision)
def scatter_div(self, *args, **kwargs):
var = self.latent_variable.scatter_div(*args, **kwargs)
- return _maybe_wrap(var, self.quantizer, self.precision)
+ return self._maybe_wrap(var, self.quantizer, self.precision)
def scatter_update(self, *args, **kwargs):
var = self.latent_variable.scatter_update(*args, **kwargs)
- return _maybe_wrap(var, self.quantizer, self.precision)
+ return self._maybe_wrap(var, self.quantizer, self.precision)
def batch_scatter_update(self, *args, **kwargs):
var = self.latent_variable.batch_scatter_update(*args, **kwargs)
- return _maybe_wrap(var, self.quantizer, self.precision)
+ return self._maybe_wrap(var, self.quantizer, self.precision)
def scatter_nd_sub(self, *args, **kwargs):
var = self.latent_variable.scatter_nd_sub(*args, **kwargs)
- return _maybe_wrap(var, self.quantizer, self.precision)
+ return self._maybe_wrap(var, self.quantizer, self.precision)
def scatter_nd_add(self, *args, **kwargs):
var = self.latent_variable.scatter_nd_add(*args, **kwargs)
- return _maybe_wrap(var, self.quantizer, self.precision)
+ return self._maybe_wrap(var, self.quantizer, self.precision)
def scatter_nd_update(self, *args, **kwargs):
var = self.latent_variable.scatter_nd_update(*args, **kwargs)
- return _maybe_wrap(var, self.quantizer, self.precision)
+ return self._maybe_wrap(var, self.quantizer, self.precision)
def count_up_to(self, *args, **kwargs):
return self.latent_variable.count_up_to(*args, **kwargs)
@@ -252,60 +309,3 @@ def _as_graph_element(self):
QuantizedVariable, QuantizedVariable._dense_var_to_tensor
)
ops.register_dense_tensor_like_type(QuantizedVariable)
-
-
-def create_quantized_variable(variable, quantizer=None, precision=None):
- """Creates a QuantizedVariable that wraps another variable.
-
- This typically just returns `QuantizedVariable(variable)`. But, if the variable
- is a DistributedVariable or one of its subclasses, we instead dynamically
- create a class that subclasses from both QuantizedVariable and
- variable.__class__. This is so the returned variable will still pass
- `isinstance(variable, variable.__class__)`, which is required for
- DistributedVariables and its subclasses to work properly.
-
- # Arguments
- variable: A floating-point resource variable to wrap.
- quantizer: An optional quantizer to transform the floating-point variable to a
- fake quantized variable.
- precision: An optional integer defining the precision of the quantized variable.
- If `None`, `quantizer.precision` is used.
-
- # Returns
- A QuantizedVariable that wraps the variable.
- """
- if not isinstance(variable, DistributedVariable): # type: ignore
- return QuantizedVariable(variable, quantizer, precision)
-
- class QuantizedDistributedVariable(QuantizedVariable, variable.__class__):
- """A QuantizedVariable that also subclasses from DistributedVariable."""
-
- def get(self, *args, **kwargs):
- # For some reason this is needed to make unit `x + x` pass on TF 1.14
- return self._quantize(self.latent_variable.get(*args, **kwargs))
-
- return QuantizedDistributedVariable(variable, quantizer, precision)
-
-
-def _maybe_wrap(variable, quantizer, precision, wrap=True):
- """Creates an QuantizedVariable that wraps another variable if applicable.
-
- This function is used to wrap the return value of QuantizedVariable.assign.
- Unfortunately MirroredVariable.assign will (incorrectly) return a Mirrored
- value instead of a MirroredVariable. So we cannot properly wrap it in an
- AutoCastVariable. We return the original variable in that case.
-
- # Arguments
- variable: A tf.Variable or op.
- quantizer: An optional quantizer to transform the floating-point variable to a
- fake quantized variable.
- precision: An optional integer defining the precision of the quantized variable.
- If `None`, `quantizer.precision` is used.
- wrap: A boolean to define whether to wrap the variable in an QuantizedVariable.
-
- # Returns
- An QuantizedVariable if wrap is True and variable is a resource variable.
- """
- if wrap and resource_variable_ops.is_resource_variable(variable):
- return create_quantized_variable(variable, quantizer, precision)
- return variable
| diff --git a/larq/quantized_variable_test.py b/larq/quantized_variable_test.py
--- a/larq/quantized_variable_test.py
+++ b/larq/quantized_variable_test.py
@@ -5,7 +5,7 @@
from tensorflow.python.distribute.values import DistributedVariable
from larq import quantized_scope
-from larq.quantized_variable import QuantizedVariable, create_quantized_variable
+from larq.quantized_variable import QuantizedVariable
from larq.testing_utils import evaluate
@@ -15,14 +15,14 @@ def get_var(val, dtype=None, name=None):
def test_inheritance(distribute_scope):
variable = get_var(3.0)
- quantized_variable = create_quantized_variable(variable)
+ quantized_variable = QuantizedVariable.from_variable(variable)
assert isinstance(quantized_variable, QuantizedVariable)
assert isinstance(quantized_variable, tf.Variable)
assert isinstance(quantized_variable, DistributedVariable) is distribute_scope # type: ignore
def test_read(distribute_scope, eager_and_graph_mode):
- x = create_quantized_variable(get_var(3.5), quantizer=lambda x: 2 * x)
+ x = QuantizedVariable.from_variable(get_var(3.5), quantizer=lambda x: 2 * x)
evaluate(x.initializer)
assert evaluate(x) == 3.5
@@ -38,7 +38,7 @@ def test_read(distribute_scope, eager_and_graph_mode):
def test_sparse_reads(eager_and_graph_mode):
- x = create_quantized_variable(get_var([1.0, 2.0]), quantizer=lambda x: 2 * x)
+ x = QuantizedVariable.from_variable(get_var([1.0, 2.0]), quantizer=lambda x: 2 * x)
evaluate(x.initializer)
assert evaluate(x.sparse_read([0])) == 1
@@ -49,7 +49,7 @@ def test_sparse_reads(eager_and_graph_mode):
def test_read_nested_scopes(distribute_scope, eager_and_graph_mode):
- x = create_quantized_variable(get_var(3.5), quantizer=lambda x: 2 * x)
+ x = QuantizedVariable.from_variable(get_var(3.5), quantizer=lambda x: 2 * x)
evaluate(x.initializer)
with quantized_scope.scope(True):
assert evaluate(x.read_value()) == 7
@@ -59,7 +59,7 @@ def test_read_nested_scopes(distribute_scope, eager_and_graph_mode):
def test_method_delegations(distribute_scope, eager_and_graph_mode):
- x = create_quantized_variable(get_var(3.5), quantizer=lambda x: 2 * x)
+ x = QuantizedVariable.from_variable(get_var(3.5), quantizer=lambda x: 2 * x)
with quantized_scope.scope(True):
evaluate(x.initializer)
assert evaluate(x.value()) == 7
@@ -91,7 +91,7 @@ def test_method_delegations(distribute_scope, eager_and_graph_mode):
def test_scatter_method_delegations(eager_and_graph_mode):
- x = create_quantized_variable(get_var([3.5, 4]), quantizer=lambda x: 2 * x)
+ x = QuantizedVariable.from_variable(get_var([3.5, 4]), quantizer=lambda x: 2 * x)
evaluate(x.initializer)
with quantized_scope.scope(True):
assert_array_equal(evaluate(x.value()), [7, 8])
@@ -120,9 +120,9 @@ def slices(val, index):
def test_overloads(quantized, distribute_scope, eager_and_graph_mode):
if quantized:
- x = create_quantized_variable(get_var(3.5), quantizer=lambda x: 2 * x)
+ x = QuantizedVariable.from_variable(get_var(3.5), quantizer=lambda x: 2 * x)
else:
- x = create_quantized_variable(get_var(7.0))
+ x = QuantizedVariable.from_variable(get_var(7.0))
evaluate(x.initializer)
assert_almost_equal(8, evaluate(x + 1))
assert_almost_equal(10, evaluate(3 + x))
@@ -155,11 +155,11 @@ def test_overloads(quantized, distribute_scope, eager_and_graph_mode):
def test_tensor_equality(quantized, eager_mode):
if quantized:
- x = create_quantized_variable(
+ x = QuantizedVariable.from_variable(
get_var([3.5, 4.0, 4.5]), quantizer=lambda x: 2 * x
)
else:
- x = create_quantized_variable(get_var([7.0, 8.0, 9.0]))
+ x = QuantizedVariable.from_variable(get_var([7.0, 8.0, 9.0]))
evaluate(x.initializer)
assert_array_equal(evaluate(x), [7.0, 8.0, 9.0])
if version.parse(tf.__version__) >= version.parse("2"):
@@ -168,7 +168,9 @@ def test_tensor_equality(quantized, eager_mode):
def test_assign(quantized, distribute_scope, eager_and_graph_mode):
- x = create_quantized_variable(get_var(0.0, tf.float64), quantizer=lambda x: 2 * x)
+ x = QuantizedVariable.from_variable(
+ get_var(0.0, tf.float64), quantizer=lambda x: 2 * x
+ )
evaluate(x.initializer)
latent_value = 3.14
@@ -214,7 +216,7 @@ def test_assign(quantized, distribute_scope, eager_and_graph_mode):
def test_checkpoint(tmp_path, eager_and_graph_mode):
- x = create_quantized_variable(get_var(0.0), quantizer=lambda x: 2 * x)
+ x = QuantizedVariable.from_variable(get_var(0.0), quantizer=lambda x: 2 * x)
evaluate(x.initializer)
evaluate(x.assign(123.0))
@@ -230,11 +232,11 @@ def test_checkpoint(tmp_path, eager_and_graph_mode):
def test_invalid_wrapped_usage(distribute_scope):
with pytest.raises(ValueError, match="`variable` must be of type"):
- create_quantized_variable(tf.constant([1.0]))
+ QuantizedVariable.from_variable(tf.constant([1.0]))
with pytest.raises(ValueError, match="`quantizer` must be `callable` or `None`"):
- create_quantized_variable(get_var([1.0]), 1)
+ QuantizedVariable.from_variable(get_var([1.0]), 1)
with pytest.raises(ValueError, match="`precision` must be of type `int` or `None`"):
- create_quantized_variable(get_var([1.0]), precision=1.0)
+ QuantizedVariable.from_variable(get_var([1.0]), precision=1.0)
def test_repr(snapshot, eager_and_graph_mode):
@@ -244,14 +246,18 @@ class Quantizer:
def __call__(self, x):
return x
- snapshot.assert_match(repr(create_quantized_variable(x, quantizer=lambda x: 2 * x)))
- snapshot.assert_match(repr(create_quantized_variable(x, quantizer=Quantizer())))
- snapshot.assert_match(repr(create_quantized_variable(x, precision=1)))
+ snapshot.assert_match(
+ repr(QuantizedVariable.from_variable(x, quantizer=lambda x: 2 * x))
+ )
+ snapshot.assert_match(
+ repr(QuantizedVariable.from_variable(x, quantizer=Quantizer()))
+ )
+ snapshot.assert_match(repr(QuantizedVariable.from_variable(x, precision=1)))
@pytest.mark.parametrize("should_quantize", [True, False])
def test_optimizer(eager_mode, should_quantize):
- x = create_quantized_variable(get_var(1.0), quantizer=lambda x: -x)
+ x = QuantizedVariable.from_variable(get_var(1.0), quantizer=lambda x: -x)
opt = tf.keras.optimizers.SGD(1.0)
def loss():
| Use metaclass to allow direct instantiation of QuantizedVariable
This allows us to remove the `create_quantized_variable` function at the expense of a slightly more complicated implementation. Users can now directly instantiate `QantizedVariable` and don't have to worry about correctly handling distribution strategies.
| 2019-12-17T19:48:55 |
|
larq/larq | 363 | larq__larq-363 | [
"361"
] | 178c7eddd0f567666a1264655410dc32aeb5d09e | diff --git a/larq/optimizers.py b/larq/optimizers.py
--- a/larq/optimizers.py
+++ b/larq/optimizers.py
@@ -238,7 +238,7 @@ class Bop(tf.keras.optimizers.Optimizer):
- [Latent Weights Do Not Exist: Rethinking Binarized Neural Network Optimization](https://papers.nips.cc/paper/8971-latent-weights-do-not-exist-rethinking-binarized-neural-network-optimization)
"""
- def __init__(self, threshold=1e-7, gamma=1e-2, name="Bop", **kwargs):
+ def __init__(self, threshold=1e-8, gamma=1e-4, name="Bop", **kwargs):
super().__init__(name=name, **kwargs)
self._set_hyper("threshold", threshold)
| Change default hyperparameters of Bop
The default hyperparameters for Bop are not really optimal (i.e. gamma is probably too high for most task): `Bop(threshold=1e-07, gamma=0.01, name='Bop', **kwargs)`
For example in our [paper](https://papers.nips.cc/paper/8971-latent-weights-do-not-exist-rethinking-binarized-neural-network-optimization.pdf) we used a gamma decayed from 1e-4 to 1e-6 and a threshold of 1e-8 for the ImageNet experiments.
I think we should update the default parameters to a more sensible choice. @MariaHeuss What do you think?
| Yes, I agree that we should change the default. The values that you are mentioning are working well for a variety of models, although sometimes letting gamma start at 3e-4 works even better. I would set the threshold to 1e-8 and gamma to 1e-4 by default. I can make a PR later. | 2019-12-18T20:50:45 |
|
larq/larq | 387 | larq__larq-387 | [
"246"
] | 332cd68ec52ee19e568216c049bb4da9256adc7c | diff --git a/larq/conftest.py b/larq/conftest.py
--- a/larq/conftest.py
+++ b/larq/conftest.py
@@ -19,6 +19,7 @@ def graph_mode():
with context.graph_mode():
with tf.compat.v1.Session().as_default():
yield
+ tf.keras.backend.clear_session()
@pytest.fixture(params=["eager", "graph"])
@@ -28,6 +29,7 @@ def eager_and_graph_mode(request):
with context.graph_mode():
with tf.compat.v1.Session().as_default():
yield request.param
+ tf.keras.backend.clear_session()
else:
with context.eager_mode():
yield request.param
diff --git a/larq/quantizers.py b/larq/quantizers.py
--- a/larq/quantizers.py
+++ b/larq/quantizers.py
@@ -41,9 +41,6 @@
lq.layers.QuantDense(64, kernel_quantizer="ste_sign")
```
```python
-lq.layers.QuantDense(64, kernel_quantizer=lq.quantizers.ste_sign)
-```
-```python
lq.layers.QuantDense(64, kernel_quantizer=lq.quantizers.SteSign(clip_value=1.0))
```
"""
@@ -55,14 +52,8 @@
from larq import math, utils
__all__ = [
- "ste_sign",
- "approx_sign",
- "magnitude_aware_sign",
- "swish_sign",
- "ste_tern",
- "ste_heaviside",
- "dorefa_quantizer",
"SteSign",
+ "ApproxSign",
"MagnitudeAwareSign",
"SwishSign",
"SteTern",
@@ -80,75 +71,7 @@ def _clipped_gradient(x, dy, clip_value):
return tf.where(mask, dy, zeros)
-class QuantizerFunctionWrapper:
- """Wraps a quantizer function in a class that can be serialized.
-
- # Arguments
- fn: The quantizer function to wrap, with signature `fn(x, **kwargs)`.
- **kwargs: The keyword arguments that are passed on to `fn`.
- """
-
- def __init__(self, fn: Callable[[tf.Tensor], tf.Tensor], **kwargs):
- self.fn = fn
- self.precision = getattr(fn, "precision", 32)
- self._fn_kwargs = kwargs
-
- def __call__(self, x: tf.Tensor) -> tf.Tensor:
- """Invokes the `QuantizerFunctionWrapper` instance.
-
- # Arguments
- x: Input tensor.
-
- # Returns
- Quantized tensor.
- """
- return self.fn(x, **self._fn_kwargs)
-
- def get_config(self):
- return {
- k: tf.keras.backend.eval(v)
- if tf.is_tensor(v) or isinstance(v, tf.Variable)
- else v
- for k, v in self._fn_kwargs.items()
- }
-
-
[email protected]_keras_custom_object
[email protected]_precision(1)
def ste_sign(x: tf.Tensor, clip_value: float = 1.0) -> tf.Tensor:
- r"""Sign binarization function.
-
- \\[
- q(x) = \begin{cases}
- -1 & x < 0 \\\
- 1 & x \geq 0
- \end{cases}
- \\]
-
- The gradient is estimated using the Straight-Through Estimator
- (essentially the binarization is replaced by a clipped identity on the
- backward pass).
- \\[\frac{\partial q(x)}{\partial x} = \begin{cases}
- 1 & \left|x\right| \leq \texttt{clip_value} \\\
- 0 & \left|x\right| > \texttt{clip_value}
- \end{cases}\\]
-
- ```plot-activation
- quantizers.ste_sign
- ```
-
- # Arguments
- x: Input tensor.
- clip_value: Threshold for clipping gradients. If `None` gradients are not clipped.
-
- # Returns
- Binarized tensor.
-
- # References
- - [Binarized Neural Networks: Training Deep Neural Networks with Weights and
- Activations Constrained to +1 or -1](http://arxiv.org/abs/1602.02830)
- """
-
@tf.custom_gradient
def _call(x):
def grad(dy):
@@ -163,72 +86,8 @@ def _scaled_sign(x): # pragma: no cover
return 1.3 * ste_sign(x)
[email protected]_keras_custom_object
[email protected]_precision(1)
-def magnitude_aware_sign(x: tf.Tensor, clip_value: float = 1.0) -> tf.Tensor:
- r"""Magnitude-aware sign for Bi-Real Net.
-
- A scaled sign function computed according to Section 3.3 in
- [Zechun Liu et al](https://arxiv.org/abs/1808.00278).
-
- ```plot-activation
- quantizers._scaled_sign
- ```
-
- # Arguments
- x: Input tensor
- clip_value: Threshold for clipping gradients. If `None` gradients are not clipped.
-
- # Returns
- Scaled binarized tensor (with values in \\(\\{-a, a\\}\\), where \\(a\\) is a float).
-
- # References
- - [Bi-Real Net: Enhancing the Performance of 1-bit CNNs With Improved
- Representational Capability and Advanced Training
- Algorithm](https://arxiv.org/abs/1808.00278)
-
- """
- scale_factor = tf.reduce_mean(tf.abs(x), axis=list(range(len(x.shape) - 1)))
-
- return tf.stop_gradient(scale_factor) * ste_sign(x, clip_value=clip_value)
-
-
[email protected]_keras_custom_object
[email protected]_precision(1)
@tf.custom_gradient
def approx_sign(x: tf.Tensor) -> tf.Tensor:
- r"""
- Sign binarization function.
- \\[
- q(x) = \begin{cases}
- -1 & x < 0 \\\
- 1 & x \geq 0
- \end{cases}
- \\]
-
- The gradient is estimated using the ApproxSign method.
- \\[\frac{\partial q(x)}{\partial x} = \begin{cases}
- (2 - 2 \left|x\right|) & \left|x\right| \leq 1 \\\
- 0 & \left|x\right| > 1
- \end{cases}
- \\]
-
- ```plot-activation
- quantizers.approx_sign
- ```
-
- # Arguments
- x: Input tensor.
-
- # Returns
- Binarized tensor.
-
- # References
- - [Bi-Real Net: Enhancing the Performance of 1-bit CNNs With Improved
- Representational Capability and Advanced
- Training Algorithm](http://arxiv.org/abs/1808.00278)
- """
-
def grad(dy):
abs_x = tf.math.abs(x)
zeros = tf.zeros_like(dy)
@@ -238,38 +97,7 @@ def grad(dy):
return math.sign(x), grad
[email protected]_keras_custom_object
[email protected]_precision(1)
def swish_sign(x: tf.Tensor, beta: float = 5.0) -> tf.Tensor:
- r"""Sign binarization function.
-
- \\[
- q(x) = \begin{cases}
- -1 & x < 0 \\\
- 1 & x \geq 0
- \end{cases}
- \\]
-
- The gradient is estimated using the SignSwish method.
-
- \\[
- \frac{\partial q_{\beta}(x)}{\partial x} = \frac{\beta\left\\{2-\beta x \tanh \left(\frac{\beta x}{2}\right)\right\\}}{1+\cosh (\beta x)}
- \\]
-
- ```plot-activation
- quantizers.swish_sign
- ```
- # Arguments
- x: Input tensor.
- beta: Larger values result in a closer approximation to the derivative of the sign.
-
- # Returns
- Binarized tensor.
-
- # References
- - [BNN+: Improved Binary Network Training](https://arxiv.org/abs/1812.11800)
- """
-
@tf.custom_gradient
def _call(x):
def grad(dy):
@@ -281,58 +109,12 @@ def grad(dy):
return _call(x)
[email protected]_keras_custom_object
[email protected]_precision(2)
def ste_tern(
x: tf.Tensor,
threshold_value: float = 0.05,
ternary_weight_networks: bool = False,
clip_value: float = 1.0,
) -> tf.Tensor:
- r"""Ternarization function.
-
- \\[
- q(x) = \begin{cases}
- +1 & x > \Delta \\\
- 0 & |x| < \Delta \\\
- -1 & x < - \Delta
- \end{cases}
- \\]
-
- where \\(\Delta\\) is defined as the threshold and can be passed as an argument,
- or can be calculated as per the Ternary Weight Networks original paper, such that
-
- \\[
- \Delta = \frac{0.7}{n} \sum_{i=1}^{n} |W_i|
- \\]
- where we assume that \\(W_i\\) is generated from a normal distribution.
-
- The gradient is estimated using the Straight-Through Estimator
- (essentially the Ternarization is replaced by a clipped identity on the
- backward pass).
- \\[\frac{\partial q(x)}{\partial x} = \begin{cases}
- 1 & \left|x\right| \leq \texttt{clip_value} \\\
- 0 & \left|x\right| > \texttt{clip_value}
- \end{cases}\\]
-
- ```plot-activation
- quantizers.ste_tern
- ```
-
- # Arguments
- x: Input tensor.
- threshold_value: The value for the threshold, \\(\Delta\\).
- ternary_weight_networks: Boolean of whether to use the
- Ternary Weight Networks threshold calculation.
- clip_value: Threshold for clipping gradients. If `None` gradients are not clipped.
-
- # Returns
- Ternarized tensor.
-
- # References
- - [Ternary Weight Networks](http://arxiv.org/abs/1605.04711)
- """
-
@tf.custom_gradient
def _call(x):
if ternary_weight_networks:
@@ -348,40 +130,7 @@ def grad(dy):
return _call(x)
[email protected]_keras_custom_object
[email protected]_precision(1)
def ste_heaviside(x: tf.Tensor, clip_value: float = 1.0) -> tf.Tensor:
- r"""
- Binarization function with output values 0 and 1.
-
- \\[
- q(x) = \begin{cases}
- +1 & x > 0 \\\
- 0 & x \leq 0
- \end{cases}
- \\]
-
- The gradient is estimated using the Straight-Through Estimator
- (essentially the binarization is replaced by a clipped identity on the
- backward pass).
-
- \\[\frac{\partial q(x)}{\partial x} = \begin{cases}
- 1 & \left|x\right| \leq 1 \\\
- 0 & \left|x\right| > 1
- \end{cases}\\]
-
- ```plot-activation
- quantizers.ste_heaviside
- ```
-
- # Arguments
- x: Input tensor.
- clip_value: Threshold for clipping gradients. If `None` gradients are not clipped.
-
- # Returns
- AND-binarized tensor.
- """
-
@tf.custom_gradient
def _call(x):
def grad(dy):
@@ -392,56 +141,54 @@ def grad(dy):
return _call(x)
[email protected]_alias("ste_sign")
@utils.register_keras_custom_object
[email protected]_precision(2)
-def dorefa_quantizer(x: tf.Tensor, k_bit: int = 2) -> tf.Tensor:
- r"""k_bit quantizer as in the DoReFa paper.
+class SteSign(tf.keras.layers.Layer):
+ r"""Instantiates a serializable binary quantizer.
\\[
q(x) = \begin{cases}
- 0 & x < \frac{1}{2n} \\\
- \frac{i}{n} & \frac{2i-1}{2n} < x < \frac{2i+1}{2n} \text{ for } i \in \\{1,n-1\\}\\\
- 1 & \frac{2n-1}{2n} < x
+ -1 & x < 0 \\\
+ 1 & x \geq 0
\end{cases}
\\]
- where \\(n = 2^{\text{k_bit}} - 1\\). The number of bits, k_bit, needs to be passed as an argument.
The gradient is estimated using the Straight-Through Estimator
(essentially the binarization is replaced by a clipped identity on the
backward pass).
\\[\frac{\partial q(x)}{\partial x} = \begin{cases}
- 1 & 0 \leq x \leq 1 \\\
- 0 & \text{else}
+ 1 & \left|x\right| \leq \texttt{clip_value} \\\
+ 0 & \left|x\right| > \texttt{clip_value}
\end{cases}\\]
```plot-activation
- quantizers.dorefa_quantizer
+ quantizers.SteSign
```
# Arguments
- k_bit: number of bits for the quantization.
-
- # Returns
- quantized tensor
+ clip_value: Threshold for clipping gradients. If `None` gradients are not clipped.
# References
- - [DoReFa-Net: Training Low Bitwidth Convolutional Neural Networks
- with Low Bitwidth Gradients](https://arxiv.org/abs/1606.06160)
+ - [Binarized Neural Networks: Training Deep Neural Networks with Weights and
+ Activations Constrained to +1 or -1](http://arxiv.org/abs/1602.02830)
"""
- x = tf.clip_by_value(x, 0.0, 1.0)
+ precision = 1
- @tf.custom_gradient
- def _k_bit_with_identity_grad(x):
- n = 2 ** k_bit - 1
- return tf.round(x * n) / n, lambda dy: dy
+ def __init__(self, clip_value: float = 1.0, **kwargs):
+ self.clip_value = clip_value
+ super().__init__(**kwargs)
+
+ def call(self, inputs):
+ return ste_sign(inputs, clip_value=self.clip_value)
- return _k_bit_with_identity_grad(x)
+ def get_config(self):
+ return {**super().get_config(), "clip_value": self.clip_value}
[email protected]_alias("approx_sign")
@utils.register_keras_custom_object
-class SteSign(QuantizerFunctionWrapper):
+class ApproxSign(tf.keras.layers.Layer):
r"""Instantiates a serializable binary quantizer.
-
\\[
q(x) = \begin{cases}
-1 & x < 0 \\\
@@ -449,32 +196,31 @@ class SteSign(QuantizerFunctionWrapper):
\end{cases}
\\]
- The gradient is estimated using the Straight-Through Estimator
- (essentially the binarization is replaced by a clipped identity on the
- backward pass).
+ The gradient is estimated using the ApproxSign method.
\\[\frac{\partial q(x)}{\partial x} = \begin{cases}
- 1 & \left|x\right| \leq \texttt{clip_value} \\\
- 0 & \left|x\right| > \texttt{clip_value}
- \end{cases}\\]
+ (2 - 2 \left|x\right|) & \left|x\right| \leq 1 \\\
+ 0 & \left|x\right| > 1
+ \end{cases}
+ \\]
```plot-activation
- quantizers.ste_sign
+ quantizers.ApproxSign
```
- # Arguments
- clip_value: Threshold for clipping gradients. If `None` gradients are not clipped.
-
# References
- - [Binarized Neural Networks: Training Deep Neural Networks with Weights and
- Activations Constrained to +1 or -1](http://arxiv.org/abs/1602.02830)
+ - [Bi-Real Net: Enhancing the Performance of 1-bit CNNs With Improved
+ Representational Capability and Advanced
+ Training Algorithm](http://arxiv.org/abs/1808.00278)
"""
+ precision = 1
- def __init__(self, clip_value: float = 1.0):
- super().__init__(ste_sign, clip_value=clip_value)
+ def call(self, inputs):
+ return approx_sign(inputs)
[email protected]_alias("ste_heaviside")
@utils.register_keras_custom_object
-class SteHeaviside(QuantizerFunctionWrapper):
+class SteHeaviside(tf.keras.layers.Layer):
r"""
Instantiates a binarization quantizer with output values 0 and 1.
\\[
@@ -494,7 +240,7 @@ class SteHeaviside(QuantizerFunctionWrapper):
\end{cases}\\]
```plot-activation
- quantizers.ste_heaviside
+ quantizers.SteHeaviside
```
# Arguments
@@ -503,13 +249,22 @@ class SteHeaviside(QuantizerFunctionWrapper):
# Returns
AND Binarization function
"""
+ precision = 1
+
+ def __init__(self, clip_value: float = 1.0, **kwargs):
+ self.clip_value = clip_value
+ super().__init__(**kwargs)
- def __init__(self, clip_value: float = 1.0):
- super().__init__(ste_heaviside, clip_value=clip_value)
+ def call(self, inputs):
+ return ste_heaviside(inputs, clip_value=self.clip_value)
+
+ def get_config(self):
+ return {**super().get_config(), "clip_value": self.clip_value}
[email protected]_alias("swish_sign")
@utils.register_keras_custom_object
-class SwishSign(QuantizerFunctionWrapper):
+class SwishSign(tf.keras.layers.Layer):
r"""Sign binarization function.
\\[
@@ -526,7 +281,7 @@ class SwishSign(QuantizerFunctionWrapper):
\\]
```plot-activation
- quantizers.swish_sign
+ quantizers.SwishSign
```
# Arguments
beta: Larger values result in a closer approximation to the derivative of the sign.
@@ -537,13 +292,22 @@ class SwishSign(QuantizerFunctionWrapper):
# References
- [BNN+: Improved Binary Network Training](https://arxiv.org/abs/1812.11800)
"""
+ precision = 1
+
+ def __init__(self, beta: float = 5.0, **kwargs):
+ self.beta = beta
+ super().__init__(**kwargs)
+
+ def call(self, inputs):
+ return swish_sign(inputs, beta=self.beta)
- def __init__(self, beta: float = 5.0):
- super().__init__(swish_sign, beta=beta)
+ def get_config(self):
+ return {**super().get_config(), "beta": self.beta}
[email protected]_alias("magnitude_aware_sign")
@utils.register_keras_custom_object
-class MagnitudeAwareSign(QuantizerFunctionWrapper):
+class MagnitudeAwareSign(tf.keras.layers.Layer):
r"""Instantiates a serializable magnitude-aware sign quantizer for Bi-Real Net.
A scaled sign function computed according to Section 3.3 in
@@ -562,13 +326,26 @@ class MagnitudeAwareSign(QuantizerFunctionWrapper):
Algorithm](https://arxiv.org/abs/1808.00278)
"""
+ precision = 1
+
+ def __init__(self, clip_value: float = 1.0, **kwargs):
+ self.clip_value = clip_value
+ super().__init__(**kwargs)
+
+ def call(self, inputs):
+ scale_factor = tf.stop_gradient(
+ tf.reduce_mean(tf.abs(inputs), axis=list(range(len(inputs.shape) - 1)))
+ )
- def __init__(self, clip_value: float = 1.0):
- super().__init__(magnitude_aware_sign, clip_value=clip_value)
+ return scale_factor * ste_sign(inputs, clip_value=self.clip_value)
+
+ def get_config(self):
+ return {**super().get_config(), "clip_value": self.clip_value}
[email protected]_alias("ste_tern")
@utils.register_keras_custom_object
-class SteTern(QuantizerFunctionWrapper):
+class SteTern(tf.keras.layers.Layer):
r"""Instantiates a serializable ternarization quantizer.
\\[
@@ -596,7 +373,7 @@ class SteTern(QuantizerFunctionWrapper):
\end{cases}\\]
```plot-activation
- quantizers.ste_tern
+ quantizers.SteTern
```
# Arguments
@@ -609,22 +386,40 @@ class SteTern(QuantizerFunctionWrapper):
- [Ternary Weight Networks](http://arxiv.org/abs/1605.04711)
"""
+ precision = 2
+
def __init__(
self,
threshold_value: float = 0.05,
ternary_weight_networks: bool = False,
clip_value: float = 1.0,
+ **kwargs,
):
- super().__init__(
- ste_tern,
- threshold_value=threshold_value,
- ternary_weight_networks=ternary_weight_networks,
- clip_value=clip_value,
+ self.threshold_value = threshold_value
+ self.ternary_weight_networks = ternary_weight_networks
+ self.clip_value = clip_value
+ super().__init__(**kwargs)
+
+ def call(self, inputs):
+ return ste_tern(
+ inputs,
+ threshold_value=self.threshold_value,
+ ternary_weight_networks=self.ternary_weight_networks,
+ clip_value=self.clip_value,
)
+ def get_config(self):
+ return {
+ **super().get_config(),
+ "threshold_value": self.threshold_value,
+ "ternary_weight_networks": self.ternary_weight_networks,
+ "clip_value": self.clip_value,
+ }
+
[email protected]_alias("dorefa_quantizer")
@utils.register_keras_custom_object
-class DoReFaQuantizer(QuantizerFunctionWrapper):
+class DoReFaQuantizer(tf.keras.layers.Layer):
r"""Instantiates a serializable k_bit quantizer as in the DoReFa paper.
\\[
@@ -645,7 +440,7 @@ class DoReFaQuantizer(QuantizerFunctionWrapper):
\end{cases}\\]
```plot-activation
- quantizers.dorefa_quantizer
+ quantizers.DoReFaQuantizer
```
# Arguments
@@ -658,16 +453,30 @@ class DoReFaQuantizer(QuantizerFunctionWrapper):
- [DoReFa-Net: Training Low Bitwidth Convolutional Neural Networks
with Low Bitwidth Gradients](https://arxiv.org/abs/1606.06160)
"""
+ precision = None
- def __init__(self, k_bit: int):
- super().__init__(dorefa_quantizer, k_bit=k_bit)
+ def __init__(self, k_bit: int = 2, **kwargs):
self.precision = k_bit
+ super().__init__(**kwargs)
+
+ def call(self, inputs):
+ inputs = tf.clip_by_value(inputs, 0.0, 1.0)
+
+ @tf.custom_gradient
+ def _k_bit_with_identity_grad(x):
+ n = 2 ** self.precision - 1
+ return tf.round(x * n) / n, lambda dy: dy
+
+ return _k_bit_with_identity_grad(inputs)
+
+ def get_config(self):
+ return {**super().get_config(), "k_bit": self.precision}
-Quantizer = Union[QuantizerFunctionWrapper, Callable[[tf.Tensor], tf.Tensor]]
+Quantizer = Union[tf.keras.layers.Layer, Callable[[tf.Tensor], tf.Tensor]]
-def serialize(quantizer: Quantizer):
+def serialize(quantizer: tf.keras.layers.Layer):
return tf.keras.utils.serialize_keras_object(quantizer)
diff --git a/plot_altair.py b/plot_altair.py
--- a/plot_altair.py
+++ b/plot_altair.py
@@ -1,3 +1,4 @@
+import inspect
import os
import uuid
from functools import reduce
@@ -44,6 +45,8 @@ def html_format(source, language=None, css_class=None, options=None, md=None):
def plot_activation(source, language=None, css_class=None, options=None, md=None):
function = reduce(getattr, [lq, *source.split(".")])
+ if inspect.isclass(function):
+ function = function()
x = np.linspace(-2, 2, 500)
y, dy = calculate_activation(function, x)
data = pd.DataFrame({"x": x, "y": y, "dy / dx": dy})
| diff --git a/larq/quantized_variable_test.py b/larq/quantized_variable_test.py
--- a/larq/quantized_variable_test.py
+++ b/larq/quantized_variable_test.py
@@ -21,7 +21,7 @@ def test_inheritance(distribute_scope):
assert isinstance(quantized_variable, DistributedVariable) is distribute_scope # type: ignore
-def test_read(distribute_scope, eager_and_graph_mode):
+def test_read(eager_and_graph_mode, distribute_scope):
x = QuantizedVariable.from_variable(get_var(3.5), quantizer=lambda x: 2 * x)
evaluate(x.initializer)
@@ -48,7 +48,7 @@ def test_sparse_reads(eager_and_graph_mode):
assert evaluate(x.gather_nd([0])) == 2
-def test_read_nested_scopes(distribute_scope, eager_and_graph_mode):
+def test_read_nested_scopes(eager_and_graph_mode, distribute_scope):
x = QuantizedVariable.from_variable(get_var(3.5), quantizer=lambda x: 2 * x)
evaluate(x.initializer)
with quantized_scope.scope(True):
@@ -58,7 +58,7 @@ def test_read_nested_scopes(distribute_scope, eager_and_graph_mode):
assert evaluate(x.read_value()) == 7
-def test_method_delegations(distribute_scope, eager_and_graph_mode):
+def test_method_delegations(eager_and_graph_mode, distribute_scope):
x = QuantizedVariable.from_variable(get_var(3.5), quantizer=lambda x: 2 * x)
with quantized_scope.scope(True):
evaluate(x.initializer)
@@ -118,7 +118,7 @@ def slices(val, index):
)
-def test_overloads(quantized, distribute_scope, eager_and_graph_mode):
+def test_overloads(eager_and_graph_mode, quantized, distribute_scope):
if quantized:
x = QuantizedVariable.from_variable(get_var(3.5), quantizer=lambda x: 2 * x)
else:
@@ -167,7 +167,7 @@ def test_tensor_equality(quantized, eager_mode):
assert_array_equal(x != [7.0, 8.0, 10.0], [False, False, True])
-def test_assign(quantized, distribute_scope, eager_and_graph_mode):
+def test_assign(eager_and_graph_mode, quantized, distribute_scope):
x = QuantizedVariable.from_variable(
get_var(0.0, tf.float64), quantizer=lambda x: 2 * x
)
diff --git a/larq/quantizers_test.py b/larq/quantizers_test.py
--- a/larq/quantizers_test.py
+++ b/larq/quantizers_test.py
@@ -1,6 +1,7 @@
import numpy as np
import pytest
import tensorflow as tf
+from packaging import version
import larq as lq
from larq import testing_utils
@@ -22,37 +23,32 @@ class TestCommonFunctionality:
@pytest.mark.parametrize("module", [lq.quantizers, tf.keras.activations])
@pytest.mark.parametrize(
- "name",
- ["ste_sign", "approx_sign", "magnitude_aware_sign", "swish_sign", "ste_tern"],
+ "name,ref_cls",
+ [
+ ("ste_sign", lq.quantizers.SteSign),
+ ("approx_sign", lq.quantizers.ApproxSign),
+ ("ste_heaviside", lq.quantizers.SteHeaviside),
+ ("magnitude_aware_sign", lq.quantizers.MagnitudeAwareSign),
+ ("swish_sign", lq.quantizers.SwishSign),
+ ("ste_tern", lq.quantizers.SteTern),
+ ],
)
- def test_serialization(self, module, name):
+ def test_serialization(self, module, name, ref_cls):
fn = module.get(name)
- ref_fn = getattr(lq.quantizers, name)
- assert fn == ref_fn
+ assert fn.__class__ == ref_cls
+ fn = module.get(ref_cls())
+ assert fn.__class__ == ref_cls
assert type(fn.precision) == int
+ if module == tf.keras.activations and version.parse(
+ tf.__version__
+ ) < version.parse("1.15"):
+ pytest.skip(
+ "TensorFlow < 1.15 does not support Quantizer classes as activations"
+ )
config = module.serialize(fn)
fn = module.deserialize(config)
- assert fn == ref_fn
+ assert fn.__class__ == ref_cls
assert type(fn.precision) == int
- fn = module.get(ref_fn)
- assert fn == ref_fn
- assert type(fn.precision) == int
-
- @pytest.mark.parametrize(
- "ref_fn",
- [
- lq.quantizers.SteSign(),
- lq.quantizers.SteHeaviside(),
- lq.quantizers.MagnitudeAwareSign(),
- lq.quantizers.SwishSign(),
- lq.quantizers.SteTern(),
- ],
- )
- def test_serialization_cls(self, ref_fn):
- assert type(ref_fn.precision) == int
- config = lq.quantizers.serialize(ref_fn)
- fn = lq.quantizers.deserialize(config)
- assert fn.__class__ == ref_fn.__class__
def test_invalid_usage(self):
with pytest.raises(ValueError):
@@ -84,6 +80,7 @@ class TestQuantization:
"ste_sign",
lq.quantizers.SteSign(),
"approx_sign",
+ lq.quantizers.ApproxSign(),
"swish_sign",
lq.quantizers.SwishSign(),
],
@@ -122,7 +119,7 @@ def test_and_binarization(self, fn):
def test_magnitude_aware_sign_binarization(self, eager_mode):
a = np.random.uniform(-2, 2, (3, 2, 2, 3))
x = tf.Variable(a)
- y = lq.quantizers.magnitude_aware_sign(x)
+ y = lq.quantizers.MagnitudeAwareSign()(x)
assert y.shape == x.shape
@@ -207,12 +204,9 @@ def test_ternarization_with_ternary_weight_networks(self):
assert not np.any(result > 1)
assert not np.any(result < -1)
- @pytest.mark.parametrize(
- "fn", [lq.quantizers.dorefa_quantizer, lq.quantizers.DoReFaQuantizer(2)]
- )
- def test_dorefa_quantize(self, fn):
+ def test_dorefa_quantize(self):
x = tf.keras.backend.placeholder(ndim=2)
- f = tf.keras.backend.function([x], [fn(x)])
+ f = tf.keras.backend.function([x], [lq.quantizers.DoReFaQuantizer(2)(x)])
real_values = testing_utils.generate_real_values_with_zeros()
result = f([real_values])[0]
k_bit = 2
@@ -234,19 +228,27 @@ class TestGradients:
@pytest.mark.parametrize(
"fn",
- [lq.quantizers.ste_sign, lq.quantizers.ste_tern, lq.quantizers.ste_heaviside],
+ [
+ lq.quantizers.SteSign(clip_value=None),
+ lq.quantizers.SteTern(clip_value=None),
+ lq.quantizers.SteHeaviside(clip_value=None),
+ ],
)
def test_identity_ste_grad(self, eager_mode, fn):
x = testing_utils.generate_real_values_with_zeros(shape=(8, 3, 3, 16))
tf_x = tf.Variable(x)
with tf.GradientTape() as tape:
- activation = fn(tf_x, clip_value=None)
+ activation = fn(tf_x)
grad = tape.gradient(activation, tf_x)
np.testing.assert_allclose(grad.numpy(), np.ones_like(x))
@pytest.mark.parametrize(
"fn",
- [lq.quantizers.ste_sign, lq.quantizers.ste_tern, lq.quantizers.ste_heaviside],
+ [
+ lq.quantizers.SteSign(),
+ lq.quantizers.SteTern(),
+ lq.quantizers.SteHeaviside(),
+ ],
)
def test_ste_grad(self, eager_mode, fn):
@np.vectorize
@@ -272,12 +274,12 @@ def swish_grad(x, beta):
x = testing_utils.generate_real_values_with_zeros(shape=(8, 3, 3, 16))
tf_x = tf.Variable(x)
with tf.GradientTape() as tape:
- activation = lq.quantizers.swish_sign(tf_x)
+ activation = lq.quantizers.SwishSign()(tf_x)
grad = tape.gradient(activation, tf_x)
np.testing.assert_allclose(grad.numpy(), swish_grad(x, beta=5.0))
with tf.GradientTape() as tape:
- activation = lq.quantizers.swish_sign(tf_x, beta=10.0)
+ activation = lq.quantizers.SwishSign(beta=10.0)(tf_x)
grad = tape.gradient(activation, tf_x)
np.testing.assert_allclose(grad.numpy(), swish_grad(x, beta=10.0))
@@ -291,7 +293,7 @@ def approx_sign_grad(x):
x = testing_utils.generate_real_values_with_zeros(shape=(8, 3, 3, 16))
tf_x = tf.Variable(x)
with tf.GradientTape() as tape:
- activation = lq.quantizers.approx_sign(tf_x)
+ activation = lq.quantizers.ApproxSign()(tf_x)
grad = tape.gradient(activation, tf_x)
np.testing.assert_allclose(grad.numpy(), approx_sign_grad(x))
@@ -299,7 +301,7 @@ def test_magnitude_aware_sign_grad(self, eager_mode):
a = np.random.uniform(-2, 2, (3, 2, 2, 3))
x = tf.Variable(a)
with tf.GradientTape() as tape:
- y = lq.quantizers.magnitude_aware_sign(x)
+ y = lq.quantizers.MagnitudeAwareSign()(x)
grad = tape.gradient(y, x)
scale_vector = [
| Documentation (Regarding to JOSS review)
Some suggestions:
In the quantizers documentation:
1. It seems that SteSign/ste_sign (if they are the same) is repeated twice [here](https://larq.dev/api/quantizers/#ste_sign). The same for swish_sign/SwishSign.
2. These quantizers (magnitude_aware_sign,swish_sign)have no formula, nor description.
| Thanks for the suggestions. The feedback is really valuable for us 👍
> 1. It seems that SteSign/ste_sign (if they are the same) is repeated twice here. The same for swish_sign/SwishSign.
Indeed they are computing the same thing. The difference is the usage. Larq allows three different usage patterns outlined [here](https://larq.dev/api/quantizers/#larqquantizers):
1. In a larq layer the quantizer can be referenced with a string. Compare `kernel_quantizer="ste_sign"` and `activation="relu"`.
2. The quantizer can also be called as a function. Compare `kernel_quantizer=lq.quantizers.ste_sign` and `activation=tf.nn.relu`.
3. If the default arguments of the quantizer need to be configured, it is important that the quantizer class is serializable. That means the quantizer should be called like `kernel_quantizer=lq.quantizers.SteSign(clip_value=1.25)` similar to `kernel_constraint=tf.keras.constraints.MaxNorm(max_value=2)`
Options 1 and 2 are really only here for ease of use. What we could do though is remove option 2 and replace it with `kernel_quantizer=lq.quantizers.ste_sign()` which would than mimik [Keras Constraints](https://keras.io/constraints/) and would only require us to alias `ste_sign` to `SteSign` so both would be classes which would make implementation simpler and usage consistent as well.
Though I am not sure if people would expect quantizers to be used like activations.
@EduPH @koenhelwegen @timdebruin @AdamHillier What do you think?
> 2. These quantizers (magnitude_aware_sign,swish_sign)have no formula, nor description.
I added more docs and formulas in #257
I think that it is natural to use them as activation functions. However, if you prefer to keep as it is now, at least it should be clearer on the documentation and unified in some sense. Maybe you could combine both definitions and include the different ways to use them together. I haven't got strong opposition to any option you decide to choose.
> I think that it is natural to use them as activation functions. However, if you prefer to keep as it is now, at least it should be clearer on the documentation and unified in some sense. Maybe you could combine both definitions and include the different ways to use them together. I haven't got strong opposition to any option you decide to choose.
I have to think about this a bit more, of what makes sense. For now I reordered the API docs to make the distinction clearer: #274 | 2020-01-16T20:54:43 |
larq/larq | 437 | larq__larq-437 | [
"410"
] | 5de7d49b72bfd751f5f1edc26b56be58f6a948e0 | diff --git a/larq/layers_base.py b/larq/layers_base.py
--- a/larq/layers_base.py
+++ b/larq/layers_base.py
@@ -3,18 +3,19 @@
import tensorflow as tf
-from larq import metrics as lq_metrics, quantized_scope, quantizers
+from larq import quantized_scope, quantizers
from larq.quantized_variable import QuantizedVariable
from larq.quantizers import Quantizer
log = logging.getLogger(__name__)
-# TODO: find a good way remove duplication between QuantizerBase, QuantizerDepthwiseBase and QuantizerSeparableBase
-
-
class BaseLayer(tf.keras.layers.Layer):
- """Base class for defining quantized layers"""
+ """Base class for defining quantized layers.
+
+ `input_quantizer` is the element-wise quantization functions to use.
+ If `input_quantizer=None` this layer is equivalent to `tf.keras.layers.Layer`.
+ """
def __init__(self, *args, input_quantizer=None, **kwargs):
self.input_quantizer = quantizers.get(input_quantizer)
@@ -52,17 +53,14 @@ def getter(*args, **kwargs):
class QuantizerBase(BaseLayer):
- """Base class for defining quantized layers
+ """Base class for defining quantized layers with a single kernel.
- `input_quantizer` and `kernel_quantizer` are the element-wise quantization
- functions to use. If both quantization functions are `None` this layer is
- equivalent to `Layer`.
+ `kernel_quantizer` is the element-wise quantization functions to use.
+ If `kernel_quantizer=None` this layer is equivalent to `BaseLayer`.
"""
def __init__(self, *args, kernel_quantizer=None, **kwargs):
- self.kernel_quantizer = quantizers.get(kernel_quantizer)
- if self.kernel_quantizer and not self.kernel_quantizer._custom_metrics:
- self.kernel_quantizer._custom_metrics = lq_metrics.get_training_metrics()
+ self.kernel_quantizer = quantizers.get_kernel_quantizer(kernel_quantizer)
super().__init__(*args, **kwargs)
if kernel_quantizer and not self.kernel_constraint:
@@ -82,19 +80,16 @@ def get_config(self):
class QuantizerDepthwiseBase(BaseLayer):
- """Base class for defining quantized layers
+ """Base class for defining depthwise quantized layers
- `input_quantizer` and `depthwise_quantizer` are the element-wise quantization
- functions to use. If both quantization functions are `None` this layer is
- equivalent to `Layer`.
+ `depthwise_quantizer` is the element-wise quantization functions to use.
+ If `depthwise_quantizer=None` this layer is equivalent to `BaseLayer`.
"""
def __init__(
self, *args, depthwise_quantizer: Optional[Quantizer] = None, **kwargs,
):
- self.depthwise_quantizer = quantizers.get(depthwise_quantizer)
- if self.depthwise_quantizer and not self.depthwise_quantizer._custom_metrics:
- self.depthwise_quantizer._custom_metrics = lq_metrics.get_training_metrics()
+ self.depthwise_quantizer = quantizers.get_kernel_quantizer(depthwise_quantizer)
super().__init__(*args, **kwargs)
if depthwise_quantizer and not self.depthwise_constraint:
@@ -114,13 +109,11 @@ def get_config(self):
class QuantizerSeparableBase(BaseLayer):
- """Base class for defining separable quantized layers
+ """Base class for defining separable quantized layers.
- `input_quantizer`, `depthwise_quantizer` and `pointwise_quantizer` are the
- element-wise quantization functions to use. If all quantization functions are `None`
- this layer is equivalent to `SeparableConv1D`. If `use_bias` is True and
- a bias initializer is provided, it adds a bias vector to the output.
- It then optionally applies an activation function to produce the final output.
+ `depthwise_quantizer` and `pointwise_quantizer` are the element-wise quantization
+ functions to use. If all quantization functions are `None` this layer is equivalent
+ to `BaseLayer`.
"""
def __init__(
@@ -130,13 +123,8 @@ def __init__(
pointwise_quantizer: Optional[Quantizer] = None,
**kwargs,
):
- self.depthwise_quantizer = quantizers.get(depthwise_quantizer)
- if self.depthwise_quantizer and not self.depthwise_quantizer._custom_metrics:
- self.depthwise_quantizer._custom_metrics = lq_metrics.get_training_metrics()
-
- self.pointwise_quantizer = quantizers.get(pointwise_quantizer)
- if self.pointwise_quantizer and not self.pointwise_quantizer._custom_metrics:
- self.pointwise_quantizer._custom_metrics = lq_metrics.get_training_metrics()
+ self.depthwise_quantizer = quantizers.get_kernel_quantizer(depthwise_quantizer)
+ self.pointwise_quantizer = quantizers.get_kernel_quantizer(pointwise_quantizer)
super().__init__(*args, **kwargs)
if depthwise_quantizer and not self.depthwise_constraint:
diff --git a/larq/quantizers.py b/larq/quantizers.py
--- a/larq/quantizers.py
+++ b/larq/quantizers.py
@@ -554,3 +554,18 @@ def get(identifier):
raise ValueError(
f"Could not interpret quantization function identifier: {identifier}"
)
+
+
+def get_kernel_quantizer(identifier):
+ """Returns a quantizer from identifier and adds default kernel quantizer metrics.
+
+ # Arguments
+ identifier: Function or string
+
+ # Returns
+ `Quantizer` or `None`
+ """
+ quantizer = get(identifier)
+ if quantizer and not quantizer._custom_metrics:
+ quantizer._custom_metrics = lq_metrics.get_training_metrics()
+ return quantizer
| Merge `QuantizerBase` and `BaseLayer`
### Feature motivation
We now have both a `BaseQuantizer` (in `quantizers.py`) and a `QuantizerBase` (in `layers_base.py`). This is confusing. Following #402, it should be easier to fuse `QuantizerBase` into `BaseLayer`. See [this PR comment](https://github.com/larq/larq/pull/402#discussion_r375925215).
### Feature description
Combine the `QuantizerBase` into `BaseLayer` classes.
| 2020-02-28T10:33:54 |
||
larq/larq | 446 | larq__larq-446 | [
"443"
] | a383827ef079210667c031d1d3c41de103532b0a | diff --git a/larq/optimizers.py b/larq/optimizers.py
--- a/larq/optimizers.py
+++ b/larq/optimizers.py
@@ -210,6 +210,13 @@ def _compute_var_opt_mapping(self, grads_and_vars):
f"No `default_optimizer` provided to train variable `{var}`."
)
+ # Make sure that each optimizer touches at least one variable
+ for optimizer_index, (_, optimizer) in enumerate(self.pred_opt_pairs):
+ if optimizer_index not in self.var_opt_mapping.values():
+ raise ValueError(
+ f"Optimizer `{optimizer}` did not claim any variables."
+ )
+
@utils.register_keras_custom_object
class Bop(tf.keras.optimizers.Optimizer):
| diff --git a/larq/optimizers_test.py b/larq/optimizers_test.py
--- a/larq/optimizers_test.py
+++ b/larq/optimizers_test.py
@@ -82,7 +82,40 @@ def test_overlapping_predicates(self):
def test_missing_default(self):
with pytest.warns(Warning):
naughty_case_opt = lq.optimizers.CaseOptimizer(
- (lambda var: False, lq.optimizers.Bop())
+ (lq.optimizers.Bop.is_binary_variable, lq.optimizers.Bop()),
+ )
+
+ # Simple MNIST model
+ mnist = tf.keras.datasets.mnist
+ (train_images, train_labels), _ = mnist.load_data()
+ model = tf.keras.Sequential(
+ [
+ tf.keras.layers.Flatten(input_shape=(28, 28)),
+ lq.layers.QuantDense(
+ 64,
+ input_quantizer="ste_sign",
+ kernel_quantizer=lq.quantizers.NoOpQuantizer(precision=1),
+ activation="relu",
+ ),
+ tf.keras.layers.Dense(10, activation="softmax"),
+ ]
+ )
+ model.compile(
+ loss="sparse_categorical_crossentropy",
+ optimizer=naughty_case_opt,
+ metrics=["acc"],
+ )
+
+ # Should raise on first call to apply_gradients()
+ model.fit(train_images[:1], train_labels[:1], epochs=1)
+
+ def test_wrong_predicate(self):
+ """Make sure we throw when an optimizer does not claim variables."""
+
+ with pytest.raises(ValueError):
+ naughty_case_opt = lq.optimizers.CaseOptimizer(
+ (lambda var: False, lq.optimizers.Bop()),
+ default_optimizer=tf.keras.optimizers.Adam(0.01),
)
# Simple MNIST model
| CaseOptimizer should throw if optimizer matches no variable
### Feature motivation
We are changing the default behaviour of Bop in #442. If users are not careful this could lead to hard to debug errors when no variable matches any optimizer.
### Feature description
We should throw if one optimizer in used by `CaseOptimizer` doesn't receive any variables.
### Feature implementation
We should throw if any of the [`len(opt_grads_and_vars) == 0`](https://github.com/larq/larq/blob/c44ed976812edc2cc7d60c98155513942fc3af98/larq/optimizers.py#L140).
| 2020-03-04T16:45:40 |
|
larq/larq | 449 | larq__larq-449 | [
"448"
] | fb72fa5f271c5458c558acfa1cf831a8fd98e98e | diff --git a/larq/models.py b/larq/models.py
--- a/larq/models.py
+++ b/larq/models.py
@@ -110,6 +110,14 @@ def memory(self) -> int:
def fp_equivalent_memory(self) -> int:
return 32 * self.count
+ @property
+ def int8_fp_weights_memory(self) -> int:
+ """Count any 32- or 16-bit weights as 8 bits instead."""
+
+ if self.bitwidth > 8:
+ return self.count * 8
+ return self.bitwidth * self.count
+
def is_bias(self) -> bool:
return "bias" in self._weight.name
@@ -148,6 +156,10 @@ def __init__(self, layer: tf.keras.layers.Layer):
def memory(self) -> int:
return sum(p.memory for p in self.weight_profiles)
+ @property
+ def int8_fp_weights_memory(self) -> int:
+ return sum(p.int8_fp_weights_memory for p in self.weight_profiles)
+
@property
def fp_equivalent_memory(self) -> int:
return sum(p.fp_equivalent_memory for p in self.weight_profiles)
@@ -247,6 +259,10 @@ def __init__(self, model: tf.keras.models.Model):
def memory(self) -> int:
return sum(l.memory for l in self.layer_profiles)
+ @property
+ def int8_fp_weights_memory(self) -> int:
+ return sum(l.int8_fp_weights_memory for l in self.layer_profiles)
+
@property
def fp_equivalent_memory(self) -> int:
return sum(l.fp_equivalent_memory for l in self.layer_profiles)
@@ -336,7 +352,11 @@ def generate_summary(
"Non-trainable params",
_number_as_readable_str(self.weight_count(trainable=False)),
],
- ["Model size:", memory_as_readable_str(self.memory)],
+ ["Model size", memory_as_readable_str(self.memory)],
+ [
+ "Model size (8-bit FP weights)",
+ memory_as_readable_str(self.int8_fp_weights_memory),
+ ],
["Float-32 Equivalent", memory_as_readable_str(self.fp_equivalent_memory)],
[
"Compression Ratio of Memory",
@@ -415,6 +435,7 @@ def summary(
- total number of trainable weights,
- total number of non-trainable weights,
- model size,
+ - model size (8-bit FP weights): memory footprint if FP weights were 8 bit,
- float-32 equivalent size: memory footprint if all weights were 32 bit,
- compression ratio achieved by quantizing weights,
- total number of MAC operations,
| diff --git a/larq/models_test.py b/larq/models_test.py
--- a/larq/models_test.py
+++ b/larq/models_test.py
@@ -57,7 +57,7 @@ def test_layer_profile():
]
bias_count = [32, 0, 0, 32, 0, 10]
param_count = [k + b for k, b in zip(kernel_count, bias_count)]
- memory = [
+ memory = [ # bits * (c * w * h * b) + bits * bias
1 * (32 * 3 * 3 * 1) + 32 * 32,
0,
2 * (32 * 3 * 3),
@@ -65,6 +65,14 @@ def test_layer_profile():
0,
32 * (32 * 11 * 11 * 10 + 10),
]
+ int8_fp_weights_mem = [
+ 1 * (32 * 3 * 3 * 1) + 8 * 32,
+ 0,
+ 2 * (32 * 3 * 3),
+ 1 * (32 * 3 * 3 * 1 + 32 * 1 * 1 * 32) + 8 * 32,
+ 0,
+ 8 * (32 * 11 * 11 * 10 + 10),
+ ]
fp_equiv_mem = [32 * n for n in param_count]
input_precision = [None, None, 2, 1, None, None]
output_shape = [
@@ -95,6 +103,7 @@ def test_layer_profile():
assert profiles[i].unique_op_precisions == unique_op_precisions[i]
assert profiles[i].memory == memory[i]
assert profiles[i].fp_equivalent_memory == fp_equiv_mem[i]
+ assert profiles[i].int8_fp_weights_memory == int8_fp_weights_mem[i]
assert profiles[i].op_count("mac") == mac_count[i]
assert profiles[i].op_count("mac", 1) == bin_mac_count[i]
diff --git a/larq/snapshots/snap_models_test.py b/larq/snapshots/snap_models_test.py
--- a/larq/snapshots/snap_models_test.py
+++ b/larq/snapshots/snap_models_test.py
@@ -4,6 +4,7 @@
from snapshottest import Snapshot
+
snapshots = Snapshot()
snapshots[
@@ -25,7 +26,8 @@
| Total params 40.7 k |
| Trainable params 1.95 k |
| Non-trainable params 38.7 k |
-| Model size: 151.80 KiB |
+| Model size 151.80 KiB |
+| Model size (8-bit FP weights) 38.15 KiB |
| Float-32 Equivalent 158.91 KiB |
| Compression Ratio of Memory 0.96 |
| Number of MACs 1.41 M |
@@ -44,13 +46,14 @@
+--------------------------------------+
| Total 0 |
+--------------------------------------+
-+sequential_1 summary-----------------+
-| Total params 0 |
-| Trainable params 0 |
-| Non-trainable params 0 |
-| Model size: 0.00 B |
-| Float-32 Equivalent 0.00 B |
-| Compression Ratio of Memory 0.00 |
-| Number of MACs 0 |
-+-------------------------------------+
++sequential_1 summary-------------------+
+| Total params 0 |
+| Trainable params 0 |
+| Non-trainable params 0 |
+| Model size 0.00 B |
+| Model size (8-bit FP weights) 0.00 B |
+| Float-32 Equivalent 0.00 B |
+| Compression Ratio of Memory 0.00 |
+| Number of MACs 0 |
++---------------------------------------+
"""
| Add int8 quantized size to model summary
### Feature motivation
Our model summary currently reports the following two metrics for model size:
```
| Model size: 151.80 KiB |
| Float-32 Equivalent 158.91 KiB |
```
This is calculated as follows for models that contain both binary and full precision (FP) weights:
| Metric | Binary weights | FP weights |
| --- | --- | --- |
| `Model size` | counted as 1 bit | counted as 32 bits |
| `Float-32 Equivalent` | counted as 32 bits | counted as 32 bits |
For models in which FP weights will be 8-bit quantized for inference, it'd be useful to also know how large that model would be.
### Feature description
I propose adding another metric to the model summary:
| Metric | bin weights | FP weights |
| --- | --- | --- |
| `int-8 quantized equivalent` | counted as 1 bit | counted as 8 bits |
### Feature implementation
Adding another property to [`ModelProfile`](https://github.com/larq/larq/blob/master/larq/models.py#L242), similar to:
```python
@property
def fp_equivalent_memory(self) -> int:
return sum(l.fp_equivalent_memory for l in self.layer_profiles)
```
And then adding it to the model summary printout.
| Yes would be a useful feature! I would call it `Model size (max bits/param=8)` or something like that though, to me `int-8 quantized` suggests binary weights are counted as 8-bit as well. | 2020-03-06T15:14:44 |
larq/larq | 468 | larq__larq-468 | [
"467"
] | bed73bfac2ba46066d82d445e3df25a4d6ef0640 | diff --git a/larq/optimizers.py b/larq/optimizers.py
--- a/larq/optimizers.py
+++ b/larq/optimizers.py
@@ -20,7 +20,7 @@
!!! example
```python
- no_op_quantizer = lq.quantizers.NoOpQuantizer(precision=1)
+ no_op_quantizer = lq.quantizers.NoOp(precision=1)
layer = lq.layers.QuantDense(16, kernel_quantizer=no_op_quantizer)
case_optimizer = lq.optimizers.CaseOptimizer(
@@ -243,11 +243,11 @@ class Bop(tf.keras.optimizers.Optimizer):
!!! warning
The `is_binary_variable` check of this optimizer will only target variables that
- have been explicitly marked as being binary using `NoOpQuantizer(precision=1)`.
+ have been explicitly marked as being binary using `NoOp(precision=1)`.
!!! example
```python
- no_op_quantizer = lq.quantizers.NoOpQuantizer(precision=1)
+ no_op_quantizer = lq.quantizers.NoOp(precision=1)
layer = lq.layers.QuantDense(16, kernel_quantizer=no_op_quantizer)
optimizer = lq.optimizers.CaseOptimizer(
diff --git a/larq/quantizers.py b/larq/quantizers.py
--- a/larq/quantizers.py
+++ b/larq/quantizers.py
@@ -52,14 +52,16 @@
from larq import context, math, metrics as lq_metrics, utils
__all__ = [
- "SteSign",
"ApproxSign",
- "MagnitudeAwareSign",
- "SwishSign",
- "SteTern",
- "SteHeaviside",
+ "DoReFa",
"DoReFaQuantizer",
+ "MagnitudeAwareSign",
+ "NoOp",
"NoOpQuantizer",
+ "SteHeaviside",
+ "SteSign",
+ "SteTern",
+ "SwishSign",
]
@@ -166,7 +168,7 @@ def non_trainable_weights(self):
@utils.register_keras_custom_object
-class NoOpQuantizer(BaseQuantizer):
+class NoOp(BaseQuantizer):
r"""Instantiates a serializable no-op quantizer.
\\[
@@ -204,6 +206,11 @@ def get_config(self):
return {**super().get_config(), "precision": self.precision}
+# `NoOp` used to be called `NoOpQuantizer`; this alias is for
+# backwards-compatibility.
+NoOpQuantizer = NoOp
+
+
@utils.register_alias("ste_sign")
@utils.register_keras_custom_object
class SteSign(BaseQuantizer):
@@ -508,7 +515,7 @@ def get_config(self):
@utils.register_alias("dorefa_quantizer")
@utils.register_keras_custom_object
-class DoReFaQuantizer(BaseQuantizer):
+class DoReFa(BaseQuantizer):
r"""Instantiates a serializable k_bit quantizer as in the DoReFa paper.
\\[
@@ -529,7 +536,7 @@ class DoReFaQuantizer(BaseQuantizer):
\end{cases}\\]
```plot-activation
- quantizers.DoReFaQuantizer
+ quantizers.DoReFa
```
# Arguments
@@ -566,6 +573,11 @@ def get_config(self):
return {**super().get_config(), "k_bit": self.precision}
+# `DoReFa` used to be called `DoReFaQuantizer`; this alias is for
+# backwards-compatibility.
+DoReFaQuantizer = DoReFa
+
+
Quantizer = Union[tf.keras.layers.Layer, Callable[[tf.Tensor], tf.Tensor]]
| diff --git a/larq/optimizers_test.py b/larq/optimizers_test.py
--- a/larq/optimizers_test.py
+++ b/larq/optimizers_test.py
@@ -94,7 +94,7 @@ def test_missing_default(self):
lq.layers.QuantDense(
64,
input_quantizer="ste_sign",
- kernel_quantizer=lq.quantizers.NoOpQuantizer(precision=1),
+ kernel_quantizer=lq.quantizers.NoOp(precision=1),
activation="relu",
),
tf.keras.layers.Dense(10, activation="softmax"),
@@ -145,7 +145,7 @@ def test_weights(self):
lq.layers.QuantDense(
64,
input_quantizer="ste_sign",
- kernel_quantizer=lq.quantizers.NoOpQuantizer(precision=1),
+ kernel_quantizer=lq.quantizers.NoOp(precision=1),
activation="relu",
),
tf.keras.layers.Dense(10, activation="softmax"),
diff --git a/larq/quantizers_test.py b/larq/quantizers_test.py
--- a/larq/quantizers_test.py
+++ b/larq/quantizers_test.py
@@ -53,12 +53,12 @@ def test_serialization(self, module, name, ref_cls):
assert type(fn.precision) == int
def test_noop_serialization(self):
- fn = lq.quantizers.get(lq.quantizers.NoOpQuantizer(precision=1))
- assert fn.__class__ == lq.quantizers.NoOpQuantizer
+ fn = lq.quantizers.get(lq.quantizers.NoOp(precision=1))
+ assert fn.__class__ == lq.quantizers.NoOp
assert fn.precision == 1
config = lq.quantizers.serialize(fn)
fn = lq.quantizers.deserialize(config)
- assert fn.__class__ == lq.quantizers.NoOpQuantizer
+ assert fn.__class__ == lq.quantizers.NoOp
assert fn.precision == 1
def test_invalid_usage(self):
@@ -217,7 +217,7 @@ def test_ternarization_with_ternary_weight_networks(self):
def test_dorefa_quantize(self):
x = tf.keras.backend.placeholder(ndim=2)
- f = tf.keras.backend.function([x], [lq.quantizers.DoReFaQuantizer(2)(x)])
+ f = tf.keras.backend.function([x], [lq.quantizers.DoReFa(2)(x)])
real_values = testing_utils.generate_real_values_with_zeros()
result = f([real_values])[0]
k_bit = 2
@@ -333,7 +333,7 @@ def ste_grad(x):
x = testing_utils.generate_real_values_with_zeros(shape=(8, 3, 3, 16))
tf_x = tf.Variable(x)
with tf.GradientTape() as tape:
- activation = lq.quantizers.DoReFaQuantizer(2)(tf_x)
+ activation = lq.quantizers.DoReFa(2)(tf_x)
grad = tape.gradient(activation, tf_x)
np.testing.assert_allclose(grad.numpy(), ste_grad(x))
@@ -347,7 +347,7 @@ def ste_grad(x):
("swish_sign", lq.quantizers.SwishSign),
("magnitude_aware_sign", lq.quantizers.MagnitudeAwareSign),
("ste_tern", lq.quantizers.SteTern),
- ("dorefa_quantizer", lq.quantizers.DoReFaQuantizer),
+ ("dorefa_quantizer", lq.quantizers.DoReFa),
],
)
def test_metrics(quantizer):
@@ -406,3 +406,8 @@ def test_get_kernel_quantizer_accepts_function():
custom_quantizer = lq.quantizers.get_kernel_quantizer(lambda x: x)
assert callable(custom_quantizer)
assert not hasattr(custom_quantizer, "_custom_metrics")
+
+
+def test_backwards_compat_aliases():
+ assert lq.quantizers.DoReFaQuantizer == lq.quantizers.DoReFa
+ assert lq.quantizers.NoOpQuantizer == lq.quantizers.NoOp
| Inconsistent quantiser names
This is very minor and I'm very happy for this to be a `wontfix`.
At the moment two out of eight quantizers have a "Quantizer" suffix:
<img width="251" alt="image" src="https://user-images.githubusercontent.com/7688302/77927969-c04f2400-729f-11ea-8e3d-d04c8d85367f.png">
It seems to me it should be all of them with the suffix or none of them with the suffix. We could potentially standardise on one version but have an alias to the other version.
| I have a slight preference to dropping the suffixes for the two that have them, but no strong opinion. Also in favor of standardizing.
I agree, I'm in favour of standardising as well :+1:
I think dropping the suffix makes the most sense since the quantizers will likely be used as `lq.quantizers.*`.
Would you want to add an alias for the two that would change so that this wouldn't be a breaking change?
> Would you want to add an alias for the two that would change so that this wouldn't be a breaking change?
I think I'd prefer that for now, since it's not much work.
We can remove the alias for good when we have to make some breaking changes elsewhere. | 2020-03-30T16:27:56 |
larq/larq | 480 | larq__larq-480 | [
"479"
] | cfbf76f33b78c394ce07a278d2c75879255e053d | diff --git a/larq/models.py b/larq/models.py
--- a/larq/models.py
+++ b/larq/models.py
@@ -150,7 +150,7 @@ def __init__(self, layer: tf.keras.layers.Layer):
self.op_profiles = []
- if isinstance(layer, mac_containing_layers):
+ if isinstance(layer, mac_containing_layers) and self.output_pixels:
for p in self.weight_profiles:
if not p.is_bias():
self.op_profiles.append(
@@ -190,7 +190,10 @@ def op_count(
if op_type != "mac":
raise ValueError("Currently only counting of MAC-operations is supported.")
- if isinstance(self._layer, op_count_supported_layer_types):
+ if (
+ isinstance(self._layer, op_count_supported_layer_types)
+ and self.output_pixels
+ ):
count = 0
for op in self.op_profiles:
if (precision is None or op.precision == precision) and (
@@ -222,14 +225,15 @@ def output_shape_str(self) -> str:
return "?"
@property
- def output_pixels(self) -> int:
+ def output_pixels(self) -> Optional[int]:
"""Number of pixels for a single feature map (1 for fully connected layers)."""
+ if not self.output_shape:
+ return None
if len(self.output_shape) == 4:
return int(np.prod(self.output_shape[1:3]))
- elif len(self.output_shape) == 2:
+ if len(self.output_shape) == 2:
return 1
- else:
- raise NotImplementedError()
+ raise NotImplementedError()
@property
def unique_param_bidtwidths(self) -> Sequence[int]:
| diff --git a/larq/models_test.py b/larq/models_test.py
--- a/larq/models_test.py
+++ b/larq/models_test.py
@@ -6,6 +6,23 @@
from larq.models import ModelProfile
+class ToyModel(tf.keras.Model):
+ def __init__(self, **kwargs):
+ super().__init__(**kwargs)
+ self.conv = lq.layers.QuantConv2D(
+ filters=32,
+ kernel_size=(3, 3),
+ kernel_quantizer="ste_sign",
+ input_shape=(64, 64, 1),
+ padding="same",
+ )
+ self.pool = tf.keras.layers.GlobalAvgPool2D()
+ self.dense = tf.keras.layers.Dense(10, activation="softmax")
+
+ def call(self, inputs):
+ return self.dense(self.pool(self.conv(inputs)))
+
+
def get_profile_model():
return tf.keras.models.Sequential(
[
@@ -129,6 +146,14 @@ def test_summary(snapshot, capsys):
snapshot.assert_match(captured.out)
+def test_subclass_model_summary(snapshot, capsys):
+ model = ToyModel()
+ model.build((None, 32, 32, 3))
+ lq.models.summary(model)
+ captured = capsys.readouterr()
+ snapshot.assert_match(captured.out)
+
+
def test_summary_invalid_model():
with pytest.raises(ValueError):
lq.models.summary(tf.keras.Model())
diff --git a/larq/snapshots/snap_models_test.py b/larq/snapshots/snap_models_test.py
--- a/larq/snapshots/snap_models_test.py
+++ b/larq/snapshots/snap_models_test.py
@@ -54,3 +54,25 @@
| Number of MACs 0 |
+---------------------------------------+
'''
+
+snapshots['test_subclass_model_summary 1'] = '''+toy_model stats-------------------------------------------------------------+
+| Layer Input prec. Outputs # 1-bit # 32-bit Memory |
+| (bit) x 1 x 1 (kB) |
++----------------------------------------------------------------------------+
+| quant_conv2d - multiple 864 32 0.23 |
+| global_average_pooling2d - multiple 0 0 0 |
+| dense - multiple 0 330 1.29 |
++----------------------------------------------------------------------------+
+| Total 864 362 1.52 |
++----------------------------------------------------------------------------+
++toy_model summary------------------------+
+| Total params 1.23 k |
+| Trainable params 1.23 k |
+| Non-trainable params 0 |
+| Model size 1.52 KiB |
+| Model size (8-bit FP weights) 470.00 B |
+| Float-32 Equivalent 4.79 KiB |
+| Compression Ratio of Memory 0.32 |
+| Number of MACs 0 |
++-----------------------------------------+
+'''
| Summary fails for subclassed keras models
### Describe the bug
When using subclassed Keras models, calling `larq.models.summary` fails and expects calling
`model.build` first.
Even if `build` is called, it fails with another less obvious error (seems to be a problem with output_shape)
### To Reproduce
```python
import larq
import tensorflow as tf
from tensorflow import keras
from larq_zoo.sota import QuickNet
from zookeeper import cli, task
from typing import Callable, Optional
class EmbedderWrapperModel(keras.Model):
def __init__(self, zoo_class: Callable[..., keras.Model],
input_shape: int, num_classes: int, dynamic=False,
finetune_basenet=True, pretrained_basenet=True, cut_layer_name: Optional[str] = None):
super(EmbedderWrapperModel, self).__init__(dynamic=dynamic)
self.basenet = self._get_basenet(zoo_class, input_shape, finetune_basenet, pretrained_basenet, cut_layer_name)
global_pool_shape = self.basenet.output_shape[1], self.basenet.output_shape[2]
self.batch_norm = keras.layers.BatchNormalization(momentum=0.9, epsilon=1e-5)
self.global_pool = keras.layers.AveragePooling2D(pool_size=global_pool_shape)
self.dense_softmax = keras.layers.Dense(num_classes, activation=tf.nn.softmax)
def _get_basenet(self, zoo_class: Callable[..., keras.Model], input_shape: int,
finetune_basenet: bool, pretrained_basenet: bool, cut_layer_name: Optional[str]) -> keras.Model:
weights = "imagenet" if pretrained_basenet else None
if not cut_layer_name:
basenet = zoo_class(input_shape=(input_shape, input_shape, 3), include_top=False, weights=weights)
else:
full_zoo_model = zoo_class(input_shape=(input_shape, input_shape, 3), include_top=True, weights=weights)
inputs, outputs = full_zoo_model.inputs, full_zoo_model.get_layer(cut_layer_name).output
basenet = keras.Model(inputs=inputs, outputs=outputs)
basenet.trainable = finetune_basenet
return basenet
def call(self, inputs, training=False, mask=None):
x = self.basenet(inputs, training=training)
x = self.batch_norm(x, training=training)
x = self.global_pool(x)
x = keras.layers.Flatten()(x)
x = self.dense_softmax(x)
return x
@task
class BugTest:
INPUT_SHAPE = 224
CLASSES_NUM = 10
FINETUNE_BASENET = False
PRETRAINED_BASENET = True
def run(self):
cut_layer_name = "activation" # `activation` is last relu layer before global pooling
model = EmbedderWrapperModel(QuickNet, self.INPUT_SHAPE, self.CLASSES_NUM,
finetune_basenet=self.FINETUNE_BASENET, pretrained_basenet=self.PRETRAINED_BASENET,
cut_layer_name=cut_layer_name)
model.build(input_shape=(None, 224, 224, 3))
larq.models.summary(model)
if __name__ == "__main__":
cli()
```
### Expected behavior
Expected `model.build` to solve the problem, instead got:
```
Traceback (most recent call last):
File "C:/Users/User/PycharmProjects/BNN-Playground/summary_bug.py", line 68, in <module>
cli()
File "C:\Users\User\Anaconda3\lib\site-packages\click\core.py", line 829, in __call__
return self.main(*args, **kwargs)
File "C:\Users\User\Anaconda3\lib\site-packages\click\core.py", line 782, in main
rv = self.invoke(ctx)
File "C:\Users\User\Anaconda3\lib\site-packages\click\core.py", line 1259, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "C:\Users\User\Anaconda3\lib\site-packages\click\core.py", line 1066, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "C:\Users\User\Anaconda3\lib\site-packages\click\core.py", line 610, in invoke
return callback(*args, **kwargs)
File "C:\Users\User\Anaconda3\lib\site-packages\zookeeper\core\task.py", line 59, in command
task_instance.run()
File "C:/Users/User/PycharmProjects/BNN-Playground/summary_bug.py", line 64, in run
larq.models.summary(model)
File "C:\Users\User\Anaconda3\lib\site-packages\larq\models.py", line 466, in summary
model_profile = ModelProfile(model)
File "C:\Users\User\Anaconda3\lib\site-packages\larq\models.py", line 261, in __init__
self.layer_profiles = [LayerProfile(l) for l in model.layers]
File "C:\Users\User\Anaconda3\lib\site-packages\larq\models.py", line 261, in <listcomp>
self.layer_profiles = [LayerProfile(l) for l in model.layers]
File "C:\Users\User\Anaconda3\lib\site-packages\larq\models.py", line 158, in __init__
n=p.count * self.output_pixels,
File "C:\Users\User\Anaconda3\lib\site-packages\larq\models.py", line 227, in output_pixels
if len(self.output_shape) == 4:
TypeError: object of type 'NoneType' has no len()
```
### Environment
TensorFlow version: 2.2.0rc3
Larq version: 0.9.4
| 2020-04-24T22:47:06 |
|
larq/larq | 485 | larq__larq-485 | [
"396"
] | 5d05d300ea2e58eac8f468276a51bced63f1e54c | diff --git a/larq/optimizers.py b/larq/optimizers.py
--- a/larq/optimizers.py
+++ b/larq/optimizers.py
@@ -139,10 +139,21 @@ def apply_gradients(self, grads_and_vars, name: Optional[str] = None, **kwargs):
if var.name in self.var_opt_mapping:
grad_var_lists[self.var_opt_mapping[var.name]].append((grad, var))
+ with tf.init_scope():
+ for optimizer, opt_grads_and_vars in zip(self.optimizers, grad_var_lists):
+ optimizer._create_slots([v for (_, v) in grads_and_vars])
+
+ return tf.distribute.get_replica_context().merge_call(
+ self._apply_gradients, args=(grad_var_lists, name), kwargs=kwargs
+ )
+
+ def _apply_gradients(self, distribution, grad_var_lists, name, **kwargs):
# Apply gradients to each optimizer
with tf.name_scope(self._name):
train_ops = [
- optimizer.apply_gradients(opt_grads_and_vars, **kwargs)
+ distribution.extended.call_for_each_replica(
+ optimizer.apply_gradients, args=(opt_grads_and_vars,), kwargs=kwargs
+ )
for optimizer, opt_grads_and_vars in zip(
self.optimizers, grad_var_lists
)
| `CaseOptimizer` broken on multi-GPU
### Describe the bug
When training a model using the `CaseOptimizer` on multi-GPU (4 of them in my case, both p100 and v100 will break), I get the following error:
```
WARNING:tensorflow:There is non-GPU devices in `tf.distribute.Strategy`, not using nccl allreduce.
distributed training: False
Train on 60000 samples
60000/60000 [==============================] - 4s 61us/sample - loss: 8.2390
Successfully fitted model
distributed training: True
Train on 60000 samples
INFO:tensorflow:Error reported to Coordinator: list index out of range
Traceback (most recent call last):
File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/training/coordinator.py", line 297, in stop_on_exception
yield
File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/distribute/mirrored_strategy.py", line 190, in _call_for_each_replica
**merge_kwargs)
File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/keras/optimizer_v2/optimizer_v2.py", line 446, in _distributed_apply
ds_reduce_util.ReduceOp.SUM, grads_and_vars)
File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/distribute/distribute_lib.py", line 1481, in batch_reduce_to
return self._batch_reduce_to(reduce_op, value_destination_pairs)
File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/distribute/mirrored_strategy.py", line 707, in _batch_reduce_to
value_destination_pairs)
File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/distribute/cross_device_ops.py", line 317, in batch_reduce
value_destination_pairs[0][0].values) == 1:
IndexError: list index out of range
32/60000 [..............................] - ETA: 10:32Exception raised:
list index out of range
```
### To Reproduce
```python
import contextlib
import numpy as np
import tensorflow.keras as keras
import larq as lq
import tensorflow as tf
def get_model():
model = keras.Sequential()
model.add(keras.layers.Flatten(input_shape=(28, 28)))
model.add(
lq.layers.QuantDense(
units=10,
input_quantizer="ste_sign",
kernel_quantizer="ste_sign",
kernel_constraint="weight_clip",
name="layer_1"
)
)
model.add(keras.layers.Dense(units=10, name="layer_2"))
def is_layer_1(var: tf.Variable) -> bool:
layer_name = var.name.split("/")[-2]
return layer_name == "layer_1"
optimizer = lq.optimizers.CaseOptimizer(
(is_layer_1, keras.optimizers.Adam()),
default_optimizer=keras.optimizers.Adam(),
)
# optimizer = keras.optimizers.Adam()
model.compile(
optimizer=optimizer, loss="sparse_categorical_crossentropy"
)
return model
def attempt_fit(distributed_training=False):
fashion_mnist = keras.datasets.fashion_mnist
(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()
train_images = train_images / 255.0
test_images = test_images / 255.0
with strategy.scope() if distributed_training else contextlib.nullcontext():
model = get_model()
model.fit(train_images, train_labels, epochs=1)
if __name__ == "__main__":
keras.backend.clear_session()
strategy = tf.distribute.MirroredStrategy()
for distributed_training in [False, True]:
print("distributed training: ", distributed_training)
try:
attempt_fit(distributed_training)
print("Successfully fitted model")
except Exception as e:
print("Exception raised: \n", e)
print()
```
For simplicity, you can change the predicate of the optimizer to `lambda x: False`; it makes no difference whether it actually selects any layers or not. Using `keras.optimizers.Adam` instead of the `CaseOptimizer` will work just fine.
### Expected behavior
I expected it to train, as it does in the single-GPU case.
### Environment
TensorFlow version: 2.0.0
Larq version: 0.8.3
| It's printing an interesting warning:
> WARNING:tensorflow:There is non-GPU devices in `tf.distribute.Strategy`, not using nccl allreduce.
Anyway, if I submit this to Polyaxon using a more complex environment, I get a different error:
```
2020-01-24 14:50:52 UTC -- Epoch 1/20
2020-01-24 14:50:55 UTC -- Traceback (most recent call last):
2020-01-24 14:50:55 UTC -- File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/distribute/values.py", line 470, in _type_spec
2020-01-24 14:50:55 UTC -- value_specs = [type_spec.type_spec_from_value(v) for v in self._values]
2020-01-24 14:50:55 UTC -- File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/distribute/values.py", line 470, in <listcomp>
2020-01-24 14:50:55 UTC -- value_specs = [type_spec.type_spec_from_value(v) for v in self._values]
2020-01-24 14:50:55 UTC -- File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/framework/type_spec.py", line 492, in type_spec_from_value
2020-01-24 14:50:55 UTC -- (value, type(value).__name__))
2020-01-24 14:50:55 UTC -- TypeError: Could not build a TypeSpec for <tf.Operation 'train_with_group' type=NoOp> with type Operation
2020-01-24 14:50:55 UTC --
2020-01-24 14:50:55 UTC -- The above exception was the direct cause of the following exception:
2020-01-24 14:50:55 UTC --
2020-01-24 14:50:55 UTC -- Traceback (most recent call last):
2020-01-24 14:50:55 UTC -- File "/usr/local/bin/project", line 11, in <module>
2020-01-24 14:50:55 UTC -- load_entry_point('research-project', 'console_scripts', 'project')()
2020-01-24 14:50:55 UTC -- File "/code/main.py", line 19, in cli
2020-01-24 14:50:55 UTC -- cli()
2020-01-24 14:50:55 UTC -- File "/usr/local/lib/python3.7/dist-packages/click/core.py", line 764, in __call__
2020-01-24 14:50:55 UTC -- return self.main(*args, **kwargs)
2020-01-24 14:50:55 UTC -- File "/usr/local/lib/python3.7/dist-packages/click/core.py", line 717, in main
2020-01-24 14:50:55 UTC -- rv = self.invoke(ctx)
2020-01-24 14:50:55 UTC -- File "/usr/local/lib/python3.7/dist-packages/click/core.py", line 1137, in invoke
2020-01-24 14:50:55 UTC -- return _process_result(sub_ctx.command.invoke(sub_ctx))
2020-01-24 14:50:55 UTC -- File "/usr/local/lib/python3.7/dist-packages/click/core.py", line 956, in invoke
2020-01-24 14:50:55 UTC -- return ctx.invoke(self.callback, **ctx.params)
2020-01-24 14:50:55 UTC -- File "/usr/local/lib/python3.7/dist-packages/click/core.py", line 555, in invoke
2020-01-24 14:50:55 UTC -- return callback(*args, **kwargs)
2020-01-24 14:50:55 UTC -- File "/usr/local/lib/python3.7/dist-packages/zookeeper/core/task.py", line 60, in command
2020-01-24 14:50:55 UTC -- task_instance.run()
2020-01-24 14:50:55 UTC -- File "/code/core/polyaxon_experiment.py", line 31, in wrapper
2020-01-24 14:50:55 UTC -- run_method(self)
2020-01-24 14:50:55 UTC -- File "/code/project/experiments/project_experiment.py", line 221, in run
2020-01-24 14:50:55 UTC -- callbacks=callbacks,
2020-01-24 14:50:55 UTC -- File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/keras/engine/training.py", line 728, in fit
2020-01-24 14:50:55 UTC -- use_multiprocessing=use_multiprocessing)
2020-01-24 14:50:55 UTC -- File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/keras/engine/training_v2.py", line 324, in fit
2020-01-24 14:50:55 UTC -- total_epochs=epochs)
2020-01-24 14:50:55 UTC -- File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/keras/engine/training_v2.py", line 123, in run_one_epoch
2020-01-24 14:50:55 UTC -- batch_outs = execution_function(iterator)
2020-01-24 14:50:55 UTC -- File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/keras/engine/training_v2_utils.py", line 86, in execution_function
2020-01-24 14:50:55 UTC -- distributed_function(input_fn))
2020-01-24 14:50:55 UTC -- File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/eager/def_function.py", line 457, in __call__
2020-01-24 14:50:55 UTC -- result = self._call(*args, **kwds)
2020-01-24 14:50:55 UTC -- File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/eager/def_function.py", line 503, in _call
2020-01-24 14:50:55 UTC -- self._initialize(args, kwds, add_initializers_to=initializer_map)
2020-01-24 14:50:55 UTC -- File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/eager/def_function.py", line 408, in _initialize
2020-01-24 14:50:55 UTC -- *args, **kwds))
2020-01-24 14:50:55 UTC -- File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/eager/function.py", line 1848, in _get_concrete_function_internal_garbage_collected
2020-01-24 14:50:55 UTC -- graph_function, _, _ = self._maybe_define_function(args, kwargs)
2020-01-24 14:50:55 UTC -- File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/eager/function.py", line 2150, in _maybe_define_function
2020-01-24 14:50:55 UTC -- graph_function = self._create_graph_function(args, kwargs)
2020-01-24 14:50:55 UTC -- File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/eager/function.py", line 2041, in _create_graph_function
2020-01-24 14:50:55 UTC -- capture_by_value=self._capture_by_value),
2020-01-24 14:50:55 UTC -- File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/framework/func_graph.py", line 915, in func_graph_from_py_func
2020-01-24 14:50:55 UTC -- func_outputs = python_func(*func_args, **func_kwargs)
2020-01-24 14:50:55 UTC -- File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/eager/def_function.py", line 358, in wrapped_fn
2020-01-24 14:50:55 UTC -- return weak_wrapped_fn().__wrapped__(*args, **kwds)
2020-01-24 14:50:55 UTC -- File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/keras/engine/training_v2_utils.py", line 73, in distributed_function
2020-01-24 14:50:55 UTC -- per_replica_function, args=(model, x, y, sample_weights))
2020-01-24 14:50:55 UTC -- File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/distribute/distribute_lib.py", line 760, in experimental_run_v2
2020-01-24 14:50:55 UTC -- return self._extended.call_for_each_replica(fn, args=args, kwargs=kwargs)
2020-01-24 14:50:55 UTC -- File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/distribute/distribute_lib.py", line 1787, in call_for_each_replica
2020-01-24 14:50:55 UTC -- return self._call_for_each_replica(fn, args, kwargs)
2020-01-24 14:50:55 UTC -- File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/distribute/mirrored_strategy.py", line 661, in _call_for_each_replica
2020-01-24 14:50:55 UTC -- fn, args, kwargs)
2020-01-24 14:50:55 UTC -- File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/distribute/mirrored_strategy.py", line 196, in _call_for_each_replica
2020-01-24 14:50:55 UTC -- coord.join(threads)
2020-01-24 14:50:55 UTC -- File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/training/coordinator.py", line 389, in join
2020-01-24 14:50:55 UTC -- six.reraise(*self._exc_info_to_raise)
2020-01-24 14:50:55 UTC -- File "/usr/local/lib/python3.7/dist-packages/six.py", line 703, in reraise
2020-01-24 14:50:55 UTC -- raise value
2020-01-24 14:50:55 UTC -- File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/training/coordinator.py", line 297, in stop_on_exception
2020-01-24 14:50:55 UTC -- yield
2020-01-24 14:50:55 UTC -- File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/distribute/mirrored_strategy.py", line 190, in _call_for_each_replica
2020-01-24 14:50:55 UTC -- **merge_kwargs)
2020-01-24 14:50:55 UTC -- File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/keras/mixed_precision/experimental/loss_scale_optimizer.py", line 241, in _apply_gradients_cross_replica
2020-01-24 14:50:55 UTC -- control_flow_ops.no_op)
2020-01-24 14:50:55 UTC -- File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/framework/smart_cond.py", line 59, in smart_cond
2020-01-24 14:50:55 UTC -- name=name)
2020-01-24 14:50:55 UTC -- File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/util/deprecation.py", line 507, in new_func
2020-01-24 14:50:55 UTC -- return func(*args, **kwargs)
2020-01-24 14:50:55 UTC -- File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/ops/control_flow_ops.py", line 1174, in cond
2020-01-24 14:50:55 UTC -- return cond_v2.cond_v2(pred, true_fn, false_fn, name)
2020-01-24 14:50:55 UTC -- File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/ops/cond_v2.py", line 84, in cond_v2
2020-01-24 14:50:55 UTC -- op_return_value=pred)
2020-01-24 14:50:55 UTC -- File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/framework/func_graph.py", line 920, in func_graph_from_py_func
2020-01-24 14:50:55 UTC -- expand_composites=True)
2020-01-24 14:50:55 UTC -- File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/util/nest.py", line 531, in map_structure
2020-01-24 14:50:55 UTC -- flat_structure = [flatten(s, expand_composites) for s in structure]
2020-01-24 14:50:55 UTC -- File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/util/nest.py", line 531, in <listcomp>
2020-01-24 14:50:55 UTC -- flat_structure = [flatten(s, expand_composites) for s in structure]
2020-01-24 14:50:55 UTC -- File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/util/nest.py", line 262, in flatten
2020-01-24 14:50:55 UTC -- return _pywrap_tensorflow.Flatten(structure, expand_composites)
2020-01-24 14:50:55 UTC -- File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/pywrap_tensorflow_internal.py", line 2651, in Flatten
2020-01-24 14:50:55 UTC -- return _pywrap_tensorflow_internal.Flatten(nested, expand_composites)
2020-01-24 14:50:55 UTC -- SystemError: <built-in function Flatten> returned a result with an error set
```
I think we had a similar issue with `Bop` in #286 where distribution strategy will fail if no variables are available to be assigned too via an optimizer : `TypeError: Could not build a TypeSpec for <tf.Operation 'train_with_group' type=NoOp> with type Operation`
Could you double check that both optimizers actually receive variables to train?
> I think we had a similar issue with `Bop` in #286 where distribution strategy will fail if no variables are available to be assigned too via an optimizer : `TypeError: Could not build a TypeSpec for <tf.Operation 'train_with_group' type=NoOp> with type Operation`
>
> Could you double check that both optimizers actually receive variables to train?
I have verified that `is_layer_1` returns `True` for the first layer and `False` for the second layer (when called from the CaseOptimizer). Is there a more direct way to check which variables are assigned to which optimizer?
I can reproduce the error above in this [notebook](https://colab.research.google.com/drive/1vU9P20rzPNd-ioiQhZYbruGXAxEvL3Ip) with TensorFlow 2.0.0. TensorFlow 2.1.0 doesn't seem to show this error.
I believe I also obtained this error with TF2.1.0, but will try again
> I believe I also obtained this error with TF2.1.0, but will try again
I can reproduce this issue in TF 1.15.x as well, but can't with 2.1. Checkout [this notebook](https://colab.research.google.com/drive/1BTTQVOyY737GtB_pddF-VvAdBrPN14HK)
@jneeven is this fixed for you by running experiments on TF2.1?
> @jneeven is this fixed for you by running experiments on TF2.1?
Nope, I still get the same error with TF2.1 (actually using multiple GPUs). I could not test the fake multi-GPU case, because I ran into some XLA problems.
I found that the case optimizer breaks on multi-gpu when using TF 2.0 due to a distribution strategy related error, however works fine when using TF 2.1 with lq.optimizers.Bop.is_binary_variable as a predicate.
Hi, I found the case optimizer works for both TF 2.0 and 2.1. The original script above can be fixed by calling `keras.backend.clear_session()` in between the single and multi gpu test ([notebook](https://colab.research.google.com/drive/1j5p4NZmYBWfQYLisAIVlhaYRs-IqSpH2)). I did encounter the problem with `SystemError: <built-in function Flatten> returned a result with an error set`, but only when running in reduced precision.
Could someone double check if this solves the problems with the case optimizer? If so, we can close this issue. | 2020-05-06T15:47:21 |
|
larq/larq | 596 | larq__larq-596 | [
"595"
] | 19869a823c470a269abfd58e8d3c532c95d9d005 | diff --git a/larq/__init__.py b/larq/__init__.py
--- a/larq/__init__.py
+++ b/larq/__init__.py
@@ -12,6 +12,14 @@
utils,
)
+try:
+ from importlib import metadata # type: ignore
+except ImportError:
+ # Running on pre-3.8 Python; use importlib-metadata package
+ import importlib_metadata as metadata # type: ignore
+
+__version__ = metadata.version("larq")
+
__all__ = [
"layers",
"activations",
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -22,6 +22,7 @@ def readme():
"numpy >= 1.15.4, < 2.0",
"terminaltables>=3.1.0",
"dataclasses ; python_version<'3.7'",
+ "importlib-metadata ~= 2.0 ; python_version<'3.8'",
],
extras_require={
"tensorflow": ["tensorflow>=1.14.0"],
| diff --git a/larq/version_test.py b/larq/version_test.py
new file mode 100644
--- /dev/null
+++ b/larq/version_test.py
@@ -0,0 +1,5 @@
+import larq
+
+
+def test_version():
+ assert hasattr(larq, "__version__") and "." in larq.__version__
| __version__
### Feature motivation
Is there a way to dynamically poll the version of larq (or lce or larq-zoo for that matter)?
If not, could it be done using `__version__` as usual for standard library modules?
### Feature description
```
import larq
print(larq.__version__)
```
| Currently we don't have a `__version__` identifier in larq, but I agree that it would be good to add since many other Python packages adhere to this convention as well.
Would you like to send a PR to fix this? Probably the first solution mentioned in https://packaging.python.org/guides/single-sourcing-package-version/ is the easiest to maintain, but I'm not sure what the general best practices here are.
In the meantime you can retrieve the version using:
```python
from importlib import metadata;
print(metadata.version("larq"))
```
| 2020-10-21T17:15:14 |
larq/larq | 621 | larq__larq-621 | [
"620"
] | 094eaf1cca5017cdc643bf68bbb1301ab3f1af0f | diff --git a/larq/quantizers.py b/larq/quantizers.py
--- a/larq/quantizers.py
+++ b/larq/quantizers.py
@@ -157,6 +157,7 @@ def __init__(self, *args, metrics=None, **kwargs):
def build(self, input_shape):
if self._custom_metrics and "flip_ratio" in self._custom_metrics:
self.flip_ratio = lq_metrics.FlipRatio(name=f"flip_ratio/{self.name}")
+ super().build(input_shape)
def call(self, inputs):
if hasattr(self, "flip_ratio"):
| build() call not forwarded via base class
My use case: I develop a custom quantizer class, which derives from *larq.quantizers.BaseQuantizer* and which is then used like any other pre-defined quantizer. The custom quantizer defines a *build()* method.
Inside [BaseQuantizer.build](https://github.com/larq/larq/blob/094eaf1cca5017cdc643bf68bbb1301ab3f1af0f/larq/quantizers.py#L157) the method call to *build* is not forwarded via its base class *tf.keras.layers.Layer*.
In the [Tutorial about custom layers](https://www.tensorflow.org/guide/keras/custom_layers_and_models#best_practice_deferring_weight_creation_until_the_shape_of_the_inputs_is_known) this is also skipped. But when looking at [the build-method code in the base class](https://github.com/tensorflow/tensorflow/blob/85c8b2a817f95a3e979ecd1ed95bff1dc1335cff/tensorflow/python/keras/engine/base_layer.py#L448-L467), it is seen that the call should be forwarded, in order to set *self.built*, such that it is ensured, that the build triggered via *call* is only done once.
This is not an issue in most cases, as most users do not use explicit calls to *build* and use implicit builds triggered via *tf.keras.layers.Layer.call*, only. In my case, the quantizer is built explicitly before using *call* the first time and *call* tries to build the layer again, which then fails, eventhough I use
```python
super().build(shape)
```
in my custom *build()* to forward the call into larq. If the build call would be forward in larq, too, the *self.built* attribute would be set during the first (explicit) *build* call and the second (implicit) *build* call triggered by *tf.keras.layers.Layer.call* would be skipped.
Thank you for reading this!
| 2021-02-12T14:38:58 |
||
ycm-core/ycmd | 280 | ycm-core__ycmd-280 | [
"277"
] | 24f5d51be3a92a746dfbd932d480e776842b3169 | diff --git a/ycmd/completers/javascript/tern_completer.py b/ycmd/completers/javascript/tern_completer.py
--- a/ycmd/completers/javascript/tern_completer.py
+++ b/ycmd/completers/javascript/tern_completer.py
@@ -70,6 +70,13 @@ def ShouldEnableTernCompleter():
return True
+def GlobalConfigExists( tern_config ):
+ """Returns whether or not the global config file with the supplied path
+ exists. This method primarily exists to allow testability and simply returns
+ whether the supplied file exists."""
+ return os.path.exists( tern_config )
+
+
def FindTernProjectFile( starting_directory ):
# We use a dummy_file here because AncestorFolders requires a file name and we
# don't have one. Something like '.' doesn't work because, while
@@ -81,6 +88,16 @@ def FindTernProjectFile( starting_directory ):
if os.path.exists( tern_project ):
return tern_project
+ # As described here: http://ternjs.net/doc/manual.html#server a global
+ # .tern-config file is also supported for the Tern server. This can provide
+ # meaningful defaults (for libs, and possibly also for require paths), so
+ # don't warn if we find one. The point is that if the user has a .tern-config
+ # set up, then she has deliberately done so and a ycmd warning is unlikely
+ # to be anything other than annoying.
+ tern_config = os.path.expanduser( '~/.tern-config' )
+ if GlobalConfigExists( tern_config ):
+ return tern_config
+
return None
@@ -107,6 +124,11 @@ def __init__( self, user_options ):
def _WarnIfMissingTernProject( self ):
+ # The Tern server will operate without a .tern-project file. However, it
+ # does not operate optimally, and will likely lead to issues reported that
+ # JavaScript completion is not working properly. So we raise a warning if we
+ # aren't able to detect some semblance of manual Tern configuration.
+
# We do this check after the server has started because the server does
# have nonzero use without a project file, however limited. We only do this
# check once, though because the server can only handle one project at a
@@ -120,7 +142,8 @@ def _WarnIfMissingTernProject( self ):
if not tern_project:
_logger.warning( 'No .tern-project file detected: ' + os.getcwd() )
raise RuntimeError( 'Warning: Unable to detect a .tern-project file '
- 'in the hierarchy before ' + os.getcwd() + '. '
+ 'in the hierarchy before ' + os.getcwd() +
+ ' and no global .tern-config file was found. '
'This is required for accurate JavaScript '
'completion. Please see the User Guide for '
'details.' )
| diff --git a/ycmd/tests/javascript/event_notification_test.py b/ycmd/tests/javascript/event_notification_test.py
--- a/ycmd/tests/javascript/event_notification_test.py
+++ b/ycmd/tests/javascript/event_notification_test.py
@@ -26,7 +26,7 @@
from pprint import pformat
import httplib
import os
-
+from mock import patch
class Javascript_EventNotification_test( Javascript_Handlers_test ):
@@ -60,7 +60,9 @@ def OnFileReadyToParse_ProjectFile_parentdir_test( self ):
assert_that( response.json, empty() )
- def OnFileReadyToParse_NoProjectFile_test( self ):
+ @patch( 'ycmd.completers.javascript.tern_completer.GlobalConfigExists',
+ return_value = False )
+ def OnFileReadyToParse_NoProjectFile_test( self, *args ):
# We raise an error if we can't detect a .tern-project file.
# We only do this on the first OnFileReadyToParse event after a
# server startup.
@@ -83,7 +85,8 @@ def OnFileReadyToParse_NoProjectFile_test( self ):
response.json,
self._ErrorMatcher( RuntimeError,
'Warning: Unable to detect a .tern-project file '
- 'in the hierarchy before ' + os.getcwd() + '. '
+ 'in the hierarchy before ' + os.getcwd() +
+ ' and no global .tern-config file was found. '
'This is required for accurate JavaScript '
'completion. Please see the User Guide for '
'details.' )
@@ -136,8 +139,28 @@ def OnFileReadyToParse_NoProjectFile_test( self ):
response.json,
self._ErrorMatcher( RuntimeError,
'Warning: Unable to detect a .tern-project file '
- 'in the hierarchy before ' + os.getcwd() + '. '
+ 'in the hierarchy before ' + os.getcwd() +
+ ' and no global .tern-config file was found. '
'This is required for accurate JavaScript '
'completion. Please see the User Guide for '
'details.' )
)
+
+
+ @patch( 'ycmd.completers.javascript.tern_completer.GlobalConfigExists',
+ return_value = True )
+ def OnFileReadyToParse_UseGlobalConfig_test( self, *args ):
+ os.chdir( self._PathToTestFile( '..' ) )
+
+ contents = open( self._PathToTestFile( 'simple_test.js' ) ).read()
+
+ response = self._app.post_json( '/event_notification',
+ self._BuildRequest(
+ event_name = 'FileReadyToParse',
+ contents = contents,
+ filetype = 'javascript' ),
+ expect_errors = True )
+
+ print( 'event response: {0}'.format( pformat( response.json ) ) )
+
+ eq_( response.status_code, httplib.OK )
| Tern completer: Recognise ~/.tern-config
## Problem
Tern supports a "global" project file (`~/.tern-config`) which contains global defaults. In theory this can be used instead of `.tern-project`.
The ycmd tern completer does not check for this file and issues its warning anyway if no `.tern-project` file could be found.
## Requirement
Don't issue a warning when a `.tern-config` file exists, as this could well contain enough info for useful javascript completion for many projects.
| 2015-12-20T13:29:00 |
|
ycm-core/ycmd | 281 | ycm-core__ycmd-281 | [
"278"
] | 24f5d51be3a92a746dfbd932d480e776842b3169 | diff --git a/ycmd/completers/javascript/tern_completer.py b/ycmd/completers/javascript/tern_completer.py
--- a/ycmd/completers/javascript/tern_completer.py
+++ b/ycmd/completers/javascript/tern_completer.py
@@ -254,7 +254,7 @@ def _PostRequest( self, request, request_data ):
the files are being updated.
The request block should contain the optional query block only. The file
- data and timeout are are added automatically."""
+ data are added automatically."""
if not self._ServerIsRunning():
raise ValueError( 'Not connected to server' )
@@ -271,7 +271,6 @@ def MakeIncompleteFile( name, file_data ):
full_request = {
'files': [ MakeIncompleteFile( x, file_data[ x ] )
for x in file_data.keys() ],
- 'timeout': 500,
}
full_request.update( request )
@@ -292,7 +291,7 @@ def _GetResponse( self, query, request_data ):
just updating file data in which case _PostRequest should be used directly.
The query block should contain the type and any parameters. The files,
- position, timeout etc. are added automatically."""
+ position, etc. are added automatically."""
def MakeTernLocation( request_data ):
return {
| Tern completer: Tern server crashes on moderate sized files
## Problem
On moderate to large sized projects (such as Tern itself), the Tern server is quitting, and `:YcmDebugInfo` reports the Tern server crashed. Users get no completions/etc. until issuing `:YcmCompleter StartServer`. The Tern server may then crash again.
## Cause
ycmd's Tern completer uses a (conservative) 500ms timeout for requests to the Tern server. The Tern docs indicate this should return a HTTP error when time-out is hit, but it does not. Instead the server crashes. See Tern issue: https://github.com/ternjs/tern/issues/702
## Requirement
Ideally, get a fix upstream in Tern and update the submodule. Otherwise, if the linked Tern issue is not resolved, ycmd tern completer needs to handle this error, potentially including any or all of the following:
- removing or increasing the timeout sent to Tern server
- using a client-side session timeout instead
- automatically restarting the crashed server
| 2015-12-20T15:12:37 |
||
ycm-core/ycmd | 340 | ycm-core__ycmd-340 | [
"333"
] | 8cb72bdc53a94744ddb0b301dda2dfe685c6d6ea | diff --git a/build.py b/build.py
--- a/build.py
+++ b/build.py
@@ -283,6 +283,8 @@ def BuildGoCode():
os.chdir( p.join( DIR_OF_THIS_SCRIPT, 'third_party', 'gocode' ) )
subprocess.check_call( [ 'go', 'build' ] )
+ os.chdir( p.join( DIR_OF_THIS_SCRIPT, 'third_party', 'godef' ) )
+ subprocess.check_call( [ 'go', 'build' ] )
def BuildRacerd():
diff --git a/ycmd/completers/go/gocode_completer.py b/ycmd/completers/go/gocode_completer.py
--- a/ycmd/completers/go/gocode_completer.py
+++ b/ycmd/completers/go/gocode_completer.py
@@ -27,10 +27,10 @@
from ycmd.completers.completer import Completer
GO_FILETYPES = set( [ 'go' ] )
-BINARY_NOT_FOUND_MESSAGE = ( 'Gocode binary not found. Did you build it? ' +
+BINARY_NOT_FOUND_MESSAGE = ( '{0} binary not found. Did you build it? ' +
'You can do so by running ' +
'"./install.py --gocode-completer".' )
-COMPLETION_ERROR_MESSAGE = 'Gocode shell call failed.'
+SHELL_ERROR_MESSAGE = '{0} shell call failed.'
PARSE_ERROR_MESSAGE = 'Gocode returned invalid JSON response.'
NO_COMPLETIONS_MESSAGE = 'Gocode returned empty JSON response.'
GOCODE_PANIC_MESSAGE = ( 'Gocode panicked trying to find completions, ' +
@@ -39,6 +39,10 @@
os.path.abspath( os.path.dirname( __file__ ) ),
'..', '..', '..', 'third_party', 'gocode',
'gocode' + ( '.exe' if utils.OnWindows() else '' ) )
+PATH_TO_GODEF_BINARY = os.path.join(
+ os.path.abspath( os.path.dirname( __file__ ) ),
+ '..', '..', '..', 'third_party', 'godef',
+ 'godef' + ( '.exe' if utils.OnWindows() else '' ) )
_logger = logging.getLogger( __name__ )
@@ -48,13 +52,22 @@ class GoCodeCompleter( Completer ):
def __init__( self, user_options ):
super( GoCodeCompleter, self ).__init__( user_options )
self._popener = utils.SafePopen # Overridden in test.
- self._binary = self.FindGoCodeBinary( user_options )
+ self._binary_gocode = self.FindBinary( 'gocode', user_options )
+ self._binary_godef = self.FindBinary( 'godef', user_options )
+
+ if not self._binary_gocode:
+ _logger.error( BINARY_NOT_FOUND_MESSAGE.format( 'Gocode' ) )
+ raise RuntimeError( BINARY_NOT_FOUND_MESSAGE.format( 'Gocode' ) )
- if not self._binary:
- _logger.error( BINARY_NOT_FOUND_MESSAGE )
- raise RuntimeError( BINARY_NOT_FOUND_MESSAGE )
+ _logger.info( 'Enabling go completion using %s binary',
+ self._binary_gocode )
- _logger.info( 'Enabling go completion using %s binary', self._binary )
+ if not self._binary_godef:
+ _logger.error( BINARY_NOT_FOUND_MESSAGE.format( 'Godef' ) )
+ raise RuntimeError( BINARY_NOT_FOUND_MESSAGE.format( 'Godef' ) )
+
+ _logger.info( 'Enabling go definitions using %s binary',
+ self._binary_godef )
def SupportedFiletypes( self ):
@@ -72,9 +85,10 @@ def ComputeCandidatesInner( self, request_data ):
offset = _ComputeOffset( contents, request_data[ 'line_num' ],
request_data[ 'column_num' ] )
- stdoutdata = self._ExecuteGoCodeBinary( '-f=json', 'autocomplete',
- filename, str(offset),
- contents = contents )
+ stdoutdata = self._ExecuteBinary( self._binary_gocode,
+ '-f=json', 'autocomplete',
+ filename, str(offset),
+ contents = contents )
try:
resultdata = json.loads( stdoutdata )
@@ -94,32 +108,42 @@ def ComputeCandidatesInner( self, request_data ):
def GetSubcommandsMap( self ):
return {
'StartServer': ( lambda self, request_data, args: self._StartServer() ),
- 'StopServer': ( lambda self, request_data, args: self._StopServer() )
+ 'StopServer': ( lambda self, request_data, args: self._StopServer() ),
+ 'GoTo' : ( lambda self, request_data, args:
+ self._GoToDefinition( request_data ) ),
+ 'GoToDefinition' : ( lambda self, request_data, args:
+ self._GoToDefinition( request_data ) ),
+ 'GoToDeclaration' : ( lambda self, request_data, args:
+ self._GoToDefinition( request_data ) ),
}
- def FindGoCodeBinary( self, user_options ):
- """ Find the path to the gocode binary.
+ def FindBinary( self, binary, user_options ):
+ """ Find the path to the gocode/godef binary.
- If 'gocode_binary_path' in the options is blank,
- use the version installed with YCM, if it exists,
- then the one on the path, if not.
+ If 'gocode_binary_path' or 'godef_binary_path'
+ in the options is blank, use the version installed
+ with YCM, if it exists, then the one on the path, if not.
- If the 'gocode_binary_path' is specified, use it
- as an absolute path.
+ If the 'gocode_binary_path' or 'godef_binary_path' is
+ specified, use it as an absolute path.
If the resolved binary exists, return the path,
otherwise return None. """
- if user_options.get( 'gocode_binary_path' ):
+ if user_options.get( '%s_binary_path' % binary ):
# The user has explicitly specified a path.
- if os.path.isfile( user_options[ 'gocode_binary_path' ] ):
- return user_options[ 'gocode_binary_path' ]
+ if os.path.isfile( user_options[ '%s_binary_path' % binary] ):
+ return user_options[ '%s_binary_path' % binary]
else:
return None
# Try to use the bundled binary or one on the path.
- if os.path.isfile( PATH_TO_GOCODE_BINARY ):
- return PATH_TO_GOCODE_BINARY
- return utils.PathToFirstExistingExecutable( [ 'gocode' ] )
+ if binary == 'gocode':
+ if os.path.isfile( PATH_TO_GOCODE_BINARY ):
+ return PATH_TO_GOCODE_BINARY
+ elif binary == 'godef':
+ if os.path.isfile( PATH_TO_GODEF_BINARY ):
+ return PATH_TO_GODEF_BINARY
+ return utils.PathToFirstExistingExecutable( [ binary ] )
def OnFileReadyToParse( self, request_data ):
@@ -132,31 +156,66 @@ def Shutdown( self ):
def _StartServer( self ):
""" Start the GoCode server """
- self._ExecuteGoCodeBinary()
+ self._ExecuteBinary( self._binary_gocode )
def _StopServer( self ):
""" Stop the GoCode server """
_logger.info( 'Stopping GoCode server' )
- self._ExecuteGoCodeBinary( 'close' )
+ self._ExecuteBinary( self._binary_gocode, 'close' )
- def _ExecuteGoCodeBinary( self, *args, **kwargs ):
- """ Execute the GoCode binary with given arguments. Use the contents
+ def _ExecuteBinary( self, binary, *args, **kwargs):
+ """ Execute the GoCode/GoDef binary with given arguments. Use the contents
argument to send data to GoCode. Return the standard output. """
- proc = self._popener(
- [ self._binary ] + list(args), stdin = subprocess.PIPE,
+ popen_handle = self._popener(
+ [ binary ] + list(args), stdin = subprocess.PIPE,
stdout = subprocess.PIPE, stderr = subprocess.PIPE )
contents = kwargs[ 'contents' ] if 'contents' in kwargs else None
- stdoutdata, stderrdata = proc.communicate( contents )
- if proc.returncode:
- _logger.error( COMPLETION_ERROR_MESSAGE + " code %i stderr: %s",
- proc.returncode, stderrdata)
- raise RuntimeError( COMPLETION_ERROR_MESSAGE )
+ stdoutdata, stderrdata = popen_handle.communicate( contents )
+ if popen_handle.returncode:
+ binary_str = "Gocode"
+ if binary == self._binary_godef:
+ binary_str = "Godef"
+
+ _logger.error( SHELL_ERROR_MESSAGE.format( binary_str ) +
+ " code %i stderr: %s",
+ popen_handle.returncode, stderrdata)
+ raise RuntimeError( SHELL_ERROR_MESSAGE.format( binary_str ) )
return stdoutdata
+ def _GoToDefinition( self, request_data ):
+ try:
+ filename = request_data[ 'filepath' ]
+ _logger.info( "godef GoTo request %s" % filename )
+ if not filename:
+ return
+ contents = utils.ToUtf8IfNeeded(
+ request_data[ 'file_data' ][ filename ][ 'contents' ] )
+ offset = _ComputeOffset( contents, request_data[ 'line_num' ],
+ request_data[ 'column_num' ] )
+ stdout = self._ExecuteBinary( self._binary_godef,
+ "-i",
+ "-f=%s" % filename,
+ '-json',
+ "-o=%s" % offset,
+ contents = contents )
+ return self._ConstructGoToFromResponse( stdout )
+ except Exception:
+ raise RuntimeError( 'Can\'t jump to definition.' )
+
+
+ def _ConstructGoToFromResponse( self, response_str ):
+ parsed = json.loads( response_str )
+ if 'filename' in parsed and 'column' in parsed:
+ return responses.BuildGoToResponse( parsed[ 'filename' ],
+ int( parsed[ 'line' ] ),
+ int( parsed[ 'column' ] ) )
+ raise RuntimeError( 'Can\'t jump to definition.' )
+
+
# Compute the byte offset in the file given the line and column.
# TODO(ekfriis): If this is slow, consider moving this to C++ ycm_core,
| diff --git a/ycmd/completers/go/tests/gocode_completer_test.py b/ycmd/completers/go/tests/gocode_completer_test.py
--- a/ycmd/completers/go/tests/gocode_completer_test.py
+++ b/ycmd/completers/go/tests/gocode_completer_test.py
@@ -65,15 +65,15 @@ def FindGoCodeBinary_test( self ):
user_options = user_options_store.DefaultOptions()
eq_( PATH_TO_GOCODE_BINARY,
- self._completer.FindGoCodeBinary( user_options ) )
+ self._completer.FindBinary( "gocode", user_options ) )
user_options[ 'gocode_binary_path' ] = DUMMY_BINARY
eq_( DUMMY_BINARY,
- self._completer.FindGoCodeBinary( user_options ) )
+ self._completer.FindBinary( "gocode", user_options ) )
user_options[ 'gocode_binary_path' ] = DATA_DIR
eq_( None,
- self._completer.FindGoCodeBinary( user_options ) )
+ self._completer.FindBinary( "gocode", user_options ) )
# Test line-col to offset in the file before any unicode occurrences.
diff --git a/ycmd/tests/go/subcommands_test.py b/ycmd/tests/go/subcommands_test.py
new file mode 100644
--- /dev/null
+++ b/ycmd/tests/go/subcommands_test.py
@@ -0,0 +1,75 @@
+#!/usr/bin/env python
+#
+# Copyright (C) 2016 ycmd contributors.
+#
+# This file is part of ycmd.
+#
+# ycmd is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# ycmd is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with ycmd. If not, see <http://www.gnu.org/licenses/>.
+
+from go_handlers_test import Go_Handlers_test
+from nose.tools import eq_
+
+
+class Go_Subcommands_test( Go_Handlers_test ):
+
+
+ def _GoTo( self, params ):
+ filepath = self._PathToTestFile( 'goto.go' )
+ contents = open( filepath ).read()
+
+ command = params[ 'command' ]
+ goto_data = self._BuildRequest( completer_target = 'filetype_default',
+ command_arguments = [ command ],
+ line_num = 8,
+ column_num = 8,
+ contents = contents,
+ filetype = 'go',
+ filepath = filepath )
+
+ results = self._app.post_json( '/run_completer_command',
+ goto_data )
+
+ eq_( {
+ 'line_num': 3, 'column_num': 6, 'filepath': filepath
+ }, results.json )
+
+ filepath = self._PathToTestFile( 'win.go' )
+ contents = open( filepath ).read()
+
+ command = params[ 'command' ]
+ goto_data = self._BuildRequest( completer_target = 'filetype_default',
+ command_arguments = [ command ],
+ line_num = 4,
+ column_num = 7,
+ contents = contents,
+ filetype = 'go',
+ filepath = filepath )
+
+ results = self._app.post_json( '/run_completer_command',
+ goto_data )
+
+ eq_( {
+ 'line_num': 2, 'column_num': 6, 'filepath': filepath
+ }, results.json )
+
+
+ def GoTo_all_test( self ):
+ tests = [
+ { 'command': 'GoTo' },
+ { 'command': 'GoToDefinition' },
+ { 'command': 'GoToDeclaration' }
+ ]
+
+ for test in tests:
+ yield ( self._GoTo, test )
diff --git a/ycmd/tests/go/testdata/goto.go b/ycmd/tests/go/testdata/goto.go
new file mode 100644
--- /dev/null
+++ b/ycmd/tests/go/testdata/goto.go
@@ -0,0 +1,9 @@
+package main
+
+func dummy() {
+
+}
+
+func main() {
+ dummy() //GoTo
+}
\ No newline at end of file
diff --git a/ycmd/tests/go/testdata/win.go b/ycmd/tests/go/testdata/win.go
new file mode 100644
--- /dev/null
+++ b/ycmd/tests/go/testdata/win.go
@@ -0,0 +1,5 @@
+package main
+func foo() {}
+func main() {
+ foo()
+}
| Implement GoTo for Go
Backstory: After I had just fixed up the Sublime plugin to work with Rust, it turned out that I would be reading a lot of Go code for a while and I didn't actually had a chance to play with YCM for Rust much even though I usually program in it. The universe is cruel :unamused:
I miss GoTo a lot in Go (doubly so than Rust since the docs are less usable for me), and while I tried fixing up gocode to have GoTo, there are some core issues with its design which make this hard.
Anyway, someone pointed me to https://github.com/rogpeppe/godef, which I'd like to integrate. At first glance it seems easy; but that's what they always say :laughing:
I plan (at least for now) to do the work here; just opening this issue so that I can post notes/ask for help and in case you don't actually want this integration :smile:
Tips welcome!
| Thanks for expressing interest in working on ycmd! :)
Hm, having two engines (one for completion, the other for GoTo) for separate parts of a language's semantic support would be a first for us.
@micbou @puremourning @vheon @ekfriis Thoughts?
Yeah, that's one issue I expected.
Note that godef is a simple binary, without a server. (Gocode is also a simple binary, but it internally does cool stuff with spinning up a server and using unix sockets or something to communicate). Nor does godef _need_ a server, since GoTo perf isn't that important as long as it's within a few hundred milliseconds (unlike autocomplete which should be buttery smooth).
IMO it should be fine to call a different standalone binary for goto.
Also, godef seems to mostly let the Go AST/parse libraries do the heavy lifting, though it contains a copy of them for some reason (backcompat probably)
The only issue is that our build system becomes more complicated; we bundle and build _everything_ for the user. This being Go, it shouldn't be much of an issue.
If godef is this simple, why not simply use it as a library from within gocode? I'm guessing upstream wouldn't be too happy about that idea, but might be worth asking.
Forgot to mention: I'm personally OK with this idea. I'm not a _fan_ of it, but I can see how it would make users happy for not too much hackery. I would like to see how intrusive it would be code-wise though. (I don't expect this to be an issue.)
Other ycmd devs might have different insights.
The golang community follow the UNIX philosophy, so every functionality that you might want is actually a separate executable. For this reason I find this
> If godef is this simple, why not simply use it as a library from within gocode? I'm guessing upstream wouldn't be too happy about that idea, but might be worth asking.
to be unlikely accepted upstream. The downside that I see are:
- a "IDE like system" would reuse the AST for the code if it could.
- on our side we wouldn't spawn a new process every time we want a GoTo
On the other hand I don't see this as a drama as it would be for other languages:
- The Go language was designed to be easy to parse and to compile (removing also circular imports), so creating the AST for the source code is really fast.
- Being statically compiled we don't pay for the time of runtime linking which means that spawning a go tool is actually fast.
So I would say that it would be absolutely ok to integrate a different tool for different functionality.
P.S: I know that is not a good excuse but every other system supporting Go as a language would do exactly what we would do here.
Needs: https://github.com/rogpeppe/godef/pull/24
| 2016-02-02T19:16:14 |
ycm-core/ycmd | 355 | ycm-core__ycmd-355 | [
"354"
] | 7ea44df61e524338db66521fd2d5a917a7c472fb | diff --git a/ycmd/completers/go/gocode_completer.py b/ycmd/completers/go/gocode_completer.py
--- a/ycmd/completers/go/gocode_completer.py
+++ b/ycmd/completers/go/gocode_completer.py
@@ -30,10 +30,9 @@
from ycmd import responses
from ycmd import utils
-from ycmd.utils import ToBytes, ToUnicode
+from ycmd.utils import ToBytes, ToUnicode, ExecutableName
from ycmd.completers.completer import Completer
-GO_FILETYPES = set( [ 'go' ] )
BINARY_NOT_FOUND_MESSAGE = ( '{0} binary not found. Did you build it? ' +
'You can do so by running ' +
'"./install.py --gocode-completer".' )
@@ -42,43 +41,67 @@
NO_COMPLETIONS_MESSAGE = 'Gocode returned empty JSON response.'
GOCODE_PANIC_MESSAGE = ( 'Gocode panicked trying to find completions, ' +
'you likely have a syntax error.' )
-PATH_TO_GOCODE_BINARY = os.path.join(
+DIR_OF_THIRD_PARTY = os.path.join(
os.path.abspath( os.path.dirname( __file__ ) ),
- '..', '..', '..', 'third_party', 'gocode',
- 'gocode' + ( '.exe' if utils.OnWindows() else '' ) )
-PATH_TO_GODEF_BINARY = os.path.join(
- os.path.abspath( os.path.dirname( __file__ ) ),
- '..', '..', '..', 'third_party', 'godef',
- 'godef' + ( '.exe' if utils.OnWindows() else '' ) )
+ '..', '..', '..', 'third_party' )
+GO_BINARIES = dict( {
+ 'gocode': os.path.join( DIR_OF_THIRD_PARTY,
+ 'gocode',
+ ExecutableName( 'gocode' ) ),
+ 'godef': os.path.join( DIR_OF_THIRD_PARTY,
+ 'godef',
+ ExecutableName( 'godef' ) )
+} )
_logger = logging.getLogger( __name__ )
-class GoCodeCompleter( Completer ):
+def FindBinary( binary, user_options ):
+ """ Find the path to the gocode/godef binary.
- def __init__( self, user_options ):
- super( GoCodeCompleter, self ).__init__( user_options )
- self._popener = utils.SafePopen # Overridden in test.
- self._binary_gocode = self.FindBinary( 'gocode', user_options )
- self._binary_godef = self.FindBinary( 'godef', user_options )
+ If 'gocode_binary_path' or 'godef_binary_path'
+ in the options is blank, use the version installed
+ with YCM, if it exists.
- if not self._binary_gocode:
- _logger.error( BINARY_NOT_FOUND_MESSAGE.format( 'Gocode' ) )
- raise RuntimeError( BINARY_NOT_FOUND_MESSAGE.format( 'Gocode' ) )
+ If the 'gocode_binary_path' or 'godef_binary_path' is
+ specified, use it as an absolute path.
- _logger.info( 'Enabling go completion using %s binary',
- self._binary_gocode )
+ If the resolved binary exists, return the path,
+ otherwise return None. """
- if not self._binary_godef:
- _logger.error( BINARY_NOT_FOUND_MESSAGE.format( 'Godef' ) )
- raise RuntimeError( BINARY_NOT_FOUND_MESSAGE.format( 'Godef' ) )
+ def _FindPath():
+ key = '{0}_binary_path'.format( binary )
+ if user_options.get( key ):
+ return user_options[ key ]
+ return GO_BINARIES.get( binary )
- _logger.info( 'Enabling go definitions using %s binary',
- self._binary_godef )
+ binary_path = _FindPath()
+ if os.path.isfile( binary_path ):
+ return binary_path
+ return None
+
+
+def ShouldEnableGoCompleter( user_options ):
+ def _HasBinary( binary ):
+ binary_path = FindBinary( binary, user_options )
+ if not binary_path:
+ _logger.error( BINARY_NOT_FOUND_MESSAGE.format( binary ) )
+ return binary_path
+
+ return all( _HasBinary( binary ) for binary in [ 'gocode', 'godef' ] )
+
+
+class GoCodeCompleter( Completer ):
+
+ def __init__( self, user_options ):
+ super( GoCodeCompleter, self ).__init__( user_options )
+ self._popener = utils.SafePopen # Overridden in test.
+ self._binary_gocode = FindBinary( 'gocode', user_options )
+ self._binary_godef = FindBinary( 'godef', user_options )
def SupportedFiletypes( self ):
- return GO_FILETYPES
+ return [ 'go' ]
def ComputeCandidatesInner( self, request_data ):
@@ -127,34 +150,6 @@ def GetSubcommandsMap( self ):
}
- def FindBinary( self, binary, user_options ):
- """ Find the path to the gocode/godef binary.
-
- If 'gocode_binary_path' or 'godef_binary_path'
- in the options is blank, use the version installed
- with YCM, if it exists, then the one on the path, if not.
-
- If the 'gocode_binary_path' or 'godef_binary_path' is
- specified, use it as an absolute path.
-
- If the resolved binary exists, return the path,
- otherwise return None. """
- if user_options.get( '%s_binary_path' % binary ):
- # The user has explicitly specified a path.
- if os.path.isfile( user_options[ '%s_binary_path' % binary] ):
- return user_options[ '%s_binary_path' % binary]
- else:
- return None
- # Try to use the bundled binary or one on the path.
- if binary == 'gocode':
- if os.path.isfile( PATH_TO_GOCODE_BINARY ):
- return PATH_TO_GOCODE_BINARY
- elif binary == 'godef':
- if os.path.isfile( PATH_TO_GODEF_BINARY ):
- return PATH_TO_GODEF_BINARY
- return utils.PathToFirstExistingExecutable( [ binary ] )
-
-
def OnFileReadyToParse( self, request_data ):
self._StartServer()
diff --git a/ycmd/completers/go/hook.py b/ycmd/completers/go/hook.py
--- a/ycmd/completers/go/hook.py
+++ b/ycmd/completers/go/hook.py
@@ -23,7 +23,12 @@
standard_library.install_aliases()
from builtins import * # noqa
-from ycmd.completers.go.gocode_completer import GoCodeCompleter
+from ycmd.completers.go.gocode_completer import (
+ GoCodeCompleter, ShouldEnableGoCompleter )
+
def GetCompleter( user_options ):
+ if not ShouldEnableGoCompleter( user_options ):
+ return None
+
return GoCodeCompleter( user_options )
diff --git a/ycmd/utils.py b/ycmd/utils.py
--- a/ycmd/utils.py
+++ b/ycmd/utils.py
@@ -194,6 +194,10 @@ def FindExecutable( executable ):
return None
+def ExecutableName( executable ):
+ return executable + ( '.exe' if OnWindows() else '' )
+
+
def OnWindows():
return sys.platform == 'win32'
| diff --git a/ycmd/tests/go/gocode_completer_test.py b/ycmd/tests/go/gocode_completer_test.py
--- a/ycmd/tests/go/gocode_completer_test.py
+++ b/ycmd/tests/go/gocode_completer_test.py
@@ -27,8 +27,8 @@
import os
from nose.tools import eq_, raises
-from ycmd.completers.go.gocode_completer import ( GoCodeCompleter,
- PATH_TO_GOCODE_BINARY )
+from ycmd.completers.go.gocode_completer import ( GoCodeCompleter, GO_BINARIES,
+ FindBinary )
from ycmd.request_wrap import RequestWrap
from ycmd import user_options_store
from ycmd.utils import ReadFile
@@ -71,16 +71,13 @@ def _BuildRequest( self, line_num, column_num ):
def FindGoCodeBinary_test( self ):
user_options = user_options_store.DefaultOptions()
- eq_( PATH_TO_GOCODE_BINARY,
- self._completer.FindBinary( "gocode", user_options ) )
+ eq_( GO_BINARIES.get( "gocode" ), FindBinary( "gocode", user_options ) )
user_options[ 'gocode_binary_path' ] = DUMMY_BINARY
- eq_( DUMMY_BINARY,
- self._completer.FindBinary( "gocode", user_options ) )
+ eq_( DUMMY_BINARY, FindBinary( "gocode", user_options ) )
user_options[ 'gocode_binary_path' ] = DATA_DIR
- eq_( None,
- self._completer.FindBinary( "gocode", user_options ) )
+ eq_( None, FindBinary( "gocode", user_options ) )
# Test line-col to offset in the file before any unicode occurrences.
| Go completion kicking in even when built without --gocode-completer
Leading on from https://github.com/Valloric/YouCompleteMe/issues/1603#issuecomment-135237881, it seems that ycmd is enabling Go completion even when the `--gocode-completer` flag isn't provided on build; this is because if `gocode` and `godef` aren't found where they would be built given that flag, if they're in `PATH` they'll still be found and used regardless.
This seems like a bug, since I may want to make use of another plugin (vim-go in this case) for providing completion for Go files, while still making use of YCM for other languages.
| Have you tried using [this YCM option](https://github.com/Valloric/YouCompleteMe#the-gycm_filetype_specific_completion_to_disable-option)?
Turning semantic completion on/off is a client-level decision (so done in YCM for instance).
@Valloric he doesn't want to disable semantic completion for `go` entirely but use the `omnifunc` which is backed by `gocode` as well but `vim-go` add more stuff to it like parameter completion. IMHO we should not allow our implementation to use binary found on the PATH, but only use the one we ship.
| 2016-02-09T00:30:25 |
ycm-core/ycmd | 397 | ycm-core__ycmd-397 | [
"395"
] | 37501c00baab54100c1e8c232e41a2aeaa43d51e | diff --git a/ycmd/user_options_store.py b/ycmd/user_options_store.py
--- a/ycmd/user_options_store.py
+++ b/ycmd/user_options_store.py
@@ -27,8 +27,11 @@
import os
from frozendict import frozendict
+from ycmd.utils import ReadFile
+
_USER_OPTIONS = {}
+
def SetAll( new_options ):
global _USER_OPTIONS
_USER_OPTIONS = frozendict( new_options )
@@ -49,8 +52,6 @@ def LoadDefaults():
def DefaultOptions():
settings_path = os.path.join(
os.path.dirname( os.path.abspath( __file__ ) ), 'default_settings.json' )
- with open( settings_path ) as f:
- options = json.loads( f.read() )
- options.pop( 'hmac_secret', None )
- return options
-
+ options = json.loads( ReadFile( settings_path ) )
+ options.pop( 'hmac_secret', None )
+ return options
| LookupError: unknown encoding on Mac with Python 2
With latest `ycmd` I get the following error on Mac with Py2:
``` python
Traceback (most recent call last):
File "/usr/local/Cellar/python/2.7.11/Frameworks/Python.framework/Versions/2.7/lib/python2.7/runpy.py", line 162, in _run_module_as_main
"__main__", fname, loader, pkg_name)
File "/usr/local/Cellar/python/2.7.11/Frameworks/Python.framework/Versions/2.7/lib/python2.7/runpy.py", line 72, in _run_code
exec code in run_globals
File "/Users/peter/src/ycmd/ycmd/__main__.py", line 181, in <module>
Main()
File "/Users/peter/src/ycmd/ycmd/__main__.py", line 151, in Main
options, hmac_secret = SetupOptions( args.options_file )
File "/Users/peter/src/ycmd/ycmd/__main__.py", line 127, in SetupOptions
options = user_options_store.DefaultOptions()
File "/Users/peter/src/ycmd/ycmd/../ycmd/user_options_store.py", line 52, in DefaultOptions
with open( settings_path ) as f:
LookupError: unknown encoding:
Process ycmd-server exited abnormally with code 1
```
I have a python 2 installation from homebrew
| 2016-02-25T12:44:39 |
||
ycm-core/ycmd | 409 | ycm-core__ycmd-409 | [
"338"
] | 6df0fe50cacb8887eda0785b845e8dc049ca0408 | diff --git a/ycmd/completers/cs/cs_completer.py b/ycmd/completers/cs/cs_completer.py
--- a/ycmd/completers/cs/cs_completer.py
+++ b/ycmd/completers/cs/cs_completer.py
@@ -195,15 +195,6 @@ def GetSubcommandsMap( self ):
self._SolutionSubcommand( request_data,
method = 'ServerIsReady',
no_request_data = True ) ),
- 'SetOmnisharpPort' : ( lambda self, request_data, args:
- self._SolutionSubcommand( request_data,
- method = '_SetOmnisharpPort',
- port = args[ 0 ],
- no_request_data = True ) ),
- 'GetOmnisharpPort' : ( lambda self, request_data, args:
- self._SolutionSubcommand( request_data,
- method = '_GetOmnisharpPort',
- no_request_data = True ) ),
}
@@ -331,7 +322,6 @@ def __init__( self, solution_path, keep_logfiles, desired_omnisharp_port ):
self._omnisharp_phandle = None
self._desired_omnisharp_port = desired_omnisharp_port
self._server_state_lock = threading.RLock()
- self._external_omnisharp = False
def CodeCheck( self, request_data ):
@@ -383,7 +373,6 @@ def _StartServer( self ):
with open( self._filename_stdout, 'w' ) as fstdout:
self._omnisharp_phandle = utils.SafePopen(
command, stdout = fstdout, stderr = fstderr )
- self._external_omnisharp = False
self._solution_path = path_to_solutionfile
@@ -399,7 +388,7 @@ def _StopServer( self ):
self._TryToStopServer()
# Kill it if it's still up
- if self.ServerIsRunning() and not self.ServerIsExternal():
+ if self.ServerIsRunning():
self._logger.info( 'Killing OmniSharp server' )
self._omnisharp_phandle.kill()
@@ -544,27 +533,14 @@ def _DefaultParameters( self, request_data ):
return parameters
- def ServerIsExternal( self ):
- return self._external_omnisharp
-
-
- def ServerIsRunning( self, external_check = True ):
+ def ServerIsRunning( self ):
""" Check if our OmniSharp server is running (process is up)."""
- if not self.ServerIsExternal():
- return utils.ProcessIsRunning( self._omnisharp_phandle )
-
- if self._omnisharp_port is None:
- return False
-
- if external_check:
- return self.ServerIsHealthy()
-
- return True
+ return utils.ProcessIsRunning( self._omnisharp_phandle )
def ServerIsHealthy( self ):
""" Check if our OmniSharp server is healthy (up and serving)."""
- if not self.ServerIsRunning( external_check = False ):
+ if not self.ServerIsRunning():
return False
try:
@@ -575,7 +551,7 @@ def ServerIsHealthy( self ):
def ServerIsReady( self ):
""" Check if our OmniSharp server is ready (loaded solution file)."""
- if not self.ServerIsRunning( external_check = False ):
+ if not self.ServerIsRunning():
return False
try:
@@ -595,19 +571,6 @@ def _ServerLocation( self ):
return 'http://localhost:' + str( self._omnisharp_port )
- def _GetOmnisharpPort( self ):
- return responses.BuildDisplayMessageResponse( self._omnisharp_port )
-
-
- def _SetOmnisharpPort( self, port ):
- with self._server_state_lock:
- if self.ServerIsRunning():
- self.StopServer()
-
- self._omnisharp_port = port
- self._external_omnisharp = True
-
-
def _GetResponse( self, handler, parameters = {}, timeout = None ):
""" Handle communication with server """
target = urllib.parse.urljoin( self._ServerLocation(), handler )
| diff --git a/ycmd/tests/__init__.py b/ycmd/tests/__init__.py
--- a/ycmd/tests/__init__.py
+++ b/ycmd/tests/__init__.py
@@ -0,0 +1,59 @@
+# Copyright (C) 2016 ycmd contributors
+#
+# This file is part of ycmd.
+#
+# ycmd is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# ycmd is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with ycmd. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import unicode_literals
+from __future__ import print_function
+from __future__ import division
+from __future__ import absolute_import
+from future import standard_library
+standard_library.install_aliases()
+from builtins import * # noqa
+
+import functools
+import os
+
+from ycmd.tests.test_utils import SetUpApp
+
+shared_app = None
+
+
+def PathToTestFile( *args ):
+ dir_of_current_script = os.path.dirname( os.path.abspath( __file__ ) )
+ return os.path.join( dir_of_current_script, 'testdata', *args )
+
+
+def setUpPackage():
+ """Initializes the ycmd server as a WebTest application that will be shared
+ by all tests using the SharedYcmd decorator in this package. Additional
+ configuration that is common to these tests, like starting a semantic
+ subserver, should be done here."""
+ global shared_app
+
+ shared_app = SetUpApp()
+
+
+def SharedYcmd( test ):
+ """Defines a decorator to be attached to tests of this package. This decorator
+ passes the shared ycmd application as a parameter.
+
+ Do NOT attach it to test generators but directly to the yielded tests."""
+ global shared_app
+
+ @functools.wraps( test )
+ def Wrapper( *args, **kwargs ):
+ return test( shared_app, *args, **kwargs )
+ return Wrapper
diff --git a/ycmd/tests/clang/__init__.py b/ycmd/tests/clang/__init__.py
--- a/ycmd/tests/clang/__init__.py
+++ b/ycmd/tests/clang/__init__.py
@@ -0,0 +1,79 @@
+# Copyright (C) 2016 ycmd contributors
+#
+# This file is part of ycmd.
+#
+# ycmd is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# ycmd is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with ycmd. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import unicode_literals
+from __future__ import print_function
+from __future__ import division
+from __future__ import absolute_import
+from future import standard_library
+standard_library.install_aliases()
+from builtins import * # noqa
+
+import functools
+import os
+
+from ycmd import handlers
+from ycmd.tests.test_utils import SetUpApp
+
+shared_app = None
+
+
+def PathToTestFile( *args ):
+ dir_of_current_script = os.path.dirname( os.path.abspath( __file__ ) )
+ return os.path.join( dir_of_current_script, 'testdata', *args )
+
+
+def setUpPackage():
+ """Initializes the ycmd server as a WebTest application that will be shared
+ by all tests using the SharedYcmd decorator in this package. Additional
+ configuration that is common to these tests, like starting a semantic
+ subserver, should be done here."""
+ global shared_app
+
+ shared_app = SetUpApp()
+
+
+def SharedYcmd( test ):
+ """Defines a decorator to be attached to tests of this package. This decorator
+ passes the shared ycmd application as a parameter.
+
+ Do NOT attach it to test generators but directly to the yielded tests."""
+ global shared_app
+
+ @functools.wraps( test )
+ def Wrapper( *args, **kwargs ):
+ return test( shared_app, *args, **kwargs )
+ return Wrapper
+
+
+def IsolatedYcmd( test ):
+ """Defines a decorator to be attached to tests of this package. This decorator
+ passes a unique ycmd application as a parameter. It should be used on tests
+ that change the server state in a irreversible way (ex: a semantic subserver
+ is stopped or restarted) or expect a clean state (ex: no semantic subserver
+ started, no .ycm_extra_conf.py loaded, etc).
+
+ Do NOT attach it to test generators but directly to the yielded tests."""
+ @functools.wraps( test )
+ def Wrapper( *args, **kwargs ):
+ old_server_state = handlers._server_state
+
+ try:
+ test( SetUpApp(), *args, **kwargs )
+ finally:
+ handlers._server_state = old_server_state
+ return Wrapper
diff --git a/ycmd/tests/clang/clang_handlers_test.py b/ycmd/tests/clang/clang_handlers_test.py
deleted file mode 100644
--- a/ycmd/tests/clang/clang_handlers_test.py
+++ /dev/null
@@ -1,32 +0,0 @@
-# Copyright (C) 2015 ycmd contributors
-#
-# This file is part of ycmd.
-#
-# ycmd is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# ycmd is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with ycmd. If not, see <http://www.gnu.org/licenses/>.
-
-from __future__ import unicode_literals
-from __future__ import print_function
-from __future__ import division
-from __future__ import absolute_import
-from future import standard_library
-standard_library.install_aliases()
-from builtins import * # noqa
-
-from ..handlers_test import Handlers_test
-
-
-class Clang_Handlers_test( Handlers_test ):
-
- def __init__( self ):
- self._file = __file__
diff --git a/ycmd/tests/clang/diagnostics_test.py b/ycmd/tests/clang/diagnostics_test.py
--- a/ycmd/tests/clang/diagnostics_test.py
+++ b/ycmd/tests/clang/diagnostics_test.py
@@ -23,19 +23,18 @@
standard_library.install_aliases()
from builtins import * # noqa
-from ...server_utils import SetUpPythonPath
-SetUpPythonPath()
from hamcrest import ( assert_that, contains, contains_string, has_entries,
has_entry, has_items, empty, equal_to )
-from .clang_handlers_test import Clang_Handlers_test
-from ycmd.utils import ReadFile
from pprint import pprint
+from ycmd.tests.clang import PathToTestFile, SharedYcmd
+from ycmd.tests.test_utils import BuildRequest
+from ycmd.utils import ReadFile
-class Clang_Diagnostics_test( Clang_Handlers_test ):
- def ZeroBasedLineAndColumn_test( self ):
- contents = """
+@SharedYcmd
+def Diagnostics_ZeroBasedLineAndColumn_test( app ):
+ contents = """
void foo() {
double baz = "foo";
}
@@ -43,46 +42,47 @@ def ZeroBasedLineAndColumn_test( self ):
// Padding to 5 lines
"""
- event_data = self._BuildRequest( compilation_flags = ['-x', 'c++'],
- event_name = 'FileReadyToParse',
- contents = contents,
- filetype = 'cpp' )
-
- results = self._app.post_json( '/event_notification', event_data ).json
- assert_that( results,
- contains(
- has_entries( {
- 'kind': equal_to( 'ERROR' ),
- 'text': contains_string( 'cannot initialize' ),
- 'ranges': contains( has_entries( {
- 'start': has_entries( {
- 'line_num': 3,
- 'column_num': 16,
- } ),
- 'end': has_entries( {
- 'line_num': 3,
- 'column_num': 21,
- } ),
- } ) ),
- 'location': has_entries( {
+ event_data = BuildRequest( compilation_flags = ['-x', 'c++'],
+ event_name = 'FileReadyToParse',
+ contents = contents,
+ filetype = 'cpp' )
+
+ results = app.post_json( '/event_notification', event_data ).json
+ assert_that( results,
+ contains(
+ has_entries( {
+ 'kind': equal_to( 'ERROR' ),
+ 'text': contains_string( 'cannot initialize' ),
+ 'ranges': contains( has_entries( {
+ 'start': has_entries( {
+ 'line_num': 3,
+ 'column_num': 16,
+ } ),
+ 'end': has_entries( {
+ 'line_num': 3,
+ 'column_num': 21,
+ } ),
+ } ) ),
+ 'location': has_entries( {
+ 'line_num': 3,
+ 'column_num': 10
+ } ),
+ 'location_extent': has_entries( {
+ 'start': has_entries( {
+ 'line_num': 3,
+ 'column_num': 10,
+ } ),
+ 'end': has_entries( {
'line_num': 3,
- 'column_num': 10
+ 'column_num': 13,
} ),
- 'location_extent': has_entries( {
- 'start': has_entries( {
- 'line_num': 3,
- 'column_num': 10,
- } ),
- 'end': has_entries( {
- 'line_num': 3,
- 'column_num': 13,
- } ),
- } )
- } ) ) )
-
-
- def SimpleLocationExtent_test( self ):
- contents = """
+ } )
+ } ) ) )
+
+
+@SharedYcmd
+def Diagnostics_SimpleLocationExtent_test( app ):
+ contents = """
void foo() {
baz = 5;
}
@@ -90,30 +90,31 @@ def SimpleLocationExtent_test( self ):
// Padding to 5 lines
"""
- event_data = self._BuildRequest( compilation_flags = ['-x', 'c++'],
- event_name = 'FileReadyToParse',
- contents = contents,
- filetype = 'cpp' )
-
- results = self._app.post_json( '/event_notification', event_data ).json
- assert_that( results,
- contains(
- has_entries( {
- 'location_extent': has_entries( {
- 'start': has_entries( {
- 'line_num': 3,
- 'column_num': 3,
- } ),
- 'end': has_entries( {
- 'line_num': 3,
- 'column_num': 6,
- } ),
- } )
- } ) ) )
-
-
- def PragmaOnceWarningIgnored_test( self ):
- contents = """
+ event_data = BuildRequest( compilation_flags = ['-x', 'c++'],
+ event_name = 'FileReadyToParse',
+ contents = contents,
+ filetype = 'cpp' )
+
+ results = app.post_json( '/event_notification', event_data ).json
+ assert_that( results,
+ contains(
+ has_entries( {
+ 'location_extent': has_entries( {
+ 'start': has_entries( {
+ 'line_num': 3,
+ 'column_num': 3,
+ } ),
+ 'end': has_entries( {
+ 'line_num': 3,
+ 'column_num': 6,
+ } ),
+ } )
+ } ) ) )
+
+
+@SharedYcmd
+def Diagnostics_PragmaOnceWarningIgnored_test( app ):
+ contents = """
#pragma once
struct Foo {
@@ -124,18 +125,19 @@ def PragmaOnceWarningIgnored_test( self ):
};
"""
- event_data = self._BuildRequest( compilation_flags = ['-x', 'c++'],
- event_name = 'FileReadyToParse',
- contents = contents,
- filepath = '/foo.h',
- filetype = 'cpp' )
+ event_data = BuildRequest( compilation_flags = ['-x', 'c++'],
+ event_name = 'FileReadyToParse',
+ contents = contents,
+ filepath = '/foo.h',
+ filetype = 'cpp' )
- response = self._app.post_json( '/event_notification', event_data ).json
- assert_that( response, empty() )
+ response = app.post_json( '/event_notification', event_data ).json
+ assert_that( response, empty() )
- def Works_test( self ):
- contents = """
+@SharedYcmd
+def Diagnostics_Works_test( app ):
+ contents = """
struct Foo {
int x // semicolon missing here!
int y;
@@ -144,24 +146,25 @@ def Works_test( self ):
};
"""
- diag_data = self._BuildRequest( compilation_flags = ['-x', 'c++'],
- line_num = 3,
- contents = contents,
- filetype = 'cpp' )
+ diag_data = BuildRequest( compilation_flags = ['-x', 'c++'],
+ line_num = 3,
+ contents = contents,
+ filetype = 'cpp' )
- event_data = diag_data.copy()
- event_data.update( {
- 'event_name': 'FileReadyToParse',
- } )
+ event_data = diag_data.copy()
+ event_data.update( {
+ 'event_name': 'FileReadyToParse',
+ } )
- self._app.post_json( '/event_notification', event_data )
- results = self._app.post_json( '/detailed_diagnostic', diag_data ).json
- assert_that( results,
- has_entry( 'message', contains_string( "expected ';'" ) ) )
+ app.post_json( '/event_notification', event_data )
+ results = app.post_json( '/detailed_diagnostic', diag_data ).json
+ assert_that( results,
+ has_entry( 'message', contains_string( "expected ';'" ) ) )
- def Multiline_test( self ):
- contents = """
+@SharedYcmd
+def Diagnostics_Multiline_test( app ):
+ contents = """
struct Foo {
Foo(int z) {}
};
@@ -171,49 +174,50 @@ def Multiline_test( self ):
}
"""
- diag_data = self._BuildRequest( compilation_flags = [ '-x', 'c++' ],
- line_num = 7,
- contents = contents,
- filetype = 'cpp' )
-
- event_data = diag_data.copy()
- event_data.update( {
- 'event_name': 'FileReadyToParse',
- } )
-
- self._app.post_json( '/event_notification', event_data )
- results = self._app.post_json( '/detailed_diagnostic', diag_data ).json
- assert_that( results,
- has_entry( 'message', contains_string( "\n" ) ) )
-
-
- def FixIt_Available_test( self ):
- contents = ReadFile( self._PathToTestFile( 'FixIt_Clang_cpp11.cpp' ) )
-
- event_data = self._BuildRequest( contents = contents,
- event_name = 'FileReadyToParse',
- filetype = 'cpp',
- compilation_flags = [ '-x', 'c++',
- '-std=c++03',
- '-Wall',
- '-Wextra',
- '-pedantic' ] )
-
- response = self._app.post_json( '/event_notification', event_data ).json
-
- pprint( response )
-
- assert_that( response, has_items(
- has_entries( {
- 'location': has_entries( { 'line_num': 16, 'column_num': 3 } ),
- 'text': equal_to( 'switch condition type \'A\' '
- 'requires explicit conversion to \'int\''),
- 'fixit_available': True
- } ),
- has_entries( {
- 'location': has_entries( { 'line_num': 11, 'column_num': 3 } ),
- 'text': equal_to(
- 'explicit conversion functions are a C++11 extension' ),
- 'fixit_available': False
- } ),
- ) )
+ diag_data = BuildRequest( compilation_flags = [ '-x', 'c++' ],
+ line_num = 7,
+ contents = contents,
+ filetype = 'cpp' )
+
+ event_data = diag_data.copy()
+ event_data.update( {
+ 'event_name': 'FileReadyToParse',
+ } )
+
+ app.post_json( '/event_notification', event_data )
+ results = app.post_json( '/detailed_diagnostic', diag_data ).json
+ assert_that( results,
+ has_entry( 'message', contains_string( "\n" ) ) )
+
+
+@SharedYcmd
+def Diagnostics_FixIt_Available_test( app ):
+ contents = ReadFile( PathToTestFile( 'FixIt_Clang_cpp11.cpp' ) )
+
+ event_data = BuildRequest( contents = contents,
+ event_name = 'FileReadyToParse',
+ filetype = 'cpp',
+ compilation_flags = [ '-x', 'c++',
+ '-std=c++03',
+ '-Wall',
+ '-Wextra',
+ '-pedantic' ] )
+
+ response = app.post_json( '/event_notification', event_data ).json
+
+ pprint( response )
+
+ assert_that( response, has_items(
+ has_entries( {
+ 'location': has_entries( { 'line_num': 16, 'column_num': 3 } ),
+ 'text': equal_to( 'switch condition type \'A\' '
+ 'requires explicit conversion to \'int\''),
+ 'fixit_available': True
+ } ),
+ has_entries( {
+ 'location': has_entries( { 'line_num': 11, 'column_num': 3 } ),
+ 'text': equal_to(
+ 'explicit conversion functions are a C++11 extension' ),
+ 'fixit_available': False
+ } ),
+ ) )
diff --git a/ycmd/tests/clang/get_completions_test.py b/ycmd/tests/clang/get_completions_test.py
--- a/ycmd/tests/clang/get_completions_test.py
+++ b/ycmd/tests/clang/get_completions_test.py
@@ -23,257 +23,259 @@
standard_library.install_aliases()
from builtins import * # noqa
-from webtest import TestApp
from nose.tools import eq_
from hamcrest import ( assert_that, contains, contains_inanyorder, empty,
has_item, has_items, has_entry, has_entries )
-from ...responses import UnknownExtraConf, NoExtraConfDetected
-from ... import handlers
-from ycmd.completers.cpp.clang_completer import NO_COMPLETIONS_MESSAGE
-from .clang_handlers_test import Clang_Handlers_test
-from ycmd.utils import ReadFile
import http.client
+from ycmd.completers.cpp.clang_completer import NO_COMPLETIONS_MESSAGE
+from ycmd.responses import UnknownExtraConf, NoExtraConfDetected
+from ycmd.tests.clang import IsolatedYcmd, PathToTestFile, SharedYcmd
+from ycmd.tests.test_utils import ( BuildRequest, CompletionEntryMatcher,
+ ErrorMatcher, UserOption )
+from ycmd.utils import ReadFile
-class Clang_GetCompletions_test( Clang_Handlers_test ):
-
- def __init__( self ):
- super( Clang_GetCompletions_test, self ).__init__()
- self._no_completions_error = self._ErrorMatcher( RuntimeError,
- NO_COMPLETIONS_MESSAGE )
-
-
- def _RunTest( self, test ):
- """
- Method to run a simple completion test and verify the result
-
- Note: uses the .ycm_extra_conf from general_fallback/ which:
- - supports cpp, c and objc
- - requires extra_conf_data containing 'filetype&' = the filetype
-
- this should be sufficient for many standard test cases
-
- test is a dictionary containing:
- 'request': kwargs for BuildRequest
- 'expect': {
- 'response': server response code (e.g. httplib.OK)
- 'data': matcher for the server response json
- }
- """
- self._app.post_json( '/load_extra_conf_file', {
- 'filepath': self._PathToTestFile( 'general_fallback',
- '.ycm_extra_conf.py' ) } )
-
-
- contents = ReadFile( test[ 'request' ][ 'filepath' ] )
-
- def CombineRequest( request, data ):
- kw = request
- request.update( data )
- return self._BuildRequest( **kw )
-
- # Because we aren't testing this command, we *always* ignore errors. This
- # is mainly because we (may) want to test scenarios where the completer
- # throws an exception and the easiest way to do that is to throw from
- # within the FlagsForFile function.
- self._app.post_json( '/event_notification',
- CombineRequest( test[ 'request' ], {
- 'event_name': 'FileReadyToParse',
- 'contents': contents,
- } ),
- expect_errors = True )
-
- # We also ignore errors here, but then we check the response code ourself.
- # This is to allow testing of requests returning errors.
- response = self._app.post_json( '/completions',
- CombineRequest( test[ 'request' ], {
- 'contents': contents
- } ),
- expect_errors = True )
-
- eq_( response.status_code, test[ 'expect' ][ 'response' ] )
-
- assert_that( response.json, test[ 'expect' ][ 'data' ] )
-
-
- def ForcedWithNoTrigger_test( self ):
- self._RunTest( {
- 'description': 'semantic completion with force query=DO_SO',
- 'request': {
- 'filetype' : 'cpp',
- 'filepath' : self._PathToTestFile( 'general_fallback',
- 'lang_cpp.cc' ),
- 'line_num' : 54,
- 'column_num': 8,
- 'extra_conf_data': { '&filetype': 'cpp' },
- 'force_semantic': True,
- },
- 'expect': {
- 'response': http.client.OK,
- 'data': has_entries( {
- 'completions': contains(
- self._CompletionEntryMatcher( 'DO_SOMETHING_TO', 'void' ),
- self._CompletionEntryMatcher( 'DO_SOMETHING_WITH', 'void' ),
- ),
- 'errors': empty(),
- } )
- },
- } )
-
-
- def Fallback_NoSuggestions_test( self ):
- # TESTCASE1 (general_fallback/lang_c.c)
- self._RunTest( {
- 'description': 'Triggered, fallback but no query so no completions',
- 'request': {
- 'filetype' : 'c',
- 'filepath' : self._PathToTestFile( 'general_fallback', 'lang_c.c' ),
- 'line_num' : 29,
- 'column_num': 21,
- 'extra_conf_data': { '&filetype': 'c' },
- 'force_semantic': False,
- },
- 'expect': {
- 'response': http.client.OK,
- 'data': has_entries( {
- 'completions': empty(),
- 'errors': has_item( self._ErrorMatcher( RuntimeError,
- NO_COMPLETIONS_MESSAGE ) ),
- } )
- },
- } )
-
-
- def Fallback_NoSuggestions_MinimumCharaceters_test( self ):
- # TESTCASE1 (general_fallback/lang_cpp.cc)
- self._RunTest( {
- 'description': 'fallback general completion obeys min chars setting '
- ' (query="a")',
- 'request': {
- 'filetype' : 'cpp',
- 'filepath' : self._PathToTestFile( 'general_fallback',
- 'lang_cpp.cc' ),
- 'line_num' : 21,
- 'column_num': 22,
- 'extra_conf_data': { '&filetype': 'cpp' },
- 'force_semantic': False,
- },
- 'expect': {
- 'response': http.client.OK,
- 'data': has_entries( {
- 'completions': empty(),
- 'errors': has_item( self._no_completions_error ),
- } )
- },
- } )
-
-
- def Fallback_Suggestions_test( self ):
- # TESTCASE1 (general_fallback/lang_c.c)
- self._RunTest( {
- 'description': '. after macro with some query text (.a_)',
- 'request': {
- 'filetype' : 'c',
- 'filepath' : self._PathToTestFile( 'general_fallback', 'lang_c.c' ),
- 'line_num' : 29,
- 'column_num': 23,
- 'extra_conf_data': { '&filetype': 'c' },
- 'force_semantic': False,
- },
- 'expect': {
- 'response': http.client.OK,
- 'data': has_entries( {
- 'completions': has_item( self._CompletionEntryMatcher( 'a_parameter',
- '[ID]' ) ),
- 'errors': has_item( self._no_completions_error ),
- } )
- },
- } )
-
-
- def Fallback_Exception_test( self ):
- # TESTCASE4 (general_fallback/lang_c.c)
- # extra conf throws exception
- self._RunTest( {
- 'description': '. on struct returns identifier because of error',
- 'request': {
- 'filetype' : 'c',
- 'filepath' : self._PathToTestFile( 'general_fallback', 'lang_c.c' ),
- 'line_num' : 62,
- 'column_num': 20,
- 'extra_conf_data': { '&filetype': 'c', 'throw': 'testy' },
- 'force_semantic': False,
- },
- 'expect': {
- 'response': http.client.OK,
- 'data': has_entries( {
- 'completions': contains(
- self._CompletionEntryMatcher( 'a_parameter', '[ID]' ),
- self._CompletionEntryMatcher( 'another_parameter', '[ID]' ),
- ),
- 'errors': has_item( self._ErrorMatcher( ValueError, 'testy' ) )
- } )
- },
- } )
-
-
- def Forced_NoFallback_test( self ):
- # TESTCASE2 (general_fallback/lang_c.c)
- self._RunTest( {
- 'description': '-> after macro with forced semantic',
- 'request': {
- 'filetype' : 'c',
- 'filepath' : self._PathToTestFile( 'general_fallback', 'lang_c.c' ),
- 'line_num' : 41,
- 'column_num': 30,
- 'extra_conf_data': { '&filetype': 'c' },
- 'force_semantic': True,
- },
- 'expect': {
- 'response': http.client.INTERNAL_SERVER_ERROR,
- 'data': self._no_completions_error,
- },
- } )
-
-
- def FilteredNoResults_Fallback_test( self ):
- # no errors because the semantic completer returned results, but they
- # were filtered out by the query, so this is considered working OK
- # (whereas no completions from the semantic engine is considered an
- # error)
-
- # TESTCASE5 (general_fallback/lang_cpp.cc)
- self._RunTest( {
- 'description': '. on struct returns IDs after query=do_',
- 'request': {
- 'filetype': 'c',
- 'filepath': self._PathToTestFile( 'general_fallback', 'lang_c.c' ),
- 'line_num': 71,
- 'column_num': 18,
- 'extra_conf_data': { '&filetype': 'c' },
- 'force_semantic': False,
- },
- 'expect': {
- 'response': http.client.OK,
- 'data': has_entries( {
- 'completions': contains_inanyorder(
- # do_ is an identifier because it is already in the file when we
- # load it
- self._CompletionEntryMatcher( 'do_', '[ID]' ),
- self._CompletionEntryMatcher( 'do_something', '[ID]' ),
- self._CompletionEntryMatcher( 'do_another_thing', '[ID]' ),
- ),
- 'errors': empty()
- } )
- },
- } )
-
-
- def WorksWithExplicitFlags_test( self ):
- self._app.post_json(
- '/ignore_extra_conf_file',
- { 'filepath': self._PathToTestFile( '.ycm_extra_conf.py' ) } )
- contents = """
+NO_COMPLETIONS_ERROR = ErrorMatcher( RuntimeError, NO_COMPLETIONS_MESSAGE )
+
+
+def RunTest( app, test ):
+ """
+ Method to run a simple completion test and verify the result
+
+ Note: uses the .ycm_extra_conf from general_fallback/ which:
+ - supports cpp, c and objc
+ - requires extra_conf_data containing 'filetype&' = the filetype
+
+ this should be sufficient for many standard test cases
+
+ test is a dictionary containing:
+ 'request': kwargs for BuildRequest
+ 'expect': {
+ 'response': server response code (e.g. httplib.OK)
+ 'data': matcher for the server response json
+ }
+ """
+ app.post_json( '/load_extra_conf_file', {
+ 'filepath': PathToTestFile( 'general_fallback',
+ '.ycm_extra_conf.py' ) } )
+
+
+ contents = ReadFile( test[ 'request' ][ 'filepath' ] )
+
+ def CombineRequest( request, data ):
+ kw = request
+ request.update( data )
+ return BuildRequest( **kw )
+
+ # Because we aren't testing this command, we *always* ignore errors. This
+ # is mainly because we (may) want to test scenarios where the completer
+ # throws an exception and the easiest way to do that is to throw from
+ # within the FlagsForFile function.
+ app.post_json( '/event_notification',
+ CombineRequest( test[ 'request' ], {
+ 'event_name': 'FileReadyToParse',
+ 'contents': contents,
+ } ),
+ expect_errors = True )
+
+ # We also ignore errors here, but then we check the response code ourself.
+ # This is to allow testing of requests returning errors.
+ response = app.post_json( '/completions',
+ CombineRequest( test[ 'request' ], {
+ 'contents': contents
+ } ),
+ expect_errors = True )
+
+ eq_( response.status_code, test[ 'expect' ][ 'response' ] )
+
+ assert_that( response.json, test[ 'expect' ][ 'data' ] )
+
+
+@SharedYcmd
+def GetCompletions_ForcedWithNoTrigger_test( app ):
+ RunTest( app, {
+ 'description': 'semantic completion with force query=DO_SO',
+ 'request': {
+ 'filetype' : 'cpp',
+ 'filepath' : PathToTestFile( 'general_fallback',
+ 'lang_cpp.cc' ),
+ 'line_num' : 54,
+ 'column_num': 8,
+ 'extra_conf_data': { '&filetype': 'cpp' },
+ 'force_semantic': True,
+ },
+ 'expect': {
+ 'response': http.client.OK,
+ 'data': has_entries( {
+ 'completions': contains(
+ CompletionEntryMatcher( 'DO_SOMETHING_TO', 'void' ),
+ CompletionEntryMatcher( 'DO_SOMETHING_WITH', 'void' ),
+ ),
+ 'errors': empty(),
+ } )
+ },
+ } )
+
+
+@SharedYcmd
+def GetCompletions_Fallback_NoSuggestions_test( app ):
+ # TESTCASE1 (general_fallback/lang_c.c)
+ RunTest( app, {
+ 'description': 'Triggered, fallback but no query so no completions',
+ 'request': {
+ 'filetype' : 'c',
+ 'filepath' : PathToTestFile( 'general_fallback', 'lang_c.c' ),
+ 'line_num' : 29,
+ 'column_num': 21,
+ 'extra_conf_data': { '&filetype': 'c' },
+ 'force_semantic': False,
+ },
+ 'expect': {
+ 'response': http.client.OK,
+ 'data': has_entries( {
+ 'completions': empty(),
+ 'errors': has_item( NO_COMPLETIONS_ERROR ),
+ } )
+ },
+ } )
+
+
+@SharedYcmd
+def GetCompletions_Fallback_NoSuggestions_MinimumCharaceters_test( app ):
+ # TESTCASE1 (general_fallback/lang_cpp.cc)
+ RunTest( app, {
+ 'description': 'fallback general completion obeys min chars setting '
+ ' (query="a")',
+ 'request': {
+ 'filetype' : 'cpp',
+ 'filepath' : PathToTestFile( 'general_fallback',
+ 'lang_cpp.cc' ),
+ 'line_num' : 21,
+ 'column_num': 22,
+ 'extra_conf_data': { '&filetype': 'cpp' },
+ 'force_semantic': False,
+ },
+ 'expect': {
+ 'response': http.client.OK,
+ 'data': has_entries( {
+ 'completions': empty(),
+ 'errors': has_item( NO_COMPLETIONS_ERROR ),
+ } )
+ },
+ } )
+
+
+@SharedYcmd
+def GetCompletions_Fallback_Suggestions_test( app ):
+ # TESTCASE1 (general_fallback/lang_c.c)
+ RunTest( app, {
+ 'description': '. after macro with some query text (.a_)',
+ 'request': {
+ 'filetype' : 'c',
+ 'filepath' : PathToTestFile( 'general_fallback', 'lang_c.c' ),
+ 'line_num' : 29,
+ 'column_num': 23,
+ 'extra_conf_data': { '&filetype': 'c' },
+ 'force_semantic': False,
+ },
+ 'expect': {
+ 'response': http.client.OK,
+ 'data': has_entries( {
+ 'completions': has_item( CompletionEntryMatcher( 'a_parameter',
+ '[ID]' ) ),
+ 'errors': has_item( NO_COMPLETIONS_ERROR ),
+ } )
+ },
+ } )
+
+
+@SharedYcmd
+def GetCompletions_Fallback_Exception_test( app ):
+ # TESTCASE4 (general_fallback/lang_c.c)
+ # extra conf throws exception
+ RunTest( app, {
+ 'description': '. on struct returns identifier because of error',
+ 'request': {
+ 'filetype' : 'c',
+ 'filepath' : PathToTestFile( 'general_fallback', 'lang_c.c' ),
+ 'line_num' : 62,
+ 'column_num': 20,
+ 'extra_conf_data': { '&filetype': 'c', 'throw': 'testy' },
+ 'force_semantic': False,
+ },
+ 'expect': {
+ 'response': http.client.OK,
+ 'data': has_entries( {
+ 'completions': contains(
+ CompletionEntryMatcher( 'a_parameter', '[ID]' ),
+ CompletionEntryMatcher( 'another_parameter', '[ID]' ),
+ ),
+ 'errors': has_item( ErrorMatcher( ValueError, 'testy' ) )
+ } )
+ },
+ } )
+
+
+@SharedYcmd
+def GetCompletions_Forced_NoFallback_test( app ):
+ # TESTCASE2 (general_fallback/lang_c.c)
+ RunTest( app, {
+ 'description': '-> after macro with forced semantic',
+ 'request': {
+ 'filetype' : 'c',
+ 'filepath' : PathToTestFile( 'general_fallback', 'lang_c.c' ),
+ 'line_num' : 41,
+ 'column_num': 30,
+ 'extra_conf_data': { '&filetype': 'c' },
+ 'force_semantic': True,
+ },
+ 'expect': {
+ 'response': http.client.INTERNAL_SERVER_ERROR,
+ 'data': NO_COMPLETIONS_ERROR,
+ },
+ } )
+
+
+@SharedYcmd
+def GetCompletions_FilteredNoResults_Fallback_test( app ):
+ # no errors because the semantic completer returned results, but they
+ # were filtered out by the query, so this is considered working OK
+ # (whereas no completions from the semantic engine is considered an
+ # error)
+
+ # TESTCASE5 (general_fallback/lang_cpp.cc)
+ RunTest( app, {
+ 'description': '. on struct returns IDs after query=do_',
+ 'request': {
+ 'filetype': 'c',
+ 'filepath': PathToTestFile( 'general_fallback', 'lang_c.c' ),
+ 'line_num': 71,
+ 'column_num': 18,
+ 'extra_conf_data': { '&filetype': 'c' },
+ 'force_semantic': False,
+ },
+ 'expect': {
+ 'response': http.client.OK,
+ 'data': has_entries( {
+ 'completions': contains_inanyorder(
+ # do_ is an identifier because it is already in the file when we
+ # load it
+ CompletionEntryMatcher( 'do_', '[ID]' ),
+ CompletionEntryMatcher( 'do_something', '[ID]' ),
+ CompletionEntryMatcher( 'do_another_thing', '[ID]' ),
+ ),
+ 'errors': empty()
+ } )
+ },
+ } )
+
+
+@SharedYcmd
+def GetCompletions_WorksWithExplicitFlags_test( app ):
+ app.post_json(
+ '/ignore_extra_conf_file',
+ { 'filepath': PathToTestFile( '.ycm_extra_conf.py' ) } )
+ contents = """
struct Foo {
int x;
int y;
@@ -287,28 +289,28 @@ def WorksWithExplicitFlags_test( self ):
}
"""
- completion_data = self._BuildRequest( filepath = '/foo.cpp',
- filetype = 'cpp',
- contents = contents,
- line_num = 11,
- column_num = 7,
- compilation_flags = ['-x', 'c++'] )
-
- response_data = self._app.post_json( '/completions', completion_data ).json
- assert_that( response_data[ 'completions'],
- has_items( self._CompletionEntryMatcher( 'c' ),
- self._CompletionEntryMatcher( 'x' ),
- self._CompletionEntryMatcher( 'y' ) ) )
- eq_( 7, response_data[ 'completion_start_column' ] )
-
-
- def NoCompletionsWhenAutoTriggerOff_test( self ):
- with self.UserOption( 'auto_trigger', False ):
- self._app = TestApp( handlers.app )
- self._app.post_json(
- '/ignore_extra_conf_file',
- { 'filepath': self._PathToTestFile( '.ycm_extra_conf.py' ) } )
- contents = """
+ completion_data = BuildRequest( filepath = '/foo.cpp',
+ filetype = 'cpp',
+ contents = contents,
+ line_num = 11,
+ column_num = 7,
+ compilation_flags = ['-x', 'c++'] )
+
+ response_data = app.post_json( '/completions', completion_data ).json
+ assert_that( response_data[ 'completions'],
+ has_items( CompletionEntryMatcher( 'c' ),
+ CompletionEntryMatcher( 'x' ),
+ CompletionEntryMatcher( 'y' ) ) )
+ eq_( 7, response_data[ 'completion_start_column' ] )
+
+
+@SharedYcmd
+def GetCompletions_NoCompletionsWhenAutoTriggerOff_test( app ):
+ with UserOption( 'auto_trigger', False ):
+ app.post_json(
+ '/ignore_extra_conf_file',
+ { 'filepath': PathToTestFile( '.ycm_extra_conf.py' ) } )
+ contents = """
struct Foo {
int x;
int y;
@@ -322,97 +324,101 @@ def NoCompletionsWhenAutoTriggerOff_test( self ):
}
"""
- completion_data = self._BuildRequest( filepath = '/foo.cpp',
- filetype = 'cpp',
- contents = contents,
- line_num = 11,
- column_num = 7,
- compilation_flags = ['-x', 'c++'] )
-
- results = self._app.post_json( '/completions',
- completion_data ).json[ 'completions' ]
- assert_that( results, empty() )
-
-
- def UnknownExtraConfException_test( self ):
- filepath = self._PathToTestFile( 'basic.cpp' )
- completion_data = self._BuildRequest( filepath = filepath,
- filetype = 'cpp',
- contents = ReadFile( filepath ),
- line_num = 11,
- column_num = 7,
- force_semantic = True )
-
- response = self._app.post_json( '/completions',
- completion_data,
- expect_errors = True )
-
- eq_( response.status_code, http.client.INTERNAL_SERVER_ERROR )
- assert_that( response.json,
- has_entry( 'exception',
- has_entry( 'TYPE', UnknownExtraConf.__name__ ) ) )
-
- self._app.post_json(
- '/ignore_extra_conf_file',
- { 'filepath': self._PathToTestFile( '.ycm_extra_conf.py' ) } )
-
- response = self._app.post_json( '/completions',
- completion_data,
- expect_errors = True )
-
- eq_( response.status_code, http.client.INTERNAL_SERVER_ERROR )
- assert_that( response.json,
- has_entry( 'exception',
- has_entry( 'TYPE',
- NoExtraConfDetected.__name__ ) ) )
-
-
- def WorksWhenExtraConfExplicitlyAllowed_test( self ):
- self._app.post_json(
- '/load_extra_conf_file',
- { 'filepath': self._PathToTestFile( '.ycm_extra_conf.py' ) } )
-
- filepath = self._PathToTestFile( 'basic.cpp' )
- completion_data = self._BuildRequest( filepath = filepath,
- filetype = 'cpp',
- contents = ReadFile( filepath ),
- line_num = 11,
- column_num = 7 )
-
- results = self._app.post_json( '/completions',
- completion_data ).json[ 'completions' ]
- assert_that( results, has_items( self._CompletionEntryMatcher( 'c' ),
- self._CompletionEntryMatcher( 'x' ),
- self._CompletionEntryMatcher( 'y' ) ) )
-
-
- def ExceptionWhenNoFlagsFromExtraConf_test( self ):
- self._app.post_json(
- '/load_extra_conf_file',
- { 'filepath': self._PathToTestFile( 'noflags',
- '.ycm_extra_conf.py' ) } )
-
- filepath = self._PathToTestFile( 'noflags', 'basic.cpp' )
-
- completion_data = self._BuildRequest( filepath = filepath,
- filetype = 'cpp',
- contents = ReadFile( filepath ),
- line_num = 11,
- column_num = 7,
- force_semantic = True )
-
- response = self._app.post_json( '/completions',
- completion_data,
- expect_errors = True )
- eq_( response.status_code, http.client.INTERNAL_SERVER_ERROR )
-
- assert_that( response.json,
- has_entry( 'exception',
- has_entry( 'TYPE', RuntimeError.__name__ ) ) )
-
-
- def ForceSemantic_OnlyFileteredCompletions_test( self ):
- contents = """
+ completion_data = BuildRequest( filepath = '/foo.cpp',
+ filetype = 'cpp',
+ contents = contents,
+ line_num = 11,
+ column_num = 7,
+ compilation_flags = ['-x', 'c++'] )
+
+ results = app.post_json( '/completions',
+ completion_data ).json[ 'completions' ]
+ assert_that( results, empty() )
+
+
+@IsolatedYcmd
+def GetCompletions_UnknownExtraConfException_test( app ):
+ filepath = PathToTestFile( 'basic.cpp' )
+ completion_data = BuildRequest( filepath = filepath,
+ filetype = 'cpp',
+ contents = ReadFile( filepath ),
+ line_num = 11,
+ column_num = 7,
+ force_semantic = True )
+
+ response = app.post_json( '/completions',
+ completion_data,
+ expect_errors = True )
+
+ eq_( response.status_code, http.client.INTERNAL_SERVER_ERROR )
+ assert_that( response.json,
+ has_entry( 'exception',
+ has_entry( 'TYPE', UnknownExtraConf.__name__ ) ) )
+
+ app.post_json(
+ '/ignore_extra_conf_file',
+ { 'filepath': PathToTestFile( '.ycm_extra_conf.py' ) } )
+
+ response = app.post_json( '/completions',
+ completion_data,
+ expect_errors = True )
+
+ eq_( response.status_code, http.client.INTERNAL_SERVER_ERROR )
+ assert_that( response.json,
+ has_entry( 'exception',
+ has_entry( 'TYPE',
+ NoExtraConfDetected.__name__ ) ) )
+
+
+@IsolatedYcmd
+def GetCompletions_WorksWhenExtraConfExplicitlyAllowed_test( app ):
+ app.post_json(
+ '/load_extra_conf_file',
+ { 'filepath': PathToTestFile( '.ycm_extra_conf.py' ) } )
+
+ filepath = PathToTestFile( 'basic.cpp' )
+ completion_data = BuildRequest( filepath = filepath,
+ filetype = 'cpp',
+ contents = ReadFile( filepath ),
+ line_num = 11,
+ column_num = 7 )
+
+ results = app.post_json( '/completions',
+ completion_data ).json[ 'completions' ]
+ assert_that( results, has_items( CompletionEntryMatcher( 'c' ),
+ CompletionEntryMatcher( 'x' ),
+ CompletionEntryMatcher( 'y' ) ) )
+
+
+@SharedYcmd
+def GetCompletions_ExceptionWhenNoFlagsFromExtraConf_test( app ):
+ app.post_json(
+ '/load_extra_conf_file',
+ { 'filepath': PathToTestFile( 'noflags',
+ '.ycm_extra_conf.py' ) } )
+
+ filepath = PathToTestFile( 'noflags', 'basic.cpp' )
+
+ completion_data = BuildRequest( filepath = filepath,
+ filetype = 'cpp',
+ contents = ReadFile( filepath ),
+ line_num = 11,
+ column_num = 7,
+ force_semantic = True )
+
+ response = app.post_json( '/completions',
+ completion_data,
+ expect_errors = True )
+ eq_( response.status_code, http.client.INTERNAL_SERVER_ERROR )
+
+ assert_that( response.json,
+ has_entry( 'exception',
+ has_entry( 'TYPE', RuntimeError.__name__ ) ) )
+
+
+@SharedYcmd
+def GetCompletions_ForceSemantic_OnlyFilteredCompletions_test( app ):
+ contents = """
int main()
{
int foobar;
@@ -424,64 +430,66 @@ def ForceSemantic_OnlyFileteredCompletions_test( self ):
}
"""
- completion_data = self._BuildRequest( filepath = '/foo.cpp',
- filetype = 'cpp',
- force_semantic = True,
- contents = contents,
- line_num = 9,
- column_num = 8,
- compilation_flags = ['-x', 'c++'] )
-
- results = self._app.post_json( '/completions',
- completion_data ).json[ 'completions' ]
- assert_that(
- results,
- contains_inanyorder( self._CompletionEntryMatcher( 'foobar' ),
- self._CompletionEntryMatcher( 'floozar' ) )
- )
-
-
- def ClientDataGivenToExtraConf_test( self ):
- self._app.post_json(
- '/load_extra_conf_file',
- { 'filepath': self._PathToTestFile( 'client_data',
- '.ycm_extra_conf.py' ) } )
-
- filepath = self._PathToTestFile( 'client_data', 'main.cpp' )
- completion_data = self._BuildRequest( filepath = filepath,
- filetype = 'cpp',
- contents = ReadFile( filepath ),
- line_num = 9,
- column_num = 7,
- extra_conf_data = {
- 'flags': ['-x', 'c++']
- } )
-
- results = self._app.post_json( '/completions',
- completion_data ).json[ 'completions' ]
- assert_that( results, has_item( self._CompletionEntryMatcher( 'x' ) ) )
-
-
- def FilenameCompleter_ClientDataGivenToExtraConf_test( self ):
- self._app.post_json(
- '/load_extra_conf_file',
- { 'filepath': self._PathToTestFile( 'client_data',
- '.ycm_extra_conf.py' ) } )
-
- filepath = self._PathToTestFile( 'client_data', 'include.cpp' )
- completion_data = self._BuildRequest( filepath = filepath,
- filetype = 'cpp',
- contents = ReadFile( filepath ),
- line_num = 1,
- column_num = 11,
- extra_conf_data = {
- 'flags': ['-x', 'c++']
- } )
-
- results = self._app.post_json( '/completions',
- completion_data ).json[ 'completions' ]
- assert_that(
- results,
- has_item( self._CompletionEntryMatcher( 'include.hpp',
- extra_menu_info = '[File]' ) )
- )
+ completion_data = BuildRequest( filepath = '/foo.cpp',
+ filetype = 'cpp',
+ force_semantic = True,
+ contents = contents,
+ line_num = 9,
+ column_num = 8,
+ compilation_flags = ['-x', 'c++'] )
+
+ results = app.post_json( '/completions',
+ completion_data ).json[ 'completions' ]
+ assert_that(
+ results,
+ contains_inanyorder( CompletionEntryMatcher( 'foobar' ),
+ CompletionEntryMatcher( 'floozar' ) )
+ )
+
+
+@SharedYcmd
+def GetCompletions_ClientDataGivenToExtraConf_test( app ):
+ app.post_json(
+ '/load_extra_conf_file',
+ { 'filepath': PathToTestFile( 'client_data',
+ '.ycm_extra_conf.py' ) } )
+
+ filepath = PathToTestFile( 'client_data', 'main.cpp' )
+ completion_data = BuildRequest( filepath = filepath,
+ filetype = 'cpp',
+ contents = ReadFile( filepath ),
+ line_num = 9,
+ column_num = 7,
+ extra_conf_data = {
+ 'flags': ['-x', 'c++']
+ } )
+
+ results = app.post_json( '/completions',
+ completion_data ).json[ 'completions' ]
+ assert_that( results, has_item( CompletionEntryMatcher( 'x' ) ) )
+
+
+@SharedYcmd
+def GetCompletions_FilenameCompleter_ClientDataGivenToExtraConf_test( app ):
+ app.post_json(
+ '/load_extra_conf_file',
+ { 'filepath': PathToTestFile( 'client_data',
+ '.ycm_extra_conf.py' ) } )
+
+ filepath = PathToTestFile( 'client_data', 'include.cpp' )
+ completion_data = BuildRequest( filepath = filepath,
+ filetype = 'cpp',
+ contents = ReadFile( filepath ),
+ line_num = 1,
+ column_num = 11,
+ extra_conf_data = {
+ 'flags': ['-x', 'c++']
+ } )
+
+ results = app.post_json( '/completions',
+ completion_data ).json[ 'completions' ]
+ assert_that(
+ results,
+ has_item( CompletionEntryMatcher( 'include.hpp',
+ extra_menu_info = '[File]' ) )
+ )
diff --git a/ycmd/tests/clang/subcommands_test.py b/ycmd/tests/clang/subcommands_test.py
--- a/ycmd/tests/clang/subcommands_test.py
+++ b/ycmd/tests/clang/subcommands_test.py
@@ -23,692 +23,698 @@
standard_library.install_aliases()
from builtins import * # noqa
-from webtest import AppError
-from nose.tools import eq_
from hamcrest import ( assert_that, calling, contains, equal_to,
has_entries, raises )
-from ycmd.completers.cpp.clang_completer import NO_DOCUMENTATION_MESSAGE
-from .clang_handlers_test import Clang_Handlers_test
-from ycmd.utils import ReadFile
+from nose.tools import eq_
from pprint import pprint
-import os.path
+from webtest import AppError
import http.client
+import os.path
+from ycmd.completers.cpp.clang_completer import NO_DOCUMENTATION_MESSAGE
+from ycmd.tests.clang import PathToTestFile, SharedYcmd
+from ycmd.tests.test_utils import BuildRequest, ErrorMatcher
+from ycmd.utils import ReadFile
-class Clang_Subcommands_test( Clang_Handlers_test ):
-
- def GoTo_ZeroBasedLineAndColumn_test( self ):
- contents = ReadFile( self._PathToTestFile(
- 'GoTo_Clang_ZeroBasedLineAndColumn_test.cc' ) )
-
- goto_data = self._BuildRequest( completer_target = 'filetype_default',
- command_arguments = ['GoToDefinition'],
- compilation_flags = ['-x', 'c++'],
- line_num = 10,
- column_num = 3,
- contents = contents,
- filetype = 'cpp' )
-
- eq_( {
- 'filepath': os.path.abspath( '/foo' ),
- 'line_num': 2,
- 'column_num': 8
- }, self._app.post_json( '/run_completer_command', goto_data ).json )
-
-
- def _GoTo_all( self, filename, command, test ):
- contents = ReadFile( self._PathToTestFile( filename ) )
- common_request = {
- 'completer_target' : 'filetype_default',
- 'command_arguments': command,
- 'compilation_flags': ['-x',
- 'c++'],
- 'line_num' : 10,
- 'column_num' : 3,
- 'contents' : contents,
- 'filetype' : 'cpp'
- }
- common_response = {
- 'filepath': os.path.abspath( '/foo' ),
- }
-
- request = common_request
- request.update( {
- 'line_num' : test['request'][0],
- 'column_num': test['request'][1],
- })
- response = common_response
- response.update({
- 'line_num' : test['response'][0],
- 'column_num': test['response'][1],
- })
-
- goto_data = self._BuildRequest( **request )
-
- eq_( response,
- self._app.post_json( '/run_completer_command', goto_data ).json )
-
-
- def GoTo_all_test( self ):
- # GoToDeclaration
- tests = [
- # Local::x -> declaration of x
- { 'request': [23, 21], 'response': [ 4, 9] },
- # Local::in_line -> declaration of Local::in_line
- { 'request': [24, 26], 'response': [ 6, 10] },
- # Local -> declaration of Local
- { 'request': [24, 16], 'response': [ 2, 11] },
- # Local::out_of_line -> declaration of Local::out_of_line
- { 'request': [25, 27], 'response': [11, 10] },
- # GoToDeclaration on definition of out_of_line moves to declaration
- { 'request': [14, 13], 'response': [11, 10] },
- # main -> declaration of main
- { 'request': [21, 7], 'response': [19, 5] },
- ]
-
- for test in tests:
- yield ( self._GoTo_all,
- 'GoTo_all_Clang_test.cc',
- [ 'GoToDeclaration' ],
- test )
-
- # GoToDefinition - identical to GoToDeclaration
- #
- # The semantics of this seem the wrong way round to me. GoToDefinition
- # should go to where a method is implemented, not where it is declared.
- #
- tests = [
- # Local::x -> declaration of x
- { 'request': [23, 21], 'response': [ 4, 9] },
- # Local::in_line -> declaration of Local::in_line
- { 'request': [24, 26], 'response': [ 6, 10] },
- # Local -> declaration of Local
- { 'request': [24, 16], 'response': [ 2, 11] },
- # sic: Local::out_of_line -> definition of Local::out_of_line
- { 'request': [25, 27], 'response': [14, 13] }, # sic
- # sic: GoToDeclaration on definition of out_of_line moves to itself
- { 'request': [14, 13], 'response': [14, 13] }, # sic
- # main -> definition of main (not declaration)
- { 'request': [21, 7], 'response': [21, 5] }, # sic
- ]
-
- for test in tests:
- yield ( self._GoTo_all,
- 'GoTo_all_Clang_test.cc',
- [ 'GoToDefinition' ],
- test )
-
- # GoTo - identical to GoToDeclaration
- #
- # The semantics of this seem the wrong way round to me. GoToDefinition
- # should go to where a method is implemented, not where it is declared.
- #
- tests = [
- # Local::x -> declaration of x
- { 'request': [23, 21], 'response': [ 4, 9] },
- # Local::in_line -> declaration of Local::in_line
- { 'request': [24, 26], 'response': [ 6, 10] },
- # Local -> declaration of Local
- { 'request': [24, 16], 'response': [ 2, 11] },
- # sic: Local::out_of_line -> definition of Local::out_of_line
- { 'request': [25, 27], 'response': [14, 13] }, # sic
- # sic: GoToDeclaration on definition of out_of_line moves to itself
- { 'request': [14, 13], 'response': [14, 13] }, # sic
- # main -> definition of main (not declaration)
- { 'request': [21, 7], 'response': [21, 5] }, # sic
- ]
-
- for test in tests:
- yield ( self._GoTo_all,
- 'GoTo_all_Clang_test.cc',
- [ 'GoTo' ],
- test )
-
- # GoToImprecise - identical to GoToDeclaration
- #
- # The semantics of this seem the wrong way round to me. GoToDefinition
- # should go to where a method is implemented, not where it is declared.
- #
- tests = [
- # Local::x -> declaration of x
- { 'request': [23, 21], 'response': [ 4, 9] },
- # Local::in_line -> declaration of Local::in_line
- { 'request': [24, 26], 'response': [ 6, 10] },
- # Local -> declaration of Local
- { 'request': [24, 16], 'response': [ 2, 11] },
- # sic: Local::out_of_line -> definition of Local::out_of_line
- { 'request': [25, 27], 'response': [14, 13] }, # sic
- # sic: GoToDeclaration on definition of out_of_line moves to itself
- { 'request': [14, 13], 'response': [14, 13] }, # sic
- # main -> definition of main (not declaration)
- { 'request': [21, 7], 'response': [21, 5] }, # sic
- ]
-
- for test in tests:
- yield ( self._GoTo_all,
- 'GoTo_all_Clang_test.cc',
- [ 'GoToImprecise' ],
- test )
-
-
- def _GoToInclude( self, command, test ):
- self._app.post_json(
- '/load_extra_conf_file',
- { 'filepath': self._PathToTestFile( 'test-include',
- '.ycm_extra_conf.py' ) } )
-
- filepath = self._PathToTestFile( 'test-include', 'main.cpp' )
- goto_data = self._BuildRequest( filepath = filepath,
- filetype = 'cpp',
- contents = ReadFile( filepath ),
- command_arguments = [ command ],
- line_num = test[ 'request' ][ 0 ],
- column_num = test[ 'request' ][ 1 ] )
-
- response = {
- 'filepath' : self._PathToTestFile( 'test-include', test[ 'response' ] ),
- 'line_num' : 1,
- 'column_num' : 1,
- }
-
- eq_( response,
- self._app.post_json( '/run_completer_command', goto_data ).json )
-
-
- def GoToInclude_test( self ):
- tests = [
- { 'request': [ 1, 1 ], 'response': 'a.hpp' },
- { 'request': [ 2, 1 ], 'response': os.path.join( 'system', 'a.hpp' ) },
- { 'request': [ 3, 1 ], 'response': os.path.join( 'quote', 'b.hpp' ) },
- { 'request': [ 5, 1 ], 'response': os.path.join( 'system', 'c.hpp' ) },
- { 'request': [ 6, 1 ], 'response': os.path.join( 'system', 'c.hpp' ) },
- ]
- for test in tests:
- yield self._GoToInclude, 'GoToInclude', test
- yield self._GoToInclude, 'GoTo', test
- yield self._GoToInclude, 'GoToImprecise', test
-
-
- def GoToInclude_Fail_test( self ):
- test = { 'request': [ 4, 1 ], 'response': '' }
- assert_that(
- calling( self._GoToInclude ).with_args( 'GoToInclude', test ),
- raises( AppError, 'Include file not found.' ) )
- assert_that(
- calling( self._GoToInclude ).with_args( 'GoTo', test ),
- raises( AppError, 'Include file not found.' ) )
- assert_that(
- calling( self._GoToInclude ).with_args( 'GoToImprecise', test ),
- raises( AppError, 'Include file not found.' ) )
-
- test = { 'request': [ 7, 1 ], 'response': '' }
- assert_that(
- calling( self._GoToInclude ).with_args( 'GoToInclude', test ),
- raises( AppError, 'Not an include/import line.' ) )
- assert_that(
- calling( self._GoToInclude ).with_args( 'GoTo', test ),
- raises( AppError, r'Can\\\'t jump to definition or declaration.' ) )
- assert_that(
- calling( self._GoToInclude ).with_args( 'GoToImprecise', test ),
- raises( AppError, r'Can\\\'t jump to definition or declaration.' ) )
-
-
- def _Message( self, filename, test, command):
- contents = ReadFile( self._PathToTestFile( filename ) )
-
- # We use the -fno-delayed-template-parsing flag to not delay
- # parsing of templates on Windows. This is the default on
- # other platforms. See the _ExtraClangFlags function in
- # ycmd/completers/cpp/flags.py file for more information.
- common_args = {
- 'completer_target' : 'filetype_default',
- 'command_arguments': command,
+
+@SharedYcmd
+def Subcommands_GoTo_ZeroBasedLineAndColumn_test( app ):
+ contents = ReadFile( PathToTestFile(
+ 'GoTo_Clang_ZeroBasedLineAndColumn_test.cc' ) )
+
+ goto_data = BuildRequest( completer_target = 'filetype_default',
+ command_arguments = ['GoToDefinition'],
+ compilation_flags = ['-x', 'c++'],
+ line_num = 10,
+ column_num = 3,
+ contents = contents,
+ filetype = 'cpp' )
+
+ eq_( {
+ 'filepath': os.path.abspath( '/foo' ),
+ 'line_num': 2,
+ 'column_num': 8
+ }, app.post_json( '/run_completer_command', goto_data ).json )
+
+
+@SharedYcmd
+def RunGoToTest_all( app, filename, command, test ):
+ contents = ReadFile( PathToTestFile( filename ) )
+ common_request = {
+ 'completer_target' : 'filetype_default',
+ 'command_arguments': command,
+ 'compilation_flags': ['-x',
+ 'c++'],
+ 'line_num' : 10,
+ 'column_num' : 3,
+ 'contents' : contents,
+ 'filetype' : 'cpp'
+ }
+ common_response = {
+ 'filepath': os.path.abspath( '/foo' ),
+ }
+
+ request = common_request
+ request.update( {
+ 'line_num' : test['request'][0],
+ 'column_num': test['request'][1],
+ })
+ response = common_response
+ response.update({
+ 'line_num' : test['response'][0],
+ 'column_num': test['response'][1],
+ })
+
+ goto_data = BuildRequest( **request )
+
+ eq_( response,
+ app.post_json( '/run_completer_command', goto_data ).json )
+
+
+def Subcommands_GoTo_all_test():
+ # GoToDeclaration
+ tests = [
+ # Local::x -> declaration of x
+ { 'request': [23, 21], 'response': [ 4, 9] },
+ # Local::in_line -> declaration of Local::in_line
+ { 'request': [24, 26], 'response': [ 6, 10] },
+ # Local -> declaration of Local
+ { 'request': [24, 16], 'response': [ 2, 11] },
+ # Local::out_of_line -> declaration of Local::out_of_line
+ { 'request': [25, 27], 'response': [11, 10] },
+ # GoToDeclaration on definition of out_of_line moves to declaration
+ { 'request': [14, 13], 'response': [11, 10] },
+ # main -> declaration of main
+ { 'request': [21, 7], 'response': [19, 5] },
+ ]
+
+ for test in tests:
+ yield ( RunGoToTest_all,
+ 'GoTo_all_Clang_test.cc',
+ [ 'GoToDeclaration' ],
+ test )
+
+ # GoToDefinition - identical to GoToDeclaration
+ #
+ # The semantics of this seem the wrong way round to me. GoToDefinition should
+ # go to where a method is implemented, not where it is declared.
+ #
+ tests = [
+ # Local::x -> declaration of x
+ { 'request': [23, 21], 'response': [ 4, 9] },
+ # Local::in_line -> declaration of Local::in_line
+ { 'request': [24, 26], 'response': [ 6, 10] },
+ # Local -> declaration of Local
+ { 'request': [24, 16], 'response': [ 2, 11] },
+ # sic: Local::out_of_line -> definition of Local::out_of_line
+ { 'request': [25, 27], 'response': [14, 13] }, # sic
+ # sic: GoToDeclaration on definition of out_of_line moves to itself
+ { 'request': [14, 13], 'response': [14, 13] }, # sic
+ # main -> definition of main (not declaration)
+ { 'request': [21, 7], 'response': [21, 5] }, # sic
+ ]
+
+ for test in tests:
+ yield ( RunGoToTest_all,
+ 'GoTo_all_Clang_test.cc',
+ [ 'GoToDefinition' ],
+ test )
+
+ # GoTo - identical to GoToDeclaration
+ #
+ # The semantics of this seem the wrong way round to me. GoToDefinition should
+ # go to where a method is implemented, not where it is declared.
+ #
+ tests = [
+ # Local::x -> declaration of x
+ { 'request': [23, 21], 'response': [ 4, 9] },
+ # Local::in_line -> declaration of Local::in_line
+ { 'request': [24, 26], 'response': [ 6, 10] },
+ # Local -> declaration of Local
+ { 'request': [24, 16], 'response': [ 2, 11] },
+ # sic: Local::out_of_line -> definition of Local::out_of_line
+ { 'request': [25, 27], 'response': [14, 13] }, # sic
+ # sic: GoToDeclaration on definition of out_of_line moves to itself
+ { 'request': [14, 13], 'response': [14, 13] }, # sic
+ # main -> definition of main (not declaration)
+ { 'request': [21, 7], 'response': [21, 5] }, # sic
+ ]
+
+ for test in tests:
+ yield ( RunGoToTest_all,
+ 'GoTo_all_Clang_test.cc',
+ [ 'GoTo' ],
+ test )
+
+ # GoToImprecise - identical to GoToDeclaration
+ #
+ # The semantics of this seem the wrong way round to me. GoToDefinition should
+ # go to where a method is implemented, not where it is declared.
+ #
+ tests = [
+ # Local::x -> declaration of x
+ { 'request': [23, 21], 'response': [ 4, 9] },
+ # Local::in_line -> declaration of Local::in_line
+ { 'request': [24, 26], 'response': [ 6, 10] },
+ # Local -> declaration of Local
+ { 'request': [24, 16], 'response': [ 2, 11] },
+ # sic: Local::out_of_line -> definition of Local::out_of_line
+ { 'request': [25, 27], 'response': [14, 13] }, # sic
+ # sic: GoToDeclaration on definition of out_of_line moves to itself
+ { 'request': [14, 13], 'response': [14, 13] }, # sic
+ # main -> definition of main (not declaration)
+ { 'request': [21, 7], 'response': [21, 5] }, # sic
+ ]
+
+ for test in tests:
+ yield ( RunGoToTest_all,
+ 'GoTo_all_Clang_test.cc',
+ [ 'GoToImprecise' ],
+ test )
+
+
+@SharedYcmd
+def RunGoToIncludeTest( app, command, test ):
+ app.post_json(
+ '/load_extra_conf_file',
+ { 'filepath': PathToTestFile( 'test-include',
+ '.ycm_extra_conf.py' ) } )
+
+ filepath = PathToTestFile( 'test-include', 'main.cpp' )
+ goto_data = BuildRequest( filepath = filepath,
+ filetype = 'cpp',
+ contents = ReadFile( filepath ),
+ command_arguments = [ command ],
+ line_num = test[ 'request' ][ 0 ],
+ column_num = test[ 'request' ][ 1 ] )
+
+ response = {
+ 'filepath' : PathToTestFile( 'test-include', test[ 'response' ] ),
+ 'line_num' : 1,
+ 'column_num' : 1,
+ }
+
+ eq_( response,
+ app.post_json( '/run_completer_command', goto_data ).json )
+
+
+def Subcommands_GoToInclude_test():
+ tests = [
+ { 'request': [ 1, 1 ], 'response': 'a.hpp' },
+ { 'request': [ 2, 1 ], 'response': os.path.join( 'system', 'a.hpp' ) },
+ { 'request': [ 3, 1 ], 'response': os.path.join( 'quote', 'b.hpp' ) },
+ { 'request': [ 5, 1 ], 'response': os.path.join( 'system', 'c.hpp' ) },
+ { 'request': [ 6, 1 ], 'response': os.path.join( 'system', 'c.hpp' ) },
+ ]
+ for test in tests:
+ yield RunGoToIncludeTest, 'GoToInclude', test
+ yield RunGoToIncludeTest, 'GoTo', test
+ yield RunGoToIncludeTest, 'GoToImprecise', test
+
+
+def Subcommands_GoToInclude_Fail_test():
+ test = { 'request': [ 4, 1 ], 'response': '' }
+ assert_that(
+ calling( RunGoToIncludeTest ).with_args( 'GoToInclude', test ),
+ raises( AppError, 'Include file not found.' ) )
+ assert_that(
+ calling( RunGoToIncludeTest ).with_args( 'GoTo', test ),
+ raises( AppError, 'Include file not found.' ) )
+ assert_that(
+ calling( RunGoToIncludeTest ).with_args( 'GoToImprecise', test ),
+ raises( AppError, 'Include file not found.' ) )
+
+ test = { 'request': [ 7, 1 ], 'response': '' }
+ assert_that(
+ calling( RunGoToIncludeTest ).with_args( 'GoToInclude', test ),
+ raises( AppError, 'Not an include/import line.' ) )
+ assert_that(
+ calling( RunGoToIncludeTest ).with_args( 'GoTo', test ),
+ raises( AppError, r'Can\\\'t jump to definition or declaration.' ) )
+ assert_that(
+ calling( RunGoToIncludeTest ).with_args( 'GoToImprecise', test ),
+ raises( AppError, r'Can\\\'t jump to definition or declaration.' ) )
+
+
+@SharedYcmd
+def RunGetSemanticTest( app, filename, test, command):
+ contents = ReadFile( PathToTestFile( filename ) )
+
+ # We use the -fno-delayed-template-parsing flag to not delay
+ # parsing of templates on Windows. This is the default on
+ # other platforms. See the _ExtraClangFlags function in
+ # ycmd/completers/cpp/flags.py file for more information.
+ common_args = {
+ 'completer_target' : 'filetype_default',
+ 'command_arguments': command,
+ 'compilation_flags': [ '-x',
+ 'c++',
+ # C++11 flag is needed for lambda functions
+ '-std=c++11',
+ '-fno-delayed-template-parsing' ],
+ 'line_num' : 10,
+ 'column_num' : 3,
+ 'contents' : contents,
+ 'filetype' : 'cpp'
+ }
+
+ args = test[ 0 ]
+ expected = test[ 1 ]
+
+ request = common_args
+ request.update( args )
+
+ request_data = BuildRequest( **request )
+
+ eq_( { 'message': expected },
+ app.post_json( '/run_completer_command', request_data ).json )
+
+
+def Subcommands_GetType_test():
+ tests = [
+ # Basic pod types
+ [{'line_num': 20, 'column_num': 3}, 'Foo'],
+ [{'line_num': 1, 'column_num': 1}, 'Internal error: cursor not valid'],
+ [{'line_num': 12, 'column_num': 2}, 'Foo'],
+ [{'line_num': 12, 'column_num': 8}, 'Foo'],
+ [{'line_num': 12, 'column_num': 9}, 'Foo'],
+ [{'line_num': 12, 'column_num': 10}, 'Foo'],
+ [{'line_num': 13, 'column_num': 3}, 'int'],
+ [{'line_num': 13, 'column_num': 7}, 'int'],
+ [{'line_num': 15, 'column_num': 7}, 'char'],
+
+ # Function
+ [{'line_num': 18, 'column_num': 2}, 'int ()'],
+ [{'line_num': 18, 'column_num': 6}, 'int ()'],
+
+ # Declared and canonical type
+ # On Ns:: (Unknown)
+ [{'line_num': 21, 'column_num': 3}, 'Unknown type'], # sic
+ # On Type (Type)
+ [{'line_num': 21, 'column_num': 8}, 'Type => Ns::BasicType<char>'], # sic
+ # On "a" (Ns::Type)
+ [{'line_num': 21, 'column_num': 15}, 'Ns::Type => Ns::BasicType<char>'],
+ [{'line_num': 22, 'column_num': 13}, 'Ns::Type => Ns::BasicType<char>'],
+
+ # Cursor on decl for refs & pointers
+ [{'line_num': 35, 'column_num': 3}, 'Foo'],
+ [{'line_num': 35, 'column_num': 11}, 'Foo &'],
+ [{'line_num': 35, 'column_num': 15}, 'Foo'],
+ [{'line_num': 36, 'column_num': 3}, 'Foo'],
+ [{'line_num': 36, 'column_num': 11}, 'Foo *'],
+ [{'line_num': 36, 'column_num': 18}, 'Foo'],
+ [{'line_num': 38, 'column_num': 3}, 'const Foo &'],
+ [{'line_num': 38, 'column_num': 16}, 'const Foo &'],
+ [{'line_num': 39, 'column_num': 3}, 'const Foo *'],
+ [{'line_num': 39, 'column_num': 16}, 'const Foo *'],
+
+ # Cursor on usage
+ [{'line_num': 41, 'column_num': 13}, 'const Foo'],
+ [{'line_num': 41, 'column_num': 19}, 'const int'],
+ [{'line_num': 42, 'column_num': 13}, 'const Foo *'],
+ [{'line_num': 42, 'column_num': 20}, 'const int'],
+ [{'line_num': 43, 'column_num': 12}, 'Foo'],
+ [{'line_num': 43, 'column_num': 17}, 'int'],
+ [{'line_num': 44, 'column_num': 12}, 'Foo *'],
+ [{'line_num': 44, 'column_num': 18}, 'int'],
+
+ # Auto behaves strangely (bug in libclang)
+ [{'line_num': 24, 'column_num': 3}, 'auto &'], # sic
+ [{'line_num': 24, 'column_num': 11}, 'auto &'], # sic
+ [{'line_num': 24, 'column_num': 18}, 'Foo'],
+ [{'line_num': 25, 'column_num': 3}, 'auto *'], # sic
+ [{'line_num': 25, 'column_num': 11}, 'auto *'], # sic
+ [{'line_num': 25, 'column_num': 18}, 'Foo'],
+ [{'line_num': 27, 'column_num': 3}, 'const auto &'], # sic
+ [{'line_num': 27, 'column_num': 16}, 'const auto &'], # sic
+ [{'line_num': 28, 'column_num': 3}, 'const auto *'], # sic
+ [{'line_num': 28, 'column_num': 16}, 'const auto *'], # sic
+
+ # Auto sort of works in usage (but canonical types apparently differ)
+ [{'line_num': 30, 'column_num': 14}, 'const Foo => const Foo'], # sic
+ [{'line_num': 30, 'column_num': 21}, 'const int'],
+ [{'line_num': 31, 'column_num': 14}, 'const Foo * => const Foo *'], # sic
+ [{'line_num': 31, 'column_num': 22}, 'const int'],
+ [{'line_num': 32, 'column_num': 13}, 'Foo => Foo'], # sic
+ [{'line_num': 32, 'column_num': 19}, 'int'],
+ [{'line_num': 33, 'column_num': 13}, 'Foo * => Foo *'], # sic
+ [{'line_num': 33, 'column_num': 20}, 'int'],
+ ]
+
+ for test in tests:
+ yield ( RunGetSemanticTest,
+ 'GetType_Clang_test.cc',
+ test,
+ [ 'GetType' ] )
+
+
+def Subcommands_GetParent_test():
+ tests = [
+ [{'line_num': 1, 'column_num': 1}, 'Internal error: cursor not valid'],
+ # Would be file name if we had one:
+ [{'line_num': 2, 'column_num': 8}, '/foo'],
+
+ # The reported scope does not include parents
+ [{'line_num': 3, 'column_num': 11}, 'A'],
+ [{'line_num': 4, 'column_num': 13}, 'B'],
+ [{'line_num': 5, 'column_num': 13}, 'B'],
+ [{'line_num': 9, 'column_num': 17}, 'do_z_inline()'],
+ [{'line_num': 15, 'column_num': 22}, 'do_anything(T &)'],
+ [{'line_num': 19, 'column_num': 9}, 'A'],
+ [{'line_num': 20, 'column_num': 9}, 'A'],
+ [{'line_num': 22, 'column_num': 12}, 'A'],
+ [{'line_num': 23, 'column_num': 5}, 'do_Z_inline()'],
+ [{'line_num': 24, 'column_num': 12}, 'do_Z_inline()'],
+ [{'line_num': 28, 'column_num': 14}, 'A'],
+
+ [{'line_num': 34, 'column_num': 1}, 'do_anything(T &)'],
+ [{'line_num': 39, 'column_num': 1}, 'do_x()'],
+ [{'line_num': 44, 'column_num': 1}, 'do_y()'],
+ [{'line_num': 49, 'column_num': 1}, 'main()'],
+
+ # Lambdas report the name of the variable
+ [{'line_num': 49, 'column_num': 14}, 'l'],
+ [{'line_num': 50, 'column_num': 19}, 'l'],
+ [{'line_num': 51, 'column_num': 16}, 'main()'],
+ ]
+
+ for test in tests:
+ yield ( RunGetSemanticTest,
+ 'GetParent_Clang_test.cc',
+ test,
+ [ 'GetParent' ] )
+
+
+@SharedYcmd
+def RunFixItTest( app, line, column, lang, file_name, check ):
+ contents = ReadFile( PathToTestFile( file_name ) )
+
+ language_options = {
+ 'cpp11': {
'compilation_flags': [ '-x',
'c++',
- # C++11 flag is needed for lambda functions
'-std=c++11',
- '-fno-delayed-template-parsing' ],
- 'line_num' : 10,
- 'column_num' : 3,
- 'contents' : contents,
- 'filetype' : 'cpp'
- }
-
- args = test[ 0 ]
- expected = test[ 1 ]
-
- request = common_args
- request.update( args )
-
- request_data = self._BuildRequest( **request )
-
- eq_( { 'message': expected },
- self._app.post_json( '/run_completer_command', request_data ).json )
-
-
- def GetType_test( self ):
- tests = [
- # Basic pod types
- [{'line_num': 20, 'column_num': 3}, 'Foo'],
- [{'line_num': 1, 'column_num': 1}, 'Internal error: cursor not valid'],
- [{'line_num': 12, 'column_num': 2}, 'Foo'],
- [{'line_num': 12, 'column_num': 8}, 'Foo'],
- [{'line_num': 12, 'column_num': 9}, 'Foo'],
- [{'line_num': 12, 'column_num': 10}, 'Foo'],
- [{'line_num': 13, 'column_num': 3}, 'int'],
- [{'line_num': 13, 'column_num': 7}, 'int'],
- [{'line_num': 15, 'column_num': 7}, 'char'],
-
- # Function
- [{'line_num': 18, 'column_num': 2}, 'int ()'],
- [{'line_num': 18, 'column_num': 6}, 'int ()'],
-
- # Declared and canonical type
- # On Ns:: (Unknown)
- [{'line_num': 21, 'column_num': 3}, 'Unknown type'], # sic
- # On Type (Type)
- [{'line_num': 21, 'column_num': 8}, 'Type => Ns::BasicType<char>'], # sic
- # On "a" (Ns::Type)
- [{'line_num': 21, 'column_num': 15}, 'Ns::Type => Ns::BasicType<char>'],
- [{'line_num': 22, 'column_num': 13}, 'Ns::Type => Ns::BasicType<char>'],
-
- # Cursor on decl for refs & pointers
- [{'line_num': 35, 'column_num': 3}, 'Foo'],
- [{'line_num': 35, 'column_num': 11}, 'Foo &'],
- [{'line_num': 35, 'column_num': 15}, 'Foo'],
- [{'line_num': 36, 'column_num': 3}, 'Foo'],
- [{'line_num': 36, 'column_num': 11}, 'Foo *'],
- [{'line_num': 36, 'column_num': 18}, 'Foo'],
- [{'line_num': 38, 'column_num': 3}, 'const Foo &'],
- [{'line_num': 38, 'column_num': 16}, 'const Foo &'],
- [{'line_num': 39, 'column_num': 3}, 'const Foo *'],
- [{'line_num': 39, 'column_num': 16}, 'const Foo *'],
-
- # Cursor on usage
- [{'line_num': 41, 'column_num': 13}, 'const Foo'],
- [{'line_num': 41, 'column_num': 19}, 'const int'],
- [{'line_num': 42, 'column_num': 13}, 'const Foo *'],
- [{'line_num': 42, 'column_num': 20}, 'const int'],
- [{'line_num': 43, 'column_num': 12}, 'Foo'],
- [{'line_num': 43, 'column_num': 17}, 'int'],
- [{'line_num': 44, 'column_num': 12}, 'Foo *'],
- [{'line_num': 44, 'column_num': 18}, 'int'],
-
- # Auto behaves strangely (bug in libclang)
- [{'line_num': 24, 'column_num': 3}, 'auto &'], # sic
- [{'line_num': 24, 'column_num': 11}, 'auto &'], # sic
- [{'line_num': 24, 'column_num': 18}, 'Foo'],
- [{'line_num': 25, 'column_num': 3}, 'auto *'], # sic
- [{'line_num': 25, 'column_num': 11}, 'auto *'], # sic
- [{'line_num': 25, 'column_num': 18}, 'Foo'],
- [{'line_num': 27, 'column_num': 3}, 'const auto &'], # sic
- [{'line_num': 27, 'column_num': 16}, 'const auto &'], # sic
- [{'line_num': 28, 'column_num': 3}, 'const auto *'], # sic
- [{'line_num': 28, 'column_num': 16}, 'const auto *'], # sic
-
- # Auto sort of works in usage (but canonical types apparently differ)
- [{'line_num': 30, 'column_num': 14}, 'const Foo => const Foo'], # sic
- [{'line_num': 30, 'column_num': 21}, 'const int'],
- [{'line_num': 31, 'column_num': 14}, 'const Foo * => const Foo *'], # sic
- [{'line_num': 31, 'column_num': 22}, 'const int'],
- [{'line_num': 32, 'column_num': 13}, 'Foo => Foo'], # sic
- [{'line_num': 32, 'column_num': 19}, 'int'],
- [{'line_num': 33, 'column_num': 13}, 'Foo * => Foo *'], # sic
- [{'line_num': 33, 'column_num': 20}, 'int'],
- ]
-
- for test in tests:
- yield ( self._Message,
- 'GetType_Clang_test.cc',
- test,
- [ 'GetType' ] )
-
-
- def GetParent_test( self ):
- tests = [
- [{'line_num': 1, 'column_num': 1}, 'Internal error: cursor not valid'],
- # Would be file name if we had one:
- [{'line_num': 2, 'column_num': 8}, '/foo'],
-
- # The reported scope does not include parents
- [{'line_num': 3, 'column_num': 11}, 'A'],
- [{'line_num': 4, 'column_num': 13}, 'B'],
- [{'line_num': 5, 'column_num': 13}, 'B'],
- [{'line_num': 9, 'column_num': 17}, 'do_z_inline()'],
- [{'line_num': 15, 'column_num': 22}, 'do_anything(T &)'],
- [{'line_num': 19, 'column_num': 9}, 'A'],
- [{'line_num': 20, 'column_num': 9}, 'A'],
- [{'line_num': 22, 'column_num': 12}, 'A'],
- [{'line_num': 23, 'column_num': 5}, 'do_Z_inline()'],
- [{'line_num': 24, 'column_num': 12}, 'do_Z_inline()'],
- [{'line_num': 28, 'column_num': 14}, 'A'],
-
- [{'line_num': 34, 'column_num': 1}, 'do_anything(T &)'],
- [{'line_num': 39, 'column_num': 1}, 'do_x()'],
- [{'line_num': 44, 'column_num': 1}, 'do_y()'],
- [{'line_num': 49, 'column_num': 1}, 'main()'],
-
- # Lambdas report the name of the variable
- [{'line_num': 49, 'column_num': 14}, 'l'],
- [{'line_num': 50, 'column_num': 19}, 'l'],
- [{'line_num': 51, 'column_num': 16}, 'main()'],
- ]
-
- for test in tests:
- yield ( self._Message,
- 'GetParent_Clang_test.cc',
- test,
- [ 'GetParent' ] )
-
-
- def _RunFixIt( self, line, column, lang, file_name, check ):
- contents = ReadFile( self._PathToTestFile( file_name ) )
-
- language_options = {
- 'cpp11': {
- 'compilation_flags': [ '-x',
- 'c++',
- '-std=c++11',
- '-Wall',
- '-Wextra',
- '-pedantic' ],
- 'filetype' : 'cpp',
- },
- 'objective-c': {
- 'compilation_flags': [ '-x',
- 'objective-c',
- '-Wall',
- '-Wextra' ],
- 'filetype' : 'objc',
- },
- }
-
- # Build the command arguments from the standard ones and the
- # language-specific arguments.
- args = {
- 'completer_target' : 'filetype_default',
- 'contents' : contents,
- 'command_arguments': [ 'FixIt' ],
- 'line_num' : line,
- 'column_num' : column,
- }
- args.update( language_options[ lang ] )
-
- # get the diagnostics for the file
- event_data = self._BuildRequest( **args )
-
- results = self._app.post_json( '/run_completer_command', event_data ).json
-
- pprint( results )
- check( results )
-
-
- def _FixIt_Check_cpp11_Ins( self, results ):
- # First fixit
- # switch(A()) { // expected-error{{explicit conversion to}}
- assert_that( results, has_entries( {
- 'fixits': contains( has_entries( {
- 'chunks': contains(
- has_entries( {
- 'replacement_text': equal_to('static_cast<int>('),
- 'range': has_entries( {
- 'start': has_entries( { 'line_num': 16, 'column_num': 10 } ),
- 'end' : has_entries( { 'line_num': 16, 'column_num': 10 } ),
- } ),
+ '-Wall',
+ '-Wextra',
+ '-pedantic' ],
+ 'filetype' : 'cpp',
+ },
+ 'objective-c': {
+ 'compilation_flags': [ '-x',
+ 'objective-c',
+ '-Wall',
+ '-Wextra' ],
+ 'filetype' : 'objc',
+ },
+ }
+
+ # Build the command arguments from the standard ones and the language-specific
+ # arguments.
+ args = {
+ 'completer_target' : 'filetype_default',
+ 'contents' : contents,
+ 'command_arguments': [ 'FixIt' ],
+ 'line_num' : line,
+ 'column_num' : column,
+ }
+ args.update( language_options[ lang ] )
+
+ # Get the diagnostics for the file.
+ event_data = BuildRequest( **args )
+
+ results = app.post_json( '/run_completer_command', event_data ).json
+
+ pprint( results )
+ check( results )
+
+
+def FixIt_Check_cpp11_Ins( results ):
+ # First fixit
+ # switch(A()) { // expected-error{{explicit conversion to}}
+ assert_that( results, has_entries( {
+ 'fixits': contains( has_entries( {
+ 'chunks': contains(
+ has_entries( {
+ 'replacement_text': equal_to('static_cast<int>('),
+ 'range': has_entries( {
+ 'start': has_entries( { 'line_num': 16, 'column_num': 10 } ),
+ 'end' : has_entries( { 'line_num': 16, 'column_num': 10 } ),
} ),
- has_entries( {
- 'replacement_text': equal_to(')'),
- 'range': has_entries( {
- 'start': has_entries( { 'line_num': 16, 'column_num': 13 } ),
- 'end' : has_entries( { 'line_num': 16, 'column_num': 13 } ),
- } ),
- } )
- ),
- 'location': has_entries( { 'line_num': 16, 'column_num': 3 } )
- } ) )
+ } ),
+ has_entries( {
+ 'replacement_text': equal_to(')'),
+ 'range': has_entries( {
+ 'start': has_entries( { 'line_num': 16, 'column_num': 13 } ),
+ 'end' : has_entries( { 'line_num': 16, 'column_num': 13 } ),
+ } ),
+ } )
+ ),
+ 'location': has_entries( { 'line_num': 16, 'column_num': 3 } )
} ) )
+ } ) )
- def _FixIt_Check_cpp11_InsMultiLine( self, results ):
- # Similar to _FixIt_Check_cpp11_1 but inserts split across lines
- #
- assert_that( results, has_entries( {
- 'fixits': contains( has_entries( {
- 'chunks': contains(
- has_entries( {
- 'replacement_text': equal_to('static_cast<int>('),
- 'range': has_entries( {
- 'start': has_entries( { 'line_num': 26, 'column_num': 7 } ),
- 'end' : has_entries( { 'line_num': 26, 'column_num': 7 } ),
- } ),
+def FixIt_Check_cpp11_InsMultiLine( results ):
+ # Similar to FixIt_Check_cpp11_1 but inserts split across lines
+ #
+ assert_that( results, has_entries( {
+ 'fixits': contains( has_entries( {
+ 'chunks': contains(
+ has_entries( {
+ 'replacement_text': equal_to('static_cast<int>('),
+ 'range': has_entries( {
+ 'start': has_entries( { 'line_num': 26, 'column_num': 7 } ),
+ 'end' : has_entries( { 'line_num': 26, 'column_num': 7 } ),
} ),
- has_entries( {
- 'replacement_text': equal_to(')'),
- 'range': has_entries( {
- 'start': has_entries( { 'line_num': 28, 'column_num': 2 } ),
- 'end' : has_entries( { 'line_num': 28, 'column_num': 2 } ),
- } ),
- } )
- ),
- 'location': has_entries( { 'line_num': 25, 'column_num': 3 } )
- } ) )
+ } ),
+ has_entries( {
+ 'replacement_text': equal_to(')'),
+ 'range': has_entries( {
+ 'start': has_entries( { 'line_num': 28, 'column_num': 2 } ),
+ 'end' : has_entries( { 'line_num': 28, 'column_num': 2 } ),
+ } ),
+ } )
+ ),
+ 'location': has_entries( { 'line_num': 25, 'column_num': 3 } )
+ } ) )
+ } ) )
+
+
+def FixIt_Check_cpp11_Del( results ):
+ # Removal of ::
+ assert_that( results, has_entries( {
+ 'fixits': contains( has_entries( {
+ 'chunks': contains(
+ has_entries( {
+ 'replacement_text': equal_to(''),
+ 'range': has_entries( {
+ 'start': has_entries( { 'line_num': 35, 'column_num': 7 } ),
+ 'end' : has_entries( { 'line_num': 35, 'column_num': 9 } ),
+ } ),
+ } )
+ ),
+ 'location': has_entries( { 'line_num': 35, 'column_num': 7 } )
+ } ) )
+ } ) )
+
+
+def FixIt_Check_cpp11_Repl( results ):
+ assert_that( results, has_entries( {
+ 'fixits': contains( has_entries( {
+ 'chunks': contains(
+ has_entries( {
+ 'replacement_text': equal_to('foo'),
+ 'range': has_entries( {
+ 'start': has_entries( { 'line_num': 40, 'column_num': 6 } ),
+ 'end' : has_entries( { 'line_num': 40, 'column_num': 9 } ),
+ } ),
+ } )
+ ),
+ 'location': has_entries( { 'line_num': 40, 'column_num': 6 } )
+ } ) )
+ } ) )
+
+
+def FixIt_Check_cpp11_DelAdd( results ):
+ assert_that( results, has_entries( {
+ 'fixits': contains( has_entries( {
+ 'chunks': contains(
+ has_entries( {
+ 'replacement_text': equal_to(''),
+ 'range': has_entries( {
+ 'start': has_entries( { 'line_num': 48, 'column_num': 3 } ),
+ 'end' : has_entries( { 'line_num': 48, 'column_num': 4 } ),
+ } ),
+ } ),
+ has_entries( {
+ 'replacement_text': equal_to('~'),
+ 'range': has_entries( {
+ 'start': has_entries( { 'line_num': 48, 'column_num': 9 } ),
+ 'end' : has_entries( { 'line_num': 48, 'column_num': 9 } ),
+ } ),
+ } ),
+ ),
+ 'location': has_entries( { 'line_num': 48, 'column_num': 3 } )
+ } ) )
+ } ) )
+
+
+def FixIt_Check_objc( results ):
+ assert_that( results, has_entries( {
+ 'fixits': contains( has_entries( {
+ 'chunks': contains(
+ has_entries( {
+ 'replacement_text': equal_to('id'),
+ 'range': has_entries( {
+ 'start': has_entries( { 'line_num': 5, 'column_num': 3 } ),
+ 'end' : has_entries( { 'line_num': 5, 'column_num': 3 } ),
+ } ),
+ } )
+ ),
+ 'location': has_entries( { 'line_num': 5, 'column_num': 3 } )
} ) )
+ } ) )
+
+
+def FixIt_Check_objc_NoFixIt( results ):
+ # and finally, a warning with no fixits
+ assert_that( results, equal_to( { 'fixits': [] } ) )
- def _FixIt_Check_cpp11_Del( self, results ):
- # Removal of ::
- assert_that( results, has_entries( {
- 'fixits': contains( has_entries( {
+def FixIt_Check_cpp11_MultiFirst( results ):
+ assert_that( results, has_entries( {
+ 'fixits': contains(
+ # first fix-it at 54,16
+ has_entries( {
'chunks': contains(
has_entries( {
- 'replacement_text': equal_to(''),
+ 'replacement_text': equal_to('foo'),
'range': has_entries( {
- 'start': has_entries( { 'line_num': 35, 'column_num': 7 } ),
- 'end' : has_entries( { 'line_num': 35, 'column_num': 9 } ),
+ 'start': has_entries( { 'line_num': 54, 'column_num': 16 } ),
+ 'end' : has_entries( { 'line_num': 54, 'column_num': 19 } ),
} ),
} )
),
- 'location': has_entries( { 'line_num': 35, 'column_num': 7 } )
- } ) )
- } ) )
-
-
- def _FixIt_Check_cpp11_Repl( self, results ):
- assert_that( results, has_entries( {
- 'fixits': contains( has_entries( {
+ 'location': has_entries( { 'line_num': 54, 'column_num': 16 } )
+ } ),
+ # second fix-it at 54,52
+ has_entries( {
'chunks': contains(
has_entries( {
- 'replacement_text': equal_to('foo'),
+ 'replacement_text': equal_to(''),
'range': has_entries( {
- 'start': has_entries( { 'line_num': 40, 'column_num': 6 } ),
- 'end' : has_entries( { 'line_num': 40, 'column_num': 9 } ),
+ 'start': has_entries( { 'line_num': 54, 'column_num': 52 } ),
+ 'end' : has_entries( { 'line_num': 54, 'column_num': 53 } ),
} ),
- } )
+ } ),
+ has_entries( {
+ 'replacement_text': equal_to('~'),
+ 'range': has_entries( {
+ 'start': has_entries( { 'line_num': 54, 'column_num': 58 } ),
+ 'end' : has_entries( { 'line_num': 54, 'column_num': 58 } ),
+ } ),
+ } ),
),
- 'location': has_entries( { 'line_num': 40, 'column_num': 6 } )
- } ) )
- } ) )
+ 'location': has_entries( { 'line_num': 54, 'column_num': 52 } )
+ } )
+ )
+ } ) )
- def _FixIt_Check_cpp11_DelAdd( self, results ):
- assert_that( results, has_entries( {
- 'fixits': contains( has_entries( {
+def FixIt_Check_cpp11_MultiSecond( results ):
+ assert_that( results, has_entries( {
+ 'fixits': contains(
+ # second fix-it at 54,52
+ has_entries( {
'chunks': contains(
has_entries( {
'replacement_text': equal_to(''),
'range': has_entries( {
- 'start': has_entries( { 'line_num': 48, 'column_num': 3 } ),
- 'end' : has_entries( { 'line_num': 48, 'column_num': 4 } ),
+ 'start': has_entries( { 'line_num': 54, 'column_num': 52 } ),
+ 'end' : has_entries( { 'line_num': 54, 'column_num': 53 } ),
} ),
} ),
has_entries( {
'replacement_text': equal_to('~'),
'range': has_entries( {
- 'start': has_entries( { 'line_num': 48, 'column_num': 9 } ),
- 'end' : has_entries( { 'line_num': 48, 'column_num': 9 } ),
+ 'start': has_entries( { 'line_num': 54, 'column_num': 58 } ),
+ 'end' : has_entries( { 'line_num': 54, 'column_num': 58 } ),
} ),
} ),
),
- 'location': has_entries( { 'line_num': 48, 'column_num': 3 } )
- } ) )
- } ) )
-
-
- def _FixIt_Check_objc( self, results ):
- assert_that( results, has_entries( {
- 'fixits': contains( has_entries( {
+ 'location': has_entries( { 'line_num': 54, 'column_num': 52 } )
+ } ),
+ # first fix-it at 54,16
+ has_entries( {
'chunks': contains(
has_entries( {
- 'replacement_text': equal_to('id'),
+ 'replacement_text': equal_to('foo'),
'range': has_entries( {
- 'start': has_entries( { 'line_num': 5, 'column_num': 3 } ),
- 'end' : has_entries( { 'line_num': 5, 'column_num': 3 } ),
+ 'start': has_entries( { 'line_num': 54, 'column_num': 16 } ),
+ 'end' : has_entries( { 'line_num': 54, 'column_num': 19 } ),
} ),
} )
),
- 'location': has_entries( { 'line_num': 5, 'column_num': 3 } )
- } ) )
- } ) )
-
-
- def _FixIt_Check_objc_NoFixIt( self, results ):
- # and finally, a warning with no fixits
- assert_that( results, equal_to( { 'fixits': [] } ) )
-
-
- def _FixIt_Check_cpp11_MultiFirst( self, results ):
- assert_that( results, has_entries( {
- 'fixits': contains(
- # first fix-it at 54,16
- has_entries( {
- 'chunks': contains(
- has_entries( {
- 'replacement_text': equal_to('foo'),
- 'range': has_entries( {
- 'start': has_entries( { 'line_num': 54, 'column_num': 16 } ),
- 'end' : has_entries( { 'line_num': 54, 'column_num': 19 } ),
- } ),
- } )
- ),
- 'location': has_entries( { 'line_num': 54, 'column_num': 16 } )
- } ),
- # second fix-it at 54,52
- has_entries( {
- 'chunks': contains(
- has_entries( {
- 'replacement_text': equal_to(''),
- 'range': has_entries( {
- 'start': has_entries( { 'line_num': 54, 'column_num': 52 } ),
- 'end' : has_entries( { 'line_num': 54, 'column_num': 53 } ),
- } ),
- } ),
- has_entries( {
- 'replacement_text': equal_to('~'),
- 'range': has_entries( {
- 'start': has_entries( { 'line_num': 54, 'column_num': 58 } ),
- 'end' : has_entries( { 'line_num': 54, 'column_num': 58 } ),
- } ),
- } ),
- ),
- 'location': has_entries( { 'line_num': 54, 'column_num': 52 } )
- } )
- )
- } ) )
-
-
- def _FixIt_Check_cpp11_MultiSecond( self, results ):
- assert_that( results, has_entries( {
- 'fixits': contains(
- # second fix-it at 54,52
- has_entries( {
- 'chunks': contains(
- has_entries( {
- 'replacement_text': equal_to(''),
- 'range': has_entries( {
- 'start': has_entries( { 'line_num': 54, 'column_num': 52 } ),
- 'end' : has_entries( { 'line_num': 54, 'column_num': 53 } ),
- } ),
- } ),
- has_entries( {
- 'replacement_text': equal_to('~'),
- 'range': has_entries( {
- 'start': has_entries( { 'line_num': 54, 'column_num': 58 } ),
- 'end' : has_entries( { 'line_num': 54, 'column_num': 58 } ),
- } ),
- } ),
- ),
- 'location': has_entries( { 'line_num': 54, 'column_num': 52 } )
- } ),
- # first fix-it at 54,16
- has_entries( {
- 'chunks': contains(
- has_entries( {
- 'replacement_text': equal_to('foo'),
- 'range': has_entries( {
- 'start': has_entries( { 'line_num': 54, 'column_num': 16 } ),
- 'end' : has_entries( { 'line_num': 54, 'column_num': 19 } ),
- } ),
- } )
- ),
- 'location': has_entries( { 'line_num': 54, 'column_num': 16 } )
- } )
- )
- } ) )
-
-
- def FixIt_all_test( self ):
- cfile = 'FixIt_Clang_cpp11.cpp'
- mfile = 'FixIt_Clang_objc.m'
-
- tests = [
- [ 16, 0, 'cpp11', cfile, self._FixIt_Check_cpp11_Ins ],
- [ 16, 1, 'cpp11', cfile, self._FixIt_Check_cpp11_Ins ],
- [ 16, 10, 'cpp11', cfile, self._FixIt_Check_cpp11_Ins ],
- [ 25, 14, 'cpp11', cfile, self._FixIt_Check_cpp11_InsMultiLine ],
- [ 25, 0, 'cpp11', cfile, self._FixIt_Check_cpp11_InsMultiLine ],
- [ 35, 7, 'cpp11', cfile, self._FixIt_Check_cpp11_Del ],
- [ 40, 6, 'cpp11', cfile, self._FixIt_Check_cpp11_Repl ],
- [ 48, 3, 'cpp11', cfile, self._FixIt_Check_cpp11_DelAdd ],
-
- [ 5, 3, 'objective-c', mfile, self._FixIt_Check_objc ],
- [ 7, 1, 'objective-c', mfile, self._FixIt_Check_objc_NoFixIt ],
-
- # multiple errors on a single line; both with fixits
- [ 54, 15, 'cpp11', cfile, self._FixIt_Check_cpp11_MultiFirst ],
- [ 54, 16, 'cpp11', cfile, self._FixIt_Check_cpp11_MultiFirst ],
- [ 54, 16, 'cpp11', cfile, self._FixIt_Check_cpp11_MultiFirst ],
- [ 54, 17, 'cpp11', cfile, self._FixIt_Check_cpp11_MultiFirst ],
- [ 54, 18, 'cpp11', cfile, self._FixIt_Check_cpp11_MultiFirst ],
-
- # should put closest fix-it first?
- [ 54, 51, 'cpp11', cfile, self._FixIt_Check_cpp11_MultiSecond ],
- [ 54, 52, 'cpp11', cfile, self._FixIt_Check_cpp11_MultiSecond ],
- [ 54, 53, 'cpp11', cfile, self._FixIt_Check_cpp11_MultiSecond ],
- ]
-
- for test in tests:
- yield self._RunFixIt, test[0], test[1], test[2], test[3], test[4]
-
-
- def GetDoc_Variable_test( self ):
- filepath = self._PathToTestFile( 'GetDoc_Clang.cc' )
- contents = ReadFile( filepath )
-
- event_data = self._BuildRequest( filepath = filepath,
- filetype = 'cpp',
- compilation_flags = [ '-x', 'c++' ],
- line_num = 70,
- column_num = 24,
- contents = contents,
- command_arguments = [ 'GetDoc' ],
- completer_target = 'filetype_default' )
-
- response = self._app.post_json( '/run_completer_command', event_data ).json
-
- pprint( response )
-
- eq_( response, {
- 'detailed_info': """\
+ 'location': has_entries( { 'line_num': 54, 'column_num': 16 } )
+ } )
+ )
+ } ) )
+
+
+def Subcommands_FixIt_all_test():
+ cfile = 'FixIt_Clang_cpp11.cpp'
+ mfile = 'FixIt_Clang_objc.m'
+
+ tests = [
+ [ 16, 0, 'cpp11', cfile, FixIt_Check_cpp11_Ins ],
+ [ 16, 1, 'cpp11', cfile, FixIt_Check_cpp11_Ins ],
+ [ 16, 10, 'cpp11', cfile, FixIt_Check_cpp11_Ins ],
+ [ 25, 14, 'cpp11', cfile, FixIt_Check_cpp11_InsMultiLine ],
+ [ 25, 0, 'cpp11', cfile, FixIt_Check_cpp11_InsMultiLine ],
+ [ 35, 7, 'cpp11', cfile, FixIt_Check_cpp11_Del ],
+ [ 40, 6, 'cpp11', cfile, FixIt_Check_cpp11_Repl ],
+ [ 48, 3, 'cpp11', cfile, FixIt_Check_cpp11_DelAdd ],
+
+ [ 5, 3, 'objective-c', mfile, FixIt_Check_objc ],
+ [ 7, 1, 'objective-c', mfile, FixIt_Check_objc_NoFixIt ],
+
+ # multiple errors on a single line; both with fixits
+ [ 54, 15, 'cpp11', cfile, FixIt_Check_cpp11_MultiFirst ],
+ [ 54, 16, 'cpp11', cfile, FixIt_Check_cpp11_MultiFirst ],
+ [ 54, 16, 'cpp11', cfile, FixIt_Check_cpp11_MultiFirst ],
+ [ 54, 17, 'cpp11', cfile, FixIt_Check_cpp11_MultiFirst ],
+ [ 54, 18, 'cpp11', cfile, FixIt_Check_cpp11_MultiFirst ],
+
+ # should put closest fix-it first?
+ [ 54, 51, 'cpp11', cfile, FixIt_Check_cpp11_MultiSecond ],
+ [ 54, 52, 'cpp11', cfile, FixIt_Check_cpp11_MultiSecond ],
+ [ 54, 53, 'cpp11', cfile, FixIt_Check_cpp11_MultiSecond ],
+ ]
+
+ for test in tests:
+ yield RunFixItTest, test[0], test[1], test[2], test[3], test[4]
+
+
+@SharedYcmd
+def Subcommands_GetDoc_Variable_test( app ):
+ filepath = PathToTestFile( 'GetDoc_Clang.cc' )
+ contents = ReadFile( filepath )
+
+ event_data = BuildRequest( filepath = filepath,
+ filetype = 'cpp',
+ compilation_flags = [ '-x', 'c++' ],
+ line_num = 70,
+ column_num = 24,
+ contents = contents,
+ command_arguments = [ 'GetDoc' ],
+ completer_target = 'filetype_default' )
+
+ response = app.post_json( '/run_completer_command', event_data ).json
+
+ pprint( response )
+
+ eq_( response, {
+ 'detailed_info': """\
char a_global_variable
This really is a global variable.
Type: char
@@ -719,25 +725,26 @@ def GetDoc_Variable_test( self ):
The first line of comment is the brief.""" } )
- def GetDoc_Method_test( self ):
- filepath = self._PathToTestFile( 'GetDoc_Clang.cc' )
- contents = ReadFile( filepath )
+@SharedYcmd
+def Subcommands_GetDoc_Method_test( app ):
+ filepath = PathToTestFile( 'GetDoc_Clang.cc' )
+ contents = ReadFile( filepath )
- event_data = self._BuildRequest( filepath = filepath,
- filetype = 'cpp',
- compilation_flags = [ '-x', 'c++' ],
- line_num = 22,
- column_num = 13,
- contents = contents,
- command_arguments = [ 'GetDoc' ],
- completer_target = 'filetype_default' )
+ event_data = BuildRequest( filepath = filepath,
+ filetype = 'cpp',
+ compilation_flags = [ '-x', 'c++' ],
+ line_num = 22,
+ column_num = 13,
+ contents = contents,
+ command_arguments = [ 'GetDoc' ],
+ completer_target = 'filetype_default' )
- response = self._app.post_json( '/run_completer_command', event_data ).json
+ response = app.post_json( '/run_completer_command', event_data ).json
- pprint( response )
+ pprint( response )
- eq_( response, {
- 'detailed_info': """\
+ eq_( response, {
+ 'detailed_info': """\
char with_brief()
brevity is for suckers
Type: char ()
@@ -752,25 +759,26 @@ def GetDoc_Method_test( self ):
""" } )
- def GetDoc_Namespace_test( self ):
- filepath = self._PathToTestFile( 'GetDoc_Clang.cc' )
- contents = ReadFile( filepath )
+@SharedYcmd
+def Subcommands_GetDoc_Namespace_test( app ):
+ filepath = PathToTestFile( 'GetDoc_Clang.cc' )
+ contents = ReadFile( filepath )
- event_data = self._BuildRequest( filepath = filepath,
- filetype = 'cpp',
- compilation_flags = [ '-x', 'c++' ],
- line_num = 65,
- column_num = 14,
- contents = contents,
- command_arguments = [ 'GetDoc' ],
- completer_target = 'filetype_default' )
+ event_data = BuildRequest( filepath = filepath,
+ filetype = 'cpp',
+ compilation_flags = [ '-x', 'c++' ],
+ line_num = 65,
+ column_num = 14,
+ contents = contents,
+ command_arguments = [ 'GetDoc' ],
+ completer_target = 'filetype_default' )
- response = self._app.post_json( '/run_completer_command', event_data ).json
+ response = app.post_json( '/run_completer_command', event_data ).json
- pprint( response )
+ pprint( response )
- eq_( response, {
- 'detailed_info': """\
+ eq_( response, {
+ 'detailed_info': """\
namespace Test {}
This is a test namespace
Type:
@@ -779,80 +787,82 @@ def GetDoc_Namespace_test( self ):
This is a test namespace""" } ) # noqa
- def GetDoc_Undocumented_test( self ):
- filepath = self._PathToTestFile( 'GetDoc_Clang.cc' )
- contents = ReadFile( filepath )
+@SharedYcmd
+def Subcommands_GetDoc_Undocumented_test( app ):
+ filepath = PathToTestFile( 'GetDoc_Clang.cc' )
+ contents = ReadFile( filepath )
- event_data = self._BuildRequest( filepath = filepath,
- filetype = 'cpp',
- compilation_flags = [ '-x', 'c++' ],
- line_num = 81,
- column_num = 17,
- contents = contents,
- command_arguments = [ 'GetDoc' ],
- completer_target = 'filetype_default' )
+ event_data = BuildRequest( filepath = filepath,
+ filetype = 'cpp',
+ compilation_flags = [ '-x', 'c++' ],
+ line_num = 81,
+ column_num = 17,
+ contents = contents,
+ command_arguments = [ 'GetDoc' ],
+ completer_target = 'filetype_default' )
- response = self._app.post_json( '/run_completer_command',
- event_data,
- expect_errors = True )
+ response = app.post_json( '/run_completer_command',
+ event_data,
+ expect_errors = True )
- eq_( response.status_code, http.client.INTERNAL_SERVER_ERROR )
+ eq_( response.status_code, http.client.INTERNAL_SERVER_ERROR )
- assert_that( response.json,
- self._ErrorMatcher( ValueError, NO_DOCUMENTATION_MESSAGE ) )
+ assert_that( response.json,
+ ErrorMatcher( ValueError, NO_DOCUMENTATION_MESSAGE ) )
- def GetDoc_NoCursor_test( self ):
- filepath = self._PathToTestFile( 'GetDoc_Clang.cc' )
- contents = ReadFile( filepath )
+@SharedYcmd
+def Subcommands_GetDoc_NoCursor_test( app ):
+ filepath = PathToTestFile( 'GetDoc_Clang.cc' )
+ contents = ReadFile( filepath )
- event_data = self._BuildRequest( filepath = filepath,
- filetype = 'cpp',
- compilation_flags = [ '-x', 'c++' ],
- line_num = 1,
- column_num = 1,
- contents = contents,
- command_arguments = [ 'GetDoc' ],
- completer_target = 'filetype_default' )
+ event_data = BuildRequest( filepath = filepath,
+ filetype = 'cpp',
+ compilation_flags = [ '-x', 'c++' ],
+ line_num = 1,
+ column_num = 1,
+ contents = contents,
+ command_arguments = [ 'GetDoc' ],
+ completer_target = 'filetype_default' )
- response = self._app.post_json( '/run_completer_command',
- event_data,
- expect_errors = True )
+ response = app.post_json( '/run_completer_command',
+ event_data,
+ expect_errors = True )
- eq_( response.status_code, http.client.INTERNAL_SERVER_ERROR )
+ eq_( response.status_code, http.client.INTERNAL_SERVER_ERROR )
- assert_that( response.json,
- self._ErrorMatcher( ValueError, NO_DOCUMENTATION_MESSAGE ) )
+ assert_that( response.json,
+ ErrorMatcher( ValueError, NO_DOCUMENTATION_MESSAGE ) )
- # Following tests repeat the tests above, but without re-parsing the file
- def GetDocQuick_Variable_test( self ):
- filepath = self._PathToTestFile( 'GetDoc_Clang.cc' )
- contents = ReadFile( filepath )
+# Following tests repeat the tests above, but without re-parsing the file
+@SharedYcmd
+def Subcommands_GetDocQuick_Variable_test( app ):
+ filepath = PathToTestFile( 'GetDoc_Clang.cc' )
+ contents = ReadFile( filepath )
- self._app.post_json( '/event_notification',
- self._BuildRequest( filepath = filepath,
- filetype = 'cpp',
- compilation_flags = [ '-x',
- 'c++' ],
- contents = contents,
- event_name = 'FileReadyToParse' ) )
+ app.post_json( '/event_notification',
+ BuildRequest( filepath = filepath,
+ filetype = 'cpp',
+ compilation_flags = [ '-x', 'c++' ],
+ contents = contents,
+ event_name = 'FileReadyToParse' ) )
- event_data = self._BuildRequest( filepath = filepath,
- filetype = 'cpp',
- compilation_flags = [ '-x', 'c++' ],
- line_num = 70,
- column_num = 24,
- contents = contents,
- command_arguments = [ 'GetDocQuick' ],
- completer_target = 'filetype_default' )
+ event_data = BuildRequest( filepath = filepath,
+ filetype = 'cpp',
+ compilation_flags = [ '-x', 'c++' ],
+ line_num = 70,
+ column_num = 24,
+ contents = contents,
+ command_arguments = [ 'GetDocQuick' ],
+ completer_target = 'filetype_default' )
- response = self._app.post_json( '/run_completer_command', event_data ).json
+ response = app.post_json( '/run_completer_command', event_data ).json
- pprint( response )
+ pprint( response )
- eq_( response, {
- 'detailed_info': """\
+ eq_( response, {
+ 'detailed_info': """\
char a_global_variable
This really is a global variable.
Type: char
@@ -863,34 +873,35 @@ def GetDocQuick_Variable_test( self ):
The first line of comment is the brief.""" } )
- def GetDocQuick_Method_test( self ):
- filepath = self._PathToTestFile( 'GetDoc_Clang.cc' )
- contents = ReadFile( filepath )
+@SharedYcmd
+def Subcommands_GetDocQuick_Method_test( app ):
+ filepath = PathToTestFile( 'GetDoc_Clang.cc' )
+ contents = ReadFile( filepath )
- self._app.post_json(
- '/event_notification',
- self._BuildRequest( filepath = filepath,
- filetype = 'cpp',
- compilation_flags = [ '-x', 'c++' ],
- contents = contents,
- event_name = 'FileReadyToParse' )
- )
+ app.post_json(
+ '/event_notification',
+ BuildRequest( filepath = filepath,
+ filetype = 'cpp',
+ compilation_flags = [ '-x', 'c++' ],
+ contents = contents,
+ event_name = 'FileReadyToParse' )
+ )
- event_data = self._BuildRequest( filepath = filepath,
- filetype = 'cpp',
- compilation_flags = [ '-x', 'c++' ],
- line_num = 22,
- column_num = 13,
- contents = contents,
- command_arguments = [ 'GetDocQuick' ],
- completer_target = 'filetype_default' )
+ event_data = BuildRequest( filepath = filepath,
+ filetype = 'cpp',
+ compilation_flags = [ '-x', 'c++' ],
+ line_num = 22,
+ column_num = 13,
+ contents = contents,
+ command_arguments = [ 'GetDocQuick' ],
+ completer_target = 'filetype_default' )
- response = self._app.post_json( '/run_completer_command', event_data ).json
+ response = app.post_json( '/run_completer_command', event_data ).json
- pprint( response )
+ pprint( response )
- eq_( response, {
- 'detailed_info': """\
+ eq_( response, {
+ 'detailed_info': """\
char with_brief()
brevity is for suckers
Type: char ()
@@ -905,34 +916,35 @@ def GetDocQuick_Method_test( self ):
""" } )
- def GetDocQuick_Namespace_test( self ):
- filepath = self._PathToTestFile( 'GetDoc_Clang.cc' )
- contents = ReadFile( filepath )
+@SharedYcmd
+def Subcommands_GetDocQuick_Namespace_test( app ):
+ filepath = PathToTestFile( 'GetDoc_Clang.cc' )
+ contents = ReadFile( filepath )
- self._app.post_json(
- '/event_notification',
- self._BuildRequest( filepath = filepath,
- filetype = 'cpp',
- compilation_flags = [ '-x', 'c++' ],
- contents = contents,
- event_name = 'FileReadyToParse' )
- )
+ app.post_json(
+ '/event_notification',
+ BuildRequest( filepath = filepath,
+ filetype = 'cpp',
+ compilation_flags = [ '-x', 'c++' ],
+ contents = contents,
+ event_name = 'FileReadyToParse' )
+ )
- event_data = self._BuildRequest( filepath = filepath,
- filetype = 'cpp',
- compilation_flags = [ '-x', 'c++' ],
- line_num = 65,
- column_num = 14,
- contents = contents,
- command_arguments = [ 'GetDocQuick' ],
- completer_target = 'filetype_default' )
+ event_data = BuildRequest( filepath = filepath,
+ filetype = 'cpp',
+ compilation_flags = [ '-x', 'c++' ],
+ line_num = 65,
+ column_num = 14,
+ contents = contents,
+ command_arguments = [ 'GetDocQuick' ],
+ completer_target = 'filetype_default' )
- response = self._app.post_json( '/run_completer_command', event_data ).json
+ response = app.post_json( '/run_completer_command', event_data ).json
- pprint( response )
+ pprint( response )
- eq_( response, {
- 'detailed_info': """\
+ eq_( response, {
+ 'detailed_info': """\
namespace Test {}
This is a test namespace
Type:
@@ -941,87 +953,90 @@ def GetDocQuick_Namespace_test( self ):
This is a test namespace""" } ) # noqa
- def GetDocQuick_Undocumented_test( self ):
- filepath = self._PathToTestFile( 'GetDoc_Clang.cc' )
- contents = ReadFile( filepath )
-
- self._app.post_json(
- '/event_notification',
- self._BuildRequest( filepath = filepath,
- filetype = 'cpp',
- compilation_flags = [ '-x', 'c++' ],
- contents = contents,
- event_name = 'FileReadyToParse' )
- )
-
- event_data = self._BuildRequest( filepath = filepath,
- filetype = 'cpp',
- compilation_flags = [ '-x', 'c++' ],
- line_num = 81,
- column_num = 17,
- contents = contents,
- command_arguments = [ 'GetDocQuick' ],
- completer_target = 'filetype_default' )
-
- response = self._app.post_json( '/run_completer_command',
- event_data,
- expect_errors = True )
-
- eq_( response.status_code, http.client.INTERNAL_SERVER_ERROR )
-
- assert_that( response.json,
- self._ErrorMatcher( ValueError, NO_DOCUMENTATION_MESSAGE ) )
-
-
- def GetDocQuick_NoCursor_test( self ):
- filepath = self._PathToTestFile( 'GetDoc_Clang.cc' )
- contents = ReadFile( filepath )
-
- self._app.post_json(
- '/event_notification',
- self._BuildRequest( filepath = filepath,
- filetype = 'cpp',
- compilation_flags = [ '-x', 'c++' ],
- contents = contents,
- event_name = 'FileReadyToParse' )
- )
-
- event_data = self._BuildRequest( filepath = filepath,
- filetype = 'cpp',
- compilation_flags = [ '-x', 'c++' ],
- line_num = 1,
- column_num = 1,
- contents = contents,
- command_arguments = [ 'GetDocQuick' ],
- completer_target = 'filetype_default' )
-
- response = self._app.post_json( '/run_completer_command',
- event_data,
- expect_errors = True )
-
- eq_( response.status_code, http.client.INTERNAL_SERVER_ERROR )
-
- assert_that( response.json,
- self._ErrorMatcher( ValueError, NO_DOCUMENTATION_MESSAGE ) )
-
-
- def GetDocQuick_NoReadyToParse_test( self ):
- filepath = self._PathToTestFile( 'GetDoc_Clang.cc' )
- contents = ReadFile( filepath )
-
- event_data = self._BuildRequest( filepath = filepath,
- filetype = 'cpp',
- compilation_flags = [ '-x', 'c++' ],
- line_num = 11,
- column_num = 18,
- contents = contents,
- command_arguments = [ 'GetDocQuick' ],
- completer_target = 'filetype_default' )
-
- response = self._app.post_json( '/run_completer_command', event_data ).json
-
- eq_( response, {
- 'detailed_info': """\
+@SharedYcmd
+def Subcommands_GetDocQuick_Undocumented_test( app ):
+ filepath = PathToTestFile( 'GetDoc_Clang.cc' )
+ contents = ReadFile( filepath )
+
+ app.post_json(
+ '/event_notification',
+ BuildRequest( filepath = filepath,
+ filetype = 'cpp',
+ compilation_flags = [ '-x', 'c++' ],
+ contents = contents,
+ event_name = 'FileReadyToParse' )
+ )
+
+ event_data = BuildRequest( filepath = filepath,
+ filetype = 'cpp',
+ compilation_flags = [ '-x', 'c++' ],
+ line_num = 81,
+ column_num = 17,
+ contents = contents,
+ command_arguments = [ 'GetDocQuick' ],
+ completer_target = 'filetype_default' )
+
+ response = app.post_json( '/run_completer_command',
+ event_data,
+ expect_errors = True )
+
+ eq_( response.status_code, http.client.INTERNAL_SERVER_ERROR )
+
+ assert_that( response.json,
+ ErrorMatcher( ValueError, NO_DOCUMENTATION_MESSAGE ) )
+
+
+@SharedYcmd
+def Subcommands_GetDocQuick_NoCursor_test( app ):
+ filepath = PathToTestFile( 'GetDoc_Clang.cc' )
+ contents = ReadFile( filepath )
+
+ app.post_json(
+ '/event_notification',
+ BuildRequest( filepath = filepath,
+ filetype = 'cpp',
+ compilation_flags = [ '-x', 'c++' ],
+ contents = contents,
+ event_name = 'FileReadyToParse' )
+ )
+
+ event_data = BuildRequest( filepath = filepath,
+ filetype = 'cpp',
+ compilation_flags = [ '-x', 'c++' ],
+ line_num = 1,
+ column_num = 1,
+ contents = contents,
+ command_arguments = [ 'GetDocQuick' ],
+ completer_target = 'filetype_default' )
+
+ response = app.post_json( '/run_completer_command',
+ event_data,
+ expect_errors = True )
+
+ eq_( response.status_code, http.client.INTERNAL_SERVER_ERROR )
+
+ assert_that( response.json,
+ ErrorMatcher( ValueError, NO_DOCUMENTATION_MESSAGE ) )
+
+
+@SharedYcmd
+def Subcommands_GetDocQuick_NoReadyToParse_test( app ):
+ filepath = PathToTestFile( 'GetDoc_Clang.cc' )
+ contents = ReadFile( filepath )
+
+ event_data = BuildRequest( filepath = filepath,
+ filetype = 'cpp',
+ compilation_flags = [ '-x', 'c++' ],
+ line_num = 11,
+ column_num = 18,
+ contents = contents,
+ command_arguments = [ 'GetDocQuick' ],
+ completer_target = 'filetype_default' )
+
+ response = app.post_json( '/run_completer_command', event_data ).json
+
+ eq_( response, {
+ 'detailed_info': """\
int get_a_global_variable(bool test)
This is a method which is only pretend global
Type: int (bool)
diff --git a/ycmd/tests/cs/__init__.py b/ycmd/tests/cs/__init__.py
--- a/ycmd/tests/cs/__init__.py
+++ b/ycmd/tests/cs/__init__.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2016 ycmd contributors.
+# Copyright (C) 2016 ycmd contributors
#
# This file is part of ycmd.
#
@@ -15,8 +15,136 @@
# You should have received a copy of the GNU General Public License
# along with ycmd. If not, see <http://www.gnu.org/licenses/>.
-from .cs_handlers_test import StopAllOmniSharpServers
+from __future__ import unicode_literals
+from __future__ import print_function
+from __future__ import division
+from __future__ import absolute_import
+from future import standard_library
+standard_library.install_aliases()
+from builtins import * # noqa
+from contextlib import contextmanager
+import functools
+import os
+import time
-def teardownPackage():
- StopAllOmniSharpServers()
+from ycmd import handlers
+from ycmd.tests.test_utils import BuildRequest, SetUpApp
+
+shared_app = None
+shared_filepaths = []
+
+
+def PathToTestFile( *args ):
+ dir_of_current_script = os.path.dirname( os.path.abspath( __file__ ) )
+ return os.path.join( dir_of_current_script, 'testdata', *args )
+
+
+def StartOmniSharpServer( app, filepath ):
+ app.post_json( '/run_completer_command',
+ BuildRequest( completer_target = 'filetype_default',
+ command_arguments = [ "StartServer" ],
+ filepath = filepath,
+ filetype = 'cs' ) )
+
+
+def StopOmniSharpServer( app, filepath ):
+ app.post_json( '/run_completer_command',
+ BuildRequest( completer_target = 'filetype_default',
+ command_arguments = [ 'StopServer' ],
+ filepath = filepath,
+ filetype = 'cs' ) )
+
+
+def WaitUntilOmniSharpServerReady( app, filepath ):
+ retries = 100
+ success = False
+
+ # If running on Travis CI, keep trying forever. Travis will kill the worker
+ # after 10 mins if nothing happens.
+ while retries > 0 or OnTravis():
+ result = app.get( '/ready', { 'subserver': 'cs' } ).json
+ if result:
+ success = True
+ break
+ request = BuildRequest( completer_target = 'filetype_default',
+ command_arguments = [ 'ServerIsRunning' ],
+ filepath = filepath,
+ filetype = 'cs' )
+ result = app.post_json( '/run_completer_command', request ).json
+ if not result:
+ raise RuntimeError( "OmniSharp failed during startup." )
+ time.sleep( 0.2 )
+ retries = retries - 1
+
+ if not success:
+ raise RuntimeError( "Timeout waiting for OmniSharpServer" )
+
+
+def setUpPackage():
+ """Initializes the ycmd server as a WebTest application that will be shared
+ by all tests using the SharedYcmd decorator in this package. Additional
+ configuration that is common to these tests, like starting a semantic
+ subserver, should be done here."""
+ global shared_app
+
+ shared_app = SetUpApp()
+ shared_app.post_json(
+ '/ignore_extra_conf_file',
+ { 'filepath': PathToTestFile( '.ycm_extra_conf.py' ) } )
+
+
+def tearDownPackage():
+ """Cleans up the tests using the SharedYcmd decorator in this package. It is
+ executed once after running all the tests in the package."""
+ global shared_app, shared_filepaths
+
+ for filepath in shared_filepaths:
+ StopOmniSharpServer( shared_app, filepath )
+
+
+@contextmanager
+def WrapOmniSharpServer( app, filepath ):
+ global shared_filepaths
+
+ if filepath not in shared_filepaths:
+ StartOmniSharpServer( app, filepath )
+ shared_filepaths.append( filepath )
+ WaitUntilOmniSharpServerReady( app, filepath )
+ yield
+
+
+def SharedYcmd( test ):
+ """Defines a decorator to be attached to tests of this package. This decorator
+ passes the shared ycmd application as a parameter.
+
+ Do NOT attach it to test generators but directly to the yielded tests."""
+ global shared_app
+
+ @functools.wraps( test )
+ def Wrapper( *args, **kwargs ):
+ return test( shared_app, *args, **kwargs )
+ return Wrapper
+
+
+def IsolatedYcmd( test ):
+ """Defines a decorator to be attached to tests of this package. This decorator
+ passes a unique ycmd application as a parameter. It should be used on tests
+ that change the server state in a irreversible way (ex: a semantic subserver
+ is stopped or restarted) or expect a clean state (ex: no semantic subserver
+ started, no .ycm_extra_conf.py loaded, etc).
+
+ Do NOT attach it to test generators but directly to the yielded tests."""
+ @functools.wraps( test )
+ def Wrapper( *args, **kwargs ):
+ old_server_state = handlers._server_state
+
+ try:
+ app = SetUpApp()
+ app.post_json(
+ '/ignore_extra_conf_file',
+ { 'filepath': PathToTestFile( '.ycm_extra_conf.py' ) } )
+ test( app, *args, **kwargs )
+ finally:
+ handlers._server_state = old_server_state
+ return Wrapper
diff --git a/ycmd/tests/cs/cs_handlers_test.py b/ycmd/tests/cs/cs_handlers_test.py
deleted file mode 100644
--- a/ycmd/tests/cs/cs_handlers_test.py
+++ /dev/null
@@ -1,180 +0,0 @@
-# Copyright (C) 2015 ycmd contributors
-#
-# This file is part of ycmd.
-#
-# ycmd is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# ycmd is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with ycmd. If not, see <http://www.gnu.org/licenses/>.
-
-from __future__ import unicode_literals
-from __future__ import print_function
-from __future__ import division
-from __future__ import absolute_import
-from future import standard_library
-standard_library.install_aliases()
-from builtins import * # noqa
-from future.utils import PY2
-
-from ..handlers_test import Handlers_test
-from ycmd.utils import OnTravis, OnWindows
-import time
-from contextlib import contextmanager
-
-# If INSTANCE_PER_TEST is set, each test case will start up and shutdown an
-# instance of Omnisharp server. Otherwise - the default - it will reuse the
-# Omnisharp instances between individual test cases. Non caching (false) is
-# much faster, but test cases are not totally isolated from each other.
-# For test case isolation, set to true.
-# Reusing Omnisharp instances this way on Windows and Python 3 will randomly
-# raise the error "OSError: [WinError 6] The handle is invalid" in tests so
-# we set it to true in this case.
-INSTANCE_PER_TEST = True if OnWindows() and not PY2 else False
-
-
-class Cs_Handlers_test( Handlers_test ):
-
- omnisharp_file_solution = {}
- omnisharp_solution_port = {}
- omnisharp_solution_file = {}
-
- def __init__( self ):
- self._file = __file__
-
-
- def setUp( self ):
- super( Cs_Handlers_test, self ).setUp()
- self._app.post_json(
- '/ignore_extra_conf_file',
- { 'filepath': self._PathToTestFile( '.ycm_extra_conf.py' ) } )
-
-
- # See __init__.py for teardownPackage
-
-
- @contextmanager
- def _WrapOmniSharpServer( self, filepath ):
- self._SetupOmniSharpServer( filepath )
- yield
- self._TeardownOmniSharpServer( filepath )
-
-
- def _SetupOmniSharpServer( self, filepath ):
- solution_path = self._FindOmniSharpSolutionPath( filepath )
- if solution_path in Cs_Handlers_test.omnisharp_solution_port:
- port = Cs_Handlers_test.omnisharp_solution_port[ solution_path ]
- self._SetOmnisharpPort( filepath, port )
- self._WaitUntilOmniSharpServerReady( filepath )
- else:
- self._StartOmniSharpServer( filepath )
- self._WaitUntilOmniSharpServerReady( filepath )
- port = self._GetOmnisharpPort( filepath )
- Cs_Handlers_test.omnisharp_solution_port[ solution_path ] = port
-
-
- def _TeardownOmniSharpServer( self, filepath ):
- if INSTANCE_PER_TEST:
- self._StopOmniSharpServer( filepath )
- try:
- solution = self._FindOmniSharpSolutionPath( filepath )
- del Cs_Handlers_test.omnisharp_solution_port[ solution ]
- del Cs_Handlers_test.omnisharp_solution_file[ solution ]
- except KeyError:
- pass
-
-
- def _StartOmniSharpServer( self, filepath ):
- self._app.post_json( '/run_completer_command',
- self._BuildRequest( completer_target = 'filetype_default',
- command_arguments = [ "StartServer" ],
- filepath = filepath,
- filetype = 'cs' ) )
-
-
- def _FindOmniSharpSolutionPath( self, filepath ):
- if filepath in Cs_Handlers_test.omnisharp_file_solution:
- return Cs_Handlers_test.omnisharp_file_solution[ filepath ]
-
- solution_request = self._BuildRequest(
- completer_target = 'filetype_default',
- filepath = filepath,
- command_arguments = [ "SolutionFile" ],
- filetype = 'cs' )
- solution_path = self._app.post_json( '/run_completer_command',
- solution_request ).json
- Cs_Handlers_test.omnisharp_file_solution[ filepath ] = solution_path
- Cs_Handlers_test.omnisharp_solution_file[ solution_path ] = filepath
-
- return solution_path
-
-
- def _SetOmnisharpPort( self, filepath, port ):
- command_arguments = [ 'SetOmnisharpPort', port ]
- self._app.post_json( '/run_completer_command',
- self._BuildRequest( completer_target = 'filetype_default',
- command_arguments = command_arguments,
- filepath = filepath,
- filetype = 'cs' ) )
-
-
- def _GetOmnisharpPort( self, filepath ):
- request = self._BuildRequest( completer_target = 'filetype_default',
- command_arguments = [ "GetOmnisharpPort" ],
- filepath = filepath,
- filetype = 'cs' )
- result = self._app.post_json( '/run_completer_command', request ).json
-
- return int( result[ "message" ] )
-
-
- def _StopOmniSharpServer( self, filepath ):
- self._app.post_json( '/run_completer_command',
- self._BuildRequest( completer_target = 'filetype_default',
- command_arguments = [ 'StopServer' ],
- filepath = filepath,
- filetype = 'cs' ) )
-
-
- def _WaitUntilOmniSharpServerReady( self, filepath ):
- retries = 100
- success = False
-
- # If running on Travis CI, keep trying forever. Travis will kill the worker
- # after 10 mins if nothing happens.
- while retries > 0 or OnTravis():
- result = self._app.get( '/ready', { 'subserver': 'cs' } ).json
- if result:
- success = True
- break
- request = self._BuildRequest( completer_target = 'filetype_default',
- command_arguments = [ 'ServerIsRunning' ],
- filepath = filepath,
- filetype = 'cs' )
- result = self._app.post_json( '/run_completer_command', request ).json
- if not result:
- raise RuntimeError( "OmniSharp failed during startup." )
- time.sleep( 0.2 )
- retries = retries - 1
-
- if not success:
- raise RuntimeError( "Timeout waiting for OmniSharpServer" )
-
-
-def StopAllOmniSharpServers():
- self = Cs_Handlers_test()
- with self.UserOption( 'auto_start_csharp_server', False ):
- with self.UserOption( 'confirm_extra_conf', False ):
- self.setUp()
- while Cs_Handlers_test.omnisharp_solution_port:
- ( solution, port ) = Cs_Handlers_test.omnisharp_solution_port.popitem()
- filepath = Cs_Handlers_test.omnisharp_solution_file[ solution ]
- self._SetOmnisharpPort( filepath, port )
- self._StopOmniSharpServer( filepath )
diff --git a/ycmd/tests/cs/diagnostics_test.py b/ycmd/tests/cs/diagnostics_test.py
--- a/ycmd/tests/cs/diagnostics_test.py
+++ b/ycmd/tests/cs/diagnostics_test.py
@@ -25,114 +25,114 @@
from hamcrest import ( assert_that, contains, contains_string, equal_to,
has_entries, has_entry )
-from .cs_handlers_test import Cs_Handlers_test
-from ycmd.utils import ReadFile
+from ycmd.tests.cs import PathToTestFile, SharedYcmd, WrapOmniSharpServer
+from ycmd.tests.test_utils import BuildRequest
+from ycmd.utils import ReadFile
-class Cs_Diagnostics_test( Cs_Handlers_test ):
- def ZeroBasedLineAndColumn_test( self ):
- filepath = self._PathToTestFile( 'testy', 'Program.cs' )
- with self._WrapOmniSharpServer( filepath ):
+@SharedYcmd
+def Diagnostics_Basic_test( app ):
+ filepath = PathToTestFile( 'testy', 'Program.cs' )
+ with WrapOmniSharpServer( app, filepath ):
+ contents = ReadFile( filepath )
+
+ event_data = BuildRequest( filepath = filepath,
+ event_name = 'FileReadyToParse',
+ filetype = 'cs',
+ contents = contents )
+ app.post_json( '/event_notification', event_data )
+
+ diag_data = BuildRequest( filepath = filepath,
+ filetype = 'cs',
+ contents = contents,
+ line_num = 11,
+ column_num = 2 )
+
+ results = app.post_json( '/detailed_diagnostic', diag_data ).json
+ assert_that( results,
+ has_entry(
+ 'message',
+ contains_string(
+ "Unexpected symbol `}'', expecting identifier" ) ) )
+
+
+@SharedYcmd
+def Diagnostics_ZeroBasedLineAndColumn_test( app ):
+ filepath = PathToTestFile( 'testy', 'Program.cs' )
+ with WrapOmniSharpServer( app, filepath ):
+ contents = ReadFile( filepath )
+
+ results = {}
+ for _ in ( 0, 1 ): # First call always returns blank for some reason
+ event_data = BuildRequest( filepath = filepath,
+ event_name = 'FileReadyToParse',
+ filetype = 'cs',
+ contents = contents )
+
+ results = app.post_json( '/event_notification', event_data ).json
+
+ assert_that( results,
+ contains(
+ has_entries( {
+ 'kind': equal_to( 'ERROR' ),
+ 'text': contains_string(
+ "Unexpected symbol `}'', expecting identifier" ),
+ 'location': has_entries( {
+ 'line_num': 11,
+ 'column_num': 2
+ } ),
+ 'location_extent': has_entries( {
+ 'start': has_entries( {
+ 'line_num': 11,
+ 'column_num': 2,
+ } ),
+ 'end': has_entries( {
+ 'line_num': 11,
+ 'column_num': 2,
+ } ),
+ } )
+ } ) ) )
+
+
+@SharedYcmd
+def Diagnostics_MultipleSolution_test( app ):
+ filepaths = [ PathToTestFile( 'testy', 'Program.cs' ),
+ PathToTestFile( 'testy-multiple-solutions',
+ 'solution-named-like-folder',
+ 'testy', 'Program.cs' ) ]
+ lines = [ 11, 10 ]
+ for filepath, line in zip( filepaths, lines ):
+ with WrapOmniSharpServer( app, filepath ):
contents = ReadFile( filepath )
results = {}
- for _ in ( 0, 1 ): # First call always returns blank for some reason
- event_data = self._BuildRequest( filepath = filepath,
- event_name = 'FileReadyToParse',
- filetype = 'cs',
- contents = contents )
-
- results = self._app.post_json( '/event_notification', event_data ).json
-
- assert_that( results,
- contains(
- has_entries( {
- 'kind': equal_to( 'ERROR' ),
- 'text': contains_string(
- "Unexpected symbol `}'', expecting identifier" ),
- 'location': has_entries( {
- 'line_num': 11,
- 'column_num': 2
- } ),
- 'location_extent': has_entries( {
- 'start': has_entries( {
- 'line_num': 11,
- 'column_num': 2,
- } ),
- 'end': has_entries( {
- 'line_num': 11,
- 'column_num': 2,
- } ),
- } )
- } ) ) )
-
-
- def MultipleSolution_test( self ):
- filepaths = [ self._PathToTestFile( 'testy', 'Program.cs' ),
- self._PathToTestFile( 'testy-multiple-solutions',
- 'solution-named-like-folder',
- 'testy',
- 'Program.cs' ) ]
- lines = [ 11, 10 ]
- for filepath, line in zip( filepaths, lines ):
- with self._WrapOmniSharpServer( filepath ):
- contents = ReadFile( filepath )
-
- results = {}
- for _ in ( 0, 1 ): # First call always returns blank for some reason
- event_data = self._BuildRequest( filepath = filepath,
- event_name = 'FileReadyToParse',
- filetype = 'cs',
- contents = contents )
-
- results = self._app.post_json( '/event_notification',
- event_data ).json
-
- assert_that( results,
- contains(
- has_entries( {
- 'kind': equal_to( 'ERROR' ),
- 'text': contains_string( "Unexpected symbol `}'', "
- "expecting identifier" ),
- 'location': has_entries( {
- 'line_num': line,
- 'column_num': 2
- } ),
- 'location_extent': has_entries( {
- 'start': has_entries( {
- 'line_num': line,
- 'column_num': 2,
- } ),
- 'end': has_entries( {
- 'line_num': line,
- 'column_num': 2,
- } ),
- } )
- } ) ) )
-
-
- # This test seems identical to ZeroBasedLineAndColumn one
- def Basic_test( self ):
- filepath = self._PathToTestFile( 'testy', 'Program.cs' )
- with self._WrapOmniSharpServer( filepath ):
- contents = ReadFile( filepath )
-
- event_data = self._BuildRequest( filepath = filepath,
- event_name = 'FileReadyToParse',
- filetype = 'cs',
- contents = contents )
- self._app.post_json( '/event_notification', event_data )
+ for _ in ( 0, 1 ): # First call always returns blank for some reason
+ event_data = BuildRequest( filepath = filepath,
+ event_name = 'FileReadyToParse',
+ filetype = 'cs',
+ contents = contents )
- diag_data = self._BuildRequest( filepath = filepath,
- filetype = 'cs',
- contents = contents,
- line_num = 11,
- column_num = 2 )
+ results = app.post_json( '/event_notification', event_data ).json
- results = self._app.post_json( '/detailed_diagnostic', diag_data ).json
assert_that( results,
- has_entry(
- 'message',
- contains_string(
- "Unexpected symbol `}'', expecting identifier" ) ) )
+ contains(
+ has_entries( {
+ 'kind': equal_to( 'ERROR' ),
+ 'text': contains_string( "Unexpected symbol `}'', "
+ "expecting identifier" ),
+ 'location': has_entries( {
+ 'line_num': line,
+ 'column_num': 2
+ } ),
+ 'location_extent': has_entries( {
+ 'start': has_entries( {
+ 'line_num': line,
+ 'column_num': 2,
+ } ),
+ 'end': has_entries( {
+ 'line_num': line,
+ 'column_num': 2,
+ } ),
+ } )
+ } ) ) )
diff --git a/ycmd/tests/cs/get_completions_test.py b/ycmd/tests/cs/get_completions_test.py
--- a/ycmd/tests/cs/get_completions_test.py
+++ b/ycmd/tests/cs/get_completions_test.py
@@ -25,355 +25,373 @@
standard_library.install_aliases()
from builtins import * # noqa
-from webtest import AppError
-from nose.tools import eq_
from hamcrest import ( assert_that, empty, greater_than, has_item, has_items,
has_entries )
-from .cs_handlers_test import Cs_Handlers_test
+from nose.tools import eq_
+from webtest import AppError
+
+from ycmd.tests.cs import PathToTestFile, SharedYcmd, WrapOmniSharpServer
+from ycmd.tests.test_utils import BuildRequest, CompletionEntryMatcher
from ycmd.utils import ReadFile
-class Cs_GetCompletions_test( Cs_Handlers_test ):
+@SharedYcmd
+def GetCompletions_Basic_test( app ):
+ filepath = PathToTestFile( 'testy', 'Program.cs' )
+ with WrapOmniSharpServer( app, filepath ):
+ contents = ReadFile( filepath )
- def Basic_test( self ):
- filepath = self._PathToTestFile( 'testy', 'Program.cs' )
- with self._WrapOmniSharpServer( filepath ):
+ completion_data = BuildRequest( filepath = filepath,
+ filetype = 'cs',
+ contents = contents,
+ line_num = 10,
+ column_num = 12 )
+ response_data = app.post_json( '/completions', completion_data ).json
+ assert_that( response_data[ 'completions' ],
+ has_items( CompletionEntryMatcher( 'CursorLeft' ),
+ CompletionEntryMatcher( 'CursorSize' ) ) )
+ eq_( 12, response_data[ 'completion_start_column' ] )
+
+
+@SharedYcmd
+def GetCompletions_MultipleSolution_test( app ):
+ filepaths = [ PathToTestFile( 'testy', 'Program.cs' ),
+ PathToTestFile( 'testy-multiple-solutions',
+ 'solution-named-like-folder',
+ 'testy',
+ 'Program.cs' ) ]
+ lines = [ 10, 9 ]
+ for filepath, line in zip( filepaths, lines ):
+ with WrapOmniSharpServer( app, filepath ):
contents = ReadFile( filepath )
- completion_data = self._BuildRequest( filepath = filepath,
- filetype = 'cs',
- contents = contents,
- line_num = 10,
- column_num = 12 )
- response_data = self._app.post_json( '/completions',
- completion_data ).json
+ completion_data = BuildRequest( filepath = filepath,
+ filetype = 'cs',
+ contents = contents,
+ line_num = line,
+ column_num = 12 )
+ response_data = app.post_json( '/completions',
+ completion_data ).json
assert_that( response_data[ 'completions' ],
- has_items( self._CompletionEntryMatcher( 'CursorLeft' ),
- self._CompletionEntryMatcher( 'CursorSize' ) ) )
+ has_items( CompletionEntryMatcher( 'CursorLeft' ),
+ CompletionEntryMatcher( 'CursorSize' ) ) )
eq_( 12, response_data[ 'completion_start_column' ] )
- def MultipleSolution_test( self ):
- filepaths = [ self._PathToTestFile( 'testy', 'Program.cs' ),
- self._PathToTestFile( 'testy-multiple-solutions',
- 'solution-named-like-folder',
- 'testy',
- 'Program.cs' ) ]
- lines = [ 10, 9 ]
- for filepath, line in zip( filepaths, lines ):
- with self._WrapOmniSharpServer( filepath ):
- contents = ReadFile( filepath )
-
- completion_data = self._BuildRequest( filepath = filepath,
- filetype = 'cs',
- contents = contents,
- line_num = line,
- column_num = 12 )
- response_data = self._app.post_json( '/completions',
- completion_data ).json
- assert_that( response_data[ 'completions' ],
- has_items( self._CompletionEntryMatcher( 'CursorLeft' ),
- self._CompletionEntryMatcher( 'CursorSize' ) ) )
- eq_( 12, response_data[ 'completion_start_column' ] )
-
-
- def PathWithSpace_test( self ):
- filepath = self._PathToTestFile( u'неприличное слово', 'Program.cs' )
- with self._WrapOmniSharpServer( filepath ):
- contents = ReadFile( filepath )
+@SharedYcmd
+def GetCompletions_PathWithSpace_test( app ):
+ filepath = PathToTestFile( u'неприличное слово', 'Program.cs' )
+ with WrapOmniSharpServer( app, filepath ):
+ contents = ReadFile( filepath )
- completion_data = self._BuildRequest( filepath = filepath,
- filetype = 'cs',
- contents = contents,
- line_num = 9,
- column_num = 12 )
- response_data = self._app.post_json( '/completions',
- completion_data ).json
- assert_that( response_data[ 'completions' ],
- has_items( self._CompletionEntryMatcher( 'CursorLeft' ),
- self._CompletionEntryMatcher( 'CursorSize' ) ) )
- eq_( 12, response_data[ 'completion_start_column' ] )
+ completion_data = BuildRequest( filepath = filepath,
+ filetype = 'cs',
+ contents = contents,
+ line_num = 9,
+ column_num = 12 )
+ response_data = app.post_json( '/completions', completion_data ).json
+ assert_that( response_data[ 'completions' ],
+ has_items( CompletionEntryMatcher( 'CursorLeft' ),
+ CompletionEntryMatcher( 'CursorSize' ) ) )
+ eq_( 12, response_data[ 'completion_start_column' ] )
+
+
+@SharedYcmd
+def GetCompletions_HasBothImportsAndNonImport_test( app ):
+ filepath = PathToTestFile( 'testy', 'ImportTest.cs' )
+ with WrapOmniSharpServer( app, filepath ):
+ contents = ReadFile( filepath )
+
+ completion_data = BuildRequest( filepath = filepath,
+ filetype = 'cs',
+ contents = contents,
+ line_num = 9,
+ column_num = 12,
+ force_semantic = True,
+ query = 'Date' )
+ response_data = app.post_json( '/completions', completion_data ).json
+
+ assert_that(
+ response_data[ 'completions' ],
+ has_items( CompletionEntryMatcher( 'DateTime' ),
+ CompletionEntryMatcher( 'DateTimeStyles' ) )
+ )
+
+
+@SharedYcmd
+def GetCompletions_ImportsOrderedAfter_test( app ):
+ filepath = PathToTestFile( 'testy', 'ImportTest.cs' )
+ with WrapOmniSharpServer( app, filepath ):
+ contents = ReadFile( filepath )
+ completion_data = BuildRequest( filepath = filepath,
+ filetype = 'cs',
+ contents = contents,
+ line_num = 9,
+ column_num = 12,
+ force_semantic = True,
+ query = 'Date' )
+ response_data = app.post_json( '/completions', completion_data ).json
+
+ min_import_index = min(
+ loc for loc, val
+ in enumerate( response_data[ 'completions' ] )
+ if val[ 'extra_data' ][ 'required_namespace_import' ]
+ )
+
+ max_nonimport_index = max(
+ loc for loc, val
+ in enumerate( response_data[ 'completions' ] )
+ if not val[ 'extra_data' ][ 'required_namespace_import' ]
+ )
+
+ assert_that( min_import_index, greater_than( max_nonimport_index ) ),
+
+
+@SharedYcmd
+def GetCompletions_ForcedReturnsResults_test( app ):
+ filepath = PathToTestFile( 'testy', 'ContinuousTest.cs' )
+ with WrapOmniSharpServer( app, filepath ):
+ contents = ReadFile( filepath )
- def HasBothImportsAndNonImport_test( self ):
- filepath = self._PathToTestFile( 'testy', 'ImportTest.cs' )
- with self._WrapOmniSharpServer( filepath ):
- contents = ReadFile( filepath )
+ completion_data = BuildRequest( filepath = filepath,
+ filetype = 'cs',
+ contents = contents,
+ line_num = 9,
+ column_num = 21,
+ force_semantic = True,
+ query = 'Date' )
+ response_data = app.post_json( '/completions', completion_data ).json
- completion_data = self._BuildRequest( filepath = filepath,
- filetype = 'cs',
- contents = contents,
- line_num = 9,
- column_num = 12,
- force_semantic = True,
- query = 'Date' )
- response_data = self._app.post_json( '/completions',
- completion_data ).json
-
- assert_that(
- response_data[ 'completions' ],
- has_items( self._CompletionEntryMatcher( 'DateTime' ),
- self._CompletionEntryMatcher( 'DateTimeStyles' ) )
- )
-
-
- def ImportsOrderedAfter_test( self ):
- filepath = self._PathToTestFile( 'testy', 'ImportTest.cs' )
- with self._WrapOmniSharpServer( filepath ):
- contents = ReadFile( filepath )
+ assert_that( response_data[ 'completions' ],
+ has_items( CompletionEntryMatcher( 'String' ),
+ CompletionEntryMatcher( 'StringBuilder' ) ) )
- completion_data = self._BuildRequest( filepath = filepath,
- filetype = 'cs',
- contents = contents,
- line_num = 9,
- column_num = 12,
- force_semantic = True,
- query = 'Date' )
- response_data = self._app.post_json( '/completions',
- completion_data ).json
-
- min_import_index = min(
- loc for loc, val
- in enumerate( response_data[ 'completions' ] )
- if val[ 'extra_data' ][ 'required_namespace_import' ]
- )
-
- max_nonimport_index = max(
- loc for loc, val
- in enumerate( response_data[ 'completions' ] )
- if not val[ 'extra_data' ][ 'required_namespace_import' ]
- )
-
- assert_that( min_import_index, greater_than( max_nonimport_index ) ),
-
-
- def ForcedReturnsResults_test( self ):
- filepath = self._PathToTestFile( 'testy', 'ContinuousTest.cs' )
- with self._WrapOmniSharpServer( filepath ):
- contents = ReadFile( filepath )
- completion_data = self._BuildRequest( filepath = filepath,
- filetype = 'cs',
- contents = contents,
- line_num = 9,
- column_num = 21,
- force_semantic = True,
- query = 'Date' )
- response_data = self._app.post_json( '/completions',
- completion_data ).json
-
- assert_that(
- response_data[ 'completions' ],
- has_items( self._CompletionEntryMatcher( 'String' ),
- self._CompletionEntryMatcher( 'StringBuilder' ) ) )
-
-
- def NonForcedReturnsNoResults_test( self ):
- filepath = self._PathToTestFile( 'testy', 'ContinuousTest.cs' )
- with self._WrapOmniSharpServer( filepath ):
- contents = ReadFile( filepath )
- event_data = self._BuildRequest( filepath = filepath,
- filetype = 'cs',
- contents = contents,
- event_name = 'FileReadyToParse' )
-
- self._app.post_json( '/event_notification', event_data )
-
- completion_data = self._BuildRequest( filepath = filepath,
- filetype = 'cs',
- contents = contents,
- line_num = 9,
- column_num = 21,
- force_semantic = False,
- query = 'Date' )
- results = self._app.post_json( '/completions', completion_data ).json
-
- # There are no semantic completions. However, we fall back to identifier
- # completer in this case.
- assert_that( results, has_entries( {
- 'completions': has_item( has_entries( {
- 'insertion_text' : 'String',
- 'extra_menu_info': '[ID]',
- } ) ),
- 'errors': empty(),
- } ) )
-
-
- def ForcedDividesCache_test( self ):
- filepath = self._PathToTestFile( 'testy', 'ContinuousTest.cs' )
- with self._WrapOmniSharpServer( filepath ):
- contents = ReadFile( filepath )
- event_data = self._BuildRequest( filepath = filepath,
- filetype = 'cs',
- contents = contents,
- event_name = 'FileReadyToParse' )
-
- self._app.post_json( '/event_notification', event_data )
-
- completion_data = self._BuildRequest( filepath = filepath,
- filetype = 'cs',
- contents = contents,
- line_num = 9,
- column_num = 21,
- force_semantic = True,
- query = 'Date' )
- results = self._app.post_json( '/completions', completion_data ).json
-
- assert_that( results[ 'completions' ], not( empty() ) )
- assert_that( results[ 'errors' ], empty() )
-
- completion_data = self._BuildRequest( filepath = filepath,
- filetype = 'cs',
- contents = contents,
- line_num = 9,
- column_num = 21,
- force_semantic = False,
- query = 'Date' )
- results = self._app.post_json( '/completions', completion_data ).json
-
- # There are no semantic completions. However, we fall back to identifier
- # completer in this case.
- assert_that( results, has_entries( {
- 'completions': has_item( has_entries( {
- 'insertion_text' : 'String',
- 'extra_menu_info': '[ID]',
- } ) ),
- 'errors': empty(),
- } ) )
-
-
- def ReloadSolution_Basic_test( self ):
- filepath = self._PathToTestFile( 'testy', 'Program.cs' )
- with self._WrapOmniSharpServer( filepath ):
- result = self._app.post_json(
+@SharedYcmd
+def GetCompletions_NonForcedReturnsNoResults_test( app ):
+ filepath = PathToTestFile( 'testy', 'ContinuousTest.cs' )
+ with WrapOmniSharpServer( app, filepath ):
+ contents = ReadFile( filepath )
+ event_data = BuildRequest( filepath = filepath,
+ filetype = 'cs',
+ contents = contents,
+ event_name = 'FileReadyToParse' )
+
+ app.post_json( '/event_notification', event_data )
+
+ completion_data = BuildRequest( filepath = filepath,
+ filetype = 'cs',
+ contents = contents,
+ line_num = 9,
+ column_num = 21,
+ force_semantic = False,
+ query = 'Date' )
+ results = app.post_json( '/completions', completion_data ).json
+
+ # There are no semantic completions. However, we fall back to identifier
+ # completer in this case.
+ assert_that( results, has_entries( {
+ 'completions': has_item( has_entries( {
+ 'insertion_text' : 'String',
+ 'extra_menu_info': '[ID]',
+ } ) ),
+ 'errors': empty(),
+ } ) )
+
+
+@SharedYcmd
+def GetCompletions_ForcedDividesCache_test( app ):
+ filepath = PathToTestFile( 'testy', 'ContinuousTest.cs' )
+ with WrapOmniSharpServer( app, filepath ):
+ contents = ReadFile( filepath )
+ event_data = BuildRequest( filepath = filepath,
+ filetype = 'cs',
+ contents = contents,
+ event_name = 'FileReadyToParse' )
+
+ app.post_json( '/event_notification', event_data )
+
+ completion_data = BuildRequest( filepath = filepath,
+ filetype = 'cs',
+ contents = contents,
+ line_num = 9,
+ column_num = 21,
+ force_semantic = True,
+ query = 'Date' )
+ results = app.post_json( '/completions', completion_data ).json
+
+ assert_that( results[ 'completions' ], not( empty() ) )
+ assert_that( results[ 'errors' ], empty() )
+
+ completion_data = BuildRequest( filepath = filepath,
+ filetype = 'cs',
+ contents = contents,
+ line_num = 9,
+ column_num = 21,
+ force_semantic = False,
+ query = 'Date' )
+ results = app.post_json( '/completions', completion_data ).json
+
+ # There are no semantic completions. However, we fall back to identifier
+ # completer in this case.
+ assert_that( results, has_entries( {
+ 'completions': has_item( has_entries( {
+ 'insertion_text' : 'String',
+ 'extra_menu_info': '[ID]',
+ } ) ),
+ 'errors': empty(),
+ } ) )
+
+
+@SharedYcmd
+def GetCompletions_ReloadSolution_Basic_test( app ):
+ filepath = PathToTestFile( 'testy', 'Program.cs' )
+ with WrapOmniSharpServer( app, filepath ):
+ result = app.post_json(
+ '/run_completer_command',
+ BuildRequest( completer_target = 'filetype_default',
+ command_arguments = [ 'ReloadSolution' ],
+ filepath = filepath,
+ filetype = 'cs' ) ).json
+
+ eq_( result, True )
+
+
+@SharedYcmd
+def GetCompletions_ReloadSolution_MultipleSolution_test( app ):
+ filepaths = [ PathToTestFile( 'testy', 'Program.cs' ),
+ PathToTestFile( 'testy-multiple-solutions',
+ 'solution-named-like-folder',
+ 'testy',
+ 'Program.cs' ) ]
+ for filepath in filepaths:
+ with WrapOmniSharpServer( app, filepath ):
+ result = app.post_json(
'/run_completer_command',
- self._BuildRequest( completer_target = 'filetype_default',
- command_arguments = [ 'ReloadSolution' ],
- filepath = filepath,
- filetype = 'cs' ) ).json
+ BuildRequest( completer_target = 'filetype_default',
+ command_arguments = [ 'ReloadSolution' ],
+ filepath = filepath,
+ filetype = 'cs' ) ).json
eq_( result, True )
- def ReloadSolution_MultipleSolution_test( self ):
- filepaths = [ self._PathToTestFile( 'testy', 'Program.cs' ),
- self._PathToTestFile( 'testy-multiple-solutions',
- 'solution-named-like-folder',
- 'testy',
- 'Program.cs' ) ]
- for filepath in filepaths:
- with self._WrapOmniSharpServer( filepath ):
- result = self._app.post_json(
- '/run_completer_command',
- self._BuildRequest( completer_target = 'filetype_default',
- command_arguments = [ 'ReloadSolution' ],
- filepath = filepath,
- filetype = 'cs' ) ).json
-
- eq_( result, True )
-
-
- def _SolutionSelectCheck( self, sourcefile, reference_solution,
- extra_conf_store = None ):
- # reusable test: verify that the correct solution (reference_solution) is
- # detected for a given source file (and optionally a given extra_conf)
- if extra_conf_store:
- self._app.post_json( '/load_extra_conf_file',
- { 'filepath': extra_conf_store } )
-
- result = self._app.post_json(
- '/run_completer_command',
- self._BuildRequest( completer_target = 'filetype_default',
- command_arguments = [ 'SolutionFile' ],
- filepath = sourcefile,
- filetype = 'cs' ) ).json
-
- # Now that cleanup is done, verify solution file
- eq_( reference_solution, result)
-
-
- def UsesSubfolderHint_test( self ):
- self._SolutionSelectCheck(
- self._PathToTestFile( 'testy-multiple-solutions',
- 'solution-named-like-folder',
- 'testy', 'Program.cs' ),
- self._PathToTestFile( 'testy-multiple-solutions',
- 'solution-named-like-folder',
- 'testy.sln' ) )
-
-
- def UsesSuperfolderHint_test( self ):
- self._SolutionSelectCheck(
- self._PathToTestFile( 'testy-multiple-solutions',
- 'solution-named-like-folder',
- 'not-testy', 'Program.cs' ),
- self._PathToTestFile( 'testy-multiple-solutions',
- 'solution-named-like-folder',
- 'solution-named-like-folder.sln' ) )
-
-
- def ExtraConfStoreAbsolute_test( self ):
- self._SolutionSelectCheck(
- self._PathToTestFile( 'testy-multiple-solutions',
- 'solution-not-named-like-folder', 'extra-conf-abs',
- 'testy', 'Program.cs' ),
- self._PathToTestFile( 'testy-multiple-solutions',
- 'solution-not-named-like-folder', 'testy2.sln' ),
- self._PathToTestFile( 'testy-multiple-solutions',
- 'solution-not-named-like-folder', 'extra-conf-abs',
- '.ycm_extra_conf.py' ) )
-
-
- def ExtraConfStoreRelative_test( self ):
- self._SolutionSelectCheck(
- self._PathToTestFile( 'testy-multiple-solutions',
- 'solution-not-named-like-folder', 'extra-conf-rel',
- 'testy', 'Program.cs' ),
- self._PathToTestFile( 'testy-multiple-solutions',
- 'solution-not-named-like-folder', 'extra-conf-rel',
- 'testy2.sln' ),
- self._PathToTestFile( 'testy-multiple-solutions',
- 'solution-not-named-like-folder', 'extra-conf-rel',
- '.ycm_extra_conf.py' ) )
-
-
- def ExtraConfStoreNonexisting_test( self ):
- self._SolutionSelectCheck(
- self._PathToTestFile( 'testy-multiple-solutions',
- 'solution-not-named-like-folder', 'extra-conf-bad',
- 'testy', 'Program.cs' ),
- self._PathToTestFile( 'testy-multiple-solutions',
- 'solution-not-named-like-folder', 'extra-conf-bad',
- 'testy2.sln' ),
- self._PathToTestFile( 'testy-multiple-solutions',
- 'solution-not-named-like-folder', 'extra-conf-bad',
- 'testy', '.ycm_extra_conf.py' ) )
-
-
- def DoesntStartWithAmbiguousMultipleSolutions_test( self ):
- filepath = self._PathToTestFile( 'testy-multiple-solutions',
- 'solution-not-named-like-folder',
- 'testy', 'Program.cs' )
- contents = ReadFile( filepath )
- event_data = self._BuildRequest( filepath = filepath,
- filetype = 'cs',
- contents = contents,
- event_name = 'FileReadyToParse' )
-
- exception_caught = False
- try:
- self._app.post_json( '/event_notification', event_data )
- except AppError as e:
- if 'Autodetection of solution file failed' in str( e ):
- exception_caught = True
-
- # The test passes if we caught an exception when trying to start it,
- # so raise one if it managed to start
- if not exception_caught:
- self._WaitUntilOmniSharpServerReady( filepath )
- self._StopOmniSharpServer( filepath )
- raise Exception( 'The Omnisharp server started, despite us not being '
- 'able to find a suitable solution file to feed it. Did '
- 'you fiddle with the solution finding code in '
- 'cs_completer.py? Hopefully you\'ve enhanced it: you '
- 'need to update this test then :)' )
+def SolutionSelectCheck( app, sourcefile, reference_solution,
+ extra_conf_store = None ):
+ # reusable test: verify that the correct solution (reference_solution) is
+ # detected for a given source file (and optionally a given extra_conf)
+ if extra_conf_store:
+ app.post_json( '/load_extra_conf_file',
+ { 'filepath': extra_conf_store } )
+
+ result = app.post_json( '/run_completer_command',
+ BuildRequest( completer_target = 'filetype_default',
+ command_arguments = [ 'SolutionFile' ],
+ filepath = sourcefile,
+ filetype = 'cs' ) ).json
+
+ # Now that cleanup is done, verify solution file
+ eq_( reference_solution, result)
+
+
+@SharedYcmd
+def GetCompletions_UsesSubfolderHint_test( app ):
+ SolutionSelectCheck( app,
+ PathToTestFile( 'testy-multiple-solutions',
+ 'solution-named-like-folder',
+ 'testy', 'Program.cs' ),
+ PathToTestFile( 'testy-multiple-solutions',
+ 'solution-named-like-folder',
+ 'testy.sln' ) )
+
+
+@SharedYcmd
+def GetCompletions_UsesSuperfolderHint_test( app ):
+ SolutionSelectCheck( app,
+ PathToTestFile( 'testy-multiple-solutions',
+ 'solution-named-like-folder',
+ 'not-testy', 'Program.cs' ),
+ PathToTestFile( 'testy-multiple-solutions',
+ 'solution-named-like-folder',
+ 'solution-named-like-folder.sln' ) )
+
+
+@SharedYcmd
+def GetCompletions_ExtraConfStoreAbsolute_test( app ):
+ SolutionSelectCheck( app,
+ PathToTestFile( 'testy-multiple-solutions',
+ 'solution-not-named-like-folder',
+ 'extra-conf-abs',
+ 'testy', 'Program.cs' ),
+ PathToTestFile( 'testy-multiple-solutions',
+ 'solution-not-named-like-folder',
+ 'testy2.sln' ),
+ PathToTestFile( 'testy-multiple-solutions',
+ 'solution-not-named-like-folder',
+ 'extra-conf-abs',
+ '.ycm_extra_conf.py' ) )
+
+
+@SharedYcmd
+def GetCompletions_ExtraConfStoreRelative_test( app ):
+ SolutionSelectCheck( app,
+ PathToTestFile( 'testy-multiple-solutions',
+ 'solution-not-named-like-folder',
+ 'extra-conf-rel',
+ 'testy', 'Program.cs' ),
+ PathToTestFile( 'testy-multiple-solutions',
+ 'solution-not-named-like-folder',
+ 'extra-conf-rel',
+ 'testy2.sln' ),
+ PathToTestFile( 'testy-multiple-solutions',
+ 'solution-not-named-like-folder',
+ 'extra-conf-rel',
+ '.ycm_extra_conf.py' ) )
+
+
+@SharedYcmd
+def GetCompletions_ExtraConfStoreNonexisting_test( app ):
+ SolutionSelectCheck( app,
+ PathToTestFile( 'testy-multiple-solutions',
+ 'solution-not-named-like-folder',
+ 'extra-conf-bad',
+ 'testy', 'Program.cs' ),
+ PathToTestFile( 'testy-multiple-solutions',
+ 'solution-not-named-like-folder',
+ 'extra-conf-bad',
+ 'testy2.sln' ),
+ PathToTestFile( 'testy-multiple-solutions',
+ 'solution-not-named-like-folder',
+ 'extra-conf-bad',
+ 'testy', '.ycm_extra_conf.py' ) )
+
+
+@SharedYcmd
+def GetCompletions_DoesntStartWithAmbiguousMultipleSolutions_test( app ):
+ filepath = PathToTestFile( 'testy-multiple-solutions',
+ 'solution-not-named-like-folder',
+ 'testy', 'Program.cs' )
+ contents = ReadFile( filepath )
+ event_data = BuildRequest( filepath = filepath,
+ filetype = 'cs',
+ contents = contents,
+ event_name = 'FileReadyToParse' )
+
+ exception_caught = False
+ try:
+ app.post_json( '/event_notification', event_data )
+ except AppError as e:
+ if 'Autodetection of solution file failed' in str( e ):
+ exception_caught = True
+
+ # The test passes if we caught an exception when trying to start it,
+ # so raise one if it managed to start
+ if not exception_caught:
+ WaitUntilOmniSharpServerReady( app, filepath )
+ StopOmniSharpServer( app, filepath )
+ raise Exception( 'The Omnisharp server started, despite us not being '
+ 'able to find a suitable solution file to feed it. Did '
+ 'you fiddle with the solution finding code in '
+ 'cs_completer.py? Hopefully you\'ve enhanced it: you '
+ 'need to update this test then :)' )
diff --git a/ycmd/tests/cs/subcommands_test.py b/ycmd/tests/cs/subcommands_test.py
--- a/ycmd/tests/cs/subcommands_test.py
+++ b/ycmd/tests/cs/subcommands_test.py
@@ -23,542 +23,562 @@
standard_library.install_aliases()
from builtins import * # noqa
-from webtest import TestApp, AppError
from nose.tools import eq_, ok_
-from ... import handlers
-from .cs_handlers_test import Cs_Handlers_test
-from ycmd.utils import ReadFile
+from webtest import AppError
import re
import os.path
+from ycmd.tests.cs import ( IsolatedYcmd, PathToTestFile, SharedYcmd,
+ StopOmniSharpServer, WaitUntilOmniSharpServerReady,
+ WrapOmniSharpServer )
+from ycmd.tests.test_utils import BuildRequest, UserOption
+from ycmd.utils import ReadFile
+
-class Cs_Subcommands_test( Cs_Handlers_test ):
-
- def GoTo_Basic_test( self ):
- filepath = self._PathToTestFile( 'testy', 'GotoTestCase.cs' )
- with self._WrapOmniSharpServer( filepath ):
- contents = ReadFile( filepath )
-
- goto_data = self._BuildRequest( completer_target = 'filetype_default',
- command_arguments = [ 'GoTo' ],
- line_num = 9,
- column_num = 15,
- contents = contents,
- filetype = 'cs',
- filepath = filepath )
-
- eq_( {
- 'filepath': self._PathToTestFile( 'testy', 'Program.cs' ),
- 'line_num': 7,
- 'column_num': 3
- }, self._app.post_json( '/run_completer_command', goto_data ).json )
-
-
- def GoToImplementation_Basic_test( self ):
- filepath = self._PathToTestFile( 'testy', 'GotoTestCase.cs' )
- with self._WrapOmniSharpServer( filepath ):
- contents = ReadFile( filepath )
-
- goto_data = self._BuildRequest(
- completer_target = 'filetype_default',
- command_arguments = [ 'GoToImplementation' ],
- line_num = 13,
- column_num = 13,
- contents = contents,
- filetype = 'cs',
- filepath = filepath
- )
-
- eq_( {
- 'filepath': self._PathToTestFile( 'testy', 'GotoTestCase.cs' ),
- 'line_num': 30,
- 'column_num': 3
- }, self._app.post_json( '/run_completer_command', goto_data ).json )
-
-
- def GoToImplementation_NoImplementation_test( self ):
- filepath = self._PathToTestFile( 'testy', 'GotoTestCase.cs' )
- with self._WrapOmniSharpServer( filepath ):
- contents = ReadFile( filepath )
-
- goto_data = self._BuildRequest(
- completer_target = 'filetype_default',
- command_arguments = [ 'GoToImplementation' ],
- line_num = 17,
- column_num = 13,
- contents = contents,
- filetype = 'cs',
- filepath = filepath
- )
-
- try:
- self._app.post_json( '/run_completer_command', goto_data ).json
- raise Exception("Expected a 'No implementations found' error")
- except AppError as e:
- if 'No implementations found' in str(e):
- pass
- else:
- raise
-
-
- def CsCompleter_InvalidLocation_test( self ):
- filepath = self._PathToTestFile( 'testy', 'GotoTestCase.cs' )
- with self._WrapOmniSharpServer( filepath ):
- contents = ReadFile( filepath )
-
- goto_data = self._BuildRequest(
- completer_target = 'filetype_default',
- command_arguments = [ 'GoToImplementation' ],
- line_num = 2,
- column_num = 1,
- contents = contents,
- filetype = 'cs',
- filepath = filepath
- )
-
- try:
- self._app.post_json( '/run_completer_command', goto_data ).json
- raise Exception( 'Expected a "Can\\\'t jump to implementation" error' )
- except AppError as e:
- if 'Can\\\'t jump to implementation' in str(e):
- pass
- else:
- raise
-
-
- def GoToImplementationElseDeclaration_NoImplementation_test( self ):
- filepath = self._PathToTestFile( 'testy', 'GotoTestCase.cs' )
- with self._WrapOmniSharpServer( filepath ):
- contents = ReadFile( filepath )
-
- goto_data = self._BuildRequest(
- completer_target = 'filetype_default',
- command_arguments = [ 'GoToImplementationElseDeclaration' ],
- line_num = 17,
- column_num = 13,
- contents = contents,
- filetype = 'cs',
- filepath = filepath
- )
-
- eq_( {
- 'filepath': self._PathToTestFile( 'testy', 'GotoTestCase.cs' ),
- 'line_num': 35,
- 'column_num': 3
- }, self._app.post_json( '/run_completer_command', goto_data ).json )
-
-
- def GoToImplementationElseDeclaration_SingleImplementation_test( self ):
- filepath = self._PathToTestFile( 'testy', 'GotoTestCase.cs' )
- with self._WrapOmniSharpServer( filepath ):
- contents = ReadFile( filepath )
-
- goto_data = self._BuildRequest(
- completer_target = 'filetype_default',
- command_arguments = [ 'GoToImplementationElseDeclaration' ],
- line_num = 13,
- column_num = 13,
- contents = contents,
- filetype = 'cs',
- filepath = filepath
- )
-
- eq_( {
- 'filepath': self._PathToTestFile( 'testy', 'GotoTestCase.cs' ),
- 'line_num': 30,
- 'column_num': 3
- }, self._app.post_json( '/run_completer_command', goto_data ).json )
-
-
- def GoToImplementationElseDeclaration_MultipleImplementations_test( self ):
- filepath = self._PathToTestFile( 'testy', 'GotoTestCase.cs' )
- with self._WrapOmniSharpServer( filepath ):
- contents = ReadFile( filepath )
-
- goto_data = self._BuildRequest(
- completer_target = 'filetype_default',
- command_arguments = [ 'GoToImplementationElseDeclaration' ],
- line_num = 21,
- column_num = 13,
- contents = contents,
- filetype = 'cs',
- filepath = filepath
- )
-
- eq_( [ {
- 'filepath': self._PathToTestFile( 'testy', 'GotoTestCase.cs' ),
- 'line_num': 43,
- 'column_num': 3
- }, {
- 'filepath': self._PathToTestFile( 'testy', 'GotoTestCase.cs' ),
- 'line_num': 48,
- 'column_num': 3
- } ], self._app.post_json( '/run_completer_command', goto_data ).json )
-
-
- def GetType_EmptyMessage_test( self ):
- filepath = self._PathToTestFile( 'testy', 'GetTypeTestCase.cs' )
- with self._WrapOmniSharpServer( filepath ):
- contents = ReadFile( filepath )
-
- gettype_data = self._BuildRequest( completer_target = 'filetype_default',
- command_arguments = [ 'GetType' ],
- line_num = 1,
- column_num = 1,
- contents = contents,
- filetype = 'cs',
- filepath = filepath )
-
- eq_( {
- u'message': u""
- }, self._app.post_json( '/run_completer_command', gettype_data ).json )
-
-
- def GetType_VariableDeclaration_test( self ):
- filepath = self._PathToTestFile( 'testy', 'GetTypeTestCase.cs' )
- with self._WrapOmniSharpServer( filepath ):
- contents = ReadFile( filepath )
-
- gettype_data = self._BuildRequest( completer_target = 'filetype_default',
- command_arguments = [ 'GetType' ],
- line_num = 4,
- column_num = 5,
- contents = contents,
- filetype = 'cs',
- filepath = filepath )
-
- eq_( {
- u'message': u"string"
- }, self._app.post_json( '/run_completer_command', gettype_data ).json )
-
-
- def GetType_VariableUsage_test( self ):
- filepath = self._PathToTestFile( 'testy', 'GetTypeTestCase.cs' )
- with self._WrapOmniSharpServer( filepath ):
- contents = ReadFile( filepath )
-
- gettype_data = self._BuildRequest( completer_target = 'filetype_default',
- command_arguments = [ 'GetType' ],
- line_num = 5,
- column_num = 5,
- contents = contents,
- filetype = 'cs',
- filepath = filepath )
-
- eq_( {
- u'message': u"string str"
- }, self._app.post_json( '/run_completer_command', gettype_data ).json )
-
-
- def GetType_Constant_test( self ):
- filepath = self._PathToTestFile( 'testy', 'GetTypeTestCase.cs' )
- with self._WrapOmniSharpServer( filepath ):
- contents = ReadFile( filepath )
-
- gettype_data = self._BuildRequest( completer_target = 'filetype_default',
- command_arguments = [ 'GetType' ],
- line_num = 4,
- column_num = 14,
- contents = contents,
- filetype = 'cs',
- filepath = filepath )
-
- eq_( {
- u'message': u"System.String"
- }, self._app.post_json( '/run_completer_command', gettype_data ).json )
-
-
- def GetType_DocsIgnored_test( self ):
- filepath = self._PathToTestFile( 'testy', 'GetTypeTestCase.cs' )
- with self._WrapOmniSharpServer( filepath ):
- contents = ReadFile( filepath )
-
- gettype_data = self._BuildRequest( completer_target = 'filetype_default',
- command_arguments = [ 'GetType' ],
- line_num = 9,
- column_num = 34,
- contents = contents,
- filetype = 'cs',
- filepath = filepath )
-
- eq_( {
- u'message': u"int GetTypeTestCase.an_int_with_docs;",
- }, self._app.post_json( '/run_completer_command', gettype_data ).json )
-
-
- def GetDoc_Variable_test( self ):
- filepath = self._PathToTestFile( 'testy', 'GetDocTestCase.cs' )
- with self._WrapOmniSharpServer( filepath ):
- contents = ReadFile( filepath )
-
- getdoc_data = self._BuildRequest( completer_target = 'filetype_default',
- command_arguments = [ 'GetDoc' ],
- line_num = 13,
- column_num = 28,
- contents = contents,
- filetype = 'cs',
- filepath = filepath )
-
- eq_( {
- 'detailed_info': 'int GetDocTestCase.an_int;\n'
- 'an integer, or something',
- }, self._app.post_json( '/run_completer_command', getdoc_data ).json )
-
-
- def GetDoc_Function_test( self ):
- filepath = self._PathToTestFile( 'testy', 'GetDocTestCase.cs' )
- with self._WrapOmniSharpServer( filepath ):
- contents = ReadFile( filepath )
-
- getdoc_data = self._BuildRequest( completer_target = 'filetype_default',
- command_arguments = [ 'GetDoc' ],
- line_num = 33,
- column_num = 27,
- contents = contents,
- filetype = 'cs',
- filepath = filepath )
-
- # It seems that Omnisharp server eats newlines
- eq_( {
- 'detailed_info': 'int GetDocTestCase.DoATest();\n'
- ' Very important method. With multiple lines of '
- 'commentary And Format- -ting',
- }, self._app.post_json( '/run_completer_command', getdoc_data ).json )
-
-
- def _RunFixIt( self, line, column, expected_result ):
- filepath = self._PathToTestFile( 'testy', 'FixItTestCase.cs' )
- with self._WrapOmniSharpServer( filepath ):
- contents = ReadFile( filepath )
-
- fixit_data = self._BuildRequest( completer_target = 'filetype_default',
- command_arguments = [ 'FixIt' ],
- line_num = line,
- column_num = column,
- contents = contents,
- filetype = 'cs',
- filepath = filepath )
-
- eq_( expected_result,
- self._app.post_json( '/run_completer_command', fixit_data ).json )
-
-
- def FixIt_RemoveSingleLine_test( self ):
- filepath = self._PathToTestFile( 'testy', 'FixItTestCase.cs' )
- self._RunFixIt( 11, 1, {
- u'fixits': [
- {
- u'location': {
- u'line_num': 11,
- u'column_num': 1,
- u'filepath': filepath
- },
- u'chunks': [
- {
- u'replacement_text': '',
- u'range': {
- u'start': {
- u'line_num': 10,
- u'column_num': 20,
- u'filepath': filepath
- },
- u'end': {
- u'line_num': 11,
- u'column_num': 30,
- u'filepath': filepath
- },
- }
+@SharedYcmd
+def Subcommands_GoTo_Basic_test( app ):
+ filepath = PathToTestFile( 'testy', 'GotoTestCase.cs' )
+ with WrapOmniSharpServer( app, filepath ):
+ contents = ReadFile( filepath )
+
+ goto_data = BuildRequest( completer_target = 'filetype_default',
+ command_arguments = [ 'GoTo' ],
+ line_num = 9,
+ column_num = 15,
+ contents = contents,
+ filetype = 'cs',
+ filepath = filepath )
+
+ eq_( {
+ 'filepath': PathToTestFile( 'testy', 'Program.cs' ),
+ 'line_num': 7,
+ 'column_num': 3
+ }, app.post_json( '/run_completer_command', goto_data ).json )
+
+
+@SharedYcmd
+def Subcommands_GoToImplementation_Basic_test( app ):
+ filepath = PathToTestFile( 'testy', 'GotoTestCase.cs' )
+ with WrapOmniSharpServer( app, filepath ):
+ contents = ReadFile( filepath )
+
+ goto_data = BuildRequest(
+ completer_target = 'filetype_default',
+ command_arguments = [ 'GoToImplementation' ],
+ line_num = 13,
+ column_num = 13,
+ contents = contents,
+ filetype = 'cs',
+ filepath = filepath
+ )
+
+ eq_( {
+ 'filepath': PathToTestFile( 'testy', 'GotoTestCase.cs' ),
+ 'line_num': 30,
+ 'column_num': 3
+ }, app.post_json( '/run_completer_command', goto_data ).json )
+
+
+@SharedYcmd
+def Subcommands_GoToImplementation_NoImplementation_test( app ):
+ filepath = PathToTestFile( 'testy', 'GotoTestCase.cs' )
+ with WrapOmniSharpServer( app, filepath ):
+ contents = ReadFile( filepath )
+
+ goto_data = BuildRequest(
+ completer_target = 'filetype_default',
+ command_arguments = [ 'GoToImplementation' ],
+ line_num = 17,
+ column_num = 13,
+ contents = contents,
+ filetype = 'cs',
+ filepath = filepath
+ )
+
+ try:
+ app.post_json( '/run_completer_command', goto_data ).json
+ raise Exception("Expected a 'No implementations found' error")
+ except AppError as e:
+ if 'No implementations found' in str(e):
+ pass
+ else:
+ raise
+
+
+@SharedYcmd
+def Subcommands_CsCompleter_InvalidLocation_test( app ):
+ filepath = PathToTestFile( 'testy', 'GotoTestCase.cs' )
+ with WrapOmniSharpServer( app, filepath ):
+ contents = ReadFile( filepath )
+
+ goto_data = BuildRequest(
+ completer_target = 'filetype_default',
+ command_arguments = [ 'GoToImplementation' ],
+ line_num = 2,
+ column_num = 1,
+ contents = contents,
+ filetype = 'cs',
+ filepath = filepath
+ )
+
+ try:
+ app.post_json( '/run_completer_command', goto_data ).json
+ raise Exception( 'Expected a "Can\\\'t jump to implementation" error' )
+ except AppError as e:
+ if 'Can\\\'t jump to implementation' in str(e):
+ pass
+ else:
+ raise
+
+
+@SharedYcmd
+def Subcommands_GoToImplementationElseDeclaration_NoImplementation_test( app ):
+ filepath = PathToTestFile( 'testy', 'GotoTestCase.cs' )
+ with WrapOmniSharpServer( app, filepath ):
+ contents = ReadFile( filepath )
+
+ goto_data = BuildRequest(
+ completer_target = 'filetype_default',
+ command_arguments = [ 'GoToImplementationElseDeclaration' ],
+ line_num = 17,
+ column_num = 13,
+ contents = contents,
+ filetype = 'cs',
+ filepath = filepath
+ )
+
+ eq_( {
+ 'filepath': PathToTestFile( 'testy', 'GotoTestCase.cs' ),
+ 'line_num': 35,
+ 'column_num': 3
+ }, app.post_json( '/run_completer_command', goto_data ).json )
+
+
+@SharedYcmd
+def Subcommands_GoToImplementationElseDeclaration_SingleImplementation_test(
+ app ):
+ filepath = PathToTestFile( 'testy', 'GotoTestCase.cs' )
+ with WrapOmniSharpServer( app, filepath ):
+ contents = ReadFile( filepath )
+
+ goto_data = BuildRequest(
+ completer_target = 'filetype_default',
+ command_arguments = [ 'GoToImplementationElseDeclaration' ],
+ line_num = 13,
+ column_num = 13,
+ contents = contents,
+ filetype = 'cs',
+ filepath = filepath
+ )
+
+ eq_( {
+ 'filepath': PathToTestFile( 'testy', 'GotoTestCase.cs' ),
+ 'line_num': 30,
+ 'column_num': 3
+ }, app.post_json( '/run_completer_command', goto_data ).json )
+
+
+@SharedYcmd
+def Subcommands_GoToImplementationElseDeclaration_MultipleImplementations_test(
+ app ):
+ filepath = PathToTestFile( 'testy', 'GotoTestCase.cs' )
+ with WrapOmniSharpServer( app, filepath ):
+ contents = ReadFile( filepath )
+
+ goto_data = BuildRequest(
+ completer_target = 'filetype_default',
+ command_arguments = [ 'GoToImplementationElseDeclaration' ],
+ line_num = 21,
+ column_num = 13,
+ contents = contents,
+ filetype = 'cs',
+ filepath = filepath
+ )
+
+ eq_( [ {
+ 'filepath': PathToTestFile( 'testy', 'GotoTestCase.cs' ),
+ 'line_num': 43,
+ 'column_num': 3
+ }, {
+ 'filepath': PathToTestFile( 'testy', 'GotoTestCase.cs' ),
+ 'line_num': 48,
+ 'column_num': 3
+ } ], app.post_json( '/run_completer_command', goto_data ).json )
+
+
+@SharedYcmd
+def Subcommands_GetType_EmptyMessage_test( app ):
+ filepath = PathToTestFile( 'testy', 'GetTypeTestCase.cs' )
+ with WrapOmniSharpServer( app, filepath ):
+ contents = ReadFile( filepath )
+
+ gettype_data = BuildRequest( completer_target = 'filetype_default',
+ command_arguments = [ 'GetType' ],
+ line_num = 1,
+ column_num = 1,
+ contents = contents,
+ filetype = 'cs',
+ filepath = filepath )
+
+ eq_( {
+ u'message': u""
+ }, app.post_json( '/run_completer_command', gettype_data ).json )
+
+
+@SharedYcmd
+def Subcommands_GetType_VariableDeclaration_test( app ):
+ filepath = PathToTestFile( 'testy', 'GetTypeTestCase.cs' )
+ with WrapOmniSharpServer( app, filepath ):
+ contents = ReadFile( filepath )
+
+ gettype_data = BuildRequest( completer_target = 'filetype_default',
+ command_arguments = [ 'GetType' ],
+ line_num = 4,
+ column_num = 5,
+ contents = contents,
+ filetype = 'cs',
+ filepath = filepath )
+
+ eq_( {
+ u'message': u"string"
+ }, app.post_json( '/run_completer_command', gettype_data ).json )
+
+
+@SharedYcmd
+def Subcommands_GetType_VariableUsage_test( app ):
+ filepath = PathToTestFile( 'testy', 'GetTypeTestCase.cs' )
+ with WrapOmniSharpServer( app, filepath ):
+ contents = ReadFile( filepath )
+
+ gettype_data = BuildRequest( completer_target = 'filetype_default',
+ command_arguments = [ 'GetType' ],
+ line_num = 5,
+ column_num = 5,
+ contents = contents,
+ filetype = 'cs',
+ filepath = filepath )
+
+ eq_( {
+ u'message': u"string str"
+ }, app.post_json( '/run_completer_command', gettype_data ).json )
+
+
+@SharedYcmd
+def Subcommands_GetType_Constant_test( app ):
+ filepath = PathToTestFile( 'testy', 'GetTypeTestCase.cs' )
+ with WrapOmniSharpServer( app, filepath ):
+ contents = ReadFile( filepath )
+
+ gettype_data = BuildRequest( completer_target = 'filetype_default',
+ command_arguments = [ 'GetType' ],
+ line_num = 4,
+ column_num = 14,
+ contents = contents,
+ filetype = 'cs',
+ filepath = filepath )
+
+ eq_( {
+ u'message': u"System.String"
+ }, app.post_json( '/run_completer_command', gettype_data ).json )
+
+
+@SharedYcmd
+def Subcommands_GetType_DocsIgnored_test( app ):
+ filepath = PathToTestFile( 'testy', 'GetTypeTestCase.cs' )
+ with WrapOmniSharpServer( app, filepath ):
+ contents = ReadFile( filepath )
+
+ gettype_data = BuildRequest( completer_target = 'filetype_default',
+ command_arguments = [ 'GetType' ],
+ line_num = 9,
+ column_num = 34,
+ contents = contents,
+ filetype = 'cs',
+ filepath = filepath )
+
+ eq_( {
+ u'message': u"int GetTypeTestCase.an_int_with_docs;",
+ }, app.post_json( '/run_completer_command', gettype_data ).json )
+
+
+@SharedYcmd
+def Subcommands_GetDoc_Variable_test( app ):
+ filepath = PathToTestFile( 'testy', 'GetDocTestCase.cs' )
+ with WrapOmniSharpServer( app, filepath ):
+ contents = ReadFile( filepath )
+
+ getdoc_data = BuildRequest( completer_target = 'filetype_default',
+ command_arguments = [ 'GetDoc' ],
+ line_num = 13,
+ column_num = 28,
+ contents = contents,
+ filetype = 'cs',
+ filepath = filepath )
+
+ eq_( {
+ 'detailed_info': 'int GetDocTestCase.an_int;\n'
+ 'an integer, or something',
+ }, app.post_json( '/run_completer_command', getdoc_data ).json )
+
+
+@SharedYcmd
+def Subcommands_GetDoc_Function_test( app ):
+ filepath = PathToTestFile( 'testy', 'GetDocTestCase.cs' )
+ with WrapOmniSharpServer( app, filepath ):
+ contents = ReadFile( filepath )
+
+ getdoc_data = BuildRequest( completer_target = 'filetype_default',
+ command_arguments = [ 'GetDoc' ],
+ line_num = 33,
+ column_num = 27,
+ contents = contents,
+ filetype = 'cs',
+ filepath = filepath )
+
+ # It seems that Omnisharp server eats newlines
+ eq_( {
+ 'detailed_info': 'int GetDocTestCase.DoATest();\n'
+ ' Very important method. With multiple lines of '
+ 'commentary And Format- -ting',
+ }, app.post_json( '/run_completer_command', getdoc_data ).json )
+
+
+def RunFixItTest( app, line, column, expected_result ):
+ filepath = PathToTestFile( 'testy', 'FixItTestCase.cs' )
+ with WrapOmniSharpServer( app, filepath ):
+ contents = ReadFile( filepath )
+
+ fixit_data = BuildRequest( completer_target = 'filetype_default',
+ command_arguments = [ 'FixIt' ],
+ line_num = line,
+ column_num = column,
+ contents = contents,
+ filetype = 'cs',
+ filepath = filepath )
+
+ eq_( expected_result,
+ app.post_json( '/run_completer_command', fixit_data ).json )
+
+
+@SharedYcmd
+def Subcommands_FixIt_RemoveSingleLine_test( app ):
+ filepath = PathToTestFile( 'testy', 'FixItTestCase.cs' )
+ RunFixItTest( app, 11, 1, {
+ u'fixits': [
+ {
+ u'location': {
+ u'line_num': 11,
+ u'column_num': 1,
+ u'filepath': filepath
+ },
+ u'chunks': [
+ {
+ u'replacement_text': '',
+ u'range': {
+ u'start': {
+ u'line_num': 10,
+ u'column_num': 20,
+ u'filepath': filepath
+ },
+ u'end': {
+ u'line_num': 11,
+ u'column_num': 30,
+ u'filepath': filepath
+ },
}
- ]
- }
- ]
- } )
-
-
- def FixIt_MultipleLines_test( self ):
- filepath = self._PathToTestFile( 'testy', 'FixItTestCase.cs' )
- self._RunFixIt( 19, 1, {
- u'fixits': [
- {
- u'location': {
- u'line_num': 19,
- u'column_num': 1,
- u'filepath': filepath
- },
- u'chunks': [
- {
- u'replacement_text': "return On",
- u'range': {
- u'start': {
- u'line_num': 20,
- u'column_num': 13,
- u'filepath': filepath
- },
- u'end': {
- u'line_num': 21,
- u'column_num': 35,
- u'filepath': filepath
- },
- }
+ }
+ ]
+ }
+ ]
+ } )
+
+
+@SharedYcmd
+def Subcommands_FixIt_MultipleLines_test( app ):
+ filepath = PathToTestFile( 'testy', 'FixItTestCase.cs' )
+ RunFixItTest( app, 19, 1, {
+ u'fixits': [
+ {
+ u'location': {
+ u'line_num': 19,
+ u'column_num': 1,
+ u'filepath': filepath
+ },
+ u'chunks': [
+ {
+ u'replacement_text': "return On",
+ u'range': {
+ u'start': {
+ u'line_num': 20,
+ u'column_num': 13,
+ u'filepath': filepath
+ },
+ u'end': {
+ u'line_num': 21,
+ u'column_num': 35,
+ u'filepath': filepath
+ },
}
- ]
- }
- ]
- } )
-
-
- def FixIt_SpanFileEdge_test( self ):
- filepath = self._PathToTestFile( 'testy', 'FixItTestCase.cs' )
- self._RunFixIt( 1, 1, {
- u'fixits': [
- {
- u'location': {
- u'line_num': 1,
- u'column_num': 1,
- u'filepath': filepath
- },
- u'chunks': [
- {
- u'replacement_text': 'System',
- u'range': {
- u'start': {
- u'line_num': 1,
- u'column_num': 7,
- u'filepath': filepath
- },
- u'end': {
- u'line_num': 3,
- u'column_num': 18,
- u'filepath': filepath
- },
- }
+ }
+ ]
+ }
+ ]
+ } )
+
+
+@SharedYcmd
+def Subcommands_FixIt_SpanFileEdge_test( app ):
+ filepath = PathToTestFile( 'testy', 'FixItTestCase.cs' )
+ RunFixItTest( app, 1, 1, {
+ u'fixits': [
+ {
+ u'location': {
+ u'line_num': 1,
+ u'column_num': 1,
+ u'filepath': filepath
+ },
+ u'chunks': [
+ {
+ u'replacement_text': 'System',
+ u'range': {
+ u'start': {
+ u'line_num': 1,
+ u'column_num': 7,
+ u'filepath': filepath
+ },
+ u'end': {
+ u'line_num': 3,
+ u'column_num': 18,
+ u'filepath': filepath
+ },
}
- ]
- }
- ]
- } )
-
-
- def FixIt_AddTextInLine_test( self ):
- filepath = self._PathToTestFile( 'testy', 'FixItTestCase.cs' )
- self._RunFixIt( 9, 1, {
- u'fixits': [
- {
- u'location': {
- u'line_num': 9,
- u'column_num': 1,
- u'filepath': filepath
- },
- u'chunks': [
- {
- u'replacement_text': ', StringComparison.Ordinal',
- u'range': {
- u'start': {
- u'line_num': 9,
- u'column_num': 29,
- u'filepath': filepath
- },
- u'end': {
- u'line_num': 9,
- u'column_num': 29,
- u'filepath': filepath
- },
- }
+ }
+ ]
+ }
+ ]
+ } )
+
+
+@SharedYcmd
+def Subcommands_FixIt_AddTextInLine_test( app ):
+ filepath = PathToTestFile( 'testy', 'FixItTestCase.cs' )
+ RunFixItTest( app, 9, 1, {
+ u'fixits': [
+ {
+ u'location': {
+ u'line_num': 9,
+ u'column_num': 1,
+ u'filepath': filepath
+ },
+ u'chunks': [
+ {
+ u'replacement_text': ', StringComparison.Ordinal',
+ u'range': {
+ u'start': {
+ u'line_num': 9,
+ u'column_num': 29,
+ u'filepath': filepath
+ },
+ u'end': {
+ u'line_num': 9,
+ u'column_num': 29,
+ u'filepath': filepath
+ },
}
- ]
- }
- ]
- } )
-
-
- def FixIt_ReplaceTextInLine_test( self ):
- filepath = self._PathToTestFile( 'testy', 'FixItTestCase.cs' )
- self._RunFixIt( 10, 1, {
- u'fixits': [
- {
- u'location': {
- u'line_num': 10,
- u'column_num': 1,
- u'filepath': filepath
- },
- u'chunks': [
- {
- u'replacement_text': 'const int',
- u'range': {
- u'start': {
- u'line_num': 10,
- u'column_num': 13,
- u'filepath': filepath
- },
- u'end': {
- u'line_num': 10,
- u'column_num': 16,
- u'filepath': filepath
- },
- }
+ }
+ ]
+ }
+ ]
+ } )
+
+
+@SharedYcmd
+def Subcommands_FixIt_ReplaceTextInLine_test( app ):
+ filepath = PathToTestFile( 'testy', 'FixItTestCase.cs' )
+ RunFixItTest( app, 10, 1, {
+ u'fixits': [
+ {
+ u'location': {
+ u'line_num': 10,
+ u'column_num': 1,
+ u'filepath': filepath
+ },
+ u'chunks': [
+ {
+ u'replacement_text': 'const int',
+ u'range': {
+ u'start': {
+ u'line_num': 10,
+ u'column_num': 13,
+ u'filepath': filepath
+ },
+ u'end': {
+ u'line_num': 10,
+ u'column_num': 16,
+ u'filepath': filepath
+ },
}
- ]
- }
- ]
- } )
-
-
- def StopServer_NoErrorIfNotStarted_test( self ):
- filepath = self._PathToTestFile( 'testy', 'GotoTestCase.cs' )
- self._StopOmniSharpServer( filepath )
- # Success = no raise
-
-
- def StopServer_KeepLogFiles_test( self ):
- yield self._StopServer_KeepLogFiles, True
- yield self._StopServer_KeepLogFiles, False
-
-
- def _StopServer_KeepLogFiles( self, keeping_log_files ):
- with self.UserOption( 'server_keep_logfiles', keeping_log_files ):
- self._app = TestApp( handlers.app )
- self._app.post_json(
- '/ignore_extra_conf_file',
- { 'filepath': self._PathToTestFile( '.ycm_extra_conf.py' ) } )
- filepath = self._PathToTestFile( 'testy', 'GotoTestCase.cs' )
- contents = ReadFile( filepath )
- event_data = self._BuildRequest( filepath = filepath,
- filetype = 'cs',
- contents = contents,
- event_name = 'FileReadyToParse' )
-
- self._app.post_json( '/event_notification', event_data )
- self._WaitUntilOmniSharpServerReady( filepath )
-
- event_data = self._BuildRequest( filetype = 'cs', filepath = filepath )
-
- debuginfo = self._app.post_json( '/debug_info', event_data ).json
-
- log_files_match = re.search( "^OmniSharp logfiles:\n(.*)\n(.*)",
- debuginfo,
- re.MULTILINE )
- stdout_logfiles_location = log_files_match.group( 1 )
- stderr_logfiles_location = log_files_match.group( 2 )
-
- try:
- ok_( os.path.exists(stdout_logfiles_location ),
- "Logfile should exist at {0}".format( stdout_logfiles_location ) )
- ok_( os.path.exists( stderr_logfiles_location ),
- "Logfile should exist at {0}".format( stderr_logfiles_location ) )
- finally:
- self._StopOmniSharpServer( filepath )
-
- if keeping_log_files:
- ok_( os.path.exists( stdout_logfiles_location ),
- "Logfile should still exist at "
- "{0}".format( stdout_logfiles_location ) )
- ok_( os.path.exists( stderr_logfiles_location ),
- "Logfile should still exist at "
- "{0}".format( stderr_logfiles_location ) )
- else:
- ok_( not os.path.exists( stdout_logfiles_location ),
- "Logfile should no longer exist at "
- "{0}".format( stdout_logfiles_location ) )
- ok_( not os.path.exists( stderr_logfiles_location ),
- "Logfile should no longer exist at "
- "{0}".format( stderr_logfiles_location ) )
+ }
+ ]
+ }
+ ]
+ } )
+
+
+@IsolatedYcmd
+def Subcommands_StopServer_NoErrorIfNotStarted_test( app ):
+ filepath = PathToTestFile( 'testy', 'GotoTestCase.cs' )
+ StopOmniSharpServer( app, filepath )
+ # Success = no raise
+
+
+@IsolatedYcmd
+def StopServer_KeepLogFiles( app, keeping_log_files ):
+ with UserOption( 'server_keep_logfiles', keeping_log_files ):
+ filepath = PathToTestFile( 'testy', 'GotoTestCase.cs' )
+ contents = ReadFile( filepath )
+ event_data = BuildRequest( filepath = filepath,
+ filetype = 'cs',
+ contents = contents,
+ event_name = 'FileReadyToParse' )
+
+ app.post_json( '/event_notification', event_data )
+ WaitUntilOmniSharpServerReady( app, filepath )
+
+ event_data = BuildRequest( filetype = 'cs', filepath = filepath )
+
+ debuginfo = app.post_json( '/debug_info', event_data ).json
+
+ log_files_match = re.search( "^OmniSharp logfiles:\n(.*)\n(.*)",
+ debuginfo,
+ re.MULTILINE )
+ stdout_logfiles_location = log_files_match.group( 1 )
+ stderr_logfiles_location = log_files_match.group( 2 )
+
+ try:
+ ok_( os.path.exists(stdout_logfiles_location ),
+ "Logfile should exist at {0}".format( stdout_logfiles_location ) )
+ ok_( os.path.exists( stderr_logfiles_location ),
+ "Logfile should exist at {0}".format( stderr_logfiles_location ) )
+ finally:
+ StopOmniSharpServer( app, filepath )
+
+ if keeping_log_files:
+ ok_( os.path.exists( stdout_logfiles_location ),
+ "Logfile should still exist at "
+ "{0}".format( stdout_logfiles_location ) )
+ ok_( os.path.exists( stderr_logfiles_location ),
+ "Logfile should still exist at "
+ "{0}".format( stderr_logfiles_location ) )
+ else:
+ ok_( not os.path.exists( stdout_logfiles_location ),
+ "Logfile should no longer exist at "
+ "{0}".format( stdout_logfiles_location ) )
+ ok_( not os.path.exists( stderr_logfiles_location ),
+ "Logfile should no longer exist at "
+ "{0}".format( stderr_logfiles_location ) )
+
+
+def Subcommands_StopServer_KeepLogFiles_test():
+ yield StopServer_KeepLogFiles, True
+ yield StopServer_KeepLogFiles, False
diff --git a/ycmd/tests/detailed_diagnostics_test.py b/ycmd/tests/detailed_diagnostics_test.py
deleted file mode 100644
--- a/ycmd/tests/detailed_diagnostics_test.py
+++ /dev/null
@@ -1,60 +0,0 @@
-# Copyright (C) 2016 ycmd contributors
-#
-# This file is part of ycmd.
-#
-# ycmd is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# ycmd is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with ycmd. If not, see <http://www.gnu.org/licenses/>.
-
-from __future__ import unicode_literals
-from __future__ import print_function
-from __future__ import division
-from __future__ import absolute_import
-from future import standard_library
-standard_library.install_aliases()
-from builtins import * # noqa
-
-from nose.tools import eq_
-from hamcrest import assert_that
-from ..responses import NoDiagnosticSupport, BuildDisplayMessageResponse
-from .handlers_test import Handlers_test
-from .test_utils import DummyCompleter
-from mock import patch
-import http.client
-
-
-class Diagnostics_test( Handlers_test ):
-
- def DoesntWork_test( self ):
- with self.PatchCompleter( DummyCompleter, filetype = 'dummy_filetype' ):
- diag_data = self._BuildRequest( contents = "foo = 5",
- line_num = 2,
- filetype = 'dummy_filetype' )
-
- response = self._app.post_json( '/detailed_diagnostic',
- diag_data,
- expect_errors = True )
-
- eq_( response.status_code, http.client.INTERNAL_SERVER_ERROR )
- assert_that( response.json, self._ErrorMatcher( NoDiagnosticSupport ) )
-
-
- @patch( 'ycmd.tests.test_utils.DummyCompleter.GetDetailedDiagnostic',
- return_value = BuildDisplayMessageResponse( "detailed diagnostic" ) )
- def DoesWork_test( self, *args ):
- with self.PatchCompleter( DummyCompleter, filetype = 'dummy_filetype' ):
- diag_data = self._BuildRequest( contents = "foo = 5",
- filetype = 'dummy_filetype' )
-
- response = self._app.post_json( '/detailed_diagnostic', diag_data )
- assert_that( response.json,
- self._MessageMatcher( "detailed diagnostic" ) )
diff --git a/ycmd/tests/diagnostics_test.py b/ycmd/tests/diagnostics_test.py
new file mode 100644
--- /dev/null
+++ b/ycmd/tests/diagnostics_test.py
@@ -0,0 +1,61 @@
+# Copyright (C) 2016 ycmd contributors
+#
+# This file is part of ycmd.
+#
+# ycmd is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# ycmd is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with ycmd. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import unicode_literals
+from __future__ import print_function
+from __future__ import division
+from __future__ import absolute_import
+from future import standard_library
+standard_library.install_aliases()
+from builtins import * # noqa
+
+from hamcrest import assert_that
+from mock import patch
+from nose.tools import eq_
+import http.client
+
+from ycmd.responses import NoDiagnosticSupport, BuildDisplayMessageResponse
+from ycmd.tests import SharedYcmd
+from ycmd.tests.test_utils import ( BuildRequest, DummyCompleter, ErrorMatcher,
+ MessageMatcher, PatchCompleter )
+
+
+@SharedYcmd
+def Diagnostics_DoesntWork_test( app ):
+ with PatchCompleter( DummyCompleter, filetype = 'dummy_filetype' ):
+ diag_data = BuildRequest( contents = "foo = 5",
+ line_num = 2,
+ filetype = 'dummy_filetype' )
+
+ response = app.post_json( '/detailed_diagnostic',
+ diag_data,
+ expect_errors = True )
+
+ eq_( response.status_code, http.client.INTERNAL_SERVER_ERROR )
+ assert_that( response.json, ErrorMatcher( NoDiagnosticSupport ) )
+
+
+@SharedYcmd
+@patch( 'ycmd.tests.test_utils.DummyCompleter.GetDetailedDiagnostic',
+ return_value = BuildDisplayMessageResponse( 'detailed diagnostic' ) )
+def Diagnostics_DoesWork_test( app, *args ):
+ with PatchCompleter( DummyCompleter, filetype = 'dummy_filetype' ):
+ diag_data = BuildRequest( contents = 'foo = 5',
+ filetype = 'dummy_filetype' )
+
+ response = app.post_json( '/detailed_diagnostic', diag_data )
+ assert_that( response.json, MessageMatcher( 'detailed diagnostic' ) )
diff --git a/ycmd/tests/get_completions_test.py b/ycmd/tests/get_completions_test.py
--- a/ycmd/tests/get_completions_test.py
+++ b/ycmd/tests/get_completions_test.py
@@ -24,171 +24,178 @@
standard_library.install_aliases()
from builtins import * # noqa
-from webtest import TestApp
-from nose.tools import eq_
from hamcrest import assert_that, has_items
-from .. import handlers
-from .handlers_test import Handlers_test
-from ycmd.tests.test_utils import DummyCompleter
from mock import patch
+from nose.tools import eq_
-
-class GetCompletions_test( Handlers_test ):
-
- def RequestValidation_NoLineNumException_test( self ):
- response = self._app.post_json( '/semantic_completion_available', {
- 'column_num': 0,
- 'filepath': '/foo',
- 'file_data': {
- '/foo': {
- 'filetypes': [ 'text' ],
- 'contents': 'zoo'
- }
+from ycmd.tests import SharedYcmd
+from ycmd.tests.test_utils import ( BuildRequest, CompletionEntryMatcher,
+ DummyCompleter, PatchCompleter,
+ UserOption )
+
+
+@SharedYcmd
+def GetCompletions_RequestValidation_NoLineNumException_test( app ):
+ response = app.post_json( '/semantic_completion_available', {
+ 'column_num': 0,
+ 'filepath': '/foo',
+ 'file_data': {
+ '/foo': {
+ 'filetypes': [ 'text' ],
+ 'contents': 'zoo'
}
- }, status = '5*', expect_errors = True )
- response.mustcontain( 'missing', 'line_num' )
-
-
- def IdentifierCompleter_Works_test( self ):
- event_data = self._BuildRequest( contents = 'foo foogoo ba',
- event_name = 'FileReadyToParse' )
-
- self._app.post_json( '/event_notification', event_data )
-
- # query is 'oo'
- completion_data = self._BuildRequest( contents = 'oo foo foogoo ba',
- column_num = 3 )
- response_data = self._app.post_json( '/completions', completion_data ).json
-
- eq_( 1, response_data[ 'completion_start_column' ] )
- assert_that(
- response_data[ 'completions' ],
- has_items( self._CompletionEntryMatcher( 'foo', '[ID]' ),
- self._CompletionEntryMatcher( 'foogoo', '[ID]' ) )
- )
-
-
- def IdentifierCompleter_StartColumn_AfterWord_test( self ):
- completion_data = self._BuildRequest( contents = 'oo foo foogoo ba',
- column_num = 11 )
- response_data = self._app.post_json( '/completions', completion_data ).json
- eq_( 8, response_data[ 'completion_start_column' ] )
-
-
- def IdentifierCompleter_WorksForSpecialIdentifierChars_test( self ):
- contents = """
- textarea {
- font-family: sans-serif;
- font-size: 12px;
- }"""
- event_data = self._BuildRequest( contents = contents,
- filetype = 'css',
- event_name = 'FileReadyToParse' )
-
- self._app.post_json( '/event_notification', event_data )
-
- # query is 'fo'
- completion_data = self._BuildRequest( contents = 'fo ' + contents,
- filetype = 'css',
- column_num = 3 )
- results = self._app.post_json( '/completions',
- completion_data ).json[ 'completions' ]
-
- assert_that(
- results,
- has_items( self._CompletionEntryMatcher( 'font-size', '[ID]' ),
- self._CompletionEntryMatcher( 'font-family', '[ID]' ) )
+ }
+ }, status = '5*', expect_errors = True )
+ response.mustcontain( 'missing', 'line_num' )
+
+
+@SharedYcmd
+def GetCompletions_IdentifierCompleter_Works_test( app ):
+ event_data = BuildRequest( contents = 'foo foogoo ba',
+ event_name = 'FileReadyToParse' )
+
+ app.post_json( '/event_notification', event_data )
+
+ # query is 'oo'
+ completion_data = BuildRequest( contents = 'oo foo foogoo ba',
+ column_num = 3 )
+ response_data = app.post_json( '/completions', completion_data ).json
+
+ eq_( 1, response_data[ 'completion_start_column' ] )
+ assert_that(
+ response_data[ 'completions' ],
+ has_items( CompletionEntryMatcher( 'foo', '[ID]' ),
+ CompletionEntryMatcher( 'foogoo', '[ID]' ) )
+ )
+
+
+@SharedYcmd
+def GetCompletions_IdentifierCompleter_StartColumn_AfterWord_test( app ):
+ completion_data = BuildRequest( contents = 'oo foo foogoo ba',
+ column_num = 11 )
+ response_data = app.post_json( '/completions', completion_data ).json
+ eq_( 8, response_data[ 'completion_start_column' ] )
+
+
+@SharedYcmd
+def GetCompletions_IdentifierCompleter_WorksForSpecialIdentifierChars_test(
+ app ):
+ contents = """
+ textarea {
+ font-family: sans-serif;
+ font-size: 12px;
+ }"""
+ event_data = BuildRequest( contents = contents,
+ filetype = 'css',
+ event_name = 'FileReadyToParse' )
+
+ app.post_json( '/event_notification', event_data )
+
+ # query is 'fo'
+ completion_data = BuildRequest( contents = 'fo ' + contents,
+ filetype = 'css',
+ column_num = 3 )
+ results = app.post_json( '/completions',
+ completion_data ).json[ 'completions' ]
+
+ assert_that(
+ results,
+ has_items( CompletionEntryMatcher( 'font-size', '[ID]' ),
+ CompletionEntryMatcher( 'font-family', '[ID]' ) )
+ )
+
+
+@SharedYcmd
+@patch( 'ycmd.tests.test_utils.DummyCompleter.CandidatesList',
+ return_value = [ 'foo', 'bar', 'qux' ] )
+def GetCompletions_ForceSemantic_Works_test( app, *args ):
+ with PatchCompleter( DummyCompleter, 'dummy_filetype' ):
+ completion_data = BuildRequest( filetype = 'dummy_filetype',
+ force_semantic = True )
+
+ results = app.post_json( '/completions',
+ completion_data ).json[ 'completions' ]
+ assert_that( results, has_items( CompletionEntryMatcher( 'foo' ),
+ CompletionEntryMatcher( 'bar' ),
+ CompletionEntryMatcher( 'qux' ) ) )
+
+
+@SharedYcmd
+def GetCompletions_IdentifierCompleter_SyntaxKeywordsAdded_test( app ):
+ event_data = BuildRequest( event_name = 'FileReadyToParse',
+ syntax_keywords = ['foo', 'bar', 'zoo'] )
+
+ app.post_json( '/event_notification', event_data )
+
+ completion_data = BuildRequest( contents = 'oo ',
+ column_num = 3 )
+
+ results = app.post_json( '/completions',
+ completion_data ).json[ 'completions' ]
+ assert_that( results,
+ has_items( CompletionEntryMatcher( 'foo' ),
+ CompletionEntryMatcher( 'zoo' ) ) )
+
+
+@SharedYcmd
+def GetCompletions_UltiSnipsCompleter_Works_test( app ):
+ event_data = BuildRequest(
+ event_name = 'BufferVisit',
+ ultisnips_snippets = [
+ {'trigger': 'foo', 'description': 'bar'},
+ {'trigger': 'zoo', 'description': 'goo'},
+ ] )
+
+ app.post_json( '/event_notification', event_data )
+
+ completion_data = BuildRequest( contents = 'oo ',
+ column_num = 3 )
+
+ results = app.post_json( '/completions',
+ completion_data ).json[ 'completions' ]
+ assert_that(
+ results,
+ has_items(
+ CompletionEntryMatcher( 'foo', extra_menu_info='<snip> bar' ),
+ CompletionEntryMatcher( 'zoo', extra_menu_info='<snip> goo' )
)
+ )
- @patch( 'ycmd.tests.test_utils.DummyCompleter.CandidatesList',
- return_value = [ 'foo', 'bar', 'qux' ] )
- def ForceSemantic_Works_test( self, *args ):
- with self.PatchCompleter( DummyCompleter, 'dummy_filetype' ):
- completion_data = self._BuildRequest( filetype = 'dummy_filetype',
- force_semantic = True )
-
- results = self._app.post_json( '/completions',
- completion_data ).json[ 'completions' ]
- assert_that( results, has_items( self._CompletionEntryMatcher( 'foo' ),
- self._CompletionEntryMatcher( 'bar' ),
- self._CompletionEntryMatcher( 'qux' ) ) )
-
-
- def IdentifierCompleter_SyntaxKeywordsAdded_test( self ):
- event_data = self._BuildRequest( event_name = 'FileReadyToParse',
- syntax_keywords = ['foo', 'bar', 'zoo'] )
-
- self._app.post_json( '/event_notification', event_data )
-
- completion_data = self._BuildRequest( contents = 'oo ',
- column_num = 3 )
-
- results = self._app.post_json( '/completions',
- completion_data ).json[ 'completions' ]
- assert_that( results,
- has_items( self._CompletionEntryMatcher( 'foo' ),
- self._CompletionEntryMatcher( 'zoo' ) ) )
-
-
- def UltiSnipsCompleter_Works_test( self ):
- event_data = self._BuildRequest(
+@SharedYcmd
+def GetCompletions_UltiSnipsCompleter_UnusedWhenOffWithOption_test( app ):
+ with UserOption( 'use_ultisnips_completer', False ):
+ event_data = BuildRequest(
event_name = 'BufferVisit',
ultisnips_snippets = [
{'trigger': 'foo', 'description': 'bar'},
{'trigger': 'zoo', 'description': 'goo'},
] )
- self._app.post_json( '/event_notification', event_data )
-
- completion_data = self._BuildRequest( contents = 'oo ',
- column_num = 3 )
-
- results = self._app.post_json( '/completions',
- completion_data ).json[ 'completions' ]
- assert_that(
- results,
- has_items(
- self._CompletionEntryMatcher( 'foo', extra_menu_info='<snip> bar' ),
- self._CompletionEntryMatcher( 'zoo', extra_menu_info='<snip> goo' )
- )
- )
-
-
- def UltiSnipsCompleter_UnusedWhenOffWithOption_test( self ):
- with self.UserOption( 'use_ultisnips_completer', False ):
- self._app = TestApp( handlers.app )
+ app.post_json( '/event_notification', event_data )
- event_data = self._BuildRequest(
- event_name = 'BufferVisit',
- ultisnips_snippets = [
- {'trigger': 'foo', 'description': 'bar'},
- {'trigger': 'zoo', 'description': 'goo'},
- ] )
+ completion_data = BuildRequest( contents = 'oo ', column_num = 3 )
- self._app.post_json( '/event_notification', event_data )
+ eq_( [],
+ app.post_json( '/completions',
+ completion_data ).json[ 'completions' ] )
- completion_data = self._BuildRequest( contents = 'oo ',
- column_num = 3 )
- eq_( [],
- self._app.post_json( '/completions',
- completion_data ).json[ 'completions' ] )
+@SharedYcmd
+@patch( 'ycmd.tests.test_utils.DummyCompleter.CandidatesList',
+ return_value = [ 'some_candidate' ] )
+def GetCompletions_SemanticCompleter_WorksWhenTriggerIsIdentifier_test(
+ app, *args ):
+ with UserOption( 'semantic_triggers',
+ { 'dummy_filetype': [ '_' ] } ):
+ with PatchCompleter( DummyCompleter, 'dummy_filetype' ):
+ completion_data = BuildRequest( filetype = 'dummy_filetype',
+ contents = 'some_can',
+ column_num = 9 )
-
- @patch( 'ycmd.tests.test_utils.DummyCompleter.CandidatesList',
- return_value = [ 'some_candidate' ] )
- def SemanticCompleter_WorksWhenTriggerIsIdentifier_test( self, *args ):
- with self.UserOption( 'semantic_triggers',
- { 'dummy_filetype': [ '_' ] } ):
- with self.PatchCompleter( DummyCompleter, 'dummy_filetype' ):
- completion_data = self._BuildRequest( filetype = 'dummy_filetype',
- contents = 'some_can',
- column_num = 9 )
-
- results = self._app.post_json( '/completions',
- completion_data ).json[ 'completions' ]
- assert_that(
- results,
- has_items( self._CompletionEntryMatcher( 'some_candidate' ) )
- )
+ results = app.post_json( '/completions',
+ completion_data ).json[ 'completions' ]
+ assert_that(
+ results,
+ has_items( CompletionEntryMatcher( 'some_candidate' ) )
+ )
diff --git a/ycmd/tests/go/__init__.py b/ycmd/tests/go/__init__.py
--- a/ycmd/tests/go/__init__.py
+++ b/ycmd/tests/go/__init__.py
@@ -0,0 +1,72 @@
+# Copyright (C) 2016 ycmd contributors
+#
+# This file is part of ycmd.
+#
+# ycmd is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# ycmd is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with ycmd. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import unicode_literals
+from __future__ import print_function
+from __future__ import division
+from __future__ import absolute_import
+from future import standard_library
+standard_library.install_aliases()
+from builtins import * # noqa
+
+import functools
+import os
+
+from ycmd.tests.test_utils import BuildRequest, SetUpApp
+
+shared_app = None
+
+
+def PathToTestFile( *args ):
+ dir_of_current_script = os.path.dirname( os.path.abspath( __file__ ) )
+ return os.path.join( dir_of_current_script, 'testdata', *args )
+
+
+def StopGoCodeServer( app ):
+ app.post_json( '/run_completer_command',
+ BuildRequest( completer_target = 'filetype_default',
+ command_arguments = [ 'StopServer' ],
+ filetype = 'go' ) )
+
+
+def setUpPackage():
+ """Initializes the ycmd server as a WebTest application that will be shared
+ by all tests using the SharedYcmd decorator in this package. Additional
+ configuration that is common to these tests, like starting a semantic
+ subserver, should be done here."""
+ global shared_app
+
+ shared_app = SetUpApp()
+
+
+def tearDownPackage():
+ global shared_app
+
+ StopGoCodeServer( shared_app )
+
+
+def SharedYcmd( test ):
+ """Defines a decorator to be attached to tests of this package. This decorator
+ passes the shared ycmd application as a parameter.
+
+ Do NOT attach it to test generators but directly to the yielded tests."""
+ global shared_app
+
+ @functools.wraps( test )
+ def Wrapper( *args, **kwargs ):
+ return test( shared_app, *args, **kwargs )
+ return Wrapper
diff --git a/ycmd/tests/go/get_completions_test.py b/ycmd/tests/go/get_completions_test.py
--- a/ycmd/tests/go/get_completions_test.py
+++ b/ycmd/tests/go/get_completions_test.py
@@ -24,22 +24,23 @@
from builtins import * # noqa
from hamcrest import assert_that, has_item
-from .go_handlers_test import Go_Handlers_test
-from ycmd.utils import ReadFile
+from ycmd.tests.go import PathToTestFile, SharedYcmd
+from ycmd.tests.test_utils import BuildRequest, CompletionEntryMatcher
+from ycmd.utils import ReadFile
-class Go_GetCompletions_test( Go_Handlers_test ):
- def Basic_test( self ):
- filepath = self._PathToTestFile( 'test.go' )
- completion_data = self._BuildRequest( filepath = filepath,
- filetype = 'go',
- contents = ReadFile( filepath ),
- force_semantic = True,
- line_num = 9,
- column_num = 11 )
+@SharedYcmd
+def GetCompletions_Basic_test( app ):
+ filepath = PathToTestFile( 'test.go' )
+ completion_data = BuildRequest( filepath = filepath,
+ filetype = 'go',
+ contents = ReadFile( filepath ),
+ force_semantic = True,
+ line_num = 9,
+ column_num = 11 )
- results = self._app.post_json( '/completions',
- completion_data ).json[ 'completions' ]
- assert_that( results,
- has_item( self._CompletionEntryMatcher( u'Logger' ) ) )
+ results = app.post_json( '/completions',
+ completion_data ).json[ 'completions' ]
+ assert_that( results,
+ has_item( CompletionEntryMatcher( u'Logger' ) ) )
diff --git a/ycmd/tests/go/go_handlers_test.py b/ycmd/tests/go/go_handlers_test.py
deleted file mode 100644
--- a/ycmd/tests/go/go_handlers_test.py
+++ /dev/null
@@ -1,44 +0,0 @@
-# Copyright (C) 2015 ycmd contributors
-#
-# This file is part of ycmd.
-#
-# ycmd is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# ycmd is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with ycmd. If not, see <http://www.gnu.org/licenses/>.
-
-from __future__ import unicode_literals
-from __future__ import print_function
-from __future__ import division
-from __future__ import absolute_import
-from future import standard_library
-standard_library.install_aliases()
-from builtins import * # noqa
-
-from ..handlers_test import Handlers_test
-
-
-class Go_Handlers_test( Handlers_test ):
-
- def __init__( self ):
- self._file = __file__
-
-
- def tearDown( self ):
- self._StopGoCodeServer()
-
-
- def _StopGoCodeServer( self ):
- self._app.post_json(
- '/run_completer_command',
- self._BuildRequest( completer_target = 'filetype_default',
- command_arguments = [ 'StopServer' ],
- filetype = 'go' ) )
diff --git a/ycmd/tests/go/subcommands_test.py b/ycmd/tests/go/subcommands_test.py
--- a/ycmd/tests/go/subcommands_test.py
+++ b/ycmd/tests/go/subcommands_test.py
@@ -23,59 +23,60 @@
standard_library.install_aliases()
from builtins import * # noqa
-from .go_handlers_test import Go_Handlers_test
from nose.tools import eq_
-from ycmd.utils import ReadFile
+from ycmd.tests.go import PathToTestFile, SharedYcmd
+from ycmd.tests.test_utils import BuildRequest
+from ycmd.utils import ReadFile
-class Go_Subcommands_test( Go_Handlers_test ):
- def _GoTo( self, params ):
- filepath = self._PathToTestFile( 'goto.go' )
- contents = ReadFile( filepath )
+@SharedYcmd
+def RunGoToTest( app, params ):
+ filepath = PathToTestFile( 'goto.go' )
+ contents = ReadFile( filepath )
- command = params[ 'command' ]
- goto_data = self._BuildRequest( completer_target = 'filetype_default',
- command_arguments = [ command ],
- line_num = 8,
- column_num = 8,
- contents = contents,
- filetype = 'go',
- filepath = filepath )
+ command = params[ 'command' ]
+ goto_data = BuildRequest( completer_target = 'filetype_default',
+ command_arguments = [ command ],
+ line_num = 8,
+ column_num = 8,
+ contents = contents,
+ filetype = 'go',
+ filepath = filepath )
- results = self._app.post_json( '/run_completer_command',
- goto_data )
+ results = app.post_json( '/run_completer_command',
+ goto_data )
- eq_( {
- 'line_num': 3, 'column_num': 6, 'filepath': filepath
- }, results.json )
+ eq_( {
+ 'line_num': 3, 'column_num': 6, 'filepath': filepath
+ }, results.json )
- filepath = self._PathToTestFile( 'win.go' )
- contents = ReadFile( filepath )
+ filepath = PathToTestFile( 'win.go' )
+ contents = ReadFile( filepath )
- command = params[ 'command' ]
- goto_data = self._BuildRequest( completer_target = 'filetype_default',
- command_arguments = [ command ],
- line_num = 4,
- column_num = 7,
- contents = contents,
- filetype = 'go',
- filepath = filepath )
+ command = params[ 'command' ]
+ goto_data = BuildRequest( completer_target = 'filetype_default',
+ command_arguments = [ command ],
+ line_num = 4,
+ column_num = 7,
+ contents = contents,
+ filetype = 'go',
+ filepath = filepath )
- results = self._app.post_json( '/run_completer_command',
- goto_data )
+ results = app.post_json( '/run_completer_command',
+ goto_data )
- eq_( {
- 'line_num': 2, 'column_num': 6, 'filepath': filepath
- }, results.json )
+ eq_( {
+ 'line_num': 2, 'column_num': 6, 'filepath': filepath
+ }, results.json )
- def GoTo_all_test( self ):
- tests = [
- { 'command': 'GoTo' },
- { 'command': 'GoToDefinition' },
- { 'command': 'GoToDeclaration' }
- ]
+def Subcommands_GoTo_all_test():
+ tests = [
+ { 'command': 'GoTo' },
+ { 'command': 'GoToDefinition' },
+ { 'command': 'GoToDeclaration' }
+ ]
- for test in tests:
- yield ( self._GoTo, test )
+ for test in tests:
+ yield RunGoToTest, test
diff --git a/ycmd/tests/handlers_test.py b/ycmd/tests/handlers_test.py
deleted file mode 100644
--- a/ycmd/tests/handlers_test.py
+++ /dev/null
@@ -1,116 +0,0 @@
-# Copyright (C) 2015 ycmd contributors
-#
-# This file is part of ycmd.
-#
-# ycmd is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# ycmd is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with ycmd. If not, see <http://www.gnu.org/licenses/>.
-
-from __future__ import absolute_import
-from __future__ import unicode_literals
-from __future__ import print_function
-from __future__ import division
-from future import standard_library
-standard_library.install_aliases()
-from builtins import * # noqa
-
-from ..server_utils import SetUpPythonPath
-SetUpPythonPath()
-from webtest import TestApp
-from .. import handlers
-from ycmd import user_options_store
-from hamcrest import has_entries, has_entry, contains_string
-from .test_utils import BuildRequest
-from mock import patch
-import contextlib
-import bottle
-import os
-
-
-class Handlers_test( object ):
-
- def __init__( self ):
- self._file = __file__
-
-
- def setUp( self ):
- bottle.debug( True )
- handlers.SetServerStateToDefaults()
- self._app = TestApp( handlers.app )
-
-
- @contextlib.contextmanager
- def PatchCompleter( self, completer, filetype ):
- user_options = handlers._server_state._user_options
- with patch.dict( 'ycmd.handlers._server_state._filetype_completers',
- { filetype: completer( user_options ) } ):
- yield
-
-
- @contextlib.contextmanager
- def UserOption( self, key, value ):
- try:
- current_options = dict( user_options_store.GetAll() )
- user_options = current_options.copy()
- user_options.update( { key: value } )
- handlers.UpdateUserOptions( user_options )
- yield
- finally:
- handlers.UpdateUserOptions( current_options )
-
-
- @staticmethod
- def _BuildRequest( **kwargs ):
- return BuildRequest( **kwargs )
-
-
- @staticmethod
- def _CompletionEntryMatcher( insertion_text,
- extra_menu_info = None,
- extra_params = None ):
- match = { 'insertion_text': insertion_text }
-
- if extra_menu_info:
- match.update( { 'extra_menu_info': extra_menu_info } )
-
- if extra_params:
- match.update( extra_params )
-
- return has_entries( match )
-
-
- @staticmethod
- def _CompletionLocationMatcher( location_type, value ):
- return has_entry( 'extra_data',
- has_entry( 'location',
- has_entry( location_type, value ) ) )
-
-
- @staticmethod
- def _ErrorMatcher( cls, msg = None ):
- """ Returns a hamcrest matcher for a server exception response """
- entry = { 'exception': has_entry( 'TYPE', cls.__name__ ) }
-
- if msg:
- entry.update( { 'message': msg } )
-
- return has_entries( entry )
-
-
- @staticmethod
- def _MessageMatcher( msg ):
- return has_entry( 'message', contains_string( msg ) )
-
-
- def _PathToTestFile( self, *args ):
- dir_of_current_script = os.path.dirname( os.path.abspath( self._file ) )
- return os.path.join( dir_of_current_script, 'testdata', *args )
diff --git a/ycmd/tests/javascript/__init__.py b/ycmd/tests/javascript/__init__.py
--- a/ycmd/tests/javascript/__init__.py
+++ b/ycmd/tests/javascript/__init__.py
@@ -0,0 +1,129 @@
+# Copyright (C) 2016 ycmd contributors
+#
+# This file is part of ycmd.
+#
+# ycmd is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# ycmd is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with ycmd. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import unicode_literals
+from __future__ import print_function
+from __future__ import division
+from __future__ import absolute_import
+from future import standard_library
+standard_library.install_aliases()
+from builtins import * # noqa
+
+import functools
+import os
+import time
+
+from ycmd import handlers
+from ycmd.tests.test_utils import BuildRequest, SetUpApp
+
+shared_app = None
+shared_current_dir = None
+
+
+def PathToTestFile( *args ):
+ dir_of_current_script = os.path.dirname( os.path.abspath( __file__ ) )
+ return os.path.join( dir_of_current_script, 'testdata', *args )
+
+
+def WaitUntilTernServerReady( app ):
+ app.post_json( '/run_completer_command', BuildRequest(
+ command_arguments = [ 'StartServer' ],
+ completer_target = 'filetype_default',
+ filetype = 'javascript',
+ filepath = '/foo.js',
+ contents = '',
+ line_num = '1'
+ ) )
+
+ retries = 100
+ while retries > 0:
+ result = app.get( '/ready', { 'subserver': 'javascript' } ).json
+ if result:
+ return
+
+ time.sleep( 0.2 )
+ retries = retries - 1
+
+ raise RuntimeError( 'Timeout waiting for Tern.js server to be ready' )
+
+
+def StopTernServer( app ):
+ app.post_json( '/run_completer_command',
+ BuildRequest( completer_target = 'filetype_default',
+ command_arguments = [ 'StopServer' ],
+ filetype = 'javascript' ),
+ expect_errors = True )
+
+
+def setUpPackage():
+ """Initializes the ycmd server as a WebTest application that will be shared
+ by all tests using the SharedYcmd decorator in this package. Additional
+ configuration that is common to these tests, like starting a semantic
+ subserver, should be done here."""
+ global shared_app, shared_current_dir
+
+ shared_app = SetUpApp()
+ shared_current_dir = os.getcwd()
+ os.chdir( PathToTestFile() )
+ WaitUntilTernServerReady( shared_app )
+
+
+def tearDownPackage():
+ """Cleans up the tests using the SharedYcmd decorator in this package. It is
+ executed once after running all the tests in the package."""
+ global shared_app, shared_current_dir
+
+ StopTernServer( shared_app )
+ os.chdir( shared_current_dir )
+
+
+def SharedYcmd( test ):
+ """Defines a decorator to be attached to tests of this package. This decorator
+ passes the shared ycmd application as a parameter.
+
+ Do NOT attach it to test generators but directly to the yielded tests."""
+ global shared_app
+
+ @functools.wraps( test )
+ def Wrapper( *args, **kwargs ):
+ return test( shared_app, *args, **kwargs )
+ return Wrapper
+
+
+def IsolatedYcmd( test ):
+ """Defines a decorator to be attached to tests of this package. This decorator
+ passes a unique ycmd application as a parameter. It should be used on tests
+ that change the server state in a irreversible way (ex: a semantic subserver
+ is stopped or restarted) or expect a clean state (ex: no semantic subserver
+ started, no .ycm_extra_conf.py loaded, etc).
+
+ Do NOT attach it to test generators but directly to the yielded tests."""
+ @functools.wraps( test )
+ def Wrapper( *args, **kwargs ):
+ old_server_state = handlers._server_state
+ old_current_dir = os.getcwd()
+
+ try:
+ os.chdir( PathToTestFile() )
+ app = SetUpApp()
+ WaitUntilTernServerReady( app )
+ test( app, *args, **kwargs )
+ StopTernServer( app )
+ finally:
+ os.chdir( old_current_dir )
+ handlers._server_state = old_server_state
+ return Wrapper
diff --git a/ycmd/tests/javascript/event_notification_test.py b/ycmd/tests/javascript/event_notification_test.py
--- a/ycmd/tests/javascript/event_notification_test.py
+++ b/ycmd/tests/javascript/event_notification_test.py
@@ -23,149 +23,146 @@
standard_library.install_aliases()
from builtins import * # noqa
-from ycmd.server_utils import SetUpPythonPath
-from ycmd.utils import ReadFile
-SetUpPythonPath()
-from nose.tools import eq_
from hamcrest import assert_that, empty
-
-from .javascript_handlers_test import Javascript_Handlers_test
+from mock import patch
+from nose.tools import eq_
from pprint import pformat
import http.client
import os
-from mock import patch
+from ycmd.tests.test_utils import BuildRequest, ErrorMatcher
+from ycmd.tests.javascript import ( IsolatedYcmd, PathToTestFile,
+ WaitUntilTernServerReady )
+from ycmd.utils import ReadFile
-class Javascript_EventNotification_test( Javascript_Handlers_test ):
-
- def OnFileReadyToParse_ProjectFile_cwd_test( self ):
- contents = ReadFile( self._PathToTestFile( 'simple_test.js' ) )
- response = self._app.post_json( '/event_notification',
- self._BuildRequest(
- event_name = 'FileReadyToParse',
- contents = contents,
- filetype = 'javascript' ),
- expect_errors = True)
-
- eq_( response.status_code, http.client.OK )
- assert_that( response.json, empty() )
-
-
- def OnFileReadyToParse_ProjectFile_parentdir_test( self ):
- os.chdir( self._PathToTestFile( 'lamelib' ) )
- contents = ReadFile( self._PathToTestFile( 'simple_test.js' ) )
-
- response = self._app.post_json( '/event_notification',
- self._BuildRequest(
- event_name = 'FileReadyToParse',
- contents = contents,
- filetype = 'javascript' ),
- expect_errors = True)
-
- eq_( response.status_code, http.client.OK )
- assert_that( response.json, empty() )
-
-
- @patch( 'ycmd.completers.javascript.tern_completer.GlobalConfigExists',
- return_value = False )
- def OnFileReadyToParse_NoProjectFile_test( self, *args ):
- # We raise an error if we can't detect a .tern-project file.
- # We only do this on the first OnFileReadyToParse event after a
- # server startup.
- os.chdir( self._PathToTestFile( '..' ) )
- contents = ReadFile( self._PathToTestFile( 'simple_test.js' ) )
-
- response = self._app.post_json( '/event_notification',
- self._BuildRequest(
- event_name = 'FileReadyToParse',
- contents = contents,
- filetype = 'javascript' ),
- expect_errors = True )
-
- print( 'event response: {0}'.format( pformat( response.json ) ) )
-
- eq_( response.status_code, http.client.INTERNAL_SERVER_ERROR )
-
- assert_that(
- response.json,
- self._ErrorMatcher( RuntimeError,
- 'Warning: Unable to detect a .tern-project file '
- 'in the hierarchy before ' + os.getcwd() +
- ' and no global .tern-config file was found. '
- 'This is required for accurate JavaScript '
- 'completion. Please see the User Guide for '
- 'details.' )
- )
-
- # Check that a subsequent call does *not* raise the error
-
- response = self._app.post_json( '/event_notification',
- self._BuildRequest(
- event_name = 'FileReadyToParse',
- contents = contents,
- filetype = 'javascript' ),
- expect_errors = True )
-
- print( 'event response: {0}'.format( pformat( response.json ) ) )
-
- eq_( response.status_code, http.client.OK )
- assert_that( response.json, empty() )
-
- # Restart the server and check that it raises it again
-
- self._app.post_json(
- '/run_completer_command',
- self._BuildRequest( command_arguments = [ 'StopServer' ],
- filetype = 'javascript',
- contents = contents,
- completer_target = 'filetype_default' )
- )
- self._app.post_json(
- '/run_completer_command',
- self._BuildRequest( command_arguments = [ 'StartServer' ],
- filetype = 'javascript',
- contents = contents,
- completer_target = 'filetype_default' ) )
-
- self._WaitUntilTernServerReady()
-
- response = self._app.post_json( '/event_notification',
- self._BuildRequest(
- event_name = 'FileReadyToParse',
- contents = contents,
- filetype = 'javascript' ),
- expect_errors = True)
-
- print( 'event response: {0}'.format( pformat( response.json ) ) )
-
- eq_( response.status_code, http.client.INTERNAL_SERVER_ERROR )
-
- assert_that(
- response.json,
- self._ErrorMatcher( RuntimeError,
- 'Warning: Unable to detect a .tern-project file '
- 'in the hierarchy before ' + os.getcwd() +
- ' and no global .tern-config file was found. '
- 'This is required for accurate JavaScript '
- 'completion. Please see the User Guide for '
- 'details.' )
- )
-
-
- @patch( 'ycmd.completers.javascript.tern_completer.GlobalConfigExists',
- return_value = True )
- def OnFileReadyToParse_UseGlobalConfig_test( self, *args ):
- os.chdir( self._PathToTestFile( '..' ) )
- contents = ReadFile( self._PathToTestFile( 'simple_test.js' ) )
-
- response = self._app.post_json( '/event_notification',
- self._BuildRequest(
- event_name = 'FileReadyToParse',
- contents = contents,
- filetype = 'javascript' ),
- expect_errors = True )
-
- print( 'event response: {0}'.format( pformat( response.json ) ) )
-
- eq_( response.status_code, http.client.OK )
+@IsolatedYcmd
+def EventNotification_OnFileReadyToParse_ProjectFile_cwd_test( app ):
+ contents = ReadFile( PathToTestFile( 'simple_test.js' ) )
+
+ response = app.post_json( '/event_notification',
+ BuildRequest(
+ event_name = 'FileReadyToParse',
+ contents = contents,
+ filetype = 'javascript' ),
+ expect_errors = True)
+
+ eq_( response.status_code, http.client.OK )
+ assert_that( response.json, empty() )
+
+
+@IsolatedYcmd
+def EventNotification_OnFileReadyToParse_ProjectFile_parentdir_test( app ):
+ os.chdir( PathToTestFile( 'lamelib' ) )
+ contents = ReadFile( PathToTestFile( 'simple_test.js' ) )
+
+ response = app.post_json( '/event_notification',
+ BuildRequest(
+ event_name = 'FileReadyToParse',
+ contents = contents,
+ filetype = 'javascript' ),
+ expect_errors = True)
+
+ eq_( response.status_code, http.client.OK )
+ assert_that( response.json, empty() )
+
+
+@IsolatedYcmd
+@patch( 'ycmd.completers.javascript.tern_completer.GlobalConfigExists',
+ return_value = False )
+def EventNotification_OnFileReadyToParse_NoProjectFile_test( app, *args ):
+ # We raise an error if we can't detect a .tern-project file.
+ # We only do this on the first OnFileReadyToParse event after a
+ # server startup.
+ os.chdir( PathToTestFile( '..' ) )
+ contents = ReadFile( PathToTestFile( 'simple_test.js' ) )
+
+ response = app.post_json( '/event_notification',
+ BuildRequest(
+ event_name = 'FileReadyToParse',
+ contents = contents,
+ filetype = 'javascript' ),
+ expect_errors = True )
+
+ print( 'event response: {0}'.format( pformat( response.json ) ) )
+
+ eq_( response.status_code, http.client.INTERNAL_SERVER_ERROR )
+
+ assert_that(
+ response.json,
+ ErrorMatcher( RuntimeError,
+ 'Warning: Unable to detect a .tern-project file '
+ 'in the hierarchy before ' + os.getcwd() +
+ ' and no global .tern-config file was found. '
+ 'This is required for accurate JavaScript '
+ 'completion. Please see the User Guide for '
+ 'details.' )
+ )
+
+ # Check that a subsequent call does *not* raise the error
+
+ response = app.post_json( '/event_notification',
+ BuildRequest(
+ event_name = 'FileReadyToParse',
+ contents = contents,
+ filetype = 'javascript' ),
+ expect_errors = True )
+
+ print( 'event response: {0}'.format( pformat( response.json ) ) )
+
+ eq_( response.status_code, http.client.OK )
+ assert_that( response.json, empty() )
+
+ # Restart the server and check that it raises it again
+
+ app.post_json( '/run_completer_command',
+ BuildRequest( command_arguments = [ 'StopServer' ],
+ filetype = 'javascript',
+ contents = contents,
+ completer_target = 'filetype_default' ) )
+ app.post_json( '/run_completer_command',
+ BuildRequest( command_arguments = [ 'StartServer' ],
+ filetype = 'javascript',
+ contents = contents,
+ completer_target = 'filetype_default' ) )
+
+ WaitUntilTernServerReady( app )
+
+ response = app.post_json( '/event_notification',
+ BuildRequest( event_name = 'FileReadyToParse',
+ contents = contents,
+ filetype = 'javascript' ),
+ expect_errors = True )
+
+ print( 'event response: {0}'.format( pformat( response.json ) ) )
+
+ eq_( response.status_code, http.client.INTERNAL_SERVER_ERROR )
+
+ assert_that(
+ response.json,
+ ErrorMatcher( RuntimeError,
+ 'Warning: Unable to detect a .tern-project file '
+ 'in the hierarchy before ' + os.getcwd() +
+ ' and no global .tern-config file was found. '
+ 'This is required for accurate JavaScript '
+ 'completion. Please see the User Guide for '
+ 'details.' )
+ )
+
+
+@IsolatedYcmd
+@patch( 'ycmd.completers.javascript.tern_completer.GlobalConfigExists',
+ return_value = True )
+def EventNotification_OnFileReadyToParse_UseGlobalConfig_test( app, *args ):
+ os.chdir( PathToTestFile( '..' ) )
+ contents = ReadFile( PathToTestFile( 'simple_test.js' ) )
+
+ response = app.post_json( '/event_notification',
+ BuildRequest( event_name = 'FileReadyToParse',
+ contents = contents,
+ filetype = 'javascript' ),
+ expect_errors = True )
+
+ print( 'event response: {0}'.format( pformat( response.json ) ) )
+
+ eq_( response.status_code, http.client.OK )
diff --git a/ycmd/tests/javascript/get_completions_test.py b/ycmd/tests/javascript/get_completions_test.py
--- a/ycmd/tests/javascript/get_completions_test.py
+++ b/ycmd/tests/javascript/get_completions_test.py
@@ -23,14 +23,16 @@
standard_library.install_aliases()
from builtins import * # noqa
-from nose.tools import eq_
from hamcrest import ( assert_that, contains, contains_inanyorder, empty,
has_entries )
-from .javascript_handlers_test import Javascript_Handlers_test
+from nose.tools import eq_
from pprint import pformat
-from ycmd.utils import ReadFile
import http.client
+from ycmd.tests.javascript import PathToTestFile, SharedYcmd
+from ycmd.tests.test_utils import BuildRequest, CompletionEntryMatcher
+from ycmd.utils import ReadFile
+
# The following properties/methods are in Object.prototype, so are present
# on all objects:
#
@@ -42,259 +44,264 @@
# isPrototypeOf()
-class Javascript_GetCompletions_test( Javascript_Handlers_test ):
+def RunTest( app, test ):
+ """
+ Method to run a simple completion test and verify the result
- def _RunTest( self, test ):
- """
- Method to run a simple completion test and verify the result
+ test is a dictionary containing:
+ 'request': kwargs for BuildRequest
+ 'expect': {
+ 'response': server response code (e.g. httplib.OK)
+ 'data': matcher for the server response json
+ }
+ """
- test is a dictionary containing:
- 'request': kwargs for BuildRequest
- 'expect': {
- 'response': server response code (e.g. httplib.OK)
- 'data': matcher for the server response json
- }
- """
+ contents = ReadFile( test[ 'request' ][ 'filepath' ] )
- contents = ReadFile( test[ 'request' ][ 'filepath' ] )
+ def CombineRequest( request, data ):
+ kw = request
+ request.update( data )
+ return BuildRequest( **kw )
- def CombineRequest( request, data ):
- kw = request
- request.update( data )
- return self._BuildRequest( **kw )
+ app.post_json( '/event_notification',
+ CombineRequest( test[ 'request' ], {
+ 'event_name': 'FileReadyToParse',
+ 'contents': contents,
+ } ),
+ expect_errors = True )
- self._app.post_json( '/event_notification',
- CombineRequest( test[ 'request' ], {
- 'event_name': 'FileReadyToParse',
- 'contents': contents,
- } ),
- expect_errors = True )
+  # We ignore errors here and we check the response code ourselves.
+ # This is to allow testing of requests returning errors.
+ response = app.post_json( '/completions',
+ CombineRequest( test[ 'request' ], {
+ 'contents': contents
+ } ),
+ expect_errors = True )
- # We ignore errors here and we check the response code ourself.
- # This is to allow testing of requests returning errors.
- response = self._app.post_json( '/completions',
- CombineRequest( test[ 'request' ], {
- 'contents': contents
- } ),
- expect_errors = True )
+ print( 'completer response: {0}'.format( pformat( response.json ) ) )
- print( 'completer response: {0}'.format( pformat( response.json ) ) )
+ eq_( response.status_code, test[ 'expect' ][ 'response' ] )
- eq_( response.status_code, test[ 'expect' ][ 'response' ] )
+ assert_that( response.json, test[ 'expect' ][ 'data' ] )
- assert_that( response.json, test[ 'expect' ][ 'data' ] )
+@SharedYcmd
+def GetCompletions_NoQuery_test( app ):
+ RunTest( app, {
+ 'description': 'semantic completion works for simple object no query',
+ 'request': {
+ 'filetype' : 'javascript',
+ 'filepath' : PathToTestFile( 'simple_test.js' ),
+ 'line_num' : 13,
+ 'column_num': 43,
+ },
+ 'expect': {
+ 'response': http.client.OK,
+ 'data': has_entries( {
+ 'completions': contains_inanyorder(
+ CompletionEntryMatcher( 'a_simple_function',
+ 'fn(param: ?) -> string' ),
+ CompletionEntryMatcher( 'basic_type', 'number' ),
+ CompletionEntryMatcher( 'object', 'object' ),
+ CompletionEntryMatcher( 'toString', 'fn() -> string' ),
+ CompletionEntryMatcher( 'toLocaleString', 'fn() -> string' ),
+ CompletionEntryMatcher( 'valueOf', 'fn() -> number' ),
+ CompletionEntryMatcher( 'hasOwnProperty',
+ 'fn(prop: string) -> bool' ),
+ CompletionEntryMatcher( 'isPrototypeOf',
+ 'fn(obj: ?) -> bool' ),
+ CompletionEntryMatcher( 'propertyIsEnumerable',
+ 'fn(prop: string) -> bool' ),
+ ),
+ 'errors': empty(),
+ } )
+ },
+ } )
- def NoQuery_test( self ):
- self._RunTest( {
- 'description': 'semantic completion works for simple object no query',
- 'request': {
- 'filetype' : 'javascript',
- 'filepath' : self._PathToTestFile( 'simple_test.js' ),
- 'line_num' : 13,
- 'column_num': 43,
- },
- 'expect': {
- 'response': http.client.OK,
- 'data': has_entries( {
- 'completions': contains_inanyorder(
- self._CompletionEntryMatcher( 'a_simple_function',
- 'fn(param: ?) -> string' ),
- self._CompletionEntryMatcher( 'basic_type', 'number' ),
- self._CompletionEntryMatcher( 'object', 'object' ),
- self._CompletionEntryMatcher( 'toString', 'fn() -> string' ),
- self._CompletionEntryMatcher( 'toLocaleString', 'fn() -> string' ),
- self._CompletionEntryMatcher( 'valueOf', 'fn() -> number' ),
- self._CompletionEntryMatcher( 'hasOwnProperty',
- 'fn(prop: string) -> bool' ),
- self._CompletionEntryMatcher( 'isPrototypeOf',
- 'fn(obj: ?) -> bool' ),
- self._CompletionEntryMatcher( 'propertyIsEnumerable',
- 'fn(prop: string) -> bool' ),
- ),
- 'errors': empty(),
- } )
- },
- } )
+@SharedYcmd
+def GetCompletions_Query_test( app ):
+ RunTest( app, {
+ 'description': 'semantic completion works for simple object with query',
+ 'request': {
+ 'filetype' : 'javascript',
+ 'filepath' : PathToTestFile( 'simple_test.js' ),
+ 'line_num' : 14,
+ 'column_num': 45,
+ },
+ 'expect': {
+ 'response': http.client.OK,
+ 'data': has_entries( {
+ 'completions': contains(
+ CompletionEntryMatcher( 'basic_type', 'number' ),
+ CompletionEntryMatcher( 'isPrototypeOf',
+ 'fn(obj: ?) -> bool' ),
+ ),
+ 'errors': empty(),
+ } )
+ },
+ } )
- def Query_test( self ):
- self._RunTest( {
- 'description': 'semantic completion works for simple object with query',
- 'request': {
- 'filetype' : 'javascript',
- 'filepath' : self._PathToTestFile( 'simple_test.js' ),
- 'line_num' : 14,
- 'column_num': 45,
- },
- 'expect': {
- 'response': http.client.OK,
- 'data': has_entries( {
- 'completions': contains(
- self._CompletionEntryMatcher( 'basic_type', 'number' ),
- self._CompletionEntryMatcher( 'isPrototypeOf',
- 'fn(obj: ?) -> bool' ),
- ),
- 'errors': empty(),
- } )
- },
- } )
+@SharedYcmd
+def GetCompletions_Require_NoQuery_test( app ):
+ RunTest( app, {
+ 'description': 'semantic completion works for simple object no query',
+ 'request': {
+ 'filetype' : 'javascript',
+ 'filepath' : PathToTestFile( 'requirejs_test.js' ),
+ 'line_num' : 2,
+ 'column_num': 15,
+ },
+ 'expect': {
+ 'response': http.client.OK,
+ 'data': has_entries( {
+ 'completions': contains_inanyorder(
+ CompletionEntryMatcher( 'mine_bitcoin',
+ 'fn(how_much: ?) -> number' ),
+ CompletionEntryMatcher( 'get_number', 'number' ),
+ CompletionEntryMatcher( 'get_string', 'string' ),
+ CompletionEntryMatcher( 'get_thing',
+ 'fn(a: ?) -> number|string' ),
+ CompletionEntryMatcher( 'toString', 'fn() -> string' ),
+ CompletionEntryMatcher( 'toLocaleString', 'fn() -> string' ),
+ CompletionEntryMatcher( 'valueOf', 'fn() -> number' ),
+ CompletionEntryMatcher( 'hasOwnProperty',
+ 'fn(prop: string) -> bool' ),
+ CompletionEntryMatcher( 'isPrototypeOf',
+ 'fn(obj: ?) -> bool' ),
+ CompletionEntryMatcher( 'propertyIsEnumerable',
+ 'fn(prop: string) -> bool' ),
+ ),
+ 'errors': empty(),
+ } )
+ },
+ } )
- def Require_NoQuery_test( self ):
- self._RunTest( {
- 'description': 'semantic completion works for simple object no query',
- 'request': {
- 'filetype' : 'javascript',
- 'filepath' : self._PathToTestFile( 'requirejs_test.js' ),
- 'line_num' : 2,
- 'column_num': 15,
- },
- 'expect': {
- 'response': http.client.OK,
- 'data': has_entries( {
- 'completions': contains_inanyorder(
- self._CompletionEntryMatcher( 'mine_bitcoin',
- 'fn(how_much: ?) -> number' ),
- self._CompletionEntryMatcher( 'get_number', 'number' ),
- self._CompletionEntryMatcher( 'get_string', 'string' ),
- self._CompletionEntryMatcher( 'get_thing',
- 'fn(a: ?) -> number|string' ),
- self._CompletionEntryMatcher( 'toString', 'fn() -> string' ),
- self._CompletionEntryMatcher( 'toLocaleString', 'fn() -> string' ),
- self._CompletionEntryMatcher( 'valueOf', 'fn() -> number' ),
- self._CompletionEntryMatcher( 'hasOwnProperty',
- 'fn(prop: string) -> bool' ),
- self._CompletionEntryMatcher( 'isPrototypeOf',
- 'fn(obj: ?) -> bool' ),
- self._CompletionEntryMatcher( 'propertyIsEnumerable',
- 'fn(prop: string) -> bool' ),
- ),
- 'errors': empty(),
- } )
- },
- } )
+@SharedYcmd
+def GetCompletions_Require_Query_test( app ):
+ RunTest( app, {
+ 'description': 'semantic completion works for require object with query',
+ 'request': {
+ 'filetype' : 'javascript',
+ 'filepath' : PathToTestFile( 'requirejs_test.js' ),
+ 'line_num' : 3,
+ 'column_num': 17,
+ },
+ 'expect': {
+ 'response': http.client.OK,
+ 'data': has_entries( {
+ 'completions': contains(
+ CompletionEntryMatcher( 'mine_bitcoin',
+ 'fn(how_much: ?) -> number' ),
+ ),
+ 'errors': empty(),
+ } )
+ },
+ } )
- def Require_Query_test( self ):
- self._RunTest( {
- 'description': 'semantic completion works for require object with query',
- 'request': {
- 'filetype' : 'javascript',
- 'filepath' : self._PathToTestFile( 'requirejs_test.js' ),
- 'line_num' : 3,
- 'column_num': 17,
- },
- 'expect': {
- 'response': http.client.OK,
- 'data': has_entries( {
- 'completions': contains(
- self._CompletionEntryMatcher( 'mine_bitcoin',
- 'fn(how_much: ?) -> number' ),
- ),
- 'errors': empty(),
- } )
- },
- } )
+@SharedYcmd
+def GetCompletions_Require_Query_LCS_test( app ):
+ RunTest( app, {
+ 'description': ( 'completion works for require object '
+ 'with query not prefix' ),
+ 'request': {
+ 'filetype' : 'javascript',
+ 'filepath' : PathToTestFile( 'requirejs_test.js' ),
+ 'line_num' : 4,
+ 'column_num': 17,
+ },
+ 'expect': {
+ 'response': http.client.OK,
+ 'data': has_entries( {
+ 'completions': contains(
+ CompletionEntryMatcher( 'get_number', 'number' ),
+ CompletionEntryMatcher( 'get_thing',
+ 'fn(a: ?) -> number|string' ),
+ CompletionEntryMatcher( 'get_string', 'string' ),
+ ),
+ 'errors': empty(),
+ } )
+ },
+ } )
- def Require_Query_LCS_test( self ):
- self._RunTest( {
- 'description': ( 'completion works for require object '
- 'with query not prefix' ),
- 'request': {
- 'filetype' : 'javascript',
- 'filepath' : self._PathToTestFile( 'requirejs_test.js' ),
- 'line_num' : 4,
- 'column_num': 17,
- },
- 'expect': {
- 'response': http.client.OK,
- 'data': has_entries( {
- 'completions': contains(
- self._CompletionEntryMatcher( 'get_number', 'number' ),
- self._CompletionEntryMatcher( 'get_thing',
- 'fn(a: ?) -> number|string' ),
- self._CompletionEntryMatcher( 'get_string', 'string' ),
- ),
- 'errors': empty(),
- } )
- },
- } )
-
- def DirtyNamedBuffers_test( self ):
- # This tests that when we have dirty buffers in our editor, tern actually
- # uses them correctly
- self._RunTest( {
- 'description': ( 'completion works for require object '
- 'with query not prefix' ),
- 'request': {
- 'filetype' : 'javascript',
- 'filepath' : self._PathToTestFile( 'requirejs_test.js' ),
- 'line_num' : 18,
- 'column_num': 11,
- 'file_data': {
- self._PathToTestFile( 'no_such_lib', 'no_such_file.js' ): {
- 'contents': (
- 'define( [], function() { return { big_endian_node: 1 } } )' ),
- 'filetypes': [ 'javascript' ]
- }
- },
- },
- 'expect': {
- 'response': http.client.OK,
- 'data': has_entries( {
- 'completions': contains_inanyorder(
- self._CompletionEntryMatcher( 'big_endian_node', 'number' ),
- self._CompletionEntryMatcher( 'toString', 'fn() -> string' ),
- self._CompletionEntryMatcher( 'toLocaleString', 'fn() -> string' ),
- self._CompletionEntryMatcher( 'valueOf', 'fn() -> number' ),
- self._CompletionEntryMatcher( 'hasOwnProperty',
- 'fn(prop: string) -> bool' ),
- self._CompletionEntryMatcher( 'isPrototypeOf',
- 'fn(obj: ?) -> bool' ),
- self._CompletionEntryMatcher( 'propertyIsEnumerable',
- 'fn(prop: string) -> bool' ),
- ),
- 'errors': empty(),
- } )
+@SharedYcmd
+def GetCompletions_DirtyNamedBuffers_test( app ):
+ # This tests that when we have dirty buffers in our editor, tern actually
+ # uses them correctly
+ RunTest( app, {
+ 'description': ( 'completion works for require object '
+ 'with query not prefix' ),
+ 'request': {
+ 'filetype' : 'javascript',
+ 'filepath' : PathToTestFile( 'requirejs_test.js' ),
+ 'line_num' : 18,
+ 'column_num': 11,
+ 'file_data': {
+ PathToTestFile( 'no_such_lib', 'no_such_file.js' ): {
+ 'contents': (
+ 'define( [], function() { return { big_endian_node: 1 } } )' ),
+ 'filetypes': [ 'javascript' ]
+ }
},
- } )
+ },
+ 'expect': {
+ 'response': http.client.OK,
+ 'data': has_entries( {
+ 'completions': contains_inanyorder(
+ CompletionEntryMatcher( 'big_endian_node', 'number' ),
+ CompletionEntryMatcher( 'toString', 'fn() -> string' ),
+ CompletionEntryMatcher( 'toLocaleString', 'fn() -> string' ),
+ CompletionEntryMatcher( 'valueOf', 'fn() -> number' ),
+ CompletionEntryMatcher( 'hasOwnProperty',
+ 'fn(prop: string) -> bool' ),
+ CompletionEntryMatcher( 'isPrototypeOf',
+ 'fn(obj: ?) -> bool' ),
+ CompletionEntryMatcher( 'propertyIsEnumerable',
+ 'fn(prop: string) -> bool' ),
+ ),
+ 'errors': empty(),
+ } )
+ },
+ } )
- def ReturnsDocsInCompletions_test( self ):
- # This tests that we supply docs for completions
- self._RunTest( {
- 'description': 'completions supply docs',
- 'request': {
- 'filetype' : 'javascript',
- 'filepath' : self._PathToTestFile( 'requirejs_test.js' ),
- 'line_num' : 8,
- 'column_num': 15,
- },
- 'expect': {
- 'response': http.client.OK,
- 'data': has_entries( {
- 'completions': contains_inanyorder(
- self._CompletionEntryMatcher(
- 'a_function',
- 'fn(bar: ?) -> {a_value: string}', {
- 'detailed_info': ( 'fn(bar: ?) -> {a_value: string}\n'
- 'This is a short documentation string'),
- } ),
- self._CompletionEntryMatcher( 'options', 'options' ),
- self._CompletionEntryMatcher( 'toString', 'fn() -> string' ),
- self._CompletionEntryMatcher( 'toLocaleString', 'fn() -> string' ),
- self._CompletionEntryMatcher( 'valueOf', 'fn() -> number' ),
- self._CompletionEntryMatcher( 'hasOwnProperty',
- 'fn(prop: string) -> bool' ),
- self._CompletionEntryMatcher( 'isPrototypeOf',
- 'fn(obj: ?) -> bool' ),
- self._CompletionEntryMatcher( 'propertyIsEnumerable',
- 'fn(prop: string) -> bool' ),
- ),
- 'errors': empty(),
- } )
- },
- } )
+@SharedYcmd
+def GetCompletions_ReturnsDocsInCompletions_test( app ):
+ # This tests that we supply docs for completions
+ RunTest( app, {
+ 'description': 'completions supply docs',
+ 'request': {
+ 'filetype' : 'javascript',
+ 'filepath' : PathToTestFile( 'requirejs_test.js' ),
+ 'line_num' : 8,
+ 'column_num': 15,
+ },
+ 'expect': {
+ 'response': http.client.OK,
+ 'data': has_entries( {
+ 'completions': contains_inanyorder(
+ CompletionEntryMatcher(
+ 'a_function',
+ 'fn(bar: ?) -> {a_value: string}', {
+ 'detailed_info': ( 'fn(bar: ?) -> {a_value: string}\n'
+ 'This is a short documentation string'),
+ } ),
+ CompletionEntryMatcher( 'options', 'options' ),
+ CompletionEntryMatcher( 'toString', 'fn() -> string' ),
+ CompletionEntryMatcher( 'toLocaleString', 'fn() -> string' ),
+ CompletionEntryMatcher( 'valueOf', 'fn() -> number' ),
+ CompletionEntryMatcher( 'hasOwnProperty',
+ 'fn(prop: string) -> bool' ),
+ CompletionEntryMatcher( 'isPrototypeOf',
+ 'fn(obj: ?) -> bool' ),
+ CompletionEntryMatcher( 'propertyIsEnumerable',
+ 'fn(prop: string) -> bool' ),
+ ),
+ 'errors': empty(),
+ } )
+ },
+ } )
diff --git a/ycmd/tests/javascript/javascript_handlers_test.py b/ycmd/tests/javascript/javascript_handlers_test.py
deleted file mode 100644
--- a/ycmd/tests/javascript/javascript_handlers_test.py
+++ /dev/null
@@ -1,83 +0,0 @@
-# Copyright (C) 2015 ycmd contributors
-#
-# This file is part of ycmd.
-#
-# ycmd is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# ycmd is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with ycmd. If not, see <http://www.gnu.org/licenses/>.
-
-from __future__ import unicode_literals
-from __future__ import print_function
-from __future__ import division
-from __future__ import absolute_import
-from future import standard_library
-standard_library.install_aliases()
-from builtins import * # noqa
-
-from ..handlers_test import Handlers_test
-import os
-import time
-
-
-class Javascript_Handlers_test( Handlers_test ):
-
- def __init__( self ):
- self._file = __file__
-
-
- def setUp( self ):
- super( Javascript_Handlers_test, self ).setUp()
-
- self._prev_current_dir = os.getcwd()
- os.chdir( self._PathToTestFile() )
-
- self._WaitUntilTernServerReady()
-
-
- def tearDown( self ):
- self._StopTernServer()
-
- os.chdir( self._prev_current_dir )
-
-
- def _StopTernServer( self ):
- try:
- self._app.post_json(
- '/run_completer_command',
- self._BuildRequest( command_arguments = [ 'StopServer' ],
- filetype = 'javascript',
- completer_target = 'filetype_default' )
- )
- except:
- pass
-
-
- def _WaitUntilTernServerReady( self ):
- self._app.post_json( '/run_completer_command', self._BuildRequest(
- command_arguments = [ 'StartServer' ],
- completer_target = 'filetype_default',
- filetype = 'javascript',
- filepath = '/foo.js',
- contents = '',
- line_num = '1'
- ) )
-
- retries = 100
- while retries > 0:
- result = self._app.get( '/ready', { 'subserver': 'javascript' } ).json
- if result:
- return
-
- time.sleep( 0.2 )
- retries = retries - 1
-
- raise RuntimeError( 'Timeout waiting for Tern.js server to be ready' )
diff --git a/ycmd/tests/javascript/subcommands_test.py b/ycmd/tests/javascript/subcommands_test.py
--- a/ycmd/tests/javascript/subcommands_test.py
+++ b/ycmd/tests/javascript/subcommands_test.py
@@ -23,16 +23,15 @@
standard_library.install_aliases()
from builtins import * # noqa
+from hamcrest import assert_that, contains, contains_inanyorder, has_entries
from nose.tools import eq_
-from hamcrest import ( assert_that,
- contains,
- contains_inanyorder,
- has_entries )
-from .javascript_handlers_test import Javascript_Handlers_test
-from ycmd.utils import ReadFile
from pprint import pformat
import http.client
+from ycmd.tests.javascript import PathToTestFile, SharedYcmd
+from ycmd.tests.test_utils import BuildRequest, ErrorMatcher
+from ycmd.utils import ReadFile
+
def LocationMatcher( filepath, column_num, line_num ):
return has_entries( {
@@ -52,355 +51,362 @@ def ChunkMatcher( replacement_text, start, end ):
} )
-class Javascript_Subcommands_test( Javascript_Handlers_test ):
-
- def _RunTest( self, test ):
- contents = ReadFile( test[ 'request' ][ 'filepath' ] )
-
- def CombineRequest( request, data ):
- kw = request
- request.update( data )
- return self._BuildRequest( **kw )
-
- # Because we aren't testing this command, we *always* ignore errors. This
- # is mainly because we (may) want to test scenarios where the completer
- # throws an exception and the easiest way to do that is to throw from
- # within the FlagsForFile function.
- self._app.post_json( '/event_notification',
- CombineRequest( test[ 'request' ], {
- 'event_name': 'FileReadyToParse',
- 'contents': contents,
- } ),
- expect_errors = True )
-
- # We also ignore errors here, but then we check the response code
- # ourself. This is to allow testing of requests returning errors.
- response = self._app.post_json(
- '/run_completer_command',
- CombineRequest( test[ 'request' ], {
- 'completer_target': 'filetype_default',
- 'contents': contents,
- 'filetype': 'javascript',
- 'command_arguments': ( [ test[ 'request' ][ 'command' ] ]
- + test[ 'request' ].get( 'arguments', [] ) )
- } ),
- expect_errors = True
- )
-
- print( 'completer response: {0}'.format( pformat( response.json ) ) )
-
- eq_( response.status_code, test[ 'expect' ][ 'response' ] )
-
- assert_that( response.json, test[ 'expect' ][ 'data' ] )
-
-
- def DefinedSubcommands_test( self ):
- subcommands_data = self._BuildRequest( completer_target = 'javascript' )
-
- self._WaitUntilTernServerReady()
-
- eq_( sorted( [ 'GoToDefinition',
- 'GoTo',
- 'GetDoc',
- 'GetType',
- 'StartServer',
- 'StopServer',
- 'GoToReferences',
- 'RefactorRename' ] ),
- self._app.post_json( '/defined_subcommands',
- subcommands_data ).json )
-
-
- def GoToDefinition_test( self ):
- self._RunTest( {
- 'description': 'GoToDefinition works within file',
- 'request': {
- 'command': 'GoToDefinition',
- 'line_num': 13,
- 'column_num': 25,
- 'filepath': self._PathToTestFile( 'simple_test.js' ),
- },
- 'expect': {
- 'response': http.client.OK,
- 'data': has_entries( {
- 'filepath': self._PathToTestFile( 'simple_test.js' ),
- 'line_num': 1,
- 'column_num': 5,
- } )
- }
- } )
+@SharedYcmd
+def Subcommands_DefinedSubcommands_test( app ):
+ subcommands_data = BuildRequest( completer_target = 'javascript' )
+
+ eq_( sorted( [ 'GoToDefinition',
+ 'GoTo',
+ 'GetDoc',
+ 'GetType',
+ 'StartServer',
+ 'StopServer',
+ 'GoToReferences',
+ 'RefactorRename' ] ),
+ app.post_json( '/defined_subcommands',
+ subcommands_data ).json )
+
+
+def RunTest( app, test ):
+ contents = ReadFile( test[ 'request' ][ 'filepath' ] )
+
+ def CombineRequest( request, data ):
+ kw = request
+ request.update( data )
+ return BuildRequest( **kw )
+
+ # Because we aren't testing this command, we *always* ignore errors. This
+ # is mainly because we (may) want to test scenarios where the completer
+ # throws an exception and the easiest way to do that is to throw from
+ # within the FlagsForFile function.
+ app.post_json( '/event_notification',
+ CombineRequest( test[ 'request' ], {
+ 'event_name': 'FileReadyToParse',
+ 'contents': contents,
+ } ),
+ expect_errors = True )
+
+ # We also ignore errors here, but then we check the response code
+  # ourselves. This is to allow testing of requests returning errors.
+ response = app.post_json(
+ '/run_completer_command',
+ CombineRequest( test[ 'request' ], {
+ 'completer_target': 'filetype_default',
+ 'contents': contents,
+ 'filetype': 'javascript',
+ 'command_arguments': ( [ test[ 'request' ][ 'command' ] ]
+ + test[ 'request' ].get( 'arguments', [] ) )
+ } ),
+ expect_errors = True
+ )
+
+ print( 'completer response: {0}'.format( pformat( response.json ) ) )
+
+ eq_( response.status_code, test[ 'expect' ][ 'response' ] )
+
+ assert_that( response.json, test[ 'expect' ][ 'data' ] )
+
+
+@SharedYcmd
+def Subcommands_GoToDefinition_test( app ):
+ RunTest( app, {
+ 'description': 'GoToDefinition works within file',
+ 'request': {
+ 'command': 'GoToDefinition',
+ 'line_num': 13,
+ 'column_num': 25,
+ 'filepath': PathToTestFile( 'simple_test.js' ),
+ },
+ 'expect': {
+ 'response': http.client.OK,
+ 'data': has_entries( {
+ 'filepath': PathToTestFile( 'simple_test.js' ),
+ 'line_num': 1,
+ 'column_num': 5,
+ } )
+ }
+ } )
- def GoTo_test( self ):
- self._RunTest( {
- 'description': 'GoTo works the same as GoToDefinition within file',
- 'request': {
- 'command': 'GoTo',
- 'line_num': 13,
- 'column_num': 25,
- 'filepath': self._PathToTestFile( 'simple_test.js' ),
- },
- 'expect': {
- 'response': http.client.OK,
- 'data': has_entries( {
- 'filepath': self._PathToTestFile( 'simple_test.js' ),
- 'line_num': 1,
- 'column_num': 5,
- } )
- }
- } )
+@SharedYcmd
+def Subcommands_GoTo_test( app ):
+ RunTest( app, {
+ 'description': 'GoTo works the same as GoToDefinition within file',
+ 'request': {
+ 'command': 'GoTo',
+ 'line_num': 13,
+ 'column_num': 25,
+ 'filepath': PathToTestFile( 'simple_test.js' ),
+ },
+ 'expect': {
+ 'response': http.client.OK,
+ 'data': has_entries( {
+ 'filepath': PathToTestFile( 'simple_test.js' ),
+ 'line_num': 1,
+ 'column_num': 5,
+ } )
+ }
+ } )
- def GetDoc_test( self ):
- self._RunTest( {
- 'description': 'GetDoc works within file',
- 'request': {
- 'command': 'GetDoc',
- 'line_num': 7,
- 'column_num': 16,
- 'filepath': self._PathToTestFile( 'coollib', 'cool_object.js' ),
- },
- 'expect': {
- 'response': http.client.OK,
- 'data': has_entries( {
- 'detailed_info': (
- 'Name: mine_bitcoin\n'
- 'Type: fn(how_much: ?) -> number\n\n'
- 'This function takes a number and invests it in bitcoin. It '
- 'returns\nthe expected value (in notional currency) after 1 year.'
- )
- } )
- }
- } )
+@SharedYcmd
+def Subcommands_GetDoc_test( app ):
+ RunTest( app, {
+ 'description': 'GetDoc works within file',
+ 'request': {
+ 'command': 'GetDoc',
+ 'line_num': 7,
+ 'column_num': 16,
+ 'filepath': PathToTestFile( 'coollib', 'cool_object.js' ),
+ },
+ 'expect': {
+ 'response': http.client.OK,
+ 'data': has_entries( {
+ 'detailed_info': (
+ 'Name: mine_bitcoin\n'
+ 'Type: fn(how_much: ?) -> number\n\n'
+ 'This function takes a number and invests it in bitcoin. It '
+ 'returns\nthe expected value (in notional currency) after 1 year.'
+ )
+ } )
+ }
+ } )
- def GetType_test( self ):
- self._RunTest( {
- 'description': 'GetType works within file',
- 'request': {
- 'command': 'GetType',
- 'line_num': 11,
- 'column_num': 14,
- 'filepath': self._PathToTestFile( 'coollib', 'cool_object.js' ),
- },
- 'expect': {
- 'response': http.client.OK,
- 'data': has_entries( {
- 'message': 'number'
- } )
- }
- } )
+@SharedYcmd
+def Subcommands_GetType_test( app ):
+ RunTest( app, {
+ 'description': 'GetType works within file',
+ 'request': {
+ 'command': 'GetType',
+ 'line_num': 11,
+ 'column_num': 14,
+ 'filepath': PathToTestFile( 'coollib', 'cool_object.js' ),
+ },
+ 'expect': {
+ 'response': http.client.OK,
+ 'data': has_entries( {
+ 'message': 'number'
+ } )
+ }
+ } )
- def GoToReferences_test( self ):
- self._RunTest( {
- 'description': 'GoToReferences works within file',
- 'request': {
- 'command': 'GoToReferences',
- 'line_num': 17,
- 'column_num': 29,
- 'filepath': self._PathToTestFile( 'coollib', 'cool_object.js' ),
- },
- 'expect': {
- 'response': http.client.OK,
- 'data': contains_inanyorder(
- has_entries( {
- 'filepath': self._PathToTestFile( 'coollib', 'cool_object.js' ),
- 'line_num': 17,
- 'column_num': 29,
- } ),
- has_entries( {
- 'filepath': self._PathToTestFile( 'coollib', 'cool_object.js' ),
- 'line_num': 12,
- 'column_num': 9,
- } )
- )
- }
- } )
+@SharedYcmd
+def Subcommands_GoToReferences_test( app ):
+ RunTest( app, {
+ 'description': 'GoToReferences works within file',
+ 'request': {
+ 'command': 'GoToReferences',
+ 'line_num': 17,
+ 'column_num': 29,
+ 'filepath': PathToTestFile( 'coollib', 'cool_object.js' ),
+ },
+ 'expect': {
+ 'response': http.client.OK,
+ 'data': contains_inanyorder(
+ has_entries( {
+ 'filepath': PathToTestFile( 'coollib', 'cool_object.js' ),
+ 'line_num': 17,
+ 'column_num': 29,
+ } ),
+ has_entries( {
+ 'filepath': PathToTestFile( 'coollib', 'cool_object.js' ),
+ 'line_num': 12,
+ 'column_num': 9,
+ } )
+ )
+ }
+ } )
- def GetDocWithNoItendifier_test( self ):
- self._RunTest( {
- 'description': 'GetDoc works when no identifier',
- 'request': {
- 'command': 'GetDoc',
- 'filepath': self._PathToTestFile( 'simple_test.js' ),
- 'line_num': 12,
- 'column_num': 1,
- },
- 'expect': {
- 'response': http.client.INTERNAL_SERVER_ERROR,
- 'data': self._ErrorMatcher( RuntimeError, 'TernError: No type found '
- 'at the given position.' ),
- }
- } )
+@SharedYcmd
+def Subcommands_GetDocWithNoIdentifier_test( app ):
+ RunTest( app, {
+ 'description': 'GetDoc works when no identifier',
+ 'request': {
+ 'command': 'GetDoc',
+ 'filepath': PathToTestFile( 'simple_test.js' ),
+ 'line_num': 12,
+ 'column_num': 1,
+ },
+ 'expect': {
+ 'response': http.client.INTERNAL_SERVER_ERROR,
+ 'data': ErrorMatcher( RuntimeError, 'TernError: No type found '
+ 'at the given position.' ),
+ }
+ } )
- def RefactorRename_Simple_test( self ):
- filepath = self._PathToTestFile( 'simple_test.js' )
- self._RunTest( {
- 'description': 'RefactorRename works within a single scope/file',
- 'request': {
- 'command': 'RefactorRename',
- 'arguments': [ 'test' ],
- 'filepath': filepath,
- 'line_num': 15,
- 'column_num': 32,
- },
- 'expect': {
- 'response': http.client.OK,
- 'data': {
- 'fixits': contains( has_entries( {
- 'chunks': contains(
- ChunkMatcher( 'test',
- LocationMatcher( filepath, 1, 5 ),
- LocationMatcher( filepath, 1, 22 ) ),
- ChunkMatcher( 'test',
- LocationMatcher( filepath, 13, 25 ),
- LocationMatcher( filepath, 13, 42 ) ),
- ChunkMatcher( 'test',
- LocationMatcher( filepath, 14, 24 ),
- LocationMatcher( filepath, 14, 41 ) ),
- ChunkMatcher( 'test',
- LocationMatcher( filepath, 15, 24 ),
- LocationMatcher( filepath, 15, 41 ) ),
- ChunkMatcher( 'test',
- LocationMatcher( filepath, 21, 7 ),
- LocationMatcher( filepath, 21, 24 ) ),
- # On the same line, ensuring offsets are as expected (as
- # unmodified source, similar to clang)
- ChunkMatcher( 'test',
- LocationMatcher( filepath, 21, 28 ),
- LocationMatcher( filepath, 21, 45 ) ),
- ) ,
- 'location': LocationMatcher( filepath, 15, 32 )
- } ) )
- }
+@SharedYcmd
+def Subcommands_RefactorRename_Simple_test( app ):
+ filepath = PathToTestFile( 'simple_test.js' )
+ RunTest( app, {
+ 'description': 'RefactorRename works within a single scope/file',
+ 'request': {
+ 'command': 'RefactorRename',
+ 'arguments': [ 'test' ],
+ 'filepath': filepath,
+ 'line_num': 15,
+ 'column_num': 32,
+ },
+ 'expect': {
+ 'response': http.client.OK,
+ 'data': {
+ 'fixits': contains( has_entries( {
+ 'chunks': contains(
+ ChunkMatcher( 'test',
+ LocationMatcher( filepath, 1, 5 ),
+ LocationMatcher( filepath, 1, 22 ) ),
+ ChunkMatcher( 'test',
+ LocationMatcher( filepath, 13, 25 ),
+ LocationMatcher( filepath, 13, 42 ) ),
+ ChunkMatcher( 'test',
+ LocationMatcher( filepath, 14, 24 ),
+ LocationMatcher( filepath, 14, 41 ) ),
+ ChunkMatcher( 'test',
+ LocationMatcher( filepath, 15, 24 ),
+ LocationMatcher( filepath, 15, 41 ) ),
+ ChunkMatcher( 'test',
+ LocationMatcher( filepath, 21, 7 ),
+ LocationMatcher( filepath, 21, 24 ) ),
+ # On the same line, ensuring offsets are as expected (as
+ # unmodified source, similar to clang)
+ ChunkMatcher( 'test',
+ LocationMatcher( filepath, 21, 28 ),
+ LocationMatcher( filepath, 21, 45 ) ),
+ ),
+ 'location': LocationMatcher( filepath, 15, 32 )
+ } ) )
}
- } )
-
+ }
+ } )
- def RefactorRename_MultipleFiles_test( self ):
- file1 = self._PathToTestFile( 'file1.js' )
- file2 = self._PathToTestFile( 'file2.js' )
- file3 = self._PathToTestFile( 'file3.js' )
- self._RunTest( {
- 'description': 'RefactorRename works across files',
- 'request': {
- 'command': 'RefactorRename',
- 'arguments': [ 'a-quite-long-string' ],
- 'filepath': file1,
- 'line_num': 3,
- 'column_num': 14,
- },
- 'expect': {
- 'response': http.client.OK,
- 'data': {
- 'fixits': contains( has_entries( {
- 'chunks': contains(
- ChunkMatcher(
- 'a-quite-long-string',
- LocationMatcher( file1, 1, 5 ),
- LocationMatcher( file1, 1, 11 ) ),
- ChunkMatcher(
- 'a-quite-long-string',
- LocationMatcher( file1, 3, 14 ),
- LocationMatcher( file1, 3, 19 ) ),
- ChunkMatcher(
- 'a-quite-long-string',
- LocationMatcher( file2, 2, 14 ),
- LocationMatcher( file2, 2, 19 ) ),
- ChunkMatcher(
- 'a-quite-long-string',
- LocationMatcher( file3, 3, 12 ),
- LocationMatcher( file3, 3, 17 ) )
- ) ,
- 'location': LocationMatcher( file1, 3, 14 )
- } ) )
- }
+@SharedYcmd
+def Subcommands_RefactorRename_MultipleFiles_test( app ):
+ file1 = PathToTestFile( 'file1.js' )
+ file2 = PathToTestFile( 'file2.js' )
+ file3 = PathToTestFile( 'file3.js' )
+
+ RunTest( app, {
+ 'description': 'RefactorRename works across files',
+ 'request': {
+ 'command': 'RefactorRename',
+ 'arguments': [ 'a-quite-long-string' ],
+ 'filepath': file1,
+ 'line_num': 3,
+ 'column_num': 14,
+ },
+ 'expect': {
+ 'response': http.client.OK,
+ 'data': {
+ 'fixits': contains( has_entries( {
+ 'chunks': contains(
+ ChunkMatcher(
+ 'a-quite-long-string',
+ LocationMatcher( file1, 1, 5 ),
+ LocationMatcher( file1, 1, 11 ) ),
+ ChunkMatcher(
+ 'a-quite-long-string',
+ LocationMatcher( file1, 3, 14 ),
+ LocationMatcher( file1, 3, 19 ) ),
+ ChunkMatcher(
+ 'a-quite-long-string',
+ LocationMatcher( file2, 2, 14 ),
+ LocationMatcher( file2, 2, 19 ) ),
+ ChunkMatcher(
+ 'a-quite-long-string',
+ LocationMatcher( file3, 3, 12 ),
+ LocationMatcher( file3, 3, 17 ) )
+ ),
+ 'location': LocationMatcher( file1, 3, 14 )
+ } ) )
}
- } )
+ }
+ } )
- def RefactorRename_MultipleFiles_OnFileReadyToParse_test( self ):
- file1 = self._PathToTestFile( 'file1.js' )
- file2 = self._PathToTestFile( 'file2.js' )
- file3 = self._PathToTestFile( 'file3.js' )
-
- # This test is roughly the same as the previous one, except here file4.js is
- # pushed into the Tern engine via 'opening it in the editor' (i.e.
- # FileReadyToParse event). The first 3 are loaded into the tern server
- # because they are listed in the .tern-project file's loadEagerly option.
- file4 = self._PathToTestFile( 'file4.js' )
-
- self._app.post_json( '/event_notification',
- self._BuildRequest( **{
- 'filetype': 'javascript',
- 'event_name': 'FileReadyToParse',
- 'contents': ReadFile( file4 ),
- 'filepath': file4,
- } ),
- expect_errors = False )
-
- self._RunTest( {
- 'description': 'FileReadyToParse loads files into tern server',
- 'request': {
- 'command': 'RefactorRename',
- 'arguments': [ 'a-quite-long-string' ],
- 'filepath': file1,
- 'line_num': 3,
- 'column_num': 14,
- },
- 'expect': {
- 'response': http.client.OK,
- 'data': {
- 'fixits': contains( has_entries( {
- 'chunks': contains(
- ChunkMatcher(
- 'a-quite-long-string',
- LocationMatcher( file1, 1, 5 ),
- LocationMatcher( file1, 1, 11 ) ),
- ChunkMatcher(
- 'a-quite-long-string',
- LocationMatcher( file1, 3, 14 ),
- LocationMatcher( file1, 3, 19 ) ),
- ChunkMatcher(
- 'a-quite-long-string',
- LocationMatcher( file2, 2, 14 ),
- LocationMatcher( file2, 2, 19 ) ),
- ChunkMatcher(
- 'a-quite-long-string',
- LocationMatcher( file3, 3, 12 ),
- LocationMatcher( file3, 3, 17 ) ),
- ChunkMatcher(
- 'a-quite-long-string',
- LocationMatcher( file4, 4, 22 ),
- LocationMatcher( file4, 4, 28 ) )
- ) ,
- 'location': LocationMatcher( file1, 3, 14 )
- } ) )
- }
+@SharedYcmd
+def Subcommands_RefactorRename_MultipleFiles_OnFileReadyToParse_test( app ):
+ file1 = PathToTestFile( 'file1.js' )
+ file2 = PathToTestFile( 'file2.js' )
+ file3 = PathToTestFile( 'file3.js' )
+
+ # This test is roughly the same as the previous one, except here file4.js is
+ # pushed into the Tern engine via 'opening it in the editor' (i.e.
+ # FileReadyToParse event). The first 3 are loaded into the tern server
+ # because they are listed in the .tern-project file's loadEagerly option.
+ file4 = PathToTestFile( 'file4.js' )
+
+ app.post_json( '/event_notification',
+ BuildRequest( **{
+ 'filetype': 'javascript',
+ 'event_name': 'FileReadyToParse',
+ 'contents': ReadFile( file4 ),
+ 'filepath': file4,
+ } ),
+ expect_errors = False )
+
+ RunTest( app, {
+ 'description': 'FileReadyToParse loads files into tern server',
+ 'request': {
+ 'command': 'RefactorRename',
+ 'arguments': [ 'a-quite-long-string' ],
+ 'filepath': file1,
+ 'line_num': 3,
+ 'column_num': 14,
+ },
+ 'expect': {
+ 'response': http.client.OK,
+ 'data': {
+ 'fixits': contains( has_entries( {
+ 'chunks': contains(
+ ChunkMatcher(
+ 'a-quite-long-string',
+ LocationMatcher( file1, 1, 5 ),
+ LocationMatcher( file1, 1, 11 ) ),
+ ChunkMatcher(
+ 'a-quite-long-string',
+ LocationMatcher( file1, 3, 14 ),
+ LocationMatcher( file1, 3, 19 ) ),
+ ChunkMatcher(
+ 'a-quite-long-string',
+ LocationMatcher( file2, 2, 14 ),
+ LocationMatcher( file2, 2, 19 ) ),
+ ChunkMatcher(
+ 'a-quite-long-string',
+ LocationMatcher( file3, 3, 12 ),
+ LocationMatcher( file3, 3, 17 ) ),
+ ChunkMatcher(
+ 'a-quite-long-string',
+ LocationMatcher( file4, 4, 22 ),
+ LocationMatcher( file4, 4, 28 ) )
+ ),
+ 'location': LocationMatcher( file1, 3, 14 )
+ } ) )
}
- } )
+ }
+ } )
- def RefactorRename_Missing_New_Name_test( self ):
- self._RunTest( {
- 'description': 'FixItRename raises an error without new name',
- 'request': {
- 'command': 'FixItRename',
- 'line_num': 17,
- 'column_num': 29,
- 'filepath': self._PathToTestFile( 'coollib', 'cool_object.js' ),
+@SharedYcmd
+def Subcommands_RefactorRename_Missing_New_Name_test( app ):
+ RunTest( app, {
+ 'description': 'FixItRename raises an error without new name',
+ 'request': {
+ 'command': 'FixItRename',
+ 'line_num': 17,
+ 'column_num': 29,
+ 'filepath': PathToTestFile( 'coollib', 'cool_object.js' ),
+ },
+ 'expect': {
+ 'response': http.client.INTERNAL_SERVER_ERROR,
+ 'data': {
+ 'exception': ErrorMatcher(
+ ValueError,
+ 'Please specify a new name to rename it to.\n'
+ 'Usage: RefactorRename <new name>' ),
},
- 'expect': {
- 'response': http.client.INTERNAL_SERVER_ERROR,
- 'data': {
- 'exception': self._ErrorMatcher(
- ValueError,
- 'Please specify a new name to rename it to.\n'
- 'Usage: RefactorRename <new name>' ),
- },
- }
- } )
+ }
+ } )
diff --git a/ycmd/tests/misc_handlers_test.py b/ycmd/tests/misc_handlers_test.py
--- a/ycmd/tests/misc_handlers_test.py
+++ b/ycmd/tests/misc_handlers_test.py
@@ -25,39 +25,39 @@
from builtins import * # noqa
from nose.tools import ok_
-from .handlers_test import Handlers_test
-from ycmd.tests.test_utils import DummyCompleter
from hamcrest import assert_that, contains
+from ycmd.tests import SharedYcmd
+from ycmd.tests.test_utils import BuildRequest, DummyCompleter, PatchCompleter
-class MiscHandlers_test( Handlers_test ):
- def SemanticCompletionAvailable_test( self ):
- with self.PatchCompleter( DummyCompleter, filetype = 'dummy_filetype' ):
- request_data = self._BuildRequest( filetype = 'dummy_filetype' )
- ok_( self._app.post_json( '/semantic_completion_available',
- request_data ).json )
+@SharedYcmd
+def MiscHandlers_SemanticCompletionAvailable_test( app ):
+ with PatchCompleter( DummyCompleter, filetype = 'dummy_filetype' ):
+ request_data = BuildRequest( filetype = 'dummy_filetype' )
+ ok_( app.post_json( '/semantic_completion_available', request_data ).json )
- def EventNotification_AlwaysJsonResponse_test( self ):
- event_data = self._BuildRequest( contents = 'foo foogoo ba',
- event_name = 'FileReadyToParse' )
+@SharedYcmd
+def MiscHandlers_EventNotification_AlwaysJsonResponse_test( app ):
+ event_data = BuildRequest( contents = 'foo foogoo ba',
+ event_name = 'FileReadyToParse' )
- self._app.post_json( '/event_notification', event_data ).json
+ app.post_json( '/event_notification', event_data ).json
- def FilterAndSortCandidates_Basic_test( self ):
- candidate1 = { 'prop1': 'aoo', 'prop2': 'bar' }
- candidate2 = { 'prop1': 'bfo', 'prop2': 'zoo' }
- candidate3 = { 'prop1': 'cfo', 'prop2': 'moo' }
+@SharedYcmd
+def MiscHandlers_FilterAndSortCandidates_Basic_test( app ):
+ candidate1 = { 'prop1': 'aoo', 'prop2': 'bar' }
+ candidate2 = { 'prop1': 'bfo', 'prop2': 'zoo' }
+ candidate3 = { 'prop1': 'cfo', 'prop2': 'moo' }
- data = {
- 'candidates': [ candidate3, candidate1, candidate2 ],
- 'sort_property': 'prop1',
- 'query': 'fo'
- }
+ data = {
+ 'candidates': [ candidate3, candidate1, candidate2 ],
+ 'sort_property': 'prop1',
+ 'query': 'fo'
+ }
- response_data = self._app.post_json(
- '/filter_and_sort_candidates', data ).json
+ response_data = app.post_json( '/filter_and_sort_candidates', data ).json
- assert_that( response_data, contains( candidate2, candidate3 ) )
+ assert_that( response_data, contains( candidate2, candidate3 ) )
diff --git a/ycmd/tests/python/__init__.py b/ycmd/tests/python/__init__.py
--- a/ycmd/tests/python/__init__.py
+++ b/ycmd/tests/python/__init__.py
@@ -0,0 +1,113 @@
+# Copyright (C) 2016 ycmd contributors
+#
+# This file is part of ycmd.
+#
+# ycmd is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# ycmd is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with ycmd. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import unicode_literals
+from __future__ import print_function
+from __future__ import division
+from __future__ import absolute_import
+from future import standard_library
+standard_library.install_aliases()
+from builtins import * # noqa
+
+import functools
+import os
+import time
+
+from ycmd import handlers
+from ycmd.tests.test_utils import BuildRequest, SetUpApp
+
+shared_app = None
+
+
+def PathToTestFile( *args ):
+ dir_of_current_script = os.path.dirname( os.path.abspath( __file__ ) )
+ return os.path.join( dir_of_current_script, 'testdata', *args )
+
+
+def WaitUntilJediHTTPServerReady( app ):
+ retries = 100
+
+ while retries > 0:
+ result = app.get( '/ready', { 'subserver': 'python' } ).json
+ if result:
+ return
+
+ time.sleep( 0.2 )
+ retries = retries - 1
+
+ raise RuntimeError( "Timeout waiting for JediHTTP" )
+
+
+def StopJediHTTPServer( app ):
+ # We don't actually start a JediHTTP server on every test, so we just
+ # ignore errors when stopping the server
+ app.post_json( '/run_completer_command',
+ BuildRequest( completer_target = 'filetype_default',
+ command_arguments = [ 'StopServer' ],
+ filetype = 'python' ),
+ expect_errors = True )
+
+
+def setUpPackage():
+ """Initializes the ycmd server as a WebTest application that will be shared
+ by all tests using the SharedYcmd decorator in this package. Additional
+ configuration that is common to these tests, like starting a semantic
+ subserver, should be done here."""
+ global shared_app
+
+ shared_app = SetUpApp()
+ WaitUntilJediHTTPServerReady( shared_app )
+
+
+def tearDownPackage():
+ """Cleans up the tests using the SharedYcmd decorator in this package. It is
+ executed once after running all the tests in the package."""
+ global shared_app
+
+ StopJediHTTPServer( shared_app )
+
+
+def SharedYcmd( test ):
+ """Defines a decorator to be attached to tests of this package. This decorator
+ passes the shared ycmd application as a parameter.
+
+ Do NOT attach it to test generators but directly to the yielded tests."""
+ global shared_app
+
+ @functools.wraps( test )
+ def Wrapper( *args, **kwargs ):
+ return test( shared_app, *args, **kwargs )
+ return Wrapper
+
+
+def IsolatedYcmd( test ):
+ """Defines a decorator to be attached to tests of this package. This decorator
+ passes a unique ycmd application as a parameter. It should be used on tests
+that change the server state in an irreversible way (ex: a semantic subserver
+ is stopped or restarted) or expect a clean state (ex: no semantic subserver
+ started, no .ycm_extra_conf.py loaded, etc).
+
+ Do NOT attach it to test generators but directly to the yielded tests."""
+ @functools.wraps( test )
+ def Wrapper( *args, **kwargs ):
+ old_server_state = handlers._server_state
+
+ try:
+ test( SetUpApp(), *args, **kwargs )
+ finally:
+ handlers._server_state = old_server_state
+ return Wrapper
diff --git a/ycmd/tests/python/get_completions_test.py b/ycmd/tests/python/get_completions_test.py
--- a/ycmd/tests/python/get_completions_test.py
+++ b/ycmd/tests/python/get_completions_test.py
@@ -28,114 +28,112 @@
from nose.tools import eq_
from hamcrest import ( assert_that, has_item, has_items, has_entry,
has_entries, contains, empty, contains_string )
-from .python_handlers_test import Python_Handlers_test
from ycmd.utils import ReadFile
+from ycmd.tests.python import PathToTestFile, SharedYcmd
+from ycmd.tests.test_utils import ( BuildRequest, CompletionEntryMatcher,
+ CompletionLocationMatcher )
import http.client
-class Python_GetCompletions_test( Python_Handlers_test ):
-
- def setUp( self ):
- super( Python_GetCompletions_test, self ).setUp()
- self.WaitUntilJediHTTPServerReady()
-
-
- def _RunTest( self, test ):
- """
- Method to run a simple completion test and verify the result
-
- test is a dictionary containing:
- 'request': kwargs for BuildRequest
- 'expect': {
- 'response': server response code (e.g. httplib.OK)
- 'data': matcher for the server response json
- }
- """
- contents = ReadFile( test[ 'request' ][ 'filepath' ] )
-
- def CombineRequest( request, data ):
- kw = request
- request.update( data )
- return self._BuildRequest( **kw )
-
- self._app.post_json( '/event_notification',
- CombineRequest( test[ 'request' ], {
- 'event_name': 'FileReadyToParse',
- 'contents': contents,
- } ) )
-
- # We ignore errors here and we check the response code ourself.
- # This is to allow testing of requests returning errors.
- response = self._app.post_json( '/completions',
- CombineRequest( test[ 'request' ], {
- 'contents': contents
- } ),
- expect_errors = True )
-
- eq_( response.status_code, test[ 'expect' ][ 'response' ] )
-
- assert_that( response.json, test[ 'expect' ][ 'data' ] )
-
-
- def Basic_test( self ):
- filepath = self._PathToTestFile( 'basic.py' )
- completion_data = self._BuildRequest( filepath = filepath,
- filetype = 'python',
- contents = ReadFile( filepath ),
- line_num = 7,
- column_num = 3)
-
- results = self._app.post_json( '/completions',
- completion_data ).json[ 'completions' ]
-
- assert_that( results,
- has_items(
- self._CompletionEntryMatcher( 'a' ),
- self._CompletionEntryMatcher( 'b' ),
- self._CompletionLocationMatcher( 'line_num', 3 ),
- self._CompletionLocationMatcher( 'line_num', 4 ),
- self._CompletionLocationMatcher( 'column_num', 10 ),
- self._CompletionLocationMatcher( 'filepath', filepath ) ) )
-
-
- def UnicodeDescription_test( self ):
- filepath = self._PathToTestFile( 'unicode.py' )
- completion_data = self._BuildRequest( filepath = filepath,
- filetype = 'python',
- contents = ReadFile( filepath ),
- force_semantic = True,
- line_num = 5,
- column_num = 3)
-
- results = self._app.post_json( '/completions',
- completion_data ).json[ 'completions' ]
- assert_that( results, has_item(
- has_entry( 'detailed_info', contains_string( u'aafäö' ) ) ) )
-
-
- def NoSuggestions_Fallback_test( self ):
- # Python completer doesn't raise NO_COMPLETIONS_MESSAGE, so this is a
- # different code path to the Clang completer cases
-
- # TESTCASE2 (general_fallback/lang_python.py)
- self._RunTest( {
- 'description': 'param jedi does not know about (id). query="a_p"',
- 'request': {
- 'filetype' : 'python',
- 'filepath' : self._PathToTestFile( 'general_fallback',
- 'lang_python.py' ),
- 'line_num' : 28,
- 'column_num': 20,
- 'force_semantic': False,
- },
- 'expect': {
- 'response': http.client.OK,
- 'data': has_entries( {
- 'completions': contains(
- self._CompletionEntryMatcher( 'a_parameter', '[ID]' ),
- self._CompletionEntryMatcher( 'another_parameter', '[ID]' ),
- ),
- 'errors': empty(),
- } )
- },
- } )
+@SharedYcmd
+def GetCompletions_Basic_test( app ):
+ filepath = PathToTestFile( 'basic.py' )
+ completion_data = BuildRequest( filepath = filepath,
+ filetype = 'python',
+ contents = ReadFile( filepath ),
+ line_num = 7,
+ column_num = 3)
+
+ results = app.post_json( '/completions',
+ completion_data ).json[ 'completions' ]
+
+ assert_that( results,
+ has_items(
+ CompletionEntryMatcher( 'a' ),
+ CompletionEntryMatcher( 'b' ),
+ CompletionLocationMatcher( 'line_num', 3 ),
+ CompletionLocationMatcher( 'line_num', 4 ),
+ CompletionLocationMatcher( 'column_num', 10 ),
+ CompletionLocationMatcher( 'filepath', filepath ) ) )
+
+
+@SharedYcmd
+def GetCompletions_UnicodeDescription_test( app ):
+ filepath = PathToTestFile( 'unicode.py' )
+ completion_data = BuildRequest( filepath = filepath,
+ filetype = 'python',
+ contents = ReadFile( filepath ),
+ force_semantic = True,
+ line_num = 5,
+ column_num = 3)
+
+ results = app.post_json( '/completions',
+ completion_data ).json[ 'completions' ]
+ assert_that( results, has_item(
+ has_entry( 'detailed_info', contains_string( u'aafäö' ) ) ) )
+
+
+def RunTest( app, test ):
+ """
+ Method to run a simple completion test and verify the result
+
+ test is a dictionary containing:
+ 'request': kwargs for BuildRequest
+ 'expect': {
+ 'response': server response code (e.g. httplib.OK)
+ 'data': matcher for the server response json
+ }
+ """
+ contents = ReadFile( test[ 'request' ][ 'filepath' ] )
+
+ def CombineRequest( request, data ):
+ kw = request
+ request.update( data )
+ return BuildRequest( **kw )
+
+ app.post_json( '/event_notification',
+ CombineRequest( test[ 'request' ], {
+ 'event_name': 'FileReadyToParse',
+ 'contents': contents,
+ } ) )
+
+  # We ignore errors here and we check the response code ourselves.
+ # This is to allow testing of requests returning errors.
+ response = app.post_json( '/completions',
+ CombineRequest( test[ 'request' ], {
+ 'contents': contents
+ } ),
+ expect_errors = True )
+
+ eq_( response.status_code, test[ 'expect' ][ 'response' ] )
+
+ assert_that( response.json, test[ 'expect' ][ 'data' ] )
+
+
+@SharedYcmd
+def GetCompletions_NoSuggestions_Fallback_test( app ):
+ # Python completer doesn't raise NO_COMPLETIONS_MESSAGE, so this is a
+ # different code path to the Clang completer cases
+
+ # TESTCASE2 (general_fallback/lang_python.py)
+ RunTest( app, {
+ 'description': 'param jedi does not know about (id). query="a_p"',
+ 'request': {
+ 'filetype' : 'python',
+ 'filepath' : PathToTestFile( 'general_fallback',
+ 'lang_python.py' ),
+ 'line_num' : 28,
+ 'column_num': 20,
+ 'force_semantic': False,
+ },
+ 'expect': {
+ 'response': http.client.OK,
+ 'data': has_entries( {
+ 'completions': contains(
+ CompletionEntryMatcher( 'a_parameter', '[ID]' ),
+ CompletionEntryMatcher( 'another_parameter', '[ID]' ),
+ ),
+ 'errors': empty(),
+ } )
+ },
+ } )
diff --git a/ycmd/tests/python/python_handlers_test.py b/ycmd/tests/python/python_handlers_test.py
deleted file mode 100644
--- a/ycmd/tests/python/python_handlers_test.py
+++ /dev/null
@@ -1,62 +0,0 @@
-# Copyright (C) 2015 ycmd contributors
-#
-# This file is part of ycmd.
-#
-# ycmd is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# ycmd is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with ycmd. If not, see <http://www.gnu.org/licenses/>.
-
-from __future__ import unicode_literals
-from __future__ import print_function
-from __future__ import division
-from __future__ import absolute_import
-from future import standard_library
-standard_library.install_aliases()
-from builtins import * # noqa
-
-import time
-from ..handlers_test import Handlers_test
-
-
-class Python_Handlers_test( Handlers_test ):
-
- def __init__( self ):
- self._file = __file__
-
-
- def tearDown( self ):
- self.StopJediHTTPServer()
-
-
- def WaitUntilJediHTTPServerReady( self ):
- retries = 100
-
- while retries > 0:
- result = self._app.get( '/ready', { 'subserver': 'python' } ).json
- if result:
- return
-
- time.sleep( 0.2 )
- retries = retries - 1
-
- raise RuntimeError( "Timeout waiting for JediHTTP" )
-
-
- def StopJediHTTPServer( self ):
- request = self._BuildRequest( completer_target = 'filetype_default',
- command_arguments = [ 'StopServer' ],
- filetype = 'python' )
- # We don't actually start a JediHTTP server on every test, so we just
- # ignore errors when stopping the server
- self._app.post_json( '/run_completer_command',
- request,
- expect_errors = True )
diff --git a/ycmd/tests/python/subcommands_test.py b/ycmd/tests/python/subcommands_test.py
--- a/ycmd/tests/python/subcommands_test.py
+++ b/ycmd/tests/python/subcommands_test.py
@@ -25,45 +25,57 @@
from hamcrest import assert_that
from nose.tools import eq_
-from .python_handlers_test import Python_Handlers_test
-from ycmd.utils import ReadFile
import os.path
-
-class Python_Subcommands_test( Python_Handlers_test ):
-
- def setUp( self ):
- super( Python_Subcommands_test, self ).setUp()
- self.WaitUntilJediHTTPServerReady()
-
-
- def GoTo_Variation_ZeroBasedLineAndColumn_test( self ):
- tests = [
- {
- 'command_arguments': [ 'GoToDefinition' ],
- 'response': {
- 'filepath': os.path.abspath( '/foo.py' ),
- 'line_num': 2,
- 'column_num': 5
- }
- },
- {
- 'command_arguments': [ 'GoToDeclaration' ],
- 'response': {
- 'filepath': os.path.abspath( '/foo.py' ),
- 'line_num': 7,
- 'column_num': 1
- }
- }
- ]
- for test in tests:
- yield self._Run_GoTo_Variation_ZeroBasedLineAndColumn, test
-
-
- def _Run_GoTo_Variation_ZeroBasedLineAndColumn( self, test ):
- # Example taken directly from jedi docs
- # http://jedi.jedidjah.ch/en/latest/docs/plugin-api.html#examples
- contents = """
+from ycmd.utils import ReadFile
+from ycmd.tests.python import PathToTestFile, SharedYcmd
+from ycmd.tests.test_utils import BuildRequest, ErrorMatcher
+
+
+@SharedYcmd
+def RunGoToTest( app, test ):
+ filepath = PathToTestFile( test[ 'request' ][ 'filename' ] )
+ goto_data = BuildRequest( completer_target = 'filetype_default',
+ command_arguments = [ 'GoTo' ],
+ line_num = test[ 'request' ][ 'line_num' ],
+ contents = ReadFile( filepath ),
+ filetype = 'python',
+ filepath = filepath )
+
+ eq_( test[ 'response' ],
+ app.post_json( '/run_completer_command', goto_data ).json )
+
+
+def Subcommands_GoTo_test():
+ # Tests taken from https://github.com/Valloric/YouCompleteMe/issues/1236
+ tests = [
+ {
+ 'request': { 'filename': 'goto_file1.py', 'line_num': 2 },
+ 'response': {
+ 'filepath': PathToTestFile( 'goto_file3.py' ),
+ 'line_num': 1,
+ 'column_num': 5
+ }
+ },
+ {
+ 'request': { 'filename': 'goto_file4.py', 'line_num': 2 },
+ 'response': {
+ 'filepath': PathToTestFile( 'goto_file4.py' ),
+ 'line_num': 1,
+ 'column_num': 18
+ }
+ }
+ ]
+
+ for test in tests:
+ yield RunGoToTest, test
+
+
+@SharedYcmd
+def RunGoToTest_Variation_ZeroBasedLineAndColumn( app, test ):
+ # Example taken directly from jedi docs
+ # http://jedi.jedidjah.ch/en/latest/docs/plugin-api.html#examples
+ contents = """
def my_func():
print 'called'
@@ -74,145 +86,140 @@ def my_func():
inception()
"""
- goto_data = self._BuildRequest(
- completer_target = 'filetype_default',
- command_arguments = test[ 'command_arguments' ],
- line_num = 9,
- contents = contents,
- filetype = 'python',
- filepath = '/foo.py'
- )
-
- eq_( test[ 'response' ],
- self._app.post_json( '/run_completer_command', goto_data ).json )
-
-
- def GoToDefinition_NotFound_test( self ):
- filepath = self._PathToTestFile( 'goto_file5.py' )
- goto_data = self._BuildRequest( command_arguments = [ 'GoToDefinition' ],
- line_num = 4,
- contents = ReadFile( filepath ),
- filetype = 'python',
- filepath = filepath )
-
- response = self._app.post_json( '/run_completer_command',
- goto_data,
- expect_errors = True ).json
- assert_that( response,
- self._ErrorMatcher( RuntimeError,
- "Can\'t jump to definition." ) )
-
-
- def GoTo_test( self ):
- # Tests taken from https://github.com/Valloric/YouCompleteMe/issues/1236
- tests = [
- {
- 'request': { 'filename': 'goto_file1.py', 'line_num': 2 },
- 'response': {
- 'filepath': self._PathToTestFile( 'goto_file3.py' ),
- 'line_num': 1,
- 'column_num': 5
- }
- },
- {
- 'request': { 'filename': 'goto_file4.py', 'line_num': 2 },
- 'response': {
- 'filepath': self._PathToTestFile( 'goto_file4.py' ),
- 'line_num': 1,
- 'column_num': 18
- }
- }
- ]
- for test in tests:
- yield self._Run_GoTo, test
-
-
- def _Run_GoTo( self, test ):
- filepath = self._PathToTestFile( test[ 'request' ][ 'filename' ] )
- goto_data = self._BuildRequest( completer_target = 'filetype_default',
- command_arguments = [ 'GoTo' ],
- line_num = test[ 'request' ][ 'line_num' ],
- contents = ReadFile( filepath ),
- filetype = 'python',
- filepath = filepath )
-
- eq_( test[ 'response' ],
- self._app.post_json( '/run_completer_command', goto_data ).json )
-
-
- def GetDoc_Method_test( self ):
- # Testcase1
- filepath = self._PathToTestFile( 'GetDoc.py' )
- contents = ReadFile( filepath )
-
- event_data = self._BuildRequest( filepath = filepath,
- filetype = 'python',
- line_num = 17,
- column_num = 9,
- contents = contents,
- command_arguments = [ 'GetDoc' ],
- completer_target = 'filetype_default' )
-
- response = self._app.post_json( '/run_completer_command', event_data ).json
-
- eq_( response, {
- 'detailed_info': '_ModuleMethod()\n\n'
- 'Module method docs\n'
- 'Are dedented, like you might expect',
- } )
-
-
- def GetDoc_Class_test( self ):
- # Testcase1
- filepath = self._PathToTestFile( 'GetDoc.py' )
- contents = ReadFile( filepath )
-
- event_data = self._BuildRequest( filepath = filepath,
- filetype = 'python',
- line_num = 19,
- column_num = 2,
- contents = contents,
- command_arguments = [ 'GetDoc' ],
- completer_target = 'filetype_default' )
-
- response = self._app.post_json( '/run_completer_command', event_data ).json
-
- eq_( response, {
- 'detailed_info': 'Class Documentation',
- } )
-
-
- def GoToReferences_test( self ):
- filepath = self._PathToTestFile( 'goto_references.py' )
- contents = ReadFile( filepath )
-
- event_data = self._BuildRequest( filepath = filepath,
- filetype = 'python',
- line_num = 4,
- column_num = 5,
- contents = contents,
- command_arguments = [ 'GoToReferences' ],
- completer_target = 'filetype_default' )
-
- response = self._app.post_json( '/run_completer_command', event_data ).json
-
- eq_( response, [ {
- 'filepath': self._PathToTestFile( 'goto_references.py' ),
+ goto_data = BuildRequest(
+ completer_target = 'filetype_default',
+ command_arguments = test[ 'command_arguments' ],
+ line_num = 9,
+ contents = contents,
+ filetype = 'python',
+ filepath = '/foo.py'
+ )
+
+ eq_( test[ 'response' ],
+ app.post_json( '/run_completer_command', goto_data ).json )
+
+
+def Subcommands_GoTo_Variation_ZeroBasedLineAndColumn_test():
+ tests = [
+ {
+ 'command_arguments': [ 'GoToDefinition' ],
+ 'response': {
+ 'filepath': os.path.abspath( '/foo.py' ),
+ 'line_num': 2,
+ 'column_num': 5
+ }
+ },
+ {
+ 'command_arguments': [ 'GoToDeclaration' ],
+ 'response': {
+ 'filepath': os.path.abspath( '/foo.py' ),
+ 'line_num': 7,
+ 'column_num': 1
+ }
+ }
+ ]
+
+ for test in tests:
+ yield RunGoToTest_Variation_ZeroBasedLineAndColumn, test
+
+
+@SharedYcmd
+def Subcommands_GoToDefinition_NotFound_test( app ):
+ filepath = PathToTestFile( 'goto_file5.py' )
+ goto_data = BuildRequest( command_arguments = [ 'GoToDefinition' ],
+ line_num = 4,
+ contents = ReadFile( filepath ),
+ filetype = 'python',
+ filepath = filepath )
+
+ response = app.post_json( '/run_completer_command',
+ goto_data,
+ expect_errors = True ).json
+ assert_that( response,
+ ErrorMatcher( RuntimeError,
+ "Can\'t jump to definition." ) )
+
+
+@SharedYcmd
+def Subcommands_GetDoc_Method_test( app ):
+ # Testcase1
+ filepath = PathToTestFile( 'GetDoc.py' )
+ contents = ReadFile( filepath )
+
+ event_data = BuildRequest( filepath = filepath,
+ filetype = 'python',
+ line_num = 17,
+ column_num = 9,
+ contents = contents,
+ command_arguments = [ 'GetDoc' ],
+ completer_target = 'filetype_default' )
+
+ response = app.post_json( '/run_completer_command', event_data ).json
+
+ eq_( response, {
+ 'detailed_info': '_ModuleMethod()\n\n'
+ 'Module method docs\n'
+ 'Are dedented, like you might expect',
+ } )
+
+
+@SharedYcmd
+def Subcommands_GetDoc_Class_test( app ):
+ # Testcase1
+ filepath = PathToTestFile( 'GetDoc.py' )
+ contents = ReadFile( filepath )
+
+ event_data = BuildRequest( filepath = filepath,
+ filetype = 'python',
+ line_num = 19,
+ column_num = 2,
+ contents = contents,
+ command_arguments = [ 'GetDoc' ],
+ completer_target = 'filetype_default' )
+
+ response = app.post_json( '/run_completer_command', event_data ).json
+
+ eq_( response, {
+ 'detailed_info': 'Class Documentation',
+ } )
+
+
+@SharedYcmd
+def Subcommands_GoToReferences_test( app ):
+ filepath = PathToTestFile( 'goto_references.py' )
+ contents = ReadFile( filepath )
+
+ event_data = BuildRequest( filepath = filepath,
+ filetype = 'python',
+ line_num = 4,
+ column_num = 5,
+ contents = contents,
+ command_arguments = [ 'GoToReferences' ],
+ completer_target = 'filetype_default' )
+
+ response = app.post_json( '/run_completer_command', event_data ).json
+
+ eq_( response, [
+ {
+ 'filepath': PathToTestFile( 'goto_references.py' ),
'column_num': 5,
'description': 'def f',
'line_num': 1
- }, {
- 'filepath': self._PathToTestFile( 'goto_references.py' ),
+ },
+ {
+ 'filepath': PathToTestFile( 'goto_references.py' ),
'column_num': 5,
'description': 'a = f()',
'line_num': 4
- }, {
- 'filepath': self._PathToTestFile( 'goto_references.py' ),
+ },
+ {
+ 'filepath': PathToTestFile( 'goto_references.py' ),
'column_num': 5,
'description': 'b = f()',
'line_num': 5
- }, {
- 'filepath': self._PathToTestFile( 'goto_references.py' ),
+ },
+ {
+ 'filepath': PathToTestFile( 'goto_references.py' ),
'column_num': 5,
'description': 'c = f()',
'line_num': 6
diff --git a/ycmd/tests/python/user_defined_python_test.py b/ycmd/tests/python/user_defined_python_test.py
--- a/ycmd/tests/python/user_defined_python_test.py
+++ b/ycmd/tests/python/user_defined_python_test.py
@@ -23,13 +23,15 @@
standard_library.install_aliases()
from builtins import * # noqa
-from .python_handlers_test import Python_Handlers_test
+from hamcrest.core.base_matcher import BaseMatcher
+from hamcrest import assert_that, has_item, contains, equal_to, is_not # noqa
from mock import patch
+import sys
+
from ycmd import utils
from ycmd.completers.python.jedi_completer import BINARY_NOT_FOUND_MESSAGE
-from hamcrest.core.base_matcher import BaseMatcher
-from hamcrest import assert_that, has_item, contains, equal_to, is_not # noqa
-import sys
+from ycmd.tests.python import IsolatedYcmd
+from ycmd.tests.test_utils import BuildRequest, ErrorMatcher, UserOption
class CalledWith( BaseMatcher ):
@@ -75,79 +77,79 @@ def was_called_with_python( python ):
return CalledWith( python )
-
-class UserDefinedPython_test( Python_Handlers_test ):
-
- @patch( 'ycmd.utils.SafePopen' )
- def WithoutAnyOption_DefaultToYcmdPython_test( self, *args ):
- self._app.get( '/ready', { 'subserver': 'python' } )
- assert_that( utils.SafePopen, was_called_with_python( sys.executable ) )
-
-
- @patch( 'ycmd.utils.SafePopen' )
- @patch( 'ycmd.completers.python.jedi_completer.'
- 'JediCompleter._CheckBinaryExists',
- return_value = False )
- def WhenNonExistentPythonIsGiven_ReturnAnError_test( self, *args ):
- python = '/non/existing/path/python'
- with self.UserOption( 'python_binary_path', python ):
- response = self._app.get( '/ready',
- { 'subserver': 'python' },
- expect_errors = True ).json
-
- msg = BINARY_NOT_FOUND_MESSAGE.format( python )
- assert_that( response, self._ErrorMatcher( RuntimeError, msg ) )
- utils.SafePopen.assert_not_called()
-
-
- @patch( 'ycmd.utils.SafePopen' )
- @patch( 'ycmd.completers.python.jedi_completer.'
- 'JediCompleter._CheckBinaryExists',
- return_value = True )
- def WhenExistingPythonIsGiven_ThatIsUsed_test( self, *args ):
- python = '/existing/python'
- with self.UserOption( 'python_binary_path', python ):
- self._app.get( '/ready', { 'subserver': 'python' } ).json
- assert_that( utils.SafePopen, was_called_with_python( python ) )
-
-
- @patch( 'ycmd.utils.SafePopen' )
- @patch( 'ycmd.completers.python.jedi_completer.'
- 'JediCompleter._CheckBinaryExists',
- return_value = True )
- def RestartServerWithoutArguments_WillReuseTheLastPython_test( self, *args ):
- request = self._BuildRequest( filetype = 'python',
- command_arguments = [ 'RestartServer' ] )
- self._app.post_json( '/run_completer_command', request )
- assert_that( utils.SafePopen, was_called_with_python( sys.executable ) )
-
-
- @patch( 'ycmd.utils.SafePopen' )
- @patch( 'ycmd.completers.python.jedi_completer.'
- 'JediCompleter._CheckBinaryExists',
- return_value = True )
- def RestartServerWithArgument_WillUseTheSpecifiedPython_test( self, *args ):
- python = '/existing/python'
- request = self._BuildRequest( filetype = 'python',
- command_arguments = [ 'RestartServer',
- python ] )
- self._app.post_json( '/run_completer_command', request )
- assert_that( utils.SafePopen, was_called_with_python( python ) )
+@IsolatedYcmd
+@patch( 'ycmd.utils.SafePopen' )
+def UserDefinedPython_WithoutAnyOption_DefaultToYcmdPython_test( app, *args ):
+ app.get( '/ready', { 'subserver': 'python' } )
+ assert_that( utils.SafePopen, was_called_with_python( sys.executable ) )
- @patch( 'ycmd.utils.SafePopen' )
- @patch( 'ycmd.completers.python.jedi_completer.'
- 'JediCompleter._CheckBinaryExists',
- return_value = False )
- def RestartServerWithNonExistingPythonArgument_test( self, *args ):
- python = '/non/existing/python'
- request = self._BuildRequest( filetype = 'python',
- command_arguments = [ 'RestartServer',
- python ] )
- response = self._app.post_json( '/run_completer_command',
- request,
- expect_errors = True ).json
+@IsolatedYcmd
+@patch( 'ycmd.utils.SafePopen' )
+@patch( 'ycmd.completers.python.jedi_completer.JediCompleter.'
+ '_CheckBinaryExists', return_value = False )
+def UserDefinedPython_WhenNonExistentPythonIsGiven_ReturnAnError_test( app,
+ *args ):
+ python = '/non/existing/path/python'
+ with UserOption( 'python_binary_path', python ):
+ response = app.get( '/ready',
+ { 'subserver': 'python' },
+ expect_errors = True ).json
msg = BINARY_NOT_FOUND_MESSAGE.format( python )
- assert_that( response, self._ErrorMatcher( RuntimeError, msg ) )
- assert_that( utils.SafePopen, was_called_with_python( sys.executable ) )
+ assert_that( response, ErrorMatcher( RuntimeError, msg ) )
+ utils.SafePopen.assert_not_called()
+
+
+@IsolatedYcmd
+@patch( 'ycmd.utils.SafePopen' )
+@patch( 'ycmd.completers.python.jedi_completer.JediCompleter.'
+ '_CheckBinaryExists', return_value = True )
+def UserDefinedPython_WhenExistingPythonIsGiven_ThatIsUsed_test( app, *args ):
+ python = '/existing/python'
+ with UserOption( 'python_binary_path', python ):
+ app.get( '/ready', { 'subserver': 'python' } ).json
+ assert_that( utils.SafePopen, was_called_with_python( python ) )
+
+
+@IsolatedYcmd
+@patch( 'ycmd.utils.SafePopen' )
+@patch( 'ycmd.completers.python.jedi_completer.JediCompleter.'
+ '_CheckBinaryExists', return_value = True )
+def UserDefinedPython_RestartServerWithoutArguments_WillReuseTheLastPython_test(
+ app, *args ):
+ request = BuildRequest( filetype = 'python',
+ command_arguments = [ 'RestartServer' ] )
+ app.post_json( '/run_completer_command', request )
+ assert_that( utils.SafePopen, was_called_with_python( sys.executable ) )
+
+
+@IsolatedYcmd
+@patch( 'ycmd.utils.SafePopen' )
+@patch( 'ycmd.completers.python.jedi_completer.JediCompleter.'
+ '_CheckBinaryExists', return_value = True )
+def UserDefinedPython_RestartServerWithArgument_WillUseTheSpecifiedPython_test(
+ app, *args ):
+ python = '/existing/python'
+ request = BuildRequest( filetype = 'python',
+ command_arguments = [ 'RestartServer', python ] )
+ app.post_json( '/run_completer_command', request )
+ assert_that( utils.SafePopen, was_called_with_python( python ) )
+
+
+@IsolatedYcmd
+@patch( 'ycmd.utils.SafePopen' )
+@patch( 'ycmd.completers.python.jedi_completer.JediCompleter.'
+ '_CheckBinaryExists', return_value = False )
+def UserDefinedPython_RestartServerWithNonExistingPythonArgument_test( app,
+ *args ):
+ python = '/non/existing/python'
+ request = BuildRequest( filetype = 'python',
+ command_arguments = [ 'RestartServer', python ] )
+ response = app.post_json( '/run_completer_command',
+ request,
+ expect_errors = True ).json
+
+ msg = BINARY_NOT_FOUND_MESSAGE.format( python )
+ assert_that( response, ErrorMatcher( RuntimeError, msg ) )
+ assert_that( utils.SafePopen, was_called_with_python( sys.executable ) )
diff --git a/ycmd/tests/rust/__init__.py b/ycmd/tests/rust/__init__.py
--- a/ycmd/tests/rust/__init__.py
+++ b/ycmd/tests/rust/__init__.py
@@ -0,0 +1,115 @@
+# Copyright (C) 2016 ycmd contributors
+#
+# This file is part of ycmd.
+#
+# ycmd is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# ycmd is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with ycmd. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import unicode_literals
+from __future__ import print_function
+from __future__ import division
+from __future__ import absolute_import
+from future import standard_library
+standard_library.install_aliases()
+from builtins import * # noqa
+
+import functools
+import os
+import time
+
+from ycmd.tests.test_utils import BuildRequest, SetUpApp
+from ycmd import handlers
+
+shared_app = None
+
+
+def PathToTestFile( *args ):
+ dir_of_current_script = os.path.dirname( os.path.abspath( __file__ ) )
+ return os.path.join( dir_of_current_script, 'testdata', *args )
+
+
+def WaitUntilRacerdServerReady( app ):
+ retries = 100
+
+ while retries > 0:
+ result = app.get( '/ready', { 'subserver': 'rust' } ).json
+ if result:
+ return
+
+ time.sleep( 0.2 )
+ retries = retries - 1
+
+  raise RuntimeError( "Timeout waiting for racerd" )
+
+
+def StopRacerdServer( app ):
+ app.post_json( '/run_completer_command',
+ BuildRequest( completer_target = 'filetype_default',
+ command_arguments = [ 'StopServer' ],
+ filetype = 'rust' ),
+ expect_errors = True )
+
+
+def setUpPackage():
+ """Initializes the ycmd server as a WebTest application that will be shared
+ by all tests using the SharedYcmd decorator in this package. Additional
+ configuration that is common to these tests, like starting a semantic
+ subserver, should be done here."""
+ global shared_app
+
+ shared_app = SetUpApp()
+
+ WaitUntilRacerdServerReady( shared_app )
+
+
+def tearDownPackage():
+ """Cleans up the tests using the SharedYcmd decorator in this package. It is
+ executed once after running all the tests in the package."""
+ global shared_app
+
+ StopRacerdServer( shared_app )
+
+
+def SharedYcmd( test ):
+ """Defines a decorator to be attached to tests of this package. This decorator
+ passes the shared ycmd application as a parameter.
+
+ Do NOT attach it to test generators but directly to the yielded tests."""
+ global shared_app
+
+ @functools.wraps( test )
+ def Wrapper( *args, **kwargs ):
+ return test( shared_app, *args, **kwargs )
+ return Wrapper
+
+
+def IsolatedYcmd( test ):
+ """Defines a decorator to be attached to tests of this package. This decorator
+ passes a unique ycmd application as a parameter. It should be used on tests
+ that change the server state in a irreversible way (ex: a semantic subserver
+ is stopped or restarted) or expect a clean state (ex: no semantic subserver
+ started, no .ycm_extra_conf.py loaded, etc).
+
+ Do NOT attach it to test generators but directly to the yielded tests."""
+ @functools.wraps( test )
+ def Wrapper( *args, **kwargs ):
+ old_server_state = handlers._server_state
+
+ try:
+ app = SetUpApp()
+ WaitUntilRacerdServerReady( app )
+ test( app, *args, **kwargs )
+ StopRacerdServer( app )
+ finally:
+ handlers._server_state = old_server_state
+ return Wrapper
diff --git a/ycmd/tests/rust/get_completions_test.py b/ycmd/tests/rust/get_completions_test.py
--- a/ycmd/tests/rust/get_completions_test.py
+++ b/ycmd/tests/rust/get_completions_test.py
@@ -23,51 +23,51 @@
standard_library.install_aliases()
from builtins import * # noqa
-from ycmd.utils import ReadFile
from hamcrest import assert_that, has_entry, has_items, contains_string
-from .rust_handlers_test import Rust_Handlers_test
-
-
-class Rust_GetCompletions_test( Rust_Handlers_test ):
+from ycmd.tests.rust import IsolatedYcmd, PathToTestFile, SharedYcmd
+from ycmd.tests.test_utils import BuildRequest, CompletionEntryMatcher
+from ycmd.utils import ReadFile
- def Basic_test( self ):
- filepath = self._PathToTestFile( 'test.rs' )
- contents = ReadFile( filepath )
-
- self._WaitUntilServerReady()
- completion_data = self._BuildRequest( filepath = filepath,
- filetype = 'rust',
- contents = contents,
- force_semantic = True,
- line_num = 9,
- column_num = 11 )
+@SharedYcmd
+def GetCompletions_Basic_test( app ):
+ filepath = PathToTestFile( 'test.rs' )
+ contents = ReadFile( filepath )
- results = self._app.post_json( '/completions',
- completion_data ).json[ 'completions' ]
+ completion_data = BuildRequest( filepath = filepath,
+ filetype = 'rust',
+ contents = contents,
+ force_semantic = True,
+ line_num = 9,
+ column_num = 11 )
- assert_that( results,
- has_items( self._CompletionEntryMatcher( 'build_rocket' ),
- self._CompletionEntryMatcher( 'build_shuttle' ) ) )
+ results = app.post_json( '/completions',
+ completion_data ).json[ 'completions' ]
+ assert_that( results,
+ has_items( CompletionEntryMatcher( 'build_rocket' ),
+ CompletionEntryMatcher( 'build_shuttle' ) ) )
- def WhenStandardLibraryCompletionFails_MentionRustSrcPath_test( self ):
- filepath = self._PathToTestFile( 'std_completions.rs' )
- contents = ReadFile( filepath )
- self._WaitUntilServerReady()
+# This test is isolated because it affects the GoTo tests, although it
+# shouldn't.
+@IsolatedYcmd
+def GetCompletions_WhenStandardLibraryCompletionFails_MentionRustSrcPath_test(
+ app ):
+ filepath = PathToTestFile( 'std_completions.rs' )
+ contents = ReadFile( filepath )
- completion_data = self._BuildRequest( filepath = filepath,
- filetype = 'rust',
- contents = contents,
- force_semantic = True,
- line_num = 5,
- column_num = 11 )
+ completion_data = BuildRequest( filepath = filepath,
+ filetype = 'rust',
+ contents = contents,
+ force_semantic = True,
+ line_num = 5,
+ column_num = 11 )
- response = self._app.post_json( '/completions',
- completion_data,
- expect_errors = True ).json
- assert_that( response,
- has_entry( 'message',
- contains_string( 'rust_src_path' ) ) )
+ response = app.post_json( '/completions',
+ completion_data,
+ expect_errors = True ).json
+ assert_that( response,
+ has_entry( 'message',
+ contains_string( 'rust_src_path' ) ) )
diff --git a/ycmd/tests/rust/rust_handlers_test.py b/ycmd/tests/rust/rust_handlers_test.py
deleted file mode 100644
--- a/ycmd/tests/rust/rust_handlers_test.py
+++ /dev/null
@@ -1,64 +0,0 @@
-# Copyright (C) 2015 ycmd contributors
-#
-# This file is part of ycmd.
-#
-# ycmd is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# ycmd is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with ycmd. If not, see <http://www.gnu.org/licenses/>.
-
-from __future__ import unicode_literals
-from __future__ import print_function
-from __future__ import division
-from __future__ import absolute_import
-from future import standard_library
-standard_library.install_aliases()
-from builtins import * # noqa
-
-from ..handlers_test import Handlers_test
-
-import time
-
-
-class Rust_Handlers_test( Handlers_test ):
-
-
- def __init__( self ):
- self._file = __file__
-
-
- def tearDown( self ):
- self._StopServer()
-
-
- def _StopServer( self ):
- try:
- self._app.post_json(
- '/run_completer_command',
- self._BuildRequest( command_arguments = [ 'StopServer' ],
- filetype = 'rust',
- completer_target = 'filetype_default' )
- )
- except:
- pass
-
-
- def _WaitUntilServerReady( self ):
- retries = 100
-
- while retries > 0:
- result = self._app.get( '/ready', { 'subserver': 'rust' } ).json
- if result:
- return
- time.sleep( 0.2 )
- retries = retries - 1
-
- raise RuntimeError( "Timeout waiting for racerd" )
diff --git a/ycmd/tests/rust/subcommands_test.py b/ycmd/tests/rust/subcommands_test.py
--- a/ycmd/tests/rust/subcommands_test.py
+++ b/ycmd/tests/rust/subcommands_test.py
@@ -23,43 +23,41 @@
standard_library.install_aliases()
from builtins import * # noqa
-from .rust_handlers_test import Rust_Handlers_test
from nose.tools import eq_
-from ycmd.utils import ReadFile
-
-
-class Rust_Subcommands_test( Rust_Handlers_test ):
+from ycmd.tests.rust import PathToTestFile, SharedYcmd
+from ycmd.tests.test_utils import BuildRequest
+from ycmd.utils import ReadFile
- def _GoTo( self, params ):
- filepath = self._PathToTestFile( 'test.rs' )
- contents = ReadFile( filepath )
- self._WaitUntilServerReady()
+@SharedYcmd
+def RunGoToTest( app, params ):
+ filepath = PathToTestFile( 'test.rs' )
+ contents = ReadFile( filepath )
- command = params[ 'command' ]
- goto_data = self._BuildRequest( completer_target = 'filetype_default',
- command_arguments = [ command ],
- line_num = 7,
- column_num = 12,
- contents = contents,
- filetype = 'rust',
- filepath = filepath )
+ command = params[ 'command' ]
+ goto_data = BuildRequest( completer_target = 'filetype_default',
+ command_arguments = [ command ],
+ line_num = 7,
+ column_num = 12,
+ contents = contents,
+ filetype = 'rust',
+ filepath = filepath )
- results = self._app.post_json( '/run_completer_command',
- goto_data )
+ results = app.post_json( '/run_completer_command',
+ goto_data )
- eq_( {
- 'line_num': 1, 'column_num': 8, 'filepath': filepath
- }, results.json )
+ eq_( {
+ 'line_num': 1, 'column_num': 8, 'filepath': filepath
+ }, results.json )
- def GoTo_all_test( self ):
- tests = [
- { 'command': 'GoTo' },
- { 'command': 'GoToDefinition' },
- { 'command': 'GoToDeclaration' }
- ]
+def Subcommands_GoTo_all_test():
+ tests = [
+ { 'command': 'GoTo' },
+ { 'command': 'GoToDefinition' },
+ { 'command': 'GoToDeclaration' }
+ ]
- for test in tests:
- yield ( self._GoTo, test )
+ for test in tests:
+ yield RunGoToTest, test
diff --git a/ycmd/tests/subcommands_test.py b/ycmd/tests/subcommands_test.py
--- a/ycmd/tests/subcommands_test.py
+++ b/ycmd/tests/subcommands_test.py
@@ -24,36 +24,34 @@
standard_library.install_aliases()
from builtins import * # noqa
-from nose.tools import eq_
-from .handlers_test import Handlers_test
-from ycmd.tests.test_utils import DummyCompleter
from mock import patch
+from nose.tools import eq_
+from ycmd.tests import SharedYcmd
+from ycmd.tests.test_utils import BuildRequest, DummyCompleter, PatchCompleter
-class Subcommands_test( Handlers_test ):
- @patch( 'ycmd.tests.test_utils.DummyCompleter.GetSubcommandsMap',
- return_value = { 'A': lambda x: x,
- 'B': lambda x: x,
- 'C': lambda x: x } )
- def Basic_test( self, *args ):
- with self.PatchCompleter( DummyCompleter, 'dummy_filetype' ):
- subcommands_data = self._BuildRequest(
- completer_target = 'dummy_filetype' )
+@SharedYcmd
+@patch( 'ycmd.tests.test_utils.DummyCompleter.GetSubcommandsMap',
+ return_value = { 'A': lambda x: x,
+ 'B': lambda x: x,
+ 'C': lambda x: x } )
+def Subcommands_Basic_test( app, *args ):
+ with PatchCompleter( DummyCompleter, 'dummy_filetype' ):
+ subcommands_data = BuildRequest( completer_target = 'dummy_filetype' )
- eq_( [ 'A', 'B', 'C' ],
- self._app.post_json( '/defined_subcommands',
- subcommands_data ).json )
+ eq_( [ 'A', 'B', 'C' ],
+ app.post_json( '/defined_subcommands', subcommands_data ).json )
- @patch( 'ycmd.tests.test_utils.DummyCompleter.GetSubcommandsMap',
- return_value = { 'A': lambda x: x,
- 'B': lambda x: x,
- 'C': lambda x: x } )
- def NoExplicitCompleterTargetSpecified_test( self, *args ):
- with self.PatchCompleter( DummyCompleter, 'dummy_filetype' ):
- subcommands_data = self._BuildRequest( filetype = 'dummy_filetype' )
+@SharedYcmd
+@patch( 'ycmd.tests.test_utils.DummyCompleter.GetSubcommandsMap',
+ return_value = { 'A': lambda x: x,
+ 'B': lambda x: x,
+ 'C': lambda x: x } )
+def Subcommands_NoExplicitCompleterTargetSpecified_test( app, *args ):
+ with PatchCompleter( DummyCompleter, 'dummy_filetype' ):
+ subcommands_data = BuildRequest( filetype = 'dummy_filetype' )
- eq_( [ 'A', 'B', 'C' ],
- self._app.post_json( '/defined_subcommands',
- subcommands_data ).json )
+ eq_( [ 'A', 'B', 'C' ],
+ app.post_json( '/defined_subcommands', subcommands_data ).json )
diff --git a/ycmd/tests/test_utils.py b/ycmd/tests/test_utils.py
--- a/ycmd/tests/test_utils.py
+++ b/ycmd/tests/test_utils.py
@@ -27,11 +27,17 @@
from builtins import * # noqa
from future.utils import PY2
+from hamcrest import contains_string, has_entry, has_entries
+from mock import patch
+from webtest import TestApp
+import bottle
+import contextlib
+
+from ycmd import handlers, user_options_store
from ycmd.completers.completer import Completer
from ycmd.responses import BuildCompletionData
from ycmd.utils import OnMac, OnWindows
import ycm_core
-import os.path
try:
from unittest import skipIf
@@ -76,9 +82,64 @@ def BuildRequest( **kwargs ):
return request
-def PathToTestFile( *args ):
- dir_of_current_script = os.path.dirname( os.path.abspath( __file__ ) )
- return os.path.join( dir_of_current_script, 'testdata', *args )
+def ErrorMatcher( cls, msg = None ):
+ """ Returns a hamcrest matcher for a server exception response """
+ entry = { 'exception': has_entry( 'TYPE', cls.__name__ ) }
+
+ if msg:
+ entry.update( { 'message': msg } )
+
+ return has_entries( entry )
+
+
+def CompletionEntryMatcher( insertion_text,
+ extra_menu_info = None,
+ extra_params = None ):
+ match = { 'insertion_text': insertion_text }
+
+ if extra_menu_info:
+ match.update( { 'extra_menu_info': extra_menu_info } )
+
+ if extra_params:
+ match.update( extra_params )
+
+ return has_entries( match )
+
+
+def CompletionLocationMatcher( location_type, value ):
+ return has_entry( 'extra_data',
+ has_entry( 'location',
+ has_entry( location_type, value ) ) )
+
+
+def MessageMatcher( msg ):
+ return has_entry( 'message', contains_string( msg ) )
+
+
[email protected]
+def PatchCompleter( completer, filetype ):
+ user_options = handlers._server_state._user_options
+ with patch.dict( 'ycmd.handlers._server_state._filetype_completers',
+ { filetype: completer( user_options ) } ):
+ yield
+
+
[email protected]
+def UserOption( key, value ):
+ try:
+ current_options = dict( user_options_store.GetAll() )
+ user_options = current_options.copy()
+ user_options.update( { key: value } )
+ handlers.UpdateUserOptions( user_options )
+ yield
+ finally:
+ handlers.UpdateUserOptions( current_options )
+
+
+def SetUpApp():
+ bottle.debug( True )
+ handlers.SetServerStateToDefaults()
+ return TestApp( handlers.app )
class DummyCompleter( Completer ):
diff --git a/ycmd/tests/typescript/__init__.py b/ycmd/tests/typescript/__init__.py
--- a/ycmd/tests/typescript/__init__.py
+++ b/ycmd/tests/typescript/__init__.py
@@ -0,0 +1,59 @@
+# Copyright (C) 2016 ycmd contributors
+#
+# This file is part of ycmd.
+#
+# ycmd is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# ycmd is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with ycmd. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import unicode_literals
+from __future__ import print_function
+from __future__ import division
+from __future__ import absolute_import
+from future import standard_library
+standard_library.install_aliases()
+from builtins import * # noqa
+
+import functools
+import os
+
+from ycmd.tests.test_utils import SetUpApp
+
+shared_app = None
+
+
+def PathToTestFile( *args ):
+ dir_of_current_script = os.path.dirname( os.path.abspath( __file__ ) )
+ return os.path.join( dir_of_current_script, 'testdata', *args )
+
+
+def setUpPackage():
+ """Initializes the ycmd server as a WebTest application that will be shared
+ by all tests using the SharedYcmd decorator in this package. Additional
+ configuration that is common to these tests, like starting a semantic
+ subserver, should be done here."""
+ global shared_app
+
+ shared_app = SetUpApp()
+
+
+def SharedYcmd( test ):
+ """Defines a decorator to be attached to tests of this package. This decorator
+ passes the shared ycmd application as a parameter.
+
+ Do NOT attach it to test generators but directly to the yielded tests."""
+ global shared_app
+
+ @functools.wraps( test )
+ def Wrapper( *args, **kwargs ):
+ return test( shared_app, *args, **kwargs )
+ return Wrapper
diff --git a/ycmd/tests/typescript/get_completions_test.py b/ycmd/tests/typescript/get_completions_test.py
--- a/ycmd/tests/typescript/get_completions_test.py
+++ b/ycmd/tests/typescript/get_completions_test.py
@@ -24,67 +24,67 @@
from builtins import * # noqa
from hamcrest import assert_that, contains_inanyorder, has_entries
-from .typescript_handlers_test import Typescript_Handlers_test
-from ycmd.utils import ReadFile
from mock import patch
+from ycmd.tests.typescript import PathToTestFile, SharedYcmd
+from ycmd.tests.test_utils import BuildRequest, CompletionEntryMatcher
+from ycmd.utils import ReadFile
+
-class TypeScript_GetCompletions_test( Typescript_Handlers_test ):
+def RunTest( app, test ):
+ filepath = PathToTestFile( 'test.ts' )
+ contents = ReadFile( filepath )
- def _RunTest( self, test ):
- filepath = self._PathToTestFile( 'test.ts' )
- contents = ReadFile( filepath )
+ event_data = BuildRequest( filepath = filepath,
+ filetype = 'typescript',
+ contents = contents,
+ event_name = 'BufferVisit' )
- event_data = self._BuildRequest( filepath = filepath,
- filetype = 'typescript',
- contents = contents,
- event_name = 'BufferVisit' )
+ app.post_json( '/event_notification', event_data )
- self._app.post_json( '/event_notification', event_data )
+ completion_data = BuildRequest( filepath = filepath,
+ filetype = 'typescript',
+ contents = contents,
+ force_semantic = True,
+ line_num = 12,
+ column_num = 6 )
- completion_data = self._BuildRequest( filepath = filepath,
- filetype = 'typescript',
- contents = contents,
- force_semantic = True,
- line_num = 12,
- column_num = 6 )
+ response = app.post_json( '/completions', completion_data )
- response = self._app.post_json( '/completions', completion_data )
- assert_that( response.json, test[ 'expect' ][ 'data' ] )
+ assert_that( response.json, test[ 'expect' ][ 'data' ] )
- def Basic_test( self ):
- self._RunTest( {
- 'expect': {
- 'data': has_entries( {
- 'completions': contains_inanyorder(
- self.CompletionEntryMatcher(
- 'methodA',
- 'methodA (method) Foo.methodA(): void' ),
- self.CompletionEntryMatcher(
- 'methodB',
- 'methodB (method) Foo.methodB(): void' ),
- self.CompletionEntryMatcher(
- 'methodC',
- 'methodC (method) Foo.methodC(): void' ),
- )
- } )
- }
- } )
+@SharedYcmd
+def GetCompletions_Basic_test( app ):
+ RunTest( app, {
+ 'expect': {
+ 'data': has_entries( {
+ 'completions': contains_inanyorder(
+ CompletionEntryMatcher( 'methodA', extra_params = {
+ 'menu_text': 'methodA (method) Foo.methodA(): void' } ),
+ CompletionEntryMatcher( 'methodB', extra_params = {
+ 'menu_text': 'methodB (method) Foo.methodB(): void' } ),
+ CompletionEntryMatcher( 'methodC', extra_params = {
+ 'menu_text': 'methodC (method) Foo.methodC(): void' } ),
+ )
+ } )
+ }
+ } )
- @patch( 'ycmd.completers.typescript.'
- 'typescript_completer.MAX_DETAILED_COMPLETIONS',
- 2 )
- def MaxDetailedCompletion_test( self ):
- self._RunTest( {
- 'expect': {
- 'data': has_entries( {
- 'completions': contains_inanyorder(
- self.CompletionEntryMatcher( 'methodA' ),
- self.CompletionEntryMatcher( 'methodB' ),
- self.CompletionEntryMatcher( 'methodC' )
- )
- } )
- }
- } )
+@SharedYcmd
+@patch( 'ycmd.completers.typescript.'
+ 'typescript_completer.MAX_DETAILED_COMPLETIONS',
+ 2 )
+def GetCompletions_MaxDetailedCompletion_test( app ):
+ RunTest( app, {
+ 'expect': {
+ 'data': has_entries( {
+ 'completions': contains_inanyorder(
+ CompletionEntryMatcher( 'methodA' ),
+ CompletionEntryMatcher( 'methodB' ),
+ CompletionEntryMatcher( 'methodC' )
+ )
+ } )
+ }
+ } )
diff --git a/ycmd/tests/typescript/subcommands_test.py b/ycmd/tests/typescript/subcommands_test.py
--- a/ycmd/tests/typescript/subcommands_test.py
+++ b/ycmd/tests/typescript/subcommands_test.py
@@ -24,201 +24,202 @@
from builtins import * # noqa
from hamcrest import assert_that, has_items, has_entries
-from .typescript_handlers_test import Typescript_Handlers_test
-from ycmd.utils import ReadFile
-
-class TypeScript_Subcommands_test( Typescript_Handlers_test ):
-
- def GetType_Basic_test( self ):
- filepath = self._PathToTestFile( 'test.ts' )
- contents = ReadFile( filepath )
-
- event_data = self._BuildRequest( filepath = filepath,
- filetype = 'typescript',
- contents = contents,
- event_name = 'BufferVisit' )
-
- self._app.post_json( '/event_notification', event_data )
+from ycmd.tests.typescript import PathToTestFile, SharedYcmd
+from ycmd.tests.test_utils import BuildRequest, ErrorMatcher, MessageMatcher
+from ycmd.utils import ReadFile
- gettype_data = self._BuildRequest( completer_target = 'filetype_default',
- command_arguments = [ 'GetType' ],
- line_num = 12,
- column_num = 1,
- contents = contents,
- filetype = 'typescript',
- filepath = filepath )
- response = self._app.post_json( '/run_completer_command',
- gettype_data ).json
- assert_that( response, self._MessageMatcher( 'var foo: Foo' ) )
-
-
- def GetType_HasNoType_test( self ):
- filepath = self._PathToTestFile( 'test.ts' )
- contents = ReadFile( filepath )
-
- event_data = self._BuildRequest( filepath = filepath,
- filetype = 'typescript',
- contents = contents,
- event_name = 'BufferVisit' )
-
- self._app.post_json( '/event_notification', event_data )
-
- gettype_data = self._BuildRequest( completer_target = 'filetype_default',
- command_arguments = [ 'GetType' ],
- line_num = 2,
- column_num = 1,
- contents = contents,
- filetype = 'typescript',
- filepath = filepath )
-
- response = self._app.post_json( '/run_completer_command',
- gettype_data,
- expect_errors = True ).json
- assert_that( response,
- self._ErrorMatcher( RuntimeError, 'No content available.' ) )
-
-
- def GetDoc_Method_test( self ):
- filepath = self._PathToTestFile( 'test.ts' )
- contents = ReadFile( filepath )
-
- event_data = self._BuildRequest( filepath = filepath,
- filetype = 'typescript',
- contents = contents,
- event_name = 'BufferVisit' )
-
- self._app.post_json( '/event_notification', event_data )
-
- gettype_data = self._BuildRequest( completer_target = 'filetype_default',
- command_arguments = [ 'GetDoc' ],
- line_num = 29,
- column_num = 9,
- contents = contents,
- filetype = 'typescript',
- filepath = filepath )
-
- response = self._app.post_json( '/run_completer_command',
- gettype_data ).json
- assert_that( response,
- has_entries( {
- 'detailed_info': '(method) Bar.testMethod(): void\n\n'
- 'Method documentation'
- } ) )
-
-
- def GetDoc_Class_test( self ):
- filepath = self._PathToTestFile( 'test.ts' )
- contents = ReadFile( filepath )
-
- event_data = self._BuildRequest( filepath = filepath,
- filetype = 'typescript',
- contents = contents,
- event_name = 'BufferVisit' )
-
- self._app.post_json( '/event_notification', event_data )
-
- gettype_data = self._BuildRequest( completer_target = 'filetype_default',
- command_arguments = [ 'GetDoc' ],
- line_num = 32,
- column_num = 2,
- contents = contents,
- filetype = 'typescript',
- filepath = filepath )
-
- response = self._app.post_json( '/run_completer_command',
- gettype_data ).json
- assert_that( response,
- has_entries( {
- 'detailed_info': 'class Bar\n\n'
- 'Class documentation\n\n'
- 'Multi-line'
- } ) )
-
-
- def GoToReferences_test( self ):
- filepath = self._PathToTestFile( 'test.ts' )
- contents = ReadFile( filepath )
-
- event_data = self._BuildRequest( filepath = filepath,
- filetype = 'typescript',
- contents = contents,
- event_name = 'BufferVisit' )
-
- self._app.post_json( '/event_notification', event_data )
-
- references_data = self._BuildRequest(
- completer_target = 'filetype_default',
- command_arguments = [ 'GoToReferences' ],
- line_num = 28,
- column_num = 6,
- contents = contents,
- filetype = 'typescript',
- filepath = filepath )
-
- expected = has_items(
- has_entries( { 'description': 'var bar = new Bar();',
- 'line_num' : 28,
- 'column_num' : 5 } ),
- has_entries( { 'description': 'bar.testMethod();',
- 'line_num' : 29,
- 'column_num' : 1 } ) )
- actual = self._app.post_json( '/run_completer_command',
- references_data ).json
- assert_that( actual, expected )
-
-
- def GoTo_test( self ):
- filepath = self._PathToTestFile( 'test.ts' )
- contents = ReadFile( filepath )
-
- event_data = self._BuildRequest( filepath = filepath,
- filetype = 'typescript',
- contents = contents,
- event_name = 'BufferVisit' )
-
- self._app.post_json( '/event_notification', event_data )
-
- goto_data = self._BuildRequest( completer_target = 'filetype_default',
- command_arguments = [ 'GoToDefinition' ],
- line_num = 29,
- column_num = 9,
- contents = contents,
- filetype = 'typescript',
- filepath = filepath )
-
- response = self._app.post_json( '/run_completer_command', goto_data ).json
- assert_that( response,
- has_entries( {
- 'filepath': filepath,
- 'line_num': 25,
- 'column_num': 3,
- } ) )
-
-
- def GoTo_Fail_test( self ):
- filepath = self._PathToTestFile( 'test.ts' )
- contents = ReadFile( filepath )
-
- event_data = self._BuildRequest( filepath = filepath,
- filetype = 'typescript',
- contents = contents,
- event_name = 'BufferVisit' )
-
- self._app.post_json( '/event_notification', event_data )
-
- goto_data = self._BuildRequest( completer_target = 'filetype_default',
- command_arguments = [ 'GoToDefinition' ],
- line_num = 30,
- column_num = 6,
- contents = contents,
- filetype = 'typescript',
- filepath = filepath )
-
- response = self._app.post_json( '/run_completer_command',
- goto_data,
- expect_errors = True ).json
- assert_that( response,
- self._ErrorMatcher( RuntimeError,
- 'Could not find definition' ) )
+@SharedYcmd
+def Subcommands_GetType_Basic_test( app ):
+ filepath = PathToTestFile( 'test.ts' )
+ contents = ReadFile( filepath )
+
+ event_data = BuildRequest( filepath = filepath,
+ filetype = 'typescript',
+ contents = contents,
+ event_name = 'BufferVisit' )
+
+ app.post_json( '/event_notification', event_data )
+
+ gettype_data = BuildRequest( completer_target = 'filetype_default',
+ command_arguments = [ 'GetType' ],
+ line_num = 12,
+ column_num = 1,
+ contents = contents,
+ filetype = 'typescript',
+ filepath = filepath )
+
+ response = app.post_json( '/run_completer_command', gettype_data ).json
+ assert_that( response, MessageMatcher( 'var foo: Foo' ) )
+
+
+@SharedYcmd
+def Subcommands_GetType_HasNoType_test( app ):
+ filepath = PathToTestFile( 'test.ts' )
+ contents = ReadFile( filepath )
+
+ event_data = BuildRequest( filepath = filepath,
+ filetype = 'typescript',
+ contents = contents,
+ event_name = 'BufferVisit' )
+
+ app.post_json( '/event_notification', event_data )
+
+ gettype_data = BuildRequest( completer_target = 'filetype_default',
+ command_arguments = [ 'GetType' ],
+ line_num = 2,
+ column_num = 1,
+ contents = contents,
+ filetype = 'typescript',
+ filepath = filepath )
+
+ response = app.post_json( '/run_completer_command',
+ gettype_data,
+ expect_errors = True ).json
+ assert_that( response,
+ ErrorMatcher( RuntimeError, 'No content available.' ) )
+
+
+@SharedYcmd
+def Subcommands_GetDoc_Method_test( app ):
+ filepath = PathToTestFile( 'test.ts' )
+ contents = ReadFile( filepath )
+
+ event_data = BuildRequest( filepath = filepath,
+ filetype = 'typescript',
+ contents = contents,
+ event_name = 'BufferVisit' )
+
+ app.post_json( '/event_notification', event_data )
+
+ gettype_data = BuildRequest( completer_target = 'filetype_default',
+ command_arguments = [ 'GetDoc' ],
+ line_num = 29,
+ column_num = 9,
+ contents = contents,
+ filetype = 'typescript',
+ filepath = filepath )
+
+ response = app.post_json( '/run_completer_command', gettype_data ).json
+ assert_that( response,
+ has_entries( {
+ 'detailed_info': '(method) Bar.testMethod(): void\n\n'
+ 'Method documentation'
+ } ) )
+
+
+@SharedYcmd
+def Subcommands_GetDoc_Class_test( app ):
+ filepath = PathToTestFile( 'test.ts' )
+ contents = ReadFile( filepath )
+
+ event_data = BuildRequest( filepath = filepath,
+ filetype = 'typescript',
+ contents = contents,
+ event_name = 'BufferVisit' )
+
+ app.post_json( '/event_notification', event_data )
+
+ gettype_data = BuildRequest( completer_target = 'filetype_default',
+ command_arguments = [ 'GetDoc' ],
+ line_num = 32,
+ column_num = 2,
+ contents = contents,
+ filetype = 'typescript',
+ filepath = filepath )
+
+ response = app.post_json( '/run_completer_command', gettype_data ).json
+ assert_that( response,
+ has_entries( {
+ 'detailed_info': 'class Bar\n\n'
+ 'Class documentation\n\n'
+ 'Multi-line'
+ } ) )
+
+
+@SharedYcmd
+def Subcommands_GoToReferences_test( app ):
+ filepath = PathToTestFile( 'test.ts' )
+ contents = ReadFile( filepath )
+
+ event_data = BuildRequest( filepath = filepath,
+ filetype = 'typescript',
+ contents = contents,
+ event_name = 'BufferVisit' )
+
+ app.post_json( '/event_notification', event_data )
+
+ references_data = BuildRequest( completer_target = 'filetype_default',
+ command_arguments = [ 'GoToReferences' ],
+ line_num = 28,
+ column_num = 6,
+ contents = contents,
+ filetype = 'typescript',
+ filepath = filepath )
+
+ expected = has_items(
+ has_entries( { 'description': 'var bar = new Bar();',
+ 'line_num' : 28,
+ 'column_num' : 5 } ),
+ has_entries( { 'description': 'bar.testMethod();',
+ 'line_num' : 29,
+ 'column_num' : 1 } ) )
+ actual = app.post_json( '/run_completer_command', references_data ).json
+ assert_that( actual, expected )
+
+
+@SharedYcmd
+def Subcommands_GoTo_test( app ):
+ filepath = PathToTestFile( 'test.ts' )
+ contents = ReadFile( filepath )
+
+ event_data = BuildRequest( filepath = filepath,
+ filetype = 'typescript',
+ contents = contents,
+ event_name = 'BufferVisit' )
+
+ app.post_json( '/event_notification', event_data )
+
+ goto_data = BuildRequest( completer_target = 'filetype_default',
+ command_arguments = [ 'GoToDefinition' ],
+ line_num = 29,
+ column_num = 9,
+ contents = contents,
+ filetype = 'typescript',
+ filepath = filepath )
+
+ response = app.post_json( '/run_completer_command', goto_data ).json
+ assert_that( response,
+ has_entries( {
+ 'filepath': filepath,
+ 'line_num': 25,
+ 'column_num': 3,
+ } ) )
+
+
+@SharedYcmd
+def Subcommands_GoTo_Fail_test( app ):
+ filepath = PathToTestFile( 'test.ts' )
+ contents = ReadFile( filepath )
+
+ event_data = BuildRequest( filepath = filepath,
+ filetype = 'typescript',
+ contents = contents,
+ event_name = 'BufferVisit' )
+
+ app.post_json( '/event_notification', event_data )
+
+ goto_data = BuildRequest( completer_target = 'filetype_default',
+ command_arguments = [ 'GoToDefinition' ],
+ line_num = 30,
+ column_num = 6,
+ contents = contents,
+ filetype = 'typescript',
+ filepath = filepath )
+
+ response = app.post_json( '/run_completer_command',
+ goto_data,
+ expect_errors = True ).json
+ assert_that( response,
+ ErrorMatcher( RuntimeError, 'Could not find definition' ) )
diff --git a/ycmd/tests/typescript/typescript_handlers_test.py b/ycmd/tests/typescript/typescript_handlers_test.py
deleted file mode 100644
--- a/ycmd/tests/typescript/typescript_handlers_test.py
+++ /dev/null
@@ -1,41 +0,0 @@
-# Copyright (C) 2015 ycmd contributors
-#
-# This file is part of ycmd.
-#
-# ycmd is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# ycmd is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with ycmd. If not, see <http://www.gnu.org/licenses/>.
-
-from __future__ import unicode_literals
-from __future__ import print_function
-from __future__ import division
-from __future__ import absolute_import
-from future import standard_library
-standard_library.install_aliases()
-from builtins import * # noqa
-
-from ..handlers_test import Handlers_test
-
-
-class Typescript_Handlers_test( Handlers_test ):
-
- def __init__( self ):
- self._file = __file__
-
-
- def CompletionEntryMatcher( self, insertion_text, menu_text = None ):
- if not menu_text:
- menu_text = insertion_text
-
- extra_params = { 'menu_text': menu_text }
- return self._CompletionEntryMatcher( insertion_text,
- extra_params = extra_params )
diff --git a/ycmd/tests/utils_test.py b/ycmd/tests/utils_test.py
--- a/ycmd/tests/utils_test.py
+++ b/ycmd/tests/utils_test.py
@@ -31,7 +31,8 @@
from mock import patch, call
from nose.tools import eq_, ok_
from ycmd import utils
-from ycmd.tests.test_utils import PathToTestFile, Py2Only, Py3Only, WindowsOnly
+from ycmd.tests.test_utils import Py2Only, Py3Only, WindowsOnly
+from ycmd.tests import PathToTestFile
# NOTE: isinstance() vs type() is carefully used in this test file. Before
# changing things here, read the comments in utils.ToBytes.
| Refactor tests to reuse semantic servers
We currently bring up a new instance of omnisharp, gocode, racerd etc for every single test that needs them. It makes tests take _forever_, especially the omnisharp tests.
We need to figure out a way to reuse the server instances once they're up without making tests clobber each other. If we only manage to get C# tests to reuse omnisharp, I'll call that a win.
@puremourning @micbou @vheon Any takers? :)
| I already have [a implementation](https://github.com/Valloric/ycmd/compare/master...mispencer:PersistentOmnisharp) of this, though it's quite out of date at the moment.
https://github.com/Valloric/ycmd/pull/339 implements this for Omnisharp
@Valloric apparently @mispencer got here first...
| 2016-02-29T23:27:32 |
ycm-core/ycmd | 448 | ycm-core__ycmd-448 | [
"446"
] | c3e6904f436463f7fdb3aed5055552e893774009 | diff --git a/ycmd/server_utils.py b/ycmd/server_utils.py
--- a/ycmd/server_utils.py
+++ b/ycmd/server_utils.py
@@ -25,6 +25,7 @@
import sys
import os
import io
+import re
VERSION_FILENAME = 'CORE_VERSION'
CORE_NOT_COMPATIBLE_MESSAGE = (
@@ -32,6 +33,7 @@
)
DIR_OF_CURRENT_SCRIPT = os.path.dirname( os.path.abspath( __file__ ) )
+DIR_PACKAGES_REGEX = re.compile( '(site|dist)-packages$' )
def SetUpPythonPath():
@@ -86,11 +88,19 @@ def AddNearestThirdPartyFoldersToSysPath( filepath ):
# under its 'src' folder, but SOME of its modules are only meant to be
# accessible under py2, not py3. This is because these modules (like
# `queue`) are implementations of modules present in the py3 standard
- # library. So to work around issues, we place the python-future last on
- # sys.path so that they can be overriden by the standard library.
+ # library. Furthermore, we need to be sure that they are not overriden by
+ # already installed packages (for example, the 'builtins' module from
+ # 'pies2overrides' or a different version of 'python-future'). To work
+ # around these issues, we place the python-future just before the first
+ # path ending with 'site-packages' (or 'dist-packages' for Debian-like
+ # distributions) so that its modules can be overridden by the standard
+ # library but not by installed packages.
if folder == 'python-future':
folder = os.path.join( folder, 'src' )
- sys.path.append( os.path.realpath( os.path.join( path_to_third_party,
+ packages_indices = ( sys.path.index( path ) for path in sys.path
+ if DIR_PACKAGES_REGEX.search( path ) )
+ sys.path.insert( next( packages_indices, len( sys.path ) ),
+ os.path.realpath( os.path.join( path_to_third_party,
folder ) ) )
continue
sys.path.insert( 0, os.path.realpath( os.path.join( path_to_third_party,
| diff --git a/ycmd/tests/server_utils_test.py b/ycmd/tests/server_utils_test.py
--- a/ycmd/tests/server_utils_test.py
+++ b/ycmd/tests/server_utils_test.py
@@ -23,11 +23,31 @@
standard_library.install_aliases()
from builtins import * # noqa
-from hamcrest import raises, assert_that, calling
+from hamcrest import ( assert_that, calling, contains, contains_inanyorder,
+ raises )
+from mock import patch
from nose.tools import ok_
-from ycmd.server_utils import ( PathToNearestThirdPartyFolder,
- AddNearestThirdPartyFoldersToSysPath )
import os.path
+import sys
+
+from ycmd.server_utils import ( AddNearestThirdPartyFoldersToSysPath,
+ PathToNearestThirdPartyFolder )
+
+DIR_OF_THIRD_PARTY = os.path.abspath(
+ os.path.join( os.path.dirname( __file__ ), '..', '..', 'third_party' ) )
+THIRD_PARTY_FOLDERS = (
+ os.path.join( DIR_OF_THIRD_PARTY, 'argparse' ),
+ os.path.join( DIR_OF_THIRD_PARTY, 'bottle' ),
+ os.path.join( DIR_OF_THIRD_PARTY, 'frozendict' ),
+ os.path.join( DIR_OF_THIRD_PARTY, 'godef' ),
+ os.path.join( DIR_OF_THIRD_PARTY, 'gocode' ),
+ os.path.join( DIR_OF_THIRD_PARTY, 'JediHTTP' ),
+ os.path.join( DIR_OF_THIRD_PARTY, 'OmniSharpServer' ),
+ os.path.join( DIR_OF_THIRD_PARTY, 'racerd' ),
+ os.path.join( DIR_OF_THIRD_PARTY, 'requests' ),
+ os.path.join( DIR_OF_THIRD_PARTY, 'tern_runtime' ),
+ os.path.join( DIR_OF_THIRD_PARTY, 'waitress' )
+)
def PathToNearestThirdPartyFolder_Success_test():
@@ -43,3 +63,53 @@ def AddNearestThirdPartyFoldersToSysPath_Failure_test():
calling( AddNearestThirdPartyFoldersToSysPath ).with_args(
os.path.expanduser( '~' ) ),
raises( RuntimeError, '.*third_party folder.*' ) )
+
+
+@patch( 'sys.path', [ '/some/path',
+ '/first/path/to/site-packages',
+ '/another/path',
+ '/second/path/to/site-packages' ] )
+def AddNearestThirdPartyFoldersToSysPath_FutureBeforeSitePackages_test():
+ AddNearestThirdPartyFoldersToSysPath( __file__ )
+ assert_that( sys.path[ : len( THIRD_PARTY_FOLDERS ) ], contains_inanyorder(
+ *THIRD_PARTY_FOLDERS
+ ) )
+ assert_that( sys.path[ len( THIRD_PARTY_FOLDERS ) : ], contains(
+ '/some/path',
+ os.path.join( DIR_OF_THIRD_PARTY, 'python-future', 'src' ),
+ '/first/path/to/site-packages',
+ '/another/path',
+ '/second/path/to/site-packages',
+ ) )
+
+
+@patch( 'sys.path', [ '/some/path',
+ '/first/path/to/dist-packages',
+ '/another/path',
+ '/second/path/to/dist-packages' ] )
+def AddNearestThirdPartyFoldersToSysPath_FutureBeforeDistPackages_test():
+ AddNearestThirdPartyFoldersToSysPath( __file__ )
+ assert_that( sys.path[ : len( THIRD_PARTY_FOLDERS ) ], contains_inanyorder(
+ *THIRD_PARTY_FOLDERS
+ ) )
+ assert_that( sys.path[ len( THIRD_PARTY_FOLDERS ) : ], contains(
+ '/some/path',
+ os.path.join( DIR_OF_THIRD_PARTY, 'python-future', 'src' ),
+ '/first/path/to/dist-packages',
+ '/another/path',
+ '/second/path/to/dist-packages',
+ ) )
+
+
+@patch( 'sys.path', [ '/some/path',
+ '/another/path' ] )
+def AddNearestThirdPartyFoldersToSysPath_FutureLastIfNoPackages_test():
+ AddNearestThirdPartyFoldersToSysPath( __file__ )
+ assert_that( sys.path[ : len( THIRD_PARTY_FOLDERS ) ], contains_inanyorder(
+ *THIRD_PARTY_FOLDERS
+ ) )
+ assert_that( sys.path[ len( THIRD_PARTY_FOLDERS ) : ], contains(
+ '/some/path',
+ '/another/path',
+ os.path.join( DIR_OF_THIRD_PARTY, 'python-future', 'src' ),
+ ) )
| replace `builtins` with `future.builtins`, see Valloric/YouCompleteMe/issues/2024
Basically, I just run:
`find . -type f -iname '*.py' -print0 | xargs -0 -P 2 -n 1 sed -i '' -e 's/from builtins/from future.builtins/g'`
This patch fixed YouCompleteMe on my machine (Mac OS 10.11.3).
> VIM - Vi IMproved 7.4 (2013 Aug 10, compiled Mar 30 2016 11:47:02)
> MacOS X (unix) version
> Included patches: 1-1655
> Compiled by Homebrew
> Huge version without GUI. Features included (+) or not (-):
> +acl +farsi +mouse_netterm +tag_binary
> +arabic +file_in_path +mouse_sgr +tag_old_static
> +autocmd +find_in_path -mouse_sysmouse -tag_any_white
> -balloon_eval +float +mouse_urxvt -tcl
> -browse +folding +mouse_xterm +terminfo
> ++builtin_terms -footer +multi_byte +termresponse
> +byte_offset +fork() +multi_lang +textobjects
> +channel -gettext -mzscheme +timers
> +cindent -hangul_input +netbeans_intg +title
> -clientserver +iconv +packages -toolbar
> +clipboard +insert_expand +path_extra +user_commands
> +cmdline_compl +job +perl +vertsplit
> +cmdline_hist +jumplist +persistent_undo +virtualedit
> +cmdline_info +keymap +postscript +visual
> +comments +langmap +printer +visualextra
> +conceal +libcall +profile +viminfo
> +cryptv +linebreak +python +vreplace
> +cscope +lispindent -python3 +wildignore
> +cursorbind +listcmds +quickfix +wildmenu
> +cursorshape +localmap +reltime +windows
> +dialog_con -lua +rightleft +writebackup
> +diff +menu +ruby -X11
> +digraphs +mksession +scrollbind -xfontset
> -dnd +modify_fname +signs -xim
> -ebcdic +mouse +smartindent -xsmp
> +emacs_tags -mouseshape +startuptime -xterm_clipboard
> +eval +mouse_dec +statusline -xterm_save
> +ex_extra -mouse_gpm -sun_workshop -xpm
> +extra_search -mouse_jsbterm +syntax
> system vimrc file: "$VIM/vimrc"
> user vimrc file: "$HOME/.vimrc"
> 2nd user vimrc file: "~/.vim/vimrc"
> user exrc file: "$HOME/.exrc"
> fall-back for $VIM: "/usr/local/share/vim"
> Compilation: /usr/bin/clang -c -I. -Iproto -DHAVE_CONFIG_H -F/usr/local/Frameworks -DMACOS_X_UNIX -Os -w -pipe -march=native -mmacosx-version-min=10.11 -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=1
> Linking: /usr/bin/clang -L. -fstack-protector -L/usr/local/lib -L/usr/local/opt/libyaml/lib -L/usr/local/opt/openssl/lib -L/usr/local/opt/readline/lib -L/usr/local/lib -F/usr/local/Frameworks -Wl,-headerpad_max_install_names -o vim -lm -lncurses -liconv -framework Cocoa -fstack-protector -L/System/Library/Perl/5.18/darwin-thread-multi-2level/CORE -lperl -F/usr/local/Cellar/python/2.7.11/Frameworks -framework Python -lruby.2.3.0 -lobjc -L/usr/local/Cellar/ruby/2.3.0/lib
<!-- Reviewable:start -->
---
This change is [<img src="https://reviewable.io/review_button.svg" height="35" align="absmiddle" alt="Reviewable"/>](https://reviewable.io/reviews/valloric/ycmd/446)
<!-- Reviewable:end -->
| 2016-04-01T22:53:33 |
|
ycm-core/ycmd | 481 | ycm-core__ycmd-481 | [
"196",
"479"
] | 8617de85073fa280c071ae55152b24f909de47ae | diff --git a/build.py b/build.py
--- a/build.py
+++ b/build.py
@@ -7,12 +7,17 @@
from __future__ import division
from __future__ import absolute_import
+from shutil import rmtree
+from tempfile import mkdtemp
+import errno
+import re
+import multiprocessing
import os
-import subprocess
import os.path as p
-import sys
+import platform
import shlex
-import errno
+import subprocess
+import sys
PY_MAJOR, PY_MINOR = sys.version_info[ 0 : 2 ]
if not ( ( PY_MAJOR == 2 and PY_MINOR >= 6 ) or
@@ -33,11 +38,22 @@
sys.path.insert( 1, p.abspath( p.join( DIR_OF_THIRD_PARTY, 'argparse' ) ) )
-from tempfile import mkdtemp
-from shutil import rmtree
-import platform
import argparse
-import multiprocessing
+
+NO_DYNAMIC_PYTHON_ERROR = (
+ 'ERROR: found static Python library ({library}) but a dynamic one is '
+ 'required. You must use a Python compiled with the {flag} flag. '
+ 'If using pyenv, you need to run the command:\n'
+ ' export PYTHON_CONFIGURE_OPTS="{flag}"\n'
+ 'before installing a Python version.' )
+NO_PYTHON_LIBRARY_ERROR = 'ERROR: unable to find an appropriate Python library.'
+
+LIBRARY_LDCONFIG_REGEX = re.compile(
+ '(?P<library>\S+) \(.*\) => (?P<path>\S+)' )
+
+
+def OnLinux():
+ return platform.system() == 'Linux'
def OnMac():
@@ -120,82 +136,125 @@ def CheckOutput( *popen_args, **kwargs ):
return output
+def GetPythonNameOnUnix():
+ python_name = 'python' + str( PY_MAJOR ) + '.' + str( PY_MINOR )
+ # Python 3 has an 'm' suffix on Unix platforms, for instance libpython3.3m.so.
+ if PY_MAJOR == 3:
+ python_name += 'm'
+ return python_name
+
+
+def GetStandardPythonLocationsOnUnix( prefix, name ):
+ return ( '{0}/lib/lib{1}'.format( prefix, name ),
+ '{0}/include/{1}'.format( prefix, name ) )
+
+
+def FindPythonLibrariesOnLinux():
+ python_name = GetPythonNameOnUnix()
+ python_library_root, python_include = GetStandardPythonLocationsOnUnix(
+ sys.exec_prefix, python_name )
+
+ python_library = python_library_root + '.so'
+ if p.isfile( python_library ):
+ return python_library, python_include
+
+ python_library = python_library_root + '.a'
+ if p.isfile( python_library ):
+ sys.exit( NO_DYNAMIC_PYTHON_ERROR.format( library = python_library,
+ flag = '--enable-shared' ) )
+
+ # On some distributions (Ubuntu for instance), the Python system library is
+ # not installed in its default path: /usr/lib. We use the ldconfig tool to
+ # find it.
+ python_library = 'lib' + python_name + '.so'
+ ldconfig_output = CheckOutput( [ 'ldconfig', '-p' ] ).strip().decode( 'utf8' )
+ for line in ldconfig_output.splitlines():
+ match = LIBRARY_LDCONFIG_REGEX.search( line )
+ if match and match.group( 'library' ) == python_library:
+ return match.group( 'path' ), python_include
+
+ sys.exit( NO_PYTHON_LIBRARY_ERROR )
+
+
+def FindPythonLibrariesOnMac():
+ python_prefix = sys.exec_prefix
+
+ python_library = p.join( python_prefix, 'Python' )
+ if p.isfile( python_library ):
+ return python_library, p.join( python_prefix, 'Headers' )
+
+ python_name = GetPythonNameOnUnix()
+ python_library_root, python_include = GetStandardPythonLocationsOnUnix(
+ python_prefix, python_name )
+
+ # On MacOS, ycmd does not work with statically linked python library.
+ # It typically manifests with the following error when there is a
+ # self-compiled python without --enable-framework (or, technically
+ # --enable-shared):
+ #
+ # Fatal Python error: PyThreadState_Get: no current thread
+ #
+ # The most likely explanation for this is that both the ycm_core.so and the
+ # python binary include copies of libpython.a (or whatever included
+ # objects). When the python interpreter starts it initializes only the
+ # globals within its copy, so when ycm_core.so's copy starts executing, it
+ # points at its own copy which is uninitialized.
+ #
+ # Some platforms' dynamic linkers (ld.so) are able to resolve this when
+ # loading shared libraries at runtime[citation needed], but OSX seemingly
+ # cannot.
+ #
+ # So we do 2 things special on OS X:
+ # - look for a .dylib first
+ # - if we find a .a, raise an error.
+ python_library = python_library_root + '.dylib'
+ if p.isfile( python_library ):
+ return python_library, python_include
+
+ python_library = python_library_root + '.a'
+ if p.isfile( python_library ):
+ sys.exit( NO_DYNAMIC_PYTHON_ERROR.format( library = python_library,
+ flag = '--enable-framework' ) )
+
+ sys.exit( NO_PYTHON_LIBRARY_ERROR )
+
+
+def FindPythonLibrariesOnWindows():
+ python_prefix = sys.exec_prefix
+ python_name = 'python' + str( PY_MAJOR ) + str( PY_MINOR )
+
+ python_library = p.join( python_prefix, 'libs', python_name + '.lib' )
+ if p.isfile( python_library ):
+ return python_library, p.join( python_prefix, 'include' )
+
+ sys.exit( NO_PYTHON_LIBRARY_ERROR )
+
+
+def FindPythonLibraries():
+ if OnLinux():
+ return FindPythonLibrariesOnLinux()
+
+ if OnMac():
+ return FindPythonLibrariesOnMac()
+
+ if OnWindows():
+ return FindPythonLibrariesOnWindows()
+
+ sys.exit( 'ERROR: your platform is not supported by this script. Follow the '
+ 'Full Installation Guide instructions in the documentation.' )
+
+
def CustomPythonCmakeArgs():
# The CMake 'FindPythonLibs' Module does not work properly.
# So we are forced to do its job for it.
+ print( 'Searching Python {major}.{minor} libraries...'.format(
+ major = PY_MAJOR, minor = PY_MINOR ) )
- print( 'Searching for python libraries...' )
-
- python_prefix = CheckOutput( [
- 'python-config',
- '--prefix'
- ] ).strip().decode( 'utf8' )
-
- if p.isfile( p.join( python_prefix, '/Python' ) ):
- python_library = p.join( python_prefix, '/Python' )
- python_include = p.join( python_prefix, '/Headers' )
- print( 'Using OSX-style libs from {0}'.format( python_prefix ) )
- else:
- major_minor = CheckOutput( [
- 'python',
- '-c',
- 'import sys;i=sys.version_info;print( "%d.%d" % (i[0], i[1]) )'
- ] ).strip().decode( 'utf8' )
- which_python = 'python' + major_minor
-
- # Python 3 has an 'm' suffix, for instance libpython3.3m.a
- if major_minor.startswith( '3' ):
- which_python += 'm'
-
- lib_python = '{0}/lib/lib{1}'.format( python_prefix, which_python ).strip()
-
- print( 'Searching for python with prefix: {0} and lib {1}:'.format(
- python_prefix, which_python ) )
-
- # On MacOS, ycmd does not work with statically linked python library.
- # It typically manifests with the following error when there is a
- # self-compiled python without --enable-framework (or, technically
- # --enable-shared):
- #
- # Fatal Python error: PyThreadState_Get: no current thread
- #
- # The most likely explanation for this is that both the ycm_core.so and the
- # python binary include copies of libpython.a (or whatever included
- # objects). When the python interpreter starts it initializes only the
- # globals within its copy, so when ycm_core.so's copy starts executing, it
- # points at its own copy which is uninitialized.
- #
- # Some platforms' dynamic linkers (ld.so) are able to resolve this when
- # loading shared libraries at runtime[citation needed], but OSX seemingly
- # cannot.
- #
- # So we do 2 things special on OS X:
- # - look for a .dylib first
- # - if we find a .a, raise an error.
-
- if p.isfile( '{0}.dylib'.format( lib_python ) ):
- python_library = '{0}.dylib'.format( lib_python )
- elif p.isfile( '/usr/lib/lib{0}.dylib'.format( which_python ) ):
- # For no clear reason, python2.6 only exists in /usr/lib on OS X and
- # not in the python prefix location
- python_library = '/usr/lib/lib{0}.dylib'.format( which_python )
- elif p.isfile( '{0}.a'.format( lib_python ) ):
- if OnMac():
- sys.exit( 'ERROR: You must use a python compiled with '
- '--enable-shared or --enable-framework (and thus a {0}.dylib '
- 'library) on OS X'.format( lib_python ) )
-
- python_library = '{0}.a'.format( lib_python )
- # This check is for CYGWIN
- elif p.isfile( '{0}.dll.a'.format( lib_python ) ):
- python_library = '{0}.dll.a'.format( lib_python )
- else:
- sys.exit( 'ERROR: Unable to find an appropriate python library' )
+ python_library, python_include = FindPythonLibraries()
- python_include = '{0}/include/{1}'.format( python_prefix, which_python )
+ print( 'Found Python library: {0}'.format( python_library ) )
+ print( 'Found Python headers folder: {0}'.format( python_include ) )
- print( 'Using PYTHON_LIBRARY={0} PYTHON_INCLUDE_DIR={1}'.format(
- python_library, python_include ) )
return [
'-DPYTHON_LIBRARY={0}'.format( python_library ),
'-DPYTHON_INCLUDE_DIR={0}'.format( python_include )
@@ -322,8 +381,7 @@ def BuildYcmdLib( args ):
try:
full_cmake_args = [ '-G', GetGenerator( args ) ]
- if OnMac():
- full_cmake_args.extend( CustomPythonCmakeArgs() )
+ full_cmake_args.extend( CustomPythonCmakeArgs() )
full_cmake_args.extend( GetCmakeArgs( args ) )
full_cmake_args.append( p.join( DIR_OF_THIS_SCRIPT, 'cpp' ) )
| make cmake use env python
CustomPythonCmakeArgs forces CMake to pickup the environment python libraries and include but the interpreter is still picked using PythonInterp which picks up the system default instead of the environment python. This cause issues with vitualenv.
<!-- Reviewable:start -->
[<img src="https://reviewable.io/review_button.png" height=40 alt="Review on Reviewable"/>](https://reviewable.io/reviews/valloric/ycmd/196)
<!-- Reviewable:end -->
Ycmd build system assumes that the current python on OSX is the same to compile ycmd for.
I tried to install ycmd on macosx and it took me a while to actually figure out that:
1) you need python3 (both for Sublime Text3 and for the example)
2) the build system tries to build the library from the output of python-config --prefix. So even if I was running `python3 build.py` it would still try to use the default python configuration (that is 2.7 on my machine).
it would be useful if the build.py uses the current python executable to determine the prefix.
| Hm, this has the potential to break everything. The whole multiple-versions-of-python-on-one-machine thing is the single most frequent reason for failed YCM installations.
So we need to be _really_ sure this is doing the right thing. I'd appreciated it if various people could try out this change; the more OS's/distros we cover, the better.
@vheon @micbou @puremourning
The `CustomPythonCmakeArgs` function is only called on OS X so Linux and Windows are not affected by this change.
happy to give this a spin as i have 2 macs - 1 which the install always works and 1 which i always have do battle with homebrew/python/etc.
However, @amchoukir could you elaborate on _exactly_ what problem this is solving? Does it relate to an issue on the tracker? Are there some obvious test steps we should go through?
Ta,
Ben
Thanks for having a look and giving it a try.
@puremourning, @Valloric
`CustomPythonCmakeArgs` uses `python-config` to figure out where python is installed in the current environment. Unfortunately `virtualenv` does not copy `python-config` when setting up a new environment, I will take this issue separately to the `virtualenv` github repo. Copying `python-config` to my `virtualenv` takes care of the `PythonLibs`.
But there is still an issue with `PythonInterp` as can be seen from the install log below:
```
-- Found PythonLibs: /usr/local/Cellar/python/2.7.10/Frameworks/Python.framework/Versions/2.7/lib/libpython2.7.dylib (found suitable version "2.7.10", minimum required is "2.6")
-- Found PythonInterp: /usr/bin/python2.6 (found suitable version "2.6.9", minimum required is "2.6")
```
`PythonInterp` picks up the system default instead of the interpreter in my `virtualenv`. The fix I proposed take care of the issue in a similar way as `PythonLibs`.
My setup is as follows:
OS X 10.10.3 (Maverick)
Python 2.7.10 installed via homebrew
Macvim 7.4 installed via homebrew
The test steps I went through are:
- Make sure the same version of the lib and interpreter are picked during the install
- Check that you don't get vim crashing upon startup.
To my understanding `virtualenv` is a tool for python available not only on OSX, but the fix you're proposing is implemented in a function that is called only on OSX, so is the fix not needed on other platforms?
@vheon yes it might definitely be needed. I have access to linux servers on which I could try it out, if someone could have a look at windows.
> @vheon yes it might definitely be needed.
@amchoukir Then this PR should be modified to run on linux as well right?
I think we should hold out on this change while I get the Travis OSX builds working. I've had to do some work to streamline the build which I think obsolete this change.
> On 19 Aug 2015, at 18:46, Andrea Cedraro [email protected] wrote:
>
> @vheon yes it might definitely be needed.
>
> Then this PR should be modified to run on linux as well right?
>
> —
> Reply to this email directly or view it on GitHub.
@puremourning Sure, np.
Here are the results of my tests on Linux
**Scenario 1: System default sanity check**
-- Found PythonLibs: /usr/lib/x86_64-linux-gnu/libpython2.7.so (found suitable version "2.7.6", minimum required is "2.6")
-- Found PythonInterp: /home/vagrant/.virtualenv/ycmd/bin/python2 (found suitable version "2.7.6", minimum required is "2.6")
**Scenario 2: Vanilla virtualenv with 2.7.10**
-- Found PythonLibs: /usr/lib/x86_64-linux-gnu/libpython2.7.so (found suitable version "2.7.6", minimum required is "2.6")
-- Found PythonInterp: /home/vagrant/.virtualenv/ycmd2.10/bin/python2 (found suitable version "2.7.10", minimum required is "2.6")
Wrong library picked up. It picked up the system default instead of the 2.7.10.
**Scenario 3: virtualenv with 2.7.10 and python-config copied over**
-- Found PythonLibs: /usr/lib/x86_64-linux-gnu/libpython2.7.so (found suitable version "2.7.6", minimum required is "2.6")
-- Found PythonInterp: /home/vagrant/.virtualenv/ycmd2.10/bin/python2 (found suitable version "2.7.10", minimum required is "2.6")
Wrong library picked up. It picked up the system default instead of the 2.7.10. So here python-config does not fix the issue.
Here are the specifics of python-config
(ycmd2.10)vagrant@vvv:~/ycmd$ python-config --prefix
/usr/local/python/2.10
(ycmd2.10)vagrant@vvv:~/ycmd$ which python-config
/home/vagrant/.virtualenv/ycmd2.10/bin/python-config
Most likely this is due to the difference in paths between OS X and Linux
**Next Steps**
I will dig into build.py to check what happens.
> @amchoukir Unfortunately virtualenv does not copy python-config when setting up a new environment, I will take this issue separately to the virtualenv github repo. Copying python-config to my virtualenv takes care of the PythonLibs.
Yes, this is annoying. I found the same problem when making the ycmd build/test run on Travis OS X. Did you raise this with virtualenv folks? If so could we get a reference?
Thanks,
Ben
@puremourning yes Paul opened the following issue:
https://github.com/pypa/virtualenv/pull/783
Hello Everyone, I am back from vacation and I will have a look at why the build does not work on virtualenv for Linux.
Meanwhile the fix to get the python-config copied in virtualenv has been merged:
https://github.com/pypa/virtualenv/pull/798
| 2016-05-07T14:09:01 |
|
ycm-core/ycmd | 482 | ycm-core__ycmd-482 | [
"476"
] | 8617de85073fa280c071ae55152b24f909de47ae | diff --git a/ycmd/handlers.py b/ycmd/handlers.py
--- a/ycmd/handlers.py
+++ b/ycmd/handlers.py
@@ -25,7 +25,6 @@
import atexit
import bottle
-import http.client
import json
import logging
import traceback
@@ -221,13 +220,15 @@ def DebugInfo():
# The type of the param is Bottle.HTTPError
[email protected]( http.client.INTERNAL_SERVER_ERROR )
def ErrorHandler( httperror ):
body = _JsonResponse( BuildExceptionResponse( httperror.exception,
httperror.traceback ) )
hmac_plugin.SetHmacHeader( body, _hmac_secret )
return body
+# For every error Bottle encounters it will use this as the default handler
+app.default_error_handler = ErrorHandler
+
def _JsonResponse( data ):
SetResponseHeader( 'Content-Type', 'application/json' )
| diff --git a/ycmd/tests/misc_handlers_test.py b/ycmd/tests/misc_handlers_test.py
--- a/ycmd/tests/misc_handlers_test.py
+++ b/ycmd/tests/misc_handlers_test.py
@@ -46,6 +46,16 @@ def MiscHandlers_EventNotification_AlwaysJsonResponse_test( app ):
app.post_json( '/event_notification', event_data ).json
+@SharedYcmd
+def MiscHandlers_EventNotification_ReturnJsonOnBigFileError_test( app ):
+ # We generate a content greater than Bottle.MEMFILE_MAX, which is set to 1Mb.
+ contents = "foo " * 500000
+ event_data = BuildRequest( contents = contents,
+ event_name = 'FileReadyToParse' )
+
+ app.post_json( '/event_notification', event_data, expect_errors = True ).json
+
+
@SharedYcmd
def MiscHandlers_FilterAndSortCandidates_Basic_test( app ):
candidate1 = { 'prop1': 'aoo', 'prop2': 'bar' }
| YCM runs on large-ish non-source files, spews a backtrace when you enter insert mode
STR:
```
# Create a file that's large, but not so large as to cause ycm to disable itself
$ cd `mktemp -d`
$ dd if=/dev/urandom of=test bs=500k count=1
$ vim test
```
Now enter insert mode.
Result: YCM error spew, included below.
I bisected the test file size and noticed that the error starts occurring somewhere between files of size 100k and 300k.
```
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "/usr/lib/youcompleteme/autoload/../python/ycm/youcompleteme.py",
line 255, in NativeFiletypeCompletionUsable
self.NativeFiletypeCompletionAvailable() )
File "/usr/lib/youcompleteme/autoload/../python/ycm/youcompleteme.py",
line 250, in NativeFiletypeCompletionAvailable
vimsupport.CurrentFiletypes() ] )
File "/usr/lib/youcompleteme/autoload/../python/ycm/youcompleteme.py",
line 240, in FiletypeCompleterExistsForFiletype
exists_completer = SendCompleterAvailableRequest( filetype )
File "/usr/lib/youcompleteme/autoload/../python/ycm/client/completer_available_request.py",
line 57, in SendCompleterAvailableRequest
request.Start()
File "/usr/lib/youcompleteme/autoload/../python/ycm/client/completer_available_request.py",
line 45, in Start
'semantic_completion_available' )
File "/usr/lib/youcompleteme/autoload/../python/ycm/client/base_request.py",
line 81, in PostDataToHandler
timeout ) )
File "/usr/lib/youcompleteme/autoload/../python/ycm/client/base_request.py",
line 177, in JsonFromFuture
_ValidateResponseObject( response )
File "/usr/lib/youcompleteme/autoload/../python/ycm/client/base_request.py",
line 206, in _ValidateResponseObject
their_hmac = ToBytes( b64decode( response.headers[ _HMAC_HEADER ] ) )
File "/usr/lib/youcompleteme/third_party/ycmd/third_party/requests/requests/structures.py",
line 54, in __getitem__
return self._store[key.lower()][1]
KeyError: u'x-ycm-hmac'
E858: Eval did not return a valid python object
```
+@r4nt
| I expected this to happen when the file is around 1Mb, which is the max size of a request that we set for Bottle, but if you can reproduce this with a 500Kb then the problem could be something else 😕 Can you post the ycmd logs as well?
Sure, where can I find the ycmd logs?
`:YcmDebugInfo`
OK, looks just like the stack trace I got above.
```
Printing YouCompleteMe debug information...
Error detected while processing function <SNR>149_DebugInfo[2]..<SNR>149_Pyeval:
line 2:
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "/usr/lib/youcompleteme/autoload/../python/ycm/youcompleteme.py", line 550, in DebugInfo
'debug_info' )
File "/usr/lib/youcompleteme/autoload/../python/ycm/client/base_request.py", line 81, in PostDataToHandler
timeout ) )
File "/usr/lib/youcompleteme/autoload/../python/ycm/client/base_request.py", line 177, in JsonFromFuture
_ValidateResponseObject( response )
File "/usr/lib/youcompleteme/autoload/../python/ycm/client/base_request.py", line 206, in _ValidateResponseObject
their_hmac = ToBytes( b64decode( response.headers[ _HMAC_HEADER ] ) )
File "/usr/lib/youcompleteme/third_party/ycmd/third_party/requests/requests/structures.py", line 54, in __getitem__
return self._store[key.lower()][1]
KeyError: u'x-ycm-hmac'
E858: Eval did not return a valid python object
-- 0
```
Actually the command didn't work an you got the same error as before. What you shoul do is:
- open the "big" file with vim
- use `YcmDebugInfo` to get the path of `ycmd` logs file
- Enter Insert mode to get the error
- Post the log
- open the "big" file with vim
- use YcmDebugInfo to get the path of ycmd logs file
If I do this, I still get the same stacktrace.
OTOH if I invert the order of these operations, I get the following in my stderr log. The stdout log is empty.
```
2016-05-05 14:23:55,847 - INFO - Calling global extra conf method YcmCorePreload on conf file /usr/lib/youcompleteme/ycm_extra_conf.py
2016-05-05 14:23:57,138 - INFO - Received health request
2016-05-05 14:23:57,141 - INFO - Received debug info request
2016-05-05 14:24:30,079 - INFO - Received filetype completion available request
```
@jlebar Since I see references to `/usr/lib/youcompleteme/`, could you make sure you can repro this with vanilla YCM and not the Google-internal copy?
I wonder if I'm saving anyone time by doing all this testing myself? Like, if others cannot reproduce, I would be happy to dig in and try to figure out what is interesting about my config, but otherwise if I can reproduce using the public build, well, the next step is going to be for someone else to reproduce it themselves...
Unless you think this is likely to be something unusual about my config?
> Like, if others cannot reproduce, I would be happy to dig in and try to figure out what is interesting about my config
By all means, please do. :)
> otherwise if I can reproduce using the public build, well, the next step is going to be for someone else to reproduce it themselves...
True, but issue reporters are rarely willing to run through the code to figure out the issue. :)
Also, we tend to know the codebase pretty well. If we could repro it, we could probably figure out the root cause faster and with less hassle. But hey, if you're willing to dig through the code on this, count us happy. :)
I'm not sure where you got that I was volunteering to dig through the code? Sorry, I must have been unclear.
All I was asking is for someone else to try to reproduce this bug, before asking me to spend half an hour configuring YCM on a new machine. If that's too much to ask, I'll go away, with sincere apologies for wasting everyone's time.
I can repro with stock YCM on my mac at head. Exact same behavior. Server stderr:
tail -f /var/folders/4l/w5dybvgs0790k5d3x6856drw002dvz/T/ycm_temp/server_56305_stderr.log
2016-05-06 16:28:06,488 - INFO - Received health request
2016-05-06 16:28:06,492 - INFO - Received debug info request
2016-05-06 16:28:37,967 - INFO - Received filetype completion available request
Otherwise the exact same output as @jlebar.
:YcmDebugInfo
Printing YouCompleteMe debug information...
-- Server has Clang support compiled in: False
-- Server running at: http://127.0.0.1:56341
-- Server process ID: 73697
-- Server logfiles:
-- /var/folders/4l/w5dybvgs0790k5d3x6856drw002dvz/T/ycm_temp/server_56341_stdout.log
-- /var/folders/4l/w5dybvgs0790k5d3x6856drw002dvz/T/ycm_temp/server_56341_stderr.log
I'll look into this
| 2016-05-07T17:27:56 |
ycm-core/ycmd | 483 | ycm-core__ycmd-483 | [
"450"
] | 76dd7b9eeaa88f4c97f879d7c9f9f05d969d7440 | diff --git a/ycmd/completers/javascript/tern_completer.py b/ycmd/completers/javascript/tern_completer.py
--- a/ycmd/completers/javascript/tern_completer.py
+++ b/ycmd/completers/javascript/tern_completer.py
@@ -404,12 +404,14 @@ def _StartServerNoLock( self ):
port = self._server_port,
std = 'stderr' )
- # On Windows, we need to open a pipe to stdin to prevent Tern crashing
- # with following error: "Implement me. Unknown stdin file type!"
+ # We need to open a pipe to stdin or the Tern server is killed.
+ # See https://github.com/ternjs/tern/issues/740#issuecomment-203979749
+ # For unknown reasons, this is only needed on Windows and for Python 3.4+
+ # on other platforms.
with utils.OpenForStdHandle( self._server_stdout ) as stdout:
with utils.OpenForStdHandle( self._server_stderr ) as stderr:
self._server_handle = utils.SafePopen( command,
- stdin_windows = PIPE,
+ stdin = PIPE,
stdout = stdout,
stderr = stderr )
except Exception:
| Always open a pipe to stdin for the tern command
The Tern command exits when its stdin closes. If no stdin is provided,
it exits immediately, so we have to provide a stdin to keep it running.
This caused tern to exit right after it was started by YouCompleteMe. I'm a bit curious to why no one else seem to have this problem.
<!-- Reviewable:start -->
---
This change is [<img src="https://reviewable.io/review_button.svg" height="35" align="absmiddle" alt="Reviewable"/>](https://reviewable.io/reviews/valloric/ycmd/450)
<!-- Reviewable:end -->
| I think this will break windows support.
Tern just added a --ignore-stdin flag which we should probably use instead.
[](https://coveralls.io/builds/5656684)
Coverage remained the same at 84.534% when pulling **167c2fcbf72e51631d654799afcd9289a28eba77 on trygveaa:tern-always-open-stdin** into **09f216441f99c02d51f9460e006683c5fe83669e on Valloric:master**.
The only thing `stdin_windows` does is set `stdin` to `PIPE` if `stdin_windows` is `PIPE` and it is running on Windows. So this should not change the behavior for Windows. Why do you think it will break?
I could add `--ignore-stdin` instead, but then I think both `stdin` and `stdin_windows` should be removed. Otherwise, the code would be pretty confusing IMO. If this is done, someone will have to verify that it still works on Windows.
thanks for the explanation.
Well fortunately we have CI to show if Windows support works and it looks like it does :)
I too am curious as to why we have not seen this in testing on other platforms.
In What scenario did you find the issue?
Aha, the error only occurs when running ycmd with python 3. When running it with python 3, tern is run with `stdin` pointed at `/dev/null`. When running it with python 2, `stdin` is pointed at the same place as ycmds `stdin`, which is a pipe, so then it works.
I guess most people run it with python 2, so that's probably why no one else has reported the bug.
Not sure why that happens with python 3 though. The docs for both python 2 and 3 says regarding the `stdin` argument of `Popen`:
> With the default settings of None, no redirection will occur; the child’s file handles will be inherited from the parent.
Regarding the scenario I experience the issue, I just installed YouCompleteMe with Vundle, ran `./install.py --tern-completer`, put `let g:ycm_server_python_interpreter = '/usr/bin/python'` in my vim config and opened a javascript file in vim.
Thanks so much for the additional info. It's still strange - our CI tests run fine on Python 3.
Just to be clear I'm not against merging this (thanks for doing it!). I just like to fully understand _why_ it fixes it :)
I still don't feel I fully understand it :/
Are you sure that the tests cover this?
I could run the tests locally without this patch and see if they fail here (just would have to set up a test environment first, didn't bother for this small change before, since CI runs the tests).
If so, I'm not sure what could be the reason. For info, I'm running Arch Linux with python 3.5.1.
Well. If the tern server didn't start in our tests, then all the JavaScript tests would fail.
I tried running the tests for the javascript completer on my machine with python 3 now. They all passed. Maybe tern is started in some other way in the tests?
The issue only happens starting with Python 3.4. I tested it using the example client.
> Maybe tern is started in some other way in the tests?
No but the ycmd server is not started. This has something to do with the Tern server running inside the ycmd process.
---
Reviewed 1 of 1 files at r1.
Review status: all files reviewed at latest revision, 1 unresolved discussion.
---
_[ycmd/completers/javascript/tern_completer.py, line 401 [r1]](https://reviewable.io:443/reviews/valloric/ycmd/450#-KFTZ4PMeoV5eQv2aQgM:-KFTZ4PMeoV5eQv2aQgN:-400513172) ([raw file](https://github.com/valloric/ycmd/blob/167c2fcbf72e51631d654799afcd9289a28eba77/ycmd/completers/javascript/tern_completer.py#L401)):_
We need to add a note that it happens starting with Python 3.4.
---
_Comments from [Reviewable](https://reviewable.io:443/reviews/valloric/ycmd/450#-:-KEX_E63dC8FRZooqvoq:1085123692)_
<!-- Sent from Reviewable.io -->
| 2016-05-08T15:43:07 |
|
ycm-core/ycmd | 488 | ycm-core__ycmd-488 | [
"486"
] | b3f654946a00e32dd96547b14259bde2b28fd605 | diff --git a/build.py b/build.py
--- a/build.py
+++ b/build.py
@@ -7,6 +7,7 @@
from __future__ import division
from __future__ import absolute_import
+from distutils import sysconfig
from shutil import rmtree
from tempfile import mkdtemp
import errno
@@ -144,15 +145,16 @@ def GetPythonNameOnUnix():
return python_name
-def GetStandardPythonLocationsOnUnix( prefix, name ):
- return ( '{0}/lib/lib{1}'.format( prefix, name ),
- '{0}/include/{1}'.format( prefix, name ) )
+def GetStandardPythonLocationsOnUnix( name ):
+ library_dir = sysconfig.get_config_var( 'LIBDIR' )
+ include_dir = sysconfig.get_config_var( 'INCLUDEDIR' )
+ return p.join( library_dir, 'lib' + name ), p.join( include_dir, name )
def FindPythonLibrariesOnLinux():
python_name = GetPythonNameOnUnix()
python_library_root, python_include = GetStandardPythonLocationsOnUnix(
- sys.exec_prefix, python_name )
+ python_name )
python_library = python_library_root + '.so'
if p.isfile( python_library ):
@@ -177,15 +179,9 @@ def FindPythonLibrariesOnLinux():
def FindPythonLibrariesOnMac():
- python_prefix = sys.exec_prefix
-
- python_library = p.join( python_prefix, 'Python' )
- if p.isfile( python_library ):
- return python_library, p.join( python_prefix, 'Headers' )
-
python_name = GetPythonNameOnUnix()
python_library_root, python_include = GetStandardPythonLocationsOnUnix(
- python_prefix, python_name )
+ python_name )
# On MacOS, ycmd does not work with statically linked python library.
# It typically manifests with the following error when there is a
@@ -220,12 +216,13 @@ def FindPythonLibrariesOnMac():
def FindPythonLibrariesOnWindows():
- python_prefix = sys.exec_prefix
python_name = 'python' + str( PY_MAJOR ) + str( PY_MINOR )
- python_library = p.join( python_prefix, 'libs', python_name + '.lib' )
+ python_include = sysconfig.get_config_var( 'INCLUDEPY' )
+ python_library = p.join(
+ p.dirname( python_include ), 'libs', python_name + '.lib' )
if p.isfile( python_library ):
- return python_library, p.join( python_prefix, 'include' )
+ return python_library, python_include
sys.exit( NO_PYTHON_LIBRARY_ERROR )
| Not compatible with virtualenv.
I use a Mac machine and created a python virtual environment use virtualenv on `~/pyvenv`, so the `sys.exec_prefix` is `'~/pyvenv/bin/..'`
When I run install.py in virtualenv, the `FindPythonLibrariesOnMac()` function expect to find the python library `~/pyvenv/lib/libpython2.7.dylib`, it seems that virtualenv not made the symlink, so the script report "unable to find an appropriate Python library", and I have to manual add this link.
Prior version of ycmd could find correct path of python library, so keep the original behaviour when FindPythonLibraries\* functions find nothing may be a better choice for backward compatibility.
| 2016-05-12T16:26:05 |
||
ycm-core/ycmd | 499 | ycm-core__ycmd-499 | [
"502"
] | 18fc8d8fbcb81cd29df53c2d753b9d3cef968d8f | diff --git a/build.py b/build.py
--- a/build.py
+++ b/build.py
@@ -11,11 +11,11 @@
from shutil import rmtree
from tempfile import mkdtemp
import errno
-import re
import multiprocessing
import os
import os.path as p
import platform
+import re
import shlex
import subprocess
import sys
@@ -49,12 +49,25 @@
'before installing a Python version.' )
NO_PYTHON_LIBRARY_ERROR = 'ERROR: unable to find an appropriate Python library.'
-LIBRARY_LDCONFIG_REGEX = re.compile(
- '(?P<library>\S+) \(.*\) => (?P<path>\S+)' )
-
-
-def OnLinux():
- return platform.system() == 'Linux'
+# Regular expressions used to find static and dynamic Python libraries.
+# Notes:
+# - Python 3 library name may have an 'm' suffix on Unix platforms, for
+# instance libpython3.3m.so;
+# - the linker name (the soname without the version) does not always
+# exist so we look for the versioned names too;
+# - on Windows, the .lib extension is used instead of the .dll one. See
+# http://xenophilia.org/winvunix.html to understand why.
+STATIC_PYTHON_LIBRARY_REGEX = '^libpython{major}\.{minor}m?\.a$'
+DYNAMIC_PYTHON_LIBRARY_REGEX = """
+ ^(?:
+ # Linux, BSD
+ libpython{major}\.{minor}m?\.so(\.\d+)*|
+ # OS X
+ libpython{major}\.{minor}m?\.dylib|
+ # Windows
+ python{major}{minor}\.lib
+ )$
+"""
def OnMac():
@@ -137,110 +150,60 @@ def CheckOutput( *popen_args, **kwargs ):
return output
-def GetPythonNameOnUnix():
- python_name = 'python' + str( PY_MAJOR ) + '.' + str( PY_MINOR )
- # Python 3 has an 'm' suffix on Unix platforms, for instance libpython3.3m.so.
- if PY_MAJOR == 3:
- python_name += 'm'
- return python_name
-
-
-def GetStandardPythonLocationsOnUnix( name ):
- library_dir = sysconfig.get_config_var( 'LIBDIR' )
- include_dir = sysconfig.get_config_var( 'INCLUDEDIR' )
- return p.join( library_dir, 'lib' + name ), p.join( include_dir, name )
-
-
-def FindPythonLibrariesOnLinux():
- python_name = GetPythonNameOnUnix()
- python_library_root, python_include = GetStandardPythonLocationsOnUnix(
- python_name )
-
- python_library = python_library_root + '.so'
- if p.isfile( python_library ):
- return python_library, python_include
-
- python_library = python_library_root + '.a'
- if p.isfile( python_library ):
- sys.exit( NO_DYNAMIC_PYTHON_ERROR.format( library = python_library,
- flag = '--enable-shared' ) )
-
- # On some distributions (Ubuntu for instance), the Python system library is
- # not installed in its default path: /usr/lib. We use the ldconfig tool to
- # find it.
- python_library = 'lib' + python_name + '.so'
- ldconfig_output = CheckOutput( [ 'ldconfig', '-p' ] ).strip().decode( 'utf8' )
- for line in ldconfig_output.splitlines():
- match = LIBRARY_LDCONFIG_REGEX.search( line )
- if match and match.group( 'library' ) == python_library:
- return match.group( 'path' ), python_include
-
- sys.exit( NO_PYTHON_LIBRARY_ERROR )
-
-
-def FindPythonLibrariesOnMac():
- python_name = GetPythonNameOnUnix()
- python_library_root, python_include = GetStandardPythonLocationsOnUnix(
- python_name )
-
- # On MacOS, ycmd does not work with statically linked python library.
- # It typically manifests with the following error when there is a
- # self-compiled python without --enable-framework (or, technically
- # --enable-shared):
+def FindPythonLibraries():
+ include_dir = sysconfig.get_python_inc()
+ # get_python_lib with the standard_lib parameter set to True returns the
+ # standard Python modules directory. Python libraries should always be in
+ # the parent directory or one of its subdirectories.
+ library_dir = p.dirname( sysconfig.get_python_lib( standard_lib = True ) )
+
+ # Since ycmd is compiled as a dynamic library, we can't link it to a Python
+ # static library. If we try, the following error will occur on Mac:
#
# Fatal Python error: PyThreadState_Get: no current thread
#
- # The most likely explanation for this is that both the ycm_core.so and the
- # python binary include copies of libpython.a (or whatever included
- # objects). When the python interpreter starts it initializes only the
- # globals within its copy, so when ycm_core.so's copy starts executing, it
- # points at its own copy which is uninitialized.
+ # while the error happens during linking on Linux and looks something like:
#
- # Some platforms' dynamic linkers (ld.so) are able to resolve this when
- # loading shared libraries at runtime[citation needed], but OSX seemingly
- # cannot.
+ # relocation R_X86_64_32 against `a local symbol' can not be used when
+ # making a shared object; recompile with -fPIC
#
- # So we do 2 things special on OS X:
- # - look for a .dylib first
- # - if we find a .a, raise an error.
- python_library = python_library_root + '.dylib'
- if p.isfile( python_library ):
- return python_library, python_include
-
- python_library = python_library_root + '.a'
- if p.isfile( python_library ):
- sys.exit( NO_DYNAMIC_PYTHON_ERROR.format( library = python_library,
- flag = '--enable-framework' ) )
-
- sys.exit( NO_PYTHON_LIBRARY_ERROR )
-
-
-def FindPythonLibrariesOnWindows():
- python_name = 'python' + str( PY_MAJOR ) + str( PY_MINOR )
-
- python_include = sysconfig.get_config_var( 'INCLUDEPY' )
- python_library = p.join(
- p.dirname( python_include ), 'libs', python_name + '.lib' )
- if p.isfile( python_library ):
- return python_library, python_include
+ # On Windows, the Python library is always a dynamic one (an import library to
+ # be exact). To obtain a dynamic library on other platforms, Python must be
+ # compiled with the --enable-shared flag on Linux or the --enable-framework
+ # flag on Mac.
+ #
+ # So we proceed like this:
+ # - look for a dynamic library and return its path;
+ # - if a static library is found instead, raise an error with instructions
+ # on how to build Python as a dynamic library.
+ # - if no libraries are found, raise a generic error.
+ dynamic_name = re.compile( DYNAMIC_PYTHON_LIBRARY_REGEX.format(
+ major = PY_MAJOR, minor = PY_MINOR ), re.X )
+ static_name = re.compile( STATIC_PYTHON_LIBRARY_REGEX.format(
+ major = PY_MAJOR, minor = PY_MINOR ), re.X )
+ static_libraries = []
+
+ # We search the Python libraries through the library directory and its
+ # subdirectories.
+ for root, dirs, files in os.walk( library_dir ):
+ # Files are sorted so that we found the non-versioned Python library before
+ # the versioned one.
+ for filename in sorted( files ):
+ if dynamic_name.match( filename ):
+ return p.join( root, filename ), include_dir
+
+ if static_name.match( filename ):
+ static_libraries.append( p.join( root, filename ) )
+
+ if static_libraries and not OnWindows():
+ dynamic_flag = ( '--enable-framework' if OnMac() else
+ '--enable-shared' )
+ sys.exit( NO_DYNAMIC_PYTHON_ERROR.format( library = static_libraries[ 0 ],
+ flag = dynamic_flag ) )
sys.exit( NO_PYTHON_LIBRARY_ERROR )
-def FindPythonLibraries():
- if OnLinux():
- return FindPythonLibrariesOnLinux()
-
- if OnMac():
- return FindPythonLibrariesOnMac()
-
- if OnWindows():
- return FindPythonLibrariesOnWindows()
-
- sys.exit( 'ERROR: your platform is not supported by this script. Follow the '
- 'Full Installation Guide instructions in the documentation.' )
-
-
def CustomPythonCmakeArgs():
# The CMake 'FindPythonLibs' Module does not work properly.
# So we are forced to do its job for it.
| Fails to install on 64-bit fedora core 23
The reason is that libpython is under `/usr/lib64/libpython2.7.so` (whereas `build.py` expects it in `/usr/lib/libpython2.7.so`), so the installation fails. I believe this may also occur on other distros.
| 2016-05-18T14:35:28 |
||
ycm-core/ycmd | 519 | ycm-core__ycmd-519 | [
"518"
] | bdab3d6c5f0eb04ade224643ee9a3deece8288d6 | diff --git a/build.py b/build.py
--- a/build.py
+++ b/build.py
@@ -150,12 +150,19 @@ def CheckOutput( *popen_args, **kwargs ):
return output
+def GetPossiblePythonLibraryDirectories():
+ library_dir = p.dirname( sysconfig.get_python_lib( standard_lib = True ) )
+ if OnWindows():
+ return [ p.join( library_dir, 'libs' ) ]
+ # On pyenv, there is no Python dynamic library in the directory returned by
+ # the LIBPL variable. Such library is located in the parent folder of the
+ # standard Python library modules.
+ return [ sysconfig.get_config_var( 'LIBPL' ), library_dir ]
+
+
def FindPythonLibraries():
include_dir = sysconfig.get_python_inc()
- # get_python_lib with the standard_lib parameter set to True returns the
- # standard Python modules directory. Python libraries should always be in
- # the parent directory or one of its subdirectories.
- library_dir = p.dirname( sysconfig.get_python_lib( standard_lib = True ) )
+ library_dirs = GetPossiblePythonLibraryDirectories()
# Since ycmd is compiled as a dynamic library, we can't link it to a Python
# static library. If we try, the following error will occur on Mac:
@@ -183,17 +190,15 @@ def FindPythonLibraries():
major = PY_MAJOR, minor = PY_MINOR ), re.X )
static_libraries = []
- # We search the Python libraries through the library directory and its
- # subdirectories.
- for root, dirs, files in os.walk( library_dir ):
+ for library_dir in library_dirs:
# Files are sorted so that we found the non-versioned Python library before
# the versioned one.
- for filename in sorted( files ):
+ for filename in sorted( os.listdir( library_dir ) ):
if dynamic_name.match( filename ):
- return p.join( root, filename ), include_dir
+ return p.join( library_dir, filename ), include_dir
if static_name.match( filename ):
- static_libraries.append( p.join( root, filename ) )
+ static_libraries.append( p.join( library_dir, filename ) )
if static_libraries and not OnWindows():
dynamic_flag = ( '--enable-framework' if OnMac() else
| build.py links to the wrong dynamic python library during installation
I recently forked this repository for my personal use and had to make an adjustment to the installation process. The final, working adjustment is [here](https://github.com/jmenashe/ycmd/commit/b15ffd00484402b6379d7e731b940ee9300515ad). A note made by @micbou on the [previous commit](https://github.com/jmenashe/ycmd/commit/c8be763ddc0e4a960ce68e96c0d06149041b9451) requested that I provide some details on the problem and my machine configuration.
My OS is Ubuntu 14.04 64-bit. I don't recall the exact python version that was found by the `build.py` script but essentially it found a debug version of the dynamic library `libpython2.7.so`. Since I use the release version of Python 2.7 as my interpreter, I needed the script to instead use the release version of `libpython2.7.so`. Linking against the wrong version would cause the YCM server to crash immediately upon launching vim without providing any logging output, as would be expected with a link error. The modification I made in [this commit](https://github.com/jmenashe/ycmd/commit/b15ffd00484402b6379d7e731b940ee9300515ad) enabled the build script to find the release version of the dynamic library in what I consider to be a reasonably robust manner.
I hope this information is helpful but let me know if I can provide more details. Thank you for putting together this wonderful library!
| Thanks for reporting the issue. I tried the script on one of my machine with Ubuntu 14.04 LTS 64-bit installed and it found this Python library:
```
/usr/lib/x86_64-linux-gnu/libpython2.7.so
```
which is not the debug version of the Python library (at least on my machine).
I am rather surprised that it found the debug version on your machine because its name should be `libpython2.7_d.so` (with the `_d` suffix) and the script only search for `libpython2.7.so`.
Could you try the script without your change and give us the Python library path it found? This would be really helpful.
The file it linked to is `/usr/lib/debug/usr/lib/x86_64-linux-gnu/libpython2.7.so.1.0`. This is from the `python2.7-dbg` package. Removing that package fixes the problem for me.
| 2016-06-08T14:47:06 |
|
ycm-core/ycmd | 542 | ycm-core__ycmd-542 | [
"541"
] | 525a971179be8cf013e9e3bb8f530ad89a1374f5 | diff --git a/ycmd/completers/cpp/clang_completer.py b/ycmd/completers/cpp/clang_completer.py
--- a/ycmd/completers/cpp/clang_completer.py
+++ b/ycmd/completers/cpp/clang_completer.py
@@ -338,7 +338,7 @@ def OnFileReadyToParse( self, request_data ):
def OnBufferUnload( self, request_data ):
self._completer.DeleteCachesForFile(
- ToCppStringCompatible( request_data[ 'unloaded_buffer' ] ) )
+ ToCppStringCompatible( request_data[ 'filepath' ] ) )
def GetDetailedDiagnostic( self, request_data ):
| diff --git a/ycmd/tests/typescript/event_notification_test.py b/ycmd/tests/typescript/event_notification_test.py
new file mode 100644
--- /dev/null
+++ b/ycmd/tests/typescript/event_notification_test.py
@@ -0,0 +1,116 @@
+# Copyright (C) 2016 ycmd contributors
+#
+# This file is part of ycmd.
+#
+# ycmd is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# ycmd is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with ycmd. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import absolute_import
+from __future__ import unicode_literals
+from __future__ import print_function
+from __future__ import division
+from future import standard_library
+standard_library.install_aliases()
+from builtins import * # noqa
+
+from hamcrest import assert_that, contains, has_entries
+
+from ycmd.tests.typescript import IsolatedYcmd, PathToTestFile
+from ycmd.tests.test_utils import ( BuildRequest, ClearCompletionsCache,
+ CompletionEntryMatcher )
+from ycmd.utils import ReadFile
+
+
+@IsolatedYcmd
+def EventNotification_OnBufferUnload_CloseFile_test( app ):
+ # Open main.ts file in a buffer.
+ main_filepath = PathToTestFile( 'buffer_unload', 'main.ts' )
+ main_contents = ReadFile( main_filepath )
+
+ event_data = BuildRequest( filepath = main_filepath,
+ filetype = 'typescript',
+ contents = main_contents,
+ event_name = 'BufferVisit' )
+ app.post_json( '/event_notification', event_data )
+
+ # Complete in main.ts buffer an object defined in imported.ts.
+ completion_data = BuildRequest( filepath = main_filepath,
+ filetype = 'typescript',
+ contents = main_contents,
+ line_num = 3,
+ column_num = 10 )
+ response = app.post_json( '/completions', completion_data )
+ assert_that( response.json, has_entries( {
+ 'completions': contains( CompletionEntryMatcher( 'method' ) ) } ) )
+ # FIXME: we should not have to clear the cache.
+ ClearCompletionsCache()
+
+ # Open imported.ts file in another buffer.
+ imported_filepath = PathToTestFile( 'buffer_unload', 'imported.ts' )
+ imported_contents = ReadFile( imported_filepath )
+
+ event_data = BuildRequest( filepath = imported_filepath,
+ filetype = 'typescript',
+ contents = imported_contents,
+ event_name = 'BufferVisit' )
+ app.post_json( '/event_notification', event_data )
+
+ # Modify imported.ts buffer without writing the changes to disk.
+ modified_imported_contents = imported_contents.replace( 'method',
+ 'modified_method' )
+
+ # FIXME: TypeScript completer should not rely on the FileReadyToParse events
+ # to synchronize the contents of dirty buffers but use instead the file_data
+ # field of the request.
+ event_data = BuildRequest( filepath = imported_filepath,
+ filetype = 'typescript',
+ contents = modified_imported_contents,
+ event_name = 'FileReadyToParse' )
+ app.post_json( '/event_notification', event_data )
+
+ # Complete at same location in main.ts buffer.
+ imported_data = {
+ imported_filepath: {
+ 'filetypes': [ 'typescript' ],
+ 'contents': modified_imported_contents
+ }
+ }
+ completion_data = BuildRequest( filepath = main_filepath,
+ filetype = 'typescript',
+ contents = main_contents,
+ line_num = 3,
+ column_num = 10,
+ file_data = imported_data )
+ response = app.post_json( '/completions', completion_data )
+ assert_that( response.json, has_entries( {
+ 'completions': contains( CompletionEntryMatcher( 'modified_method' ) ) } )
+ )
+ # FIXME: we should not have to clear the cache.
+ ClearCompletionsCache()
+
+ # Unload imported.ts buffer.
+ event_data = BuildRequest( filepath = imported_filepath,
+ filetype = 'typescript',
+ contents = imported_contents,
+ event_name = 'BufferUnload' )
+ app.post_json( '/event_notification', event_data )
+
+ # Complete at same location in main.ts buffer.
+ completion_data = BuildRequest( filepath = main_filepath,
+ filetype = 'typescript',
+ contents = main_contents,
+ line_num = 3,
+ column_num = 10 )
+ response = app.post_json( '/completions', completion_data )
+ assert_that( response.json, has_entries( {
+ 'completions': contains( CompletionEntryMatcher( 'method' ) ) } ) )
diff --git a/ycmd/tests/typescript/testdata/buffer_unload/imported.ts b/ycmd/tests/typescript/testdata/buffer_unload/imported.ts
new file mode 100644
--- /dev/null
+++ b/ycmd/tests/typescript/testdata/buffer_unload/imported.ts
@@ -0,0 +1,4 @@
+export class Imported {
+ method() {
+ }
+}
diff --git a/ycmd/tests/typescript/testdata/buffer_unload/main.ts b/ycmd/tests/typescript/testdata/buffer_unload/main.ts
new file mode 100644
--- /dev/null
+++ b/ycmd/tests/typescript/testdata/buffer_unload/main.ts
@@ -0,0 +1,3 @@
+import { Imported } from "./imported";
+let imported = new Imported();
+imported.
| Why is clang_completer requesting `unloaded_buffer` key in OnBufferUnload
Would it be possible to just reuse `filepath` in `OnBufferUnload` in `clang_completer` instead of having to specify `unloaded_buffer`. The typescript completer is already using `filepath`. It would be nice to align both.
| This is a bug in the TypeScript completer. It should use `unloaded_buffer` instead of `filepath` because a buffer can be unloaded while not being the current buffer. I'll send a PR to fix this.
Ok I see. Thanks for clarifying. We have a pull request regading adding `BufferVisit` and `BufferUnload` in `emacs-ycmd` which brought up this.
| 2016-07-12T15:36:43 |
ycm-core/ycmd | 544 | ycm-core__ycmd-544 | [
"543"
] | 5d7a677129fa9796bb40c94824dfee56c0bd35c2 | diff --git a/build.py b/build.py
--- a/build.py
+++ b/build.py
@@ -427,7 +427,7 @@ def SetUpTern():
# node_modules of the Tern runtime. We also want to be able to install our
# own plugins to improve the user experience for all users.
#
- # This is not possible if we use a git submodle for Tern and simply run 'npm
+ # This is not possible if we use a git submodule for Tern and simply run 'npm
# install' within the submodule source directory, as subsequent 'npm install
# tern-my-plugin' will (heinously) install another (arbitrary) version of Tern
# within the Tern source tree (e.g. third_party/tern/node_modules/tern. The
@@ -438,19 +438,7 @@ def SetUpTern():
# So instead, we have a package.json within our "Tern runtime" directory
# (third_party/tern_runtime) that defines the packages that we require,
# including Tern and any plugins which we require as standard.
- TERN_RUNTIME_DIR = os.path.join( DIR_OF_THIS_SCRIPT,
- 'third_party',
- 'tern_runtime' )
- try:
- os.makedirs( TERN_RUNTIME_DIR )
- except Exception:
- # os.makedirs throws if the dir already exists, it also throws if the
- # permissions prevent creating the directory. There's no way to know the
- # difference, so we just let the call to os.chdir below throw if this fails
- # to create the target directory.
- pass
-
- os.chdir( TERN_RUNTIME_DIR )
+ os.chdir( p.join( DIR_OF_THIS_SCRIPT, 'third_party', 'tern_runtime' ) )
subprocess.check_call( [ paths[ 'npm' ], 'install', '--production' ] )
| Update package.json ...ycmd/third_party/tern_runtime/package.json
This tool is indispensable for **JS/node autocomplete**
The latest version of **tern.js is 0.19**
| 2016-07-12T19:21:16 |
||
ycm-core/ycmd | 547 | ycm-core__ycmd-547 | [
"534"
] | 683b6e4af36d918a0ff643b10ba794a3b2a0b2a0 | diff --git a/ycmd/completers/cpp/flags.py b/ycmd/completers/cpp/flags.py
--- a/ycmd/completers/cpp/flags.py
+++ b/ycmd/completers/cpp/flags.py
@@ -39,11 +39,19 @@
# We need to remove --fcolor-diagnostics because it will cause shell escape
# sequences to show up in editors, which is bad. See Valloric/YouCompleteMe#1421
-STATE_FLAGS_TO_SKIP = set(['-c', '-MP', '--fcolor-diagnostics'])
+STATE_FLAGS_TO_SKIP = set( [ '-c',
+ '-MP',
+ '-MD',
+ '-MMD',
+ '--fcolor-diagnostics' ] )
# The -M* flags spec:
# https://gcc.gnu.org/onlinedocs/gcc-4.9.0/gcc/Preprocessor-Options.html
-FILE_FLAGS_TO_SKIP = set(['-MD', '-MMD', '-MF', '-MT', '-MQ', '-o'])
+FILE_FLAGS_TO_SKIP = set( [ '-MF',
+ '-MT',
+ '-MQ',
+ '-o',
+ '--serialize-diagnostics' ] )
# Use a regex to correctly detect c++/c language for both versioned and
# non-versioned compiler executable names suffixes
@@ -266,9 +274,11 @@ def _RemoveUnusedFlags( flags, filename ):
previous_flag_is_include = False
previous_flag_starts_with_dash = False
current_flag_starts_with_dash = False
+
for flag in flags:
previous_flag_starts_with_dash = current_flag_starts_with_dash
current_flag_starts_with_dash = flag.startswith( '-' )
+
if skip_next:
skip_next = False
continue
@@ -377,9 +387,9 @@ def _ExtraClangFlags():
if OnMac():
for path in MAC_INCLUDE_PATHS:
flags.extend( [ '-isystem', path ] )
- # On Windows, parsing of templates is delayed until instantation time.
- # This makes GetType and GetParent commands not returning the expected
- # result when the cursor is in templates.
+ # On Windows, parsing of templates is delayed until instantiation time.
+ # This makes GetType and GetParent commands fail to return the expected
+ # result when the cursor is in a template.
# Using the -fno-delayed-template-parsing flag disables this behavior.
# See
# http://clang.llvm.org/extra/PassByValueTransform.html#note-about-delayed-template-parsing # noqa
| diff --git a/ycmd/tests/clang/flags_test.py b/ycmd/tests/clang/flags_test.py
--- a/ycmd/tests/clang/flags_test.py
+++ b/ycmd/tests/clang/flags_test.py
@@ -28,6 +28,8 @@
from mock import patch, Mock
from ycmd.tests.test_utils import MacOnly
+from hamcrest import assert_that, contains
+
@patch( 'ycmd.extra_conf_store.ModuleForSourceFile', return_value = Mock() )
def FlagsForFile_BadNonUnicodeFlagsAreAlsoRemoved_test( *args ):
@@ -162,6 +164,27 @@ def RemoveUnusedFlags_RemoveFlagWithoutPrecedingDashFlag_test():
filename ) )
+def RemoveUnusedFlags_Depfiles_test():
+ full_flags = [
+ '/bin/clang',
+ '-x', 'objective-c',
+ '-arch', 'armv7',
+ '-MMD',
+ '-MT', 'dependencies',
+ '-MF', 'file',
+ '--serialize-diagnostics', 'diagnostics'
+ ]
+
+ expected = [
+ '/bin/clang',
+ '-x', 'objective-c',
+ '-arch', 'armv7',
+ ]
+
+ assert_that( flags._RemoveUnusedFlags( full_flags, 'test.m' ),
+ contains( *expected ) )
+
+
def RemoveUnusedFlags_RemoveFilenameWithoutPrecedingInclude_test():
def tester( flag ):
expected = [ 'clang', flag, '/foo/bar', '-isystem/zoo/goo' ]
| Code Completion for IOS project.
I'm trying to set up ycmd with completions for objective-c/iOS based from an xcode project. Semantic completions don't seem to be working.
I've set up a sample project here.
https://github.com/jojojames/DummyXcode
.ycm_extra_conf located here
https://github.com/jojojames/DummyXcode/blob/master/.ycm_extra_conf.py
compile_commands.json here
https://github.com/jojojames/DummyXcode/blob/master/compile_commands.json
I had a similar problem (using the same sample project as above) described here which was resolved through blacklisting some compile flags.
https://github.com/Andersbakken/rtags/issues/738
Not sure where to get a complete log (down to the clang error)
2016-06-28 10:45:16,497 - ERROR - Exception from semantic completer (using general): Traceback (most recent call last):
File "/Users/james/.vim/dein/repos/github.com/Valloric/YouCompleteMe/third_party/ycmd/ycmd/../ycmd/handlers.py", line 112, in GetCompletions
.ComputeCandidates( request_data ) )
File "/Users/james/.vim/dein/repos/github.com/Valloric/YouCompleteMe/third_party/ycmd/ycmd/../ycmd/completers/completer.py", line 168, in ComputeCandidates
candidates = self._GetCandidatesFromSubclass( request_data )
File "/Users/james/.vim/dein/repos/github.com/Valloric/YouCompleteMe/third_party/ycmd/ycmd/../ycmd/completers/completer.py", line 184, in _GetCandidatesFromSubclass
raw_completions = self.ComputeCandidatesInner( request_data )
File "/Users/james/.vim/dein/repos/github.com/Valloric/YouCompleteMe/third_party/ycmd/ycmd/../ycmd/completers/cpp/clang_completer.py", line 112, in ComputeCandidatesInner
raise RuntimeError( NO_COMPLETIONS_MESSAGE )
RuntimeError: No completions found; errors in the file?
Using a more barebones ycm file that only specifies flags gets 70-80% of completions working.
https://github.com/jojojames/.emacs.d/blob/master/lang/objc/ycm_conf.py
| Thanks for the detailed report. can you grab the output of :YcmDebugInfo? Also, what is the output of :YcmDiags?
Ultimately, the set of flags passed to libclang means that it can't parse your files correctly. Either your extra conf is passing the wrong flags, or there is some other issue. Usually it is the former, though it looks like you're using a compilation database, so it could be that we need to blacklist more flags like the rtags folk (who also use libclang to parse your code)
The extra diags will help us work that out.
I'm using emacs-ycmd as a frontend for ycmd.
I tried opening the file in vim but got 'Native filetype completion not supported for current file, cannot force recompilation.' Might be something with my config for vim (haven't touched vim in a while).
Here's some debug info (just hunted around for some debug command `ycmd-show-debug-info'.
ycmd debug information for buffer AppDelegate.m in objc-mode:
Server has Clang support compiled in: True
Clang version: clang version 3.7.0 (tags/RELEASE_370/final)
Flags for /Users/james/Developer/DummyXcode/DummyXcode/AppDelegate.m loaded from /Users/james/Developer/DummyXcode/.ycm_extra_conf.py:
['/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/bin/clang', '-x', 'c', '-x', 'objective-c', '-fmessage-length=0', '-fdiagnostics-show-note-include-stack', '-fmacro-backtrace-limit=0', '-std=gnu99', '-fobjc-arc', '-fmodules', '-fmodules-prune-interval=86400', '-fmodules-prune-after=345600', '-fbuild-session-file=/var/folders/k2/y7hdq90906d_5pgbt3_pt9jw0000gn/C/org.llvm.clang/ModuleCache/Session.modulevalidation', '-fmodules-validate-once-per-build-session', '-Wnon-modular-include-in-framework-module', '-Werror=non-modular-include-in-framework-module', '-Wno-trigraphs', '-fpascal-strings', '-Os', '-fno-common', '-Wno-missing-field-initializers', '-Wno-missing-prototypes', '-Werror=return-type', '-Wunreachable-code', '-Wno-implicit-atomic-properties', '-Werror=deprecated-objc-isa-usage', '-Werror=objc-root-class', '-Wno-arc-repeated-use-of-weak', '-Wduplicate-method-match', '-Wno-missing-braces', '-Wparentheses', '-Wswitch', '-Wunused-function', '-Wno-unused-label', '-Wno-unused-parameter', '-Wunused-variable', '-Wunused-value', '-Wempty-body', '-Wconditional-uninitialized', '-Wno-unknown-pragmas', '-Wno-shadow', '-Wno-four-char-constants', '-Wno-conversion', '-Wconstant-conversion', '-Wint-conversion', '-Wbool-conversion', '-Wenum-conversion', '-Wshorten-64-to-32', '-Wpointer-sign', '-Wno-newline-eof', '-Wno-selector', '-Wno-strict-selector-match', '-Wundeclared-selector', '-Wno-deprecated-implementations', '-DNS_BLOCK_ASSERTIONS=1', '-DOBJC_OLD_DISPATCH_PROTOTYPES=0', '-isysroot', '/Applications/Xcode.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS9.3.sdk', '-fstrict-aliasing', '-Wprotocol', '-Wdeprecated-declarations', '-miphoneos-version-min=9.3', '-g', '-fvisibility=hidden', '-Wno-sign-conversion', '-fembed-bitcode-marker', '-iquote', '/Users/james/Developer/DummyXcode/build/DummyXcode.build/Release-iphoneos/DummyXcode.build/DummyXcode-generated-files.hmap', 
'-I/Users/james/Developer/DummyXcode/build/DummyXcode.build/Release-iphoneos/DummyXcode.build/DummyXcode-own-target-headers.hmap', '-I/Users/james/Developer/DummyXcode/build/DummyXcode.build/Release-iphoneos/DummyXcode.build/DummyXcode-all-target-headers.hmap', '-iquote', '/Users/james/Developer/DummyXcode/build/DummyXcode.build/Release-iphoneos/DummyXcode.build/DummyXcode-project-headers.hmap', '-I/Users/james/Developer/DummyXcode/build/Release-iphoneos/include', '-I/Users/james/Developer/DummyXcode/build/DummyXcode.build/Release-iphoneos/DummyXcode.build/DerivedSources/armv7', '-I/Users/james/Developer/DummyXcode/build/DummyXcode.build/Release-iphoneos/DummyXcode.build/DerivedSources', '-F/Users/james/Developer/DummyXcode/build/Release-iphoneos', 'dependencies', '--serialize-diagnostics', '-resource-dir=/Users/james/.vim/dein/repos/github.com/Valloric/YouCompleteMe/third_party/ycmd/ycmd/../clang_includes', '-isystem', '/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/include/c++/v1', '-isystem', '/usr/local/include', '-isystem', '/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/include', '-isystem', '/usr/include', '-isystem', '/System/Library/Frameworks', '-isystem', '/Library/Frameworks']
Server running at: 127.0.0.1:63842
Maybe @abingham can point me in the right direction for the matching Emacs commands.
I was able to determine that the flag which causes `libclang` to barf is `-MT dependencies`.
I discovered this by trial and error, by manually setting flags in `.ycm_extra_conf.py`.
The net result is that I now get the "Could not build module 'UIKit'" error which you also reported to tags. I don't know how you solved that, so I can't really progress (modules are a dark magic to me), but I think it is clear that the issue is the set of flags passed to libclang, not an issue with YCM itself.
@r4nt @d0k any clue why `-MT dependencies` might cause `libclang` to be unable to parse a file?
```
-MT <value> Specify name of main file output in depfile
```
Full flags are as above or more readable):
```
flags = [
'/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/bin/clang',
'-x', 'objective-c',
'-arch', 'armv7',
'-fmessage-length=0',
'-fdiagnostics-show-note-include-stack',
'-fmacro-backtrace-limit=0',
'-std=gnu99',
'-fobjc-arc',
'-fmodules',
'-fmodules-prune-interval=86400',
'-fmodules-prune-after=345600',
'-fbuild-session-file=' + os.tempnam(),
'-fmodules-validate-once-per-build-session',
'-Wnon-modular-include-in-framework-module',
'-Werror=non-modular-include-in-framework-module',
'-Wno-trigraphs',
'-fpascal-strings',
'-Os',
'-fno-common',
'-Wno-missing-field-initializers',
'-Wno-missing-prototypes',
'-Werror=return-type',
'-Wunreachable-code',
'-Wno-implicit-atomic-properties',
'-Werror=deprecated-objc-isa-usage',
'-Werror=objc-root-class',
'-Wno-arc-repeated-use-of-weak',
'-Wduplicate-method-match',
'-Wno-missing-braces',
'-Wparentheses',
'-Wswitch',
'-Wunused-function',
'-Wno-unused-label',
'-Wno-unused-parameter',
'-Wunused-variable',
'-Wunused-value',
'-Wempty-body',
'-Wconditional-uninitialized',
'-Wno-unknown-pragmas',
'-Wno-shadow',
'-Wno-four-char-constants',
'-Wno-conversion',
'-Wconstant-conversion',
'-Wint-conversion',
'-Wbool-conversion',
'-Wenum-conversion',
'-Wshorten-64-to-32',
'-Wpointer-sign',
'-Wno-newline-eof',
'-Wno-selector',
'-Wno-strict-selector-match',
'-Wundeclared-selector',
'-Wno-deprecated-implementations',
'-DNS_BLOCK_ASSERTIONS=1',
'-DOBJC_OLD_DISPATCH_PROTOTYPES=0',
'-isysroot', '/Applications/Xcode.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS9.3.sdk',
'-fstrict-aliasing',
'-Wprotocol',
'-Wdeprecated-declarations',
'-miphoneos-version-min=9.3',
'-g',
'-fvisibility=hidden',
'-Wno-sign-conversion',
'-fembed-bitcode-marker',
'-iquote', '/Users/ben/Development/YouCompleteMe/DummyXcode/build/DummyXcode.build/Release-iphoneos/DummyXcode.build/DummyXcode-generated-files.hmap',
'-I/Users/ben/Development/YouCompleteMe/DummyXcode/build/DummyXcode.build/Release-iphoneos/DummyXcode.build/DummyXcode-own-target-headers.hmap',
'-I/Users/ben/Development/YouCompleteMe/DummyXcode/build/DummyXcode.build/Release-iphoneos/DummyXcode.build/DummyXcode-all-target-headers.hmap',
'-iquote', '/Users/ben/Development/YouCompleteMe/DummyXcode/build/DummyXcode.build/Release-iphoneos/DummyXcode.build/DummyXcode-project-headers.hmap',
'-I/Users/ben/Development/YouCompleteMe/DummyXcode/build/Release-iphoneos/include',
'-I/Users/ben/Development/YouCompleteMe/DummyXcode/build/DummyXcode.build/Release-iphoneos/DummyXcode.build/DerivedSources/armv7',
'-I/Users/ben/Development/YouCompleteMe/DummyXcode/build/DummyXcode.build/Release-iphoneos/DummyXcode.build/DerivedSources',
'-F/Users/ben/Development/YouCompleteMe/DummyXcode/build/Release-iphoneos',
'-MMD',
# '-MT', 'dependencies',
'-MF', '/Users/ben/Development/YouCompleteMe/DummyXcode/build/DummyXcode.build/Release-iphoneos/DummyXcode.build/Objects-normal/armv7/AppDelegate.d',
'--serialize-diagnostics', '/Users/ben/Development/YouCompleteMe/DummyXcode/build/DummyXcode.build/Release-iphoneos/DummyXcode.build/Objects-normal/armv7/AppDelegate.dia',
]
```
We can blacklist the depfiles-related flags I suppose, but I would like to understand a bit better before doing so.
Seems Rtags blacklists -MT also.
https://github.com/Andersbakken/rtags/commit/23c6c00fc70e6fa2b314e243cc75c9546a3c7e5f
@puremourning I believed we already put `-MT` to the blacklist 😕
https://github.com/Valloric/ycmd/blob/master/ycmd/completers/cpp/flags.py#L46
@vheon you're quite right.
And it is broken somehow.
```
:YcmDebugInfo
...
'-F/Users/ben/Development/YouCompleteMe/DummyXcode/build/Release-iphoneos', 'dependencies',
```
It removed `-MT` but not `dependencies`.
@puremourning will you look into it? or should I?
I'll take a look now.
OK i see the problem.
`-MD` and `-MMD` are don't take a file argument, but we have said they do, which leads to malformed flag lists.
I'll prep a PR.
Having a left-over 'dependencies' in there would make it barf. The question is why you need to blacklist -MT (I see no reason why, as with -fsyntax-only the dep file will not be written anyway)
| 2016-07-16T22:11:22 |
ycm-core/ycmd | 548 | ycm-core__ycmd-548 | [
"546"
] | 4ee8b4662c99db1716108bff25a4cb77d2860865 | diff --git a/ycmd/completers/cpp/flags.py b/ycmd/completers/cpp/flags.py
--- a/ycmd/completers/cpp/flags.py
+++ b/ycmd/completers/cpp/flags.py
@@ -99,7 +99,10 @@ def FlagsForFile( self,
if add_extra_clang_flags:
flags += self.extra_clang_flags
- sanitized_flags = PrepareFlagsForClang( flags, filename )
+
+ sanitized_flags = PrepareFlagsForClang( flags,
+ filename,
+ add_extra_clang_flags )
if results[ 'do_cache' ]:
self.flags_for_file[ filename ] = sanitized_flags
@@ -172,10 +175,12 @@ def _CallExtraConfFlagsForFile( module, filename, client_data ):
return module.FlagsForFile( filename )
-def PrepareFlagsForClang( flags, filename ):
+def PrepareFlagsForClang( flags, filename, add_extra_clang_flags = True ):
flags = _CompilerToLanguageFlag( flags )
flags = _RemoveXclangFlags( flags )
flags = _RemoveUnusedFlags( flags, filename )
+ if add_extra_clang_flags:
+ flags = _EnableTypoCorrection( flags )
flags = _SanitizeFlags( flags )
return flags
@@ -401,6 +406,24 @@ def _ExtraClangFlags():
return flags
+def _EnableTypoCorrection( flags ):
+ """Adds the -fspell-checking flag if the -fno-spell-checking flag is not
+ present"""
+
+ # "Typo correction" (aka spell checking) in clang allows it to produce
+ # hints (in the form of fix-its) in the case of certain diagnostics. A common
+ # example is "no type named 'strng' in namespace 'std'; Did you mean
+ # 'string'? (FixIt)". This is enabled by default in the clang driver (i.e. the
+ # 'clang' binary), but is not when using libclang (as we do). It's a useful
+ # enough feature that we just always turn it on unless the user explicitly
+ # turned it off in their flags (with -fno-spell-checking).
+ if '-fno-spell-checking' in flags:
+ return flags
+
+ flags.append( '-fspell-checking' )
+ return flags
+
+
def _SpecialClangIncludes():
libclang_dir = os.path.dirname( ycm_core.__file__ )
path_to_includes = os.path.join( libclang_dir, 'clang_includes' )
| diff --git a/ycmd/tests/clang/flags_test.py b/ycmd/tests/clang/flags_test.py
--- a/ycmd/tests/clang/flags_test.py
+++ b/ycmd/tests/clang/flags_test.py
@@ -185,6 +185,25 @@ def RemoveUnusedFlags_Depfiles_test():
contains( *expected ) )
+def EnableTypoCorrection_Empty_test():
+ eq_( flags._EnableTypoCorrection( [] ), [ '-fspell-checking' ] )
+
+
+def EnableTypoCorrection_Trivial_test():
+ eq_( flags._EnableTypoCorrection( [ '-x', 'c++' ] ),
+ [ '-x', 'c++', '-fspell-checking' ] )
+
+
+def EnableTypoCorrection_Reciprocal_test():
+ eq_( flags._EnableTypoCorrection( [ '-fno-spell-checking' ] ),
+ [ '-fno-spell-checking' ] )
+
+
+def EnableTypoCorrection_ReciprocalOthers_test():
+ eq_( flags._EnableTypoCorrection( [ '-x', 'c++', '-fno-spell-checking' ] ),
+ [ '-x', 'c++', '-fno-spell-checking' ] )
+
+
def RemoveUnusedFlags_RemoveFilenameWithoutPrecedingInclude_test():
def tester( flag ):
expected = [ 'clang', flag, '/foo/bar', '-isystem/zoo/goo' ]
diff --git a/ycmd/tests/clang/subcommands_test.py b/ycmd/tests/clang/subcommands_test.py
--- a/ycmd/tests/clang/subcommands_test.py
+++ b/ycmd/tests/clang/subcommands_test.py
@@ -727,12 +727,32 @@ def FixIt_Check_cpp11_Note( results ):
} ) )
+def FixIt_Check_cpp11_SpellCheck( results ):
+ assert_that( results, has_entries( {
+ 'fixits': contains(
+ # Change to SpellingIsNotMyStrongPoint
+ has_entries( {
+ 'text': contains_string( "did you mean 'SpellingIsNotMyStrongPoint'" ),
+ 'chunks': contains(
+ ChunkMatcher( 'SpellingIsNotMyStrongPoint',
+ LineColMatcher( 72, 9 ),
+ LineColMatcher( 72, 35 ) )
+ ),
+ 'location': LineColMatcher( 72, 9 ),
+ } ) )
+ } ) )
+
+
def Subcommands_FixIt_all_test():
cfile = 'FixIt_Clang_cpp11.cpp'
mfile = 'FixIt_Clang_objc.m'
ufile = 'unicode.cc'
tests = [
+ # L
+ # i C
+ # n o
+ # e l Lang File, Checker
[ 16, 0, 'cpp11', cfile, FixIt_Check_cpp11_Ins ],
[ 16, 1, 'cpp11', cfile, FixIt_Check_cpp11_Ins ],
[ 16, 10, 'cpp11', cfile, FixIt_Check_cpp11_Ins ],
@@ -762,6 +782,9 @@ def Subcommands_FixIt_all_test():
# FixIt attached to a "child" diagnostic (i.e. a Note)
[ 60, 1, 'cpp11', cfile, FixIt_Check_cpp11_Note ],
+
+ # FixIt due to forced spell checking
+ [ 72, 9, 'cpp11', cfile, FixIt_Check_cpp11_SpellCheck ],
]
for test in tests:
diff --git a/ycmd/tests/clang/testdata/FixIt_Clang_cpp11.cpp b/ycmd/tests/clang/testdata/FixIt_Clang_cpp11.cpp
--- a/ycmd/tests/clang/testdata/FixIt_Clang_cpp11.cpp
+++ b/ycmd/tests/clang/testdata/FixIt_Clang_cpp11.cpp
@@ -63,3 +63,11 @@ void z() {
}
}
+
+namespace Typo {
+ struct SpellingIsNotMyStrongPoint;
+}
+
+void typo() {
+ Typo::SpellingIsNotMyStringPiont *p;
+}
| ycmd does not pass on many fixits
I'm finding that ycmd occasionally suggests fixits for things like missing semi-colons, but for many other things it does not suggest a fixit even though clang does. For instance, if I write `std::strng x;`, when compiling in clang it offers the correction to `string`, but ycmd does not offer this fixit. Is this a bug with ycmd, something wrong with my setup, or is the fixit feature constrained somehow?
| @puremourning You'd probably know best.
Can you check you are on the latest commit? We recently (like last week) merged a change which reports FixIts for notes, which should include typo correction (which is the type of case to which you are referring as I understand it)
It is also possible that your client doesn't handle multiple fix it options. Which client are you using ?
> On 16 Jul 2016, at 01:41, Val Markovic [email protected] wrote:
>
> @puremourning You'd probably know best.
>
> —
> You are receiving this because you were mentioned.
> Reply to this email directly, view it on GitHub, or mute the thread.
Just as
@puremourning I'm not exactly sure about being on the latest commit, the ycmd I use is built by someone else and distributed. The build is dated June 11 but I'm not necessarily sure whether they pulled that day. Is there some way I could tell from the released build whether that commit has been included?
I'm using the emacs-ycmd client. I'm not sure what you mean by multiple fixit options; I typed in `std::strng x;` so the correction to string should have been the only fixit. Do you mean a situation whether I typed in `std::strng x`? It seems like emacs ycmd supports multi line fixits because when I do a fixit, it asks me to confirm whether I want to perform all fixits on that line.
@quicknir Could you provide the link to the build?
@micbou Sorry I should have been more clear. We have a sort of package manager where I work so developers can install things easily without needing root. ycmd is available through that package manager. So I'm afraid I can't provide the link. If you can tell me the commit hash that adds multiple fixits I can ask the guy who's in charge of it, or if there's another way to tell from the built project.
It's this commit: cc87a9c5fc659f488879c3cb5bce090a966cbb70
In fact, it doesn't look like typo correction is kicking in in the following test case:
``` c++
#include <string>
int main( int argc, char ** argv )
{
std::strng this_is_a_string;
}
```
Diags reported:
```
fixit.cc|5 col 8 error| no type named 'strng' in namespace 'std'
fixit.cc|3 col 15 warning| unused parameter 'argc'
fixit.cc|3 col 29 warning| unused parameter 'argv'
```
And `:YcmShowDetailedDiagnostic`:
```
Forcing compilation, this will block Vim until done.
/Users/ben/Development/tmp/fixit.cc:5:8: error: no type named 'strng' in namespace 'std'
```
So, `libclang` isn't reporting a note or a FixIt (therefore there isn't much we can do).
Just to be sure, I checked and it is not related to the `Incomplete` flag that we use.
Maybe @r4nt or @d0k know if libclang would be expected to offer fixits for typo correction as in the above example?
Compiling with clang `-fsyntax-only` gives this:
```
BeniMac:tmp ben$ clang -x c++ -fsyntax-only -Wall -Wextra -Werror fixit.cc
fixit.cc:5:8: error: no type named 'strng' in namespace 'std'; did you mean 'string'?
std::strng this_is_a_string;
~~~~~^~~~~
string
/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/bin/../include/c++/v1/iosfwd:194:65: note: 'string' declared here
typedef basic_string<char, char_traits<char>, allocator<char> > string;
^
1 error generated.
```
Ahah. You need to add `-fspell-checking` to your flags to enable this.

@puremourning Do we perhaps want to pass that by default? Something to consider.
I was considering it.
The slight con is that typo correction is guesswork, whereas FixIts are generally not going to change the semantics of your program. I guess I sort of ignored that for fixits on notes, principally because there are usually multiple options. In this case, the user doesn't get a "choice", but it is clear in the diagnostic message "; Did you mean 'string'?" So we are probably good to go with it.
At least it is good enough to be on by default when using the `clang` driver, so i don't see why we can't always turn it on.
Seems like a plan; let's turn it on always. We already add some flags by default so it's not without precedent.
PR on its way to fix this and the other bug we had recently with flags (to do with the preprocessor flags: https://github.com/Valloric/ycmd/issues/534).
Happy to do them together? They're really closely related.
| 2016-07-16T22:14:10 |
ycm-core/ycmd | 551 | ycm-core__ycmd-551 | [
"536"
] | 4ee8b4662c99db1716108bff25a4cb77d2860865 | diff --git a/ycmd/handlers.py b/ycmd/handlers.py
--- a/ycmd/handlers.py
+++ b/ycmd/handlers.py
@@ -188,6 +188,8 @@ def LoadExtraConfFile():
request_data = RequestWrap( request.json, validate = False )
extra_conf_store.Load( request_data[ 'filepath' ], force = True )
+ return _JsonResponse( True )
+
@app.post( '/ignore_extra_conf_file' )
def IgnoreExtraConfFile():
@@ -195,6 +197,8 @@ def IgnoreExtraConfFile():
request_data = RequestWrap( request.json, validate = False )
extra_conf_store.Disable( request_data[ 'filepath' ] )
+ return _JsonResponse( True )
+
@app.post( '/debug_info' )
def DebugInfo():
| diff --git a/ycmd/tests/misc_handlers_test.py b/ycmd/tests/misc_handlers_test.py
--- a/ycmd/tests/misc_handlers_test.py
+++ b/ycmd/tests/misc_handlers_test.py
@@ -24,10 +24,10 @@
standard_library.install_aliases()
from builtins import * # noqa
-from nose.tools import ok_
-from hamcrest import assert_that, contains
+from hamcrest import assert_that, contains, empty, equal_to, has_entries
+import requests
-from ycmd.tests import SharedYcmd
+from ycmd.tests import PathToTestFile, SharedYcmd
from ycmd.tests.test_utils import BuildRequest, DummyCompleter, PatchCompleter
@@ -35,7 +35,9 @@
def MiscHandlers_SemanticCompletionAvailable_test( app ):
with PatchCompleter( DummyCompleter, filetype = 'dummy_filetype' ):
request_data = BuildRequest( filetype = 'dummy_filetype' )
- ok_( app.post_json( '/semantic_completion_available', request_data ).json )
+ assert_that( app.post_json( '/semantic_completion_available',
+ request_data ).json,
+ equal_to( True ) )
@SharedYcmd
@@ -43,17 +45,26 @@ def MiscHandlers_EventNotification_AlwaysJsonResponse_test( app ):
event_data = BuildRequest( contents = 'foo foogoo ba',
event_name = 'FileReadyToParse' )
- app.post_json( '/event_notification', event_data ).json
+ assert_that( app.post_json( '/event_notification', event_data ).json,
+ empty() )
@SharedYcmd
def MiscHandlers_EventNotification_ReturnJsonOnBigFileError_test( app ):
- # We generate a content greater than Bottle.MEMFILE_MAX, which is set to 1Mb.
+ # We generate a content greater than Bottle.MEMFILE_MAX, which is set to 1MB.
contents = "foo " * 500000
event_data = BuildRequest( contents = contents,
event_name = 'FileReadyToParse' )
- app.post_json( '/event_notification', event_data, expect_errors = True ).json
+ response = app.post_json( '/event_notification',
+ event_data,
+ expect_errors = True )
+ assert_that( response.status_code,
+ equal_to( requests.codes.request_entity_too_large ) )
+ assert_that( response.json,
+ has_entries( { 'traceback': None,
+ 'message': 'None',
+ 'exception': None } ) )
@SharedYcmd
@@ -71,3 +82,21 @@ def MiscHandlers_FilterAndSortCandidates_Basic_test( app ):
response_data = app.post_json( '/filter_and_sort_candidates', data ).json
assert_that( response_data, contains( candidate2, candidate3 ) )
+
+
+@SharedYcmd
+def MiscHandlers_LoadExtraConfFile_AlwaysJsonResponse_test( app ):
+ filepath = PathToTestFile( '.ycm_extra_conf.py' )
+ extra_conf_data = BuildRequest( filepath = filepath )
+
+ assert_that( app.post_json( '/load_extra_conf_file', extra_conf_data ).json,
+ equal_to( True ) )
+
+
+@SharedYcmd
+def MiscHandlers_IgnoreExtraConfFile_AlwaysJsonResponse_test( app ):
+ filepath = PathToTestFile( '.ycm_extra_conf.py' )
+ extra_conf_data = BuildRequest( filepath = filepath )
+
+ assert_that( app.post_json( '/ignore_extra_conf_file', extra_conf_data ).json,
+ equal_to( True ) )
diff --git a/ycmd/tests/testdata/.ycm_extra_conf.py b/ycmd/tests/testdata/.ycm_extra_conf.py
new file mode 100644
| ycmd should return valid JSON instead of empty HTML for 2 requests
/load_extra_conf_file and /ignore_extra_conf_file requests currently return an empty body
in case of success, which is not valid JSON. Instead, ycmd should return valid JSON body, for example just "true".
| Please can you explain why you believe this change is necessary and generally useful? Is it causing you a problem?
FWIW we have some in-progress API documentation here: http://puremourning.github.io/ycmd-1/
> Please can you explain why you believe this change is necessary and generally useful?
Well, if ycmd wants to have a clean and consistent API, then all replies should follow some kind of rule. Considering almost all replies already are in JSON format, even errors, it may not be unreasonable to think that ALL replies should be JSON.
Even the documentation link _you_ gave says the results of the request should be JSON :)
Allowing YCM clients to assume all ycmd replies are in the same format (JSON) can simplify clients code as you can imagine. (I assume it must not be too complicated for ycmd to just return 4 bytes ("true") instead of one)
This is the kind of detail that differenciates a good, predictable API from an average API, with gotchas awaiting at the corner.
Nobody is going to deny the "cleanliness of API" argument. I was just curious as to whether there was a more pressing reason to make a (potentially breaking) API change (such as you're trying to do X and Y is not working). Changing the API for cosmetic reasons is hard to justify :/
IIRC this has come up before, though I forget the context.
The change we're talking about is really simple https://github.com/vheon/ycmd/commit/17cf01a9ee911189d1b9c5aaf99a230b1d8d35f4. So we only need to decide if it really makes sense.
I'm not against this change. I can't see how it would be backwards incompatible; we weren't returning anything in the body before and I can't imagine how any client would break if the body they were ignoring before suddenly starting having content. The only thing clients did up to now is look at the status code.
So IMO this should be fine. We can just make these handlers return `{}` and be done with it.
If someone can make a convincing case that this would be a breaking change, then of course we wouldn't do it. It's entirely a cosmetic improvement.
| 2016-07-17T22:48:59 |
ycm-core/ycmd | 603 | ycm-core__ycmd-603 | [
"538"
] | 1a82d7f59d43ac422c08097a241f1efe0a0c4dac | diff --git a/ycmd/completers/cpp/flags.py b/ycmd/completers/cpp/flags.py
--- a/ycmd/completers/cpp/flags.py
+++ b/ycmd/completers/cpp/flags.py
@@ -176,7 +176,7 @@ def _CallExtraConfFlagsForFile( module, filename, client_data ):
def PrepareFlagsForClang( flags, filename, add_extra_clang_flags = True ):
- flags = _CompilerToLanguageFlag( flags )
+ flags = _AddLanguageFlagWhenAppropriate( flags )
flags = _RemoveXclangFlags( flags )
flags = _RemoveUnusedFlags( flags, filename )
if add_extra_clang_flags:
@@ -240,22 +240,24 @@ def _RemoveFlagsPrecedingCompiler( flags ):
return flags[ :-1 ]
-def _CompilerToLanguageFlag( flags ):
- """When flags come from the compile_commands.json file, the flag preceding
- the first flag starting with a dash is usually the path to the compiler that
- should be invoked. We want to replace it with a corresponding language flag.
- E.g., -x c for gcc and -x c++ for g++."""
+def _AddLanguageFlagWhenAppropriate( flags ):
+ """When flags come from the compile_commands.json file, the flag preceding the
+ first flag starting with a dash is usually the path to the compiler that
+ should be invoked. Since LibClang does not deduce the language from the
+ compiler name, we explicitely set the language to C++ if the compiler is a C++
+ one (g++, clang++, etc.). Otherwise, we let LibClang guess the language from
+ the file extension. This handles the case where the .h extension is used for
+ C++ headers."""
flags = _RemoveFlagsPrecedingCompiler( flags )
- # First flag is now the compiler path or a flag starting with a dash
- if flags[ 0 ].startswith( '-' ):
- return flags
-
- language = ( 'c++' if CPP_COMPILER_REGEX.search( flags[ 0 ] ) else
- 'c' )
+ # First flag is now the compiler path or a flag starting with a dash.
+ first_flag = flags[ 0 ]
- return flags[ :1 ] + [ '-x', language ] + flags[ 1: ]
+ if ( not first_flag.startswith( '-' ) and
+ CPP_COMPILER_REGEX.search( first_flag ) ):
+ return [ first_flag, '-x', 'c++' ] + flags[ 1: ]
+ return flags
def _RemoveUnusedFlags( flags, filename ):
| diff --git a/ycmd/tests/clang/flags_test.py b/ycmd/tests/clang/flags_test.py
--- a/ycmd/tests/clang/flags_test.py
+++ b/ycmd/tests/clang/flags_test.py
@@ -300,12 +300,12 @@ def RemoveXclangFlags_test():
flags._RemoveXclangFlags( expected + to_remove + expected ) )
-def CompilerToLanguageFlag_Passthrough_test():
+def AddLanguageFlagWhenAppropriate_Passthrough_test():
eq_( [ '-foo', '-bar' ],
- flags._CompilerToLanguageFlag( [ '-foo', '-bar' ] ) )
+ flags._AddLanguageFlagWhenAppropriate( [ '-foo', '-bar' ] ) )
-def _ReplaceCompilerTester( compiler, language ):
+def _AddLanguageFlagWhenAppropriateTester( compiler, language_flag = [] ):
to_removes = [
[],
[ '/usr/bin/ccache' ],
@@ -314,19 +314,20 @@ def _ReplaceCompilerTester( compiler, language ):
expected = [ '-foo', '-bar' ]
for to_remove in to_removes:
- eq_( [ compiler, '-x', language ] + expected,
- flags._CompilerToLanguageFlag( to_remove + [ compiler ] + expected ) )
+ eq_( [ compiler ] + language_flag + expected,
+ flags._AddLanguageFlagWhenAppropriate( to_remove + [ compiler ] +
+ expected ) )
-def CompilerToLanguageFlag_ReplaceCCompiler_test():
+def AddLanguageFlagWhenAppropriate_CCompiler_test():
compilers = [ 'cc', 'gcc', 'clang', '/usr/bin/cc',
'/some/other/path', 'some_command' ]
for compiler in compilers:
- yield _ReplaceCompilerTester, compiler, 'c'
+ yield _AddLanguageFlagWhenAppropriateTester, compiler
-def CompilerToLanguageFlag_ReplaceCppCompiler_test():
+def AddLanguageFlagWhenAppropriate_CppCompiler_test():
compilers = [ 'c++', 'g++', 'clang++', '/usr/bin/c++',
'/some/other/path++', 'some_command++',
'c++-5', 'g++-5.1', 'clang++-3.7.3', '/usr/bin/c++-5',
@@ -335,7 +336,7 @@ def CompilerToLanguageFlag_ReplaceCppCompiler_test():
'/some/other/path++-4.9.31', 'some_command++-5.10' ]
for compiler in compilers:
- yield _ReplaceCompilerTester, compiler, 'c++'
+ yield _AddLanguageFlagWhenAppropriateTester, compiler, [ '-x', 'c++' ]
def ExtraClangFlags_test():
| ycmd insists I'm not using exceptions
I'm using ycmd through emacs, with flycheck integration. In the error list, I continually get errors "cannot use throw with exceptions disabled", "cannot use try with exceptions disabled". I'm using a compilation database, plus system paths as my flags. I certainly am not specifying -fnoexceptions. I even tried to fight this error by explicitly adding -fexceptions. When I show the debug info for ycmd, it looks like this:
```
ycmd debug information for buffer read_vwap_publication.x.cpp in c++-mode:
Server has Clang support compiled in: True
Clang version: TPM built 20160117 clang version 3.6.2 (tags/RELEASE_362/final) (based on LLVM 3.6.2)
Flags for <redacted> loaded from <redacted>.ycm_extra_conf.py:
['-x', 'c', '-<redacted>', '-m64', '-pthread', '-fno-lto', '-g', '-std=c++11', '-Wdeprecated',
'-Woverloaded-virtual', '-stdlib=libc++', '-D__CPP0X', '-Werror=overloaded-virtual', '-Wno-gnu',
'-fcolor-diagnostics', '-Wno-inconsistent-missing-override', '-Wpointer-arith', '-Wreturn-type',
(
many many includes
)
(gcc 5.3.0 includes, ended up present twice)
(clang3.6.2 includes)
'-isystem', '/usr/include', '-Wall', '-Wextra', '-fexceptions', '-std=c++11', '-isystem', <some_long_path>/vim-YouCompleteMe/third_party/ycmd/ycmd/../clang_includes']
Server running at: 127.0.0.1:56135
```
Any ideas why ycmd would insist that I have asked for no exceptions, even though I have not asked for it anywhere?
| Could you try with the latest pre-built Clang binaries (`./build.py --clang-completer` will do it automatically for you) and report back?
You have '-x c'. C didn't support exceptions last time I checked. You need to tell clang you are compiling c++
> On 6 Jul 2016, at 00:54, quicknir [email protected] wrote:
>
> I'm using ycmd through emacs, with flycheck integration. In the error list, I continually get errors "cannot use throw with exceptions disabled", "cannot use try with exceptions disabled". I'm using a compilation database, plus system paths as my flags. I certainly am not specify -fnoexceptions. I even tried to fight this error by explicitly adding -fexceptions. When I show the debug info for ycmd, it looks like this:
>
> ycmd debug information for buffer read_vwap_publication.x.cpp in c++-mode:
>
> Server has Clang support compiled in: True
> Clang version: TPM built 20160117 clang version 3.6.2 (tags/RELEASE_362/final) (based on LLVM 3.6.2)
> Flags for <redacted> loaded from <redacted>.ycm_extra_conf.py:
> ['-x', 'c', '-<redacted>', '-m64', '-pthread', '-fno-lto', '-g', '-std=c++11', '-Wdeprecated', '-Woverloaded-virtual', '-stdlib=libc++', '-D__CPP0X', '-Werror=overloaded-virtual', '-Wno-gnu', '-fcolor-diagnostics', '-Wno-inconsistent-missing-override', '-Wpointer-arith', '-Wreturn-type', (
>
> many many includes
>
> )
>
> '/spare/local/nir/venv-mosaic/bin/../lib/gcc/x86_64-unknown-linux-gnu/5.3.0/../../../../include/c++/5.3.0', '-isystem', '/spare/local/nir/venv-mosaic/bin/../lib/gcc/x86_64-unknown-linux-gnu/5.3.0/../../../../include/c++/5.3.0/x86_64-unknown-linux-gnu', '-isystem', '/spare/local/nir/venv-mosaic/bin/../lib/gcc/x86_64-unknown-linux-gnu/5.3.0/../../../../include/c++/5.3.0/backward', '-isystem', '/usr/local/include', '-isystem', '/spare/local/nir/venv-mosaic/llvm/3.6.2/bin/../lib/clang/3.6.2/include', '-isystem', '/usr/include', '-Wall', '-Wextra', '-fexceptions', '-std=c++11', '-isystem', '/spare/local/nir/venv-mosaic/vim-YouCompleteMe/1.20160128/share/vim/bundle/vim-YouCompleteMe/third_party/ycmd/ycmd/../clang_includes']
>
> Server running at: 127.0.0.1:56135
> —
> You are receiving this because you are subscribed to this thread.
> Reply to this email directly, view it on GitHub, or mute the thread.
@micbou Are you talking about upgrading the version of clang that ycmd calls to, or the version of clang that ycmd is built against? Neither of these are very easy to upgrade in my particular case; do you have a concrete reason to think that will improve things?
@puremourning C doesn't support about a million things that C++ does, last time I checked, yet I'm not getting any of those other errors. Seriously, snarkiness aside, I don't know why exactly that `-x c` is there, but I really don't think it's compiling in C mode since I don't get any other errors. What's strange is that the `-x c` is not in my compile_commands.json, so I don't know where it's coming from. It's worth understanding this too, even if the language is ultimately C++ maybe this somehow changes some default.
Ah then we are guessing c from the compiler invocation. (There is some heuristic in there.) If you change the invocation to clang++ in compile commands does it work?
> On 6 Jul 2016, at 15:06, quicknir [email protected] wrote:
>
> @micbou Are you talking about upgrading the version of clang that ycmd calls to, or the version of clang that ycmd is built against? Neither of these are very easy to upgrade in my particular case; do you have a concrete reason to think that will improve things?
>
> @puremourning C doesn't support about a million things that C++ does, last time I checked, yet I'm not getting any of those other errors. Seriously, snarkiness aside, I don't know why exactly that -x c is there, but I really don't think it's compiling in C mode since I don't get any other errors. What's strange is that the -x c is not in my compile_commands.json, so I don't know where it's coming from. It's worth understanding this too, even if the language is ultimately C++ maybe this somehow changes some default.
>
> —
> You are receiving this because you were mentioned.
> Reply to this email directly, view it on GitHub, or mute the thread.
Regarding clang version. We only support the version of clang downloaded by build.py. You seem to have quite an older version (maybe you are using --system-libclang?)
We are referring to the version ycmd liked against and run against.
> On 6 Jul 2016, at 15:06, quicknir [email protected] wrote:
>
> @micbou Are you talking about upgrading the version of clang that ycmd calls to, or the version of clang that ycmd is built against? Neither of these are very easy to upgrade in my particular case; do you have a concrete reason to think that will improve things?
>
> @puremourning C doesn't support about a million things that C++ does, last time I checked, yet I'm not getting any of those other errors. Seriously, snarkiness aside, I don't know why exactly that -x c is there, but I really don't think it's compiling in C mode since I don't get any other errors. What's strange is that the -x c is not in my compile_commands.json, so I don't know where it's coming from. It's worth understanding this too, even if the language is ultimately C++ maybe this somehow changes some default.
>
> —
> You are receiving this because you were mentioned.
> Reply to this email directly, view it on GitHub, or mute the thread.
@puremourning very interestingly, changing the `c` to `c++` fixes it! I have no real explanation for the overall behavior. I tested this by just manually changing the ycmd code to replace flags[1] with "c++". Also, as you predict, changing the compile_commands.json invocation from clang to clang++ fixes it as well.
It seems though that in this case the heuristic is not really accurate, in the sense that I am actually building my project from this exact compile_commands.json, so it seems like invoking `clang` on a `.cpp` file does in fact launch in C++ mode.
Is simply replacing flags[1] a safe bet? can anything else ever be there? I'd rather avoid tweaking my compile_commands.json but I can do that as well if you think that my first workaround is better. In the longer term might be better to improve this heuristic (if it hasn't already been fixed in a more recent version).
@puremourning btw my clang version is 3.6.1, I cannot easily upgrade the version of clang for various reasons.
You should do this in your `.ycm_extra_conf.py` file and the heuristic is fine: the correct compiler for C++ sources is `clang++`, not `clang`. Something is wrong with the generation of your `compile_commands.json` file. Anyway, the issue seems to be resolved so I am closing it.
@micbou Nothing is wrong with my compile_commands.json, and the heuristic is wrong. The only difference between clang and clang++ is which standard library they link against: http://stackoverflow.com/questions/20047218/what-is-the-difference-clang-clang-std-c11. But here, I am not linking: all of the commands in compile_commands.json are with the -c flags, as they are building specific translation units. And regardless of heuristics for clang, I am compiling with -std=c++11 as a flag. `clang -c -std=c++11 ...` is a perfectly correct way to build C++ object files.
In summary, regardless of clang/clang++, if -c is present (which it probably nearly always will be for using ycmd in larger projects), then -std=.... should always overrule everything else. That is the correct heuristic.
Edit: actually, possibly clang is not deciding on C++ because of the -std, but rather because of the .cpp extension. So maybe that should be the heuristic.
So, can the heuristic be fixed, please?
Looking for the value of the `-std` flag then falling back to the name of the compiler if there is no `-std` flag would probably be a better heuristic to determine the `-x` flag. Reopening.
@micbou I read a little more about this, info on this is actually a bit contradictory, but it seems like it's the extension that drives the heuristic, the -std flag will only be applied if appropriate. The best quote I've found on this topic is from a clang dev post:
> Of course you can simply use "clang" to compile C++ code, too. You just have to pass "-x c++" and
> "-lc++" (for libc++) or "-lstdc++" (for libstdc++) to it ("-x c++" is optional if the source file
> is recognized (using its extension) as C++ code.
http://clang-developers.42468.n3.nabble.com/Difference-between-clang-and-clang-td3001279.html
The rabbit hole gets deeper. Regardless of which of the two techniques I use to fix this, flycheck seems to get completely disabled as a result. That is no matter what I type, no errors are triggered, even though auto completion works. So I'm actually not sure if the fix had anything to do with anything. Because again, I was never able to see errors for anything other than exceptions, even though my code has tons of templates and other things.
So now I'm just very confused.
Okay, I discovered the issue. It seems like when C++ mode was added, some flags that were being ignored before were activated. In particular, -Wextra was generating an enormous amount of errors from my header files. It seems like ycmd places a limit on how many errors it will send back, so the net effect was to completely prevent any errors from being flagged in the source file I was working.
So I guess: a) is there a way to increase the maximum number of errors sent back? b) does ycmd in newer versions stop sending back errors that occur in other files, or is there a setting that I can use to effect this now?
I'm not aware of any artificial limit on the number of diags returned _by ycmd_. However, if clang can't parse your file well enough (e.g. it hits too many internally), it _might_ not produce any. We know that it can be very slow if it can't generate a PCH due to errors.
Can you find the result of (the emacs equivalent of) `:YcmDiags` ? In Vim this command returns the full list of diagnostics. If not, can you run clang driver manually from the command line with (i think it is...) `-fsyntax-only`
I'm sorry we don't support the emacs client here, so I can't comment on what the client itself might do with a large number of errors.
Alternatively, if you can use wireshark to snoop the http traffic between your client and the ycmd server, we can look at the json. Beware that there is of course an infosec issue with sharing that data (it includes your source code).
Edit: one option also, if the headers causing trouble are system headers is to check this out : http://clang.llvm.org/docs/UsersManual.html#controlling-diagnostics-in-system-headers
> it seems like it's the extension that drives the heuristic, the -std flag will only be applied if appropriate.
In our case, we can't completely rely on the extension because of headers: it is not uncommon to use the `.h` extension for C++ headers. If we deduce the language mode from the extension, it could lead to the situation where some files are compiled in C++ while others in C and we clearly don't want that. So I still think that looking at the `-std` flag is a good addition to the current behavior.
> a) is there a way to increase the maximum number of errors sent back?
There is the `max_diagnostics_to_display` option (its default value is set to 30). I am not sure but it looks like the only way to change its value on the emacs client is to modify it in the [emacs.el](https://github.com/abingham/emacs-ycmd/blob/master/ycmd.el#L1574) file.
@puremourning I'm not sure what ycmdiags does, but I found a log file in emacs. It shows the exact json being sent back and forth. So I can clearly see that many many errors get sent back from the server, it's just that they were all in header files, and then it stops sending errors. As soon as I e.g. comment out the header files it finds errors in the current file (a huge pile since there's no headers :-) ). I'm aware of the system headers flags, this is actually not in third party code. A use case for the problem I'm experiencing might be someone writing new code in a code base and trying to use more warnings than the existing code uses as part of its build system. This was basically my situation, I solved this problem by basically just removing a single flag that isn't that useful anyway that we don't compile with (unused function parameters).
I'm actually lucky in that the codebase I work on compiles with Wall and Wextra with a couple exceptions. This issue would be a lot more problematic on a crappier codebase.
@micbou headers are generally not translation unit targets anyhow though. The approach in ycmd as I understand it is to try to use a heuristic to find a .cpp/.c/.cc etc file that corresponds to the .h file and then use the flags from there. So one could get the extension as part of the heuristic. Anyhow it is just a thought, I think using `-std` is a good improvement as well.
I've managed to work around this max diagnostics issue as I wrote above. It might be an improvement to consider though, to simply filter out diagnostics that don't occur in the current file. I'm perhaps missing something, but from my usage of ycmd they don't seem to be used for anything (they don't show up in flycheck in any way). This is preferable to simply raising the parameter you mentioned. If someone is indeed trying to be more conscientious with warnings than the codebase they are #including, one can very easily get hundreds of warnings from header files. One might have to raise max diagnostics so high that performance becomes an issue, and there's no real correct number other than just not having a limit at all. At least having an option to disable header file warning diagnostics would be useful at least for some people, and it seems to me naively, not too bad to implement.
btw if it is helpful, now that the problem is tracked down, I can close this and open two separate issues: one where I summarize the discussion on heuristic language detection, and one where (if you think the idea has merit) I suggest having an option to disable header file diagnostics. The titles would be more suggestive and it would perhaps facilitate further discussion re implementation.
Leaving this here for future reference: libclang has an API to determine if a Location (such as that associated with a diagnostic) is from the "main file" related to a TU: [`clang_Location_IsFromMainFile`](http://clang.llvm.org/doxygen/group__CINDEX__LOCATIONS.html#gacb4ca7b858d66f0205797ae84cc4e8f2)
In theory we could suppress any errors for which that is not true. Personally, I would resist this strongly, as I want to know if there are errors in my headers, flags or anywhere else. We also typically attempt to avoid configurable or contentious features... :/
> from my usage of ycmd they don't seem to be used for anything (they don't show up in flycheck in any way)
FWIW: I think that's an issue related to your client. The Vim client certainly does display them.
| 2016-09-17T19:09:00 |
ycm-core/ycmd | 623 | ycm-core__ycmd-623 | [
"618"
] | d8a61e9ee8af056bdda7f22b758a27da9dfcc774 | diff --git a/ycmd/extra_conf_store.py b/ycmd/extra_conf_store.py
--- a/ycmd/extra_conf_store.py
+++ b/ycmd/extra_conf_store.py
@@ -146,7 +146,21 @@ def Load( module_file, force = False ):
# anymore, but there are a lot of old ycm_extra_conf.py files that we don't
# want to break.
sys.path.insert( 0, _PathToCppCompleterFolder() )
- module = LoadPythonSource( _RandomName(), module_file )
+
+ # By default, the Python interpreter compiles source files into bytecode to
+ # load them faster next time they are run. These *.pyc files are generated
+ # along the source files prior to Python 3.2 or in a __pycache__ folder for
+ # newer versions. We disable the generation of these files when loading
+ # ycm_extra_conf.py files as users do not want them inside their projects.
+ # The drawback is negligible since ycm_extra_conf.py files are generally small
+ # files thus really fast to compile and only loaded once by editing session.
+ old_dont_write_bytecode = sys.dont_write_bytecode
+ sys.dont_write_bytecode = True
+ try:
+ module = LoadPythonSource( _RandomName(), module_file )
+ finally:
+ sys.dont_write_bytecode = old_dont_write_bytecode
+
del sys.path[ 0 ]
with _module_for_module_file_lock:
| Processing .ycm_extra_conf.py creates __pycache__ directory
I'm not sure if this is the intended behaviour. When YCM reads configuration creates a compiled version in `__pycache__`. I know that this behaviour can be disabled passing to `python` the `-B` argument or setting `PYTHONDONTWRITEBYTECODE=1` environmental variable. I don't want to disable global bytecode generation but I want to disable for `.ycm_extra_conf.py` because I feel it pollutes my project directory.
Is there a easy/reliable way to disable it in the YCM config?
| We could disable the compiled version with [the `sys.dont_write_bytecode` variable](https://docs.python.org/3/library/sys.html#sys.dont_write_bytecode) before loading the `.ycm_extra_conf.py` file then re-enable it just after. Seems to work in my tests.
I don't think we should add an option for that. Instead, we should always disable it as the only benefit of the compiled version is to improve loading time by skipping the compilation step but in practice the time to compile a `.ycm_extra_conf.py` file is negligible (~8ms for YCM's `.ycm_extra_conf.py` file) and is only done once by editing session.
| 2016-10-10T16:34:30 |
|
ycm-core/ycmd | 645 | ycm-core__ycmd-645 | [
"569"
] | 7af56b6af5003c38b57639cc7705e75fbacc8f32 | diff --git a/ycmd/extra_conf_store.py b/ycmd/extra_conf_store.py
--- a/ycmd/extra_conf_store.py
+++ b/ycmd/extra_conf_store.py
@@ -1,4 +1,5 @@
-# Copyright (C) 2011, 2012 Google Inc.
+# Copyright (C) 2011-2012 Google Inc.
+# 2016 ycmd contributors
#
# This file is part of ycmd.
#
@@ -37,6 +38,8 @@
from fnmatch import fnmatch
+_logger = logging.getLogger( __name__ )
+
# Singleton variables
_module_for_module_file = {}
_module_for_module_file_lock = Lock()
@@ -81,22 +84,34 @@ def Shutdown():
def _CallGlobalExtraConfMethod( function_name ):
- logger = _Logger()
global_ycm_extra_conf = _GlobalYcmExtraConfFileLocation()
if not ( global_ycm_extra_conf and
os.path.exists( global_ycm_extra_conf ) ):
- logger.debug( 'No global extra conf, not calling method ' + function_name )
+ _logger.debug( 'No global extra conf, '
+ 'not calling method {0}'.format( function_name ) )
+ return
+
+ try:
+ module = Load( global_ycm_extra_conf, force = True )
+ except Exception:
+ _logger.exception( 'Error occurred while loading '
+ 'global extra conf {0}'.format( global_ycm_extra_conf ) )
return
- module = Load( global_ycm_extra_conf, force = True )
if not module or not hasattr( module, function_name ):
- logger.debug( 'Global extra conf not loaded or no function ' +
- function_name )
+ _logger.debug( 'Global extra conf not loaded or no function ' +
+ function_name )
return
- logger.info( 'Calling global extra conf method {0} on conf file {1}'.format(
- function_name, global_ycm_extra_conf ) )
- getattr( module, function_name )()
+ try:
+ _logger.info(
+ 'Calling global extra conf method {0} '
+ 'on conf file {1}'.format( function_name, global_ycm_extra_conf ) )
+ getattr( module, function_name )()
+ except Exception:
+ _logger.exception(
+ 'Error occurred while calling global extra conf method {0} '
+ 'on conf file {1}'.format( function_name, global_ycm_extra_conf ) )
def Disable( module_file ):
@@ -210,7 +225,3 @@ def _RandomName():
def _GlobalYcmExtraConfFileLocation():
return os.path.expanduser(
user_options_store.Value( 'global_ycm_extra_conf' ) )
-
-
-def _Logger():
- return logging.getLogger( __name__ )
| diff --git a/ycmd/tests/extra_conf_store_test.py b/ycmd/tests/extra_conf_store_test.py
new file mode 100755
--- /dev/null
+++ b/ycmd/tests/extra_conf_store_test.py
@@ -0,0 +1,141 @@
+# Copyright (C) 2016 ycmd contributors
+#
+# This file is part of ycmd.
+#
+# ycmd is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# ycmd is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with ycmd. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import unicode_literals
+from __future__ import print_function
+from __future__ import division
+from __future__ import absolute_import
+from future import standard_library
+standard_library.install_aliases()
+from builtins import * # noqa
+
+import inspect
+from mock import patch
+
+from hamcrest import assert_that, calling, equal_to, has_length, none, raises
+from ycmd import extra_conf_store
+from ycmd.responses import UnknownExtraConf
+from ycmd.tests import PathToTestFile
+from ycmd.tests.test_utils import UserOption
+
+
+class ExtraConfStore_test():
+
+ def setUp( self ):
+ extra_conf_store.Reset()
+
+
+ def ModuleForSourceFile_UnknownExtraConf_test( self ):
+ filename = PathToTestFile( 'extra_conf', 'project', 'some_file' )
+ assert_that(
+ calling( extra_conf_store.ModuleForSourceFile ).with_args( filename ),
+ raises( UnknownExtraConf, 'Found .*\.ycm_extra_conf\.py\. Load?' )
+ )
+
+
+ def ModuleForSourceFile_NoConfirmation_test( self ):
+ filename = PathToTestFile( 'extra_conf', 'project', 'some_file' )
+ extra_conf_file = PathToTestFile( 'extra_conf', 'project',
+ '.ycm_extra_conf.py' )
+ with UserOption( 'confirm_extra_conf', 0 ):
+ module = extra_conf_store.ModuleForSourceFile( filename )
+ assert_that( inspect.ismodule( module ) )
+ assert_that( inspect.getfile( module ), equal_to( extra_conf_file ) )
+
+
+ def ModuleForSourceFile_Whitelisted_test( self ):
+ filename = PathToTestFile( 'extra_conf', 'project', 'some_file' )
+ extra_conf_file = PathToTestFile( 'extra_conf', 'project',
+ '.ycm_extra_conf.py' )
+ with UserOption( 'extra_conf_globlist', [ extra_conf_file ] ):
+ module = extra_conf_store.ModuleForSourceFile( filename )
+ assert_that( inspect.ismodule( module ) )
+ assert_that( inspect.getfile( module ), equal_to( extra_conf_file ) )
+
+
+ def ModuleForSourceFile_Blacklisted_test( self ):
+ filename = PathToTestFile( 'extra_conf', 'project', 'some_file' )
+ extra_conf_file = PathToTestFile( 'extra_conf', 'project',
+ '.ycm_extra_conf.py' )
+ with UserOption( 'extra_conf_globlist', [ '!' + extra_conf_file ] ):
+ assert_that( extra_conf_store.ModuleForSourceFile( filename ), none() )
+
+
+ def ModuleForSourceFile_GlobalExtraConf_test( self ):
+ filename = PathToTestFile( 'extra_conf', 'some_file' )
+ extra_conf_file = PathToTestFile( 'extra_conf', 'global_extra_conf.py' )
+ with UserOption( 'global_ycm_extra_conf', extra_conf_file ):
+ module = extra_conf_store.ModuleForSourceFile( filename )
+ assert_that( inspect.ismodule( module ) )
+ assert_that( inspect.getfile( module ), equal_to( extra_conf_file ) )
+
+
+ @patch( 'ycmd.extra_conf_store._logger', autospec = True )
+ def CallGlobalExtraConfMethod_NoGlobalExtraConf_test( self, logger ):
+ with UserOption( 'global_ycm_extra_conf',
+ PathToTestFile( 'extra_conf', 'no_extra_conf.py' ) ):
+ extra_conf_store._CallGlobalExtraConfMethod( 'SomeMethod' )
+ assert_that( logger.method_calls, has_length( 1 ) )
+ logger.debug.assert_called_with( 'No global extra conf, not calling method '
+ 'SomeMethod' )
+
+
+ @patch( 'ycmd.extra_conf_store._logger', autospec = True )
+ def CallGlobalExtraConfMethod_NoMethodInGlobalExtraConf_test( self, logger ):
+ with UserOption( 'global_ycm_extra_conf',
+ PathToTestFile( 'extra_conf', 'global_extra_conf.py' ) ):
+ extra_conf_store._CallGlobalExtraConfMethod( 'MissingMethod' )
+ assert_that( logger.method_calls, has_length( 1 ) )
+ logger.debug.assert_called_with( 'Global extra conf not loaded or '
+ 'no function MissingMethod' )
+
+
+ @patch( 'ycmd.extra_conf_store._logger', autospec = True )
+ def CallGlobalExtraConfMethod_NoExceptionFromMethod_test( self, logger ):
+ extra_conf_file = PathToTestFile( 'extra_conf', 'global_extra_conf.py' )
+ with UserOption( 'global_ycm_extra_conf', extra_conf_file ):
+ extra_conf_store._CallGlobalExtraConfMethod( 'NoException' )
+ assert_that( logger.method_calls, has_length( 1 ) )
+ logger.info.assert_called_with( 'Calling global extra conf method '
+ 'NoException on conf file '
+ '{0}'.format( extra_conf_file ) )
+
+
+ @patch( 'ycmd.extra_conf_store._logger', autospec = True )
+ def CallGlobalExtraConfMethod_CatchExceptionFromMethod_test( self, logger ):
+ extra_conf_file = PathToTestFile( 'extra_conf', 'global_extra_conf.py' )
+ with UserOption( 'global_ycm_extra_conf', extra_conf_file ):
+ extra_conf_store._CallGlobalExtraConfMethod( 'RaiseException' )
+ assert_that( logger.method_calls, has_length( 2 ) )
+ logger.info.assert_called_with( 'Calling global extra conf method '
+ 'RaiseException on conf file '
+ '{0}'.format( extra_conf_file ) )
+ logger.exception.assert_called_with(
+ 'Error occurred while calling global extra conf method RaiseException '
+ 'on conf file {0}'.format( extra_conf_file ) )
+
+
+ @patch( 'ycmd.extra_conf_store._logger', autospec = True )
+ def CallGlobalExtraConfMethod_CatchExceptionFromExtraConf_test( self,
+ logger ):
+ extra_conf_file = PathToTestFile( 'extra_conf', 'erroneous_extra_conf.py' )
+ with UserOption( 'global_ycm_extra_conf', extra_conf_file ):
+ extra_conf_store._CallGlobalExtraConfMethod( 'NoException' )
+ assert_that( logger.method_calls, has_length( 1 ) )
+ logger.exception.assert_called_with( 'Error occurred while '
+ 'loading global extra conf '
+ '{0}'.format( extra_conf_file ) )
diff --git a/ycmd/tests/misc_handlers_test.py b/ycmd/tests/misc_handlers_test.py
--- a/ycmd/tests/misc_handlers_test.py
+++ b/ycmd/tests/misc_handlers_test.py
@@ -86,7 +86,7 @@ def MiscHandlers_FilterAndSortCandidates_Basic_test( app ):
@SharedYcmd
def MiscHandlers_LoadExtraConfFile_AlwaysJsonResponse_test( app ):
- filepath = PathToTestFile( '.ycm_extra_conf.py' )
+ filepath = PathToTestFile( 'extra_conf', 'project', '.ycm_extra_conf.py' )
extra_conf_data = BuildRequest( filepath = filepath )
assert_that( app.post_json( '/load_extra_conf_file', extra_conf_data ).json,
@@ -95,7 +95,7 @@ def MiscHandlers_LoadExtraConfFile_AlwaysJsonResponse_test( app ):
@SharedYcmd
def MiscHandlers_IgnoreExtraConfFile_AlwaysJsonResponse_test( app ):
- filepath = PathToTestFile( '.ycm_extra_conf.py' )
+ filepath = PathToTestFile( 'extra_conf', 'project', '.ycm_extra_conf.py' )
extra_conf_data = BuildRequest( filepath = filepath )
assert_that( app.post_json( '/ignore_extra_conf_file', extra_conf_data ).json,
diff --git a/ycmd/tests/testdata/extra_conf/erroneous_extra_conf.py b/ycmd/tests/testdata/extra_conf/erroneous_extra_conf.py
new file mode 100755
--- /dev/null
+++ b/ycmd/tests/testdata/extra_conf/erroneous_extra_conf.py
@@ -0,0 +1 @@
+raise Exception( 'Exception raised' )
diff --git a/ycmd/tests/testdata/extra_conf/global_extra_conf.py b/ycmd/tests/testdata/extra_conf/global_extra_conf.py
new file mode 100755
--- /dev/null
+++ b/ycmd/tests/testdata/extra_conf/global_extra_conf.py
@@ -0,0 +1,6 @@
+def NoException():
+ pass
+
+
+def RaiseException():
+ raise Exception( 'Exception raised' )
diff --git a/ycmd/tests/testdata/.ycm_extra_conf.py b/ycmd/tests/testdata/extra_conf/project/.ycm_extra_conf.py
similarity index 100%
rename from ycmd/tests/testdata/.ycm_extra_conf.py
rename to ycmd/tests/testdata/extra_conf/project/.ycm_extra_conf.py
| exit code not correct if importing ycm_core in global config fails
Hi,
I am not sure this is a real bug, but I encountered this while implementing handling of exit code in `emacs-ycmd`.
I had a `import ycm_core` in my global config. If importing fails there the line with `code = CompatibleWithCurrentCore()` in `__main__.py` will never be reached to return the correct exit code and then I just get an exit code 1.
| Ugh, that does sound like a bug.
| 2016-11-16T02:03:54 |
ycm-core/ycmd | 667 | ycm-core__ycmd-667 | [
"659"
] | 43c891a5aedd63ce451d024a2ccd277d27bede0e | diff --git a/ycmd/completers/cpp/flags.py b/ycmd/completers/cpp/flags.py
--- a/ycmd/completers/cpp/flags.py
+++ b/ycmd/completers/cpp/flags.py
@@ -181,8 +181,11 @@ def PrepareFlagsForClang( flags, filename, add_extra_clang_flags = True ):
flags = _RemoveUnusedFlags( flags, filename )
if add_extra_clang_flags:
flags = _EnableTypoCorrection( flags )
- flags = _SanitizeFlags( flags )
- return flags
+
+ vector = ycm_core.StringVector()
+ for flag in flags:
+ vector.append( ToCppStringCompatible( flag ) )
+ return vector
def _RemoveXclangFlags( flags ):
@@ -205,30 +208,6 @@ def _RemoveXclangFlags( flags ):
return sanitized_flags
-def _SanitizeFlags( flags ):
- """Drops unsafe flags. Currently these are only -arch flags; they tend to
- crash libclang."""
-
- sanitized_flags = []
- saw_arch = False
- for i, flag in enumerate( flags ):
- if flag == '-arch':
- saw_arch = True
- continue
- elif flag.startswith( '-arch' ):
- continue
- elif saw_arch:
- saw_arch = False
- continue
-
- sanitized_flags.append( flag )
-
- vector = ycm_core.StringVector()
- for flag in sanitized_flags:
- vector.append( ToCppStringCompatible( flag ) )
- return vector
-
-
def _RemoveFlagsPrecedingCompiler( flags ):
"""Assuming that the flag just before the first flag (which starts with a
dash) is the compiler path, removes all flags preceding it."""
| diff --git a/ycmd/tests/clang/flags_test.py b/ycmd/tests/clang/flags_test.py
--- a/ycmd/tests/clang/flags_test.py
+++ b/ycmd/tests/clang/flags_test.py
@@ -101,26 +101,6 @@ def FlagsForFile_FlagsCachedWhenDoCacheIsTrue_test( *args ):
assert_that( flags_list, contains( '-x', 'c' ) )
-def SanitizeFlags_Passthrough_test():
- eq_( [ '-foo', '-bar' ],
- list( flags._SanitizeFlags( [ '-foo', '-bar' ] ) ) )
-
-
-def SanitizeFlags_ArchRemoved_test():
- expected = [ '-foo', '-bar' ]
- to_remove = [ '-arch', 'arch_of_evil' ]
-
- eq_( expected,
- list( flags._SanitizeFlags( expected + to_remove ) ) )
-
- eq_( expected,
- list( flags._SanitizeFlags( to_remove + expected ) ) )
-
- eq_( expected,
- list( flags._SanitizeFlags(
- expected[ :1 ] + to_remove + expected[ -1: ] ) ) )
-
-
def RemoveUnusedFlags_Passthrough_test():
eq_( [ '-foo', '-bar' ],
flags._RemoveUnusedFlags( [ '-foo', '-bar' ], 'file' ) )
| module support for objective-c
I noticed the latest `YouCompleteMe` now supports `clang 3.9.0`.
So I did a little experiment (https://github.com/haifengkao/libclang-experiments-py).
To my surprise, the `libclang` built by `ycmd` can handle the objective-c modules successfully.
But `:YcmDiag` still fails with the annoying error `could not build module 'Foundation'`.

Since `libclang` now supports objective modules, what should I do to make it work with `YouCompleteMe`?
*update*
The YCM flags can be found in https://github.com/haifengkao/libclang-experiments-py/blob/master/.ycm_extra_conf.py
| I managed to create a simple test case
```py
from pprint import pprint
from ycmd.tests.clang import IsolatedYcmd, PathToTestFile
from ycmd.tests.test_utils import BuildRequest
@IsolatedYcmd
def Diagnostics_Module_test( app ):
contents = """
@import Foundation;
"""
flags = ['-x', 'objective-c',
'-arch', 'arm64',
'-fmodules',
'-miphoneos-version-min=9.3',
'-isysroot', '/Applications/Xcode.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS.sdk',
]
event_data = BuildRequest( compilation_flags = flags,
event_name = 'FileReadyToParse',
contents = contents,
filetype = 'objc' )
results = app.post_json( '/event_notification', event_data ).json
pprint (results)
```
The output is
```
[{u'fixit_available': False,
u'kind': u'ERROR',
u'location': {u'column_num': 11, u'filepath': u'/foo', u'line_num': 2},
u'location_extent': {u'end': {u'column_num': 0,
u'filepath': u'',
u'line_num': 0},
u'start': {u'column_num': 0,
u'filepath': u'',
u'line_num': 0}},
u'ranges': [{u'end': {u'column_num': 21,
u'filepath': u'/foo',
u'line_num': 2},
u'start': {u'column_num': 4,
u'filepath': u'/foo',
u'line_num': 2}}],
u'text': u"could not build module 'Foundation'"}]
.
----------------------------------------------------------------------
Ran 1 test in 0.367s
OK
```
I have tried `libclang` with C++ binding, and it still works without any problems.
I start to believe the problem comes from `ycm_core.so`, instead of `libclang.dylib`.
I created another test case with `TranslationUnit` in `YouCompleteMe` class.
It still handles the module syntax `@import Foundation;` successfully.
Now I don't really understand why `ycmd` would return `could not build module 'Foundation'`.
```c++
// ct.m contains "@import Foundation;"
fs::path test_file = fs::path( "ct.m" );
vector<string> flags;
flags.push_back("-fmodules");
TranslationUnit unit( test_file.string(),
std::vector< UnsavedFile >(),
flags,
clang_index_ );
std::vector< Diagnostic > diags = unit.Reparse( std::vector< UnsavedFile >());
// diags is an empty array. No errors are reported.
``` | 2016-12-06T09:00:49 |
pytorch/examples | 96 | pytorch__examples-96 | [
"95",
"95"
] | 179fb761ad203eb0234829dee39b0343a70551e9 | diff --git a/word_language_model/main.py b/word_language_model/main.py
--- a/word_language_model/main.py
+++ b/word_language_model/main.py
@@ -81,16 +81,6 @@ def batchify(data, bsz):
# Training code
###############################################################################
-def clip_gradient(model, clip):
- """Computes a gradient clipping coefficient based on gradient norm."""
- totalnorm = 0
- for p in model.parameters():
- modulenorm = p.grad.data.norm()
- totalnorm += modulenorm ** 2
- totalnorm = math.sqrt(totalnorm)
- return min(1, clip / (totalnorm + 1e-6))
-
-
def repackage_hidden(h):
"""Wraps hidden states in new Variables, to detach them from their history."""
if type(h) == Variable:
@@ -132,9 +122,9 @@ def train():
loss = criterion(output.view(-1, ntokens), targets)
loss.backward()
- clipped_lr = lr * clip_gradient(model, args.clip)
+ torch.nn.utils.clip_grad_norm(model.parameters(), args.clip)
for p in model.parameters():
- p.data.add_(-clipped_lr, p.grad.data)
+ p.data.add_(-lr, p.grad.data)
total_loss += loss.data
| LSTM language model baseline gap
The test ppl didn't reach the ppl of 113 as documented.
System
=====
GTX 1070
Driver Version: 367.57
cuDNN: 5
CUDA: 8.0
Intel i7 3770
```
| epoch 1 | 200/ 2323 batches | lr 20.00 | ms/batch 15.86 | loss 6.78 | ppl 883.54
| epoch 1 | 400/ 2323 batches | lr 20.00 | ms/batch 9.43 | loss 6.11 | ppl 451.70
| epoch 1 | 600/ 2323 batches | lr 20.00 | ms/batch 9.42 | loss 5.81 | ppl 332.98
| epoch 1 | 800/ 2323 batches | lr 20.00 | ms/batch 9.46 | loss 5.65 | ppl 283.32
| epoch 1 | 1000/ 2323 batches | lr 20.00 | ms/batch 9.44 | loss 5.53 | ppl 252.06
| epoch 1 | 1200/ 2323 batches | lr 20.00 | ms/batch 9.47 | loss 5.45 | ppl 232.68
| epoch 1 | 1400/ 2323 batches | lr 20.00 | ms/batch 9.44 | loss 5.29 | ppl 197.84
| epoch 1 | 1600/ 2323 batches | lr 20.00 | ms/batch 9.40 | loss 5.27 | ppl 193.50
| epoch 1 | 1800/ 2323 batches | lr 20.00 | ms/batch 9.43 | loss 5.26 | ppl 192.84
| epoch 1 | 2000/ 2323 batches | lr 20.00 | ms/batch 9.52 | loss 5.11 | ppl 165.52
| epoch 1 | 2200/ 2323 batches | lr 20.00 | ms/batch 9.44 | loss 5.00 | ppl 149.01
-----------------------------------------------------------------------------------------
| end of epoch 1 | time: 24.19s | valid loss 5.15 | valid ppl 172.34
-----------------------------------------------------------------------------------------
| epoch 2 | 200/ 2323 batches | lr 20.00 | ms/batch 9.50 | loss 5.01 | ppl 150.18
| epoch 2 | 400/ 2323 batches | lr 20.00 | ms/batch 9.46 | loss 5.07 | ppl 159.75
| epoch 2 | 600/ 2323 batches | lr 20.00 | ms/batch 9.48 | loss 4.97 | ppl 143.50
| epoch 2 | 800/ 2323 batches | lr 20.00 | ms/batch 9.71 | loss 4.92 | ppl 137.16
| epoch 2 | 1000/ 2323 batches | lr 20.00 | ms/batch 9.44 | loss 4.92 | ppl 136.96
| epoch 2 | 1200/ 2323 batches | lr 20.00 | ms/batch 9.44 | loss 4.89 | ppl 133.62
| epoch 2 | 1400/ 2323 batches | lr 20.00 | ms/batch 9.42 | loss 4.78 | ppl 118.79
| epoch 2 | 1600/ 2323 batches | lr 20.00 | ms/batch 9.45 | loss 4.83 | ppl 125.03
| epoch 2 | 1800/ 2323 batches | lr 20.00 | ms/batch 9.44 | loss 4.87 | ppl 130.80
| epoch 2 | 2000/ 2323 batches | lr 20.00 | ms/batch 9.43 | loss 4.69 | ppl 109.35
| epoch 2 | 2200/ 2323 batches | lr 20.00 | ms/batch 9.44 | loss 4.64 | ppl 103.29
-----------------------------------------------------------------------------------------
| end of epoch 2 | time: 22.96s | valid loss 4.96 | valid ppl 142.18
-----------------------------------------------------------------------------------------
| epoch 3 | 200/ 2323 batches | lr 20.00 | ms/batch 9.49 | loss 4.67 | ppl 106.62
| epoch 3 | 400/ 2323 batches | lr 20.00 | ms/batch 9.44 | loss 4.79 | ppl 120.30
| epoch 3 | 600/ 2323 batches | lr 20.00 | ms/batch 9.45 | loss 4.68 | ppl 107.72
| epoch 3 | 800/ 2323 batches | lr 20.00 | ms/batch 9.43 | loss 4.65 | ppl 104.60
| epoch 3 | 1000/ 2323 batches | lr 20.00 | ms/batch 9.43 | loss 4.67 | ppl 106.95
| epoch 3 | 1200/ 2323 batches | lr 20.00 | ms/batch 9.44 | loss 4.66 | ppl 105.12
| epoch 3 | 1400/ 2323 batches | lr 20.00 | ms/batch 9.42 | loss 4.55 | ppl 94.70
| epoch 3 | 1600/ 2323 batches | lr 20.00 | ms/batch 9.45 | loss 4.62 | ppl 101.98
| epoch 3 | 1800/ 2323 batches | lr 20.00 | ms/batch 9.43 | loss 4.68 | ppl 108.26
| epoch 3 | 2000/ 2323 batches | lr 20.00 | ms/batch 9.42 | loss 4.48 | ppl 88.55
| epoch 3 | 2200/ 2323 batches | lr 20.00 | ms/batch 9.42 | loss 4.45 | ppl 85.87
-----------------------------------------------------------------------------------------
| end of epoch 3 | time: 22.89s | valid loss 4.90 | valid ppl 133.71
-----------------------------------------------------------------------------------------
| epoch 4 | 200/ 2323 batches | lr 20.00 | ms/batch 9.49 | loss 4.48 | ppl 88.58
| epoch 4 | 400/ 2323 batches | lr 20.00 | ms/batch 9.43 | loss 4.63 | ppl 102.72
| epoch 4 | 600/ 2323 batches | lr 20.00 | ms/batch 9.48 | loss 4.52 | ppl 91.82
| epoch 4 | 800/ 2323 batches | lr 20.00 | ms/batch 9.58 | loss 4.50 | ppl 89.90
| epoch 4 | 1000/ 2323 batches | lr 20.00 | ms/batch 9.57 | loss 4.53 | ppl 92.52
| epoch 4 | 1200/ 2323 batches | lr 20.00 | ms/batch 9.59 | loss 4.52 | ppl 91.63
| epoch 4 | 1400/ 2323 batches | lr 20.00 | ms/batch 9.44 | loss 4.42 | ppl 82.96
| epoch 4 | 1600/ 2323 batches | lr 20.00 | ms/batch 9.44 | loss 4.50 | ppl 90.31
| epoch 4 | 1800/ 2323 batches | lr 20.00 | ms/batch 9.43 | loss 4.57 | ppl 96.44
| epoch 4 | 2000/ 2323 batches | lr 20.00 | ms/batch 9.44 | loss 4.37 | ppl 78.93
| epoch 4 | 2200/ 2323 batches | lr 20.00 | ms/batch 9.43 | loss 4.34 | ppl 77.00
-----------------------------------------------------------------------------------------
| end of epoch 4 | time: 23.00s | valid loss 4.89 | valid ppl 133.30
-----------------------------------------------------------------------------------------
| epoch 5 | 200/ 2323 batches | lr 20.00 | ms/batch 9.47 | loss 4.38 | ppl 79.91
| epoch 5 | 400/ 2323 batches | lr 20.00 | ms/batch 9.42 | loss 4.53 | ppl 92.42
| epoch 5 | 600/ 2323 batches | lr 20.00 | ms/batch 9.42 | loss 4.42 | ppl 83.08
| epoch 5 | 800/ 2323 batches | lr 20.00 | ms/batch 9.44 | loss 4.40 | ppl 81.46
| epoch 5 | 1000/ 2323 batches | lr 20.00 | ms/batch 9.45 | loss 4.44 | ppl 84.81
| epoch 5 | 1200/ 2323 batches | lr 20.00 | ms/batch 9.44 | loss 4.44 | ppl 84.47
| epoch 5 | 1400/ 2323 batches | lr 20.00 | ms/batch 9.43 | loss 4.34 | ppl 76.87
| epoch 5 | 1600/ 2323 batches | lr 20.00 | ms/batch 9.44 | loss 4.42 | ppl 83.43
| epoch 5 | 1800/ 2323 batches | lr 20.00 | ms/batch 9.43 | loss 4.49 | ppl 89.41
| epoch 5 | 2000/ 2323 batches | lr 20.00 | ms/batch 9.43 | loss 4.30 | ppl 73.41
| epoch 5 | 2200/ 2323 batches | lr 20.00 | ms/batch 9.46 | loss 4.28 | ppl 71.96
-----------------------------------------------------------------------------------------
| end of epoch 5 | time: 22.90s | valid loss 4.89 | valid ppl 132.54
-----------------------------------------------------------------------------------------
| epoch 6 | 200/ 2323 batches | lr 20.00 | ms/batch 9.49 | loss 4.32 | ppl 74.99
| epoch 6 | 400/ 2323 batches | lr 20.00 | ms/batch 9.46 | loss 4.47 | ppl 87.01
| epoch 6 | 600/ 2323 batches | lr 20.00 | ms/batch 9.44 | loss 4.36 | ppl 77.89
| epoch 6 | 800/ 2323 batches | lr 20.00 | ms/batch 9.45 | loss 4.34 | ppl 76.46
| epoch 6 | 1000/ 2323 batches | lr 20.00 | ms/batch 9.44 | loss 4.38 | ppl 79.95
| epoch 6 | 1200/ 2323 batches | lr 20.00 | ms/batch 9.53 | loss 4.37 | ppl 79.05
| epoch 6 | 1400/ 2323 batches | lr 20.00 | ms/batch 9.44 | loss 4.29 | ppl 72.78
| epoch 6 | 1600/ 2323 batches | lr 20.00 | ms/batch 9.43 | loss 4.37 | ppl 79.35
| epoch 6 | 1800/ 2323 batches | lr 20.00 | ms/batch 9.45 | loss 4.44 | ppl 84.42
| epoch 6 | 2000/ 2323 batches | lr 20.00 | ms/batch 9.45 | loss 4.24 | ppl 69.63
| epoch 6 | 2200/ 2323 batches | lr 20.00 | ms/batch 9.42 | loss 4.23 | ppl 68.58
-----------------------------------------------------------------------------------------
| end of epoch 6 | time: 22.92s | valid loss 4.89 | valid ppl 132.85
-----------------------------------------------------------------------------------------
=========================================================================================
| End of training | test loss 4.86 | test ppl 128.44
```
LSTM language model baseline gap
The test ppl didn't reach the ppl of 113 as documented.
System
=====
GTX 1070
Driver Version: 367.57
cuDNN: 5
CUDA: 8.0
Intel i7 3770
```
| epoch 1 | 200/ 2323 batches | lr 20.00 | ms/batch 15.86 | loss 6.78 | ppl 883.54
| epoch 1 | 400/ 2323 batches | lr 20.00 | ms/batch 9.43 | loss 6.11 | ppl 451.70
| epoch 1 | 600/ 2323 batches | lr 20.00 | ms/batch 9.42 | loss 5.81 | ppl 332.98
| epoch 1 | 800/ 2323 batches | lr 20.00 | ms/batch 9.46 | loss 5.65 | ppl 283.32
| epoch 1 | 1000/ 2323 batches | lr 20.00 | ms/batch 9.44 | loss 5.53 | ppl 252.06
| epoch 1 | 1200/ 2323 batches | lr 20.00 | ms/batch 9.47 | loss 5.45 | ppl 232.68
| epoch 1 | 1400/ 2323 batches | lr 20.00 | ms/batch 9.44 | loss 5.29 | ppl 197.84
| epoch 1 | 1600/ 2323 batches | lr 20.00 | ms/batch 9.40 | loss 5.27 | ppl 193.50
| epoch 1 | 1800/ 2323 batches | lr 20.00 | ms/batch 9.43 | loss 5.26 | ppl 192.84
| epoch 1 | 2000/ 2323 batches | lr 20.00 | ms/batch 9.52 | loss 5.11 | ppl 165.52
| epoch 1 | 2200/ 2323 batches | lr 20.00 | ms/batch 9.44 | loss 5.00 | ppl 149.01
-----------------------------------------------------------------------------------------
| end of epoch 1 | time: 24.19s | valid loss 5.15 | valid ppl 172.34
-----------------------------------------------------------------------------------------
| epoch 2 | 200/ 2323 batches | lr 20.00 | ms/batch 9.50 | loss 5.01 | ppl 150.18
| epoch 2 | 400/ 2323 batches | lr 20.00 | ms/batch 9.46 | loss 5.07 | ppl 159.75
| epoch 2 | 600/ 2323 batches | lr 20.00 | ms/batch 9.48 | loss 4.97 | ppl 143.50
| epoch 2 | 800/ 2323 batches | lr 20.00 | ms/batch 9.71 | loss 4.92 | ppl 137.16
| epoch 2 | 1000/ 2323 batches | lr 20.00 | ms/batch 9.44 | loss 4.92 | ppl 136.96
| epoch 2 | 1200/ 2323 batches | lr 20.00 | ms/batch 9.44 | loss 4.89 | ppl 133.62
| epoch 2 | 1400/ 2323 batches | lr 20.00 | ms/batch 9.42 | loss 4.78 | ppl 118.79
| epoch 2 | 1600/ 2323 batches | lr 20.00 | ms/batch 9.45 | loss 4.83 | ppl 125.03
| epoch 2 | 1800/ 2323 batches | lr 20.00 | ms/batch 9.44 | loss 4.87 | ppl 130.80
| epoch 2 | 2000/ 2323 batches | lr 20.00 | ms/batch 9.43 | loss 4.69 | ppl 109.35
| epoch 2 | 2200/ 2323 batches | lr 20.00 | ms/batch 9.44 | loss 4.64 | ppl 103.29
-----------------------------------------------------------------------------------------
| end of epoch 2 | time: 22.96s | valid loss 4.96 | valid ppl 142.18
-----------------------------------------------------------------------------------------
| epoch 3 | 200/ 2323 batches | lr 20.00 | ms/batch 9.49 | loss 4.67 | ppl 106.62
| epoch 3 | 400/ 2323 batches | lr 20.00 | ms/batch 9.44 | loss 4.79 | ppl 120.30
| epoch 3 | 600/ 2323 batches | lr 20.00 | ms/batch 9.45 | loss 4.68 | ppl 107.72
| epoch 3 | 800/ 2323 batches | lr 20.00 | ms/batch 9.43 | loss 4.65 | ppl 104.60
| epoch 3 | 1000/ 2323 batches | lr 20.00 | ms/batch 9.43 | loss 4.67 | ppl 106.95
| epoch 3 | 1200/ 2323 batches | lr 20.00 | ms/batch 9.44 | loss 4.66 | ppl 105.12
| epoch 3 | 1400/ 2323 batches | lr 20.00 | ms/batch 9.42 | loss 4.55 | ppl 94.70
| epoch 3 | 1600/ 2323 batches | lr 20.00 | ms/batch 9.45 | loss 4.62 | ppl 101.98
| epoch 3 | 1800/ 2323 batches | lr 20.00 | ms/batch 9.43 | loss 4.68 | ppl 108.26
| epoch 3 | 2000/ 2323 batches | lr 20.00 | ms/batch 9.42 | loss 4.48 | ppl 88.55
| epoch 3 | 2200/ 2323 batches | lr 20.00 | ms/batch 9.42 | loss 4.45 | ppl 85.87
-----------------------------------------------------------------------------------------
| end of epoch 3 | time: 22.89s | valid loss 4.90 | valid ppl 133.71
-----------------------------------------------------------------------------------------
| epoch 4 | 200/ 2323 batches | lr 20.00 | ms/batch 9.49 | loss 4.48 | ppl 88.58
| epoch 4 | 400/ 2323 batches | lr 20.00 | ms/batch 9.43 | loss 4.63 | ppl 102.72
| epoch 4 | 600/ 2323 batches | lr 20.00 | ms/batch 9.48 | loss 4.52 | ppl 91.82
| epoch 4 | 800/ 2323 batches | lr 20.00 | ms/batch 9.58 | loss 4.50 | ppl 89.90
| epoch 4 | 1000/ 2323 batches | lr 20.00 | ms/batch 9.57 | loss 4.53 | ppl 92.52
| epoch 4 | 1200/ 2323 batches | lr 20.00 | ms/batch 9.59 | loss 4.52 | ppl 91.63
| epoch 4 | 1400/ 2323 batches | lr 20.00 | ms/batch 9.44 | loss 4.42 | ppl 82.96
| epoch 4 | 1600/ 2323 batches | lr 20.00 | ms/batch 9.44 | loss 4.50 | ppl 90.31
| epoch 4 | 1800/ 2323 batches | lr 20.00 | ms/batch 9.43 | loss 4.57 | ppl 96.44
| epoch 4 | 2000/ 2323 batches | lr 20.00 | ms/batch 9.44 | loss 4.37 | ppl 78.93
| epoch 4 | 2200/ 2323 batches | lr 20.00 | ms/batch 9.43 | loss 4.34 | ppl 77.00
-----------------------------------------------------------------------------------------
| end of epoch 4 | time: 23.00s | valid loss 4.89 | valid ppl 133.30
-----------------------------------------------------------------------------------------
| epoch 5 | 200/ 2323 batches | lr 20.00 | ms/batch 9.47 | loss 4.38 | ppl 79.91
| epoch 5 | 400/ 2323 batches | lr 20.00 | ms/batch 9.42 | loss 4.53 | ppl 92.42
| epoch 5 | 600/ 2323 batches | lr 20.00 | ms/batch 9.42 | loss 4.42 | ppl 83.08
| epoch 5 | 800/ 2323 batches | lr 20.00 | ms/batch 9.44 | loss 4.40 | ppl 81.46
| epoch 5 | 1000/ 2323 batches | lr 20.00 | ms/batch 9.45 | loss 4.44 | ppl 84.81
| epoch 5 | 1200/ 2323 batches | lr 20.00 | ms/batch 9.44 | loss 4.44 | ppl 84.47
| epoch 5 | 1400/ 2323 batches | lr 20.00 | ms/batch 9.43 | loss 4.34 | ppl 76.87
| epoch 5 | 1600/ 2323 batches | lr 20.00 | ms/batch 9.44 | loss 4.42 | ppl 83.43
| epoch 5 | 1800/ 2323 batches | lr 20.00 | ms/batch 9.43 | loss 4.49 | ppl 89.41
| epoch 5 | 2000/ 2323 batches | lr 20.00 | ms/batch 9.43 | loss 4.30 | ppl 73.41
| epoch 5 | 2200/ 2323 batches | lr 20.00 | ms/batch 9.46 | loss 4.28 | ppl 71.96
-----------------------------------------------------------------------------------------
| end of epoch 5 | time: 22.90s | valid loss 4.89 | valid ppl 132.54
-----------------------------------------------------------------------------------------
| epoch 6 | 200/ 2323 batches | lr 20.00 | ms/batch 9.49 | loss 4.32 | ppl 74.99
| epoch 6 | 400/ 2323 batches | lr 20.00 | ms/batch 9.46 | loss 4.47 | ppl 87.01
| epoch 6 | 600/ 2323 batches | lr 20.00 | ms/batch 9.44 | loss 4.36 | ppl 77.89
| epoch 6 | 800/ 2323 batches | lr 20.00 | ms/batch 9.45 | loss 4.34 | ppl 76.46
| epoch 6 | 1000/ 2323 batches | lr 20.00 | ms/batch 9.44 | loss 4.38 | ppl 79.95
| epoch 6 | 1200/ 2323 batches | lr 20.00 | ms/batch 9.53 | loss 4.37 | ppl 79.05
| epoch 6 | 1400/ 2323 batches | lr 20.00 | ms/batch 9.44 | loss 4.29 | ppl 72.78
| epoch 6 | 1600/ 2323 batches | lr 20.00 | ms/batch 9.43 | loss 4.37 | ppl 79.35
| epoch 6 | 1800/ 2323 batches | lr 20.00 | ms/batch 9.45 | loss 4.44 | ppl 84.42
| epoch 6 | 2000/ 2323 batches | lr 20.00 | ms/batch 9.45 | loss 4.24 | ppl 69.63
| epoch 6 | 2200/ 2323 batches | lr 20.00 | ms/batch 9.42 | loss 4.23 | ppl 68.58
-----------------------------------------------------------------------------------------
| end of epoch 6 | time: 22.92s | valid loss 4.89 | valid ppl 132.85
-----------------------------------------------------------------------------------------
=========================================================================================
| End of training | test loss 4.86 | test ppl 128.44
```
| 2017-03-03T22:05:13 |
||
pytorch/examples | 182 | pytorch__examples-182 | [
"181"
] | 2d0f1c46d0db798339b46adc4e9154a04fabdd65 | diff --git a/fast_neural_style/neural_style/utils.py b/fast_neural_style/neural_style/utils.py
--- a/fast_neural_style/neural_style/utils.py
+++ b/fast_neural_style/neural_style/utils.py
@@ -39,5 +39,5 @@ def normalize_batch(batch):
std[:, 2, :, :] = 0.225
batch = torch.div(batch, 255.0)
batch -= Variable(mean)
- batch /= Variable(std)
+ batch = batch / Variable(std)
return batch
| Division error
Training a model for `fast-neural-style` raises a RuntimeError from variable division during input normalization.
- python2.7
- torch==0.1.12.post2
- torchvision==0.1.8
````
Traceback (most recent call last):
File "neural_style/neural_style.py", line 226, in <module>
main()
File "neural_style/neural_style.py", line 220, in main
train(args)
File "neural_style/neural_style.py", line 65, in train
style_v = utils.normalize_batch(style_v)
File "/home/paperspace/embro/neural_style/utils.py", line 42, in normalize_batch
batch /= Variable(std)
File "/usr/local/lib/python2.7/dist-packages/torch/autograd/variable.py", line 793, in __idiv__
return self.div_(other)
File "/usr/local/lib/python2.7/dist-packages/torch/autograd/variable.py", line 323, in div_
raise RuntimeError("div_ only supports scalar multiplication")
````
| 2017-07-20T01:43:03 |
||
pytorch/examples | 189 | pytorch__examples-189 | [
"161"
] | 6b17f79eee34c1bec34d45e334e6ce5392f47aa8 | diff --git a/super_resolution/model.py b/super_resolution/model.py
--- a/super_resolution/model.py
+++ b/super_resolution/model.py
@@ -1,22 +1,11 @@
import torch
import torch.nn as nn
+import torch.nn.init as init
from numpy.random import normal
from numpy.linalg import svd
from math import sqrt
-def _get_orthogonal_init_weights(weights):
- fan_out = weights.size(0)
- fan_in = weights.size(1) * weights.size(2) * weights.size(3)
-
- u, _, v = svd(normal(0.0, 1.0, (fan_out, fan_in)), full_matrices=False)
-
- if u.shape == (fan_out, fan_in):
- return torch.Tensor(u.reshape(weights.size()))
- else:
- return torch.Tensor(v.reshape(weights.size()))
-
-
class Net(nn.Module):
def __init__(self, upscale_factor):
super(Net, self).__init__()
@@ -38,7 +27,7 @@ def forward(self, x):
return x
def _initialize_weights(self):
- self.conv1.weight.data.copy_(_get_orthogonal_init_weights(self.conv1.weight) * sqrt(2))
- self.conv2.weight.data.copy_(_get_orthogonal_init_weights(self.conv2.weight) * sqrt(2))
- self.conv3.weight.data.copy_(_get_orthogonal_init_weights(self.conv3.weight) * sqrt(2))
- self.conv4.weight.data.copy_(_get_orthogonal_init_weights(self.conv4.weight))
+ init.orthogonal(self.conv1.weight, init.gain('relu'))
+ init.orthogonal(self.conv2.weight, init.gain('relu'))
+ init.orthogonal(self.conv3.weight, init.gain('relu'))
+ init.orthogonal(self.conv4.weight)
| [super_resolution]
def _get_orthogonal_init_weights(weights):
fan_out = weights.size(0)
fan_in = weights.size(1) * weights.size(2) * weights.size(3)
u, _, v = svd(normal(0.0, 1.0, (fan_out, fan_in)), full_matrices=False)
if u.shape == (fan_out, fan_in):
return torch.Tensor(u.reshape(weights.size()))
else:
return torch.Tensor(v.reshape(weights.size()))
Why do the above operation?
| I have the same question,did u solved that? | 2017-07-23T10:40:35 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.