Dataset schema (column name: type and value statistics):
code: string (66 to 870k characters)
docstring: string (19 to 26.7k characters)
func_name: string (1 to 138 characters)
language: string (1 class)
repo: string (7 to 68 characters)
path: string (5 to 324 characters)
url: string (46 to 389 characters)
license: string (7 classes)
def expr_deserializer(
    expr: str | pl.Expr | list[pl.Expr] | None,
) -> pl.Expr | list[pl.Expr] | None:
    """Deserialize a polars expression or list thereof from json.

    This is applied both during deserialization and validation.
    """
    if expr is None:
        return None
    elif isinstance(expr, pl.Expr):
        return expr
    elif isinstance(expr, list):
        return expr
    elif isinstance(expr, str):
        if expr == "null":
            return None
        # can be either a list of expr or expr
        elif expr[0] == "[":
            return [
                pl.Expr.deserialize(io.StringIO(e), format="json")
                for e in json.loads(expr)
            ]
        else:
            return pl.Expr.deserialize(io.StringIO(expr), format="json")
    else:
        raise ValueError(f"{expr} can not be deserialized.")
Deserialize a polars expression or list thereof from json. This is applied both during deserialization and validation.
expr_deserializer
python
JakobGM/patito
src/patito/_pydantic/column_info.py
https://github.com/JakobGM/patito/blob/master/src/patito/_pydantic/column_info.py
MIT
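For orientation, a round-trip through this deserializer could look like the sketch below. It assumes a polars version where `Expr.meta.serialize(format="json")` is available (matching the `pl.Expr.deserialize(..., format="json")` calls above); `expr_deserializer` is the function from this entry.

import json

import polars as pl

# Serialize a single expression to JSON, then feed the string back through
# the deserializer shown above.
expr = pl.col("price") * 1.25
serialized = expr.meta.serialize(format="json")
assert expr_deserializer(serialized).meta.eq(expr)

# A list of expressions is stored as a JSON array of serialized expressions,
# which the `expr[0] == "["` branch unpacks element by element.
serialized_list = json.dumps(
    [e.meta.serialize(format="json") for e in [pl.col("a"), pl.col("b")]]
)
deserialized = expr_deserializer(serialized_list)
assert isinstance(deserialized, list) and len(deserialized) == 2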
def expr_or_col_name_deserializer(expr: str | pl.Expr | None) -> pl.Expr | str | None:
    """Deserialize a polars expression or column name from json.

    This is applied both during deserialization and validation.
    """
    if expr is None:
        return None
    elif isinstance(expr, pl.Expr):
        return expr
    elif isinstance(expr, list):
        return expr
    elif isinstance(expr, str):
        # Default behaviour
        if expr == "null":
            return None
        else:
            try:
                return pl.Expr.deserialize(io.StringIO(expr), format="json")
            except ComputeError:
                try:
                    # Column name is being deserialized
                    return json.loads(expr)
                except json.JSONDecodeError:
                    # Column name has been passed literally
                    # to ColumnInfo(derived_from="foo")
                    return expr
    else:
        raise ValueError(f"{expr} can not be deserialized.")
Deserialize a polars expression or column name from json. This is applied both during deserialization and validation.
expr_or_col_name_deserializer
python
JakobGM/patito
src/patito/_pydantic/column_info.py
https://github.com/JakobGM/patito/blob/master/src/patito/_pydantic/column_info.py
MIT
def __repr__(self) -> str:
    """Print only Field attributes whose values are not default (mainly None)."""
    not_default_field = {
        field: getattr(self, field)
        for field in self.model_fields
        if getattr(self, field) is not self.model_fields[field].default
    }
    string = ""
    for field, value in not_default_field.items():
        string += f"{field}={value}, "
    if string:
        # remove trailing comma and space
        string = string[:-2]
    return f"ColumnInfo({string})"
Print only Field attributes whose values are not default (mainly None).
__repr__
python
JakobGM/patito
src/patito/_pydantic/column_info.py
https://github.com/JakobGM/patito/blob/master/src/patito/_pydantic/column_info.py
MIT
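A hypothetical illustration of the resulting repr, assuming `ColumnInfo` is importable from the module listed in this entry's path and accepts the keyword arguments used elsewhere in this dataset:

from patito._pydantic.column_info import ColumnInfo

# Attributes left at their defaults (mostly None) are omitted from the repr.
print(repr(ColumnInfo()))             # ColumnInfo()
print(repr(ColumnInfo(unique=True)))  # ColumnInfo(unique=True)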
def __repr_args__(self) -> "ReprArgs":
    """Returns the attributes to show in __str__, __repr__, and __pretty__; this is generally overridden.

    Can either return:
    * name - value pairs, e.g.: `[('foo_name', 'foo'), ('bar_name', ['b', 'a', 'r'])]`
    * or, just values, e.g.: `[(None, 'foo'), (None, ['b', 'a', 'r'])]`
    """
    attrs = ((s, getattr(self, s)) for s in self.__slots__)
    return [(a, v) for a, v in attrs if v is not None]
Returns the attributes to show in __str__, __repr__, and __pretty__; this is generally overridden. Can either return: * name - value pairs, e.g.: `[('foo_name', 'foo'), ('bar_name', ['b', 'a', 'r'])]` * or, just values, e.g.: `[(None, 'foo'), (None, ['b', 'a', 'r'])]`
__repr_args__
python
JakobGM/patito
src/patito/_pydantic/repr.py
https://github.com/JakobGM/patito/blob/master/src/patito/_pydantic/repr.py
MIT
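A hypothetical subclass shows how the slots-based filtering plays out (the `Point` class and its slots are invented for illustration; `Representation` is the base class this method belongs to):

class Point(Representation):
    __slots__ = ("x", "y", "label")

    def __init__(self, x, y, label=None):
        self.x, self.y, self.label = x, y, label

# None-valued slots are dropped from the repr args.
assert Point(1, 2).__repr_args__() == [("x", 1), ("y", 2)]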
def __pretty__(
    self, fmt: Callable[[Any], Any], **kwargs: Any
) -> Generator[Any, None, None]:
    """Used by devtools (https://python-devtools.helpmanual.io/) to provide human-readable representations of objects."""
    yield self.__repr_name__() + "("
    yield 1
    for name, value in self.__repr_args__():
        if name is not None:
            yield name + "="
        yield fmt(value)
        yield ","
    yield 0
    yield -1
    yield ")"
Used by devtools (https://python-devtools.helpmanual.io/) to provide human-readable representations of objects.
__pretty__
python
JakobGM/patito
src/patito/_pydantic/repr.py
https://github.com/JakobGM/patito/blob/master/src/patito/_pydantic/repr.py
MIT
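For the hypothetical `Point` class sketched above, the generator yields a flat token stream that devtools renders with indentation; the integers 1, 0, and -1 are indent markers. This assumes the vendored `Representation` base class also provides `__repr_name__`, as in pydantic v1:

tokens = list(Point(1, 2).__pretty__(fmt=repr))
assert tokens == ["Point(", 1, "x=", "1", ",", "y=", "2", ",", 0, -1, ")"]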
def display_as_type(obj: Any) -> str:
    """Pretty representation of a type, should be as close as possible to the original type definition string.

    Takes some logic from `typing._type_repr`.
    """
    if isinstance(obj, types.FunctionType):
        return obj.__name__
    elif obj is ...:
        return "..."
    elif isinstance(obj, Representation):
        return repr(obj)

    if not isinstance(obj, (typing_base, WithArgsTypes, type)):
        obj = obj.__class__

    if origin_is_union(get_origin(obj)):
        args = ", ".join(map(display_as_type, get_args(obj)))
        return f"Union[{args}]"
    elif isinstance(obj, WithArgsTypes):
        if get_origin(obj) == Literal:
            args = ", ".join(map(repr, get_args(obj)))
        else:
            args = ", ".join(map(display_as_type, get_args(obj)))
        return f"{obj.__qualname__}[{args}]"
    elif isinstance(obj, type):
        return obj.__qualname__
    else:
        return repr(obj).replace("typing.", "").replace("typing_extensions.", "")
Pretty representation of a type, should be as close as possible to the original type definition string. Takes some logic from `typing._type_repr`.
display_as_type
python
JakobGM/patito
src/patito/_pydantic/repr.py
https://github.com/JakobGM/patito/blob/master/src/patito/_pydantic/repr.py
MIT
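A few illustrative calls, as a sketch (the expected strings in the comments are indicative; exact output can vary by Python version):

from typing import Optional, Union

print(display_as_type(int))              # int
print(display_as_type(Union[str, int]))  # Union[str, int]
print(display_as_type(Optional[int]))    # Union[int, NoneType] -- Optional is a Union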
def schema_for_model(cls: type[ModelType]) -> dict[str, dict[str, Any]]:
    """Return schema properties where definition references have been resolved.

    Returns:
        Field information as a dictionary where the keys are field names and the
        values are dictionaries containing metadata information about the field
        itself.

    Raises:
        TypeError: if a field is annotated with an enum where the values are of
            different types.

    """
    schema = cls.model_json_schema(by_alias=False, ref_template="{model}")
    fields = {}
    # first resolve definitions for nested models
    # TODO checks for one-way references, if models are self-referencing this
    # falls apart with recursion depth error
    for f in cls.model_fields.values():
        annotation = f.annotation
        cls._update_dfn(annotation, schema)
        for a in get_args(annotation):
            cls._update_dfn(a, schema)
    for field_name, field_info in schema["properties"].items():
        fields[field_name] = _append_field_info_to_props(
            field_info=field_info,
            field_name=field_name,
            required=field_name in schema.get("required", set()),
            model_schema=schema,
        )
    schema["properties"] = fields
    return schema
Return schema properties where definition references have been resolved. Returns: Field information as a dictionary where the keys are field names and the values are dictionaries containing metadata information about the field itself. Raises: TypeError: if a field is annotated with an enum where the values are of different types.
schema_for_model
python
JakobGM/patito
src/patito/_pydantic/schema.py
https://github.com/JakobGM/patito/blob/master/src/patito/_pydantic/schema.py
MIT
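A rough usage sketch, assuming `pt.Model.model_schema` (exercised by `test_model_schema` further down) is a thin wrapper over this helper; the model names here are hypothetical:

import patito as pt
from patito._pydantic.schema import schema_for_model

class Inner(pt.Model):
    a: int = pt.Field(ge=0)

class Outer(pt.Model):
    inner: Inner

schema = schema_for_model(Outer)
# The "$ref" to Inner has been resolved, so the nested schema is available
# directly under the field's properties.
assert schema["properties"]["inner"]["properties"]["a"]["type"] == "integer"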
def validate_polars_dtype(
    annotation: type[Any] | None,
    dtype: DataType | DataTypeClass | None,
    column: str | None = None,
) -> None:
    """Check that the polars dtype is valid for the given annotation. Raises ValueError if not.

    Args:
        annotation (type[Any] | None): python type annotation
        dtype (DataType | DataTypeClass | None): polars dtype
        column (Optional[str], optional): column name. Defaults to None.

    """
    if (
        dtype is None or annotation is None
    ):  # no potential conflict between type annotation and chosen polars type
        return
    valid_dtypes = DtypeResolver(annotation).valid_polars_dtypes()
    if dtype not in valid_dtypes:
        if column:
            column_msg = f" for column `{column}`"
        else:
            column_msg = ""
        raise ValueError(
            f"Invalid dtype {dtype}{column_msg}. Allowable polars dtypes for {display_as_type(annotation)} are: {', '.join([str(x) for x in valid_dtypes])}."
        )
    return
Check that the polars dtype is valid for the given annotation. Raises ValueError if not. Args: annotation (type[Any] | None): python type annotation dtype (DataType | DataTypeClass | None): polars dtype column (Optional[str], optional): column name. Defaults to None.
validate_polars_dtype
python
JakobGM/patito
src/patito/_pydantic/dtypes/dtypes.py
https://github.com/JakobGM/patito/blob/master/src/patito/_pydantic/dtypes/dtypes.py
MIT
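A minimal usage sketch, mirroring the assertions in `test_dtype_validation` later in this file:

import polars as pl

# Compatible annotation/dtype pairs pass silently...
validate_polars_dtype(int, pl.Int16)

# ...while incompatible pairs raise a ValueError listing the allowed dtypes.
try:
    validate_polars_dtype(int, pl.String, column="foo")
except ValueError as err:
    print(err)  # Invalid dtype String for column `foo`. Allowable polars dtypes for int are: ...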
def validate_annotation(
    annotation: type[Any] | Any | None, column: str | None = None
) -> None:
    """Check that the provided annotation has polars/patito support (we can resolve it to a default dtype). Raises ValueError if not.

    Args:
        annotation (type[Any] | None): python type annotation
        column (Optional[str], optional): column name. Defaults to None.

    """
    default_dtype = DtypeResolver(annotation).default_polars_dtype()
    if default_dtype is None:
        valid_polars_dtypes = DtypeResolver(annotation).valid_polars_dtypes()
        if column:
            column_msg = f" for column `{column}`"
        else:
            column_msg = ""
        if len(valid_polars_dtypes) == 0:
            raise ValueError(
                f"Annotation {display_as_type(annotation)}{column_msg} is not compatible with any polars dtypes."
            )
        else:
            raise ValueError(
                f"Unable to determine default dtype for annotation {display_as_type(annotation)}{column_msg}. Please provide a valid default polars dtype via the `dtype` argument to `Field`. Valid dtypes are: {', '.join([str(x) for x in valid_polars_dtypes])}."
            )
    return
Check that the provided annotation has polars/patito support (we can resolve it to a default dtype). Raises ValueError if not. Args: annotation (type[Any] | None): python type annotation column (Optional[str], optional): column name. Defaults to None.
validate_annotation
python
JakobGM/patito
src/patito/_pydantic/dtypes/dtypes.py
https://github.com/JakobGM/patito/blob/master/src/patito/_pydantic/dtypes/dtypes.py
MIT
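And the companion check, again sketched from the behaviour pinned down by `test_annotation_validation` below:

from typing import Optional, Union

validate_annotation(int)            # resolves to a default dtype, no error
validate_annotation(Optional[int])  # also fine

# A union has several valid dtypes but no single default, so a ValueError
# asks the caller to pin one down via Field(dtype=...).
try:
    validate_annotation(Union[str, int])
except ValueError as err:
    print(err)  # Unable to determine default dtype for annotation ...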
def is_optional(type_annotation: type[Any] | Any | None) -> bool:
    """Return True if the given type annotation is an Optional annotation.

    Args:
        type_annotation: The type annotation to be checked.

    Returns:
        True if the outermost type is Optional.

    """
    return (get_origin(type_annotation) in UNION_TYPES) and (
        type(None) in get_args(type_annotation)
    )
Return True if the given type annotation is an Optional annotation. Args: type_annotation: The type annotation to be checked. Returns: True if the outermost type is Optional.
is_optional
python
JakobGM/patito
src/patito/_pydantic/dtypes/utils.py
https://github.com/JakobGM/patito/blob/master/src/patito/_pydantic/dtypes/utils.py
MIT
def unwrap_optional(type_annotation: type[Any] | Any) -> type:
    """Return the inner, wrapped type of an Optional.

    Is a no-op for non-Optional types.

    Args:
        type_annotation: The type annotation to be unwrapped.

    Returns:
        The input type, but with the outermost Optional removed.

    """
    return (
        next(  # pragma: no cover
            valid_type
            for valid_type in get_args(type_annotation)
            if valid_type is not type(None)  # noqa: E721
        )
        if is_optional(type_annotation)
        else type_annotation
    )
Return the inner, wrapped type of an Optional. Is a no-op for non-Optional types. Args: type_annotation: The type annotation to be unwrapped. Returns: The input type, but with the outermost Optional removed.
unwrap_optional
python
JakobGM/patito
src/patito/_pydantic/dtypes/utils.py
https://github.com/JakobGM/patito/blob/master/src/patito/_pydantic/dtypes/utils.py
MIT
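Taken together, the two helpers behave as follows (a sketch mirroring `test_is_optional` and `test_dewrap_optional` near the end of this file):

from typing import Optional, Union

assert is_optional(Optional[int])
assert is_optional(Union[int, None])
assert not is_optional(int)

assert unwrap_optional(Optional[int]) is int
assert unwrap_optional(int) is int  # no-op for non-Optional annotations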
def test_valids_basic_annotations() -> None:
    """Test type annotations match polars dtypes."""
    # base types
    assert DtypeResolver(str).valid_polars_dtypes() == STRING_DTYPES
    assert DtypeResolver(int).valid_polars_dtypes() == DataTypeGroup(INTEGER_DTYPES)
    assert DtypeResolver(float).valid_polars_dtypes() == FLOAT_DTYPES
    assert DtypeResolver(bool).valid_polars_dtypes() == BOOLEAN_DTYPES

    # temporals
    assert DtypeResolver(datetime).valid_polars_dtypes() == DATETIME_DTYPES
    assert DtypeResolver(date).valid_polars_dtypes() == DATE_DTYPES
    assert DtypeResolver(time).valid_polars_dtypes() == TIME_DTYPES
    assert DtypeResolver(timedelta).valid_polars_dtypes() == DURATION_DTYPES

    # other
    with pytest.raises(TypeError, match="must be strings"):
        DtypeResolver(Literal[1, 2, 3]).valid_polars_dtypes()  # pyright: ignore
    with pytest.raises(TypeError, match="Mixed type enums not supported"):
        DtypeResolver(Literal[1, 2, "3"]).valid_polars_dtypes()  # pyright: ignore
    assert DtypeResolver(Literal["a", "b", "c"]).valid_polars_dtypes() == {  # pyright: ignore
        pl.Enum(["a", "b", "c"]),
        pl.String,
    }

    # combos
    assert DtypeResolver(Optional[str]).valid_polars_dtypes() == STRING_DTYPES
    if sys.version_info[1] >= 10:
        assert (
            DtypeResolver(str | None | None).valid_polars_dtypes() == STRING_DTYPES
        )  # superfluous None is ok
    assert (
        DtypeResolver(Union[int, float]).valid_polars_dtypes()
        == FLOAT_DTYPES | INTEGER_DTYPES
    )
    assert (
        DtypeResolver(Union[str, int]).valid_polars_dtypes()
        == STRING_DTYPES | INTEGER_DTYPES
    )

    # invalids
    assert DtypeResolver(object).valid_polars_dtypes() == frozenset()
Test type annotations match polars dtypes.
test_valids_basic_annotations
python
JakobGM/patito
tests/test_dtypes.py
https://github.com/JakobGM/patito/blob/master/tests/test_dtypes.py
MIT
def test_valids_nested_annotations() -> None:
    """Test type annotations match nested polars types like List."""
    assert len(DtypeResolver(list).valid_polars_dtypes()) == 0  # needs inner annotation
    assert (
        DtypeResolver(tuple).valid_polars_dtypes()
        == DtypeResolver(list).valid_polars_dtypes()
        == DtypeResolver(Sequence).valid_polars_dtypes()
    )  # for now, these are the same

    assert DtypeResolver(list[str]).valid_polars_dtypes() == {pl.List(pl.String)}
    assert DtypeResolver(Optional[list[str]]).valid_polars_dtypes() == {
        pl.List(pl.String)
    }
    assert len(DtypeResolver(list[int]).valid_polars_dtypes()) == len(
        DataTypeGroup(INTEGER_DTYPES)
    )
    assert len(DtypeResolver(list[Union[int, float]]).valid_polars_dtypes()) == len(
        INTEGER_DTYPES | FLOAT_DTYPES
    )
    assert len(DtypeResolver(list[Optional[int]]).valid_polars_dtypes()) == len(
        DataTypeGroup(INTEGER_DTYPES)
    )
    assert DtypeResolver(list[list[str]]).valid_polars_dtypes() == {
        pl.List(pl.List(pl.String))
    }  # recursion works as expected

    assert (
        DtypeResolver(dict).valid_polars_dtypes() == frozenset()
    )  # not currently supported

    # support for nested models via struct
    assert (
        len(DtypeResolver(ManyTypes).valid_polars_dtypes()) == 1
    )  # only defaults are valid
    assert (
        DtypeResolver(ManyTypes).valid_polars_dtypes()
        == DtypeResolver(Optional[ManyTypes]).valid_polars_dtypes()
    )
Test type annotations match nested polars types like List.
test_valids_nested_annotations
python
JakobGM/patito
tests/test_dtypes.py
https://github.com/JakobGM/patito/blob/master/tests/test_dtypes.py
MIT
def test_dtype_validation() -> None:
    """Ensure python types match polars types."""
    validate_polars_dtype(int, pl.Int16)  # no issue
    with pytest.raises(ValueError, match="Invalid dtype"):
        validate_polars_dtype(int, pl.Float64)
    with pytest.raises(ValueError, match="Invalid dtype"):
        validate_polars_dtype(int, pl.String)
    with pytest.raises(ValueError, match="Invalid dtype"):
        validate_polars_dtype(list[str], pl.List(pl.Float64))

    # some potential corner cases
    validate_polars_dtype(AwareDatetime, dtype=pl.Datetime(time_zone="UTC"))
Ensure python types match polars types.
test_dtype_validation
python
JakobGM/patito
tests/test_dtypes.py
https://github.com/JakobGM/patito/blob/master/tests/test_dtypes.py
MIT
def test_defaults_basic_annotations() -> None:
    """Ensure python types resolve to largest polars type."""
    # base types
    assert DtypeResolver(str).default_polars_dtype() == pl.String
    assert DtypeResolver(int).default_polars_dtype() == pl.Int64
    assert DtypeResolver(float).default_polars_dtype() == pl.Float64
    assert DtypeResolver(bool).default_polars_dtype() == pl.Boolean

    # temporals
    assert DtypeResolver(datetime).default_polars_dtype() == pl.Datetime
    assert DtypeResolver(date).default_polars_dtype() == pl.Date
    assert DtypeResolver(time).default_polars_dtype() == pl.Time
    assert DtypeResolver(timedelta).default_polars_dtype() == pl.Duration

    # combos
    assert DtypeResolver(Optional[str]).default_polars_dtype() == pl.String
    assert DtypeResolver(Union[int, float]).default_polars_dtype() is None
    assert DtypeResolver(Union[str, int]).default_polars_dtype() is None

    # other
    literal = DtypeResolver(Literal["a", "b", "c"]).default_polars_dtype()
    assert literal == pl.Enum(["a", "b", "c"])
    assert set(literal.categories) == {"a", "b", "c"}

    # invalids
    assert DtypeResolver(object).default_polars_dtype() is None
Ensure python types resolve to largest polars type.
test_defaults_basic_annotations
python
JakobGM/patito
tests/test_dtypes.py
https://github.com/JakobGM/patito/blob/master/tests/test_dtypes.py
MIT
def test_defaults_nested_annotations() -> None:
    """Ensure python nested types fall back to largest nested polars type."""
    assert DtypeResolver(list).default_polars_dtype() is None  # needs inner annotation

    assert DtypeResolver(list[str]).default_polars_dtype() == pl.List(pl.String)
    assert DtypeResolver(Optional[list[str]]).default_polars_dtype() == pl.List(
        pl.String
    )
    assert DtypeResolver(list[int]).default_polars_dtype() == pl.List(pl.Int64)
    assert DtypeResolver(list[Optional[int]]).default_polars_dtype() == pl.List(
        pl.Int64
    )
    assert DtypeResolver(list[Union[int, float]]).default_polars_dtype() is None
    assert DtypeResolver(list[Union[str, int]]).default_polars_dtype() is None
    assert DtypeResolver(list[list[str]]).default_polars_dtype() == pl.List(
        pl.List(pl.String)
    )  # recursion works as expected
    assert DtypeResolver(list[list[Optional[str]]]).default_polars_dtype() == pl.List(
        pl.List(pl.String)
    )

    with pytest.raises(
        NotImplementedError, match="dictionaries not currently supported"
    ):
        DtypeResolver(dict).default_polars_dtype()

    # support for nested models via struct
    many_types = DtypeResolver(ManyTypes).default_polars_dtype()
    assert many_types == pl.Struct
    assert len(many_types.fields) == len(ManyTypes.columns)
    assert DtypeResolver(Optional[ManyTypes]).default_polars_dtype() == many_types
Ensure python nested types fall back to largest nested polars type.
test_defaults_nested_annotations
python
JakobGM/patito
tests/test_dtypes.py
https://github.com/JakobGM/patito/blob/master/tests/test_dtypes.py
MIT
def test_annotation_validation() -> None:
    """Check that python types are resolveable."""
    validate_annotation(int)  # no issue
    validate_annotation(Optional[int])

    with pytest.raises(ValueError, match="Valid dtypes are:"):
        validate_annotation(Union[int, float])
    # Unions are unsupported as actual polars dtypes, but are supported by
    # Patito IF a default dtype is provided
    # TODO: Does it make sense for Patito to support union given that the
    # underlying dataframe cannot?
    with pytest.raises(
        ValueError,
        match="Unable to determine default dtype",
    ):
        validate_annotation(Union[str, int])

    validate_annotation(list[Optional[int]])
    with pytest.raises(ValueError, match="Unable to determine default dtype"):
        validate_annotation(list[Union[str, int]])
    with pytest.raises(
        ValueError,
        match="Unable to determine default dtype",
    ):
        validate_annotation(list[Union[int, float]])
Check that python types are resolveable.
test_annotation_validation
python
JakobGM/patito
tests/test_dtypes.py
https://github.com/JakobGM/patito/blob/master/tests/test_dtypes.py
MIT
def test_generation_of_unique_data() -> None:
    """Example data generators should be able to generate unique data."""

    class UniqueModel(pt.Model):
        bool_column: bool
        string_column: str = pt.Field(unique=True)
        int_column: int = pt.Field(unique=True)
        float_column: int = pt.Field(unique=True)
        date_column: date = pt.Field(unique=True)
        datetime_column: datetime = pt.Field(unique=True)

    example_df = UniqueModel.examples({"bool_column": [True, False]})
    for column in UniqueModel.columns:
        assert example_df[column].is_duplicated().sum() == 0
Example data generators should be able to generate unique data.
test_generation_of_unique_data
python
JakobGM/patito
tests/test_dummy_data.py
https://github.com/JakobGM/patito/blob/master/tests/test_dummy_data.py
MIT
def test_enum_field_example_values() -> None:
    """It should produce correct example values for enums."""

    class DefaultEnumModel(pt.Model):
        row_number: int
        # Here the first value will be used as the example value
        enum_field: Literal["a", "b", "c"]
        # Here the default value will be used as the example value
        default_enum_field: Literal["a", "b", "c"] = "b"
        default_optional_enum_field: Optional[Literal["a", "b", "c"]] = "c"
        # Here null will be used as the example value
        none_default_optional_enum_field: Optional[Literal["a", "b", "c"]] = None

    # Workaround for pola-rs/polars#4253
    example_df = DefaultEnumModel.examples({"row_number": [1]}).with_columns(
        pl.col("none_default_optional_enum_field").cast(pl.Enum(["a", "b", "c"]))
    )

    correct_example_df = pl.DataFrame(
        [
            pl.Series("row_number", [1], dtype=pl.Int64),
            pl.Series("enum_field", ["a"], dtype=pl.Enum(["a", "b", "c"])),
            pl.Series("default_enum_field", ["b"], dtype=pl.Enum(["a", "b", "c"])),
            pl.Series(
                "default_optional_enum_field", ["c"], dtype=pl.Enum(["a", "b", "c"])
            ),
            pl.Series(
                "none_default_optional_enum_field",
                [None],
                dtype=pl.Enum(["a", "b", "c"]),
            ),
        ]
    )
    # Workaround for pl.StringCache() not working here for some reason
    assert correct_example_df.dtypes == example_df.dtypes
    assert example_df.select(pl.all().cast(pl.String)).equals(
        correct_example_df.select(pl.all().cast(pl.String))
    )

    example_model = DefaultEnumModel.example()
    assert example_model.enum_field == "a"
    assert example_model.default_enum_field == "b"
    assert example_model.default_optional_enum_field == "c"
    assert example_model.none_default_optional_enum_field is None
It should produce correct example values for enums.
test_enum_field_example_values
python
JakobGM/patito
tests/test_dummy_data.py
https://github.com/JakobGM/patito/blob/master/tests/test_dummy_data.py
MIT
def test_nested_models() -> None:
    """It should be possible to create nested models."""

    class NestedModel(pt.Model):
        nested_field: int

    class ParentModel1(pt.Model):
        parent_field: int
        nested_model: NestedModel

    example_model = ParentModel1.example()
    example_df = ParentModel1.examples()
    assert isinstance(example_model.nested_model, NestedModel)
    assert example_model.nested_model.nested_field is not None
    example_df = ParentModel1.examples()
    assert isinstance(example_df, pl.DataFrame)

    # inheritance also works
    class ParentModel2(NestedModel):
        parent_field: int

    example_model = ParentModel2.example()
    assert example_model.nested_field is not None
    assert example_model.parent_field is not None

    # and optional nested models are ok
    class ParentModel3(pt.Model):
        parent_field: int
        nested_model: Optional[NestedModel] = None

    example_model = ParentModel3.example()
    assert example_model.nested_model is None

    # sequences of nested models also work
    class ParentModel(pt.Model):
        parent_field: int
        nested_models: Sequence[NestedModel]

    example_model = ParentModel.example()
    ParentModel.examples()
It should be possible to create nested models.
test_nested_models
python
JakobGM/patito
tests/test_dummy_data.py
https://github.com/JakobGM/patito/blob/master/tests/test_dummy_data.py
MIT
def test_instantiating_model_from_row() -> None:
    """You should be able to instantiate models from rows."""

    class Model(pt.Model):
        a: int

    polars_dataframe = pl.DataFrame({"a": [1]})
    assert Model.from_row(polars_dataframe).a == 1

    # Anything besides a dataframe / row should raise TypeError
    with pytest.raises(
        TypeError, match=r"Model.from_row not implemented for \<class 'NoneType'\>."
    ):
        Model.from_row(None)  # pyright: ignore
    with pytest.raises(
        TypeError,
        match=r"Model._from_polars\(\) must be invoked with polars.DataFrame.*",
    ):
        Model._from_polars(None)  # pyright: ignore
You should be able to instantiate models from rows.
test_instantiating_model_from_row
python
JakobGM/patito
tests/test_model.py
https://github.com/JakobGM/patito/blob/master/tests/test_model.py
MIT
def test_instantiation_from_pandas_row() -> None:
    """You should be able to instantiate models from pandas rows."""
    pytest.importorskip("pandas")

    class Model(pt.Model):
        a: int

    polars_dataframe = pl.DataFrame({"a": [1]})
    assert Model.from_row(polars_dataframe).a == 1

    pandas_dataframe = polars_dataframe.to_pandas()
    assert Model.from_row(pandas_dataframe).a == 1
    assert Model.from_row(pandas_dataframe.loc[0]).a == 1  # type: ignore
You should be able to instantiate models from pandas rows.
test_instantiation_from_pandas_row
python
JakobGM/patito
tests/test_model.py
https://github.com/JakobGM/patito/blob/master/tests/test_model.py
MIT
def test_model_dataframe_class_creation() -> None:
    """Each model should get a custom DataFrame class."""

    class CustomModel(pt.Model):
        a: int

    # The DataFrame class is a sub-class of patito.DataFrame
    assert issubclass(CustomModel.DataFrame, pt.DataFrame)
    # The LazyFrame class is a sub-class of patito.LazyFrame
    assert issubclass(CustomModel.LazyFrame, pt.LazyFrame)
    # And both classes hold a reference back to the model
    assert CustomModel.DataFrame.model is CustomModel
    assert CustomModel.LazyFrame.model is CustomModel
Each model should get a custom DataFrame class.
test_model_dataframe_class_creation
python
JakobGM/patito
tests/test_model.py
https://github.com/JakobGM/patito/blob/master/tests/test_model.py
MIT
def test_mapping_to_polars_dtypes() -> None:
    """Model fields should be mappable to polars dtypes."""
    assert CompleteModel.dtypes == {
        "str_column": pl.String(),
        "int_column": pl.Int64(),
        "float_column": pl.Float64(),
        "bool_column": pl.Boolean(),
        "date_column": pl.Date(),
        "datetime_column": pl.Datetime(),
        "datetime_column2": pl.Datetime(),
        "aware_datetime_column": pl.Datetime(time_zone="UTC"),
        "duration_column": pl.Duration(),
        "time_column": pl.Time(),
        "categorical_column": pl.Enum(["a", "b", "c"]),
        "null_column": pl.Null(),
        "pt_model_column": pl.Struct(
            [
                pl.Field("a", pl.Int64),
                pl.Field("b", pl.String),
                pl.Field("c", pl.Datetime(time_zone="UTC")),
                pl.Field("d", pl.Datetime(time_zone="UTC")),
                pl.Field("e", pl.Int8),
            ]
        ),
        "list_int_column": pl.List(pl.Int64),
        "list_str_column": pl.List(pl.String),
        "list_opt_column": pl.List(pl.Int64),
    }

    assert CompleteModel.valid_dtypes == {
        "str_column": {pl.String},
        "int_column": DataTypeGroup(INTEGER_DTYPES),
        "float_column": FLOAT_DTYPES,
        "bool_column": {pl.Boolean},
        "date_column": DATE_DTYPES,
        "datetime_column": DATETIME_DTYPES,
        "datetime_column2": {pl.Datetime()},
        "aware_datetime_column": {pl.Datetime(time_zone="UTC")},
        "duration_column": DURATION_DTYPES,
        "time_column": TIME_DTYPES,
        "categorical_column": {pl.Enum(["a", "b", "c"]), pl.String},
        "null_column": {pl.Null},
        "pt_model_column": DataTypeGroup(
            [
                pl.Struct(
                    [
                        pl.Field("a", pl.Int64),
                        pl.Field("b", pl.String),
                        pl.Field("c", pl.Datetime(time_zone="UTC")),
                        pl.Field("d", pl.Datetime(time_zone="UTC")),
                        pl.Field("e", pl.Int8),
                    ]
                )
            ]
        ),
        "list_int_column": DataTypeGroup(
            [pl.List(x) for x in DataTypeGroup(INTEGER_DTYPES)]
        ),
        "list_str_column": DataTypeGroup([pl.List(pl.String)]),
        "list_opt_column": DataTypeGroup(
            [pl.List(x) for x in DataTypeGroup(INTEGER_DTYPES)]
        ),
    }

    CompleteModel.example(int_column=2)
    CompleteModel.validate(CompleteModel.examples({"int_column": [1, 2, 3]}))
Model fields should be mappable to polars dtypes.
test_mapping_to_polars_dtypes
python
JakobGM/patito
tests/test_model.py
https://github.com/JakobGM/patito/blob/master/tests/test_model.py
MIT
def test_model_joins() -> None:
    """It should produce models compatible with join statements."""

    class Left(pt.Model):
        left: int = pt.Field(gt=20)
        opt_left: Optional[int] = None

    class Right(pt.Model):
        right: int = pt.Field(gt=20)
        opt_right: Optional[int] = None

    def test_model_validator(model: type[pt.Model]) -> None:
        """Test if all field validators have been included correctly."""
        with pytest.raises(ValidationError) as e:
            model(left=1, opt_left=1, right=1, opt_right=1)
        pattern = re.compile(r"Input should be greater than 20")
        assert len(pattern.findall(str(e.value))) == 2

    # An inner join should keep nullability information
    InnerJoinModel = Left.join(Right, how="inner")
    assert set(InnerJoinModel.columns) == {"left", "opt_left", "right", "opt_right"}
    assert InnerJoinModel.nullable_columns == {"opt_left", "opt_right"}
    assert InnerJoinModel.__name__ == "LeftInnerJoinRight"
    test_model_validator(InnerJoinModel)

    # Left joins should make all fields of the right model nullable
    LeftJoinModel = Left.join(Right, how="left")
    assert set(LeftJoinModel.columns) == {"left", "opt_left", "right", "opt_right"}
    assert LeftJoinModel.nullable_columns == {"opt_left", "right", "opt_right"}
    assert LeftJoinModel.__name__ == "LeftLeftJoinRight"
    test_model_validator(LeftJoinModel)

    # Outer joins should make all columns nullable
    OuterJoinModel = Left.join(Right, how="outer")
    assert set(OuterJoinModel.columns) == {"left", "opt_left", "right", "opt_right"}
    assert OuterJoinModel.nullable_columns == {"left", "opt_left", "right", "opt_right"}
    assert OuterJoinModel.__name__ == "LeftOuterJoinRight"
    test_model_validator(OuterJoinModel)

    # Semi- and anti-joins do not change the schema at all
    assert Left.join(Right, how="semi") is Left
    assert Left.join(Right, how="anti") is Left
It should produce models compatible with join statements.
test_model_joins
python
JakobGM/patito
tests/test_model.py
https://github.com/JakobGM/patito/blob/master/tests/test_model.py
MIT
def test_model_validator(model: type[pt.Model]) -> None:
    """Test if all field validators have been included correctly."""
    with pytest.raises(ValidationError) as e:
        model(left=1, opt_left=1, right=1, opt_right=1)
    pattern = re.compile(r"Input should be greater than 20")
    assert len(pattern.findall(str(e.value))) == 2
Test if all field validators have been included correctly.
test_model_validator
python
JakobGM/patito
tests/test_model.py
https://github.com/JakobGM/patito/blob/master/tests/test_model.py
MIT
def test_model_selects() -> None:
    """It should produce models compatible with select statements."""

    class MyModel(pt.Model):
        a: Optional[int]
        b: int = pt.Field(gt=10)

    MySubModel = MyModel.select("b")
    assert MySubModel.columns == ["b"]
    MySubModel(b=11)
    with pytest.raises(ValidationError, match="Input should be greater than 10"):
        MySubModel(b=1)

    MyTotalModel = MyModel.select(["a", "b"])
    assert sorted(MyTotalModel.columns) == ["a", "b"]
    MyTotalModel(a=1, b=11)
    with pytest.raises(ValidationError, match="Input should be greater than 10"):
        MyTotalModel(a=1, b=1)
    assert MyTotalModel.nullable_columns == {"a"}

    with pytest.raises(
        ValueError, match="The following selected fields do not exist: {'c'}"
    ):
        MyModel.select("c")
It should produce models compatible with select statements.
test_model_selects
python
JakobGM/patito
tests/test_model.py
https://github.com/JakobGM/patito/blob/master/tests/test_model.py
MIT
def test_model_prefix_and_suffix() -> None:
    """It should produce models where all fields have been prefixed/suffixed."""

    class MyModel(pt.Model):
        a: Optional[int]
        b: str

    NewModel = MyModel.prefix("pre_").suffix("_post")
    assert sorted(NewModel.columns) == ["pre_a_post", "pre_b_post"]
    assert NewModel.nullable_columns == {"pre_a_post"}
It should produce models where all fields have been prefixed/suffixed.
test_model_prefix_and_suffix
python
JakobGM/patito
tests/test_model.py
https://github.com/JakobGM/patito/blob/master/tests/test_model.py
MIT
def test_model_field_renaming() -> None:
    """It should be able to change its field names."""

    class MyModel(pt.Model):
        a: Optional[int]
        b: str

    NewModel = MyModel.rename({"b": "B"})
    assert sorted(NewModel.columns) == ["B", "a"]

    with pytest.raises(
        ValueError,
        match="The following fields do not exist for renaming: {'c'}",
    ):
        MyModel.rename({"c": "C"})
It should be able to change its field names.
test_model_field_renaming
python
JakobGM/patito
tests/test_model.py
https://github.com/JakobGM/patito/blob/master/tests/test_model.py
MIT
def test_model_field_dropping() -> None:
    """Model should be able to drop a subset of its fields."""

    class MyModel(pt.Model):
        a: int
        b: int
        c: int

    assert sorted(MyModel.drop("c").columns) == ["a", "b"]
    assert MyModel.drop(["b", "c"]).columns == ["a"]
Model should be able to drop a subset of its fields.
test_model_field_dropping
python
JakobGM/patito
tests/test_model.py
https://github.com/JakobGM/patito/blob/master/tests/test_model.py
MIT
def test_with_fields() -> None:
    """It should allow the user to add additional fields."""

    class MyModel(pt.Model):
        a: int

    ExpandedModel = MyModel.with_fields(
        b=(int, ...),
        c=(int, None),
        d=(int, pt.Field(gt=10)),
        e=(Optional[int], None),
    )
    assert sorted(ExpandedModel.columns) == list("abcde")
    assert ExpandedModel.nullable_columns == set("ce")
It should allow the user to add additional fields.
test_with_fields
python
JakobGM/patito
tests/test_model.py
https://github.com/JakobGM/patito/blob/master/tests/test_model.py
MIT
def test_enum_annotated_field() -> None:
    """It should use values of enums to infer types."""

    class ABCEnum(enum.Enum):
        ONE = "a"
        TWO = "b"
        THREE = "c"

    class EnumModel(pt.Model):
        column: ABCEnum

    assert EnumModel.dtypes["column"] == pl.Enum(["a", "b", "c"])
    assert EnumModel.example_value(field="column") == "a"
    assert EnumModel.example() == EnumModel(column="a")

    EnumModel.DataFrame({"column": ["a"]}).cast()

    class MultiTypedEnum(enum.Enum):
        ONE = 1
        TWO = "2"

    with pytest.raises(TypeError, match="Mixed type enums not supported"):

        class InvalidEnumModel(pt.Model):
            column: MultiTypedEnum

        InvalidEnumModel.validate_schema()
It should use values of enums to infer types.
test_enum_annotated_field
python
JakobGM/patito
tests/test_model.py
https://github.com/JakobGM/patito/blob/master/tests/test_model.py
MIT
def test_model_schema() -> None:
    """Ensure pt.Field properties are correctly applied to model."""

    class Model(pt.Model):
        a: int = pt.Field(ge=0, unique=True)

    schema = Model.model_schema

    def validate_model_schema(schema) -> None:
        assert set(schema) == {"properties", "required", "type", "title"}
        assert schema["title"] == "Model"
        assert schema["type"] == "object"
        assert "a" in schema["properties"]
        assert schema["properties"]["a"]["type"] == "integer"
        assert schema["properties"]["a"]["minimum"] == 0

    validate_model_schema(schema)

    # nested models
    class ParentModel(pt.Model):
        a: int
        b: Model
        c: Optional[float] = None

    schema = ParentModel.model_schema
    # ensure that nested model schema is recorded in definitions
    validate_model_schema(schema["$defs"]["Model"])
    # and all info is copied into field properties
    validate_model_schema(schema["properties"]["b"])
    assert set(schema["properties"]) == {"a", "b", "c"}
    assert schema["properties"]["a"]["required"]
    assert schema["properties"]["b"]["required"]
    assert schema["properties"]["a"]["type"] == "integer"
    assert not schema["properties"]["c"]["required"]
Ensure pt.Field properties are correctly applied to model.
test_model_schema
python
JakobGM/patito
tests/test_model.py
https://github.com/JakobGM/patito/blob/master/tests/test_model.py
MIT
def test_conflicting_type_dtype() -> None:
    """Ensure model annotation is compatible with Field dtype."""
    with pytest.raises(ValueError, match="Invalid dtype String"):

        class Test1(pt.Model):
            foo: int = pt.Field(dtype=pl.String)

        Test1.validate_schema()

    with pytest.raises(ValueError, match="Invalid dtype Float32"):

        class Test2(pt.Model):
            foo: str = pt.Field(dtype=pl.Float32)

        Test2.validate_schema()

    with pytest.raises(ValueError, match="Invalid dtype UInt32"):

        class Test3(pt.Model):
            foo: Optional[str] = pt.Field(dtype=pl.UInt32)

        Test3.validate_schema()
Ensure model annotation is compatible with Field dtype.
test_conflicting_type_dtype
python
JakobGM/patito
tests/test_model.py
https://github.com/JakobGM/patito/blob/master/tests/test_model.py
MIT
def test_polars_python_type_harmonization() -> None:
    """Ensure datetime types are correctly transformed to polars types."""

    class Test(pt.Model):
        date: datetime = pt.Field(dtype=pl.Datetime(time_unit="us"))
        time: time

    assert Test.valid_dtypes["date"] == {pl.Datetime(time_unit="us")}
    assert Test.valid_dtypes["time"] == TIME_DTYPES
Ensure datetime types are correctly transformed to polars types.
test_polars_python_type_harmonization
python
JakobGM/patito
tests/test_model.py
https://github.com/JakobGM/patito/blob/master/tests/test_model.py
MIT
def test_column_infos() -> None:
    """Test that pt.Field and ColumnInfo properties match."""

    class Model(pt.Model):
        a: int
        b: int = pt.Field(constraints=[(pl.col("b") < 10)])
        c: int = pt.Field(derived_from=pl.col("a") + pl.col("b"))
        d: int = pt.Field(dtype=pl.UInt8)
        e: int = pt.Field(unique=True)

    schema = Model.model_json_schema()  # no serialization issues
    # extra fields are stored in modified schema_properties
    props = schema["properties"]
    for col in ["b", "c", "d", "e"]:
        assert "column_info" in props[col]
    assert (
        ColumnInfo.model_validate_json(props["b"]["column_info"]).constraints
        is not None
    )
    assert (
        ColumnInfo.model_validate_json(props["c"]["column_info"]).derived_from
        is not None
    )
    assert ColumnInfo.model_validate_json(props["d"]["column_info"]).dtype is not None
    assert ColumnInfo.model_validate_json(props["e"]["column_info"]).unique is not None

    infos = Model.column_infos
    assert infos["b"].constraints is not None
    assert infos["c"].derived_from is not None
    assert infos["d"].dtype is not None
    assert infos["e"].unique is not None
Test that pt.Field and ColumnInfo properties match.
test_column_infos
python
JakobGM/patito
tests/test_model.py
https://github.com/JakobGM/patito/blob/master/tests/test_model.py
MIT
def test_validation_alias():
    """Test that validation alias works in pt.Field.

    TODO: Not sure if this actually tests anything correctly.
    """

    class AliasModel(pt.Model):
        my_val_a: int = pt.Field(validation_alias="myValA")
        my_val_b: int = pt.Field(validation_alias=AliasChoices("my_val_b", "myValB"))

    # code from validators _find_errors showing that we need model_json_schema
    # without aliases
    for column_name, _column_properties in AliasModel._schema_properties().items():
        assert AliasModel.column_infos[column_name] is not None

    AliasModel.examples()
Test that validation alias works in pt.Field. TODO: Not sure if this actually tests anything correctly.
test_validation_alias
python
JakobGM/patito
tests/test_model.py
https://github.com/JakobGM/patito/blob/master/tests/test_model.py
MIT
def test_json_schema_extra_is_extended_when_it_exists() -> None:
    """Ensure that the json_schema_extra property is extended with column_info when it is set from the model field."""

    class Model(pt.Model):
        a: int
        b: int = pt.Field(
            json_schema_extra={"client_column_metadata": {"group1": "x", "group2": "y"}}
        )
        c: int = pt.Field(
            json_schema_extra={"client_column_metadata": {"group1": "xxx"}}
        )

    schema = Model.model_json_schema()  # no serialization issues
    # extra fields are stored in modified schema_properties
    props = schema["properties"]
    for col in ["b", "c"]:
        assert "column_info" in props[col]
        assert "client_column_metadata" in props[col]
    assert "client_column_metadata" not in props["a"]
    assert props["b"]["client_column_metadata"]["group1"] == "x"
    assert props["b"]["client_column_metadata"]["group2"] == "y"
    assert props["c"]["client_column_metadata"]["group1"] == "xxx"
Ensure that the json_schema_extra property is extended with column_info when it is set from the model field.
test_json_schema_extra_is_extended_when_it_exists
python
JakobGM/patito
tests/test_model.py
https://github.com/JakobGM/patito/blob/master/tests/test_model.py
MIT
def test_dataframe_get_method() -> None:
    """You should be able to retrieve a single row and cast to model."""

    class Product(pt.Model):
        product_id: int = pt.Field(unique=True)
        price: float

    df = pt.DataFrame({"product_id": [1, 2], "price": [9.99, 19.99]})

    # Validation raises TypeError, as no model is specified
    with pytest.raises(TypeError):
        df.validate()

    # But if we specify the model, it makes sense
    df.set_model(Product).validate()

    untyped_product = df.get(pl.col("product_id") == 1)
    assert untyped_product.price == 9.99

    typed_product = df.set_model(Product).get(pl.col("product_id") == 1)
    assert typed_product.price == 9.99

    with pytest.raises(
        pt.exceptions.MultipleRowsReturned,
        match=re.escape(r"DataFrame.get() yielded 2 rows."),
    ):
        df.get(pl.col("product_id") < 3)
    with pytest.raises(
        pt.exceptions.RowDoesNotExist,
        match=re.escape(r"DataFrame.get() yielded 0 rows."),
    ):
        df.get(pl.col("product_id") < 0)

    df.filter(pl.col("product_id") == 1).get()
You should be able to retrieve a single row and cast to model.
test_dataframe_get_method
python
JakobGM/patito
tests/test_polars.py
https://github.com/JakobGM/patito/blob/master/tests/test_polars.py
MIT
def test_dataframe_set_model_method() -> None:
    """You should be able to set the associated model of a dataframe."""

    class MyModel(pt.Model):
        pass

    modelled_df = pt.DataFrame().set_model(MyModel)
    assert modelled_df.model is MyModel
    assert MyModel.DataFrame.model is MyModel
You should be able to set the associated model of a dataframe.
test_dataframe_set_model_method
python
JakobGM/patito
tests/test_polars.py
https://github.com/JakobGM/patito/blob/master/tests/test_polars.py
MIT
def test_fill_nan_with_defaults() -> None:
    """You should be able to fill missing values with declared defaults."""

    class DefaultModel(pt.Model):
        foo: int = 2
        bar: str = "default"

    missing_df = pt.DataFrame({"foo": [1, None], "bar": [None, "provided"]})
    filled_df = missing_df.set_model(DefaultModel).fill_null(strategy="defaults")
    correct_filled_df = pt.DataFrame({"foo": [1, 2], "bar": ["default", "provided"]})
    assert filled_df.equals(correct_filled_df)
You should be able to fill missing values with declared defaults.
test_fill_nan_with_defaults
python
JakobGM/patito
tests/test_polars.py
https://github.com/JakobGM/patito/blob/master/tests/test_polars.py
MIT
def test_create_missing_columns_with_defaults() -> None:
    """Columns that have default values should be created if they are missing."""

    class NestedModel(pt.Model):
        foo: int = 2
        small_model: Optional[SmallModel] = None

    class DefaultModel(pt.Model):
        foo: int = 2
        bar: Optional[str] = "default"
        small_model: Optional[SmallModel] = None  # works ok on polars==0.20.3
        nested_model: Optional[NestedModel] = None  # fails to convert on polars==0.20.3

    missing_df = pt.DataFrame({"foo": [1, 2]})
    filled_df = missing_df.set_model(DefaultModel).fill_null(strategy="defaults")
    correct_filled_df = pl.DataFrame(
        {
            "foo": [1, 2],
            "bar": ["default", "default"],
            "small_model": [None, None],
            "nested_model": [None, None],
        },
        schema=DefaultModel.dtypes,
    )
    assert filled_df.equals(correct_filled_df)
Columns that have default values should be created if they are missing.
test_create_missing_columns_with_defaults
python
JakobGM/patito
tests/test_polars.py
https://github.com/JakobGM/patito/blob/master/tests/test_polars.py
MIT
def test_create_missing_columns_with_dtype() -> None:
    """Ensure optional columns are created by model."""

    class DefaultModel(pt.Model):
        foo: int
        bar: Optional[int] = None

    missing_df = pt.DataFrame({"foo": [1, 2]})
    filled_df = missing_df.set_model(DefaultModel).fill_null(strategy="defaults")
    assert "bar" in filled_df.columns
    assert filled_df["bar"].dtype == pl.Int64
Ensure optional columns are created by model.
test_create_missing_columns_with_dtype
python
JakobGM/patito
tests/test_polars.py
https://github.com/JakobGM/patito/blob/master/tests/test_polars.py
MIT
def test_preservation_of_model() -> None:
    """The model should be preserved on data frames after method invocations."""

    class DummyModel(pt.Model):
        a: int

    class AnotherDummyModel(pt.Model):
        a: int

    df_with_model = pt.DataFrame().set_model(DummyModel)

    # First many eagerly executed method calls
    assert (
        df_with_model.with_columns(pl.lit(1).alias("a"))
        .filter(pl.lit(1) == 1)
        .select(pl.col("a"))
        .model
    ) is DummyModel

    # A round-trip to lazy and back should also preserve the model
    assert df_with_model.lazy().collect().model is DummyModel

    # Since DataFrame.set_model does some trickery with self.__class__
    # it is important to test that this does not leak between different
    # sub-types of DataFrame
    df_with_another_model = pt.DataFrame().set_model(AnotherDummyModel)
    assert df_with_model.model is DummyModel
    assert df_with_another_model.model is AnotherDummyModel

    # The same goes for lazy round-trips
    assert df_with_model.lazy().collect().model is DummyModel
    assert df_with_another_model.lazy().collect().model is AnotherDummyModel

    # Round-trips for DataFrames and LazyFrames should work without models as well
    assert type(pt.DataFrame().lazy().collect()) is pt.DataFrame
The model should be preserved on data frames after method invocations.
test_preservation_of_model
python
JakobGM/patito
tests/test_polars.py
https://github.com/JakobGM/patito/blob/master/tests/test_polars.py
MIT
def test_dataframe_model_dtype_casting() -> None:
    """You should be able to cast columns according to model type annotations."""

    class DTypeModel(pt.Model):
        implicit_int: int
        explicit_uint: int = pt.Field(dtype=pl.UInt64)
        implicit_date: date
        implicit_datetime: datetime

    original_df = DTypeModel.DataFrame().with_columns(
        [
            # UInt32 is compatible with the "int" annotation, and since no explicit
            # dtype is specified, it will not be casted to the default pl.Int64
            pl.lit(1).cast(pl.UInt32).alias("implicit_int"),
            # The integer will be casted to date 1970-01-01
            pl.lit(0).cast(pl.Int64).alias("implicit_date"),
            # The integer will be casted to datetime 1970-01-01 00:00:00
            pl.lit(0).cast(pl.Int64).alias("implicit_datetime"),
            # Columns not specified in the model should be left as-is
            pl.lit(True),
        ]
    )
    casted_df = original_df.cast()
    assert casted_df.dtypes == [
        pl.UInt32,
        pl.Date,
        pl.Datetime,
        pl.Boolean,
    ]

    strictly_casted_df = original_df.cast(strict=True)
    assert strictly_casted_df.dtypes == [
        pl.Int64,
        pl.Date,
        pl.Datetime,
        pl.Boolean,
    ]

    some_columns_df = original_df.cast(
        strict=True, columns=["implicit_int", "implicit_date"]
    )
    assert some_columns_df.dtypes == [
        pl.Int64,
        pl.Date,
        pl.Int64,  # not casted
        pl.Boolean,
    ]
You should be able to cast columns according to model type annotations.
test_dataframe_model_dtype_casting
python
JakobGM/patito
tests/test_polars.py
https://github.com/JakobGM/patito/blob/master/tests/test_polars.py
MIT
def test_correct_columns_and_dtype_on_read_regular_inferred(tmp_path):
    """The `polars.read_csv` function should infer dtypes."""
    csv_path = tmp_path / "foo.csv"
    csv_path.write_text("1,2")
    regular_df = pl.read_csv(csv_path, has_header=False)
    assert regular_df.columns == ["column_1", "column_2"]
    assert regular_df.dtypes == [pl.Int64, pl.Int64]
The `polars.read_csv` function should infer dtypes.
test_correct_columns_and_dtype_on_read_regular_inferred
python
JakobGM/patito
tests/test_polars.py
https://github.com/JakobGM/patito/blob/master/tests/test_polars.py
MIT
def test_correct_columns_and_dtype_on_read_model_dtypes(tmp_path):
    """A model DataFrame should read headerless CSVs with column names and dtypes."""

    class Foo(pt.Model):
        a: str = pt.Field()
        b: int = pt.Field()

    csv_path = tmp_path / "foo.csv"
    csv_path.write_text("1,2")
    model_df = Foo.DataFrame.read_csv(csv_path, has_header=False)
    assert model_df.columns == ["a", "b"]
    assert model_df.dtypes == [pl.String, pl.Int64]
A model DataFrame should read headerless CSVs with column names and dtypes.
test_correct_columns_and_dtype_on_read_model_dtypes
python
JakobGM/patito
tests/test_polars.py
https://github.com/JakobGM/patito/blob/master/tests/test_polars.py
MIT
def test_correct_columns_and_dtype_on_read_ordered(tmp_path):
    """A model DataFrame should read headered CSVs with column names and dtypes."""

    class Foo(pt.Model):
        a: str = pt.Field()
        b: int = pt.Field()

    csv_path = tmp_path / "foo.csv"

    # in model field order
    csv_path.write_text("a,b\n1,2")
    column_specified_df_ab = Foo.DataFrame.read_csv(csv_path, has_header=True)
    assert column_specified_df_ab.schema == {"a": pl.String, "b": pl.Int64}
    assert column_specified_df_ab["a"].to_list() == ["1"]
    assert column_specified_df_ab["b"].to_list() == [2]

    # and out of order
    csv_path.write_text("b,a\n1,2")
    column_specified_df_ba = Foo.DataFrame.read_csv(csv_path, has_header=True)
    assert column_specified_df_ba.schema == {
        "a": pl.String,
        "b": pl.Int64,
    }
    assert column_specified_df_ba["a"].to_list() == ["2"]
    assert column_specified_df_ba["b"].to_list() == [1]
A model DataFrame should read headered CSVs with column names and dtypes.
test_correct_columns_and_dtype_on_read_ordered
python
JakobGM/patito
tests/test_polars.py
https://github.com/JakobGM/patito/blob/master/tests/test_polars.py
MIT
def test_correct_columns_and_dtype_on_read_ba_float_dtype_override(tmp_path):
    """A model DataFrame should aid CSV reading with column names and dtypes."""

    class Foo(pt.Model):
        a: str = pt.Field()
        b: int = pt.Field()

    csv_path = tmp_path / "foo.csv"

    # in field order
    csv_path.write_text("a,b\n1,2")
    dtype_specified_df = Foo.DataFrame.read_csv(
        csv_path, has_header=True, schema_overrides=[pl.Float64, pl.Float64]
    )
    assert dtype_specified_df.columns == ["a", "b"]
    assert dtype_specified_df.dtypes == [pl.Float64, pl.Float64]
    assert dtype_specified_df.schema == {"a": pl.Float64, "b": pl.Float64}
    assert dtype_specified_df["a"].to_list() == [1.0]
    assert dtype_specified_df["b"].to_list() == [2.0]

    # and reverse order
    csv_path.write_text("b,a\n1,2")
    dtype_specified_df = Foo.DataFrame.read_csv(
        csv_path, has_header=True, schema_overrides=[pl.Float64, pl.Float64]
    )
    assert dtype_specified_df.columns == ["a", "b"]
    assert dtype_specified_df.dtypes == [pl.Float64, pl.Float64]
    assert dtype_specified_df.schema == {"a": pl.Float64, "b": pl.Float64}
    assert dtype_specified_df["a"].to_list() == [2.0]
    assert dtype_specified_df["b"].to_list() == [1.0]
A model DataFrame should aid CSV reading with column names and dtypes.
test_correct_columns_and_dtype_on_read_ba_float_dtype_override
python
JakobGM/patito
tests/test_polars.py
https://github.com/JakobGM/patito/blob/master/tests/test_polars.py
MIT
def test_correct_columns_and_dtype_on_read_third_float_col(tmp_path):
    """A model DataFrame should aid CSV reading with column names and dtypes."""

    class Foo(pt.Model):
        a: str = pt.Field()
        b: int = pt.Field()

    csv_path = tmp_path / "foo.csv"
    csv_path.write_text("1,2,3.1")
    unspecified_column_df = Foo.DataFrame.read_csv(csv_path, has_header=False)
    assert unspecified_column_df.columns == ["a", "b", "column_3"]
    assert unspecified_column_df.dtypes == [pl.String, pl.Int64, pl.Float64]
A model DataFrame should aid CSV reading with column names and dtypes.
test_correct_columns_and_dtype_on_read_third_float_col
python
JakobGM/patito
tests/test_polars.py
https://github.com/JakobGM/patito/blob/master/tests/test_polars.py
MIT
def test_correct_columns_and_dtype_on_read_derived(tmp_path):
    """A model DataFrame should aid CSV reading with column names and dtypes."""
    csv_path = tmp_path / "foo.csv"
    csv_path.write_text("month,dollars\n1,2.99")

    class DerivedModel(pt.Model):
        month: int = pt.Field()
        dollars: float = pt.Field()
        cents: int = pt.Field(derived_from=100 * pl.col("dollars"))

    derived_df = DerivedModel.DataFrame.read_csv(csv_path)
    assert derived_df.columns == ["month", "dollars", "cents"]
    assert derived_df.equals(
        DerivedModel.DataFrame({"month": [1], "dollars": [2.99], "cents": [299]})
    )
A model DataFrame should aid CSV reading with column names and dtypes.
test_correct_columns_and_dtype_on_read_derived
python
JakobGM/patito
tests/test_polars.py
https://github.com/JakobGM/patito/blob/master/tests/test_polars.py
MIT
def test_correct_columns_and_dtype_on_read_alias_gen(tmp_path):
    """A model DataFrame should apply aliases to CSV columns."""
    csv_path = tmp_path / "foo.csv"
    csv_path.write_text("a,b\n1,2")

    class AliasedModel(pt.Model):
        model_config = ConfigDict(
            alias_generator=AliasGenerator(validation_alias=str.upper)
        )

        A: int = pt.Field()
        B: int = pt.Field()

    aliased_df = AliasedModel.DataFrame.read_csv(csv_path)
    assert aliased_df.columns == ["A", "B"]
    assert aliased_df.equals(AliasedModel.DataFrame({"A": [1], "B": [2]}))
A model DataFrame should apply aliases to CSV columns.
test_correct_columns_and_dtype_on_read_alias_gen
python
JakobGM/patito
tests/test_polars.py
https://github.com/JakobGM/patito/blob/master/tests/test_polars.py
MIT
def test_derive_functionality() -> None:
    """Test of Field(derived_from=...) and DataFrame.derive()."""

    class DerivedModel(pt.Model):
        underived: int
        const_derived: int = pt.Field(derived_from=pl.lit(3))
        column_derived: int = pt.Field(derived_from="underived")
        expr_derived: int = pt.Field(derived_from=2 * pl.col("underived"))
        second_order_derived: int = pt.Field(derived_from=2 * pl.col("expr_derived"))

    assert DerivedModel.derived_columns == {
        "const_derived",
        "column_derived",
        "expr_derived",
        "second_order_derived",
    }

    df = DerivedModel.DataFrame({"underived": [1, 2]})
    assert df.columns == ["underived"]
    derived_df = df.derive()
    correct_derived_df = DerivedModel.DataFrame(
        {
            "underived": [1, 2],
            "const_derived": [3, 3],
            "column_derived": [1, 2],
            "expr_derived": [2, 4],
            "second_order_derived": [4, 8],
        }
    )
    assert derived_df.equals(correct_derived_df)

    # Non-compatible derived_from arguments should raise a ValidationError
    with pytest.raises(ValidationError):

        class InvalidModel(pt.Model):
            incompatible: int = pt.Field(derived_from=object)
Test of Field(derived_from=...) and DataFrame.derive().
test_derive_functionality
python
JakobGM/patito
tests/test_polars.py
https://github.com/JakobGM/patito/blob/master/tests/test_polars.py
MIT
def test_recursive_derive() -> None:
    """DataFrame.derive() infers proper derivation order and executes it, then returns columns in the order given by the model."""

    class DerivedModel(pt.Model):
        underived: int
        const_derived: int = pt.Field(derived_from=pl.lit(3))
        # requires expr_derived to be derived first
        second_order_derived: int = pt.Field(derived_from=2 * pl.col("expr_derived"))
        column_derived: int = pt.Field(derived_from="underived")
        expr_derived: int = pt.Field(derived_from=2 * pl.col("underived"))

    df = DerivedModel.DataFrame({"underived": [1, 2]})
    assert df.columns == ["underived"]
    derived_df = df.derive()
    correct_derived_df = DerivedModel.DataFrame(
        {
            "underived": [1, 2],
            "const_derived": [3, 3],
            "second_order_derived": [4, 8],
            "column_derived": [1, 2],
            # derived before second_order_derived, but remains in last position
            # in the output df according to the model
            "expr_derived": [2, 4],
        }
    )
    assert derived_df.equals(correct_derived_df)
DataFrame.derive() infers proper derivation order and executes it, then returns columns in the order given by the model.
test_recursive_derive
python
JakobGM/patito
tests/test_polars.py
https://github.com/JakobGM/patito/blob/master/tests/test_polars.py
MIT
def test_drop_method() -> None:
    """We should be able to drop columns not specified by the data frame model."""

    class Model(pt.Model):
        column_1: int

    df = Model.DataFrame({"column_1": [1, 2], "column_2": [3, 4]})

    # Originally we have all the columns
    assert df.columns == ["column_1", "column_2"]

    # If no argument is provided to drop, all columns not mentioned in the model
    # are dropped.
    assert df.drop().columns == ["column_1"]

    # We can still specify a different subset
    assert df.drop("column_1").columns == ["column_2"]

    # Or a list of columns
    assert df.drop(["column_1", "column_2"]).columns == []
We should be able to drop columns not specified by the data frame model.
test_drop_method
python
JakobGM/patito
tests/test_polars.py
https://github.com/JakobGM/patito/blob/master/tests/test_polars.py
MIT
def test_polars_conversion():
    """You should be able to convert a DataFrame to a polars DataFrame."""

    class Model(pt.Model):
        a: int
        b: str

    df = Model.DataFrame({"a": [1, 2], "b": ["foo", "bar"]})
    polars_df = df.as_polars()
    assert isinstance(polars_df, pl.DataFrame)
    assert not isinstance(polars_df, pt.DataFrame)
    assert polars_df.shape == (2, 2)
    assert polars_df.columns == ["a", "b"]
    assert polars_df.dtypes == [pl.Int64, pl.String]
You should be able to convert a DataFrame to a polars DataFrame.
test_polars_conversion
python
JakobGM/patito
tests/test_polars.py
https://github.com/JakobGM/patito/blob/master/tests/test_polars.py
MIT
def test_validation_alias() -> None:
    """Ensure validation_alias allows multiple column names to be parsed for one field."""

    class AliasModel(pt.Model):
        my_val_a: int = pt.Field(validation_alias="myValA")
        my_val_b: int = pt.Field(
            validation_alias=AliasChoices("my_val_b", "myValB", "myValB2")
        )
        my_val_c: int
        first_name: str = pt.Field(validation_alias=AliasPath("names", 0))
        last_name: str = pt.Field(
            validation_alias=AliasChoices("lastName", AliasPath("names", 1))
        )

    examples = [
        {"myValA": 1, "myValB": 1, "my_val_c": 1, "names": ["fname1", "lname1"]},
        {"myValA": 2, "myValB": 2, "my_val_c": 2, "names": ["fname2", "lname2"]},
        {
            "my_val_a": 3,
            "myValB2": 3,
            "my_val_c": 3,
            "names": ["fname3"],
            "last_name": "lname3",
        },
        {
            "my_val_a": 4,
            "my_val_b": 4,
            "my_val_c": 4,
            "first_name": "fname4",
            "last_name": "lname4",
        },
    ]

    # check record with all aliases
    df = (
        AliasModel.LazyFrame([examples[0]])
        .unalias()
        .cast(strict=True)
        .collect()
        .validate()
    )
    assert df.columns == AliasModel.columns

    # check record with no aliases
    df = (
        AliasModel.LazyFrame([examples[3]])
        .unalias()
        .cast(strict=True)
        .collect()
        .validate()
    )
    assert df.columns == AliasModel.columns

    # check records with mixed aliases
    df = AliasModel.LazyFrame(examples).unalias().cast(strict=True).collect().validate()
    assert df.columns == AliasModel.columns
Ensure validation_alias allows multiple column names to be parsed for one field.
test_validation_alias
python
JakobGM/patito
tests/test_polars.py
https://github.com/JakobGM/patito/blob/master/tests/test_polars.py
MIT
def test_alias_generator_read_csv() -> None: """Ensure validation alias is applied to read_csv.""" class AliasGeneratorModel(pt.Model): model_config = ConfigDict( alias_generator=AliasGenerator(validation_alias=str.title), ) My_Val_A: int My_Val_B: Optional[int] = None csv_data = StringIO("my_val_a,my_val_b\n1,") df = AliasGeneratorModel.DataFrame.read_csv(csv_data) df.validate() assert df.to_dicts() == [{"My_Val_A": 1, "My_Val_B": None}]
Ensure validation alias is applied to read_csv.
test_alias_generator_read_csv
python
JakobGM/patito
tests/test_polars.py
https://github.com/JakobGM/patito/blob/master/tests/test_polars.py
MIT
def test_is_optional() -> None: """It should return True for optional types.""" assert is_optional(Optional[int]) assert is_optional(Union[int, None]) assert not is_optional(int)
It should return True for optional types.
test_is_optional
python
JakobGM/patito
tests/test_validators.py
https://github.com/JakobGM/patito/blob/master/tests/test_validators.py
MIT
def test_dewrap_optional() -> None: """It should return the inner type of Optional types.""" assert unwrap_optional(Optional[int]) is int assert unwrap_optional(Union[int, None]) is int assert unwrap_optional(int) is int
It should return the inner type of Optional types.
test_dewrap_optional
python
JakobGM/patito
tests/test_validators.py
https://github.com/JakobGM/patito/blob/master/tests/test_validators.py
MIT
def test_validation_returns_df() -> None: """It should return a DataFrame with the validation results.""" class SimpleModel(pt.Model): column_1: int column_2: str df = pl.DataFrame({"column_1": [1, 2, 3], "column_2": ["a", "b", "c"]}) result = validate(dataframe=df, schema=SimpleModel) assert isinstance(result, pl.DataFrame) assert result.shape == (3, 2)
It should return a DataFrame with the validation results.
test_validation_returns_df
python
JakobGM/patito
tests/test_validators.py
https://github.com/JakobGM/patito/blob/master/tests/test_validators.py
MIT
def test_allow_missing_nested_column_validation() -> None: """Validation should allow missing nested columns.""" class InnerModel(pt.Model): column_1: int column_2: str = pt.Field(allow_missing=True) class OuterModel(pt.Model): inner: InnerModel other: str df_missing_nested_column_2 = pl.DataFrame( {"inner": [{"column_1": 1}, {"column_1": 2}], "other": ["a", "b"]} ) validate(dataframe=df_missing_nested_column_2, schema=OuterModel) OuterModel.validate(df_missing_nested_column_2) class OuterModelWithOptionalInner(pt.Model): inner: Optional[InnerModel] other: str df_missing_nested_column_2 = pl.DataFrame( {"inner": [{"column_1": 1}, None], "other": ["a", "b"]} ) validate(dataframe=df_missing_nested_column_2, schema=OuterModelWithOptionalInner) OuterModelWithOptionalInner.validate(df_missing_nested_column_2) class OuterModelWithListInner(pt.Model): inner: list[InnerModel] other: str df_missing_nested_column_2 = pl.DataFrame( { "inner": [ [{"column_1": 1}, {"column_1": 2}], [{"column_1": 3}, {"column_1": 4}], ], "other": ["a", "b"], } ) validate(dataframe=df_missing_nested_column_2, schema=OuterModelWithListInner) OuterModelWithListInner.validate(df_missing_nested_column_2) class OuterModelWithOptionalListInner(pt.Model): inner: Optional[list[InnerModel]] other: str df_missing_nested_column_2 = pl.DataFrame( {"inner": [[{"column_1": 1}, {"column_1": 2}], None], "other": ["a", "b"]} ) validate( dataframe=df_missing_nested_column_2, schema=OuterModelWithOptionalListInner ) OuterModelWithOptionalListInner.validate(df_missing_nested_column_2) class OuterModelWithListOptionalInner(pt.Model): inner: list[Optional[InnerModel]] other: str df_missing_nested_column_2 = pl.DataFrame( { "inner": [[{"column_1": 1}, None], [None, {"column_1": 2}, None]], "other": ["a", "b"], } ) validate( dataframe=df_missing_nested_column_2, schema=OuterModelWithListOptionalInner ) OuterModelWithListOptionalInner.validate(df_missing_nested_column_2) class OuterModelWithOptionalListOptionalInner(pt.Model): inner: Optional[list[Optional[InnerModel]]] other: str df_missing_nested_column_2 = pl.DataFrame( { "inner": [[{"column_1": 1}, None], [None, {"column_1": 2}, None], None], "other": ["a", "b", "c"], } ) validate( dataframe=df_missing_nested_column_2, schema=OuterModelWithOptionalListOptionalInner, ) OuterModelWithOptionalListOptionalInner.validate(df_missing_nested_column_2)
Validation should allow missing nested columns.
test_allow_missing_nested_column_validation
python
JakobGM/patito
tests/test_validators.py
https://github.com/JakobGM/patito/blob/master/tests/test_validators.py
MIT
def test_drop_superfluous_columns() -> None: """Test whether irrelevant columns get filtered out before validation.""" class SingleColumnModel(pt.Model): column_1: int test_df = SingleColumnModel.examples().with_columns( column_that_should_be_dropped=pl.Series([1]) ) result = validate( test_df, SingleColumnModel, drop_superfluous_columns=True, ) assert result.columns == ["column_1"]
Test whether irrelevant columns get filtered out before validation.
test_drop_superfluous_columns
python
JakobGM/patito
tests/test_validators.py
https://github.com/JakobGM/patito/blob/master/tests/test_validators.py
MIT
def test_validate_non_nullable_columns() -> None: """Test for validation logic related to missing values.""" class SmallModel(pt.Model): column_1: int column_2: Optional[int] = None # We insert nulls into a non-optional column, causing an exception wrong_nulls_df = pl.DataFrame().with_columns( [ pl.lit(None).cast(pl.Int64).alias("column_1"), pl.lit(None).cast(pl.Int64).alias("column_2"), ] ) with pytest.raises(DataFrameValidationError) as e_info: validate( dataframe=wrong_nulls_df, schema=SmallModel, ) errors = e_info.value.errors() assert len(e_info.value.errors()) == 1 assert errors[0] == { "loc": ("column_1",), "msg": "1 missing value", "type": "value_error.missingvalues", }
Test for validation logic related to missing values.
test_validate_non_nullable_columns
python
JakobGM/patito
tests/test_validators.py
https://github.com/JakobGM/patito/blob/master/tests/test_validators.py
MIT
def test_uniqueness_validation() -> None: """It should be able to validate uniqueness.""" class MyModel(pt.Model): column: int = pt.Field(unique=True) non_duplicated_df = pt.DataFrame({"column": [1, 2, 3]}) MyModel.validate(non_duplicated_df) empty_df = pt.DataFrame([pl.Series("column", [], dtype=pl.Int64)]) MyModel.validate(empty_df) duplicated_df = pt.DataFrame({"column": [1, 1, 2]}) with pytest.raises(pt.exceptions.DataFrameValidationError): MyModel.validate(duplicated_df)
It should be able to validate uniqueness.
test_uniqueness_validation
python
JakobGM/patito
tests/test_validators.py
https://github.com/JakobGM/patito/blob/master/tests/test_validators.py
MIT
def test_datetime_validation() -> None:
    """Test for date(time) validation.

    Strings, dates, and datetimes are all assigned type "string" in the OpenAPI
    JSON schema spec, so this needs to be tested specifically, since the
    implementation has to check the "format" property on the field schema.
    """
    string_df = pl.DataFrame().with_columns(
        pl.lit("string", dtype=pl.String).alias("c")
    )
    date_df = pl.DataFrame().with_columns(
        pl.lit(date.today(), dtype=pl.Date).alias("c")
    )
    datetime_df = pl.DataFrame().with_columns(
        pl.lit(datetime.now(), dtype=pl.Datetime).alias("c")
    )

    class StringModel(pt.Model):
        c: str

    validate(dataframe=string_df, schema=StringModel)
    with pytest.raises(DataFrameValidationError):
        validate(dataframe=date_df, schema=StringModel)
    with pytest.raises(DataFrameValidationError):
        validate(dataframe=datetime_df, schema=StringModel)

    class DateModel(pt.Model):
        c: date

    validate(dataframe=date_df, schema=DateModel)
    with pytest.raises(DataFrameValidationError):
        validate(dataframe=string_df, schema=DateModel)
    with pytest.raises(DataFrameValidationError):
        validate(dataframe=datetime_df, schema=DateModel)

    class DateTimeModel(pt.Model):
        c: datetime

    validate(dataframe=datetime_df, schema=DateTimeModel)
    with pytest.raises(DataFrameValidationError):
        validate(dataframe=string_df, schema=DateTimeModel)
    with pytest.raises(DataFrameValidationError):
        validate(dataframe=date_df, schema=DateTimeModel)
Test for date(time) validation. Strings, dates, and datetimes are all assigned type "string" in the OpenAPI JSON schema spec, so this needs to be tested specifically, since the implementation has to check the "format" property on the field schema.
test_datetime_validation
python
JakobGM/patito
tests/test_validators.py
https://github.com/JakobGM/patito/blob/master/tests/test_validators.py
MIT
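The "format" discrimination described in the docstring above is visible directly in the pydantic-generated JSON schema. A quick illustration (the exact dict contents, such as the "title" entry, depend on the pydantic version):

from datetime import date, datetime

import patito as pt


class DateModel(pt.Model):
    c: date


class DateTimeModel(pt.Model):
    c: datetime


# Both fields get JSON schema type "string"; only "format" tells them apart.
print(DateModel.model_json_schema()["properties"]["c"])
# roughly: {'format': 'date', 'title': 'C', 'type': 'string'}
print(DateTimeModel.model_json_schema()["properties"]["c"])
# roughly: {'format': 'date-time', 'title': 'C', 'type': 'string'}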
def test_enum_validation() -> None: """Test validation of enum.Enum-typed fields.""" class ABCEnum(enum.Enum): ONE = "a" TWO = "b" THREE = "c" class EnumModel(pt.Model): column: ABCEnum valid_df = pl.DataFrame({"column": ["a", "b", "b", "c"]}) validate(dataframe=valid_df, schema=EnumModel) invalid_df = pl.DataFrame({"column": ["d"]}) with pytest.raises(DataFrameValidationError) as e_info: validate(dataframe=invalid_df, schema=EnumModel) errors = e_info.value.errors() assert len(errors) == 1 assert errors[0] == { "loc": ("column",), "msg": "Rows with invalid values: {'d'}.", "type": "value_error.rowvalue", }
Test validation of enum.Enum-typed fields.
test_enum_validation
python
JakobGM/patito
tests/test_validators.py
https://github.com/JakobGM/patito/blob/master/tests/test_validators.py
MIT
def test_optional_enum_validation() -> None: """Test validation of optional enum.Enum-typed fields.""" class ABCEnum(enum.Enum): ONE = "a" TWO = "b" THREE = "c" class EnumModel(pt.Model): column: Optional[ABCEnum] valid_df = pl.DataFrame({"column": ["a", "b", "b", "c"]}) validate(dataframe=valid_df, schema=EnumModel) invalid_df = pl.DataFrame({"column": ["d"]}) with pytest.raises(DataFrameValidationError) as e_info: validate(dataframe=invalid_df, schema=EnumModel) errors = e_info.value.errors() assert len(errors) == 1 assert errors[0] == { "loc": ("column",), "msg": "Rows with invalid values: {'d'}.", "type": "value_error.rowvalue", }
Test validation of optional enum.Enum-typed fields.
test_optional_enum_validation
python
JakobGM/patito
tests/test_validators.py
https://github.com/JakobGM/patito/blob/master/tests/test_validators.py
MIT
def test_literal_enum_validation() -> None: """Test validation of typing.Literal-typed fields.""" class EnumModel(pt.Model): column: Literal["a", "b", "c"] valid_df = pl.DataFrame({"column": ["a", "b", "b", "c"]}) validate(dataframe=valid_df, schema=EnumModel) invalid_df = pl.DataFrame({"column": ["d"]}) with pytest.raises(DataFrameValidationError) as e_info: validate(dataframe=invalid_df, schema=EnumModel) error_expected = { "loc": ("column",), "msg": "Rows with invalid values: {'d'}.", "type": "value_error.rowvalue", } errors = e_info.value.errors() assert len(errors) == 1 assert errors[0] == error_expected class ListEnumModel(pt.Model): column: list[Literal["a", "b", "c"]] valid_df = pl.DataFrame({"column": [["a", "b"], ["b", "c"], ["a", "c"]]}) validate(dataframe=valid_df, schema=ListEnumModel) invalid_df = pl.DataFrame({"column": [["a", "b"], ["b", "c"], ["a", "d"]]}) with pytest.raises(DataFrameValidationError) as e_info: validate(dataframe=invalid_df, schema=ListEnumModel) errors = e_info.value.errors() assert len(errors) == 1 assert errors[0] == error_expected
Test validation of typing.Literal-typed fields.
test_literal_enum_validation
python
JakobGM/patito
tests/test_validators.py
https://github.com/JakobGM/patito/blob/master/tests/test_validators.py
MIT
def test_optional_literal_enum_validation() -> None: """Test validation of optional typing.Literal-typed fields.""" class EnumModel(pt.Model): column: Optional[Literal["a", "b", "c"]] valid_df = pl.DataFrame({"column": ["a", "b", "b", "c"]}) validate(dataframe=valid_df, schema=EnumModel) invalid_df = pl.DataFrame({"column": ["d"]}) with pytest.raises(DataFrameValidationError) as e_info: validate(dataframe=invalid_df, schema=EnumModel) error_expected = { "loc": ("column",), "msg": "Rows with invalid values: {'d'}.", "type": "value_error.rowvalue", } errors = e_info.value.errors() assert len(errors) == 1 assert errors[0] == error_expected class ListEnumModel(pt.Model): column: list[Literal["a", "b", "c"]] valid_df = pl.DataFrame({"column": [["a", "b"], ["b", "c"], ["a", "c"]]}) validate(dataframe=valid_df, schema=ListEnumModel) invalid_df = pl.DataFrame({"column": [["a", "b"], ["b", "c"], ["a", "d"]]}) with pytest.raises(DataFrameValidationError) as e_info: validate(dataframe=invalid_df, schema=ListEnumModel) errors = e_info.value.errors() assert len(errors) == 1 assert errors[0] == error_expected
Test validation of optional typing.Literal-typed fields.
test_optional_literal_enum_validation
python
JakobGM/patito
tests/test_validators.py
https://github.com/JakobGM/patito/blob/master/tests/test_validators.py
MIT
def test_simple_struct_validation() -> None: """Test validation of model with struct column.""" valid_df = pl.DataFrame({"positive_struct": [{"x": 1}, {"x": 2}, {"x": 3}]}) _PositiveStructModel.validate(valid_df) bad_df = pl.DataFrame({"positive_struct": [{"x": -1}, {"x": 2}, {"x": 3}]}) with pytest.raises(DataFrameValidationError): _PositiveStructModel.validate(bad_df)
Test validation of model with struct column.
test_simple_struct_validation
python
JakobGM/patito
tests/test_validators.py
https://github.com/JakobGM/patito/blob/master/tests/test_validators.py
MIT
def test_nested_struct_validation() -> None: """Test validation of model with nested struct column.""" class NestedPositiveStructModel(pt.Model): positive_struct_model: _PositiveStructModel valid_df = pl.DataFrame( { "positive_struct_model": [ {"positive_struct": {"x": 1}}, {"positive_struct": {"x": 2}}, {"positive_struct": {"x": 3}}, ] } ) NestedPositiveStructModel.validate(valid_df) bad_df = pl.DataFrame( { "positive_struct_model": [ {"positive_struct": {"x": -1}}, {"positive_struct": {"x": 2}}, {"positive_struct": {"x": 3}}, ] } ) with pytest.raises(DataFrameValidationError): NestedPositiveStructModel.validate(bad_df)
Test validation of model with nested struct column.
test_nested_struct_validation
python
JakobGM/patito
tests/test_validators.py
https://github.com/JakobGM/patito/blob/master/tests/test_validators.py
MIT
def test_empty_list_validation() -> None: """Test validation of model with empty lists.""" class TestModel(pt.Model): list_field: list[str] # validate presence of an empty list df = pl.DataFrame({"list_field": [["a", "b"], []]}) TestModel.validate(df) # validate when all lists are empty, so long as the schema is correct df = pl.DataFrame( {"list_field": [[], []]}, schema={"list_field": pl.List(pl.String)} ) TestModel.validate(df) class NestedTestModel(pt.Model): nested_list_field: list[list[str]] df = pl.DataFrame({"nested_list_field": [[["a", "b"], ["c"]], []]}) NestedTestModel.validate(df) df = pl.DataFrame( {"nested_list_field": [[], []]}, schema={"nested_list_field": pl.List(pl.List(pl.String))}, ) NestedTestModel.validate(df)
Test validation of model with empty lists.
test_empty_list_validation
python
JakobGM/patito
tests/test_validators.py
https://github.com/JakobGM/patito/blob/master/tests/test_validators.py
MIT
def test_list_struct_validation() -> None: """Test validation of model with list of structs column.""" class ListPositiveStructModel(pt.Model): list_positive_struct: list[_PositiveStruct] valid_df = pl.DataFrame( {"list_positive_struct": [[{"x": 1}, {"x": 2}], [{"x": 3}, {"x": 4}, {"x": 5}]]} ) ListPositiveStructModel.validate(valid_df) bad_df = pl.DataFrame( { "list_positive_struct": [ [{"x": 1}, {"x": 2}], [{"x": 3}, {"x": -4}, {"x": 5}], ] } ) with pytest.raises(DataFrameValidationError): ListPositiveStructModel.validate(bad_df)
Test validation of model with list of structs column.
test_list_struct_validation
python
JakobGM/patito
tests/test_validators.py
https://github.com/JakobGM/patito/blob/master/tests/test_validators.py
MIT
def test_struct_validation_with_polars_constraint() -> None: """Test validation of models with constrained struct column.""" class Interval(pt.Model): x_min: int x_max: int = pt.Field(constraints=pt.col("x_min") <= pt.col("x_max")) class IntervalModel(pt.Model): interval: Interval valid_df = pl.DataFrame( { "interval": [ {"x_min": 0, "x_max": 1}, {"x_min": 0, "x_max": 0}, {"x_min": -1, "x_max": 1}, ] } ) IntervalModel.validate(valid_df) bad_df = pl.DataFrame( { "interval": [ {"x_min": 0, "x_max": 1}, {"x_min": 1, "x_max": 0}, {"x_min": -1, "x_max": 1}, ] } ) with pytest.raises(DataFrameValidationError): IntervalModel.validate(bad_df)
Test validation of models with constrained struct column.
test_struct_validation_with_polars_constraint
python
JakobGM/patito
tests/test_validators.py
https://github.com/JakobGM/patito/blob/master/tests/test_validators.py
MIT
def test_validation_of_bounds_checks() -> None: """Check if value bounds are correctly validated.""" class BoundModel(pt.Model): le_column: float = pt.Field(le=42.5) lt_column: float = pt.Field(lt=42.5) ge_column: float = pt.Field(ge=42.5) gt_column: float = pt.Field(gt=42.5) combined_column: float = pt.Field(gt=42.5, le=43) multiple_column: float = pt.Field(multiple_of=0.5) # const fields should now use Literal instead, but pyright # complains about Literal of float values const_column: Literal["3.1415"] = pt.Field(default="3.1415") # type: ignore regex_column: str = pt.Field(pattern=r"value [A-Z]") min_length_column: str = pt.Field(min_length=2) max_length_column: str = pt.Field(max_length=2) # The .example() method should produce the correct dtypes, except for # the regex-validated string field which is not supported BoundModel.validate( BoundModel.examples({"regex_column": ["value A", "value B", "value C"]}) ) valid = [42.5, 42.4, 42.5, 42.6, 42.6, 19.5, "3.1415", "value X", "ab", "ab"] valid_df = pl.DataFrame(data=[valid], schema=BoundModel.columns, orient="row") BoundModel.validate(valid_df) invalid = [42.6, 42.5, 42.4, 42.5, 43.1, 19.75, "3.2", "value x", "a", "abc"] for column_index, column_name in enumerate(BoundModel.columns): data = ( valid[:column_index] + invalid[column_index : column_index + 1] + valid[column_index + 1 :] ) invalid_df = pl.DataFrame(data=[data], schema=BoundModel.columns, orient="row") with pytest.raises(DataFrameValidationError) as e_info: BoundModel.validate(invalid_df) errors = e_info.value.errors() assert len(errors) == 1 assert errors[0] == { "loc": (column_name,), "msg": "1 row with out of bound values.", "type": "value_error.rowvalue", }
Check if value bounds are correctly validated.
test_validation_of_bounds_checks
python
JakobGM/patito
tests/test_validators.py
https://github.com/JakobGM/patito/blob/master/tests/test_validators.py
MIT
def test_validation_of_dtype_specifiers() -> None: """Fields with specific dtype annotations should be validated.""" class DTypeModel(pt.Model): int_column: int int_explicit_dtype_column: int = pt.Field(dtype=pl.Int64) smallint_column: int = pt.Field(dtype=pl.Int8) unsigned_int_column: int = pt.Field(dtype=pl.UInt64) unsigned_smallint_column: int = pt.Field(dtype=pl.UInt8) assert DTypeModel.dtypes == { "int_column": pl.Int64, "int_explicit_dtype_column": pl.Int64, "smallint_column": pl.Int8, "unsigned_int_column": pl.UInt64, "unsigned_smallint_column": pl.UInt8, } # The .example() method should produce the correct dtypes DTypeModel.validate(DTypeModel.examples({"smallint_column": [1, 2, 3]})) valid = [ pl.Series([-2]).cast(pl.Int64), pl.Series([2**32]).cast(pl.Int64), pl.Series([2]).cast(pl.Int8), pl.Series([2]).cast(pl.UInt64), pl.Series([2]).cast(pl.UInt8), ] valid_df = pl.DataFrame(data=valid, schema=DTypeModel.columns) DTypeModel.validate(valid_df) invalid = [ pl.Series(["a"]).cast(pl.String), pl.Series([2.5]).cast(pl.Float64), pl.Series([2**32]).cast(pl.Int64), pl.Series([-2]).cast(pl.Int64), pl.Series([-2]).cast(pl.Int64), ] for column_index, (column_name, dtype) in enumerate( zip( DTypeModel.columns, [pl.String, pl.Float64, pl.Int64, pl.Int64, pl.Int64], ) ): data = ( valid[:column_index] + invalid[column_index : column_index + 1] + valid[column_index + 1 :] ) invalid_df = pl.DataFrame(data=data, schema=DTypeModel.columns) with pytest.raises(DataFrameValidationError) as e_info: DTypeModel.validate(invalid_df) errors = e_info.value.errors() assert len(errors) == 1 assert errors[0] == { "loc": (column_name,), "msg": f"Polars dtype {dtype} does not match model field type.", "type": "type_error.columndtype", }
Fields with specific dtype annotations should be validated.
test_validation_of_dtype_specifiers
python
JakobGM/patito
tests/test_validators.py
https://github.com/JakobGM/patito/blob/master/tests/test_validators.py
MIT
def test_custom_constraint_validation() -> None: """Users should be able to specify custom constraints.""" class CustomConstraintModel(pt.Model): even_int: int = pt.Field( constraints=[(pl.col("even_int") % 2 == 0).alias("even_constraint")] ) odd_int: int = pt.Field(constraints=pl.col("odd_int") % 2 == 1) df = CustomConstraintModel.DataFrame({"even_int": [2, 3], "odd_int": [1, 2]}) with pytest.raises(DataFrameValidationError) as e_info: df.validate() errors = e_info.value.errors() assert len(errors) == 2 assert errors[0] == { "loc": ("even_int",), "msg": "1 row does not match custom constraints.", "type": "value_error.rowvalue", } assert errors[1] == { "loc": ("odd_int",), "msg": "1 row does not match custom constraints.", "type": "value_error.rowvalue", } df.limit(1).validate() # We can also validate aggregation queries class PizzaSlice(pt.Model): fraction: float = pt.Field(constraints=pl.col("fraction").sum() == 1) whole_pizza = pt.DataFrame({"fraction": [0.25, 0.75]}) PizzaSlice.validate(whole_pizza) part_pizza = pt.DataFrame({"fraction": [0.25, 0.25]}) with pytest.raises(DataFrameValidationError): PizzaSlice.validate(part_pizza) # We can validate multiple AND constraints with a list of constraints class DivisibleByTwoAndThree(pt.Model): number: int = pt.Field(constraints=[pt.col("_") % 2 == 0, pt.col("_") % 3 == 0]) one_constraint_failing_df = pt.DataFrame({"number": [3]}) with pytest.raises(DataFrameValidationError): DivisibleByTwoAndThree.validate(one_constraint_failing_df) other_constraint_failing_df = pt.DataFrame({"number": [4]}) with pytest.raises(DataFrameValidationError): DivisibleByTwoAndThree.validate(other_constraint_failing_df) all_constraints_failing_df = pt.DataFrame({"number": [5]}) with pytest.raises(DataFrameValidationError): DivisibleByTwoAndThree.validate(all_constraints_failing_df) all_constraints_passing_df = pt.DataFrame({"number": [6]}) DivisibleByTwoAndThree.validate(all_constraints_passing_df)
Users should be able to specify custom constraints.
test_custom_constraint_validation
python
JakobGM/patito
tests/test_validators.py
https://github.com/JakobGM/patito/blob/master/tests/test_validators.py
MIT
def test_anonymous_column_constraints() -> None: """You should be able to refer to the field column with an anonymous column.""" class Pair(pt.Model): # pl.col("_") refers to the given field column odd_number: int = pt.Field(constraints=pl.col("_") % 2 == 1) # pt.field is simply an alias for pl.col("_") even_number: int = pt.Field(constraints=pt.field % 2 == 0) pairs = pt.DataFrame({"odd_number": [1, 3, 5], "even_number": [2, 4, 6]}) Pair.validate(pairs) with pytest.raises(DataFrameValidationError): Pair.validate( pairs.select( [ pl.col("odd_number").alias("even_number"), pl.col("even_number").alias("odd_number"), ] ) )
You should be able to refer to the field column with an anonymous column.
test_anonymous_column_constraints
python
JakobGM/patito
tests/test_validators.py
https://github.com/JakobGM/patito/blob/master/tests/test_validators.py
MIT
def test_optional_enum() -> None: """It should handle optional enums correctly.""" class OptionalEnumModel(pt.Model): # Old type annotation syntax optional_enum: Optional[Literal["A", "B"]] df = pl.DataFrame({"optional_enum": ["A", "B", None]}) OptionalEnumModel.validate(df)
It should handle optional enums correctly.
test_optional_enum
python
JakobGM/patito
tests/test_validators.py
https://github.com/JakobGM/patito/blob/master/tests/test_validators.py
MIT
def test_optional_pipe_operator() -> None:
    """Ensure that pipe operator works as expected."""

    class OptionalEnumModel(pt.Model):
        # Old type annotation syntax
        optional_enum_1: Optional[Literal["A", "B"]]
        # New type annotation syntax (pipe operator, Python 3.10+)
        optional_enum_2: Literal["A", "B"] | None

    df = pl.DataFrame(
        {
            "optional_enum_1": ["A", "B", None],
            "optional_enum_2": ["A", "B", None],
        }
    )
    OptionalEnumModel.validate(df)
Ensure that pipe operator works as expected.
test_optional_pipe_operator
python
JakobGM/patito
tests/test_validators.py
https://github.com/JakobGM/patito/blob/master/tests/test_validators.py
MIT
def test_validation_of_list_dtypes() -> None: """It should be able to validate dtypes organized in lists.""" class ListModel(pt.Model): int_list: list[int] int_or_null_list: list[Optional[int]] nullable_int_list: Optional[list[int]] nullable_int_or_null_list: Optional[list[Optional[int]]] valid_df = pl.DataFrame( { "int_list": [[1, 2], [3, 4]], "int_or_null_list": [[1, 2], [3, None]], "nullable_int_list": [[1, 2], None], "nullable_int_or_null_list": [[1, None], None], } ) ListModel.validate(valid_df) for old, new in [ # List items are not nullable ("int_or_null_list", "int_list"), ("int_or_null_list", "nullable_int_list"), # List is not nullable ("nullable_int_list", "int_list"), ("nullable_int_list", "int_or_null_list"), # Combination of both ("nullable_int_or_null_list", "int_list"), ("nullable_int_or_null_list", "int_or_null_list"), ("nullable_int_or_null_list", "nullable_int_list"), ]: with pytest.raises(DataFrameValidationError): ListModel.validate(valid_df.with_columns(pl.col(old).alias(new)))
It should be able to validate dtypes organized in lists.
test_validation_of_list_dtypes
python
JakobGM/patito
tests/test_validators.py
https://github.com/JakobGM/patito/blob/master/tests/test_validators.py
MIT
def test_optional_nested_list() -> None: """It should be able to validate optional structs organized in lists.""" class Inner(pt.Model): name: str reliability: bool level: int class Outer(pt.Model): id: str code: str label: str inner_types: Optional[list[Inner]] good_df = pl.DataFrame( { "id": [1, 2, 3], "code": ["A", "B", "C"], "label": ["a", "b", "c"], "inner_types": [ [{"name": "a", "reliability": True, "level": 1}], [{"name": "b", "reliability": False, "level": 2}], None, ], } ) df = Outer.DataFrame(good_df).cast().derive() df.validate() bad_df = pl.DataFrame( { "id": [1, 2, 3], "code": ["A", "B", "C"], "label": ["a", "b", "c"], "inner_types": [ [{"name": "a", "level": 1}], # missing reliability [{"name": "b", "reliability": False, "level": 2}], None, ], } ) df = Outer.DataFrame(bad_df).cast().derive() with pytest.raises(DataFrameValidationError): df.validate()
It should be able to validate optional structs organized in lists.
test_optional_nested_list
python
JakobGM/patito
tests/test_validators.py
https://github.com/JakobGM/patito/blob/master/tests/test_validators.py
MIT
def test_nested_field_attrs() -> None: """Ensure that constraints are respected even when embedded inside 'anyOf'.""" class Test(pt.Model): foo: Optional[int] = pt.Field( dtype=pl.Int64, ge=0, le=100, constraints=pt.field.sum() == 100 ) test_df = Test.DataFrame( {"foo": [110, -10]} ) # meets constraint, but violates bounds (embedded in 'anyOf' in properties) with pytest.raises(DataFrameValidationError) as e: Test.validate(test_df) pattern = re.compile(r"2 rows with out of bound values") assert len(pattern.findall(str(e.value))) == 1 null_test_df = Test.DataFrame({"foo": [100, None, None]}) Test.validate(null_test_df) # should not raise
Ensure that constraints are respected even when embedded inside 'anyOf'.
test_nested_field_attrs
python
JakobGM/patito
tests/test_validators.py
https://github.com/JakobGM/patito/blob/master/tests/test_validators.py
MIT
def test_validation_column_subset() -> None: """Ensure that columns are only validated if they are in the subset.""" class Test(pt.Model): a: int b: int = pt.Field(dtype=pl.Int64, ge=0, le=100) Test.validate(pl.DataFrame({"a": [1, 2, 3], "b": [1, 2, 3]})) # should pass with pytest.raises(DataFrameValidationError): Test.validate(pl.DataFrame({"a": [1, 2, 3], "b": [101, 102, 103]})) # should pass without validating b Test.validate(pl.DataFrame({"a": [1, 2, 3], "b": [101, 102, 103]}), columns=["a"]) with pytest.raises(DataFrameValidationError): Test.validate( pl.DataFrame({"a": [1, 2, 3], "b": [101, 102, 103]}), columns=["b"] ) # test asking for superfluous column with pytest.raises(DataFrameValidationError): Test.validate(pl.DataFrame({"a": [1, 2, 3], "b": [1, 2, 3]}), columns=["c"])
Ensure that columns are only validated if they are in the subset.
test_validation_column_subset
python
JakobGM/patito
tests/test_validators.py
https://github.com/JakobGM/patito/blob/master/tests/test_validators.py
MIT
def test_alias_generator() -> None: """Allow column name transformations through AliasGenerator.""" df = pl.DataFrame({"my_val_a": [0]}) class NoAliasGeneratorModel(pt.Model): My_Val_A: int with pytest.raises(DataFrameValidationError): NoAliasGeneratorModel.validate(df) class AliasGeneratorModel(pt.Model): model_config = ConfigDict( alias_generator=AliasGenerator(validation_alias=str.title), ) My_Val_A: int AliasGeneratorModel.validate(df) df = pl.DataFrame({"my_incorrect_val_a": [0]}) with pytest.raises(DataFrameValidationError): AliasGeneratorModel.validate(df)
Allow column name transformations through AliasGenerator.
test_alias_generator
python
JakobGM/patito
tests/test_validators.py
https://github.com/JakobGM/patito/blob/master/tests/test_validators.py
MIT
def test_validate_should_not_mutate_the_original_polars_df_when_aliasing( df: pd.DataFrame | pl.DataFrame, expected: pd.DataFrame | pl.DataFrame ) -> None: """Ensure that the original DataFrame is not mutated by the validation process.""" class AliasGeneratorModel(pt.Model): model_config = ConfigDict( alias_generator=AliasGenerator(validation_alias=str.title), ) My_Val_A: int AliasGeneratorModel.validate(df) if isinstance(df, pd.DataFrame): assert isinstance(df, pd.DataFrame) pd_assert_frame_equal( df, expected, check_index_type=True, check_column_type=True ) else: assert isinstance(df, pl.DataFrame) pl_assert_frame_equal( df, expected, check_row_order=True, check_column_order=True )
Ensure that the original DataFrame is not mutated by the validation process.
test_validate_should_not_mutate_the_original_polars_df_when_aliasing
python
JakobGM/patito
tests/test_validators.py
https://github.com/JakobGM/patito/blob/master/tests/test_validators.py
MIT
def test_alias_generator_func() -> None: """Allow column name transformations through a string function.""" df = pl.DataFrame({"my_val_a": [0]}) class NoAliasGeneratorModel(pt.Model): My_Val_A: int with pytest.raises(DataFrameValidationError): NoAliasGeneratorModel.validate(df) class AliasGeneratorModel(pt.Model): model_config = ConfigDict( alias_generator=str.title, ) My_Val_A: int AliasGeneratorModel.validate(df) df = pl.DataFrame({"my_incorrect_val_a": [0]}) with pytest.raises(DataFrameValidationError): AliasGeneratorModel.validate(df)
Allow column name transformations through a string function.
test_alias_generator_func
python
JakobGM/patito
tests/test_validators.py
https://github.com/JakobGM/patito/blob/master/tests/test_validators.py
MIT
def test_derived_from_ser_deser(): """Test whether derived_from can be successfully serialized and deserialized.""" for expr in [None, "foo", pl.col("foo"), pl.col("foo") * 2]: ci = ColumnInfo(derived_from=expr) # use str for equality check of expressions # since expr == expr is an expression itself assert str(ci.derived_from) == str(expr) ci_ser_deser = ColumnInfo.model_validate_json(ci.model_dump_json()) assert str(ci_ser_deser.derived_from) == str(expr)
Test whether derived_from can be successfully serialized and deserialized.
test_derived_from_ser_deser
python
JakobGM/patito
tests/test_validators.py
https://github.com/JakobGM/patito/blob/master/tests/test_validators.py
MIT
def test_constraint_ser_deser(): """Test whether constraints can be successfully serialized and deserialized.""" for expr in [pl.col("foo") == 2, pl.col("foo") * 2 == 2]: ci = ColumnInfo(constraints=expr) # use str for equality check of expressions # since expr == expr is an expression itself assert str(ci.constraints) == str(expr) ci_ser_deser = ColumnInfo.model_validate_json(ci.model_dump_json()) assert str(ci_ser_deser.constraints) == str(expr)
Test whether constraints can be successfully serialized and deserialized.
test_constraint_ser_deser
python
JakobGM/patito
tests/test_validators.py
https://github.com/JakobGM/patito/blob/master/tests/test_validators.py
MIT
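Round-trips like the two tests above ultimately lean on polars' own expression (de)serialization. A minimal sketch, assuming a recent polars where `Expr.meta.serialize` accepts `format="json"` (the counterpart of the `pl.Expr.deserialize(..., format="json")` calls in patito's deserializers):

import io

import polars as pl

expr = pl.col("foo") * 2

# Serialize the expression tree to a JSON string ...
payload = expr.meta.serialize(format="json")

# ... and rebuild an equivalent expression from it.
roundtripped = pl.Expr.deserialize(io.StringIO(payload), format="json")

# `expr == expr` builds a new expression instead of comparing, so compare
# string representations, just like the tests above do.
assert str(roundtripped) == str(expr)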
def test_dtype_ser_deser():
    """Test whether dtypes can be successfully serialized and deserialized."""
    for dtype in [
        pl.Float32,
        pl.String,
        pl.String(),
        pl.Datetime(time_zone="Europe/Oslo"),
        pl.Struct({"foo": pl.Int16, "bar": pl.Datetime(time_zone="Europe/Oslo")}),
        pl.List(
            pl.Struct({"foo": pl.Int16, "bar": pl.Datetime(time_zone="Europe/Oslo")})
        ),
        pl.Enum(["Foo", "Bar"]),
    ]:
        ci = ColumnInfo(dtype=dtype)
        # unlike expressions, dtypes support direct equality checks
        assert ci.dtype == dtype
        ci_ser_deser = ColumnInfo.model_validate_json(ci.model_dump_json())
        assert ci_ser_deser.dtype == dtype
Test whether dtypes can be successfully serialized and deserialized.
test_dtype_ser_deser
python
JakobGM/patito
tests/test_validators.py
https://github.com/JakobGM/patito/blob/master/tests/test_validators.py
MIT
def query_db(query, args=(), one=False): """Queries the database and returns a list of dictionaries.""" cur = sqldb.execute(query, args) rv = cur.fetchall() return (rv[0] if rv else None) if one else rv
Queries the database and returns a list of dictionaries.
query_db
python
karpathy/arxiv-sanity-preserver
buildsvm.py
https://github.com/karpathy/arxiv-sanity-preserver/blob/master/buildsvm.py
MIT
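Usage follows the classic Flask/SQLite helper pattern. An illustrative call site (assuming the connection's row_factory yields mapping-style rows, as these scripts set up; `uid` is a placeholder):

# fetch many rows
records = query_db('select * from library where user_id = ?', [uid])

# fetch at most one row; returns None when nothing matches
record = query_db('select * from library where user_id = ? and paper_id = ?',
                  [uid, '1512.08756'], one=True)
if record is not None:
  print(record['paper_id'])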
def encode_feedparser_dict(d):
  """
  helper function to get rid of feedparser bs with a deep copy.
  I hate when libs wrap simple things in their own classes.
  """
  if isinstance(d, (feedparser.FeedParserDict, dict)):
    return {k: encode_feedparser_dict(d[k]) for k in d.keys()}
  elif isinstance(d, list):
    return [encode_feedparser_dict(k) for k in d]
  else:
    return d
helper function to get rid of feedparser bs with a deep copy. I hate when libs wrap simple things in their own classes.
encode_feedparser_dict
python
karpathy/arxiv-sanity-preserver
fetch_papers.py
https://github.com/karpathy/arxiv-sanity-preserver/blob/master/fetch_papers.py
MIT
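A typical call site: run a parsed Atom entry through the helper so it becomes plain dicts and lists, safe to pickle or dump as JSON (the query URL here is illustrative; fetch_papers.py assembles these URLs from its CLI arguments):

import json
import urllib.request

import feedparser

url = 'http://export.arxiv.org/api/query?search_query=all:electron&max_results=1'
with urllib.request.urlopen(url) as resp:
  parse = feedparser.parse(resp.read())

entry = encode_feedparser_dict(parse.entries[0])
print(json.dumps(entry, indent=2)[:200])  # now a plain dict, JSON-serializable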
def parse_arxiv_url(url):
  """
  example: http://arxiv.org/abs/1512.08756v2
  we want to extract the raw id and the version
  """
  ix = url.rfind('/')
  idversion = url[ix+1:] # extract just the id (and the version)
  parts = idversion.split('v')
  assert len(parts) == 2, 'error parsing url ' + url
  return parts[0], int(parts[1])
example: http://arxiv.org/abs/1512.08756v2; we want to extract the raw id and the version
parse_arxiv_url
python
karpathy/arxiv-sanity-preserver
fetch_papers.py
https://github.com/karpathy/arxiv-sanity-preserver/blob/master/fetch_papers.py
MIT
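A quick check of the helper's behavior; note the baked-in assumption, enforced by the assert, that exactly one 'v' follows the last slash:

rawid, version = parse_arxiv_url('http://arxiv.org/abs/1512.08756v2')
assert rawid == '1512.08756'
assert version == 2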
def query_db(query, args=(), one=False): """Queries the database and returns a list of dictionaries.""" cur = g.db.execute(query, args) rv = cur.fetchall() return (rv[0] if rv else None) if one else rv
Queries the database and returns a list of dictionaries.
query_db
python
karpathy/arxiv-sanity-preserver
serve.py
https://github.com/karpathy/arxiv-sanity-preserver/blob/master/serve.py
MIT
def get_user_id(username): """Convenience method to look up the id for a username.""" rv = query_db('select user_id from user where username = ?', [username], one=True) return rv[0] if rv else None
Convenience method to look up the id for a username.
get_user_id
python
karpathy/arxiv-sanity-preserver
serve.py
https://github.com/karpathy/arxiv-sanity-preserver/blob/master/serve.py
MIT
def get_username(user_id): """Convenience method to look up the username for a user.""" rv = query_db('select username from user where user_id = ?', [user_id], one=True) return rv[0] if rv else None
Convenience method to look up the username for a user.
get_username
python
karpathy/arxiv-sanity-preserver
serve.py
https://github.com/karpathy/arxiv-sanity-preserver/blob/master/serve.py
MIT
def review():
  """ User wants to toggle a paper in their library """
  # make sure user is logged in
  if not g.user:
    return 'NO' # fail... (not logged in). JS should prevent us from getting here.

  idvv = request.form['pid'] # includes version
  if not isvalidid(idvv):
    return 'NO' # fail, malformed id. weird.
  pid = strip_version(idvv)
  if pid not in db:
    return 'NO' # we don't know this paper. wat

  uid = session['user_id'] # id of logged in user

  # check if this user already has this paper in their library
  record = query_db('''select * from library where
          user_id = ? and paper_id = ?''', [uid, pid], one=True)

  ret = 'NO'
  if record:
    # record exists, erase it.
    g.db.execute('''delete from library where user_id = ? and paper_id = ?''', [uid, pid])
    g.db.commit()
    #print('removed %s for %s' % (pid, uid))
    ret = 'OFF'
  else:
    # record does not exist, add it.
    rawpid = strip_version(pid)
    g.db.execute('''insert into library (paper_id, user_id, update_time) values (?, ?, ?)''',
        [rawpid, uid, int(time.time())])
    g.db.commit()
    #print('added %s for %s' % (pid, uid))
    ret = 'ON'

  return ret
User wants to toggle a paper in their library
review
python
karpathy/arxiv-sanity-preserver
serve.py
https://github.com/karpathy/arxiv-sanity-preserver/blob/master/serve.py
MIT
def login():
  """ Logs in the user; if the username doesn't exist, creates the account """
  if not request.form['username']:
    flash('You have to enter a username')
  elif not request.form['password']:
    flash('You have to enter a password')
  elif get_user_id(request.form['username']) is not None:
    # username already exists, fetch all of its attributes
    user = query_db('''select * from user where
          username = ?''', [request.form['username']], one=True)
    if check_password_hash(user['pw_hash'], request.form['password']):
      # password is correct, log in the user
      session['user_id'] = get_user_id(request.form['username'])
      flash('User ' + request.form['username'] + ' logged in.')
    else:
      # incorrect password
      flash('User ' + request.form['username'] + ' already exists, wrong password.')
  else:
    # create account and log in
    creation_time = int(time.time())
    g.db.execute('''insert into user (username, pw_hash, creation_time) values (?, ?, ?)''',
      [request.form['username'],
      generate_password_hash(request.form['password']),
      creation_time])
    user_id = g.db.execute('select last_insert_rowid()').fetchall()[0][0]
    g.db.commit()
    session['user_id'] = user_id
    flash('New account %s created' % (request.form['username'], ))

  return redirect(url_for('intmain'))
Logs in the user; if the username doesn't exist, creates the account
login
python
karpathy/arxiv-sanity-preserver
serve.py
https://github.com/karpathy/arxiv-sanity-preserver/blob/master/serve.py
MIT
@contextmanager  # requires: from contextlib import contextmanager
def _tempfile(*args, **kws):
  """
  Context for temporary file.

  Will find a free temporary filename upon entering
  and will try to delete the file on leaving.

  Parameters
  ----------
  suffix : string
    optional file suffix
  """
  fd, name = tempfile.mkstemp(*args, **kws)
  os.close(fd)
  try:
    yield name
  finally:
    try:
      os.remove(name)
    except OSError as e:
      if e.errno == 2:  # errno 2 == ENOENT: file already gone, nothing to do
        pass
      else:
        raise  # re-raise without discarding the original traceback
Context for temporary file. Will find a free temporary filename upon entering and will try to delete the file on leaving. Parameters ---------- suffix : string optional file suffix
_tempfile
python
karpathy/arxiv-sanity-preserver
utils.py
https://github.com/karpathy/arxiv-sanity-preserver/blob/master/utils.py
MIT
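Typical usage pairs the context with a write-then-move pattern (file names and payload here are illustrative):

import shutil

# Write into a scratch file that is cleaned up no matter what,
# then move it into place once the write has fully succeeded.
with _tempfile(suffix='.pdf') as tmp_path:
  with open(tmp_path, 'wb') as f:
    f.write(b'%PDF-1.4 ...')  # illustrative payload
  shutil.move(tmp_path, 'paper.pdf')
# on exit the context calls os.remove(tmp_path); errno 2 (the file was
# already moved away) is swallowed, so the move above is safe.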