# File: lale-master/lale/lib/rasl/map.py (repo: lale)

# Copyright 2020 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import ast
import typing

import pandas as pd

import lale.docstrings
import lale.operators
from lale.datasets.data_schemas import add_table_name, forward_metadata, get_table_name
from lale.expressions import _it_column
from lale.helpers import (
    _is_ast_call,
    _is_ast_name,
    _is_ast_subs_or_attr,
    _is_pandas_df,
    _is_spark_df,
)
from lale.lib.dataframe import get_columns
from lale.lib.rasl._eval_pandas_df import eval_expr_pandas_df
from lale.lib.rasl._eval_spark_df import eval_expr_spark_df

try:
    # noqa in the imports here because those get used dynamically and flake fails.
    from pyspark.sql.functions import col as spark_col  # noqa

    spark_installed = True
except ImportError:
    spark_installed = False


def _new_column_name(name, expr):
    if name is None or (isinstance(name, str) and not name.strip()):
        return _infer_new_name(expr)
    else:
        return name


def _infer_new_name(expr):
    if (
        _is_ast_call(expr.expr)
        and _is_ast_name(expr.expr.func)
        and expr.expr.func.id
        in [
            "identity",
            "isnan",
            "isnotnan",
            "isnull",
            "isnotnull",
            "replace",
            "day_of_month",
            "day_of_week",
            "day_of_year",
            "hour",
            "minute",
            "month",
        ]
        and _is_ast_subs_or_attr(expr.expr.args[0])
    ):
        return _it_column(expr.expr.args[0])
    else:
        raise ValueError(
            """New name of the column to be renamed cannot be None or empty. You may want to use a dictionary
to specify the new column name as the key, and the expression as the value."""
        )


def _validate(X, expr):
    visitor = _Validate(X)
    visitor.visit(expr.expr)
    return visitor.accessed


class _Validate(ast.NodeVisitor):
    def __init__(self, X):
        self.df = X
        self.accessed = set()

    def visit_Attribute(self, node: ast.Attribute):
        column_name = _it_column(node)
        if column_name not in self.df.columns:
            raise ValueError(
                f"The column {column_name} is not present in the dataframe"
            )
        self.accessed.add(_it_column(node))

    def visit_Subscript(self, node: ast.Subscript):
        column_name = _it_column(node)
        if column_name is None or (
            isinstance(column_name, str) and not column_name.strip()
        ):
            raise ValueError("Name of the column cannot be None or empty.")
        if column_name not in self.df.columns:
            raise ValueError(
                f"The column {column_name} is not present in the dataframe"
            )
        self.accessed.add(column_name)


class _MapImpl:
    def __init__(self, columns, remainder="drop"):
        self.columns = columns
        self.remainder = remainder

    def fit(self, X, y=None):
        if callable(self.columns):
            self.columns = self.columns(X)
        return self

    def __getattribute__(self, item):
        # we want to remove fit if a static column is available
        # since it should be considered already trained
        omit_fit = False
        if item == "fit":
            try:
                cols = super().__getattribute__("columns")
                if not callable(cols):
                    omit_fit = True
            except AttributeError:
                pass
        if omit_fit:
            raise AttributeError(
                "fit cannot be called on a Map that has a static expression or has already been fit"
            )
        return super().__getattribute__(item)

    def transform(self, X):
        if _is_pandas_df(X):
            return self.transform_pandas_df(X)
        elif _is_spark_df(X):
            return self.transform_spark_df(X)
        else:
            raise ValueError(
                f"Only Pandas or Spark dataframe are supported as inputs, got {type(X)}. Please check that pyspark is installed if you see this error for a Spark dataframe."
            )

    def transform_pandas_df(self, X):
        mapped_df = {}
        accessed_column_names = set()

        def get_map_function_output(column, new_column_name):
            accessed_columns = _validate(X, column)
            new_column_name = _new_column_name(new_column_name, column)
            new_column = eval_expr_pandas_df(X, column)
            mapped_df[new_column_name] = new_column
            accessed_column_names.add(new_column_name)
            accessed_column_names.update(accessed_columns)

        columns = self.columns
        if callable(columns):
            columns = columns(X)

        if isinstance(columns, list):
            for column in columns:
                get_map_function_output(column, None)
        elif isinstance(columns, dict):
            for new_column_name, column in columns.items():
                get_map_function_output(column, new_column_name)
        else:
            raise ValueError("columns must be either a list or a dictionary.")
        mapped_df = pd.DataFrame(mapped_df)
        if self.remainder == "passthrough":
            remainder_columns = [x for x in X.columns if x not in accessed_column_names]
            mapped_df[remainder_columns] = X[remainder_columns]
        table_name = get_table_name(X)
        mapped_df = add_table_name(mapped_df, table_name)
        return mapped_df

    def transform_spark_df(self, X):
        new_columns = []
        accessed_column_names = set()

        def get_map_function_expr(column, new_column_name):
            accessed_columns = _validate(X, column)
            new_column_name = _new_column_name(new_column_name, column)
            new_column = eval_expr_spark_df(column)  # type: ignore
            new_columns.append(new_column.alias(new_column_name))  # type: ignore
            accessed_column_names.add(new_column_name)
            accessed_column_names.update(accessed_columns)

        columns = self.columns
        if callable(columns):
            columns = columns(X)

        if isinstance(columns, list):
            for column in columns:
                get_map_function_expr(column, None)
        elif isinstance(columns, dict):
            for new_column_name, column in columns.items():
                get_map_function_expr(column, new_column_name)
        else:
            raise ValueError("columns must be either a list or a dictionary.")
        if self.remainder == "passthrough":
            remainder_columns = [
                spark_col(typing.cast(str, x))
                for x in get_columns(X)
                if x not in accessed_column_names
            ]
            new_columns.extend(remainder_columns)
        for index_name in X.index_names:
            if index_name not in accessed_column_names:
                new_columns.extend([spark_col(index_name)])
        mapped_df = X.select(new_columns)
        mapped_df = forward_metadata(X, mapped_df)
        return mapped_df


_hyperparams_schema = {
    "allOf": [
        {
            "description": "This first sub-object lists all constructor arguments with their types, one at a time, omitting cross-argument constraints, if any.",
            "type": "object",
            "additionalProperties": False,
            "relevantToOptimizer": [],
            "properties": {
                "columns": {
                    "description": "Mappings for producing output columns.",
                    "anyOf": [
                        {
                            "description": "Dictionary of output column names and mapping expressions.",
                            "type": "object",
                            "additionalProperties": {"laleType": "expression"},
                        },
                        {
                            "description": "List of mapping expressions. The output column name is determined by a heuristic based on the input column name and the transformation function.",
                            "type": "array",
                            "items": {"laleType": "expression"},
                        },
                        {
                            "description": "A callable which, when given the input data, returns either a list or dictionary of mapping expressions, as above.",
                            "laleType": "callable",
                            "forOptimizer": False,
                        },
                    ],
                    "default": [],
                },
                "remainder": {
                    "description": "Transformation for the remaining columns.",
                    "anyOf": [
                        {"enum": ["passthrough", "drop"]},
                        {"description": "Mapping expression.", "laleType": "operator"},
                    ],
                    "default": "drop",
                },
            },
        }
    ]
}

_input_transform_schema = {
    "type": "object",
    "required": ["X"],
    "additionalProperties": False,
    "properties": {
        "X": {
            "description": "The outer array is over rows.",
            "anyOf": [
                {"laleType": "Any"},
                {
                    "type": "array",
                    "items": {
                        "description": "The inner array is over columns.",
                        "type": "array",
                        "items": {"laleType": "Any"},
                    },
                },
            ],
        }
    },
}

_output_transform_schema = {
    "description": "The outer array is over rows.",
    "anyOf": [
        {
            "type": "array",
            "items": {
                "description": "The inner array is over columns.",
                "type": "array",
                "items": {"laleType": "Any"},
            },
        },
        {"laleType": "Any"},
    ],
}

_combined_schemas = {
    "$schema": "http://json-schema.org/draft-04/schema#",
    "description": "Relational algebra map operator.",
    "documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.rasl.map.html",
    "type": "object",
    "tags": {"pre": [], "op": ["transformer"], "post": []},
    "properties": {
        "hyperparams": _hyperparams_schema,
        "input_transform": _input_transform_schema,
        "input_fit": _input_transform_schema,
        "output_transform": _output_transform_schema,
    },
}

Map = lale.operators.make_operator(_MapImpl, _combined_schemas)

lale.docstrings.set_docstrings(Map)
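
A short usage sketch (added for illustration, not part of the file above; the
frame and column names are made up): the dictionary form of `columns` names the
output columns, and `remainder="passthrough"` keeps unmentioned input columns.

    import pandas as pd
    from lale.expressions import it, replace
    from lale.lib.rasl import Map

    df = pd.DataFrame({"kind": ["a", "b", "a"], "x": [1, 2, 3]})
    mapper = Map(
        columns={"kind_code": replace(it.kind, {"a": 0, "b": 1})},
        remainder="passthrough",
    )
    # a Map with static columns is considered already trained
    print(mapper.transform(df))  # columns: kind_code, plus passthrough x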

# File: lale-master/lale/lib/rasl/one_hot_encoder.py (repo: lale)

# Copyright 2021 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import typing
from typing import Any, Tuple

import numpy as np

import lale.docstrings
import lale.helpers
import lale.operators
from lale.expressions import collect_set, it, replace
from lale.helpers import _ensure_pandas
from lale.lib.dataframe import count, get_columns
from lale.lib.sklearn import one_hot_encoder

from .aggregate import Aggregate
from .map import Map
from .monoid import Monoid, MonoidableOperator


class _OneHotEncoderMonoid(Monoid):
    def __init__(self, *, n_samples_seen_, feature_names_in_, categories_):
        self.n_samples_seen_ = n_samples_seen_
        self.feature_names_in_ = feature_names_in_
        self.categories_ = categories_

    def combine(self, other: "_OneHotEncoderMonoid"):
        n_samples_seen_ = self.n_samples_seen_ + other.n_samples_seen_
        assert list(self.feature_names_in_) == list(other.feature_names_in_)
        assert len(self.categories_) == len(other.categories_)
        combined_categories = [
            np.sort(
                np.unique(np.concatenate([self.categories_[i], other.categories_[i]]))
            )
            for i in range(len(self.categories_))
        ]
        return _OneHotEncoderMonoid(
            n_samples_seen_=n_samples_seen_,
            feature_names_in_=self.feature_names_in_,
            categories_=combined_categories,
        )


class _OneHotEncoderImpl(MonoidableOperator[_OneHotEncoderMonoid]):
    def __init__(
        self,
        *,
        categories="auto",
        drop=None,
        sparse=False,
        dtype="float64",
        handle_unknown="ignore",
    ):
        self._hyperparams = {
            "categories": categories,
            "drop": drop,
            "sparse": sparse,
            "dtype": dtype,
            "handle_unknown": handle_unknown,
        }

    def transform(self, X):
        if self._transformer is None:
            self._transformer = self._build_transformer()
        return self._transformer.transform(X)

    @property
    def n_samples_seen_(self):
        return getattr(self._monoid, "n_samples_seen_", 0)

    @property
    def categories_(self):
        return getattr(self._monoid, "categories_", None)

    @property
    def feature_names_in_(self):
        return getattr(self._monoid, "feature_names_in_", None)

    def from_monoid(self, monoid: _OneHotEncoderMonoid):
        self._monoid = monoid
        self.n_features_in_ = len(monoid.feature_names_in_)
        self._transformer = None

    def _build_transformer(self):
        assert self._monoid is not None
        result = Map(
            columns={
                f"{col_name}_{cat_value}": replace(
                    it[col_name],
                    {cat_value: 1},
                    handle_unknown="use_encoded_value",
                    unknown_value=0,
                )
                for col_idx, col_name in enumerate(self._monoid.feature_names_in_)
                for cat_value in self._monoid.categories_[col_idx]
            }
        )
        return result

    def to_monoid(self, batch: Tuple[Any, Any]):
        X, _ = batch
        n_samples_seen_ = count(X)
        feature_names_in_ = get_columns(X)
        agg_op = Aggregate(columns={c: collect_set(it[c]) for c in feature_names_in_})
        agg_data = agg_op.transform(X)
        agg_data = _ensure_pandas(agg_data)
        categories_ = [np.sort(agg_data.loc[0, c]) for c in feature_names_in_]
        return _OneHotEncoderMonoid(
            n_samples_seen_=n_samples_seen_,
            feature_names_in_=feature_names_in_,
            categories_=categories_,
        )


_combined_schemas = {
    "$schema": "http://json-schema.org/draft-04/schema#",
    "description": """Relational algebra reimplementation of scikit-learn's `OneHotEncoder`_ transformer that encodes categorical features as numbers.
Works on both pandas and Spark dataframes by using `Aggregate`_ for `fit` and `Map`_ for `transform`, which in turn use the appropriate backend.

.. _`OneHotEncoder`: https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.OneHotEncoder.html
.. _`Aggregate`: https://lale.readthedocs.io/en/latest/modules/lale.lib.rasl.aggregate.html
.. _`Map`: https://lale.readthedocs.io/en/latest/modules/lale.lib.rasl.map.html
""",
    "documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.rasl.one_hot_encoder.html",
    "type": "object",
    "tags": {
        "pre": ["categoricals"],
        "op": ["transformer", "interpretable"],
        "post": [],
    },
    "properties": {
        "hyperparams": one_hot_encoder._hyperparams_schema,
        "input_fit": one_hot_encoder._input_fit_schema,
        "input_transform": one_hot_encoder._input_transform_schema,
        "output_transform": one_hot_encoder._output_transform_schema,
    },
}

OneHotEncoder = lale.operators.make_operator(_OneHotEncoderImpl, _combined_schemas)

OneHotEncoder = typing.cast(
    lale.operators.PlannedIndividualOp,
    OneHotEncoder.customize_schema(
        drop={
            "enum": [None],
            "description": "This implementation only supports `drop=None`.",
            "default": None,
        },
        sparse={
            "enum": [False],
            "description": "This implementation only supports `sparse=False`.",
            "default": False,
        },
        dtype={
            "enum": ["float64"],
            "description": "This implementation only supports `dtype='float64'`.",
            "default": "float64",
        },
        handle_unknown={
            "enum": ["ignore"],
            "description": "This implementation only supports `handle_unknown='ignore'`.",
            "default": "ignore",
        },
    ),
)

lale.docstrings.set_docstrings(OneHotEncoder)
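
A quick usage sketch (illustrative only; the toy data is made up): the encoder
is fit like its scikit-learn counterpart and expands each categorical column
into 0/1 indicator columns named `<column>_<category>`.

    import pandas as pd
    from lale.lib.rasl import OneHotEncoder

    X = pd.DataFrame({"color": ["red", "blue", "red"]})
    enc = OneHotEncoder().fit(X)
    print(enc.transform(X))  # columns color_blue and color_red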

# File: lale-master/lale/lib/rasl/concat_features.py (repo: lale)

# Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging
from functools import reduce
from typing import Optional

import numpy as np
import pandas as pd
import scipy.sparse

import lale.docstrings
import lale.operators
import lale.pretty_print
from lale.datasets.data_schemas import add_table_name, get_index_names, get_table_name
from lale.expressions import it
from lale.helpers import _is_spark_df
from lale.json_operator import JSON_TYPE
from lale.lib.rasl.alias import Alias
from lale.lib.rasl.join import Join
from lale.type_checking import is_subschema, join_schemas, validate_is_schema

logger = logging.getLogger(__name__)
logger.setLevel(logging.WARNING)

try:
    import torch

    torch_installed = True
except ImportError:
    torch_installed = False


def _is_pandas_df(d):
    return isinstance(d, pd.DataFrame)


def _is_pandas_series(d):
    return isinstance(d, pd.Series)


def _is_pandas(d):
    return _is_pandas_df(d) or _is_pandas_series(d)


def _gen_table_name(avoid, cpt=0):
    name = f"tbl{cpt}"
    if name in avoid:
        return _gen_table_name(avoid, cpt=cpt + 1)
    else:
        return name


class _ConcatFeaturesImpl:
    def transform(self, X):
        if all(_is_pandas(d) for d in X):
            name2series = {}
            for dataset in X:
                if _is_pandas_df(dataset):
                    for name in dataset.columns:
                        name2series[name] = name2series.get(name, []) + [dataset[name]]
                elif _is_pandas_series(dataset):
                    name = dataset.name
                    name2series[name] = name2series.get(name, []) + [dataset]
                else:
                    assert False
            duplicates = [name for name, ls in name2series.items() if len(ls) > 1]
            if len(duplicates) == 0:
                result = pd.concat(X, axis=1)
            else:
                logger.info(f"ConcatFeatures duplicate column names {duplicates}")
                deduplicated = [ls[-1] for _, ls in name2series.items()]
                result = pd.concat(deduplicated, axis=1)
        elif all(_is_spark_df(d) for d in X):

            def join(d1, d2):
                n1 = get_table_name(d1)
                n2 = get_table_name(d2)
                if n1 is None:
                    n1 = _gen_table_name([n2])
                    d1 = Alias(name=n1).transform(d1)
                if n2 is None:
                    n2 = _gen_table_name([n1])
                    d2 = Alias(name=n2).transform(d2)
                indexes_col1 = get_index_names(d1)
                indexes_col2 = get_index_names(d2)
                if indexes_col1 is None or indexes_col2 is None:
                    raise ValueError(
                        "Index columns are required to concatenate features of Spark dataframes (see SparkDataFrameWithIndex)"
                    )
                transformer = Join(
                    pred=[
                        it[n1][index_col1] == it[n2][index_col2]
                        for index_col1, index_col2 in zip(indexes_col1, indexes_col2)
                    ]
                )
                return transformer.transform([d1, d2])

            result = reduce(join, X)
        elif all(_is_pandas(d) or _is_spark_df(d) for d in X):
            X = [d.toPandas() if _is_spark_df(d) else d for d in X]
            result = self.transform(X)
        else:
            np_datasets = []
            # Preprocess the datasets to convert them to 2-d numpy arrays
            for dataset in X:
                if _is_pandas(dataset):
                    np_dataset = dataset.values
                elif _is_spark_df(dataset):
                    np_dataset = dataset.toPandas().values
                elif isinstance(dataset, scipy.sparse.csr_matrix):
                    np_dataset = dataset.toarray()
                elif torch_installed and isinstance(dataset, torch.Tensor):
                    np_dataset = dataset.detach().cpu().numpy()
                else:
                    np_dataset = dataset
                if hasattr(np_dataset, "shape"):
                    if len(np_dataset.shape) == 1:  # To handle numpy column vectors
                        np_dataset = np.reshape(np_dataset, (np_dataset.shape[0], 1))
                np_datasets.append(np_dataset)
            result = np.concatenate(np_datasets, axis=1)
        name = reduce(
            (
                lambda x, y: get_table_name(x)
                if get_table_name(x) == get_table_name(y)
                else None
            ),
            X,
        )
        return add_table_name(result, name)

    def transform_schema(self, s_X):
        """Used internally by Lale for type-checking downstream operators."""
        min_cols, max_cols, elem_schema = 0, 0, None

        def add_ranges(min_a, max_a, min_b, max_b):
            min_ab = min_a + min_b
            if max_a == "unbounded" or max_b == "unbounded":
                max_ab = "unbounded"
            else:
                max_ab = max_a + max_b
            return min_ab, max_ab

        elem_schema: Optional[JSON_TYPE] = None
        for s_dataset in s_X["items"]:
            if s_dataset.get("laleType", None) == "Any":
                return {"laleType": "Any"}
            arr_1d_num = {"type": "array", "items": {"type": "number"}}
            arr_2d_num = {"type": "array", "items": arr_1d_num}
            s_decision_func = {"anyOf": [arr_1d_num, arr_2d_num]}
            if is_subschema(s_decision_func, s_dataset):
                s_dataset = arr_2d_num
            assert "items" in s_dataset, lale.pretty_print.to_string(s_dataset)
            s_rows = s_dataset["items"]
            if "type" in s_rows and "array" == s_rows["type"]:
                s_cols = s_rows["items"]
                if isinstance(s_cols, dict):
                    min_c = s_rows["minItems"] if "minItems" in s_rows else 1
                    max_c = s_rows["maxItems"] if "maxItems" in s_rows else "unbounded"
                    if elem_schema is None:
                        elem_schema = s_cols
                    else:
                        elem_schema = join_schemas(elem_schema, s_cols)
                else:
                    min_c, max_c = len(s_cols), len(s_cols)
                    for s_col in s_cols:
                        if elem_schema is None:
                            elem_schema = s_col
                        else:
                            elem_schema = join_schemas(elem_schema, s_col)
                min_cols, max_cols = add_ranges(min_cols, max_cols, min_c, max_c)
            else:
                if elem_schema is None:
                    elem_schema = s_rows
                else:
                    elem_schema = join_schemas(elem_schema, s_rows)
                min_cols, max_cols = add_ranges(min_cols, max_cols, 1, 1)
        s_result = {
            "$schema": "http://json-schema.org/draft-04/schema#",
            "type": "array",
            "items": {"type": "array", "minItems": min_cols, "items": elem_schema},
        }
        if max_cols != "unbounded":
            s_result["items"]["maxItems"] = max_cols
        validate_is_schema(s_result)
        return s_result


_hyperparams_schema = {
    "allOf": [
        {
            "description": "This first sub-object lists all constructor arguments with their "
            "types, one at a time, omitting cross-argument constraints, if any.",
            "type": "object",
            "additionalProperties": False,
            "relevantToOptimizer": [],
            "properties": {},
        }
    ]
}

_input_transform_schema = {
    "type": "object",
    "required": ["X"],
    "additionalProperties": False,
    "properties": {
        "X": {
            "description": "Outermost array dimension is over datasets.",
            "type": "array",
            "items": {
                "description": "Middle array dimension is over samples (aka rows).",
                "type": "array",
                "items": {
                    "description": "Innermost array dimension is over features (aka columns).",
                    "anyOf": [
                        {
                            "type": "array",
                            "items": {"type": "number"},
                        },
                        {"type": "number"},
                    ],
                },
            },
        }
    },
}

_output_transform_schema = {
    "description": "Features; the outer array is over samples.",
    "type": "array",
    "items": {
        "type": "array",
        "description": "Outer array dimension is over samples (aka rows).",
        "items": {
            "description": "Inner array dimension is over features (aka columns).",
            "laleType": "Any",
        },
    },
}

_combined_schemas = {
    "$schema": "http://json-schema.org/draft-04/schema#",
    "description": """Horizontal stacking concatenates features (aka columns) of input datasets.

Examples
--------
>>> A = [ [11, 12, 13],
...       [21, 22, 23],
...       [31, 32, 33] ]
>>> B = [ [14, 15],
...       [24, 25],
...       [34, 35] ]
>>> ConcatFeatures.transform([A, B])
NDArrayWithSchema([[11, 12, 13, 14, 15],
                   [21, 22, 23, 24, 25],
                   [31, 32, 33, 34, 35]])""",
    "documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.rasl.concat_features.html",
    "import_from": "lale.lib.rasl",
    "type": "object",
    "tags": {"pre": [], "op": ["transformer"], "post": []},
    "properties": {
        "hyperparams": _hyperparams_schema,
        "input_transform": _input_transform_schema,
        "output_transform": _output_transform_schema,
    },
}

ConcatFeatures = lale.operators.make_pretrained_operator(
    _ConcatFeaturesImpl, _combined_schemas
)

lale.docstrings.set_docstrings(ConcatFeatures)
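
A small pandas sketch (illustrative; the frames below are made up): when the
inputs share a column name, the pandas path keeps only the last occurrence of
the duplicated column instead of emitting two columns with the same name.

    import pandas as pd
    from lale.lib.rasl import ConcatFeatures

    left = pd.DataFrame({"a": [1, 2], "b": [3, 4]})
    right = pd.DataFrame({"b": [30, 40], "c": [5, 6]})
    # columns a, b, c, where "b" comes from the right frame
    print(ConcatFeatures.transform([left, right]))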

# File: lale-master/lale/lib/rasl/metrics.py (repo: lale)

# Copyright 2022 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import functools
from abc import abstractmethod
from typing import Dict, Iterable, Optional, Tuple, TypeVar, Union, cast

import numpy as np
import pandas as pd
from typing_extensions import Protocol, TypeAlias

from lale.expressions import astype, count, it
from lale.expressions import sum as lale_sum
from lale.helpers import spark_installed
from lale.operators import TrainedOperator

from .aggregate import Aggregate
from .group_by import GroupBy
from .map import Map
from .monoid import Monoid, MonoidFactory

MetricMonoid = Monoid

_M = TypeVar("_M", bound=MetricMonoid)

_PandasBatch: TypeAlias = Tuple[pd.DataFrame, pd.Series]

if spark_installed:
    from pyspark.sql.dataframe import DataFrame as SparkDataFrame

    _SparkBatch: TypeAlias = Tuple[SparkDataFrame, SparkDataFrame]

    _Batch_XyAux = Union[_PandasBatch, _SparkBatch]

    _Batch_yyXAux = Tuple[
        Union[pd.Series, np.ndarray, SparkDataFrame],
        Union[pd.Series, np.ndarray, SparkDataFrame],
        Union[pd.DataFrame, SparkDataFrame],
    ]
else:
    _Batch_XyAux = _PandasBatch  # type: ignore
    _Batch_yyXAux = Tuple[  # type: ignore
        Union[pd.Series, np.ndarray], Union[pd.Series, np.ndarray], pd.DataFrame
    ]

# pyright does not currently accept a TypeAlias with conditional definitions
_Batch_Xy: TypeAlias = _Batch_XyAux  # type: ignore
_Batch_yyX: TypeAlias = _Batch_yyXAux  # type: ignore


class MetricMonoidFactory(MonoidFactory[_Batch_yyX, float, _M], Protocol):
    """Abstract base class for factories that create metrics with an associative monoid interface."""

    @abstractmethod
    def to_monoid(self, batch: _Batch_yyX) -> _M:
        pass

    @abstractmethod
    def score_data(
        self, y_true: pd.Series, y_pred: pd.Series, X: Optional[pd.DataFrame] = None
    ) -> float:
        pass  # keeping this abstract to allow inheriting non-batched version

    @abstractmethod
    def score_estimator(
        self, estimator: TrainedOperator, X: pd.DataFrame, y: pd.Series
    ) -> float:
        pass  # keeping this abstract to allow inheriting non-batched version

    def __call__(
        self, estimator: TrainedOperator, X: pd.DataFrame, y: pd.Series
    ) -> float:
        return self.score_estimator(estimator, X, y)

    def score_data_batched(self, batches: Iterable[_Batch_yyX]) -> float:
        lifted_batches = (self.to_monoid(b) for b in batches)
        combined = functools.reduce(lambda a, b: a.combine(b), lifted_batches)
        return self.from_monoid(combined)

    def score_estimator_batched(
        self, estimator: TrainedOperator, batches: Iterable[_Batch_Xy]
    ) -> float:
        predicted_batches = ((y, estimator.predict(X), X) for X, y in batches)
        return self.score_data_batched(predicted_batches)


class _MetricMonoidMixin(MetricMonoidFactory[_M], Protocol):
    # pylint:disable=abstract-method
    # This is an abstract class as well

    def score_data(
        self, y_true: pd.Series, y_pred: pd.Series, X: Optional[pd.DataFrame] = None
    ) -> float:
        return self.from_monoid(self.to_monoid((y_true, y_pred, X)))

    def score_estimator(
        self, estimator: TrainedOperator, X: pd.DataFrame, y: pd.Series
    ) -> float:
        return self.score_data(y_true=y, y_pred=estimator.predict(X), X=X)


def _make_dataframe_yy(batch):
    def make_series_y(y):
        if isinstance(y, np.ndarray):
            series = pd.Series(y)
        elif isinstance(y, pd.DataFrame):
            series = y.squeeze()
        elif spark_installed and isinstance(y, SparkDataFrame):
            series = cast(pd.DataFrame, y.toPandas()).squeeze()
        else:
            series = y
        assert isinstance(series, pd.Series), type(series)
        return series.reset_index(drop=True)

    y_true, y_pred, _ = batch
    result = pd.DataFrame(
        {"y_true": make_series_y(y_true), "y_pred": make_series_y(y_pred)},
    )
    return result


class _AccuracyData(MetricMonoid):
    def __init__(self, match: int, total: int):
        self.match = match
        self.total = total

    def combine(self, other: "_AccuracyData") -> "_AccuracyData":
        return _AccuracyData(self.match + other.match, self.total + other.total)


class _Accuracy(_MetricMonoidMixin[_AccuracyData]):
    def __init__(self):
        self._pipeline = Map(
            columns={"match": astype("int", it.y_true == it.y_pred)}
        ) >> Aggregate(columns={"match": lale_sum(it.match), "total": count(it.match)})

    def to_monoid(self, batch: _Batch_yyX) -> _AccuracyData:
        input_df = _make_dataframe_yy(batch)
        agg_df = self._pipeline.transform(input_df)
        return _AccuracyData(match=agg_df.at[0, "match"], total=agg_df.at[0, "total"])

    def from_monoid(self, monoid: _AccuracyData) -> float:
        return float(monoid.match / np.float64(monoid.total))


def accuracy_score(y_true: pd.Series, y_pred: pd.Series) -> float:
    """Replacement for sklearn's `accuracy_score`_ function.

    .. _`accuracy_score`: https://scikit-learn.org/stable/modules/generated/sklearn.metrics.accuracy_score.html
    """
    return get_scorer("accuracy").score_data(y_true, y_pred)


class _BalancedAccuracyData(MetricMonoid):
    def __init__(self, true_pos: Dict[str, int], false_neg: Dict[str, int]):
        self.true_pos = true_pos
        self.false_neg = false_neg

    def combine(self, other: "_BalancedAccuracyData") -> "_BalancedAccuracyData":
        keys = set(self.true_pos.keys()) | set(other.true_pos.keys())
        return _BalancedAccuracyData(
            {k: self.true_pos.get(k, 0) + other.true_pos.get(k, 0) for k in keys},
            {k: self.false_neg.get(k, 0) + other.false_neg.get(k, 0) for k in keys},
        )


class _BalancedAccuracy(_MetricMonoidMixin[_BalancedAccuracyData]):
    def __init__(self):
        self._pipeline = (
            Map(
                columns={
                    "y_true": it.y_true,
                    "true_pos": astype("int", (it.y_pred == it.y_true)),
                    "false_neg": astype("int", (it.y_pred != it.y_true)),
                }
            )
            >> GroupBy(by=[it.y_true])
            >> Aggregate(
                columns={
                    "true_pos": lale_sum(it.true_pos),
                    "false_neg": lale_sum(it.false_neg),
                }
            )
        )

    def to_monoid(self, batch: _Batch_yyX) -> _BalancedAccuracyData:
        input_df = _make_dataframe_yy(batch)
        agg_df = self._pipeline.transform(input_df)
        return _BalancedAccuracyData(
            true_pos={k: agg_df.at[k, "true_pos"] for k in agg_df.index},
            false_neg={k: agg_df.at[k, "false_neg"] for k in agg_df.index},
        )

    def from_monoid(self, monoid: _BalancedAccuracyData) -> float:
        recalls = {
            k: monoid.true_pos[k] / (monoid.true_pos[k] + monoid.false_neg[k])
            for k in monoid.true_pos
        }
        result = sum(recalls.values()) / len(recalls)
        return float(result)


def balanced_accuracy_score(y_true: pd.Series, y_pred: pd.Series) -> float:
    """Replacement for sklearn's `balanced_accuracy_score`_ function.

    .. _`balanced_accuracy_score`: https://scikit-learn.org/stable/modules/generated/sklearn.metrics.balanced_accuracy_score.html
    """
    return get_scorer("balanced_accuracy").score_data(y_true, y_pred)


class _F1Data(MetricMonoid):
    def __init__(self, true_pos: int, false_pos: int, false_neg: int):
        self.true_pos = true_pos
        self.false_pos = false_pos
        self.false_neg = false_neg

    def combine(self, other: "_F1Data") -> "_F1Data":
        return _F1Data(
            self.true_pos + other.true_pos,
            self.false_pos + other.false_pos,
            self.false_neg + other.false_neg,
        )


class _F1(_MetricMonoidMixin[_F1Data]):
    def __init__(self, pos_label: Union[int, float, str] = 1):
        self._pipeline = Map(
            columns={
                "true_pos": astype(
                    "int", (it.y_pred == pos_label) & (it.y_true == pos_label)
                ),
                "false_pos": astype(
                    "int", (it.y_pred == pos_label) & (it.y_true != pos_label)
                ),
                "false_neg": astype(
                    "int", (it.y_pred != pos_label) & (it.y_true == pos_label)
                ),
            }
        ) >> Aggregate(
            columns={
                "true_pos": lale_sum(it.true_pos),
                "false_pos": lale_sum(it.false_pos),
                "false_neg": lale_sum(it.false_neg),
            }
        )

    def to_monoid(self, batch: _Batch_yyX) -> _F1Data:
        input_df = _make_dataframe_yy(batch)
        agg_df = self._pipeline.transform(input_df)
        return _F1Data(
            true_pos=agg_df.at[0, "true_pos"],
            false_pos=agg_df.at[0, "false_pos"],
            false_neg=agg_df.at[0, "false_neg"],
        )

    def from_monoid(self, monoid: _F1Data) -> float:
        two_tp = monoid.true_pos + monoid.true_pos
        result = two_tp / (two_tp + monoid.false_pos + monoid.false_neg)
        return float(result)


def f1_score(
    y_true: pd.Series, y_pred: pd.Series, pos_label: Union[int, float, str] = 1
) -> float:
    """Replacement for sklearn's `f1_score`_ function.

    .. _`f1_score`: https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html
    """
    return get_scorer("f1", pos_label=pos_label).score_data(y_true, y_pred)


class _R2Data(MetricMonoid):
    def __init__(self, n: int, tot_sum: float, tot_sum_sq: float, res_sum_sq: float):
        self.n = n
        self.sum = tot_sum
        self.sum_sq = tot_sum_sq
        self.res_sum_sq = res_sum_sq

    def combine(self, other: "_R2Data") -> "_R2Data":
        return _R2Data(
            n=self.n + other.n,
            tot_sum=self.sum + other.sum,
            tot_sum_sq=self.sum_sq + other.sum_sq,
            res_sum_sq=self.res_sum_sq + other.res_sum_sq,
        )


class _R2(_MetricMonoidMixin[_R2Data]):
    # https://en.wikipedia.org/wiki/Coefficient_of_determination

    def __init__(self):
        self._pipeline = Map(
            columns={
                "y": it.y_true,  # observed values
                "f": it.y_pred,  # predicted values
                "y2": it.y_true * it.y_true,  # squares
                "e2": (it.y_true - it.y_pred) * (it.y_true - it.y_pred),
            }
        ) >> Aggregate(
            columns={
                "n": count(it.y),
                "sum": lale_sum(it.y),
                "sum_sq": lale_sum(it.y2),
                "res_sum_sq": lale_sum(it.e2),  # residual sum of squares
            }
        )

    def to_monoid(self, batch: _Batch_yyX) -> _R2Data:
        input_df = _make_dataframe_yy(batch)
        agg_df = self._pipeline.transform(input_df)
        return _R2Data(
            n=agg_df.at[0, "n"],
            tot_sum=agg_df.at[0, "sum"],
            tot_sum_sq=agg_df.at[0, "sum_sq"],
            res_sum_sq=agg_df.at[0, "res_sum_sq"],
        )

    def from_monoid(self, monoid: _R2Data) -> float:
        ss_tot = monoid.sum_sq - (monoid.sum * monoid.sum / np.float64(monoid.n))
        return 1 - float(monoid.res_sum_sq / ss_tot)


def r2_score(y_true: pd.Series, y_pred: pd.Series) -> float:
    """Replacement for sklearn's `r2_score`_ function.

    .. _`r2_score`: https://scikit-learn.org/stable/modules/generated/sklearn.metrics.r2_score.html
    """
    return get_scorer("r2").score_data(y_true, y_pred)


_scorer_cache: Dict[str, Optional[MetricMonoidFactory]] = {
    "accuracy": None,
    "balanced_accuracy": None,
    "r2": None,
}


def get_scorer(scoring: str, **kwargs) -> MetricMonoidFactory:
    """Replacement for sklearn's `get_scorer`_ function.

    .. _`get_scorer`: https://scikit-learn.org/stable/modules/generated/sklearn.metrics.get_scorer.html
    """
    if scoring == "f1":
        return _F1(**kwargs)
    assert scoring in _scorer_cache, scoring
    if _scorer_cache[scoring] is None:
        if scoring == "accuracy":
            _scorer_cache[scoring] = _Accuracy()
        elif scoring == "balanced_accuracy":
            _scorer_cache[scoring] = _BalancedAccuracy()
        elif scoring == "r2":
            _scorer_cache[scoring] = _R2()
    result = _scorer_cache[scoring]
    assert result is not None
    return result
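
A usage sketch of the monoid interface (illustrative; the toy labels are made
up): scoring batch by batch with `to_monoid`/`combine` gives the same result as
scoring all the data at once, which is what makes these metrics safe to use
under batched execution.

    import pandas as pd
    from lale.lib.rasl import get_scorer

    scorer = get_scorer("accuracy")
    y_true = pd.Series([1, 0, 1, 1])
    y_pred = pd.Series([1, 0, 0, 1])
    whole = scorer.score_data(y_true, y_pred)
    batched = scorer.score_data_batched(
        [
            (y_true[:2], y_pred[:2], None),
            (y_true[2:], y_pred[2:], None),
        ]
    )
    assert whole == batched == 0.75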

# File: lale-master/lale/lib/rasl/convert.py (repo: lale)

# Copyright 2022 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import numpy as np
import pandas as pd

import lale.docstrings
import lale.operators
from lale.datasets import pandas2spark
from lale.datasets.data_schemas import (
    SparkDataFrameWithIndex,
    add_table_name,
    get_table_name,
)
from lale.helpers import _is_spark_df, _is_spark_df_without_index, datatype_param_type


def _convert(data, astype: datatype_param_type, X_or_y):
    if _is_spark_df(data):
        if astype == "pandas":
            if X_or_y == "X":
                result = data.toPandas()
            else:
                result = data.toPandas().squeeze()
        elif astype == "spark":
            if _is_spark_df_without_index(data):
                result = SparkDataFrameWithIndex(data)
            else:
                result = data
        else:
            assert False, astype
    elif isinstance(data, (pd.DataFrame, pd.Series)):
        if astype == "pandas":
            result = data
        elif astype == "spark":
            result = pandas2spark(data)
        else:
            assert False, astype
    elif isinstance(data, (list, np.ndarray)):
        if astype == "pandas":
            if X_or_y == "X":
                result = pd.DataFrame(data)
            else:
                result = pd.Series(data)
        elif astype == "spark":
            result = pandas2spark(pd.DataFrame(data))
        else:
            assert False, astype
    else:
        raise TypeError(f"unexpected type {type(data)}")
    result = add_table_name(result, get_table_name(data))
    return result


class _ConvertImpl:
    astype: datatype_param_type

    def __init__(self, astype: datatype_param_type = "pandas"):
        self.astype = astype

    def transform(self, X):
        return _convert(X, self.astype, "X")

    def transform_X_y(self, X, y):
        result_X = self.transform(X)
        result_y = None if y is None else _convert(y, self.astype, "y")
        return result_X, result_y

    def viz_label(self) -> str:
        return "Convert:\n" + self.astype


_hyperparams_schema = {
    "allOf": [
        {
            "type": "object",
            "additionalProperties": False,
            "required": ["astype"],
            "relevantToOptimizer": [],
            "properties": {
                "astype": {
                    "description": "Type to convert to.",
                    "enum": ["pandas", "spark"],
                    "default": "pandas",
                },
            },
        }
    ]
}

_input_transform_schema = {
    "type": "object",
    "required": ["X"],
    "additionalProperties": False,
    "properties": {
        "X": {
            "description": "Input features as numpy, pandas, or PySpark.",
            "type": "array",
            "items": {"type": "array", "items": {"laleType": "Any"}},
        }
    },
}

_output_transform_schema = {
    "type": "array",
    "items": {"type": "array", "items": {"laleType": "Any"}},
}

_input_transform_X_y_schema = {
    "type": "object",
    "required": ["X", "y"],
    "additionalProperties": False,
    "properties": {
        "X": {
            "description": "Input features as numpy, pandas, or PySpark.",
            "type": "array",
            "items": {"type": "array", "items": {"laleType": "Any"}},
        },
        "y": {
            "anyOf": [
                {"enum": [None]},
                {
                    "description": "Input labels as numpy, pandas, or PySpark.",
                    "type": "array",
                    "items": {"laleType": "Any"},
                },
            ],
        },
    },
}

_output_transform_X_y_schema = {
    "type": "array",
    "laleType": "tuple",
    "items": [
        {
            "description": "X",
            "type": "array",
            "items": {"type": "array", "items": {"laleType": "Any"}},
        },
        {
            "anyOf": [
                {"enum": [None]},
                {
                    "description": "Input labels as numpy, pandas, or PySpark.",
                    "type": "array",
                    "items": {"laleType": "Any"},
                },
            ],
        },
    ],
}

_combined_schemas = {
    "$schema": "http://json-schema.org/draft-04/schema#",
    "description": "Convert data to different representation if necessary.",
    "documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.rasl.convert.html",
    "type": "object",
    "tags": {"pre": [], "op": ["transformer"], "post": []},
    "properties": {
        "hyperparams": _hyperparams_schema,
        "input_transform": _input_transform_schema,
        "output_transform": _output_transform_schema,
        "input_transform_X_y": _input_transform_X_y_schema,
        "output_transform_X_y": _output_transform_X_y_schema,
    },
}

Convert = lale.operators.make_operator(_ConvertImpl, _combined_schemas)
lale.docstrings.set_docstrings(Convert)
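
A usage sketch (illustrative; `astype="spark"` would additionally require a
local Spark session): Convert normalizes whatever representation it is handed,
so a pipeline can standardize on pandas or Spark up front.

    import pandas as pd
    from lale.lib.rasl import Convert

    X = [[1, 2], [3, 4]]
    as_pandas = Convert(astype="pandas").transform(X)
    assert isinstance(as_pandas, pd.DataFrame)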

# File: lale-master/lale/lib/rasl/relational.py (repo: lale)

# Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import lale.docstrings
import lale.helpers
import lale.operators


class _RelationalImpl:
    def __init__(self, operator=None):
        self.operator = operator

    def fit(self, X, y=None):
        if self.operator is None:
            raise ValueError("The pipeline object can't be None at the time of fit.")
        if isinstance(X, list):
            raise ValueError(
                """Relational operator's fit does not accept data before join and aggregates.
Please pass a preprocessed dataset that is either a numpy array or a pandas dataframe."""
            )
        return self

    def transform(self, X):
        if isinstance(X, list):
            raise ValueError(
                """Relational operator's transform does not accept data before join and aggregates.
Please pass a preprocessed dataset that is either a numpy array or a pandas dataframe."""
            )
        return X


_input_fit_schema = {
    "description": "Input data schema for fit.",
    "type": "object",
    "required": ["X", "y"],
    "additionalProperties": False,
    "properties": {
        "X": {
            "description": "Features; the outer array is over samples.",
            # commenting out to bypass schema validation as of now.
            # 'anyOf': [
            #     { 'type': 'array',
            #       'items': {
            #           'type': 'array',
            #           'items': {'type': 'number'}}}]
            "laleType": "any",
        },
        "y": {
            "anyOf": [
                {"type": "array", "items": {"type": "number"}},
                {"type": "array", "items": {"type": "string"}},
                {"type": "array", "items": {"type": "boolean"}},
            ],
        },
    },
}

_input_transform_schema = {
    "description": "Input data schema for transform.",
    "type": "object",
    "required": ["X"],
    "properties": {
        "X": {
            # commenting out to bypass schema validation as of now.
            # 'type': 'array',
            # 'items': {
            #     'type': 'array',
            #     'items': {
            #         'type': 'number'},
            # },
            "laleType": "any",
            "description": "The input data for transform.",
        }
    },
}

_output_transform_schema = {
    "description": "Output data schema for transform.",
    "type": "array",
    "items": {"type": "array", "items": {"type": "number"}},
}

_hyperparams_schema = {
    "description": "Hyperparameter schema.",
    "allOf": [
        {
            "description": "This first sub-object lists all constructor arguments with their "
            "types, one at a time, omitting cross-argument constraints.",
            "type": "object",
            "additionalProperties": False,
            "relevantToOptimizer": [],
            "properties": {
                "operator": {
                    "description": "A lale pipeline object to be used inside of relational that captures the data join and aggregate operations.",
                    "laleType": "operator",
                }
            },
        }
    ],
}

_combined_schemas = {
    "$schema": "http://json-schema.org/draft-04/schema#",
    "description": """Higher order operator that contains a nested data join pipeline that has
multiple table joins and aggregates on those joins.""",
    "documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.rasl.relational.html",
    "type": "object",
    "tags": {"pre": [], "op": ["transformer"], "post": []},
    "properties": {
        "hyperparams": _hyperparams_schema,
        "input_fit": _input_fit_schema,
        "input_transform": _input_transform_schema,
        "output_transform": _output_transform_schema,
    },
}

Relational = lale.operators.make_operator(_RelationalImpl, _combined_schemas)
lale.docstrings.set_docstrings(Relational)
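
A construction sketch (illustrative; the nested operator here is a trivial
stand-in for a real join-and-aggregate pipeline): Relational wraps a nested
pipeline as a single step, and its fit expects already-preprocessed data.

    import numpy as np
    from lale.expressions import it
    from lale.lib.rasl import Map, Relational

    nested = Map(columns={"x": it.x})  # stand-in for Scan >> Join >> Aggregate
    rel = Relational(operator=nested)
    X = np.array([[1.0], [2.0]])
    y = np.array([0, 1])
    trained = rel.fit(X, y)  # transform then passes the data through unchanged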

# File: lale-master/lale/lib/rasl/__init__.py (repo: lale)

# Copyright 2021-2023 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
RASL operators and functions (experimental).
Relational Algebra Operators
============================
* lale.lib.rasl. `Aggregate`_
* lale.lib.rasl. `Alias`_
* lale.lib.rasl. `Filter`_
* lale.lib.rasl. `GroupBy`_
* lale.lib.rasl. `Join`_
* lale.lib.rasl. `Map`_
* lale.lib.rasl. `OrderBy`_
* lale.lib.rasl. `Project`_
* lale.lib.rasl. `Relational`_
Transformers
============
* lale.lib.rasl. `Batching`_
* lale.lib.rasl. `ConcatFeatures`_
* lale.lib.rasl. `Convert`_
* lale.lib.rasl. `Scan`_
* lale.lib.rasl. `SortIndex`_
* lale.lib.rasl. `SplitXy`_
Scikit-learn Operators
======================
* lale.lib.rasl. `MinMaxScaler`_
* lale.lib.rasl. `OneHotEncoder`_
* lale.lib.rasl. `OrdinalEncoder`_
* lale.lib.rasl. `HashingEncoder`_
* lale.lib.rasl. `SelectKBest`_
* lale.lib.rasl. `SimpleImputer`_
* lale.lib.rasl. `StandardScaler`_
* lale.lib.rasl. `TargetEncoder`_
Estimators
==========
* lale.lib.rasl. `BatchedBaggingClassifier`_
Functions
=========
* lale.lib.rasl. `categorical`_
* lale.lib.rasl. `date_time`_
* lale.lib.rasl. `SparkExplainer`_
Data Loaders
============
* lale.lib.rasl. `csv_data_loader`_
* lale.lib.rasl. `mockup_data_loader`_
* lale.lib.rasl. `openml_data_loader`_
Metrics
=======
* lale.lib.rasl. `accuracy_score`_
* lale.lib.rasl. `balanced_accuracy_score`_
* lale.lib.rasl. `f1_score`_
* lale.lib.rasl. `get_scorer`_
* lale.lib.rasl. `r2_score`_
Other Facilities
================
* lale.lib.rasl. `Prio`_
* lale.lib.rasl. `PrioBatch`_
* lale.lib.rasl. `PrioResourceAware`_
* lale.lib.rasl. `PrioStep`_
* lale.lib.rasl. `cross_val_score`_
* lale.lib.rasl. `cross_validate`_
* lale.lib.rasl. `fit_with_batches`_
* lale.lib.rasl. `is_associative`_
* lale.lib.rasl. `is_incremental`_
.. _`Aggregate`: lale.lib.rasl.aggregate.html
.. _`Alias`: lale.lib.rasl.alias.html
.. _`Filter`: lale.lib.rasl.filter.html
.. _`GroupBy`: lale.lib.rasl.group_by.html
.. _`Join`: lale.lib.rasl.join.html
.. _`Map`: lale.lib.rasl.map.html
.. _`OrderBy`: lale.lib.rasl.orderby.html
.. _`Project`: lale.lib.rasl.project.html
.. _`Relational`: lale.lib.rasl.relational.html
.. _`BatchedBaggingClassifier`: lale.lib.rasl.batched_bagging_classifier.html
.. _`Batching`: lale.lib.rasl.batching.html
.. _`ConcatFeatures`: lale.lib.rasl.concat_features.html
.. _`Convert`: lale.lib.rasl.convert.html
.. _`Scan`: lale.lib.rasl.scan.html
.. _`SplitXy`: lale.lib.rasl.split_xy.html
.. _`SortIndex`: lale.lib.rasl.sort_index.html
.. _`MinMaxScaler`: lale.lib.rasl.min_max_scaler.html
.. _`OneHotEncoder`: lale.lib.rasl.one_hot_encoder.html
.. _`OrdinalEncoder`: lale.lib.rasl.ordinal_encoder.html
.. _`HashingEncoder`: lale.lib.rasl.hashing_encoder.html
.. _`SelectKBest`: lale.lib.rasl.select_k_best.html
.. _`SimpleImputer`: lale.lib.rasl.simple_imputer.html
.. _`StandardScaler`: lale.lib.rasl.standard_scaler.html
.. _`TargetEncoder`: lale.lib.rasl.target_encoder.html
.. _`categorical`: lale.lib.rasl.functions.html#lale.lib.rasl.functions.categorical
.. _`date_time`: lale.lib.rasl.functions.html#lale.lib.rasl.functions.date_time
.. _`SparkExplainer`: lale.lib.rasl.spark_explainer.html
.. _`Prio`: lale.lib.rasl.task_graphs.html#lale.lib.rasl.task_graphs.Prio
.. _`PrioBatch`: lale.lib.rasl.task_graphs.html#lale.lib.rasl.task_graphs.PrioBatch
.. _`PrioResourceAware`: lale.lib.rasl.task_graphs.html#lale.lib.rasl.task_graphs.PrioResourceAware
.. _`PrioStep`: lale.lib.rasl.task_graphs.html#lale.lib.rasl.task_graphs.PrioStep
.. _`accuracy_score`: lale.lib.rasl.metrics.html#lale.lib.rasl.metrics.accuracy_score
.. _`balanced_accuracy_score`: lale.lib.rasl.metrics.html#lale.lib.rasl.metrics.balanced_accuracy_score
.. _`cross_val_score`: lale.lib.rasl.task_graphs.html#lale.lib.rasl.task_graphs.cross_val_score
.. _`cross_validate`: lale.lib.rasl.task_graphs.html#lale.lib.rasl.task_graphs.cross_validate
.. _`f1_score`: lale.lib.rasl.metrics.html#lale.lib.rasl.metrics.f1_score
.. _`fit_with_batches`: lale.lib.rasl.task_graphs.html#lale.lib.rasl.task_graphs.fit_with_batches
.. _`get_scorer`: lale.lib.rasl.metrics.html#lale.lib.rasl.metrics.get_scorer
.. _`is_associative`: lale.lib.rasl.task_graphs.html#lale.lib.rasl.task_graphs.is_associative
.. _`is_incremental`: lale.lib.rasl.task_graphs.html#lale.lib.rasl.task_graphs.is_incremental
.. _`csv_data_loader`: lale.lib.rasl.datasets.html#lale.lib.rasl.datasets.csv_data_loader
.. _`mockup_data_loader`: lale.lib.rasl.datasets.html#lale.lib.rasl.datasets.mockup_data_loader
.. _`openml_data_loader`: lale.lib.rasl.datasets.html#lale.lib.rasl.datasets.openml_data_loader
.. _`r2_score`: lale.lib.rasl.metrics.html#lale.lib.rasl.metrics.r2_score
"""
# Note: all imports should be done as
# from .xxx import XXX as XXX
# this ensures that pyright considers them to be publicly available
# and not private imports (this affects lale users that use pyright)
from .aggregate import Aggregate as Aggregate
from .alias import Alias as Alias
from .batched_bagging_classifier import (
    BatchedBaggingClassifier as BatchedBaggingClassifier,
)
from .batching import Batching as Batching
from .concat_features import ConcatFeatures as ConcatFeatures
from .convert import Convert as Convert
from .datasets import csv_data_loader as csv_data_loader
from .datasets import mockup_data_loader as mockup_data_loader
from .datasets import openml_data_loader as openml_data_loader
from .filter import Filter as Filter
from .functions import categorical, date_time
from .group_by import GroupBy as GroupBy
from .hashing_encoder import HashingEncoder as HashingEncoder
from .join import Join as Join
from .map import Map as Map
from .metrics import accuracy_score as accuracy_score
from .metrics import balanced_accuracy_score as balanced_accuracy_score
from .metrics import f1_score as f1_score
from .metrics import get_scorer as get_scorer
from .metrics import r2_score as r2_score
from .min_max_scaler import MinMaxScaler as MinMaxScaler
from .monoid import Monoid as Monoid
from .monoid import MonoidableOperator as MonoidableOperator
from .monoid import MonoidFactory as MonoidFactory
from .one_hot_encoder import OneHotEncoder as OneHotEncoder
from .orderby import OrderBy as OrderBy
from .ordinal_encoder import OrdinalEncoder as OrdinalEncoder
from .project import Project as Project
from .relational import Relational as Relational
from .scan import Scan as Scan
from .select_k_best import SelectKBest as SelectKBest
from .simple_imputer import SimpleImputer as SimpleImputer
from .sort_index import SortIndex as SortIndex
from .spark_explainer import SparkExplainer as SparkExplainer
from .split_xy import SplitXy as SplitXy
from .standard_scaler import StandardScaler as StandardScaler
from .target_encoder import TargetEncoder as TargetEncoder
from .task_graphs import Prio as Prio
from .task_graphs import PrioBatch as PrioBatch
from .task_graphs import PrioResourceAware as PrioResourceAware
from .task_graphs import PrioStep as PrioStep
from .task_graphs import cross_val_score as cross_val_score
from .task_graphs import cross_validate as cross_validate
from .task_graphs import fit_with_batches as fit_with_batches
from .task_graphs import is_associative as is_associative
from .task_graphs import is_incremental as is_incremental

# File: lale-master/lale/lib/rasl/group_by.py (repo: lale)

# Copyright 2020 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import numpy as np

import lale.datasets.data_schemas
import lale.docstrings
import lale.operators
from lale.datasets.data_schemas import add_table_name, get_index_names, get_table_name
from lale.helpers import (
    _get_subscript_value,
    _is_ast_attribute,
    _is_ast_subscript,
    _is_pandas_df,
    _is_spark_df,
)
from lale.lib.dataframe import get_columns


class _GroupByImpl:
    def __init__(self, by=None):
        self.by = by

    # Parse the 'by' element passed as input
    def _get_group_key(self, expr_to_parse):
        if _is_ast_subscript(expr_to_parse):
            return _get_subscript_value(expr_to_parse)
        elif _is_ast_attribute(expr_to_parse):
            return expr_to_parse.attr
        else:
            raise ValueError(
                "GroupBy by parameter only supports subscript or dot notation for the key columns. For example, it.col_name or it['col_name']."
            )

    def transform(self, X):
        name = get_table_name(X)
        group_by_keys = []
        for by_element in self.by if self.by is not None else []:
            expr_to_parse = by_element.expr
            group_by_keys.append(self._get_group_key(expr_to_parse))
        col_not_in_X = np.setdiff1d(group_by_keys, get_columns(X))
        if col_not_in_X.size > 0:
            raise ValueError(
                f"GroupBy key columns {col_not_in_X} not present in input dataframe X."
            )
        if _is_pandas_df(X):
            grouped_df = X.groupby(group_by_keys, sort=False)
        elif _is_spark_df(X):
            X = X.drop(*get_index_names(X))
            grouped_df = X.groupby(group_by_keys)
        else:
            raise ValueError(
                "Only pandas and spark dataframes are supported by the GroupBy operator."
            )
        named_grouped_df = add_table_name(grouped_df, name)
        return named_grouped_df


_hyperparams_schema = {
    "allOf": [
        {
            "description": "This first sub-object lists all constructor arguments with their "
            "types, one at a time, omitting cross-argument constraints, if any.",
            "type": "object",
            "additionalProperties": False,
            "required": ["by"],
            "relevantToOptimizer": [],
            "properties": {
                "by": {
                    "description": "GroupBy key(s).",
                    "type": "array",
                    "items": {"laleType": "expression"},
                },
            },
        }
    ]
}

_input_transform_schema = {
    "type": "object",
    "required": ["X"],
    "additionalProperties": False,
    "properties": {
        "X": {
            "description": "List of tables.",
            "type": "array",
            "items": {"type": "array", "items": {"laleType": "Any"}},
            "minItems": 1,
        }
    },
}

_output_transform_schema = {
    "description": "Features; no restrictions on data type.",
    "laleType": "Any",
}

_combined_schemas = {
    "$schema": "http://json-schema.org/draft-04/schema#",
    "description": "Relational algebra group_by operator.",
    "documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.rasl.group_by.html",
    "type": "object",
    "tags": {"pre": [], "op": ["transformer"], "post": []},
    "properties": {
        "hyperparams": _hyperparams_schema,
        "input_transform": _input_transform_schema,
        "output_transform": _output_transform_schema,
    },
}

GroupBy = lale.operators.make_operator(_GroupByImpl, _combined_schemas)
lale.docstrings.set_docstrings(GroupBy)
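
A usage sketch (illustrative; the data is made up): GroupBy returns a grouped
object, so it is typically composed with Aggregate, which reduces each group.

    import pandas as pd
    from lale.expressions import it
    from lale.expressions import sum as agg_sum
    from lale.lib.rasl import Aggregate, GroupBy

    df = pd.DataFrame({"store": ["a", "a", "b"], "sales": [1, 2, 5]})
    pipeline = GroupBy(by=[it.store]) >> Aggregate(
        columns={"total": agg_sum(it.sales)}
    )
    print(pipeline.transform(df))  # one row per store with the summed sales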

# File: lale-master/lale/lib/rasl/target_encoder.py (repo: lale)

# Copyright 2023 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import typing
from typing import Any, Dict, List, Tuple, Union

import pandas as pd
import scipy.special

import lale.docstrings
import lale.helpers
import lale.operators
from lale.expressions import count as agg_count
from lale.expressions import it, replace
from lale.expressions import sum as agg_sum
from lale.lib.category_encoders import target_encoder
from lale.lib.dataframe import get_columns

from ._util import get_obj_cols
from .aggregate import Aggregate
from .concat_features import ConcatFeatures
from .group_by import GroupBy
from .map import Map
from .monoid import Monoid, MonoidableOperator


class _TargetEncoderMonoid(Monoid):
    def __init__(
        self,
        *,
        feature_names_out: List[str],
        y_sum: float,
        y_count: int,
        col2cat2sum: Dict[str, Dict[Union[float, str], float]],
        col2cat2count: Dict[str, Dict[Union[float, str], int]],
    ):
        self.feature_names_out = feature_names_out
        assert set(col2cat2sum.keys()) == set(col2cat2count.keys())
        self.y_sum = y_sum
        self.y_count = y_count
        self.col2cat2sum = col2cat2sum
        self.col2cat2count = col2cat2count

    def combine(self, other: "_TargetEncoderMonoid"):
        assert list(self.feature_names_out) == list(other.feature_names_out)
        assert set(self.col2cat2sum.keys()) == set(other.col2cat2sum.keys())
        assert set(self.col2cat2count.keys()) == set(other.col2cat2count.keys())
        return _TargetEncoderMonoid(
            feature_names_out=self.feature_names_out,
            y_sum=self.y_sum + other.y_sum,
            y_count=self.y_count + other.y_count,
            col2cat2sum={
                col: {
                    cat: cat2sum.get(cat, 0) + other.col2cat2sum[col].get(cat, 0)
                    for cat in set(cat2sum.keys()) | set(other.col2cat2sum[col].keys())
                }
                for col, cat2sum in self.col2cat2sum.items()
            },
            col2cat2count={
                col: {
                    cat: cat2count.get(cat, 0) + other.col2cat2count[col].get(cat, 0)
                    for cat in set(cat2count.keys())
                    | set(other.col2cat2count[col].keys())
                }
                for col, cat2count in self.col2cat2count.items()
            },
        )


class _TargetEncoderImpl(MonoidableOperator[_TargetEncoderMonoid]):
    def __init__(self, **hyperparams):
        self._hyperparams = hyperparams

    def transform(self, X):
        if self._transformer is None:
            self._transformer = self._build_transformer()
        return self._transformer.transform(X)

    def get_feature_names_out(self):
        return self.feature_names_out_

    def from_monoid(self, monoid: _TargetEncoderMonoid):
        self._monoid = monoid
        self._transformer = None
        self.feature_names_out_ = monoid.feature_names_out
        self._prior = monoid.y_sum / monoid.y_count
        k = self._hyperparams["min_samples_leaf"]
        f = self._hyperparams["smoothing"]

        def blend(posterior, sample_size):
            if sample_size <= 1:
                return self._prior
            weighting = scipy.special.expit((sample_size - k) / f)
            return weighting * posterior + (1 - weighting) * self._prior

        self._col2cat2value = {
            col: {
                cat: blend(
                    monoid.col2cat2sum[col][cat] / monoid.col2cat2count[col][cat],
                    sample_size=monoid.col2cat2count[col][cat],
                )
                for cat in monoid.col2cat2sum[col].keys()
            }
            for col in monoid.col2cat2sum.keys()
        }

    def _build_transformer(self):
        categorical_features = self._hyperparams["cols"]

        def build_map_expr(col):
            if col not in categorical_features:
                return it[col]
            return replace(
                it[col],
                self._col2cat2value[col],
                handle_unknown="use_encoded_value",
                unknown_value=self._prior,
            )

        return Map(
            columns={col: build_map_expr(col) for col in self.feature_names_out_}
        )

    def to_monoid(self, batch: Tuple[Any, Any]):
        X, y = batch
        X_columns = list(typing.cast(List[str], get_columns(X)))
        y_name = lale.helpers.GenSym(set(X_columns))("target")
        if isinstance(y, pd.Series):
            y = pd.DataFrame({y_name: y})
        else:
            y = Map(columns={y_name: it[get_columns(y)[0]]}).transform(y)
        assert lale.helpers._is_df(y), type(y)
        classes = self._hyperparams["classes"]
        if classes is not None:
            ordinal_encoder = Map(
                columns={
                    y_name: replace(it[y_name], {v: i for i, v in enumerate(classes)})
                }
            )
            y = ordinal_encoder.transform(y)
        y_aggregator = Aggregate(
            columns={"sum": agg_sum(it[y_name]), "count": agg_count(it[y_name])}
        )
        y_aggregated = lale.helpers._ensure_pandas(y_aggregator.transform(y))
        y_sum = y_aggregated["sum"].iat[0]
        y_count = y_aggregated["count"].iat[0]
        Xy = ConcatFeatures.transform([X, y])
        if self._hyperparams["cols"] is None:
            self._hyperparams["cols"] = get_obj_cols(X)
        col2cat2sum: Dict[str, Dict[Union[float, str], float]] = {}
        col2cat2count: Dict[str, Dict[Union[float, str], int]] = {}
        for col in typing.cast(List[str], self._hyperparams["cols"]):
            pipeline = (
                Map(columns={col: it[col], y_name: it[y_name]})
                >> GroupBy(by=[it[col]])
                >> Aggregate(
                    columns={"sum": agg_sum(it[y_name]), "count": agg_count(it[y_name])}
                )
            )
            aggregated = lale.helpers._ensure_pandas(pipeline.transform(Xy))
            col2cat2sum[col] = aggregated["sum"].to_dict()
            col2cat2count[col] = aggregated["count"].to_dict()
        return _TargetEncoderMonoid(
            feature_names_out=X_columns,
            y_sum=y_sum,
            y_count=y_count,
            col2cat2sum=col2cat2sum,
            col2cat2count=col2cat2count,
        )
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """Relational algebra reimplementation of scikit-learn contrib's `TargetEncoder`_ transformer.
Works on both pandas and Spark dataframes by using `Aggregate`_ for `fit` and `Map`_ for `transform`, which in turn use the appropriate backend.
.. _`TargetEncoder`: https://contrib.scikit-learn.org/category_encoders/targetencoder.html
.. _`Aggregate`: https://lale.readthedocs.io/en/latest/modules/lale.lib.rasl.aggregate.html
.. _`Map`: https://lale.readthedocs.io/en/latest/modules/lale.lib.rasl.map.html
""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.rasl.target_encoder.html",
"type": "object",
"tags": {
"pre": ["categoricals"],
"op": ["transformer", "interpretable"],
"post": [],
},
"properties": {
"hyperparams": target_encoder._hyperparams_schema,
"input_fit": target_encoder._input_fit_schema,
"input_transform": target_encoder._input_transform_schema,
"output_transform": target_encoder._output_transform_schema,
},
}
TargetEncoder = lale.operators.make_operator(_TargetEncoderImpl, _combined_schemas)
TargetEncoder = typing.cast(
lale.operators.PlannedIndividualOp,
TargetEncoder.customize_schema(
classes={
"anyOf": [
{"enum": [None], "description": "Regression task."},
{
"type": "array",
"items": {"type": "number"},
"description": "Classification task with numeric labels.",
"minItems": 2,
},
{
"type": "array",
"items": {"type": "string"},
"description": "Classification task with string labels.",
"minItems": 2,
},
{
"type": "array",
"items": {"type": "boolean"},
"description": "Classification task with Boolean labels.",
"minItems": 2,
},
],
"default": None,
},
drop_invariant={
"enum": [False],
"default": False,
"description": "This implementation only supports `drop_invariant=False`.",
},
return_df={
"enum": [True],
"default": True,
"description": "This implementation returns a pandas or spark dataframe if the input is a pandas or spark dataframe, respectively.",
},
handle_missing={
"enum": ["value"],
"default": "value",
"description": "This implementation only supports `handle_missing='value'`.",
},
handle_unknown={
"enum": ["value"],
"default": "value",
"description": "This implementation only supports `handle_unknown='value'`.",
},
),
)
lale.docstrings.set_docstrings(TargetEncoder)
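# Minimal usage sketch (hedged): toy pandas data, not from the library. Fit
# computes per-category target means blended with the global prior, and
# transform replaces each category by its encoding.
if __name__ == "__main__":
    example_X = pd.DataFrame({"color": ["red", "green", "red", "blue", "red"]})
    example_y = pd.Series([1, 0, 1, 0, 1])
    trained = TargetEncoder(classes=[0, 1]).fit(example_X, example_y)
    print(trained.transform(example_X))  # "color" becomes a numeric column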
| 9,881 | 37.007692 | 144 |
py
|
lale
|
lale-master/lale/lib/rasl/split_xy.py
|
# Copyright 2021, 2022 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as pd
import lale.docstrings
import lale.operators
from .project import Project
class _SplitXyImpl:
def __init__(self, label_name="y"):
self.label_name = label_name
self._project_X = None
self._project_y = None
def _extract_y(self, X):
if self._project_y is None:
self._project_y = Project(columns=[self.label_name])
result = self._project_y.transform(X)
if isinstance(result, pd.DataFrame):
result = result.squeeze()
return result
def transform(self, X):
if self._project_X is None:
self._project_X = Project(drop_columns=[self.label_name])
return self._project_X.transform(X)
def transform_X_y(self, X, y):
return self.transform(X), self._extract_y(X)
def viz_label(self) -> str:
return "SplitXy:\n" + self.label_name
_hyperparams_schema = {
"description": "The SplitXy operator separates the label field/column from the input dataframe X.",
"allOf": [
{
"description": "This first object lists all constructor arguments with their types, but omits constraints for conditional hyperparameters",
"type": "object",
"additionalProperties": False,
"relevantToOptimizer": [],
"properties": {
"label_name": {
"description": "The name of the label column in the input dataframe X.",
"default": "y",
"type": "string",
},
},
}
],
}
_input_transform_schema = {
"type": "object",
"required": ["X"],
"additionalProperties": True,
"properties": {
"X": {
"description": "Features; the outer array is over samples.",
"type": "array",
"items": {"type": "array", "items": {"laleType": "Any"}},
},
},
}
_output_transform_schema = {
"description": "Output data schema for transformed data.",
"type": "array",
"items": {"type": "array", "items": {"laleType": "Any"}},
}
_input_transform_X_y_schema = {
"type": "object",
"required": ["X", "y"],
"additionalProperties": False,
"properties": {
"X": {
"description": "Input features; the outer array is over samples.",
"type": "array",
"items": {"type": "array", "items": {"laleType": "Any"}},
},
"y": {
"description": "Input labels; ignored.",
"laleType": "Any",
},
},
}
_output_transform_X_y_schema = {
"type": "array",
"laleType": "tuple",
"items": [
{
"description": "X",
"type": "array",
"items": {"type": "array", "items": {"laleType": "Any"}},
},
{
"description": "y",
"type": "array",
"items": {"laleType": "Any"},
},
],
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Relational algebra SplitXy operator.",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.rasl.split_xy.html",
"type": "object",
"tags": {"pre": [], "op": ["transformer"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_transform": _input_transform_schema,
"output_transform": _output_transform_schema,
"input_transform_X_y": _input_transform_X_y_schema,
"output_transform_X_y": _output_transform_X_y_schema,
},
}
SplitXy = lale.operators.make_operator(_SplitXyImpl, _combined_schemas)
lale.docstrings.set_docstrings(SplitXy)
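# Minimal usage sketch (hedged), assuming lale dispatches transform_X_y to the
# implementation above: transform drops the label column, and transform_X_y
# additionally returns it as a series (the y argument itself is ignored).
if __name__ == "__main__":
    example_df = pd.DataFrame({"f1": [1, 2], "f2": [3, 4], "y": [0, 1]})
    example_X, example_y = SplitXy(label_name="y").transform_X_y(example_df, None)
    print(list(example_X.columns), list(example_y))  # ['f1', 'f2'] [0, 1]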
| 4,260 | 29.654676 | 151 |
py
|
lale
|
lale-master/lale/lib/rasl/ordinal_encoder.py
|
# Copyright 2021 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import typing
from typing import Any, Tuple
import numpy as np
import lale.docstrings
import lale.helpers
import lale.operators
from lale.expressions import collect_set, it, replace
from lale.lib.dataframe import count, get_columns
from lale.lib.sklearn import ordinal_encoder
from .aggregate import Aggregate
from .map import Map
from .monoid import Monoid, MonoidableOperator
class _OrdinalEncoderMonoid(Monoid):
def __init__(self, *, n_samples_seen_, feature_names_in_, categories_):
self.n_samples_seen_ = n_samples_seen_
self.feature_names_in_ = feature_names_in_
self.categories_ = categories_
def combine(self, other: "_OrdinalEncoderMonoid"):
n_samples_seen_ = self.n_samples_seen_ + other.n_samples_seen_
assert list(self.feature_names_in_) == list(other.feature_names_in_)
assert len(self.categories_) == len(other.categories_)
combined_categories = [
np.sort(
np.unique(np.concatenate([self.categories_[i], other.categories_[i]]))
)
for i in range(len(self.categories_))
]
return _OrdinalEncoderMonoid(
n_samples_seen_=n_samples_seen_,
feature_names_in_=self.feature_names_in_,
categories_=combined_categories,
)
class _OrdinalEncoderImpl(MonoidableOperator[_OrdinalEncoderMonoid]):
def __init__(
self,
*,
categories="auto",
dtype="float64",
handle_unknown="error",
unknown_value=None,
):
self._hyperparams = {
"categories": categories,
"dtype": dtype,
"handle_unknown": handle_unknown,
"unknown_value": unknown_value,
}
def transform(self, X):
if self._transformer is None:
self._transformer = self._build_transformer()
return self._transformer.transform(X)
@property
def n_samples_seen_(self):
return getattr(self._monoid, "n_samples_seen_", 0)
@property
def categories_(self):
return getattr(self._monoid, "categories_", None)
@property
def feature_names_in_(self):
return getattr(self._monoid, "feature_names_in_", None)
def from_monoid(self, monoid: _OrdinalEncoderMonoid):
self._monoid = monoid
self.n_features_in_ = len(monoid.feature_names_in_)
self._transformer = None
def _build_transformer(self):
assert self._monoid is not None
result = Map(
columns={
col_name: replace(
it[col_name],
{
cat_value: cat_idx
for cat_idx, cat_value in enumerate(
self._monoid.categories_[col_idx]
)
},
handle_unknown="use_encoded_value",
unknown_value=self._hyperparams["unknown_value"],
)
for col_idx, col_name in enumerate(self._monoid.feature_names_in_)
}
)
return result
def to_monoid(self, batch: Tuple[Any, Any]):
hyperparams = self._hyperparams
X, _ = batch
n_samples_seen_ = count(X)
feature_names_in_ = get_columns(X)
if hyperparams["categories"] == "auto":
agg_op = Aggregate(
columns={c: collect_set(it[c]) for c in feature_names_in_}
)
agg_data = agg_op.transform(X)
if lale.helpers._is_spark_df(agg_data):
agg_data = agg_data.toPandas()
categories_ = [np.sort(agg_data.loc[0, c]) for c in feature_names_in_]
else:
categories_ = hyperparams["categories"]
return _OrdinalEncoderMonoid(
n_samples_seen_=n_samples_seen_,
feature_names_in_=feature_names_in_,
categories_=categories_,
)
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """Relational algebra reimplementation of scikit-learn's `OrdinalEncoder`_ transformer that encodes categorical features as numbers.
Works on both pandas and Spark dataframes by using `Aggregate`_ for `fit` and `Map`_ for `transform`, which in turn use the appropriate backend.
.. _`OrdinalEncoder`: https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.OrdinalEncoder.html
.. _`Aggregate`: https://lale.readthedocs.io/en/latest/modules/lale.lib.rasl.aggregate.html
.. _`Map`: https://lale.readthedocs.io/en/latest/modules/lale.lib.rasl.map.html
""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.rasl.ordinal_encoder.html",
"type": "object",
"tags": {
"pre": ["categoricals"],
"op": ["transformer", "interpretable"],
"post": [],
},
"properties": {
"hyperparams": ordinal_encoder._hyperparams_schema,
"input_fit": ordinal_encoder._input_fit_schema,
"input_transform": ordinal_encoder._input_transform_schema,
"output_transform": ordinal_encoder._output_transform_schema,
},
}
OrdinalEncoder = lale.operators.make_operator(_OrdinalEncoderImpl, _combined_schemas)
OrdinalEncoder = typing.cast(
lale.operators.PlannedIndividualOp,
OrdinalEncoder.customize_schema(
encode_unknown_with=None,
dtype={
"enum": ["float64"],
"description": "This implementation only supports `dtype='float64'`.",
"default": "float64",
},
handle_unknown={
"enum": ["use_encoded_value"],
"description": "This implementation only supports `handle_unknown='use_encoded_value'`.",
"default": "use_encoded_value",
},
unknown_value={
"anyOf": [
{"type": "integer"},
{"enum": [np.nan, None]},
],
"description": "The encoded value of unknown categories to use when `handle_unknown='use_encoded_value'`. It has to be distinct from the values used to encode any of the categories in fit. If set to np.nan, the dtype hyperparameter must be a float dtype.",
},
),
)
lale.docstrings.set_docstrings(OrdinalEncoder)
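# Minimal usage sketch (hedged): toy pandas data, not from the library. Fit
# collects the sorted set of categories per column via Aggregate; transform
# maps each category to its index via Map, sending unseen categories to
# unknown_value.
if __name__ == "__main__":
    import pandas as pd
    train = pd.DataFrame({"size": ["S", "M", "L", "M"]})
    test = pd.DataFrame({"size": ["L", "XL"]})  # "XL" was never seen in fit
    trained = OrdinalEncoder(unknown_value=-1).fit(train)
    print(trained.transform(test))  # sorted categories L/M/S -> 0/1/2; "XL" -> -1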
| 6,824 | 35.693548 | 268 |
py
|
lale
|
lale-master/lale/lib/rasl/monoid.py
|
# Copyright 2022 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC, abstractmethod
from typing import Any, Generic, Optional, TypeVar
from typing_extensions import Protocol, runtime_checkable
_InputType_contra = TypeVar("_InputType_contra", contravariant=True)
_OutputType_co = TypeVar("_OutputType_co", covariant=True)
_SelfType = TypeVar("_SelfType")
class Monoid(ABC):
"""
    Data that can be combined in an associative way. See :class:`MonoidFactory` for ways to create/unpack
a given monoid.
"""
@abstractmethod
def combine(self: _SelfType, other: _SelfType) -> _SelfType:
"""
Combines this monoid instance with another, producing a result.
This operation must be observationally associative, satisfying
        ``x.from_monoid(a.combine(b.combine(c))) == x.from_monoid(a.combine(b).combine(c))``
        where ``x`` is the instance of :class:`MonoidFactory` that created
these instances.
"""
pass
@property
def is_absorbing(self):
"""
A monoid value `x` is absorbing if for all `y`, `x.combine(y) == x`.
This can help stop training early for monoids with learned coefficients.
"""
return False
_M = TypeVar("_M", bound=Monoid)
@runtime_checkable
class MonoidFactory(Generic[_InputType_contra, _OutputType_co, _M], Protocol):
"""
    This protocol determines whether a class supports creating a monoid and
    using it for associative computation.
Due to the ``runtime_checkable`` decorator, ``isinstance(obj, MonoidFactory)`` will succeed
if the object has the requisite methods, even if it does not have this protocol as
a base class.
"""
@abstractmethod
def to_monoid(self, batch: _InputType_contra) -> _M:
"""
Create a monoid instance representing the input data
"""
...
@abstractmethod
def from_monoid(self, monoid: _M) -> _OutputType_co:
"""
Given the monoid instance, return the appropriate type of output.
This method may also modify self based on the monoid instance.
"""
...
class MonoidableOperator(MonoidFactory[Any, None, _M], Protocol):
"""
This is a useful base class for operator implementations that support associative (monoid-based) fit.
    Given the implementation-supplied :class:`MonoidFactory` methods, this class provides
    default :meth:`partial_fit` and :meth:`fit` implementations.
"""
_monoid: Optional[_M] = None
def partial_fit(self, X, y=None):
if self._monoid is None or not self._monoid.is_absorbing:
lifted = self.to_monoid((X, y))
if self._monoid is not None: # not first fit
lifted = self._monoid.combine(lifted)
self.from_monoid(lifted)
return self
def fit(self, X, y=None):
lifted = self.to_monoid((X, y))
self.from_monoid(lifted)
return self
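# Minimal sketch of the protocol (hedged), assuming plain Python lists of
# numbers as batches: a sum/count monoid plus a factory that folds monoid
# values back into a mean. Because combine is associative, batches can be
# reduced in any grouping and yield the same result.
class _SumCountMonoid(Monoid):
    def __init__(self, total: float, count: int):
        self.total, self.count = total, count
    def combine(self, other: "_SumCountMonoid") -> "_SumCountMonoid":
        return _SumCountMonoid(self.total + other.total, self.count + other.count)
class _MeanFactory:
    def to_monoid(self, batch) -> _SumCountMonoid:
        return _SumCountMonoid(sum(batch), len(batch))
    def from_monoid(self, monoid: _SumCountMonoid) -> float:
        return monoid.total / monoid.count
if __name__ == "__main__":
    factory = _MeanFactory()
    assert isinstance(factory, MonoidFactory)  # structural check via runtime_checkable
    a, b, c = (factory.to_monoid(batch) for batch in ([1.0, 2.0], [3.0], [4.0]))
    assert factory.from_monoid(a.combine(b).combine(c)) == factory.from_monoid(
        a.combine(b.combine(c))
    )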
| 3,473 | 33.39604 | 105 |
py
|
lale
|
lale-master/lale/lib/rasl/batched_bagging_classifier.py
|
# Copyright 2022 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import inspect
from typing import Any, Tuple
import pandas as pd
import sklearn.base
import lale.docstrings
import lale.operators
from lale.lib._common_schemas import schema_estimator
from lale.lib.sklearn import DecisionTreeClassifier, bagging_classifier
from .monoid import Monoid, MonoidableOperator
class _BatchedBaggingClassifierMonoid(Monoid):
def __init__(self, classifiers):
self.classifiers = classifiers
def combine(self, other: "_BatchedBaggingClassifierMonoid"):
orig_classifiers = copy.copy(self.classifiers)
orig_classifiers.extend(other.classifiers)
return _BatchedBaggingClassifierMonoid(classifiers=orig_classifiers)
class _BatchedBaggingClassifierImpl(
MonoidableOperator[_BatchedBaggingClassifierMonoid]
):
def __init__(self, base_estimator=None):
if base_estimator is None:
base_estimator = DecisionTreeClassifier()
self._hyperparams = {"base_estimator": base_estimator}
def predict(self, X):
if len(self.classifiers_list) == 1:
return self.classifiers_list[0].predict(X)
else:
# Take a voting of the classifiers
predictions_list = [
classifier.predict(X) for classifier in self.classifiers_list
]
df = pd.DataFrame(predictions_list).transpose()
predictions = df.mode(axis=1)
if (
predictions.shape[1] > 1
): # When there are multiple modes, pick the first one
predictions = predictions.iloc[:, 0]
predictions = predictions.squeeze() # converts a dataframe to series.
return predictions
def from_monoid(self, monoid: _BatchedBaggingClassifierMonoid):
self._monoid = monoid
self.classifiers_list = monoid.classifiers
def to_monoid(self, batch: Tuple[Any, Any]) -> _BatchedBaggingClassifierMonoid:
X, y = batch
trainable = self._hyperparams["base_estimator"]
if isinstance(trainable, sklearn.base.BaseEstimator):
trainable = sklearn.base.clone(trainable)
if inspect.isclass(trainable):
trainable = trainable()
trained_classifier = trainable.fit(X, y)
return _BatchedBaggingClassifierMonoid([trained_classifier])
_hyperparams_schema = {
"allOf": [
{
"type": "object",
"required": [
"base_estimator",
],
"relevantToOptimizer": [],
"additionalProperties": False,
"properties": {
"base_estimator": schema_estimator,
},
}
]
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """Implementation of a homomorphic bagging classifier.
As proposed in https://izbicki.me/public/papers/icml2013-algebraic-classifiers.pdf""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.rasl.bagging_monoid_classifier.html",
"type": "object",
"tags": {
"pre": ["~categoricals"],
"op": ["estimator"],
"post": [],
},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": bagging_classifier._input_fit_schema,
"input_predict": bagging_classifier.schema_X_numbers,
"output_predict": bagging_classifier.schema_1D_cats,
},
}
BatchedBaggingClassifier = lale.operators.make_operator(
_BatchedBaggingClassifierImpl, _combined_schemas
)
lale.docstrings.set_docstrings(BatchedBaggingClassifier)
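# Minimal usage sketch (hedged), assuming lale's partial_fit plumbing for
# monoidable operators: each partial_fit trains one base estimator on its
# batch, combine concatenates the classifier lists, and predict votes.
if __name__ == "__main__":
    X1, y1 = pd.DataFrame({"f": [0, 1, 2, 3]}), pd.Series([0, 0, 1, 1])
    X2, y2 = pd.DataFrame({"f": [4, 5, 6, 7]}), pd.Series([1, 1, 1, 1])
    clf = BatchedBaggingClassifier()
    clf = clf.partial_fit(X1, y1)
    clf = clf.partial_fit(X2, y2)
    print(clf.predict(pd.DataFrame({"f": [1, 5]})))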
| 4,158 | 33.94958 | 118 |
py
|
lale
|
lale-master/lale/lib/rasl/min_max_scaler.py
|
# Copyright 2021-2022 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import typing
from typing import Any, Tuple
import numpy as np
try:
from sklearn.preprocessing._data import _handle_zeros_in_scale
except ModuleNotFoundError:
def _handle_zeros_in_scale(scale):
constant_mask = scale < 10 * np.finfo(scale.dtype).eps
scale = scale.copy()
scale[constant_mask] = 1.0
return scale
import lale.docstrings
import lale.operators
from lale.datasets.data_schemas import forward_metadata
from lale.expressions import it, ite
from lale.expressions import max as agg_max
from lale.expressions import min as agg_min
from lale.helpers import _is_spark_df
from lale.lib.dataframe import count, get_columns
from lale.lib.rasl import Aggregate, Map
from lale.lib.sklearn import min_max_scaler
from lale.schemas import Enum
from .monoid import Monoid, MonoidableOperator
class _MinMaxScalerMonoid(Monoid):
def __init__(
self,
*,
data_min_: np.ndarray,
data_max_: np.ndarray,
n_samples_seen_: int,
feature_names_in_,
):
self.data_min_ = data_min_
self.data_max_ = data_max_
self.n_samples_seen_ = n_samples_seen_
self.feature_names_in_ = feature_names_in_
def combine(self, other: "_MinMaxScalerMonoid"):
data_min_ = np.minimum(self.data_min_, other.data_min_)
data_max_ = np.maximum(self.data_max_, other.data_max_)
n_samples_seen_ = self.n_samples_seen_ + other.n_samples_seen_
        assert list(self.feature_names_in_) == list(other.feature_names_in_)
feature_names_in_ = self.feature_names_in_
return _MinMaxScalerMonoid(
data_min_=data_min_,
data_max_=data_max_,
n_samples_seen_=n_samples_seen_,
feature_names_in_=feature_names_in_,
)
class _MinMaxScalerImpl(MonoidableOperator[_MinMaxScalerMonoid]):
def __init__(self, feature_range=(0, 1), *, copy=True, clip=False):
if not copy:
raise ValueError("`copy=False` is not supported by this implementation")
self._hyperparams = {"feature_range": feature_range, "copy": copy, "clip": clip}
def transform(self, X):
if self._transformer is None:
self._transformer = self._build_transformer()
X_new = self._transformer.transform(X)
return forward_metadata(X, X_new)
@property
def data_min_(self):
return getattr(self._monoid, "data_min_", None)
@property
def data_max_(self):
return getattr(self._monoid, "data_max_", None)
@property
def n_samples_seen_(self):
return getattr(self._monoid, "n_samples_seen_", 0)
@property
def feature_names_in_(self):
return getattr(self._monoid, "feature_names_in_", None)
def from_monoid(self, monoid: _MinMaxScalerMonoid):
self._monoid = monoid
self.n_features_in_ = len(monoid.feature_names_in_)
self.data_range_ = monoid.data_max_ - monoid.data_min_ # type: ignore
range_min, range_max = self._hyperparams["feature_range"]
self.scale_ = (range_max - range_min) / _handle_zeros_in_scale(monoid.data_max_ - monoid.data_min_) # type: ignore
self.min_ = range_min - monoid.data_min_ * self.scale_
self._transformer = None
def _build_transformer(self):
range_min, range_max = self._hyperparams["feature_range"]
scale_columns = {}
dmin = self.data_min_
assert dmin is not None
dmax = self.data_max_
assert dmax is not None
assert self.feature_names_in_ is not None
for i, c in enumerate(self.feature_names_in_):
c_std = (it[c] - dmin[i]) / _handle_zeros_in_scale(dmax[i] - dmin[i])
c_scaled = c_std * (range_max - range_min) + range_min
scale_columns.update({c: c_scaled})
scale_map = Map(columns=scale_columns)
if not self._hyperparams["clip"]:
return scale_map
clip_map = Map(
columns={
c: ite(
it[c] >= range_min,
ite(
it[c] <= range_max,
it[c],
range_max,
),
range_min,
)
for c in self.feature_names_in_
}
)
return scale_map >> clip_map
def to_monoid(self, batch: Tuple[Any, Any]) -> _MinMaxScalerMonoid:
X, _ = batch
X_cols = get_columns(X)
agg = {f"{c}_min": agg_min(it[c]) for c in X_cols}
agg.update({f"{c}_max": agg_max(it[c]) for c in X_cols})
aggregate = Aggregate(columns=agg)
data_min_max = aggregate.transform(X)
if _is_spark_df(X):
data_min_max = data_min_max.toPandas()
n = len(X_cols)
data_min_ = np.zeros(shape=(n))
data_max_ = np.zeros(shape=(n))
for i, c in enumerate(X_cols):
data_min_[i] = data_min_max[f"{c}_min"]
data_max_[i] = data_min_max[f"{c}_max"]
data_min_ = np.array(data_min_)
data_max_ = np.array(data_max_)
n_samples_seen_ = count(X)
feature_names_in_ = X_cols
return _MinMaxScalerMonoid(
data_min_=data_min_,
data_max_=data_max_,
n_samples_seen_=n_samples_seen_,
feature_names_in_=feature_names_in_,
)
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Relational algebra implementation of MinMaxScaler.",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.rasl.min_max_scaler.html",
"type": "object",
"tags": {
"pre": ["~categoricals"],
"op": ["transformer", "interpretable"],
"post": [],
},
"properties": {
"hyperparams": min_max_scaler._hyperparams_schema,
"input_fit": min_max_scaler._input_schema_fit,
"input_transform": min_max_scaler._input_transform_schema,
"output_transform": min_max_scaler._output_transform_schema,
},
}
MinMaxScaler = lale.operators.make_operator(_MinMaxScalerImpl, _combined_schemas)
MinMaxScaler = typing.cast(
lale.operators.PlannedIndividualOp,
MinMaxScaler.customize_schema(
copy=Enum(
values=[True],
desc="`copy=True` is the only value currently supported by this implementation",
default=True,
),
clip={ # missing from min_max_scaler._hyperparams_schema pre sklearn 0.24
"type": "boolean",
"description": "Set to True to clip transformed values of held-out data to provided feature range.",
"default": False,
},
),
)
lale.docstrings.set_docstrings(MinMaxScaler)
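# Minimal usage sketch (hedged): two partial_fit calls over disjoint batches
# are expected to agree with a single fit over the concatenation, since the
# monoid combine just takes element-wise min/max. Toy data is illustrative.
if __name__ == "__main__":
    import pandas as pd
    example_X = pd.DataFrame({"a": [1.0, 2.0, 3.0, 5.0], "b": [10.0, 0.0, 5.0, 10.0]})
    trained = MinMaxScaler().fit(example_X)
    batched = MinMaxScaler().partial_fit(example_X.iloc[:2]).partial_fit(example_X.iloc[2:])
    print(trained.transform(example_X).equals(batched.transform(example_X)))  # expected: True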
| 7,335 | 34.61165 | 123 |
py
|
lale
|
lale-master/lale/lib/category_encoders/hashing_encoder.py
|
# Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import hashlib
import pandas as pd
try:
from category_encoders.hashing import HashingEncoder as _SkHashingEncoder
except ImportError:
class _SkHashingEncoder: # type: ignore
def __init__(self, *args, **hyperparams):
raise ValueError("The package 'category_encoders' is not installed.")
def fit(self, X, y=None):
raise ValueError("The package 'category_encoders' is not installed.")
def transform(self, X):
raise ValueError("The package 'category_encoders' is not installed.")
import lale.docstrings
import lale.operators
_hyperparams_schema = {
"description": "Hyperparameter schema for the HashingEncoder model from scikit-learn contrib.",
"allOf": [
{
"type": "object",
"additionalProperties": False,
"required": ["n_components", "cols", "hash_method"],
"relevantToOptimizer": [],
"properties": {
"n_components": {
"description": "how many bits to use to represent the feature.",
"type": "integer",
"default": 8,
},
"cols": {
"description": "a list of columns to encode, if None, all string columns will be encoded.",
"anyOf": [
{"enum": [None]},
{
"type": "array",
"items": {"type": "string"},
},
],
"default": None,
},
"hash_method": {
"description": "which hashing method to use.",
"enum": list(hashlib.algorithms_available),
"default": "md5",
},
},
}
],
}
_input_fit_schema = {
"type": "object",
"required": ["X"],
"additionalProperties": False,
"properties": {
"X": {
"description": "Features; the outer array is over samples.",
"type": "array",
"items": {
"type": "array",
"items": {"anyOf": [{"type": "number"}, {"type": "string"}]},
},
},
"y": {"description": "Target class labels; the array is over samples."},
},
}
_input_transform_schema = {
"type": "object",
"required": ["X"],
"additionalProperties": False,
"properties": {
"X": {
"description": "Features; the outer array is over samples.",
"type": "array",
"items": {
"type": "array",
"items": {"anyOf": [{"type": "number"}, {"type": "string"}]},
},
}
},
}
_output_transform_schema = {
"description": "Hash codes.",
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """`Hashing encoder`_ transformer from scikit-learn contrib that encodes categorical features as numbers.
.. _`Hashing encoder`: https://contrib.scikit-learn.org/category_encoders/hashing.html
""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.category_encoders.hashing_encoder.html",
"import_from": "category_encoders.hashing",
"type": "object",
"tags": {"pre": ["categoricals"], "op": ["transformer"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_transform": _input_transform_schema,
"output_transform": _output_transform_schema,
},
}
class _HashingEncoderImpl:
def __init__(self, **hyperparams):
self._wrapped_model = _SkHashingEncoder(**hyperparams)
def fit(self, X, y=None):
self._wrapped_model.fit(X, y)
if isinstance(X, pd.DataFrame):
self._X_columns = X.columns
return self
def transform(self, X):
result = self._wrapped_model.transform(X)
return result
HashingEncoder = lale.operators.make_operator(_HashingEncoderImpl, _combined_schemas)
lale.docstrings.set_docstrings(HashingEncoder)
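# Minimal usage sketch (hedged), assuming the category_encoders package is
# installed: each string column is hashed into n_components numeric columns.
if __name__ == "__main__":
    example_X = pd.DataFrame({"color": ["red", "green", "blue", "red"]})
    print(HashingEncoder(n_components=4).fit(example_X).transform(example_X))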
| 4,812 | 31.52027 | 124 |
py
|
lale
|
lale-master/lale/lib/category_encoders/__init__.py
|
# Copyright 2022 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Schema-enhanced versions of some of the operators from `category_encoders`_ to enable hyperparameter tuning.
.. _`category_encoders`: https://contrib.scikit-learn.org/category_encoders
Operators
=========
* lale.lib.category_encoders. `HashingEncoder`_
* lale.lib.category_encoders. `TargetEncoder`_
.. _`HashingEncoder`: lale.lib.category_encoders.hashing_encoder.html
.. _`TargetEncoder`: lale.lib.category_encoders.target_encoder.html
"""
# Note: all imports should be done as
# from .xxx import XXX as XXX
# this ensures that pyright considers them to be publicly available
# and not private imports (this affects lale users that use pyright)
from .hashing_encoder import HashingEncoder as HashingEncoder
from .target_encoder import TargetEncoder as TargetEncoder
| 1,355 | 34.684211 | 108 |
py
|
lale
|
lale-master/lale/lib/category_encoders/target_encoder.py
|
# Copyright 2022 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
import numpy as np
from packaging import version
try:
import category_encoders
catenc_version = version.parse(getattr(category_encoders, "__version__"))
except ImportError:
catenc_version = None
import lale.docstrings
import lale.operators
_hyperparams_schema = {
"allOf": [
{
"type": "object",
"additionalProperties": False,
"required": [
"verbose",
"cols",
"drop_invariant",
"return_df",
"handle_missing",
"handle_unknown",
"min_samples_leaf",
"smoothing",
],
"relevantToOptimizer": [],
"properties": {
"verbose": {
"type": "integer",
"description": "Verbosity of the output, 0 for none.",
"default": 0,
},
"cols": {
"description": "Columns to encode.",
"anyOf": [
{
"enum": [None],
"description": "All string columns will be encoded.",
},
{
"type": "array",
"items": {"type": "string"},
},
],
"default": None,
},
"drop_invariant": {
"type": "boolean",
"default": False,
"description": "Whether to drop columns with 0 variance.",
},
"return_df": {
"type": "boolean",
"default": True,
"description": "Whether to return a pandas DataFrame from transform (otherwise it will be a numpy array).",
},
"handle_missing": {
"enum": ["error", "return_nan", "value"],
"default": "value",
"description": "Given 'value', return the target mean.",
},
"handle_unknown": {
"enum": ["error", "return_nan", "value"],
"default": "value",
"description": "Given 'value', return the target mean.",
},
"min_samples_leaf": {
"type": "integer",
"default": 1,
"minimum": 1,
"maximumForOptimizer": 10,
"description": "For regularization the weighted average between category mean and global mean is taken. The weight is an S-shaped curve between 0 and 1 with the number of samples for a category on the x-axis. The curve reaches 0.5 at min_samples_leaf. (parameter k in the original paper)",
},
"smoothing": {
"type": "number",
"default": 1.0,
"minimum": 0.0,
"exclusiveMinimum": True,
"maximumForOptimizer": 10.0,
"description": "Smoothing effect to balance categorical average vs prior. Higher value means stronger regularization. The value must be strictly bigger than 0. Higher values mean a flatter S-curve (see min_samples_leaf).",
},
},
}
],
}
_input_fit_schema = {
"type": "object",
"required": ["X", "y"],
"additionalProperties": False,
"properties": {
"X": {
"description": "Features; the outer array is over samples.",
"type": "array",
"items": {
"type": "array",
"items": {"anyOf": [{"type": "number"}, {"type": "string"}]},
},
},
"y": {
"description": "Target class labels; the array is over samples.",
"anyOf": [
{"type": "array", "items": {"type": "number"}},
{"type": "array", "items": {"type": "string"}},
],
},
},
}
_input_transform_schema = {
"type": "object",
"required": ["X"],
"additionalProperties": False,
"properties": {
"X": {
"description": "Features; the outer array is over samples.",
"type": "array",
"items": {
"type": "array",
"items": {"anyOf": [{"type": "number"}, {"type": "string"}]},
},
}
},
}
_output_transform_schema = {
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """`Target encoder`_ transformer from scikit-learn contrib that encodes categorical features as numbers.
.. _`Target encoder`: https://contrib.scikit-learn.org/category_encoders/targetencoder.html
""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.category_encoders.target_encoder.html",
"import_from": "category_encoders.target_encoder",
"type": "object",
"tags": {"pre": ["categoricals"], "op": ["transformer"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_transform": _input_transform_schema,
"output_transform": _output_transform_schema,
},
}
class _TargetEncoderImpl:
def __init__(self, **hyperparams):
if catenc_version is None:
raise ValueError("The package 'category_encoders' is not installed.")
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=FutureWarning)
self._wrapped_model = category_encoders.TargetEncoder(**hyperparams)
def fit(self, X, y):
if catenc_version is None:
raise ValueError("The package 'category_encoders' is not installed.")
if np.issubdtype(y.dtype, np.number):
numeric_y = y
else:
from sklearn.preprocessing import LabelEncoder
trainable_le = LabelEncoder()
trained_le = trainable_le.fit(y)
numeric_y = trained_le.transform(y)
self._wrapped_model.fit(X, numeric_y)
return self
def transform(self, X):
if catenc_version is None:
raise ValueError("The package 'category_encoders' is not installed.")
result = self._wrapped_model.transform(X)
return result
TargetEncoder = lale.operators.make_operator(_TargetEncoderImpl, _combined_schemas)
if catenc_version is not None and catenc_version >= version.Version("2.5.1"):
TargetEncoder = TargetEncoder.customize_schema(
hierarchy={
"laleType": "Any",
"default": None,
"description": "A dictionary or a dataframe to define the hierarchy for mapping.",
},
)
lale.docstrings.set_docstrings(TargetEncoder)
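# Minimal usage sketch (hedged), assuming the category_encoders package is
# installed: string targets are label-encoded to numbers in fit before
# delegating to category_encoders.TargetEncoder, so both numeric and string
# labels work.
if __name__ == "__main__":
    import pandas as pd
    example_X = pd.DataFrame({"color": ["red", "green", "red", "blue"]})
    example_y = pd.Series(["yes", "no", "yes", "no"])
    print(TargetEncoder().fit(example_X, example_y).transform(example_X))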
| 7,581 | 34.933649 | 309 |
py
|
lale
|
lale-master/lale/lib/autogen/max_abs_scaler.py
|
from numpy import inf, nan
from sklearn.preprocessing import MaxAbsScaler as Op
from lale.docstrings import set_docstrings
from lale.operators import make_operator
class _MaxAbsScalerImpl:
def __init__(self, **hyperparams):
self._hyperparams = hyperparams
self._wrapped_model = Op(**self._hyperparams)
def fit(self, X, y=None):
if y is not None:
self._wrapped_model.fit(X, y)
else:
self._wrapped_model.fit(X)
return self
def transform(self, X):
return self._wrapped_model.transform(X)
_hyperparams_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "inherited docstring for MaxAbsScaler Scale each feature by its maximum absolute value.",
"allOf": [
{
"type": "object",
"required": ["copy"],
"relevantToOptimizer": ["copy"],
"additionalProperties": False,
"properties": {
"copy": {
"XXX TODO XXX": "boolean, optional, default is True",
"description": "Set to False to perform inplace scaling and avoid a copy (if the input is already a numpy array).",
"type": "boolean",
"default": True,
}
},
}
],
}
_input_fit_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Compute the maximum absolute value to be used for later scaling.",
"type": "object",
"required": ["X"],
"properties": {
"X": {
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
"description": "The data used to compute the per-feature minimum and maximum used for later scaling along the features axis.",
}
},
}
_input_transform_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Scale the data",
"type": "object",
"required": ["X"],
"properties": {
"X": {
"type": "array",
"items": {"laleType": "Any", "XXX TODO XXX": "item type"},
"XXX TODO XXX": "{array-like, sparse matrix}",
"description": "The data that should be scaled.",
}
},
}
_output_transform_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Scale the data",
"laleType": "Any",
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Combined schema for expected data and hyperparameters.",
"documentation_url": "https://scikit-learn.org/0.20/modules/generated/sklearn.preprocessing.MaxAbsScaler#sklearn-preprocessing-maxabsscaler",
"import_from": "sklearn.preprocessing",
"type": "object",
"tags": {"pre": [], "op": ["transformer"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_transform": _input_transform_schema,
"output_transform": _output_transform_schema,
},
}
MaxAbsScaler = make_operator(_MaxAbsScalerImpl, _combined_schemas)
set_docstrings(MaxAbsScaler)
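# Minimal usage sketch (hedged): the wrapper delegates to
# sklearn.preprocessing.MaxAbsScaler, dividing each column by its maximum
# absolute value. Toy data is illustrative.
if __name__ == "__main__":
    example_X = [[-4.0, 2.0], [2.0, -1.0], [0.0, 1.0]]
    print(MaxAbsScaler().fit(example_X).transform(example_X))  # columns land in [-1, 1]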
| 3,179 | 33.193548 | 145 |
py
|
lale
|
lale-master/lale/lib/autogen/cca.py
|
from numpy import inf, nan
from sklearn.cross_decomposition import CCA as Op
from lale.docstrings import set_docstrings
from lale.operators import make_operator
class _CCAImpl:
def __init__(self, **hyperparams):
self._hyperparams = hyperparams
self._wrapped_model = Op(**self._hyperparams)
def fit(self, X, y=None):
if y is not None:
self._wrapped_model.fit(X, y)
else:
self._wrapped_model.fit(X)
return self
def transform(self, X):
return self._wrapped_model.transform(X)
def predict(self, X):
return self._wrapped_model.predict(X)
_hyperparams_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "inherited docstring for CCA CCA Canonical Correlation Analysis.",
"allOf": [
{
"type": "object",
"required": ["n_components", "scale", "max_iter", "tol", "copy"],
"relevantToOptimizer": ["n_components", "scale", "max_iter", "tol", "copy"],
"additionalProperties": False,
"properties": {
"n_components": {
"type": "integer",
"minimumForOptimizer": 2,
"maximumForOptimizer": 256,
"distribution": "uniform",
"default": 2,
"description": "number of components to keep.",
},
"scale": {
"type": "boolean",
"default": True,
"description": "whether to scale the data?",
},
"max_iter": {
"XXX TODO XXX": "an integer, (default 500)",
"description": "the maximum number of iterations of the NIPALS inner loop",
"type": "integer",
"minimumForOptimizer": 10,
"maximumForOptimizer": 1000,
"distribution": "uniform",
"default": 500,
},
"tol": {
"XXX TODO XXX": "non-negative real, default 1e-06.",
"description": "the tolerance used in the iterative algorithm",
"type": "number",
"minimumForOptimizer": 1e-08,
"maximumForOptimizer": 0.01,
"default": 1e-06,
},
"copy": {
"type": "boolean",
"default": True,
"description": "Whether the deflation be done on a copy",
},
},
}
],
}
_input_fit_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Fit model to data.",
"type": "object",
"required": ["X"],
"properties": {
"X": {
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
"description": "Training vectors, where n_samples is the number of samples and n_features is the number of predictors.",
},
"Y": {
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
"description": "Target vectors, where n_samples is the number of samples and n_targets is the number of response variables.",
},
},
}
_input_transform_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Apply the dimension reduction learned on the train data.",
"type": "object",
"required": ["X"],
"properties": {
"X": {
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
"description": "Training vectors, where n_samples is the number of samples and n_features is the number of predictors.",
},
"Y": {
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
"description": "Target vectors, where n_samples is the number of samples and n_targets is the number of response variables.",
},
"copy": {
"type": "boolean",
"default": True,
"description": "Whether to copy X and Y, or perform in-place normalization.",
},
},
}
_output_transform_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Apply the dimension reduction learned on the train data.",
"laleType": "Any",
"XXX TODO XXX": "x_scores if Y is not given, (x_scores, y_scores) otherwise.",
}
_input_predict_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Apply the dimension reduction learned on the train data.",
"type": "object",
"required": ["X"],
"properties": {
"X": {
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
"description": "Training vectors, where n_samples is the number of samples and n_features is the number of predictors.",
},
"copy": {
"type": "boolean",
"default": True,
"description": "Whether to copy X and Y, or perform in-place normalization.",
},
},
}
_output_predict_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Apply the dimension reduction learned on the train data.",
"laleType": "Any",
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Combined schema for expected data and hyperparameters.",
"documentation_url": "https://scikit-learn.org/0.20/modules/generated/sklearn.cross_decomposition.CCA#sklearn-cross_decomposition-cca",
"import_from": "sklearn.cross_decomposition",
"type": "object",
"tags": {"pre": [], "op": ["transformer", "estimator"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_transform": _input_transform_schema,
"output_transform": _output_transform_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
},
}
CCA = make_operator(_CCAImpl, _combined_schemas)
set_docstrings(CCA)
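# Minimal usage sketch (hedged): fit learns paired projections of X and Y
# that maximize their correlation; transform projects X onto the learned
# components. Toy data in the style of the sklearn docs example.
if __name__ == "__main__":
    example_X = [[0.0, 0.0, 1.0], [1.0, 0.0, 0.0], [2.0, 2.0, 2.0], [3.0, 5.0, 4.0]]
    example_Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
    trained = CCA(n_components=1).fit(example_X, example_Y)
    print(trained.transform(example_X))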
| 6,276 | 37.042424 | 139 |
py
|
lale
|
lale-master/lale/lib/autogen/ard_regression.py
|
from numpy import inf, nan
from packaging import version
from sklearn.linear_model import ARDRegression as Op
import lale
from lale.docstrings import set_docstrings
from lale.operators import make_operator, sklearn_version
class _ARDRegressionImpl:
def __init__(self, **hyperparams):
self._hyperparams = hyperparams
self._wrapped_model = Op(**self._hyperparams)
def fit(self, X, y=None):
if y is not None:
self._wrapped_model.fit(X, y)
else:
self._wrapped_model.fit(X)
return self
def predict(self, X):
return self._wrapped_model.predict(X)
_hyperparams_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "inherited docstring for ARDRegression Bayesian ARD regression.",
"allOf": [
{
"type": "object",
"required": [
"n_iter",
"tol",
"alpha_1",
"alpha_2",
"lambda_1",
"lambda_2",
"compute_score",
"threshold_lambda",
"fit_intercept",
"normalize",
"copy_X",
"verbose",
],
"relevantToOptimizer": [
"n_iter",
"tol",
"compute_score",
"fit_intercept",
"normalize",
"copy_X",
],
"additionalProperties": False,
"properties": {
"n_iter": {
"type": "integer",
"minimumForOptimizer": 5,
"maximumForOptimizer": 1000,
"distribution": "uniform",
"default": 300,
"description": "Maximum number of iterations",
},
"tol": {
"type": "number",
"minimumForOptimizer": 1e-08,
"maximumForOptimizer": 0.01,
"default": 0.001,
"description": "Stop the algorithm if w has converged",
},
"alpha_1": {
"type": "number",
"default": 1e-06,
"description": "Hyper-parameter : shape parameter for the Gamma distribution prior over the alpha parameter",
},
"alpha_2": {
"type": "number",
"default": 1e-06,
"description": "Hyper-parameter : inverse scale parameter (rate parameter) for the Gamma distribution prior over the alpha parameter",
},
"lambda_1": {
"type": "number",
"default": 1e-06,
"description": "Hyper-parameter : shape parameter for the Gamma distribution prior over the lambda parameter",
},
"lambda_2": {
"type": "number",
"default": 1e-06,
"description": "Hyper-parameter : inverse scale parameter (rate parameter) for the Gamma distribution prior over the lambda parameter",
},
"compute_score": {
"type": "boolean",
"default": False,
"description": "If True, compute the objective function at each step of the model",
},
"threshold_lambda": {
"type": "number",
"default": 10000.0,
"description": "threshold for removing (pruning) weights with high precision from the computation",
},
"fit_intercept": {
"type": "boolean",
"default": True,
"description": "whether to calculate the intercept for this model",
},
"normalize": {
"type": "boolean",
"default": False,
"description": "This parameter is ignored when ``fit_intercept`` is set to False",
},
"copy_X": {
"type": "boolean",
"default": True,
"description": "If True, X will be copied; else, it may be overwritten.",
},
"verbose": {
"type": "boolean",
"default": False,
"description": "Verbose mode when fitting the model.",
},
},
}
],
}
_input_fit_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Fit the ARDRegression model according to the given training data",
"type": "object",
"required": ["X", "y"],
"properties": {
"X": {
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
"description": "Training vector, where n_samples in the number of samples and n_features is the number of features.",
},
"y": {
"type": "array",
"items": {"type": "number"},
"description": "Target values (integers)",
},
},
}
_input_predict_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Predict using the linear model.",
"type": "object",
"required": ["X"],
"properties": {
"X": {
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
"description": "Samples.",
},
"return_std": {
"anyOf": [{"type": "boolean"}, {"enum": [None]}],
"default": None,
"description": "Whether to return the standard deviation of posterior prediction.",
},
},
}
_output_predict_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Predict using the linear model.",
"laleType": "Any",
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Combined schema for expected data and hyperparameters.",
"documentation_url": "https://scikit-learn.org/0.20/modules/generated/sklearn.linear_model.ARDRegression#sklearn-linear_model-ardregression",
"import_from": "sklearn.linear_model",
"type": "object",
"tags": {"pre": [], "op": ["estimator", "regressor"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
},
}
ARDRegression = make_operator(_ARDRegressionImpl, _combined_schemas)
if sklearn_version >= version.Version("1.2"):
# new: "https://scikit-learn.org/1.2/modules/generated/sklearn.linear_model.ARDRegression#sklearn-linear_model-ardregression"
ARDRegression = ARDRegression.customize_schema(
normalize=None,
set_as_available=True,
)
set_docstrings(ARDRegression)
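# Minimal usage sketch (hedged), assuming a scikit-learn version compatible
# with this wrapper's hyperparameter set: fit learns a Bayesian linear model
# with automatic relevance determination, and predict returns point estimates.
if __name__ == "__main__":
    example_X = [[0.0, 0.0], [1.0, 1.0], [2.0, 2.0], [3.0, 3.0]]
    example_y = [0.0, 1.0, 2.0, 3.0]
    print(ARDRegression().fit(example_X, example_y).predict([[1.5, 1.5]]))  # roughly [1.5]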
| 7,072 | 36.031414 | 155 |
py
|
lale
|
lale-master/lale/lib/autogen/label_binarizer.py
|
from numpy import inf, nan
from sklearn.preprocessing import LabelBinarizer as Op
from lale.docstrings import set_docstrings
from lale.operators import make_operator
class _LabelBinarizerImpl:
def __init__(self, **hyperparams):
self._hyperparams = hyperparams
self._wrapped_model = Op(**self._hyperparams)
def fit(self, X, y=None):
if y is not None:
self._wrapped_model.fit(X, y)
else:
self._wrapped_model.fit(X)
return self
def transform(self, X):
return self._wrapped_model.transform(X)
_hyperparams_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "inherited docstring for LabelBinarizer Binarize labels in a one-vs-all fashion",
"allOf": [
{
"type": "object",
"required": ["neg_label", "pos_label", "sparse_output"],
"relevantToOptimizer": ["neg_label", "pos_label", "sparse_output"],
"additionalProperties": False,
"properties": {
"neg_label": {
"type": "integer",
"minimumForOptimizer": 0,
"maximumForOptimizer": 1,
"distribution": "uniform",
"default": 0,
"description": "Value with which negative labels must be encoded.",
},
"pos_label": {
"type": "integer",
"minimumForOptimizer": 1,
"maximumForOptimizer": 2,
"distribution": "uniform",
"default": 1,
"description": "Value with which positive labels must be encoded.",
},
"sparse_output": {
"type": "boolean",
"default": False,
"description": "True if the returned array from transform is desired to be in sparse CSR format.",
},
},
}
],
}
_input_fit_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Fit label binarizer",
"type": "object",
"required": ["y"],
"properties": {
"y": {
"anyOf": [
{"type": "array", "items": {"type": "number"}},
{
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
},
],
"description": "Target values",
}
},
}
_input_transform_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Transform multi-class labels to binary labels",
"type": "object",
"required": ["y"],
"properties": {
"y": {
"anyOf": [
{
"type": "array",
"items": {"laleType": "Any", "XXX TODO XXX": "item type"},
"XXX TODO XXX": "array or sparse matrix of shape [n_samples,] or [n_samples, n_classes]",
},
{"type": "array", "items": {"type": "number"}},
{
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
},
],
"description": "Target values",
}
},
}
_output_transform_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Shape will be [n_samples, 1] for binary problems.",
"laleType": "Any",
"XXX TODO XXX": "numpy array or CSR matrix of shape [n_samples, n_classes]",
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Combined schema for expected data and hyperparameters.",
"documentation_url": "https://scikit-learn.org/0.20/modules/generated/sklearn.preprocessing.LabelBinarizer#sklearn-preprocessing-labelbinarizer",
"import_from": "sklearn.preprocessing",
"type": "object",
"tags": {"pre": [], "op": ["transformer"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_transform": _input_transform_schema,
"output_transform": _output_transform_schema,
},
}
LabelBinarizer = make_operator(_LabelBinarizerImpl, _combined_schemas)
set_docstrings(LabelBinarizer)
| 4,388 | 34.682927 | 149 |
py
|
lale
|
lale-master/lale/lib/autogen/truncated_svd.py
|
from numpy import inf, nan
from sklearn.decomposition import TruncatedSVD as Op
from lale.docstrings import set_docstrings
from lale.operators import make_operator
class _TruncatedSVDImpl:
def __init__(self, **hyperparams):
self._hyperparams = hyperparams
self._wrapped_model = Op(**self._hyperparams)
def fit(self, X, y=None):
if y is not None:
self._wrapped_model.fit(X, y)
else:
self._wrapped_model.fit(X)
return self
def transform(self, X):
return self._wrapped_model.transform(X)
_hyperparams_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "inherited docstring for TruncatedSVD Dimensionality reduction using truncated SVD (aka LSA).",
"allOf": [
{
"type": "object",
"required": ["n_components", "algorithm", "n_iter", "random_state", "tol"],
"relevantToOptimizer": ["n_components", "algorithm", "n_iter", "tol"],
"additionalProperties": False,
"properties": {
"n_components": {
"type": "integer",
"minimun": 1,
"laleMaximum": "X/items/maxItems",
"minimumForOptimizer": 2,
"maximumForOptimizer": 256,
"distribution": "uniform",
"default": 2,
"description": "Desired dimensionality of output data",
},
"algorithm": {
"enum": ["arpack", "randomized"],
"default": "randomized",
"description": "SVD solver to use",
},
"n_iter": {
"type": "integer",
"minimumForOptimizer": 5,
"maximumForOptimizer": 1000,
"distribution": "uniform",
"default": 5,
"description": "Number of iterations for randomized SVD solver",
},
"random_state": {
"anyOf": [
{"type": "integer"},
{"laleType": "numpy.random.RandomState"},
{"enum": [None]},
],
"default": None,
"description": "If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`.",
},
"tol": {
"type": "number",
"minimumForOptimizer": 1e-08,
"maximumForOptimizer": 0.01,
"default": 0.0,
"description": "Tolerance for ARPACK",
},
},
},
],
}
_input_fit_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Fit LSI model on training data X.",
"type": "object",
"required": ["X", "y"],
"properties": {
"X": {
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
"description": "Training data.",
},
"y": {},
},
}
_input_transform_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Perform dimensionality reduction on X.",
"type": "object",
"required": ["X"],
"properties": {
"X": {
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
"description": "New data.",
}
},
}
_output_transform_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Reduced version of X",
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Combined schema for expected data and hyperparameters.",
"documentation_url": "https://scikit-learn.org/0.20/modules/generated/sklearn.decomposition.TruncatedSVD#sklearn-decomposition-truncatedsvd",
"import_from": "sklearn.decomposition",
"type": "object",
"tags": {"pre": [], "op": ["transformer"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_transform": _input_transform_schema,
"output_transform": _output_transform_schema,
},
}
TruncatedSVD = make_operator(_TruncatedSVDImpl, _combined_schemas)
set_docstrings(TruncatedSVD)
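# Minimal usage sketch (hedged): fit learns a rank-n_components truncated SVD
# and transform projects rows onto those components. Toy data is illustrative.
if __name__ == "__main__":
    import numpy as np
    example_X = np.random.RandomState(0).rand(6, 4)
    reduced = TruncatedSVD(n_components=2, random_state=0).fit(example_X).transform(example_X)
    print(reduced.shape)  # (6, 2)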
| 4,673 | 35.80315 | 263 |
py
|
lale
|
lale-master/lale/lib/autogen/theil_sen_regressor.py
|
from numpy import inf, nan
from sklearn.linear_model import TheilSenRegressor as Op
from lale.docstrings import set_docstrings
from lale.operators import make_operator
class _TheilSenRegressorImpl:
def __init__(self, **hyperparams):
self._hyperparams = hyperparams
self._wrapped_model = Op(**self._hyperparams)
def fit(self, X, y=None):
if y is not None:
self._wrapped_model.fit(X, y)
else:
self._wrapped_model.fit(X)
return self
def predict(self, X):
return self._wrapped_model.predict(X)
_hyperparams_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "inherited docstring for TheilSenRegressor Theil-Sen Estimator: robust multivariate regression model.",
"allOf": [
{
"type": "object",
"required": [
"fit_intercept",
"copy_X",
"max_subpopulation",
"n_subsamples",
"max_iter",
"tol",
"random_state",
"n_jobs",
"verbose",
],
"relevantToOptimizer": [
"fit_intercept",
"copy_X",
"max_subpopulation",
"max_iter",
"tol",
],
"additionalProperties": False,
"properties": {
"fit_intercept": {
"type": "boolean",
"default": True,
"description": "Whether to calculate the intercept for this model",
},
"copy_X": {
"type": "boolean",
"default": True,
"description": "If True, X will be copied; else, it may be overwritten.",
},
"max_subpopulation": {
"type": "integer",
"minimumForOptimizer": 10000,
"maximumForOptimizer": 10001,
"distribution": "uniform",
"default": 10000,
"description": "Instead of computing with a set of cardinality 'n choose k', where n is the number of samples and k is the number of subsamples (at least number of features), consider only a stochastic subpopulation of a given maximal size if 'n choose k' is larger than max_subpopulation",
},
"n_subsamples": {
"anyOf": [{"type": "integer"}, {"enum": [None]}],
"default": None,
"description": "Number of samples to calculate the parameters",
},
"max_iter": {
"type": "integer",
"minimumForOptimizer": 10,
"maximumForOptimizer": 1000,
"distribution": "uniform",
"default": 300,
"description": "Maximum number of iterations for the calculation of spatial median.",
},
"tol": {
"type": "number",
"minimumForOptimizer": 1e-08,
"maximumForOptimizer": 0.01,
"default": 0.001,
"description": "Tolerance when calculating spatial median.",
},
"random_state": {
"anyOf": [
{"type": "integer"},
{"laleType": "numpy.random.RandomState"},
{"enum": [None]},
],
"default": None,
"description": "A random number generator instance to define the state of the random permutations generator",
},
"n_jobs": {
"anyOf": [{"type": "integer"}, {"enum": [None]}],
"default": 1,
"description": "Number of CPUs to use during the cross validation",
},
"verbose": {
"type": "boolean",
"default": False,
"description": "Verbose mode when fitting the model.",
},
},
},
{
"XXX TODO XXX": "Parameter: max_subpopulation > only a stochastic subpopulation of a given maximal size if 'n choose k' is larger than max_subpopulation"
},
],
}
_input_fit_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Fit linear model.",
"type": "object",
"required": ["X", "y"],
"properties": {
"X": {
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
"description": "Training data",
},
"y": {
"type": "array",
"items": {"type": "number"},
"description": "Target values",
},
},
}
_input_predict_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Predict using the linear model",
"type": "object",
"required": ["X"],
"properties": {
"X": {
"anyOf": [
{
"type": "array",
"items": {"laleType": "Any", "XXX TODO XXX": "item type"},
"XXX TODO XXX": "array_like or sparse matrix, shape (n_samples, n_features)",
},
{
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
},
],
"description": "Samples.",
}
},
}
_output_predict_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Returns predicted values.",
"type": "array",
"items": {"type": "number"},
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Combined schema for expected data and hyperparameters.",
"documentation_url": "https://scikit-learn.org/0.20/modules/generated/sklearn.linear_model.TheilSenRegressor#sklearn-linear_model-theilsenregressor",
"import_from": "sklearn.linear_model",
"type": "object",
"tags": {"pre": [], "op": ["estimator", "regressor"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
},
}
TheilSenRegressor = make_operator(_TheilSenRegressorImpl, _combined_schemas)
set_docstrings(TheilSenRegressor)
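# A minimal usage sketch, assuming scikit-learn's diabetes dataset; the small
# max_subpopulation below is illustrative only, to keep the fit fast.
if __name__ == "__main__":
    from sklearn.datasets import load_diabetes

    X, y = load_diabetes(return_X_y=True)
    trained = TheilSenRegressor(max_subpopulation=100).fit(X, y)
    print(trained.predict(X[:3]))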
| 6,633 | 36.480226 | 310 |
py
|
lale
|
lale-master/lale/lib/autogen/linear_discriminant_analysis.py
|
import sklearn
from numpy import inf, nan
from packaging import version
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as Op
from lale.docstrings import set_docstrings
from lale.operators import make_operator, sklearn_version
class _LinearDiscriminantAnalysisImpl:
def __init__(self, **hyperparams):
self._hyperparams = hyperparams
self._wrapped_model = Op(**self._hyperparams)
def fit(self, X, y=None):
if y is not None:
self._wrapped_model.fit(X, y)
else:
self._wrapped_model.fit(X)
return self
def transform(self, X):
return self._wrapped_model.transform(X)
def predict(self, X):
return self._wrapped_model.predict(X)
def predict_proba(self, X):
return self._wrapped_model.predict_proba(X)
def decision_function(self, X):
return self._wrapped_model.decision_function(X)
_hyperparams_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "inherited docstring for LinearDiscriminantAnalysis Linear Discriminant Analysis",
"allOf": [
{
"type": "object",
"required": [
"solver",
"shrinkage",
"priors",
"n_components",
"store_covariance",
"tol",
],
"relevantToOptimizer": ["solver", "n_components", "tol", "shrinkage"],
"additionalProperties": False,
"properties": {
"solver": {
"enum": ["eigen", "lsqr", "svd"],
"default": "svd",
"description": "Solver to use, possible values: - 'svd': Singular value decomposition (default)",
},
"shrinkage": {
"anyOf": [
{"enum": ["auto"]},
{
"type": "number",
"minimumForOptimizer": 0,
"maximumForOptimizer": 1,
"minimum": 0,
"maximum": 1,
"exclusiveMinimum": True,
"exclusiveMaximum": True,
"distribution": "uniform",
},
{"enum": [None]},
],
"default": None,
"description": "Shrinkage parameter, possible values: - None: no shrinkage (default)",
},
"priors": {
"XXX TODO XXX": "array, optional, shape (n_classes,)",
"description": "Class priors.",
"enum": [None],
"default": None,
},
"n_components": {
"anyOf": [
{
"type": "integer",
"minimun": 1,
"laleMaximum": "X/items/maxItems",
"minimumForOptimizer": 2,
"maximumForOptimizer": 256,
"distribution": "uniform",
},
{"enum": [None]},
],
"default": None,
"description": "Number of components (< n_classes - 1) for dimensionality reduction.",
},
"store_covariance": {
"type": "boolean",
"default": False,
"description": "Additionally compute class covariance matrix (default False), used only in 'svd' solver",
},
"tol": {
"type": "number",
"minimumForOptimizer": 1e-08,
"maximumForOptimizer": 0.01,
"default": 0.0001,
"description": "Threshold used for rank estimation in SVD solver",
},
},
},
{
"description": "shrinkage, only with 'lsqr' and 'eigen' solvers",
"anyOf": [
{"type": "object", "properties": {"shrinkage": {"enum": [None]}}},
{
"type": "object",
"properties": {"solver": {"enum": ["lsqr", "eigen"]}},
},
],
},
{
"description": "store_covariance, only in 'svd' solver",
"anyOf": [
{
"type": "object",
"properties": {"store_covariance": {"enum": [False]}},
},
{"type": "object", "properties": {"solver": {"enum": ["svd"]}}},
],
},
],
}
_input_fit_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Fit LinearDiscriminantAnalysis model according to the given",
"type": "object",
"required": ["X", "y"],
"properties": {
"X": {
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
"description": "Training data.",
},
"y": {
"type": "array",
"items": {"type": "number"},
"description": "Target values.",
},
},
}
_input_transform_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Project data to maximize class separation.",
"type": "object",
"required": ["X"],
"properties": {
"X": {
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
"description": "Input data.",
}
},
}
_output_transform_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Transformed data.",
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
}
_input_predict_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Predict class labels for samples in X.",
"type": "object",
"required": ["X"],
"properties": {
"X": {
"anyOf": [
{
"type": "array",
"items": {"laleType": "Any", "XXX TODO XXX": "item type"},
"XXX TODO XXX": "array_like or sparse matrix, shape (n_samples, n_features)",
},
{
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
},
],
"description": "Samples.",
}
},
}
_output_predict_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Predicted class label per sample.",
"type": "array",
"items": {"type": "number"},
}
_input_predict_proba_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Estimate probability.",
"type": "object",
"required": ["X"],
"properties": {
"X": {
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
"description": "Input data.",
}
},
}
_output_predict_proba_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Estimated probabilities.",
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
}
_input_decision_function_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Predict confidence scores for samples.",
"type": "object",
"required": ["X"],
"properties": {
"X": {
"anyOf": [
{
"type": "array",
"items": {"laleType": "Any", "XXX TODO XXX": "item type"},
"XXX TODO XXX": "array_like or sparse matrix, shape (n_samples, n_features)",
},
{
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
},
],
"description": "Samples.",
}
},
}
_output_decision_function_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Confidence scores per (sample, class) combination",
"laleType": "Any",
"XXX TODO XXX": "array, shape=(n_samples,) if n_classes == 2 else (n_samples, n_classes)",
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Combined schema for expected data and hyperparameters.",
"documentation_url": "https://scikit-learn.org/0.20/modules/generated/sklearn.discriminant_analysis.LinearDiscriminantAnalysis#sklearn-discriminant_analysis-lineardiscriminantanalysis",
"import_from": "sklearn.discriminant_analysis",
"type": "object",
"tags": {"pre": [], "op": ["transformer", "estimator"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_transform": _input_transform_schema,
"output_transform": _output_transform_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
"input_predict_proba": _input_predict_proba_schema,
"output_predict_proba": _output_predict_proba_schema,
"input_decision_function": _input_decision_function_schema,
"output_decision_function": _output_decision_function_schema,
},
}
LinearDiscriminantAnalysis = make_operator(
_LinearDiscriminantAnalysisImpl, _combined_schemas
)
if sklearn_version >= version.Version("0.24"):
# old: https://scikit-learn.org/0.20/modules/generated/sklearn.discriminant_analysis.LinearDiscriminantAnalysis#sklearn-discriminant_analysis-lineardiscriminantanalysis
# new: https://scikit-learn.org/0.24/modules/generated/sklearn.discriminant_analysis.LinearDiscriminantAnalysis#sklearn-discriminant_analysis-lineardiscriminantanalysis
LinearDiscriminantAnalysis = LinearDiscriminantAnalysis.customize_schema(
covariance_estimator={
"anyOf": [
{
"type": "string",
"forOptimizer": False,
},
{"enum": [None]},
],
"default": None,
"description": "type of (covariance estimator). Estimate the covariance matrices instead of relying on the empirical covariance estimator (with potential shrinkage)",
},
set_as_available=True,
)
LinearDiscriminantAnalysis = LinearDiscriminantAnalysis.customize_schema(
constraint={
"description": "covariance estimator is not supported with svd solver. Try another solver",
"anyOf": [
{
"type": "object",
"properties": {"solver": {"not": {"enum": ["svd"]}}},
},
{
"type": "object",
"properties": {"covariance_estimator": {"enum": [None]}},
},
],
},
set_as_available=True,
)
LinearDiscriminantAnalysis = LinearDiscriminantAnalysis.customize_schema(
constraint={
"description": "covariance_estimator and shrinkage parameters are not None. Only one of the two can be set.",
"anyOf": [
{"type": "object", "properties": {"solver": {"enum": ["svd", "lsqr"]}}},
{
"type": "object",
"properties": {"solver": {"not": {"enum": ["eigen"]}}},
},
{
"type": "object",
"properties": {"covariance_estimator": {"enum": [None]}},
},
{"type": "object", "properties": {"shrinkage": {"enum": [None, 0]}}},
],
},
set_as_available=True,
)
set_docstrings(LinearDiscriminantAnalysis)
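# A minimal usage sketch, assuming scikit-learn's iris dataset; the trained
# operator supports predict, predict_proba, and transform as declared above.
if __name__ == "__main__":
    from sklearn.datasets import load_iris

    X, y = load_iris(return_X_y=True)
    trained = LinearDiscriminantAnalysis(solver="svd").fit(X, y)
    print(trained.predict(X)[:5])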
| 12,053 | 36.551402 | 189 |
py
|
lale
|
lale-master/lale/lib/autogen/orthogonal_matching_pursuit.py
|
from numpy import inf, nan
from sklearn.linear_model import OrthogonalMatchingPursuit as Op
from lale.docstrings import set_docstrings
from lale.operators import make_operator
class _OrthogonalMatchingPursuitImpl:
def __init__(self, **hyperparams):
self._hyperparams = hyperparams
self._wrapped_model = Op(**self._hyperparams)
def fit(self, X, y=None):
if y is not None:
self._wrapped_model.fit(X, y)
else:
self._wrapped_model.fit(X)
return self
def predict(self, X):
return self._wrapped_model.predict(X)
_hyperparams_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "inherited docstring for OrthogonalMatchingPursuit Orthogonal Matching Pursuit model (OMP)",
"allOf": [
{
"type": "object",
"required": [
"n_nonzero_coefs",
"tol",
"fit_intercept",
"normalize",
"precompute",
],
"relevantToOptimizer": [
"n_nonzero_coefs",
"tol",
"fit_intercept",
"normalize",
"precompute",
],
"additionalProperties": False,
"properties": {
"n_nonzero_coefs": {
"anyOf": [
{
"type": "integer",
"minimumForOptimizer": 500,
"maximumForOptimizer": 501,
"distribution": "uniform",
},
{"enum": [None]},
],
"default": None,
"description": "Desired number of non-zero entries in the solution",
},
"tol": {
"anyOf": [
{
"type": "number",
"minimumForOptimizer": 1e-08,
"maximumForOptimizer": 0.01,
},
{"enum": [None]},
],
"default": None,
"description": "Maximum norm of the residual",
},
"fit_intercept": {
"type": "boolean",
"default": True,
"description": "whether to calculate the intercept for this model",
},
"normalize": {
"type": "boolean",
"default": True,
"description": "This parameter is ignored when ``fit_intercept`` is set to False",
},
"precompute": {
"enum": [True, False, "auto"],
"default": "auto",
"description": "Whether to use a precomputed Gram and Xy matrix to speed up calculations",
},
},
}
],
}
_input_fit_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Fit the model using X, y as training data.",
"type": "object",
"required": ["X", "y"],
"properties": {
"X": {
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
"description": "Training data.",
},
"y": {
"anyOf": [
{"type": "array", "items": {"type": "number"}},
{
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
},
],
"description": "Target values",
},
},
}
_input_predict_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Predict using the linear model",
"type": "object",
"required": ["X"],
"properties": {
"X": {
"anyOf": [
{
"type": "array",
"items": {"laleType": "Any", "XXX TODO XXX": "item type"},
"XXX TODO XXX": "array_like or sparse matrix, shape (n_samples, n_features)",
},
{
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
},
],
"description": "Samples.",
}
},
}
_output_predict_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Returns predicted values.",
"type": "array",
"items": {"type": "number"},
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Combined schema for expected data and hyperparameters.",
"documentation_url": "https://scikit-learn.org/0.20/modules/generated/sklearn.linear_model.OrthogonalMatchingPursuit#sklearn-linear_model-orthogonalmatchingpursuit",
"import_from": "sklearn.linear_model",
"type": "object",
"tags": {"pre": [], "op": ["estimator", "regressor"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
},
}
OrthogonalMatchingPursuit = make_operator(
_OrthogonalMatchingPursuitImpl, _combined_schemas
)
set_docstrings(OrthogonalMatchingPursuit)
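# A minimal usage sketch, assuming scikit-learn's diabetes dataset; with
# n_nonzero_coefs=5 the solution is restricted to five non-zero coefficients.
if __name__ == "__main__":
    from sklearn.datasets import load_diabetes

    X, y = load_diabetes(return_X_y=True)
    trained = OrthogonalMatchingPursuit(n_nonzero_coefs=5).fit(X, y)
    print(trained.predict(X[:3]))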
| 5,485 | 33.2875 | 169 |
py
|
lale
|
lale-master/lale/lib/autogen/bayesian_ridge.py
|
from numpy import inf, nan
from packaging import version
from sklearn.linear_model import BayesianRidge as Op
import lale
from lale.docstrings import set_docstrings
from lale.operators import make_operator, sklearn_version
class _BayesianRidgeImpl:
def __init__(self, **hyperparams):
self._hyperparams = hyperparams
self._wrapped_model = Op(**self._hyperparams)
def fit(self, X, y=None):
if y is not None:
self._wrapped_model.fit(X, y)
else:
self._wrapped_model.fit(X)
return self
def predict(self, X):
return self._wrapped_model.predict(X)
_hyperparams_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "inherited docstring for BayesianRidge Bayesian ridge regression",
"allOf": [
{
"type": "object",
"required": [
"n_iter",
"tol",
"alpha_1",
"alpha_2",
"lambda_1",
"lambda_2",
"compute_score",
"fit_intercept",
"normalize",
"copy_X",
"verbose",
],
"relevantToOptimizer": [
"n_iter",
"tol",
"compute_score",
"fit_intercept",
"normalize",
"copy_X",
],
"additionalProperties": False,
"properties": {
"n_iter": {
"type": "integer",
"minimumForOptimizer": 5,
"maximumForOptimizer": 1000,
"distribution": "uniform",
"default": 300,
"description": "Maximum number of iterations",
},
"tol": {
"type": "number",
"minimumForOptimizer": 1e-08,
"maximumForOptimizer": 0.01,
"default": 0.001,
"description": "Stop the algorithm if w has converged",
},
"alpha_1": {
"type": "number",
"default": 1e-06,
"description": "Hyper-parameter : shape parameter for the Gamma distribution prior over the alpha parameter",
},
"alpha_2": {
"type": "number",
"default": 1e-06,
"description": "Hyper-parameter : inverse scale parameter (rate parameter) for the Gamma distribution prior over the alpha parameter",
},
"lambda_1": {
"type": "number",
"default": 1e-06,
"description": "Hyper-parameter : shape parameter for the Gamma distribution prior over the lambda parameter",
},
"lambda_2": {
"type": "number",
"default": 1e-06,
"description": "Hyper-parameter : inverse scale parameter (rate parameter) for the Gamma distribution prior over the lambda parameter",
},
"compute_score": {
"type": "boolean",
"default": False,
"description": "If True, compute the objective function at each step of the model",
},
"fit_intercept": {
"type": "boolean",
"default": True,
"description": "whether to calculate the intercept for this model",
},
"normalize": {
"type": "boolean",
"default": False,
"description": "This parameter is ignored when ``fit_intercept`` is set to False",
},
"copy_X": {
"type": "boolean",
"default": True,
"description": "If True, X will be copied; else, it may be overwritten.",
},
"verbose": {
"type": "boolean",
"default": False,
"description": "Verbose mode when fitting the model.",
},
},
}
],
}
_input_fit_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Fit the model",
"type": "object",
"required": ["X", "y"],
"properties": {
"X": {
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
"description": "Training data",
},
"y": {
"type": "array",
"items": {"type": "number"},
"description": "Target values",
},
"sample_weight": {
"type": "array",
"items": {"type": "number"},
"description": "Individual weights for each sample ",
},
},
}
_input_predict_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Predict using the linear model.",
"type": "object",
"required": ["X"],
"properties": {
"X": {
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
"description": "Samples.",
},
"return_std": {
"anyOf": [{"type": "boolean"}, {"enum": [None]}],
"default": None,
"description": "Whether to return the standard deviation of posterior prediction.",
},
},
}
_output_predict_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Predict using the linear model.",
"laleType": "Any",
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Combined schema for expected data and hyperparameters.",
"documentation_url": "https://scikit-learn.org/0.20/modules/generated/sklearn.linear_model.BayesianRidge#sklearn-linear_model-bayesianridge",
"import_from": "sklearn.linear_model",
"type": "object",
"tags": {"pre": [], "op": ["estimator", "regressor"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
},
}
BayesianRidge = make_operator(_BayesianRidgeImpl, _combined_schemas)
if sklearn_version >= version.Version("1.2"):
# new: "https://scikit-learn.org/1.2/modules/generated/sklearn.linear_model.BayesianRidge#sklearn-linear_model-bayesianridge"
BayesianRidge = BayesianRidge.customize_schema(
normalize=None,
set_as_available=True,
)
set_docstrings(BayesianRidge)
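# A minimal usage sketch, assuming scikit-learn's diabetes dataset; all
# hyperparameters are left at their schema defaults.
if __name__ == "__main__":
    from sklearn.datasets import load_diabetes

    X, y = load_diabetes(return_X_y=True)
    trained = BayesianRidge().fit(X, y)
    print(trained.predict(X[:3]))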
| 6,809 | 34.842105 | 155 |
py
|
lale
|
lale-master/lale/lib/autogen/bernoulli_rbm.py
|
from numpy import inf, nan
from sklearn.neural_network import BernoulliRBM as Op
from lale.docstrings import set_docstrings
from lale.operators import make_operator
class _BernoulliRBMImpl:
def __init__(self, **hyperparams):
self._hyperparams = hyperparams
self._wrapped_model = Op(**self._hyperparams)
def fit(self, X, y=None):
if y is not None:
self._wrapped_model.fit(X, y)
else:
self._wrapped_model.fit(X)
return self
def transform(self, X):
return self._wrapped_model.transform(X)
_hyperparams_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "inherited docstring for BernoulliRBM Bernoulli Restricted Boltzmann Machine (RBM).",
"allOf": [
{
"type": "object",
"required": [
"n_components",
"learning_rate",
"batch_size",
"n_iter",
"verbose",
"random_state",
],
"relevantToOptimizer": ["n_components", "batch_size", "n_iter"],
"additionalProperties": False,
"properties": {
"n_components": {
"type": "integer",
"minimumForOptimizer": 2,
"maximumForOptimizer": 256,
"distribution": "uniform",
"default": 256,
"description": "Number of binary hidden units.",
},
"learning_rate": {
"type": "number",
"default": 0.1,
"description": "The learning rate for weight updates",
},
"batch_size": {
"type": "integer",
"minimumForOptimizer": 3,
"maximumForOptimizer": 128,
"distribution": "uniform",
"default": 10,
"description": "Number of examples per minibatch.",
},
"n_iter": {
"type": "integer",
"minimumForOptimizer": 5,
"maximumForOptimizer": 1000,
"distribution": "uniform",
"default": 10,
"description": "Number of iterations/sweeps over the training dataset to perform during training.",
},
"verbose": {
"type": "integer",
"default": 0,
"description": "The verbosity level",
},
"random_state": {
"anyOf": [
{"type": "integer"},
{"laleType": "numpy.random.RandomState"},
],
"default": 33,
"description": "A random number generator instance to define the state of the random permutations generator",
},
},
}
],
}
_input_fit_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Fit the model to the data X.",
"type": "object",
"required": ["X"],
"properties": {
"X": {
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
"description": "Training data.",
}
},
}
_input_transform_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Compute the hidden layer activation probabilities, P(h=1|v=X).",
"type": "object",
"required": ["X"],
"properties": {
"X": {
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
"description": "The data to be transformed.",
}
},
}
_output_transform_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Latent representations of the data.",
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Combined schema for expected data and hyperparameters.",
"documentation_url": "https://scikit-learn.org/0.20/modules/generated/sklearn.neural_network.BernoulliRBM#sklearn-neural_network-bernoullirbm",
"import_from": "sklearn.neural_network",
"type": "object",
"tags": {"pre": [], "op": ["transformer"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_transform": _input_transform_schema,
"output_transform": _output_transform_schema,
},
}
BernoulliRBM = make_operator(_BernoulliRBMImpl, _combined_schemas)
set_docstrings(BernoulliRBM)
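# A minimal usage sketch, assuming scikit-learn's digits dataset; pixel values
# are rescaled to [0, 1] since the RBM models binary/probability inputs.
if __name__ == "__main__":
    from sklearn.datasets import load_digits

    X, _ = load_digits(return_X_y=True)
    X = X / 16.0  # rescale 0..16 pixel counts to 0..1
    hidden = BernoulliRBM(n_components=64, n_iter=5).fit(X).transform(X)
    print(hidden.shape)  # expected: (1797, 64)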
| 4,825 | 34.485294 | 147 |
py
|
lale
|
lale-master/lale/lib/autogen/bernoulli_nb.py
|
from numpy import inf, nan
from sklearn.naive_bayes import BernoulliNB as Op
from lale.docstrings import set_docstrings
from lale.operators import make_operator
class _BernoulliNBImpl:
def __init__(self, **hyperparams):
self._hyperparams = hyperparams
self._wrapped_model = Op(**self._hyperparams)
def fit(self, X, y=None):
if y is not None:
self._wrapped_model.fit(X, y)
else:
self._wrapped_model.fit(X)
return self
def predict(self, X):
return self._wrapped_model.predict(X)
def predict_proba(self, X):
return self._wrapped_model.predict_proba(X)
_hyperparams_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "inherited docstring for BernoulliNB Naive Bayes classifier for multivariate Bernoulli models.",
"allOf": [
{
"type": "object",
"required": ["alpha", "binarize", "fit_prior", "class_prior"],
"relevantToOptimizer": ["alpha", "fit_prior", "binarize"],
"additionalProperties": False,
"properties": {
"alpha": {
"type": "number",
"minimumForOptimizer": 1e-10,
"maximumForOptimizer": 1.0,
"distribution": "loguniform",
"default": 1.0,
"description": "Additive (Laplace/Lidstone) smoothing parameter (0 for no smoothing).",
},
"binarize": {
"anyOf": [
{
"type": "number",
"minimumForOptimizer": -1.0,
"maximumForOptimizer": 1.0,
},
{"enum": [None]},
],
"default": 0.0,
"description": "Threshold for binarizing (mapping to booleans) of sample features",
},
"fit_prior": {
"type": "boolean",
"default": True,
"description": "Whether to learn class prior probabilities or not",
},
"class_prior": {
"anyOf": [
{"type": "array", "items": {"type": "number"}},
{"enum": [None]},
],
"default": None,
"description": "Prior probabilities of the classes",
},
},
},
{
"description": "Cannot binarize a sparse matrix with threshold < 0",
"anyOf": [
{"type": "object", "properties": {"binarize": {"enum": [None]}}},
{"type": "object", "laleNot": "X/isSparse"},
{
"type": "object",
"properties": {"binarize": {"type": "number", "minimum": 0}},
},
],
},
],
}
_input_fit_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Fit Naive Bayes classifier according to X, y",
"type": "object",
"required": ["X", "y"],
"properties": {
"X": {
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
"description": "Training vectors, where n_samples is the number of samples and n_features is the number of features.",
},
"y": {
"type": "array",
"items": {"type": "number"},
"description": "Target values.",
},
"sample_weight": {
"anyOf": [{"type": "array", "items": {"type": "number"}}, {"enum": [None]}],
"default": None,
"description": "Weights applied to individual samples (1",
},
},
}
_input_predict_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Perform classification on an array of test vectors X.",
"type": "object",
"required": ["X"],
"properties": {
"X": {"type": "array", "items": {"type": "array", "items": {"type": "number"}}}
},
}
_output_predict_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Predicted target values for X",
"type": "array",
"items": {"type": "number"},
}
_input_predict_proba_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Return probability estimates for the test vector X.",
"type": "object",
"required": ["X"],
"properties": {
"X": {"type": "array", "items": {"type": "array", "items": {"type": "number"}}}
},
}
_output_predict_proba_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Returns the probability of the samples for each class in the model",
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Combined schema for expected data and hyperparameters.",
"documentation_url": "https://scikit-learn.org/0.20/modules/generated/sklearn.naive_bayes.BernoulliNB#sklearn-naive_bayes-bernoullinb",
"import_from": "sklearn.naive_bayes",
"type": "object",
"tags": {"pre": [], "op": ["estimator", "classifier"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
"input_predict_proba": _input_predict_proba_schema,
"output_predict_proba": _output_predict_proba_schema,
},
}
BernoulliNB = make_operator(_BernoulliNBImpl, _combined_schemas)
set_docstrings(BernoulliNB)
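# A minimal usage sketch, assuming scikit-learn's digits dataset; binarize=8.0
# maps each pixel to a boolean feature before fitting the Bernoulli model.
if __name__ == "__main__":
    from sklearn.datasets import load_digits

    X, y = load_digits(return_X_y=True)
    trained = BernoulliNB(alpha=1.0, binarize=8.0).fit(X, y)
    print(trained.predict(X[:5]))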
| 5,832 | 36.152866 | 139 |
py
|
lale
|
lale-master/lale/lib/autogen/rbf_sampler.py
|
from numpy import inf, nan
from sklearn.kernel_approximation import RBFSampler as Op
from lale.docstrings import set_docstrings
from lale.operators import make_operator
class _RBFSamplerImpl:
def __init__(self, **hyperparams):
self._hyperparams = hyperparams
self._wrapped_model = Op(**self._hyperparams)
def fit(self, X, y=None):
if y is not None:
self._wrapped_model.fit(X, y)
else:
self._wrapped_model.fit(X)
return self
def transform(self, X):
return self._wrapped_model.transform(X)
_hyperparams_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "inherited docstring for RBFSampler Approximates feature map of an RBF kernel by Monte Carlo approximation",
"allOf": [
{
"type": "object",
"required": ["gamma", "n_components", "random_state"],
"relevantToOptimizer": ["n_components"],
"additionalProperties": False,
"properties": {
"gamma": {
"type": "number",
"default": 1.0,
"description": "Parameter of RBF kernel: exp(-gamma * x^2)",
},
"n_components": {
"type": "integer",
"minimumForOptimizer": 2,
"maximumForOptimizer": 256,
"distribution": "uniform",
"default": 100,
"description": "Number of Monte Carlo samples per original feature",
},
"random_state": {
"anyOf": [
{"type": "integer"},
{"laleType": "numpy.random.RandomState"},
{"enum": [None]},
],
"default": None,
"description": "If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`.",
},
},
}
],
}
_input_fit_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Fit the model with X.",
"type": "object",
"required": ["X"],
"properties": {
"X": {
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
"description": "Training data, where n_samples in the number of samples and n_features is the number of features.",
}
},
}
_input_transform_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Apply the approximate feature map to X.",
"type": "object",
"required": ["X"],
"properties": {
"X": {
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
"description": "New data, where n_samples in the number of samples and n_features is the number of features.",
}
},
}
_output_transform_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Apply the approximate feature map to X.",
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Combined schema for expected data and hyperparameters.",
"documentation_url": "https://scikit-learn.org/0.20/modules/generated/sklearn.kernel_approximation.RBFSampler#sklearn-kernel_approximation-rbfsampler",
"import_from": "sklearn.kernel_approximation",
"type": "object",
"tags": {"pre": [], "op": ["transformer"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_transform": _input_transform_schema,
"output_transform": _output_transform_schema,
},
}
RBFSampler = make_operator(_RBFSamplerImpl, _combined_schemas)
set_docstrings(RBFSampler)
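# A minimal usage sketch, assuming scikit-learn's iris dataset; the transform
# maps the 4 input features to n_components Monte Carlo features.
if __name__ == "__main__":
    from sklearn.datasets import load_iris

    X, _ = load_iris(return_X_y=True)
    mapped = RBFSampler(gamma=1.0, n_components=100).fit(X).transform(X)
    print(mapped.shape)  # expected: (150, 100)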
| 4,098 | 36.605505 | 263 |
py
|
lale
|
lale-master/lale/lib/autogen/label_propagation.py
|
from numpy import inf, nan
from sklearn.semi_supervised import LabelPropagation as Op
from lale.docstrings import set_docstrings
from lale.operators import make_operator
class _LabelPropagationImpl:
def __init__(self, **hyperparams):
self._hyperparams = hyperparams
self._wrapped_model = Op(**self._hyperparams)
def fit(self, X, y=None):
if y is not None:
self._wrapped_model.fit(X, y)
else:
self._wrapped_model.fit(X)
return self
def predict(self, X):
return self._wrapped_model.predict(X)
def predict_proba(self, X):
return self._wrapped_model.predict_proba(X)
_hyperparams_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "inherited docstring for LabelPropagation Label Propagation classifier",
"allOf": [
{
"type": "object",
"required": [
"kernel",
"gamma",
"n_neighbors",
"max_iter",
"tol",
"n_jobs",
],
"relevantToOptimizer": [
"kernel",
"gamma",
"n_neighbors",
"max_iter",
"tol",
],
"additionalProperties": False,
"properties": {
"kernel": {
"anyOf": [
{"enum": ["knn", "rbf"]},
{"laleType": "callable", "forOptimizer": False},
],
"default": "rbf",
"description": "String identifier for kernel function to use or the kernel function itself",
},
"gamma": {
"type": "number",
"minimumForOptimizer": 0,
"maximumForOptimizer": 20,
"distribution": "uniform",
"default": 20,
"description": "Parameter for rbf kernel",
},
"n_neighbors": {
"XXX TODO XXX": "integer > 0",
"description": "Parameter for knn kernel",
"type": "integer",
"minimumForOptimizer": 5,
"maximumForOptimizer": 20,
"distribution": "uniform",
"default": 7,
},
"max_iter": {
"type": "integer",
"minimumForOptimizer": 10,
"maximumForOptimizer": 1000,
"distribution": "uniform",
"default": 1000,
"description": "Change maximum number of iterations allowed",
},
"tol": {
"type": "number",
"minimumForOptimizer": 1e-08,
"maximumForOptimizer": 0.01,
"default": 0.001,
"description": "Convergence tolerance: threshold to consider the system at steady state",
},
"n_jobs": {
"anyOf": [{"type": "integer"}, {"enum": [None]}],
"default": 1,
"description": "The number of parallel jobs to run",
},
},
},
{
"XXX TODO XXX": "Parameter: kernel > only 'rbf' and 'knn' strings are valid inputs"
},
],
}
_input_fit_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Fit a semi-supervised label propagation model based",
"type": "object",
"required": ["X", "y"],
"properties": {
"X": {
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
"description": "A {n_samples by n_samples} size matrix will be created from this",
},
"y": {
"type": "array",
"items": {"type": "number"},
"description": "n_labeled_samples (unlabeled points are marked as -1) All unlabeled samples will be transductively assigned labels",
},
},
}
_input_predict_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Performs inductive inference across the model.",
"type": "object",
"required": ["X"],
"properties": {
"X": {"type": "array", "items": {"type": "array", "items": {"type": "number"}}}
},
}
_output_predict_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Predictions for input data",
"type": "array",
"items": {"type": "number"},
}
_input_predict_proba_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Predict probability for each possible outcome.",
"type": "object",
"required": ["X"],
"properties": {
"X": {"type": "array", "items": {"type": "array", "items": {"type": "number"}}}
},
}
_output_predict_proba_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Normalized probability distributions across class labels",
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Combined schema for expected data and hyperparameters.",
"documentation_url": "https://scikit-learn.org/0.20/modules/generated/sklearn.semi_supervised.LabelPropagation#sklearn-semi_supervised-labelpropagation",
"import_from": "sklearn.semi_supervised",
"type": "object",
"tags": {"pre": [], "op": ["estimator", "classifier"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
"input_predict_proba": _input_predict_proba_schema,
"output_predict_proba": _output_predict_proba_schema,
},
}
LabelPropagation = make_operator(_LabelPropagationImpl, _combined_schemas)
set_docstrings(LabelPropagation)
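# A minimal usage sketch, assuming scikit-learn's iris dataset; unlabeled
# samples are marked with -1 and receive transductively assigned labels.
if __name__ == "__main__":
    import numpy as np
    from sklearn.datasets import load_iris

    X, y = load_iris(return_X_y=True)
    y_semi = np.where(np.arange(len(y)) % 2 == 0, -1, y)  # hide half the labels
    trained = LabelPropagation(kernel="knn", n_neighbors=7).fit(X, y_semi)
    print(trained.predict(X[:5]))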
| 6,156 | 35.431953 | 157 |
py
|
lale
|
lale-master/lale/lib/autogen/lasso_cv.py
|
from numpy import inf, nan
from packaging import version
from sklearn.linear_model import LassoCV as Op
import lale
from lale.docstrings import set_docstrings
from lale.operators import make_operator, sklearn_version
class _LassoCVImpl:
def __init__(self, **hyperparams):
self._hyperparams = hyperparams
self._wrapped_model = Op(**self._hyperparams)
def fit(self, X, y=None):
if y is not None:
self._wrapped_model.fit(X, y)
else:
self._wrapped_model.fit(X)
return self
def predict(self, X):
return self._wrapped_model.predict(X)
_hyperparams_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "inherited docstring for LassoCV Lasso linear model with iterative fitting along a regularization path.",
"allOf": [
{
"type": "object",
"required": [
"eps",
"n_alphas",
"alphas",
"fit_intercept",
"normalize",
"precompute",
"max_iter",
"tol",
"copy_X",
"cv",
"verbose",
"n_jobs",
"positive",
"random_state",
"selection",
],
"relevantToOptimizer": [
"eps",
"n_alphas",
"fit_intercept",
"normalize",
"precompute",
"max_iter",
"tol",
"copy_X",
"cv",
"positive",
"selection",
],
"additionalProperties": False,
"properties": {
"eps": {
"type": "number",
"minimumForOptimizer": 0.001,
"maximumForOptimizer": 0.1,
"distribution": "loguniform",
"default": 0.001,
"description": "Length of the path",
},
"n_alphas": {
"type": "integer",
"minimumForOptimizer": 100,
"maximumForOptimizer": 101,
"distribution": "uniform",
"default": 100,
"description": "Number of alphas along the regularization path",
},
"alphas": {
"anyOf": [
{
"type": "array",
"items": {"laleType": "Any", "XXX TODO XXX": "item type"},
"XXX TODO XXX": "numpy array, optional",
},
{"enum": [None]},
],
"default": None,
"description": "List of alphas where to compute the models",
},
"fit_intercept": {
"type": "boolean",
"default": True,
"description": "whether to calculate the intercept for this model",
},
"normalize": {
"type": "boolean",
"default": False,
"description": "This parameter is ignored when ``fit_intercept`` is set to False",
},
"precompute": {
"anyOf": [
{
"type": "array",
"items": {"laleType": "Any", "XXX TODO XXX": "item type"},
"XXX TODO XXX": "True | False | 'auto' | array-like",
"forOptimizer": False,
},
{"enum": ["auto"]},
],
"default": "auto",
"description": "Whether to use a precomputed Gram matrix to speed up calculations",
},
"max_iter": {
"type": "integer",
"minimumForOptimizer": 10,
"maximumForOptimizer": 1000,
"distribution": "uniform",
"default": 1000,
"description": "The maximum number of iterations",
},
"tol": {
"type": "number",
"minimumForOptimizer": 1e-08,
"maximumForOptimizer": 0.01,
"default": 0.0001,
"description": "The tolerance for the optimization: if the updates are smaller than ``tol``, the optimization code checks the dual gap for optimality and continues until it is smaller than ``tol``.",
},
"copy_X": {
"type": "boolean",
"default": True,
"description": "If ``True``, X will be copied; else, it may be overwritten.",
},
"cv": {
"description": """Cross-validation as integer or as object that has a split function.
                    The fit method performs cross validation on the input dataset for each
                    trial, and uses the mean cross validation performance for optimization.
                    This behavior is also impacted by the handle_cv_failure flag.
If integer: number of folds in sklearn.model_selection.StratifiedKFold.
If object with split function: generator yielding (train, test) splits
as arrays of indices. Can use any of the iterators from
https://scikit-learn.org/stable/modules/cross_validation.html#cross-validation-iterators.""",
"anyOf": [
{
"type": "integer",
"minimum": 1,
"default": 5,
"minimumForOptimizer": 3,
"maximumForOptimizer": 4,
"distribution": "uniform",
},
{"laleType": "Any", "forOptimizer": False},
],
},
"verbose": {
"anyOf": [{"type": "boolean"}, {"type": "integer"}],
"default": False,
"description": "Amount of verbosity.",
},
"n_jobs": {
"anyOf": [{"type": "integer"}, {"enum": [None]}],
"default": 1,
"description": "Number of CPUs to use during the cross validation",
},
"positive": {
"type": "boolean",
"default": False,
"description": "If positive, restrict regression coefficients to be positive",
},
"random_state": {
"anyOf": [
{"type": "integer"},
{"laleType": "numpy.random.RandomState"},
{"enum": [None]},
],
"default": None,
"description": "The seed of the pseudo random number generator that selects a random feature to update",
},
"selection": {
"enum": ["random", "cyclic"],
"default": "cyclic",
"description": "If set to 'random', a random coefficient is updated every iteration rather than looping over features sequentially by default",
},
},
}
],
}
_input_fit_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Fit linear model with coordinate descent",
"type": "object",
"required": ["X", "y"],
"properties": {
"X": {
"laleType": "Any",
"XXX TODO XXX": "{array-like}, shape (n_samples, n_features)",
"description": "Training data",
},
"y": {
"anyOf": [
{"type": "array", "items": {"type": "number"}},
{
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
},
],
"description": "Target values",
},
},
}
_input_predict_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Predict using the linear model",
"type": "object",
"required": ["X"],
"properties": {
"X": {
"anyOf": [
{
"type": "array",
"items": {"laleType": "Any", "XXX TODO XXX": "item type"},
"XXX TODO XXX": "array_like or sparse matrix, shape (n_samples, n_features)",
},
{
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
},
],
"description": "Samples.",
}
},
}
_output_predict_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Returns predicted values.",
"type": "array",
"items": {"type": "number"},
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Combined schema for expected data and hyperparameters.",
"documentation_url": "https://scikit-learn.org/0.20/modules/generated/sklearn.linear_model.LassoCV#sklearn-linear_model-lassocv",
"import_from": "sklearn.linear_model",
"type": "object",
"tags": {"pre": [], "op": ["estimator", "regressor"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
},
}
LassoCV = make_operator(_LassoCVImpl, _combined_schemas)
if sklearn_version >= version.Version("1.2"):
# new: "https://scikit-learn.org/1.2/modules/generated/sklearn.linear_model.LassoCV#sklearn-linear_model-lassocv"
LassoCV = LassoCV.customize_schema(
normalize=None,
set_as_available=True,
)
set_docstrings(LassoCV)
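# A minimal usage sketch, assuming scikit-learn's diabetes dataset; cv=3 selects
# the regularization strength by 3-fold cross validation along the alpha path.
if __name__ == "__main__":
    from sklearn.datasets import load_diabetes

    X, y = load_diabetes(return_X_y=True)
    trained = LassoCV(cv=3, n_alphas=50).fit(X, y)
    print(trained.predict(X[:3]))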
| 10,342 | 38.030189 | 219 |
py
|
lale
|
lale-master/lale/lib/autogen/pls_canonical.py
|
from numpy import inf, nan
from sklearn.cross_decomposition import PLSCanonical as Op
from lale.docstrings import set_docstrings
from lale.operators import make_operator
class _PLSCanonicalImpl:
def __init__(self, **hyperparams):
self._hyperparams = hyperparams
self._wrapped_model = Op(**self._hyperparams)
def fit(self, X, y=None):
if y is not None:
self._wrapped_model.fit(X, y)
else:
self._wrapped_model.fit(X)
return self
def transform(self, X):
return self._wrapped_model.transform(X)
def predict(self, X):
return self._wrapped_model.predict(X)
_hyperparams_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "inherited docstring for PLSCanonical PLSCanonical implements the 2 blocks canonical PLS of the original Wold",
"allOf": [
{
"type": "object",
"required": [
"n_components",
"scale",
"algorithm",
"max_iter",
"tol",
"copy",
],
"relevantToOptimizer": [
"n_components",
"scale",
"algorithm",
"max_iter",
"tol",
"copy",
],
"additionalProperties": False,
"properties": {
"n_components": {
"type": "integer",
"minimumForOptimizer": 2,
"maximumForOptimizer": 256,
"distribution": "uniform",
"default": 2,
"description": "Number of components to keep",
},
"scale": {
"type": "boolean",
"default": True,
"description": "Option to scale data",
},
"algorithm": {
"XXX TODO XXX": 'string, "nipals" or "svd"',
"description": "The algorithm used to estimate the weights",
"enum": ["nipals", "svd"],
"default": "nipals",
},
"max_iter": {
"XXX TODO XXX": "an integer, (default 500)",
"description": 'the maximum number of iterations of the NIPALS inner loop (used only if algorithm="nipals")',
"type": "integer",
"minimumForOptimizer": 10,
"maximumForOptimizer": 1000,
"distribution": "uniform",
"default": 500,
},
"tol": {
"XXX TODO XXX": "non-negative real, default 1e-06",
"description": "the tolerance used in the iterative algorithm",
"type": "number",
"minimumForOptimizer": 1e-08,
"maximumForOptimizer": 0.01,
"default": 1e-06,
},
"copy": {
"type": "boolean",
"default": True,
"description": "Whether the deflation should be done on a copy",
},
},
},
{"XXX TODO XXX": 'Parameter: max_iter > only if algorithm="nipals")'},
],
}
_input_fit_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Fit model to data.",
"type": "object",
"required": ["X"],
"properties": {
"X": {
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
"description": "Training vectors, where n_samples is the number of samples and n_features is the number of predictors.",
},
"Y": {
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
"description": "Target vectors, where n_samples is the number of samples and n_targets is the number of response variables.",
},
},
}
_input_transform_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Apply the dimension reduction learned on the train data.",
"type": "object",
"required": ["X"],
"properties": {
"X": {
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
"description": "Training vectors, where n_samples is the number of samples and n_features is the number of predictors.",
},
"Y": {
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
"description": "Target vectors, where n_samples is the number of samples and n_targets is the number of response variables.",
},
"copy": {
"type": "boolean",
"default": True,
"description": "Whether to copy X and Y, or perform in-place normalization.",
},
},
}
_output_transform_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Apply the dimension reduction learned on the train data.",
"laleType": "Any",
"XXX TODO XXX": "x_scores if Y is not given, (x_scores, y_scores) otherwise.",
}
_input_predict_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Apply the dimension reduction learned on the train data.",
"type": "object",
"required": ["X"],
"properties": {
"X": {
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
"description": "Training vectors, where n_samples is the number of samples and n_features is the number of predictors.",
},
"copy": {
"type": "boolean",
"default": True,
"description": "Whether to copy X and Y, or perform in-place normalization.",
},
},
}
_output_predict_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Apply the dimension reduction learned on the train data.",
"laleType": "Any",
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Combined schema for expected data and hyperparameters.",
"documentation_url": "https://scikit-learn.org/0.20/modules/generated/sklearn.cross_decomposition.PLSCanonical#sklearn-cross_decomposition-plscanonical",
"import_from": "sklearn.cross_decomposition",
"type": "object",
"tags": {"pre": [], "op": ["transformer", "estimator"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_transform": _input_transform_schema,
"output_transform": _output_transform_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
},
}
PLSCanonical = make_operator(_PLSCanonicalImpl, _combined_schemas)
set_docstrings(PLSCanonical)
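# A minimal usage sketch, assuming scikit-learn's linnerud dataset, which has
# multivariate targets as required by the two-block PLS formulation.
if __name__ == "__main__":
    from sklearn.datasets import load_linnerud

    X, Y = load_linnerud(return_X_y=True)
    trained = PLSCanonical(n_components=2).fit(X, Y)
    print(trained.predict(X[:3]))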
| 7,029 | 36.795699 | 157 |
py
|
lale
|
lale-master/lale/lib/autogen/lasso_lars.py
|
from numpy import inf, nan
from sklearn.linear_model import LassoLars as Op
from lale.docstrings import set_docstrings
from lale.operators import make_operator
class _LassoLarsImpl:
def __init__(self, **hyperparams):
self._hyperparams = hyperparams
self._wrapped_model = Op(**self._hyperparams)
def fit(self, X, y=None):
if y is not None:
self._wrapped_model.fit(X, y)
else:
self._wrapped_model.fit(X)
return self
def predict(self, X):
return self._wrapped_model.predict(X)
_hyperparams_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "inherited docstring for LassoLars Lasso model fit with Least Angle Regression a.k.a. Lars",
"allOf": [
{
"type": "object",
"required": [
"alpha",
"fit_intercept",
"verbose",
"normalize",
"precompute",
"max_iter",
"eps",
"copy_X",
"fit_path",
"positive",
],
"relevantToOptimizer": [
"alpha",
"fit_intercept",
"normalize",
"precompute",
"max_iter",
"eps",
"copy_X",
"positive",
],
"additionalProperties": False,
"properties": {
"alpha": {
"type": "number",
"minimumForOptimizer": 1e-10,
"maximumForOptimizer": 1.0,
"distribution": "loguniform",
"default": 1.0,
"description": "Constant that multiplies the penalty term",
},
"fit_intercept": {
"type": "boolean",
"default": True,
"description": "whether to calculate the intercept for this model",
},
"verbose": {
"anyOf": [{"type": "boolean"}, {"type": "integer"}],
"default": False,
"description": "Sets the verbosity amount",
},
"normalize": {
"type": "boolean",
"default": True,
"description": "This parameter is ignored when ``fit_intercept`` is set to False",
},
"precompute": {
"anyOf": [
{
"type": "array",
"items": {"laleType": "Any", "XXX TODO XXX": "item type"},
"XXX TODO XXX": "True | False | 'auto' | array-like",
"forOptimizer": False,
},
{"enum": ["auto"]},
],
"default": "auto",
"description": "Whether to use a precomputed Gram matrix to speed up calculations",
},
"max_iter": {
"type": "integer",
"minimumForOptimizer": 10,
"maximumForOptimizer": 1000,
"distribution": "uniform",
"default": 500,
"description": "Maximum number of iterations to perform.",
},
"eps": {
"type": "number",
"minimumForOptimizer": 0.001,
"maximumForOptimizer": 0.1,
"distribution": "loguniform",
"default": 2.220446049250313e-16,
"description": "The machine-precision regularization in the computation of the Cholesky diagonal factors",
},
"copy_X": {
"type": "boolean",
"default": True,
"description": "If True, X will be copied; else, it may be overwritten.",
},
"fit_path": {
"type": "boolean",
"default": True,
"description": "If ``True`` the full path is stored in the ``coef_path_`` attribute",
},
"positive": {
"type": "boolean",
"default": False,
"description": "Restrict coefficients to be >= 0",
},
},
},
{
"XXX TODO XXX": "Parameter: positive > only coefficients up to the smallest alpha value (alphas_[alphas_ > 0"
},
],
}
_input_fit_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Fit the model using X, y as training data.",
"type": "object",
"required": ["X", "y"],
"properties": {
"X": {
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
"description": "Training data.",
},
"y": {
"anyOf": [
{"type": "array", "items": {"type": "number"}},
{
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
},
],
"description": "Target values.",
},
"Xy": {
"laleType": "Any",
"XXX TODO XXX": "array-like, shape (n_samples,) or (n_samples, n_targets), optional",
"description": "Xy = np.dot(X.T, y) that can be precomputed",
},
},
}
_input_predict_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Predict using the linear model",
"type": "object",
"required": ["X"],
"properties": {
"X": {
"anyOf": [
{
"type": "array",
"items": {"laleType": "Any", "XXX TODO XXX": "item type"},
"XXX TODO XXX": "array_like or sparse matrix, shape (n_samples, n_features)",
},
{
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
},
],
"description": "Samples.",
}
},
}
_output_predict_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Returns predicted values.",
"type": "array",
"items": {"type": "number"},
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Combined schema for expected data and hyperparameters.",
"documentation_url": "https://scikit-learn.org/0.20/modules/generated/sklearn.linear_model.LassoLars#sklearn-linear_model-lassolars",
"import_from": "sklearn.linear_model",
"type": "object",
"tags": {"pre": [], "op": ["estimator", "regressor"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
},
}
LassoLars = make_operator(_LassoLarsImpl, _combined_schemas)
set_docstrings(LassoLars)
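# A minimal usage sketch, assuming scikit-learn's diabetes dataset; alpha
# controls the strength of the L1 penalty along the LARS path.
if __name__ == "__main__":
    from sklearn.datasets import load_diabetes

    X, y = load_diabetes(return_X_y=True)
    trained = LassoLars(alpha=0.1).fit(X, y)
    print(trained.predict(X[:3]))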
| 7,285 | 35.248756 | 137 |
py
|
lale
|
lale-master/lale/lib/autogen/ridge_classifier_cv.py
|
from numpy import inf, nan
from packaging import version
from sklearn.linear_model import RidgeClassifierCV as Op
import lale
from lale.docstrings import set_docstrings
from lale.operators import make_operator, sklearn_version
class _RidgeClassifierCVImpl:
def __init__(self, **hyperparams):
self._hyperparams = hyperparams
self._wrapped_model = Op(**self._hyperparams)
def fit(self, X, y=None):
if y is not None:
self._wrapped_model.fit(X, y)
else:
self._wrapped_model.fit(X)
return self
def predict(self, X):
return self._wrapped_model.predict(X)
def decision_function(self, X):
return self._wrapped_model.decision_function(X)
_hyperparams_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "inherited docstring for RidgeClassifierCV Ridge classifier with built-in cross-validation.",
"allOf": [
{
"type": "object",
"required": [
"alphas",
"fit_intercept",
"normalize",
"scoring",
"cv",
"class_weight",
"store_cv_values",
],
"relevantToOptimizer": [
"fit_intercept",
"normalize",
"scoring",
"cv",
"store_cv_values",
"class_weight",
],
"additionalProperties": False,
"properties": {
"alphas": {
"type": "array",
"items": {"type": "number"},
"default": [0.1, 1.0, 10.0],
"description": "Array of alpha values to try",
},
"fit_intercept": {
"type": "boolean",
"default": True,
"description": "Whether to calculate the intercept for this model",
},
"normalize": {
"type": "boolean",
"default": False,
"description": "This parameter is ignored when ``fit_intercept`` is set to False",
},
"scoring": {
"anyOf": [
{"laleType": "callable", "forOptimizer": False},
{"enum": ["accuracy", None]},
],
"default": None,
"description": "A string (see model evaluation documentation) or a scorer callable object / function with signature ``scorer(estimator, X, y)``.",
},
"cv": {
"description": """Cross-validation as integer or as object that has a split function.
                    The fit method performs cross validation on the input dataset for each
                    trial, and uses the mean cross validation performance for optimization.
                    This behavior is also impacted by the handle_cv_failure flag.
If integer: number of folds in sklearn.model_selection.StratifiedKFold.
If object with split function: generator yielding (train, test) splits
as arrays of indices. Can use any of the iterators from
https://scikit-learn.org/stable/modules/cross_validation.html#cross-validation-iterators.""",
"anyOf": [
{
"type": "integer",
"minimum": 1,
"default": 5,
"minimumForOptimizer": 3,
"maximumForOptimizer": 4,
"distribution": "uniform",
},
{"laleType": "Any", "forOptimizer": False},
{"enum": [None]},
],
"default": None,
},
"class_weight": {
"XXX TODO XXX": "dict or 'balanced', optional",
"description": "Weights associated with classes in the form ``{class_label: weight}``",
"anyOf": [{"enum": ["balanced"]}, {"enum": [None]}],
"default": None,
},
"store_cv_values": {
"type": "boolean",
"default": False,
"description": "Flag indicating if the cross-validation values corresponding to each alpha should be stored in the ``cv_values_`` attribute (see below)",
},
},
},
{
"XXX TODO XXX": "Parameter: store_cv_values > only compatible with cv=none (i"
},
{
"description": "cv!=None and store_cv_values=True are incompatible",
"anyOf": [
{"type": "object", "properties": {"cv": {"enum": [None]}}},
{
"type": "object",
"properties": {"store_cv_values": {"enum": [False]}},
},
],
},
],
}
_input_fit_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Fit the ridge classifier.",
"type": "object",
"required": ["X", "y"],
"properties": {
"X": {
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
"description": "Training vectors, where n_samples is the number of samples and n_features is the number of features.",
},
"y": {
"type": "array",
"items": {"type": "number"},
"description": "Target values",
},
"sample_weight": {
"anyOf": [
{"type": "number"},
{"type": "array", "items": {"type": "number"}},
],
"description": "Sample weight.",
},
},
}
_input_predict_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Predict class labels for samples in X.",
"type": "object",
"required": ["X"],
"properties": {
"X": {
"anyOf": [
{
"type": "array",
"items": {"laleType": "Any", "XXX TODO XXX": "item type"},
"XXX TODO XXX": "array_like or sparse matrix, shape (n_samples, n_features)",
},
{
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
},
],
"description": "Samples.",
}
},
}
_output_predict_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Predicted class label per sample.",
"type": "array",
"items": {"type": "number"},
}
_input_decision_function_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Predict confidence scores for samples.",
"type": "object",
"required": ["X"],
"properties": {
"X": {
"anyOf": [
{
"type": "array",
"items": {"laleType": "Any", "XXX TODO XXX": "item type"},
"XXX TODO XXX": "array_like or sparse matrix, shape (n_samples, n_features)",
},
{
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
},
],
"description": "Samples.",
}
},
}
_output_decision_function_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Confidence scores per (sample, class) combination",
"laleType": "Any",
"XXX TODO XXX": "array, shape=(n_samples,) if n_classes == 2 else (n_samples, n_classes)",
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Combined schema for expected data and hyperparameters.",
"documentation_url": "https://scikit-learn.org/0.20/modules/generated/sklearn.linear_model.RidgeClassifierCV#sklearn-linear_model-ridgeclassifiercv",
"import_from": "sklearn.linear_model",
"type": "object",
"tags": {"pre": [], "op": ["estimator", "classifier"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
"input_decision_function": _input_decision_function_schema,
"output_decision_function": _output_decision_function_schema,
},
}
RidgeClassifierCV = make_operator(_RidgeClassifierCVImpl, _combined_schemas)
if sklearn_version >= version.Version("1.2"):
# old: https://scikit-learn.org/1.1/modules/generated/sklearn.linear_model.RidgeCV.html
# new: https://scikit-learn.org/1.2/modules/generated/sklearn.linear_model.RidgeCV.html
RidgeClassifierCV = RidgeClassifierCV.customize_schema(
normalize=None,
set_as_available=True,
)
set_docstrings(RidgeClassifierCV)
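if __name__ == "__main__":
    # Hedged usage sketch (editorial addition, not autogen output): fit the
    # wrapped classifier on synthetic labels via the usual Lale fit/predict API.
    from sklearn.datasets import make_classification
    X, y = make_classification(n_samples=120, n_features=6, random_state=0)
    trained = RidgeClassifierCV(alphas=[0.1, 1.0, 10.0]).fit(X, y)
    print(trained.predict(X[:5]))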
| 9,203 | 37.672269 | 173 |
py
|
lale
|
lale-master/lale/lib/autogen/mlp_regressor.py
|
from numpy import inf, nan
from sklearn.neural_network import MLPRegressor as Op
from lale.docstrings import set_docstrings
from lale.operators import make_operator
class _MLPRegressorImpl:
def __init__(self, **hyperparams):
self._hyperparams = hyperparams
self._wrapped_model = Op(**self._hyperparams)
def fit(self, X, y=None):
if y is not None:
self._wrapped_model.fit(X, y)
else:
self._wrapped_model.fit(X)
return self
def predict(self, X):
return self._wrapped_model.predict(X)
_hyperparams_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "inherited docstring for MLPRegressor Multi-layer Perceptron regressor.",
"allOf": [
{
"type": "object",
"required": [
"hidden_layer_sizes",
"activation",
"solver",
"alpha",
"batch_size",
"learning_rate",
"learning_rate_init",
"power_t",
"max_iter",
"shuffle",
"random_state",
"tol",
"verbose",
"warm_start",
"momentum",
"nesterovs_momentum",
"early_stopping",
"validation_fraction",
"beta_1",
"beta_2",
"epsilon",
"n_iter_no_change",
],
"relevantToOptimizer": [
"activation",
"solver",
"alpha",
"batch_size",
"learning_rate",
"max_iter",
"shuffle",
"tol",
"nesterovs_momentum",
"epsilon",
],
"additionalProperties": False,
"properties": {
"hidden_layer_sizes": {
"XXX TODO XXX": "tuple, length = n_layers - 2, default (100,)",
"description": "The ith element represents the number of neurons in the ith hidden layer.",
"type": "array",
"laleType": "tuple",
"default": (100,),
},
"activation": {
"enum": ["identity", "logistic", "tanh", "relu"],
"default": "relu",
"description": "Activation function for the hidden layer",
},
"solver": {
"enum": ["lbfgs", "sgd", "adam"],
"default": "adam",
"description": "The solver for weight optimization",
},
"alpha": {
"type": "number",
"minimumForOptimizer": 1e-10,
"maximumForOptimizer": 1.0,
"distribution": "loguniform",
"default": 0.0001,
"description": "L2 penalty (regularization term) parameter.",
},
"batch_size": {
"anyOf": [
{
"type": "integer",
"minimumForOptimizer": 3,
"maximumForOptimizer": 128,
"distribution": "uniform",
},
{"enum": ["auto"]},
],
"default": "auto",
"description": "Size of minibatches for stochastic optimizers",
},
"learning_rate": {
"enum": ["constant", "invscaling", "adaptive"],
"default": "constant",
"description": "Learning rate schedule for weight updates",
},
"learning_rate_init": {
"type": "number",
"default": 0.001,
"description": "The initial learning rate used",
},
"power_t": {
"type": "number",
"default": 0.5,
"description": "The exponent for inverse scaling learning rate",
},
"max_iter": {
"type": "integer",
"minimumForOptimizer": 10,
"maximumForOptimizer": 1000,
"distribution": "uniform",
"default": 200,
"description": "Maximum number of iterations",
},
"shuffle": {
"type": "boolean",
"default": True,
"description": "Whether to shuffle samples in each iteration",
},
"random_state": {
"anyOf": [
{"type": "integer"},
{"laleType": "numpy.random.RandomState"},
{"enum": [None]},
],
"default": None,
"description": "If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`.",
},
"tol": {
"type": "number",
"minimumForOptimizer": 1e-08,
"maximumForOptimizer": 0.01,
"default": 0.0001,
"description": "Tolerance for the optimization",
},
"verbose": {
"type": "boolean",
"default": False,
"description": "Whether to print progress messages to stdout.",
},
"warm_start": {
"type": "boolean",
"default": False,
"description": "When set to True, reuse the solution of the previous call to fit as initialization, otherwise, just erase the previous solution",
},
"momentum": {
"type": "number",
"default": 0.9,
"description": "Momentum for gradient descent update",
},
"nesterovs_momentum": {
"type": "boolean",
"default": True,
"description": "Whether to use Nesterov's momentum",
},
"early_stopping": {
"type": "boolean",
"default": False,
"description": "Whether to use early stopping to terminate training when validation score is not improving",
},
"validation_fraction": {
"type": "number",
"default": 0.1,
"description": "The proportion of training data to set aside as validation set for early stopping",
},
"beta_1": {
"type": "number",
"default": 0.9,
"description": "Exponential decay rate for estimates of first moment vector in adam, should be in [0, 1)",
},
"beta_2": {
"type": "number",
"default": 0.999,
"description": "Exponential decay rate for estimates of second moment vector in adam, should be in [0, 1)",
},
"epsilon": {
"type": "number",
"minimumForOptimizer": 1e-08,
"maximumForOptimizer": 1.35,
"distribution": "loguniform",
"default": 1e-08,
"description": "Value for numerical stability in adam",
},
"n_iter_no_change": {
"type": "integer",
"default": 10,
"description": "Maximum number of epochs to not meet ``tol`` improvement",
},
},
},
{
"description": "learning_rate, only used when solver='sgd'",
"anyOf": [
{
"type": "object",
"properties": {"learning_rate": {"enum": ["constant"]}},
},
{"type": "object", "properties": {"solver": {"enum": ["sgd"]}}},
],
},
{
"description": "learning_rate_init, only used when solver='sgd' or 'adam'",
"anyOf": [
{
"type": "object",
"properties": {"learning_rate_init": {"enum": [0.001]}},
},
{"type": "object", "properties": {"solver": {"enum": ["sgd", "adam"]}}},
],
},
{
"description": "power_t, only used when solver='sgd'",
"anyOf": [
{"type": "object", "properties": {"power_t": {"enum": [0.5]}}},
{"type": "object", "properties": {"solver": {"enum": ["sgd"]}}},
],
},
{
"description": "shuffle, only used when solver='sgd' or 'adam'",
"anyOf": [
{"type": "object", "properties": {"shuffle": {"enum": [True]}}},
{"type": "object", "properties": {"solver": {"enum": ["sgd", "adam"]}}},
],
},
{
"description": "momentum, only used when solver='sgd'",
"anyOf": [
{"type": "object", "properties": {"momentum": {"enum": [0.9]}}},
{"type": "object", "properties": {"solver": {"enum": ["sgd"]}}},
],
},
{
"XXX TODO XXX": "Parameter: nesterovs_momentum > only used when solver='sgd' and momentum > 0"
},
{
"description": "early_stopping, only effective when solver='sgd' or 'adam'",
"anyOf": [
{"type": "object", "properties": {"early_stopping": {"enum": [False]}}},
{"type": "object", "properties": {"solver": {"enum": ["sgd", "adam"]}}},
],
},
{
"description": "validation_fraction, only used if early_stopping is true",
"anyOf": [
{
"type": "object",
"properties": {"validation_fraction": {"enum": [0.1]}},
},
{"type": "object", "properties": {"early_stopping": {"enum": [True]}}},
],
},
{
"description": "beta_1, only used when solver='adam'",
"anyOf": [
{"type": "object", "properties": {"beta_1": {"enum": [0.9]}}},
{"type": "object", "properties": {"solver": {"enum": ["adam"]}}},
],
},
{
"description": "beta_2, only used when solver='adam'",
"anyOf": [
{"type": "object", "properties": {"beta_2": {"enum": [0.999]}}},
{"type": "object", "properties": {"solver": {"enum": ["adam"]}}},
],
},
{
"description": "epsilon, only used when solver='adam'",
"anyOf": [
{"type": "object", "properties": {"epsilon": {"enum": [1e-08]}}},
{"type": "object", "properties": {"solver": {"enum": ["adam"]}}},
],
},
{
"description": "n_iter_no_change, only effective when solver='sgd' or 'adam' ",
"anyOf": [
{"type": "object", "properties": {"n_iter_no_change": {"enum": [10]}}},
{"type": "object", "properties": {"solver": {"enum": ["sgd", "adam"]}}},
],
},
],
}
_input_fit_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Fit the model to data matrix X and target(s) y.",
"type": "object",
"required": ["X", "y"],
"properties": {
"X": {
"anyOf": [
{
"type": "array",
"items": {"laleType": "Any", "XXX TODO XXX": "item type"},
"XXX TODO XXX": "array-like or sparse matrix, shape (n_samples, n_features)",
},
{
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
},
],
"description": "The input data.",
},
"y": {
"anyOf": [
{"type": "array", "items": {"type": "number"}},
{
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
},
],
"description": "The target values (class labels in classification, real numbers in regression).",
},
},
}
_input_predict_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Predict using the multi-layer perceptron model.",
"type": "object",
"required": ["X"],
"properties": {
"X": {
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
"description": "The input data.",
}
},
}
_output_predict_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "The predicted values.",
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Combined schema for expected data and hyperparameters.",
"documentation_url": "https://scikit-learn.org/0.20/modules/generated/sklearn.neural_network.MLPRegressor#sklearn-neural_network-mlpregressor",
"import_from": "sklearn.neural_network",
"type": "object",
"tags": {"pre": [], "op": ["estimator", "regressor"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
},
}
MLPRegressor = make_operator(_MLPRegressorImpl, _combined_schemas)
set_docstrings(MLPRegressor)
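if __name__ == "__main__":
    # Hedged usage sketch (editorial addition, not autogen output): a small
    # regression fit; the low max_iter keeps the demo fast and may raise a
    # convergence warning.
    from sklearn.datasets import make_regression
    X, y = make_regression(n_samples=100, n_features=5, random_state=0)
    trained = MLPRegressor(hidden_layer_sizes=(20,), max_iter=200).fit(X, y)
    print(trained.predict(X[:3]))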
| 14,379 | 38.505495 | 263 |
py
|
lale
|
lale-master/lale/lib/autogen/huber_regressor.py
|
from numpy import inf, nan
from sklearn.linear_model import HuberRegressor as Op
from lale.docstrings import set_docstrings
from lale.operators import make_operator
class _HuberRegressorImpl:
def __init__(self, **hyperparams):
self._hyperparams = hyperparams
self._wrapped_model = Op(**self._hyperparams)
def fit(self, X, y=None):
if y is not None:
self._wrapped_model.fit(X, y)
else:
self._wrapped_model.fit(X)
return self
def predict(self, X):
return self._wrapped_model.predict(X)
_hyperparams_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "inherited docstring for HuberRegressor Linear regression model that is robust to outliers.",
"allOf": [
{
"type": "object",
"required": [
"epsilon",
"max_iter",
"alpha",
"warm_start",
"fit_intercept",
"tol",
],
"relevantToOptimizer": [
"epsilon",
"max_iter",
"alpha",
"fit_intercept",
"tol",
],
"additionalProperties": False,
"properties": {
"epsilon": {
"XXX TODO XXX": "float, greater than 1.0, default 1.35",
"description": "The parameter epsilon controls the number of samples that should be classified as outliers",
"type": "number",
"minimumForOptimizer": 1.0,
"maximumForOptimizer": 2.0,
"distribution": "uniform",
"default": 1.35,
},
"max_iter": {
"type": "integer",
"minimumForOptimizer": 10,
"maximumForOptimizer": 1000,
"distribution": "uniform",
"default": 100,
"description": "Maximum number of iterations that scipy.optimize.fmin_l_bfgs_b should run for.",
},
"alpha": {
"type": "number",
"minimumForOptimizer": 1e-10,
"maximumForOptimizer": 1.0,
"distribution": "loguniform",
"default": 0.0001,
"description": "Regularization parameter.",
},
"warm_start": {
"type": "boolean",
"default": False,
"description": "This is useful if the stored attributes of a previously used model has to be reused",
},
"fit_intercept": {
"type": "boolean",
"default": True,
"description": "Whether or not to fit the intercept",
},
"tol": {
"type": "number",
"minimumForOptimizer": 1e-08,
"maximumForOptimizer": 0.01,
"default": 1e-05,
"description": "The iteration will stop when ``max{|proj g_i | i = 1, ..., n}`` <= ``tol`` where pg_i is the i-th component of the projected gradient.",
},
},
}
],
}
_input_fit_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Fit the model according to the given training data.",
"type": "object",
"required": ["X", "y"],
"properties": {
"X": {
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
"description": "Training vector, where n_samples in the number of samples and n_features is the number of features.",
},
"y": {
"type": "array",
"items": {"type": "number"},
"description": "Target vector relative to X.",
},
"sample_weight": {
"type": "array",
"items": {"type": "number"},
"description": "Weight given to each sample.",
},
},
}
_input_predict_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Predict using the linear model",
"type": "object",
"required": ["X"],
"properties": {
"X": {
"anyOf": [
{
"type": "array",
"items": {"laleType": "Any", "XXX TODO XXX": "item type"},
"XXX TODO XXX": "array_like or sparse matrix, shape (n_samples, n_features)",
},
{
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
},
],
"description": "Samples.",
}
},
}
_output_predict_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Returns predicted values.",
"type": "array",
"items": {"type": "number"},
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Combined schema for expected data and hyperparameters.",
"documentation_url": "https://scikit-learn.org/0.20/modules/generated/sklearn.linear_model.HuberRegressor#sklearn-linear_model-huberregressor",
"import_from": "sklearn.linear_model",
"type": "object",
"tags": {"pre": [], "op": ["estimator", "regressor"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
},
}
HuberRegressor = make_operator(_HuberRegressorImpl, _combined_schemas)
set_docstrings(HuberRegressor)
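if __name__ == "__main__":
    # Hedged usage sketch (editorial addition, not autogen output): robust
    # regression on data with a few injected outliers.
    from sklearn.datasets import make_regression
    X, y = make_regression(n_samples=100, n_features=4, random_state=0)
    y[:5] += 500.0  # outliers that the Huber loss should down-weight
    trained = HuberRegressor(epsilon=1.35).fit(X, y)
    print(trained.predict(X[:3]))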
| 5,833 | 35.236025 | 172 |
py
|
lale
|
lale-master/lale/lib/autogen/lasso_lars_cv.py
|
from numpy import inf, nan
from sklearn.linear_model import LassoLarsCV as Op
from lale.docstrings import set_docstrings
from lale.operators import make_operator
class _LassoLarsCVImpl:
def __init__(self, **hyperparams):
self._hyperparams = hyperparams
self._wrapped_model = Op(**self._hyperparams)
def fit(self, X, y=None):
if y is not None:
self._wrapped_model.fit(X, y)
else:
self._wrapped_model.fit(X)
return self
def predict(self, X):
return self._wrapped_model.predict(X)
_hyperparams_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "inherited docstring for LassoLarsCV Cross-validated Lasso, using the LARS algorithm.",
"allOf": [
{
"type": "object",
"required": [
"fit_intercept",
"verbose",
"max_iter",
"normalize",
"precompute",
"cv",
"max_n_alphas",
"n_jobs",
"eps",
"copy_X",
"positive",
],
"relevantToOptimizer": [
"fit_intercept",
"max_iter",
"normalize",
"precompute",
"cv",
"max_n_alphas",
"eps",
"copy_X",
"positive",
],
"additionalProperties": False,
"properties": {
"fit_intercept": {
"type": "boolean",
"default": True,
"description": "whether to calculate the intercept for this model",
},
"verbose": {
"anyOf": [{"type": "boolean"}, {"type": "integer"}],
"default": False,
"description": "Sets the verbosity amount",
},
"max_iter": {
"type": "integer",
"minimumForOptimizer": 10,
"maximumForOptimizer": 1000,
"distribution": "uniform",
"default": 500,
"description": "Maximum number of iterations to perform.",
},
"normalize": {
"type": "boolean",
"default": True,
"description": "This parameter is ignored when ``fit_intercept`` is set to False",
},
"precompute": {
"enum": [True, False, "auto"],
"default": "auto",
"description": "Whether to use a precomputed Gram matrix to speed up calculations",
},
"cv": {
"description": """Cross-validation as integer or as object that has a split function.
The fit method performs cross validation on the input dataset for each
trial, and uses the mean cross validation performance for optimization.
This behavior is also impacted by handle_cv_failure flag.
If integer: number of folds in sklearn.model_selection.StratifiedKFold.
If object with split function: generator yielding (train, test) splits
as arrays of indices. Can use any of the iterators from
https://scikit-learn.org/stable/modules/cross_validation.html#cross-validation-iterators.""",
"anyOf": [
{
"type": "integer",
"minimum": 1,
"default": 5,
"minimumForOptimizer": 3,
"maximumForOptimizer": 4,
"distribution": "uniform",
},
{"laleType": "Any", "forOptimizer": False},
],
},
"max_n_alphas": {
"type": "integer",
"minimumForOptimizer": 1000,
"maximumForOptimizer": 1001,
"distribution": "uniform",
"default": 1000,
"description": "The maximum number of points on the path used to compute the residuals in the cross-validation",
},
"n_jobs": {
"anyOf": [{"type": "integer"}, {"enum": [None]}],
"default": 1,
"description": "Number of CPUs to use during the cross validation",
},
"eps": {
"type": "number",
"minimumForOptimizer": 0.001,
"maximumForOptimizer": 0.1,
"distribution": "loguniform",
"default": 2.220446049250313e-16,
"description": "The machine-precision regularization in the computation of the Cholesky diagonal factors",
},
"copy_X": {
"type": "boolean",
"default": True,
"description": "If True, X will be copied; else, it may be overwritten.",
},
"positive": {
"type": "boolean",
"default": False,
"description": "Restrict coefficients to be >= 0",
},
},
},
{"XXX TODO XXX": "Parameter: precompute > only subsets of x"},
{
"XXX TODO XXX": "Parameter: positive > only coefficients up to the smallest alpha value (alphas_[alphas_ > 0"
},
],
}
_input_fit_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Fit the model using X, y as training data.",
"type": "object",
"required": ["X", "y"],
"properties": {
"X": {
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
"description": "Training data.",
},
"y": {
"type": "array",
"items": {"type": "number"},
"description": "Target values.",
},
},
}
_input_predict_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Predict using the linear model",
"type": "object",
"required": ["X"],
"properties": {
"X": {
"anyOf": [
{
"type": "array",
"items": {"laleType": "Any", "XXX TODO XXX": "item type"},
"XXX TODO XXX": "array_like or sparse matrix, shape (n_samples, n_features)",
},
{
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
},
],
"description": "Samples.",
}
},
}
_output_predict_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Returns predicted values.",
"type": "array",
"items": {"type": "number"},
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Combined schema for expected data and hyperparameters.",
"documentation_url": "https://scikit-learn.org/0.20/modules/generated/sklearn.linear_model.LassoLarsCV#sklearn-linear_model-lassolarscv",
"import_from": "sklearn.linear_model",
"type": "object",
"tags": {"pre": [], "op": ["estimator", "regressor"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
},
}
LassoLarsCV = make_operator(_LassoLarsCVImpl, _combined_schemas)
set_docstrings(LassoLarsCV)
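if __name__ == "__main__":
    # Hedged usage sketch (editorial addition, not autogen output):
    # cross-validated alpha selection; cv=3 keeps the folds small for the demo.
    from sklearn.datasets import make_regression
    X, y = make_regression(n_samples=100, n_features=8, random_state=0)
    trained = LassoLarsCV(cv=3).fit(X, y)
    print(trained.predict(X[:3]))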
| 7,973 | 37.521739 | 141 |
py
|
lale
|
lale-master/lale/lib/autogen/ransac_regressor.py
|
from numpy import inf, nan
from packaging import version
from sklearn.linear_model import RANSACRegressor as Op
import lale
from lale.docstrings import set_docstrings
from lale.operators import make_operator, sklearn_version
class _RANSACRegressorImpl:
def __init__(self, **hyperparams):
self._hyperparams = hyperparams
self._wrapped_model = Op(**self._hyperparams)
def fit(self, X, y=None):
if y is not None:
self._wrapped_model.fit(X, y)
else:
self._wrapped_model.fit(X)
return self
def predict(self, X):
return self._wrapped_model.predict(X)
_hyperparams_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "inherited docstring for RANSACRegressor RANSAC (RANdom SAmple Consensus) algorithm.",
"allOf": [
{
"type": "object",
"required": [
"base_estimator",
"min_samples",
"residual_threshold",
"is_data_valid",
"is_model_valid",
"max_trials",
"max_skips",
"stop_n_inliers",
"stop_score",
"stop_probability",
"loss",
"random_state",
],
"relevantToOptimizer": [
"min_samples",
"max_trials",
"max_skips",
"stop_n_inliers",
"loss",
],
"additionalProperties": False,
"properties": {
"base_estimator": {
"anyOf": [{"type": "object"}, {"enum": [None]}],
"default": None,
"description": "Base estimator object which implements the following methods: * `fit(X, y)`: Fit model to given training data and target values",
},
"min_samples": {
"XXX TODO XXX": "int (>= 1) or float ([0, 1]), optional",
"description": "Minimum number of samples chosen randomly from original data",
"anyOf": [
{
"type": "number",
"minimumForOptimizer": 0.0,
"maximumForOptimizer": 1.0,
"distribution": "uniform",
},
{"enum": [None]},
],
"default": None,
},
"residual_threshold": {
"anyOf": [{"type": "number"}, {"enum": [None]}],
"default": None,
"description": "Maximum residual for a data sample to be classified as an inlier",
},
"is_data_valid": {
"anyOf": [{"laleType": "callable"}, {"enum": [None]}],
"default": None,
"description": "This function is called with the randomly selected data before the model is fitted to it: `is_data_valid(X, y)`",
},
"is_model_valid": {
"anyOf": [{"laleType": "callable"}, {"enum": [None]}],
"default": None,
"description": "This function is called with the estimated model and the randomly selected data: `is_model_valid(model, X, y)`",
},
"max_trials": {
"type": "integer",
"minimumForOptimizer": 100,
"maximumForOptimizer": 101,
"distribution": "uniform",
"default": 100,
"description": "Maximum number of iterations for random sample selection.",
},
"max_skips": {
"anyOf": [
{"type": "integer", "forOptimizer": False},
{
"type": "number",
"minimumForOptimizer": 0.0,
"maximumForOptimizer": 1.0,
"distribution": "uniform",
},
],
"default": inf,
"description": "Maximum number of iterations that can be skipped due to finding zero inliers or invalid data defined by ``is_data_valid`` or invalid models defined by ``is_model_valid``",
},
"stop_n_inliers": {
"anyOf": [
{"type": "integer", "forOptimizer": False},
{
"type": "number",
"minimumForOptimizer": 0.0,
"maximumForOptimizer": 1.0,
"distribution": "uniform",
},
],
"default": inf,
"description": "Stop iteration if at least this number of inliers are found.",
},
"stop_score": {
"type": "number",
"default": inf,
"description": "Stop iteration if score is greater equal than this threshold.",
},
"stop_probability": {
"XXX TODO XXX": "float in range [0, 1], optional",
"description": "RANSAC iteration stops if at least one outlier-free set of the training data is sampled in RANSAC",
"type": "number",
"default": 0.99,
},
"loss": {
"anyOf": [
{"laleType": "callable", "forOptimizer": False},
{
"enum": [
"absolute_loss",
"squared_loss",
]
},
],
"default": "absolute_loss",
"description": 'String inputs, "absolute_loss" and "squared_loss" are supported which find the absolute loss and squared loss per sample respectively',
},
"random_state": {
"anyOf": [
{"type": "integer"},
{"laleType": "numpy.random.RandomState"},
{"enum": [None]},
],
"default": None,
"description": "The generator used to initialize the centers",
},
},
},
{
"XXX TODO XXX": "Parameter: base_estimator > only supports regression estimators"
},
{
"XXX TODO XXX": "Parameter: is_model_valid > only be used if the estimated model is needed for making the rejection decision"
},
],
}
_input_fit_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Fit estimator using RANSAC algorithm.",
"type": "object",
"required": ["X", "y"],
"properties": {
"X": {
"anyOf": [
{
"type": "array",
"items": {"laleType": "Any", "XXX TODO XXX": "item type"},
"XXX TODO XXX": "array-like or sparse matrix, shape [n_samples, n_features]",
},
{
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
},
],
"description": "Training data.",
},
"y": {
"anyOf": [
{"type": "array", "items": {"type": "number"}},
{
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
},
],
"description": "Target values.",
},
"sample_weight": {
"type": "array",
"items": {"type": "number"},
"description": "Individual weights for each sample raises error if sample_weight is passed and base_estimator fit method does not support it.",
},
},
}
_input_predict_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Predict using the estimated model.",
"type": "object",
"required": ["X"],
"properties": {
"X": {"type": "array", "items": {"type": "array", "items": {"type": "number"}}}
},
}
_output_predict_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Returns predicted values.",
"anyOf": [
{"type": "array", "items": {"type": "number"}},
{"type": "array", "items": {"type": "array", "items": {"type": "number"}}},
],
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Combined schema for expected data and hyperparameters.",
"documentation_url": "https://scikit-learn.org/0.20/modules/generated/sklearn.linear_model.RANSACRegressor#sklearn-linear_model-ransacregressor",
"import_from": "sklearn.linear_model",
"type": "object",
"tags": {"pre": [], "op": ["estimator", "regressor"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
},
}
RANSACRegressor = make_operator(_RANSACRegressorImpl, _combined_schemas)
if sklearn_version >= version.Version("1.2"):
# new: "https://scikit-learn.org/1.2/modules/generated/sklearn.linear_model.RANSACRegressor#sklearn-linear_model-ransacregressor"
RANSACRegressor = RANSACRegressor.customize_schema(
loss={
"anyOf": [
{"laleType": "callable", "forOptimizer": False},
{
"enum": [
"absolute_error",
"squared_error",
]
},
],
"default": "absolute_error",
"description": 'String inputs, "absolute_error" and "squared_error" are supported which find the absolute error and squared error per sample respectively',
},
max_skips={
"anyOf": [
{"type": "integer", "forOptimizer": False},
{"enum": [inf]},
],
"default": inf,
"description": "Maximum number of iterations that can be skipped due to finding zero inliers or invalid data defined by ``is_data_valid`` or invalid models defined by ``is_model_valid``",
},
stop_n_inliers={
"anyOf": [
{"type": "integer", "forOptimizer": False},
{"enum": [inf]},
],
"default": inf,
"description": "Stop iteration if at least this number of inliers are found.",
},
set_as_available=True,
)
set_docstrings(RANSACRegressor)
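if __name__ == "__main__":
    # Hedged usage sketch (editorial addition, not autogen output): RANSAC fit
    # on data where a few targets are corrupted, so the consensus model should
    # ignore them.
    from sklearn.datasets import make_regression
    X, y = make_regression(n_samples=100, n_features=3, random_state=0)
    y[:10] += 1000.0  # corrupted samples treated as outliers
    trained = RANSACRegressor(random_state=0).fit(X, y)
    print(trained.predict(X[:3]))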
| 11,063 | 39.527473 | 207 |
py
|
lale
|
lale-master/lale/lib/autogen/sparse_random_projection.py
|
from numpy import inf, nan
from sklearn.random_projection import SparseRandomProjection as Op
from lale.docstrings import set_docstrings
from lale.operators import make_operator
class _SparseRandomProjectionImpl:
def __init__(self, **hyperparams):
self._hyperparams = hyperparams
self._wrapped_model = Op(**self._hyperparams)
def fit(self, X, y=None):
if y is not None:
self._wrapped_model.fit(X, y)
else:
self._wrapped_model.fit(X)
return self
def transform(self, X):
return self._wrapped_model.transform(X)
_hyperparams_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "inherited docstring for SparseRandomProjection Reduce dimensionality through sparse random projection",
"allOf": [
{
"type": "object",
"required": [
"n_components",
"density",
"eps",
"dense_output",
"random_state",
],
"relevantToOptimizer": ["n_components", "eps", "dense_output", "density"],
"additionalProperties": False,
"properties": {
"n_components": {
"XXX TODO XXX": "int or 'auto', optional (default = 'auto')",
"description": "Dimensionality of the target projection space",
"anyOf": [
{
"type": "integer",
"minimumForOptimizer": 2,
"maximumForOptimizer": 256,
"distribution": "uniform",
},
{"enum": ["auto"]},
],
"default": "auto",
},
"density": {
"XXX TODO XXX": "float in range ]0, 1], optional (default='auto')",
"description": "Ratio of non-zero component in the random projection matrix",
"anyOf": [
{"enum": ["auto"]},
{
"type": "number",
"minimumForOptimizer": 0,
"maximumForOptimizer": 1,
"distribution": "uniform",
},
],
"default": "auto",
},
"eps": {
"XXX TODO XXX": "strictly positive float, optional, (default=0.1)",
"description": "Parameter to control the quality of the embedding according to the Johnson-Lindenstrauss lemma when n_components is set to 'auto'",
"type": "number",
"minimumForOptimizer": 0.001,
"maximumForOptimizer": 0.1,
"distribution": "loguniform",
"default": 0.1,
},
"dense_output": {
"type": "boolean",
"default": False,
"description": "If True, ensure that the output of the random projection is a dense numpy array even if the input and random projection matrix are both sparse",
},
"random_state": {
"anyOf": [
{"type": "integer"},
{"laleType": "numpy.random.RandomState"},
{"enum": [None]},
],
"default": None,
"description": "Control the pseudo random number generator used to generate the matrix at fit time",
},
},
}
],
}
_input_fit_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Generate a sparse random projection matrix",
"type": "object",
"required": ["X", "y"],
"properties": {
"X": {
"anyOf": [
{
"type": "array",
"items": {"laleType": "Any", "XXX TODO XXX": "item type"},
"XXX TODO XXX": "numpy array or scipy.sparse of shape [n_samples, n_features]",
},
{
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
},
],
"description": "Training set: only the shape is used to find optimal random matrix dimensions based on the theory referenced in the afore mentioned papers.",
},
"y": {"laleType": "Any", "XXX TODO XXX": "", "description": "Ignored"},
},
}
_input_transform_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Project the data by using matrix product with the random matrix",
"type": "object",
"required": ["X"],
"properties": {
"X": {
"anyOf": [
{
"type": "array",
"items": {"laleType": "Any", "XXX TODO XXX": "item type"},
"XXX TODO XXX": "numpy array or scipy.sparse of shape [n_samples, n_features]",
},
{
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
},
],
"description": "The input data to project into a smaller dimensional space.",
}
},
}
_output_transform_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Projected array.",
"anyOf": [
{
"type": "array",
"items": {"laleType": "Any", "XXX TODO XXX": "item type"},
"XXX TODO XXX": "numpy array or scipy sparse of shape [n_samples, n_components]",
},
{"type": "array", "items": {"type": "array", "items": {"type": "number"}}},
],
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Combined schema for expected data and hyperparameters.",
"documentation_url": "https://scikit-learn.org/0.20/modules/generated/sklearn.random_projection.SparseRandomProjection#sklearn-random_projection-sparserandomprojection",
"import_from": "sklearn.random_projection",
"type": "object",
"tags": {"pre": [], "op": ["transformer"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_transform": _input_transform_schema,
"output_transform": _output_transform_schema,
},
}
SparseRandomProjection = make_operator(_SparseRandomProjectionImpl, _combined_schemas)
set_docstrings(SparseRandomProjection)
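if __name__ == "__main__":
    # Hedged usage sketch (editorial addition, not autogen output): project
    # 100-dimensional data down to 10 components; n_components is fixed here
    # because the 'auto' Johnson-Lindenstrauss bound needs far more samples
    # than this toy input provides.
    import numpy as np
    X = np.random.RandomState(0).rand(50, 100)
    trained = SparseRandomProjection(n_components=10).fit(X)
    print(trained.transform(X).shape)  # expected: (50, 10)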
| 6,749 | 38.940828 | 180 |
py
|
lale
|
lale-master/lale/lib/autogen/birch.py
|
from numpy import inf, nan
from sklearn.cluster import Birch as Op
from lale.docstrings import set_docstrings
from lale.operators import make_operator
class _BirchImpl:
def __init__(self, **hyperparams):
self._hyperparams = hyperparams
self._wrapped_model = Op(**self._hyperparams)
def fit(self, X, y=None):
if y is not None:
self._wrapped_model.fit(X, y)
else:
self._wrapped_model.fit(X)
return self
def transform(self, X):
return self._wrapped_model.transform(X)
def predict(self, X):
return self._wrapped_model.predict(X)
_hyperparams_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "inherited docstring for Birch Implements the Birch clustering algorithm.",
"allOf": [
{
"type": "object",
"required": [
"threshold",
"branching_factor",
"n_clusters",
"compute_labels",
"copy",
],
"relevantToOptimizer": [
"branching_factor",
"n_clusters",
"compute_labels",
"copy",
],
"additionalProperties": False,
"properties": {
"threshold": {
"type": "number",
"default": 0.5,
"description": "The radius of the subcluster obtained by merging a new sample and the closest subcluster should be lesser than the threshold",
},
"branching_factor": {
"type": "integer",
"minimumForOptimizer": 50,
"maximumForOptimizer": 51,
"distribution": "uniform",
"default": 50,
"description": "Maximum number of CF subclusters in each node",
},
"n_clusters": {
"XXX TODO XXX": "int, instance of sklearn.cluster model, default 3",
"description": "Number of clusters after the final clustering step, which treats the subclusters from the leaves as new samples",
"type": "integer",
"minimumForOptimizer": 2,
"maximumForOptimizer": 8,
"distribution": "uniform",
"default": 3,
},
"compute_labels": {
"type": "boolean",
"default": True,
"description": "Whether or not to compute labels for each fit.",
},
"copy": {
"type": "boolean",
"default": True,
"description": "Whether or not to make a copy of the given data",
},
},
}
],
}
_input_fit_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Build a CF Tree for the input data.",
"type": "object",
"required": ["X", "y"],
"properties": {
"X": {
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
"description": "Input data.",
},
"y": {},
},
}
_input_transform_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Transform X into subcluster centroids dimension.",
"type": "object",
"required": ["X"],
"properties": {
"X": {
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
"description": "Input data.",
}
},
}
_output_transform_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Transformed data.",
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
}
_input_predict_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Predict data using the ``centroids_`` of subclusters.",
"type": "object",
"required": ["X"],
"properties": {
"X": {
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
"description": "Input data.",
}
},
}
_output_predict_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Labelled data.",
"laleType": "Any",
"XXX TODO XXX": "ndarray, shape(n_samples)",
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Combined schema for expected data and hyperparameters.",
"documentation_url": "https://scikit-learn.org/0.20/modules/generated/sklearn.cluster.Birch#sklearn-cluster-birch",
"import_from": "sklearn.cluster",
"type": "object",
"tags": {"pre": [], "op": ["transformer", "estimator"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_transform": _input_transform_schema,
"output_transform": _output_transform_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
},
}
Birch = make_operator(_BirchImpl, _combined_schemas)
set_docstrings(Birch)
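if __name__ == "__main__":
    # Hedged usage sketch (editorial addition, not autogen output): cluster
    # three synthetic blobs, then read back predicted labels and the
    # subcluster-distance features from transform.
    from sklearn.datasets import make_blobs
    X, _ = make_blobs(n_samples=90, centers=3, random_state=0)
    trained = Birch(n_clusters=3).fit(X)
    print(trained.predict(X[:5]))
    print(trained.transform(X[:5]).shape)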
| 5,312 | 33.277419 | 162 |
py
|
lale
|
lale-master/lale/lib/autogen/power_transformer.py
|
from numpy import inf, nan
from sklearn.preprocessing import PowerTransformer as Op
from lale.docstrings import set_docstrings
from lale.operators import make_operator
class _PowerTransformerImpl:
def __init__(self, **hyperparams):
self._hyperparams = hyperparams
self._wrapped_model = Op(**self._hyperparams)
def fit(self, X, y=None):
if y is not None:
self._wrapped_model.fit(X, y)
else:
self._wrapped_model.fit(X)
return self
def transform(self, X):
return self._wrapped_model.transform(X)
_hyperparams_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "inherited docstring for PowerTransformer Apply a power transform featurewise to make data more Gaussian-like.",
"allOf": [
{
"type": "object",
"required": ["method", "standardize", "copy"],
"relevantToOptimizer": ["method", "standardize"],
"additionalProperties": False,
"properties": {
"method": {
"enum": ["yeo-johnson", "box-cox"],
"default": "yeo-johnson",
"description": "The power transform method",
},
"standardize": {
"type": "boolean",
"default": True,
"description": "Set to True to apply zero-mean, unit-variance normalization to the transformed output.",
},
"copy": {
"type": "boolean",
"default": True,
"description": "Set to False to perform inplace computation during transformation.",
},
},
},
{
"XXX TODO XXX": "Parameter: method > only works with strictly positive values"
},
{
"description": "FA sparse matrix was passed, but dense data is required. Use X.toarray() to convert to a dense numpy array.",
"type": "object",
"laleNot": "X/isSparse",
},
],
}
_input_fit_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Estimate the optimal parameter lambda for each feature.",
"type": "object",
"required": ["X", "y"],
"properties": {
"X": {
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
"description": "The data used to estimate the optimal transformation parameters.",
},
"y": {},
},
}
_input_transform_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Apply the power transform to each feature using the fitted lambdas.",
"type": "object",
"required": ["X"],
"properties": {
"X": {
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
"description": "The data to be transformed using a power transformation.",
}
},
}
_output_transform_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "The transformed data.",
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Combined schema for expected data and hyperparameters.",
"documentation_url": "https://scikit-learn.org/0.20/modules/generated/sklearn.preprocessing.PowerTransformer#sklearn-preprocessing-powertransformer",
"import_from": "sklearn.preprocessing",
"type": "object",
"tags": {"pre": [], "op": ["transformer"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_transform": _input_transform_schema,
"output_transform": _output_transform_schema,
},
}
PowerTransformer = make_operator(_PowerTransformerImpl, _combined_schemas)
set_docstrings(PowerTransformer)
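if __name__ == "__main__":
    # Hedged usage sketch (editorial addition, not autogen output): make
    # skewed data more Gaussian-like; 'yeo-johnson' (the default) also accepts
    # negative values, unlike 'box-cox'.
    import numpy as np
    X = np.random.RandomState(0).exponential(size=(100, 3))
    trained = PowerTransformer(method="yeo-johnson").fit(X)
    print(trained.transform(X[:3]))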
| 4,027 | 35.288288 | 153 |
py
|
lale
|
lale-master/lale/lib/autogen/orthogonal_matching_pursuit_cv.py
|
from numpy import inf, nan
from sklearn.linear_model import OrthogonalMatchingPursuitCV as Op
from lale.docstrings import set_docstrings
from lale.operators import make_operator
class _OrthogonalMatchingPursuitCVImpl:
def __init__(self, **hyperparams):
self._hyperparams = hyperparams
self._wrapped_model = Op(**self._hyperparams)
def fit(self, X, y=None):
if y is not None:
self._wrapped_model.fit(X, y)
else:
self._wrapped_model.fit(X)
return self
def predict(self, X):
return self._wrapped_model.predict(X)
_hyperparams_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "inherited docstring for OrthogonalMatchingPursuitCV Cross-validated Orthogonal Matching Pursuit model (OMP).",
"allOf": [
{
"type": "object",
"required": [
"copy",
"fit_intercept",
"normalize",
"max_iter",
"cv",
"n_jobs",
"verbose",
],
"relevantToOptimizer": [
"copy",
"fit_intercept",
"normalize",
"max_iter",
"cv",
],
"additionalProperties": False,
"properties": {
"copy": {
"type": "boolean",
"default": True,
"description": "Whether the design matrix X must be copied by the algorithm",
},
"fit_intercept": {
"type": "boolean",
"default": True,
"description": "whether to calculate the intercept for this model",
},
"normalize": {
"type": "boolean",
"default": True,
"description": "This parameter is ignored when ``fit_intercept`` is set to False",
},
"max_iter": {
"anyOf": [
{
"type": "integer",
"minimumForOptimizer": 10,
"maximumForOptimizer": 1000,
"distribution": "uniform",
},
{"enum": [None]},
],
"default": None,
"description": "Maximum numbers of iterations to perform, therefore maximum features to include",
},
"cv": {
"description": """Cross-validation as integer or as object that has a split function.
The fit method performs cross validation on the input dataset for each
trial, and uses the mean cross validation performance for optimization.
This behavior is also impacted by handle_cv_failure flag.
If integer: number of folds in sklearn.model_selection.StratifiedKFold.
If object with split function: generator yielding (train, test) splits
as arrays of indices. Can use any of the iterators from
https://scikit-learn.org/stable/modules/cross_validation.html#cross-validation-iterators.""",
"anyOf": [
{
"type": "integer",
"minimum": 1,
"default": 5,
"minimumForOptimizer": 3,
"maximumForOptimizer": 4,
"distribution": "uniform",
},
{"laleType": "Any", "forOptimizer": False},
],
},
"n_jobs": {
"anyOf": [{"type": "integer"}, {"enum": [None]}],
"default": 1,
"description": "Number of CPUs to use during the cross validation",
},
"verbose": {
"anyOf": [{"type": "boolean"}, {"type": "integer"}],
"default": False,
"description": "Sets the verbosity amount",
},
},
},
{
"XXX TODO XXX": "Parameter: copy > only helpful if x is already fortran-ordered, otherwise a copy is made anyway"
},
],
}
_input_fit_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Fit the model using X, y as training data.",
"type": "object",
"required": ["X", "y"],
"properties": {
"X": {
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
"description": "Training data.",
},
"y": {
"type": "array",
"items": {"type": "number"},
"description": "Target values",
},
},
}
_input_predict_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Predict using the linear model",
"type": "object",
"required": ["X"],
"properties": {
"X": {
"anyOf": [
{
"type": "array",
"items": {"laleType": "Any", "XXX TODO XXX": "item type"},
"XXX TODO XXX": "array_like or sparse matrix, shape (n_samples, n_features)",
},
{
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
},
],
"description": "Samples.",
}
},
}
_output_predict_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Returns predicted values.",
"type": "array",
"items": {"type": "number"},
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Combined schema for expected data and hyperparameters.",
"documentation_url": "https://scikit-learn.org/0.20/modules/generated/sklearn.linear_model.OrthogonalMatchingPursuitCV#sklearn-linear_model-orthogonalmatchingpursuitcv",
"import_from": "sklearn.linear_model",
"type": "object",
"tags": {"pre": [], "op": ["estimator", "regressor"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
},
}
OrthogonalMatchingPursuitCV = make_operator(
_OrthogonalMatchingPursuitCVImpl, _combined_schemas
)
set_docstrings(OrthogonalMatchingPursuitCV)
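if __name__ == "__main__":
    # Hedged usage sketch (editorial addition, not autogen output):
    # cross-validated OMP picks the number of non-zero coefficients; cv=3
    # keeps the demo quick.
    from sklearn.datasets import make_regression
    X, y = make_regression(n_samples=100, n_features=12, random_state=0)
    trained = OrthogonalMatchingPursuitCV(cv=3).fit(X, y)
    print(trained.predict(X[:3]))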
| 6,780 | 36.882682 | 173 |
py
|
lale
|
lale-master/lale/lib/autogen/complement_nb.py
|
from numpy import inf, nan
from sklearn.naive_bayes import ComplementNB as Op
from lale.docstrings import set_docstrings
from lale.operators import make_operator
class _ComplementNBImpl:
def __init__(self, **hyperparams):
self._hyperparams = hyperparams
self._wrapped_model = Op(**self._hyperparams)
def fit(self, X, y=None):
if y is not None:
self._wrapped_model.fit(X, y)
else:
self._wrapped_model.fit(X)
return self
def predict(self, X):
return self._wrapped_model.predict(X)
def predict_proba(self, X):
return self._wrapped_model.predict_proba(X)
_hyperparams_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "inherited docstring for ComplementNB The Complement Naive Bayes classifier described in Rennie et al. (2003).",
"allOf": [
{
"type": "object",
"required": ["alpha", "fit_prior", "class_prior", "norm"],
"relevantToOptimizer": [],
"additionalProperties": False,
"properties": {
"alpha": {
"type": "number",
"default": 1.0,
"description": "Additive (Laplace/Lidstone) smoothing parameter (0 for no smoothing).",
},
"fit_prior": {
"type": "boolean",
"default": True,
"description": "Only used in edge case with a single class in the training set.",
},
"class_prior": {
"anyOf": [
{"type": "array", "items": {"type": "number"}},
{"enum": [None]},
],
"default": None,
"description": "Prior probabilities of the classes",
},
"norm": {
"type": "boolean",
"default": False,
"description": "Whether or not a second normalization of the weights is performed",
},
},
},
{
"XXX TODO XXX": "Parameter: fit_prior > only used in edge case with a single class in the training set"
},
],
}
_input_fit_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Fit Naive Bayes classifier according to X, y",
"type": "object",
"required": ["X", "y"],
"properties": {
"X": {
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
"description": "Training vectors, where n_samples is the number of samples and n_features is the number of features.",
},
"y": {
"type": "array",
"items": {"type": "number"},
"description": "Target values.",
},
"sample_weight": {
"anyOf": [{"type": "array", "items": {"type": "number"}}, {"enum": [None]}],
"default": None,
"description": "Weights applied to individual samples (1",
},
},
}
_input_predict_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Perform classification on an array of test vectors X.",
"type": "object",
"required": ["X"],
"properties": {
"X": {"type": "array", "items": {"type": "array", "items": {"type": "number"}}}
},
}
_output_predict_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Predicted target values for X",
"type": "array",
"items": {"type": "number"},
}
_input_predict_proba_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Return probability estimates for the test vector X.",
"type": "object",
"required": ["X"],
"properties": {
"X": {"type": "array", "items": {"type": "array", "items": {"type": "number"}}}
},
}
_output_predict_proba_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Returns the probability of the samples for each class in the model",
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Combined schema for expected data and hyperparameters.",
"documentation_url": "https://scikit-learn.org/0.20/modules/generated/sklearn.naive_bayes.ComplementNB#sklearn-naive_bayes-complementnb",
"import_from": "sklearn.naive_bayes",
"type": "object",
"tags": {"pre": [], "op": ["estimator", "classifier"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
"input_predict_proba": _input_predict_proba_schema,
"output_predict_proba": _output_predict_proba_schema,
},
}
ComplementNB = make_operator(_ComplementNBImpl, _combined_schemas)
set_docstrings(ComplementNB)
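if __name__ == "__main__":
    # Hedged usage sketch (editorial addition, not autogen output):
    # ComplementNB expects non-negative (count-like) features, so the demo
    # uses random integer counts.
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.randint(5, size=(80, 10)).astype(float)
    y = rng.randint(3, size=80)
    trained = ComplementNB(alpha=1.0).fit(X, y)
    print(trained.predict(X[:5]))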
| 5,111 | 35.776978 | 141 |
py
|
lale
|
lale-master/lale/lib/autogen/lasso.py
|
from numpy import inf, nan
from packaging import version
from sklearn.linear_model import Lasso as Op
import lale
from lale.docstrings import set_docstrings
from lale.operators import make_operator, sklearn_version
class _LassoImpl:
def __init__(self, **hyperparams):
self._hyperparams = hyperparams
self._wrapped_model = Op(**self._hyperparams)
def fit(self, X, y=None):
if y is not None:
self._wrapped_model.fit(X, y)
else:
self._wrapped_model.fit(X)
return self
def predict(self, X):
return self._wrapped_model.predict(X)
_hyperparams_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "inherited docstring for Lasso Linear Model trained with L1 prior as regularizer (aka the Lasso)",
"allOf": [
{
"type": "object",
"required": [
"alpha",
"fit_intercept",
"normalize",
"precompute",
"copy_X",
"max_iter",
"tol",
"warm_start",
"positive",
"random_state",
"selection",
],
"relevantToOptimizer": [
"alpha",
"fit_intercept",
"normalize",
"copy_X",
"max_iter",
"tol",
"positive",
"selection",
],
"additionalProperties": False,
"properties": {
"alpha": {
"type": "number",
"minimumForOptimizer": 1e-10,
"maximumForOptimizer": 1.0,
"distribution": "loguniform",
"default": 1.0,
"description": "Constant that multiplies the L1 term",
},
"fit_intercept": {
"type": "boolean",
"default": True,
"description": "Whether to calculate the intercept for this model",
},
"normalize": {
"type": "boolean",
"default": False,
"description": "This parameter is ignored when ``fit_intercept`` is set to False",
},
"precompute": {
"anyOf": [
{
"type": "array",
"items": {"laleType": "Any", "XXX TODO XXX": "item type"},
"XXX TODO XXX": "True | False | array-like, default=False",
},
{"type": "boolean"},
],
"default": False,
"description": "Whether to use a precomputed Gram matrix to speed up calculations",
},
"copy_X": {
"type": "boolean",
"default": True,
"description": "If ``True``, X will be copied; else, it may be overwritten.",
},
"max_iter": {
"type": "integer",
"minimumForOptimizer": 10,
"maximumForOptimizer": 1000,
"distribution": "uniform",
"default": 1000,
"description": "The maximum number of iterations",
},
"tol": {
"type": "number",
"minimumForOptimizer": 1e-08,
"maximumForOptimizer": 0.01,
"default": 0.0001,
"description": "The tolerance for the optimization: if the updates are smaller than ``tol``, the optimization code checks the dual gap for optimality and continues until it is smaller than ``tol``.",
},
"warm_start": {
"type": "boolean",
"default": False,
"description": "When set to True, reuse the solution of the previous call to fit as initialization, otherwise, just erase the previous solution",
},
"positive": {
"type": "boolean",
"default": False,
"description": "When set to ``True``, forces the coefficients to be positive.",
},
"random_state": {
"anyOf": [
{"type": "integer"},
{"laleType": "numpy.random.RandomState"},
{"enum": [None]},
],
"default": None,
"description": "The seed of the pseudo random number generator that selects a random feature to update",
},
"selection": {
"enum": ["random", "cyclic"],
"default": "cyclic",
"description": "If set to 'random', a random coefficient is updated every iteration rather than looping over features sequentially by default",
},
},
}
],
}
_input_fit_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Fit model with coordinate descent.",
"type": "object",
"required": ["X", "y"],
"properties": {
"X": {
"laleType": "Any",
"XXX TODO XXX": "ndarray or scipy.sparse matrix, (n_samples, n_features)",
"description": "Data",
},
"y": {
"laleType": "Any",
"XXX TODO XXX": "ndarray, shape (n_samples,) or (n_samples, n_targets)",
"description": "Target",
},
"check_input": {
"type": "boolean",
"default": True,
"description": "Allow to bypass several input checking",
},
},
}
_input_predict_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Predict using the linear model",
"type": "object",
"required": ["X"],
"properties": {
"X": {
"anyOf": [
{
"type": "array",
"items": {"laleType": "Any", "XXX TODO XXX": "item type"},
"XXX TODO XXX": "array_like or sparse matrix, shape (n_samples, n_features)",
},
{
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
},
],
"description": "Samples.",
}
},
}
_output_predict_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Returns predicted values.",
"type": "array",
"items": {"type": "number"},
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Combined schema for expected data and hyperparameters.",
"documentation_url": "https://scikit-learn.org/0.20/modules/generated/sklearn.linear_model.Lasso#sklearn-linear_model-lasso",
"import_from": "sklearn.linear_model",
"type": "object",
"tags": {"pre": [], "op": ["estimator", "regressor"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
},
}
Lasso = make_operator(_LassoImpl, _combined_schemas)
if sklearn_version >= version.Version("1.2"):
# new: "https://scikit-learn.org/1.2/modules/generated/sklearn.linear_model.Lasso#sklearn-linear_model-lasso"
Lasso = Lasso.customize_schema(
normalize=None,
set_as_available=True,
)
set_docstrings(Lasso)
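# --- Illustrative usage sketch (documentation aid, not part of the generated
# wrapper); the toy arrays and alpha value below are assumptions.
if __name__ == "__main__":
    import numpy as np

    train_X = np.array([[0.0], [1.0], [2.0], [3.0]])
    train_y = np.array([0.0, 1.0, 2.0, 3.0])
    trained = Lasso(alpha=0.1).fit(train_X, train_y)
    print(trained.predict(train_X))  # linear trend recovered up to shrinkage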
| 7,821 | 36.07109 | 219 |
py
|
lale
|
lale-master/lale/lib/autogen/radius_neighbors_regressor.py
|
from numpy import inf, nan
from sklearn.neighbors import RadiusNeighborsRegressor as Op
from lale.docstrings import set_docstrings
from lale.operators import make_operator
class _RadiusNeighborsRegressorImpl:
def __init__(self, **hyperparams):
self._hyperparams = hyperparams
self._wrapped_model = Op(**self._hyperparams)
def fit(self, X, y=None):
if y is not None:
self._wrapped_model.fit(X, y)
else:
self._wrapped_model.fit(X)
return self
def predict(self, X):
return self._wrapped_model.predict(X)
_hyperparams_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "inherited docstring for RadiusNeighborsRegressor Regression based on neighbors within a fixed radius.",
"allOf": [
{
"type": "object",
"required": [
"radius",
"weights",
"algorithm",
"leaf_size",
"p",
"metric",
"metric_params",
"n_jobs",
],
"relevantToOptimizer": ["weights", "algorithm", "leaf_size", "p", "metric"],
"additionalProperties": False,
"properties": {
"radius": {
"type": "number",
"default": 1.0,
"description": "Range of parameter space to use by default for :meth:`radius_neighbors` queries.",
},
"weights": {
"anyOf": [
{"laleType": "callable", "forOptimizer": False},
{"enum": ["distance", "uniform"]},
],
"default": "uniform",
"description": "weight function used in prediction",
},
"algorithm": {
"enum": ["auto", "ball_tree", "kd_tree", "brute"],
"default": "auto",
"description": "Algorithm used to compute the nearest neighbors: - 'ball_tree' will use :class:`BallTree` - 'kd_tree' will use :class:`KDTree` - 'brute' will use a brute-force search",
},
"leaf_size": {
"type": "integer",
"minimumForOptimizer": 30,
"maximumForOptimizer": 31,
"distribution": "uniform",
"default": 30,
"description": "Leaf size passed to BallTree or KDTree",
},
"p": {
"type": "integer",
"minimumForOptimizer": 1,
"maximumForOptimizer": 3,
"distribution": "uniform",
"default": 2,
"description": "Power parameter for the Minkowski metric",
},
"metric": {
"anyOf": [
{"laleType": "callable", "forOptimizer": False},
{
"enum": [
"euclidean",
"manhattan",
"minkowski",
"precomputed",
]
},
],
"default": "minkowski",
"description": "the distance metric to use for the tree",
},
"metric_params": {
"anyOf": [{"type": "object"}, {"enum": [None]}],
"default": None,
"description": "Additional keyword arguments for the metric function.",
},
"n_jobs": {
"anyOf": [{"type": "integer"}, {"enum": [None]}],
"default": None,
"description": "The number of parallel jobs to run for neighbors search",
},
},
}
],
}
_input_fit_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Fit the model using X as training data and y as target values",
"type": "object",
"required": ["X", "y"],
"properties": {
"X": {
"type": "array",
"items": {"laleType": "Any", "XXX TODO XXX": "item type"},
"XXX TODO XXX": "{array-like, sparse matrix, BallTree, KDTree}",
"description": "Training data",
},
"y": {
"type": "array",
"items": {"laleType": "Any", "XXX TODO XXX": "item type"},
"XXX TODO XXX": "{array-like, sparse matrix}",
"description": "Target values, array of float values, shape = [n_samples] or [n_samples, n_outputs]",
},
},
}
_input_predict_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Predict the target for the provided data",
"type": "object",
"required": ["X"],
"properties": {
"X": {
"laleType": "Any",
"XXX TODO XXX": "array-like, shape (n_query, n_features), or (n_query, n_indexed) if metric == 'precomputed'",
"description": "Test samples.",
}
},
}
_output_predict_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Target values",
"laleType": "Any",
"XXX TODO XXX": "array of float, shape = [n_samples] or [n_samples, n_outputs]",
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Combined schema for expected data and hyperparameters.",
"documentation_url": "https://scikit-learn.org/0.20/modules/generated/sklearn.neighbors.RadiusNeighborsRegressor#sklearn-neighbors-radiusneighborsregressor",
"import_from": "sklearn.neighbors",
"type": "object",
"tags": {"pre": [], "op": ["estimator", "regressor"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
},
}
RadiusNeighborsRegressor = make_operator(
_RadiusNeighborsRegressorImpl, _combined_schemas
)
set_docstrings(RadiusNeighborsRegressor)
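# --- Illustrative usage sketch (documentation aid, not part of the generated
# wrapper); the toy arrays and radius value below are assumptions.
if __name__ == "__main__":
    import numpy as np

    train_X = np.array([[0.0], [0.5], [1.0], [1.5]])
    train_y = np.array([0.0, 0.5, 1.0, 1.5])
    trained = RadiusNeighborsRegressor(radius=1.0).fit(train_X, train_y)
    # each query averages the targets of training points within the fixed radius
    print(trained.predict(np.array([[0.75]])))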
| 6,332 | 37.615854 | 217 |
py
|
lale
|
lale-master/lale/lib/autogen/gaussian_process_regressor.py
|
from numpy import inf, nan
from sklearn.gaussian_process import GaussianProcessRegressor as Op
from lale.docstrings import set_docstrings
from lale.operators import make_operator
class _GaussianProcessRegressorImpl:
def __init__(self, **hyperparams):
self._hyperparams = hyperparams
self._wrapped_model = Op(**self._hyperparams)
def fit(self, X, y=None):
if y is not None:
self._wrapped_model.fit(X, y)
else:
self._wrapped_model.fit(X)
return self
def predict(self, X):
return self._wrapped_model.predict(X)
_hyperparams_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "inherited docstring for GaussianProcessRegressor Gaussian process regression (GPR).",
"allOf": [
{
"type": "object",
"required": [
"kernel",
"alpha",
"optimizer",
"n_restarts_optimizer",
"normalize_y",
"copy_X_train",
"random_state",
],
"relevantToOptimizer": [
"alpha",
"optimizer",
"n_restarts_optimizer",
"normalize_y",
],
"additionalProperties": False,
"properties": {
"kernel": {
"XXX TODO XXX": "kernel object",
"description": "The kernel specifying the covariance function of the GP",
"enum": [None],
"default": None,
},
"alpha": {
"anyOf": [
{
"type": "number",
"minimumForOptimizer": 1e-10,
"maximumForOptimizer": 1.0,
"distribution": "loguniform",
},
{
"type": "array",
"items": {"laleType": "Any", "XXX TODO XXX": "item type"},
"XXX TODO XXX": "float or array-like, optional (default: 1e-10)",
"forOptimizer": False,
},
],
"default": 1e-10,
"description": "Value added to the diagonal of the kernel matrix during fitting",
},
"optimizer": {
"anyOf": [
{"laleType": "callable", "forOptimizer": False},
{"enum": ["fmin_l_bfgs_b"]},
],
"default": "fmin_l_bfgs_b",
"description": "Can either be one of the internally supported optimizers for optimizing the kernel's parameters, specified by a string, or an externally defined optimizer passed as a callable",
},
"n_restarts_optimizer": {
"type": "integer",
"minimumForOptimizer": 0,
"maximumForOptimizer": 1,
"distribution": "uniform",
"default": 0,
"description": "The number of restarts of the optimizer for finding the kernel's parameters which maximize the log-marginal likelihood",
},
"normalize_y": {
"type": "boolean",
"default": False,
"description": "Whether the target values y are normalized, i.e., the mean of the observed target values become zero",
},
"copy_X_train": {
"type": "boolean",
"default": True,
"description": "If True, a persistent copy of the training data is stored in the object",
},
"random_state": {
"anyOf": [
{"type": "integer"},
{"laleType": "numpy.random.RandomState"},
{"enum": [None]},
],
"default": None,
"description": "The generator used to initialize the centers",
},
},
}
],
}
_input_fit_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Fit Gaussian process regression model.",
"type": "object",
"required": ["X", "y"],
"properties": {
"X": {
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
"description": "Training data",
},
"y": {
"laleType": "Any",
"XXX TODO XXX": "array-like, shape = (n_samples, [n_output_dims])",
"description": "Target values",
},
},
}
_input_predict_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Predict using the Gaussian process regression model",
"type": "object",
"required": ["X"],
"properties": {
"X": {
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
"description": "Query points where the GP is evaluated",
},
"return_std": {
"type": "boolean",
"default": False,
"description": "If True, the standard-deviation of the predictive distribution at the query points is returned along with the mean.",
},
"return_cov": {
"type": "boolean",
"default": False,
"description": "If True, the covariance of the joint predictive distribution at the query points is returned along with the mean",
},
},
}
_output_predict_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Predict using the Gaussian process regression model",
"laleType": "Any",
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Combined schema for expected data and hyperparameters.",
"documentation_url": "https://scikit-learn.org/0.20/modules/generated/sklearn.gaussian_process.GaussianProcessRegressor#sklearn-gaussian_process-gaussianprocessregressor",
"import_from": "sklearn.gaussian_process",
"type": "object",
"tags": {"pre": [], "op": ["estimator", "regressor"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
},
}
GaussianProcessRegressor = make_operator(
_GaussianProcessRegressorImpl, _combined_schemas
)
set_docstrings(GaussianProcessRegressor)
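# --- Illustrative usage sketch (documentation aid, not part of the generated
# wrapper); the toy data below is an assumption.
if __name__ == "__main__":
    import numpy as np

    train_X = np.array([[0.0], [1.0], [2.0], [3.0]])
    train_y = np.sin(train_X).ravel()
    trained = GaussianProcessRegressor(random_state=42).fit(train_X, train_y)
    # with the default kernel, predictions closely interpolate the training data
    print(trained.predict(np.array([[1.5]])))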
| 6,782 | 37.76 | 213 |
py
|
lale
|
lale-master/lale/lib/autogen/skewed_chi2_sampler.py
|
from numpy import inf, nan
from sklearn.kernel_approximation import SkewedChi2Sampler as Op
from lale.docstrings import set_docstrings
from lale.operators import make_operator
class _SkewedChi2SamplerImpl:
def __init__(self, **hyperparams):
self._hyperparams = hyperparams
self._wrapped_model = Op(**self._hyperparams)
def fit(self, X, y=None):
if y is not None:
self._wrapped_model.fit(X, y)
else:
self._wrapped_model.fit(X)
return self
def transform(self, X):
return self._wrapped_model.transform(X)
_hyperparams_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": 'inherited docstring for SkewedChi2Sampler Approximates feature map of the "skewed chi-squared" kernel by Monte',
"allOf": [
{
"type": "object",
"required": ["skewedness", "n_components", "random_state"],
"relevantToOptimizer": ["n_components"],
"additionalProperties": False,
"properties": {
"skewedness": {
"type": "number",
"default": 1.0,
"description": '"skewedness" parameter of the kernel',
},
"n_components": {
"type": "integer",
"minimumForOptimizer": 2,
"maximumForOptimizer": 256,
"distribution": "uniform",
"default": 100,
"description": "number of Monte Carlo samples per original feature",
},
"random_state": {
"anyOf": [
{"type": "integer"},
{"laleType": "numpy.random.RandomState"},
{"enum": [None]},
],
"default": None,
"description": "If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`.",
},
},
},
{
"description": "A sparse matrix was passed, but dense data is required. Use X.toarray() to convert to a dense numpy array.",
"type": "object",
"laleNot": "X/isSparse",
},
],
}
_input_fit_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Fit the model with X.",
"type": "object",
"required": ["X"],
"properties": {
"X": {
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
"description": "Training data, where n_samples in the number of samples and n_features is the number of features.",
}
},
}
_input_transform_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Apply the approximate feature map to X.",
"type": "object",
"required": ["X"],
"properties": {
"X": {
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
"description": "New data, where n_samples in the number of samples and n_features is the number of features",
}
},
}
_output_transform_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Apply the approximate feature map to X.",
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Combined schema for expected data and hyperparameters.",
"documentation_url": "https://scikit-learn.org/0.20/modules/generated/sklearn.kernel_approximation.SkewedChi2Sampler#sklearn-kernel_approximation-skewedchi2sampler",
"import_from": "sklearn.kernel_approximation",
"type": "object",
"tags": {"pre": [], "op": ["transformer"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_transform": _input_transform_schema,
"output_transform": _output_transform_schema,
},
}
SkewedChi2Sampler = make_operator(_SkewedChi2SamplerImpl, _combined_schemas)
set_docstrings(SkewedChi2Sampler)
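# --- Illustrative usage sketch (documentation aid, not part of the generated
# wrapper); the nonnegative toy matrix below is an assumption (inputs must
# stay above -skewedness for this kernel approximation).
if __name__ == "__main__":
    import numpy as np

    train_X = np.abs(np.random.RandomState(42).randn(5, 3))
    trained = SkewedChi2Sampler(n_components=4, random_state=42).fit(train_X)
    print(trained.transform(train_X).shape)  # (n_samples, n_components)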
| 4,381 | 37.438596 | 263 |
py
|
lale
|
lale-master/lale/lib/autogen/latent_dirichlet_allocation.py
|
from numpy import inf, nan
from sklearn.decomposition import LatentDirichletAllocation as Op
from lale.docstrings import set_docstrings
from lale.operators import make_operator
class _LatentDirichletAllocationImpl:
def __init__(self, **hyperparams):
self._hyperparams = hyperparams
self._wrapped_model = Op(**self._hyperparams)
def fit(self, X, y=None):
if y is not None:
self._wrapped_model.fit(X, y)
else:
self._wrapped_model.fit(X)
return self
def transform(self, X):
return self._wrapped_model.transform(X)
_hyperparams_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "inherited docstring for LatentDirichletAllocation Latent Dirichlet Allocation with online variational Bayes algorithm",
"allOf": [
{
"type": "object",
"required": [
"n_components",
"doc_topic_prior",
"topic_word_prior",
"learning_method",
"learning_decay",
"learning_offset",
"max_iter",
"batch_size",
"evaluate_every",
"total_samples",
"perp_tol",
"mean_change_tol",
"max_doc_update_iter",
"n_jobs",
"verbose",
"random_state",
],
"relevantToOptimizer": [
"n_components",
"max_iter",
"batch_size",
"evaluate_every",
"total_samples",
"max_doc_update_iter",
"learning_method",
],
"additionalProperties": False,
"properties": {
"n_components": {
"type": "integer",
"minimun": 1,
"laleMaximum": "X/items/maxItems",
"minimumForOptimizer": 2,
"maximumForOptimizer": 256,
"distribution": "uniform",
"default": 10,
"description": "Number of topics.",
},
"doc_topic_prior": {
"anyOf": [{"type": "number"}, {"enum": [None]}],
"default": None,
"description": "Prior of document topic distribution `theta`",
},
"topic_word_prior": {
"anyOf": [{"type": "number"}, {"enum": [None]}],
"default": None,
"description": "Prior of topic word distribution `beta`",
},
"learning_method": {
"enum": ["batch", "online"],
"default": "batch",
"description": "Method used to update `_component`",
},
"learning_decay": {
"type": "number",
"default": 0.7,
"description": "It is a parameter that control learning rate in the online learning method",
},
"learning_offset": {
"type": "number",
"default": 10.0,
"description": "A (positive) parameter that downweights early iterations in online learning",
},
"max_iter": {
"type": "integer",
"minimumForOptimizer": 10,
"maximumForOptimizer": 1000,
"distribution": "uniform",
"default": 10,
"description": "The maximum number of iterations.",
},
"batch_size": {
"type": "integer",
"minimumForOptimizer": 3,
"maximumForOptimizer": 128,
"distribution": "uniform",
"default": 128,
"description": "Number of documents to use in each EM iteration",
},
"evaluate_every": {
"type": "integer",
"minimumForOptimizer": (-1),
"maximumForOptimizer": 0,
"distribution": "uniform",
"default": (-1),
"description": "How often to evaluate perplexity",
},
"total_samples": {
"anyOf": [
{"type": "integer", "forOptimizer": False},
{
"type": "number",
"minimumForOptimizer": 0.0,
"maximumForOptimizer": 1.0,
"distribution": "uniform",
},
],
"default": 1000000.0,
"description": "Total number of documents",
},
"perp_tol": {
"type": "number",
"default": 0.1,
"description": "Perplexity tolerance in batch learning",
},
"mean_change_tol": {
"type": "number",
"default": 0.001,
"description": "Stopping tolerance for updating document topic distribution in E-step.",
},
"max_doc_update_iter": {
"type": "integer",
"minimumForOptimizer": 100,
"maximumForOptimizer": 101,
"distribution": "uniform",
"default": 100,
"description": "Max number of iterations for updating document topic distribution in the E-step.",
},
"n_jobs": {
"anyOf": [{"type": "integer"}, {"enum": [None]}],
"default": 1,
"description": "The number of jobs to use in the E-step",
},
"verbose": {
"type": "integer",
"default": 0,
"description": "Verbosity level.",
},
"random_state": {
"anyOf": [
{"type": "integer"},
{"laleType": "numpy.random.RandomState"},
{"enum": [None]},
],
"default": None,
"description": "If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`.",
},
},
},
{
"XXX TODO XXX": "Parameter: perp_tol > only used when evaluate_every is greater than 0"
},
],
}
_input_fit_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Learn model for the data X with variational Bayes method.",
"type": "object",
"required": ["X", "y"],
"properties": {
"X": {
"anyOf": [
{
"type": "array",
"items": {"laleType": "Any", "XXX TODO XXX": "item type"},
"XXX TODO XXX": "array-like or sparse matrix, shape=(n_samples, n_features)",
},
{
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
},
],
"description": "Document word matrix.",
},
"y": {},
},
}
_input_transform_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Transform data X according to the fitted model.",
"type": "object",
"required": ["X"],
"properties": {
"X": {
"anyOf": [
{
"type": "array",
"items": {"laleType": "Any", "XXX TODO XXX": "item type"},
"XXX TODO XXX": "array-like or sparse matrix, shape=(n_samples, n_features)",
},
{
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
},
],
"description": "Document word matrix.",
}
},
}
_output_transform_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Document topic distribution for X.",
"laleType": "Any",
"XXX TODO XXX": "shape=(n_samples, n_components)",
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Combined schema for expected data and hyperparameters.",
"documentation_url": "https://scikit-learn.org/0.20/modules/generated/sklearn.decomposition.LatentDirichletAllocation#sklearn-decomposition-latentdirichletallocation",
"import_from": "sklearn.decomposition",
"type": "object",
"tags": {"pre": [], "op": ["transformer"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_transform": _input_transform_schema,
"output_transform": _output_transform_schema,
},
}
LatentDirichletAllocation = make_operator(
_LatentDirichletAllocationImpl, _combined_schemas
)
set_docstrings(LatentDirichletAllocation)
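# --- Illustrative usage sketch (documentation aid, not part of the generated
# wrapper); the toy document-word count matrix below is an assumption.
if __name__ == "__main__":
    import numpy as np

    counts = np.array([[2, 1, 0, 0], [1, 3, 0, 1], [0, 0, 4, 2], [0, 1, 3, 3]])
    trained = LatentDirichletAllocation(n_components=2, random_state=42).fit(counts)
    # each row of the transform is a document's topic distribution
    print(trained.transform(counts))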
| 9,504 | 37.795918 | 263 |
py
|
lale
|
lale-master/lale/lib/autogen/multi_task_elastic_net_cv.py
|
from numpy import inf, nan
from packaging import version
from sklearn.linear_model import MultiTaskElasticNetCV as Op
import lale.operators
from lale.docstrings import set_docstrings
from lale.operators import make_operator
class _MultiTaskElasticNetCVImpl:
def __init__(self, **hyperparams):
self._hyperparams = hyperparams
self._wrapped_model = Op(**self._hyperparams)
def fit(self, X, y=None):
if y is not None:
self._wrapped_model.fit(X, y)
else:
self._wrapped_model.fit(X)
return self
def predict(self, X):
return self._wrapped_model.predict(X)
_hyperparams_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "inherited docstring for MultiTaskElasticNetCV Multi-task L1/L2 ElasticNet with built-in cross-validation.",
"allOf": [
{
"type": "object",
"required": [
"l1_ratio",
"eps",
"n_alphas",
"alphas",
"fit_intercept",
"normalize",
"max_iter",
"tol",
"cv",
"copy_X",
"verbose",
"n_jobs",
"random_state",
"selection",
],
"relevantToOptimizer": [
"eps",
"n_alphas",
"fit_intercept",
"normalize",
"max_iter",
"tol",
"cv",
"copy_X",
],
"additionalProperties": False,
"properties": {
"l1_ratio": {
"XXX TODO XXX": "float or array of floats",
"description": "The ElasticNet mixing parameter, with 0 < l1_ratio <= 1",
"type": "number",
"default": 0.5,
},
"eps": {
"type": "number",
"minimumForOptimizer": 0.001,
"maximumForOptimizer": 0.1,
"distribution": "loguniform",
"default": 0.001,
"description": "Length of the path",
},
"n_alphas": {
"type": "integer",
"minimumForOptimizer": 100,
"maximumForOptimizer": 101,
"distribution": "uniform",
"default": 100,
"description": "Number of alphas along the regularization path",
},
"alphas": {
"anyOf": [
{
"type": "array",
"items": {"laleType": "Any", "XXX TODO XXX": "item type"},
"XXX TODO XXX": "array-like, optional",
},
{"enum": [None]},
],
"default": None,
"description": "List of alphas where to compute the models",
},
"fit_intercept": {
"type": "boolean",
"default": True,
"description": "whether to calculate the intercept for this model",
},
"normalize": {
"type": "boolean",
"default": False,
"description": "This parameter is ignored when ``fit_intercept`` is set to False",
},
"max_iter": {
"type": "integer",
"minimumForOptimizer": 10,
"maximumForOptimizer": 1000,
"distribution": "uniform",
"default": 1000,
"description": "The maximum number of iterations",
},
"tol": {
"type": "number",
"minimumForOptimizer": 1e-08,
"maximumForOptimizer": 0.01,
"default": 0.0001,
"description": "The tolerance for the optimization: if the updates are smaller than ``tol``, the optimization code checks the dual gap for optimality and continues until it is smaller than ``tol``.",
},
"cv": {
"description": """Cross-validation as integer or as object that has a split function.
The fit method performs cross validation on the input dataset per
trial, and uses the mean cross validation performance for optimization.
This behavior is also impacted by handle_cv_failure flag.
If integer: number of folds in sklearn.model_selection.StratifiedKFold.
If object with split function: generator yielding (train, test) splits
as arrays of indices. Can use any of the iterators from
https://scikit-learn.org/stable/modules/cross_validation.html#cross-validation-iterators.""",
"anyOf": [
{
"type": "integer",
"minimum": 1,
"default": 5,
"minimumForOptimizer": 3,
"maximumForOptimizer": 4,
"distribution": "uniform",
},
{"laleType": "Any", "forOptimizer": False},
],
},
"copy_X": {
"type": "boolean",
"default": True,
"description": "If ``True``, X will be copied; else, it may be overwritten.",
},
"verbose": {
"anyOf": [{"type": "boolean"}, {"type": "integer"}],
"default": 0,
"description": "Amount of verbosity.",
},
"n_jobs": {
"anyOf": [{"type": "integer"}, {"enum": [None]}],
"default": 1,
"description": "Number of CPUs to use during the cross validation",
},
"random_state": {
"anyOf": [
{"type": "integer"},
{"laleType": "numpy.random.RandomState"},
{"enum": [None]},
],
"default": None,
"description": "The seed of the pseudo random number generator that selects a random feature to update",
},
"selection": {
"type": "string",
"default": "cyclic",
"description": "If set to 'random', a random coefficient is updated every iteration rather than looping over features sequentially by default",
},
},
},
{
"XXX TODO XXX": "Parameter: n_jobs > only if multiple values for l1_ratio are given"
},
],
}
_input_fit_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Fit linear model with coordinate descent",
"type": "object",
"required": ["X", "y"],
"properties": {
"X": {
"laleType": "Any",
"XXX TODO XXX": "{array-like}, shape (n_samples, n_features)",
"description": "Training data",
},
"y": {
"anyOf": [
{"type": "array", "items": {"type": "number"}},
{
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
},
],
"description": "Target values",
},
},
}
_input_predict_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Predict using the linear model",
"type": "object",
"required": ["X"],
"properties": {
"X": {
"anyOf": [
{
"type": "array",
"items": {"laleType": "Any", "XXX TODO XXX": "item type"},
"XXX TODO XXX": "array_like or sparse matrix, shape (n_samples, n_features)",
},
{
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
},
],
"description": "Samples.",
}
},
}
_output_predict_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Returns predicted values.",
"type": "array",
"items": {"type": "number"},
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Combined schema for expected data and hyperparameters.",
"documentation_url": "https://scikit-learn.org/0.20/modules/generated/sklearn.linear_model.MultiTaskElasticNetCV#sklearn-linear_model-multitaskelasticnetcv",
"import_from": "sklearn.linear_model",
"type": "object",
"tags": {"pre": [], "op": ["estimator"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
},
}
MultiTaskElasticNetCV = make_operator(_MultiTaskElasticNetCVImpl, _combined_schemas)
if lale.operators.sklearn_version >= version.Version("1.2"):
# old: https://scikit-learn.org/1.1/modules/generated/sklearn.linear_model.MultiTaskElasticNetCV.html
# new: https://scikit-learn.org/1.2/modules/generated/sklearn.linear_model.MultiTaskElasticNetCV.html
MultiTaskElasticNetCV = MultiTaskElasticNetCV.customize_schema(normalize=None)
set_docstrings(MultiTaskElasticNetCV)
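# --- Illustrative usage sketch (documentation aid, not part of the generated
# wrapper); the toy multi-task data and cv=3 choice below are assumptions.
if __name__ == "__main__":
    import numpy as np

    rng = np.random.RandomState(42)
    train_X = rng.randn(12, 3)
    train_Y = np.column_stack([train_X @ [1.0, 0.5, 0.0], train_X @ [0.0, 0.5, 1.0]])
    trained = MultiTaskElasticNetCV(cv=3).fit(train_X, train_Y)
    print(trained.predict(train_X[:2]))  # one prediction per task and sample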
| 9,944 | 38.939759 | 219 |
py
|
lale
|
lale-master/lale/lib/autogen/kernel_ridge.py
|
from numpy import inf, nan
from packaging import version
from sklearn.kernel_ridge import KernelRidge as Op
import lale
from lale.docstrings import set_docstrings
from lale.operators import make_operator, sklearn_version
class _KernelRidgeImpl:
def __init__(self, **hyperparams):
self._hyperparams = hyperparams
self._wrapped_model = Op(**self._hyperparams)
def fit(self, X, y=None):
if y is not None:
self._wrapped_model.fit(X, y)
else:
self._wrapped_model.fit(X)
return self
def predict(self, X):
return self._wrapped_model.predict(X)
_hyperparams_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "inherited docstring for KernelRidge Kernel ridge regression.",
"allOf": [
{
"type": "object",
"required": [
"alpha",
"kernel",
"gamma",
"degree",
"coef0",
"kernel_params",
],
"relevantToOptimizer": ["alpha", "kernel", "degree", "coef0"],
"additionalProperties": False,
"properties": {
"alpha": {
"XXX TODO XXX": "{float, array-like}, shape = [n_targets]",
"description": "Small positive values of alpha improve the conditioning of the problem and reduce the variance of the estimates",
"type": "integer",
"minimumForOptimizer": 1,
"maximumForOptimizer": 2,
"distribution": "uniform",
"default": 1,
},
"kernel": {
"anyOf": [
{"laleType": "callable", "forOptimizer": False},
{"enum": ["linear", "poly", "precomputed", "rbf", "sigmoid"]},
],
"default": "linear",
"description": "Kernel mapping used internally",
},
"gamma": {
"anyOf": [{"type": "number"}, {"enum": [None]}],
"default": None,
"description": "Gamma parameter for the RBF, laplacian, polynomial, exponential chi2 and sigmoid kernels",
},
"degree": {
"type": "number",
"minimumForOptimizer": 0.0,
"maximumForOptimizer": 1.0,
"distribution": "uniform",
"default": 3,
"description": "Degree of the polynomial kernel",
},
"coef0": {
"type": "number",
"minimumForOptimizer": 0.0,
"maximumForOptimizer": 1.0,
"distribution": "uniform",
"default": 1,
"description": "Zero coefficient for polynomial and sigmoid kernels",
},
"kernel_params": {
"XXX TODO XXX": "mapping of string to any, optional",
"description": "Additional parameters (keyword arguments) for kernel function passed as callable object.",
"enum": [None],
"default": None,
},
},
}
],
}
_input_fit_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Fit Kernel Ridge regression model",
"type": "object",
"required": ["X", "y"],
"properties": {
"X": {
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
"description": "Training data",
},
"y": {
"anyOf": [
{"type": "array", "items": {"type": "number"}},
{
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
},
],
"description": "Target values",
},
"sample_weight": {
"anyOf": [
{"type": "number"},
{"type": "array", "items": {"type": "number"}},
],
"description": "Individual weights for each sample, ignored if None is passed.",
},
},
}
_input_predict_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Predict using the kernel ridge model",
"type": "object",
"required": ["X"],
"properties": {
"X": {
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
"description": "Samples",
}
},
}
_output_predict_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Returns predicted values.",
"anyOf": [
{"type": "array", "items": {"type": "number"}},
{"type": "array", "items": {"type": "array", "items": {"type": "number"}}},
],
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Combined schema for expected data and hyperparameters.",
"documentation_url": "https://scikit-learn.org/0.20/modules/generated/sklearn.kernel_ridge.KernelRidge#sklearn-kernel_ridge-kernelridge",
"import_from": "sklearn.kernel_ridge",
"type": "object",
"tags": {"pre": [], "op": ["estimator", "regressor"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
},
}
KernelRidge = make_operator(_KernelRidgeImpl, _combined_schemas)
if sklearn_version >= version.Version("1.2"):
# new: "https://scikit-learn.org/1.2/modules/generated/sklearn.kernel_ridge.KernelRidge#sklearn-kernel_ridge-kernelridge",
KernelRidge = KernelRidge.customize_schema(
degree={
"type": "integer",
"minimumForOptimizer": 0,
"maximumForOptimizer": 100,
"distribution": "uniform",
"default": 3,
"description": "Degree of the polynomial kernel",
},
set_as_available=True,
)
set_docstrings(KernelRidge)
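# --- Illustrative usage sketch (documentation aid, not part of the generated
# wrapper); the toy data and rbf/gamma choices below are assumptions.
if __name__ == "__main__":
    import numpy as np

    train_X = np.array([[0.0], [1.0], [2.0], [3.0]])
    train_y = np.array([0.0, 0.8, 0.9, 0.1])
    trained = KernelRidge(kernel="rbf", gamma=0.5).fit(train_X, train_y)
    print(trained.predict(np.array([[1.5]])))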
| 6,295 | 35.183908 | 149 |
py
|
lale
|
lale-master/lale/lib/autogen/nu_svc.py
|
from numpy import inf, nan
from sklearn.svm import NuSVC as Op
from lale.docstrings import set_docstrings
from lale.operators import make_operator
class _NuSVCImpl:
def __init__(self, **hyperparams):
self._hyperparams = hyperparams
self._wrapped_model = Op(**self._hyperparams)
def fit(self, X, y=None):
if y is not None:
self._wrapped_model.fit(X, y)
else:
self._wrapped_model.fit(X)
return self
def predict(self, X):
return self._wrapped_model.predict(X)
def predict_proba(self, X):
return self._wrapped_model.predict_proba(X)
def decision_function(self, X):
return self._wrapped_model.decision_function(X)
_hyperparams_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "inherited docstring for NuSVC Nu-Support Vector Classification.",
"allOf": [
{
"type": "object",
"required": [
"nu",
"kernel",
"degree",
"gamma",
"coef0",
"shrinking",
"probability",
"tol",
"cache_size",
"class_weight",
"verbose",
"max_iter",
"decision_function_shape",
"break_ties",
"random_state",
],
"relevantToOptimizer": [
"kernel",
"degree",
"gamma",
"shrinking",
"probability",
"tol",
"cache_size",
"max_iter",
"decision_function_shape",
],
"additionalProperties": False,
"properties": {
"nu": {
"type": "number",
"default": 0.5,
"description": "An upper bound on the fraction of training errors and a lower bound of the fraction of support vectors",
},
"kernel": {
"enum": ["linear", "poly", "precomputed", "sigmoid", "rbf"],
"default": "rbf",
"description": "Specifies the kernel type to be used in the algorithm",
},
"degree": {
"type": "integer",
"minimumForOptimizer": 2,
"maximumForOptimizer": 3,
"distribution": "uniform",
"default": 3,
"description": "Degree of the polynomial kernel function ('poly')",
},
"gamma": {
"anyOf": [
{"type": "number", "forOptimizer": False},
{"enum": ["scale", "auto"]},
],
"default": "scale",
"description": "Kernel coefficient for 'rbf', 'poly' and 'sigmoid'",
},
"coef0": {
"type": "number",
"default": 0.0,
"description": "Independent term in kernel function",
},
"shrinking": {
"type": "boolean",
"default": True,
"description": "Whether to use the shrinking heuristic.",
},
"probability": {
"type": "boolean",
"default": False,
"description": "Whether to enable probability estimates",
},
"tol": {
"type": "number",
"minimumForOptimizer": 1e-08,
"maximumForOptimizer": 0.01,
"default": 0.001,
"description": "Tolerance for stopping criterion.",
},
"cache_size": {
"type": "number",
"minimumForOptimizer": 0.0,
"maximumForOptimizer": 1.0,
"distribution": "uniform",
"default": 200,
"description": "Specify the size of the kernel cache (in MB).",
},
"class_weight": {
"enum": ["dict", "balanced"],
"default": "balanced",
"description": "Set the parameter C of class i to class_weight[i]*C for SVC",
},
"verbose": {
"type": "boolean",
"default": False,
"description": "Enable verbose output",
},
"max_iter": {
"XXX TODO XXX": "int, optional (default=-1)",
"description": "Hard limit on iterations within solver, or -1 for no limit.",
"type": "integer",
"minimumForOptimizer": 10,
"maximumForOptimizer": 1000,
"distribution": "uniform",
"default": (-1),
},
"decision_function_shape": {
"XXX TODO XXX": "'ovo', 'ovr', default='ovr'",
"description": "Whether to return a one-vs-rest ('ovr') decision function of shape (n_samples, n_classes) as all other classifiers, or the original one-vs-one ('ovo') decision function of libsvm which has shape (n_samples, n_classes * (n_classes - 1) / 2)",
"enum": ["ovr", "ovo"],
"default": "ovr",
},
"break_ties": {
"type": "boolean",
"default": False,
"description": "If true, decision_function_shape='ovr', and number of classes > 2, predict will break ties according to the confidence values of decision_function; otherwise the first class among the tied classes is returned.",
},
"random_state": {
"anyOf": [
{"type": "integer"},
{"laleType": "numpy.random.RandomState"},
{"enum": [None]},
],
"default": None,
"description": "The seed of the pseudo random number generator used when shuffling the data for probability estimates",
},
},
},
{"XXX TODO XXX": "Parameter: coef0 > only significant in 'poly' and 'sigmoid'"},
],
}
_input_fit_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Fit the SVM model according to the given training data.",
"type": "object",
"required": ["X", "y"],
"properties": {
"X": {
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
"description": "Training vectors, where n_samples is the number of samples and n_features is the number of features",
},
"y": {
"type": "array",
"items": {"type": "number"},
"description": "Target values (class labels in classification, real numbers in regression)",
},
"sample_weight": {
"type": "array",
"items": {"type": "number"},
"description": "Per-sample weights",
},
},
}
_input_predict_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Perform classification on samples in X.",
"type": "object",
"required": ["X"],
"properties": {
"X": {
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
"description": 'For kernel="precomputed", the expected shape of X is [n_samples_test, n_samples_train]',
}
},
}
_output_predict_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Class labels for samples in X.",
"type": "array",
"items": {"type": "number"},
}
_input_predict_proba_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Compute probabilities of possible outcomes for samples in X.",
"type": "object",
"required": ["X"],
"properties": {
"X": {
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
"description": 'For kernel="precomputed", the expected shape of X is [n_samples_test, n_samples_train]',
}
},
}
_output_predict_proba_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Returns the probability of the sample for each class in the model",
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
}
_input_decision_function_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Evaluates the decision function for the samples in X.",
"type": "object",
"required": ["X"],
"properties": {
"X": {"type": "array", "items": {"type": "array", "items": {"type": "number"}}}
},
}
_output_decision_function_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Returns the decision function of the sample for each class in the model",
"laleType": "Any",
"XXX TODO XXX": "array-like, shape (n_samples, n_classes * (n_classes-1) / 2)",
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Combined schema for expected data and hyperparameters.",
"documentation_url": "https://scikit-learn.org/0.20/modules/generated/sklearn.svm.NuSVC#sklearn-svm-nusvc",
"import_from": "sklearn.svm",
"type": "object",
"tags": {"pre": [], "op": ["estimator", "classifier"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
"input_predict_proba": _input_predict_proba_schema,
"output_predict_proba": _output_predict_proba_schema,
"input_decision_function": _input_decision_function_schema,
"output_decision_function": _output_decision_function_schema,
},
}
NuSVC = make_operator(_NuSVCImpl, _combined_schemas)
set_docstrings(NuSVC)
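# --- Illustrative usage sketch (documentation aid, not part of the generated
# wrapper); the toy two-class data and nu value below are assumptions.
if __name__ == "__main__":
    import numpy as np

    train_X = np.array([[0.0, 0.0], [0.2, 0.1], [1.0, 1.0], [1.2, 0.9]])
    train_y = np.array([0, 0, 1, 1])
    trained = NuSVC(nu=0.5).fit(train_X, train_y)
    print(trained.predict(np.array([[0.1, 0.0], [1.1, 1.0]])))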
| 10,382 | 38.479087 | 277 |
py
|
lale
|
lale-master/lale/lib/autogen/binarizer.py
|
from numpy import inf, nan
from sklearn.preprocessing import Binarizer as Op
from lale.docstrings import set_docstrings
from lale.operators import make_operator
class _BinarizerImpl:
def __init__(self, **hyperparams):
self._hyperparams = hyperparams
self._wrapped_model = Op(**self._hyperparams)
def fit(self, X, y=None):
if y is not None:
self._wrapped_model.fit(X, y)
else:
self._wrapped_model.fit(X)
return self
def transform(self, X):
return self._wrapped_model.transform(X)
_hyperparams_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "inherited docstring for Binarizer Binarize data (set feature values to 0 or 1) according to a threshold",
"allOf": [
{
"type": "object",
"required": ["threshold", "copy"],
"relevantToOptimizer": ["copy"],
"additionalProperties": False,
"properties": {
"threshold": {
"XXX TODO XXX": "float, optional (0.0 by default)",
"description": "Feature values below or equal to this are replaced by 0, above it by 1",
"type": "number",
"default": 0.0,
},
"copy": {
"type": "boolean",
"default": True,
"description": "set to False to perform inplace binarization and avoid a copy (if the input is already a numpy array or a scipy.sparse CSR matrix).",
},
},
}
],
}
_input_fit_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Do nothing and return the estimator unchanged",
"type": "object",
"required": ["X"],
"properties": {
"X": {
"type": "array",
"items": {"laleType": "Any", "XXX TODO XXX": "item type"},
"XXX TODO XXX": "array-like",
}
},
}
_input_transform_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Binarize each element of X",
"type": "object",
"required": ["X"],
"properties": {
"X": {
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
"description": "The data to binarize, element by element",
},
"y": {"laleType": "Any", "XXX TODO XXX": "(ignored)", "description": ""},
"copy": {"type": "boolean", "description": "Copy the input X or not."},
},
}
_output_transform_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Binarize each element of X",
"laleType": "Any",
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Combined schema for expected data and hyperparameters.",
"documentation_url": "https://scikit-learn.org/0.20/modules/generated/sklearn.preprocessing.Binarizer#sklearn-preprocessing-binarizer",
"import_from": "sklearn.preprocessing",
"type": "object",
"tags": {"pre": [], "op": ["transformer"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_transform": _input_transform_schema,
"output_transform": _output_transform_schema,
},
}
Binarizer = make_operator(_BinarizerImpl, _combined_schemas)
set_docstrings(Binarizer)
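# --- Illustrative usage sketch (documentation aid, not part of the generated
# wrapper); the toy matrix and threshold below are assumptions.
if __name__ == "__main__":
    import numpy as np

    train_X = np.array([[1.0, -1.0, 2.0], [2.0, 0.0, 0.4]])
    trained = Binarizer(threshold=0.5).fit(train_X)
    print(trained.transform(train_X))  # entries above 0.5 map to 1, others to 0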
| 3,475 | 34.111111 | 169 |
py
|
lale
|
lale-master/lale/lib/autogen/multi_task_lasso_cv.py
|
from numpy import inf, nan
from packaging import version
from sklearn.linear_model import MultiTaskLassoCV as Op
import lale.operators
from lale.docstrings import set_docstrings
from lale.operators import make_operator
class _MultiTaskLassoCVImpl:
def __init__(self, **hyperparams):
self._hyperparams = hyperparams
self._wrapped_model = Op(**self._hyperparams)
def fit(self, X, y=None):
if y is not None:
self._wrapped_model.fit(X, y)
else:
self._wrapped_model.fit(X)
return self
def predict(self, X):
return self._wrapped_model.predict(X)
_hyperparams_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "inherited docstring for MultiTaskLassoCV Multi-task Lasso model trained with L1/L2 mixed-norm as regularizer.",
"allOf": [
{
"type": "object",
"required": [
"eps",
"n_alphas",
"alphas",
"fit_intercept",
"normalize",
"max_iter",
"tol",
"copy_X",
"cv",
"verbose",
"n_jobs",
"random_state",
"selection",
],
"relevantToOptimizer": [
"eps",
"n_alphas",
"fit_intercept",
"normalize",
"max_iter",
"tol",
"copy_X",
"cv",
],
"additionalProperties": False,
"properties": {
"eps": {
"type": "number",
"minimumForOptimizer": 0.001,
"maximumForOptimizer": 0.1,
"distribution": "loguniform",
"default": 0.001,
"description": "Length of the path",
},
"n_alphas": {
"type": "integer",
"minimumForOptimizer": 100,
"maximumForOptimizer": 101,
"distribution": "uniform",
"default": 100,
"description": "Number of alphas along the regularization path",
},
"alphas": {
"anyOf": [
{
"type": "array",
"items": {"laleType": "Any", "XXX TODO XXX": "item type"},
"XXX TODO XXX": "array-like, optional",
},
{"enum": [None]},
],
"default": None,
"description": "List of alphas where to compute the models",
},
"fit_intercept": {
"type": "boolean",
"default": True,
"description": "whether to calculate the intercept for this model",
},
"normalize": {
"type": "boolean",
"default": False,
"description": "This parameter is ignored when ``fit_intercept`` is set to False",
},
"max_iter": {
"type": "integer",
"minimumForOptimizer": 10,
"maximumForOptimizer": 1000,
"distribution": "uniform",
"default": 1000,
"description": "The maximum number of iterations.",
},
"tol": {
"type": "number",
"minimumForOptimizer": 1e-08,
"maximumForOptimizer": 0.01,
"default": 0.0001,
"description": "The tolerance for the optimization: if the updates are smaller than ``tol``, the optimization code checks the dual gap for optimality and continues until it is smaller than ``tol``.",
},
"copy_X": {
"type": "boolean",
"default": True,
"description": "If ``True``, X will be copied; else, it may be overwritten.",
},
"cv": {
"description": """Cross-validation as integer or as object that has a split function.
The fit method performs cross validation on the input dataset per
trial, and uses the mean cross validation performance for optimization.
This behavior is also impacted by handle_cv_failure flag.
If integer: number of folds in sklearn.model_selection.StratifiedKFold.
If object with split function: generator yielding (train, test) splits
as arrays of indices. Can use any of the iterators from
https://scikit-learn.org/stable/modules/cross_validation.html#cross-validation-iterators.""",
"anyOf": [
{
"type": "integer",
"minimum": 1,
"default": 5,
"minimumForOptimizer": 3,
"maximumForOptimizer": 4,
"distribution": "uniform",
},
{"laleType": "Any", "forOptimizer": False},
],
},
"verbose": {
"anyOf": [{"type": "boolean"}, {"type": "integer"}],
"default": False,
"description": "Amount of verbosity.",
},
"n_jobs": {
"anyOf": [{"type": "integer"}, {"enum": [None]}],
"default": 1,
"description": "Number of CPUs to use during the cross validation",
},
"random_state": {
"anyOf": [
{"type": "integer"},
{"laleType": "numpy.random.RandomState"},
{"enum": [None]},
],
"default": None,
"description": "The seed of the pseudo random number generator that selects a random feature to update",
},
"selection": {
"type": "string",
"default": "cyclic",
"description": "If set to 'random', a random coefficient is updated every iteration rather than looping over features sequentially by default",
},
},
},
{
"XXX TODO XXX": "Parameter: n_jobs > only if multiple values for l1_ratio are given"
},
],
}
_input_fit_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Fit linear model with coordinate descent",
"type": "object",
"required": ["X", "y"],
"properties": {
"X": {
"laleType": "Any",
"XXX TODO XXX": "{array-like}, shape (n_samples, n_features)",
"description": "Training data",
},
"y": {
"anyOf": [
{"type": "array", "items": {"type": "number"}},
{
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
},
],
"description": "Target values",
},
},
}
_input_predict_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Predict using the linear model",
"type": "object",
"required": ["X"],
"properties": {
"X": {
"anyOf": [
{
"type": "array",
"items": {"laleType": "Any", "XXX TODO XXX": "item type"},
"XXX TODO XXX": "array_like or sparse matrix, shape (n_samples, n_features)",
},
{
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
},
],
"description": "Samples.",
}
},
}
_output_predict_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Returns predicted values.",
"type": "array",
"items": {"type": "number"},
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Combined schema for expected data and hyperparameters.",
"documentation_url": "https://scikit-learn.org/0.20/modules/generated/sklearn.linear_model.MultiTaskLassoCV#sklearn-linear_model-multitasklassocv",
"import_from": "sklearn.linear_model",
"type": "object",
"tags": {"pre": [], "op": ["estimator"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
},
}
MultiTaskLassoCV = make_operator(_MultiTaskLassoCVImpl, _combined_schemas)
if lale.operators.sklearn_version >= version.Version("1.2"):
# old: https://scikit-learn.org/1.1/modules/generated/sklearn.linear_model.MultiTaskLassoCV.html
# new: https://scikit-learn.org/1.2/modules/generated/sklearn.linear_model.MultiTaskLassoCV.html
MultiTaskLassoCV = MultiTaskLassoCV.customize_schema(normalize=None)
set_docstrings(MultiTaskLassoCV)
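# --- Illustrative usage sketch (documentation aid, not part of the generated
# wrapper); the toy multi-task data and cv=3 choice below are assumptions.
if __name__ == "__main__":
    import numpy as np

    rng = np.random.RandomState(0)
    train_X = rng.randn(12, 3)
    train_Y = np.column_stack([train_X @ [1.0, 0.0, 0.0], train_X @ [1.0, 0.5, 0.0]])
    trained = MultiTaskLassoCV(cv=3).fit(train_X, train_Y)
    print(trained.predict(train_X[:2]))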
| 9,589 | 38.628099 | 219 |
py
|
lale
|
lale-master/lale/lib/autogen/locally_linear_embedding.py
|
from numpy import inf, nan
from sklearn.manifold import LocallyLinearEmbedding as Op
from lale.docstrings import set_docstrings
from lale.operators import make_operator
class _LocallyLinearEmbeddingImpl:
def __init__(self, **hyperparams):
self._hyperparams = hyperparams
self._wrapped_model = Op(**self._hyperparams)
def fit(self, X, y=None):
if y is not None:
self._wrapped_model.fit(X, y)
else:
self._wrapped_model.fit(X)
return self
def transform(self, X):
return self._wrapped_model.transform(X)
_hyperparams_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "inherited docstring for LocallyLinearEmbedding Locally Linear Embedding",
"allOf": [
{
"type": "object",
"required": [
"n_neighbors",
"n_components",
"reg",
"eigen_solver",
"tol",
"max_iter",
"method",
"hessian_tol",
"modified_tol",
"neighbors_algorithm",
"random_state",
"n_jobs",
],
"relevantToOptimizer": [
"n_neighbors",
"n_components",
"eigen_solver",
"tol",
"max_iter",
"method",
"neighbors_algorithm",
],
"additionalProperties": False,
"properties": {
"n_neighbors": {
"type": "integer",
"minimumForOptimizer": 5,
"maximumForOptimizer": 20,
"distribution": "uniform",
"default": 5,
"description": "number of neighbors to consider for each point.",
},
"n_components": {
"type": "integer",
"minimun": 1,
"laleMaximum": "X/items/maxItems",
"minimumForOptimizer": 2,
"maximumForOptimizer": 256,
"distribution": "uniform",
"default": 2,
"description": "number of coordinates for the manifold",
},
"reg": {
"type": "number",
"default": 0.001,
"description": "regularization constant, multiplies the trace of the local covariance matrix of the distances.",
},
"eigen_solver": {
"enum": ["auto", "arpack", "dense"],
"default": "auto",
"description": "auto : algorithm will attempt to choose the best method for input data arpack : use arnoldi iteration in shift-invert mode",
},
"tol": {
"type": "number",
"minimumForOptimizer": 1e-08,
"maximumForOptimizer": 0.01,
"default": 1e-06,
"description": "Tolerance for 'arpack' method Not used if eigen_solver=='dense'.",
},
"max_iter": {
"type": "integer",
"minimumForOptimizer": 10,
"maximumForOptimizer": 1000,
"distribution": "uniform",
"default": 100,
"description": "maximum number of iterations for the arpack solver",
},
"method": {
"XXX TODO XXX": "string ('standard', 'hessian', 'modified' or 'ltsa')",
"description": "standard : use the standard locally linear embedding algorithm",
"enum": ["ltsa", "modified", "standard", "hessian"],
"default": "standard",
},
"hessian_tol": {
"type": "number",
"default": 0.0001,
"description": "Tolerance for Hessian eigenmapping method",
},
"modified_tol": {
"type": "number",
"default": 1e-12,
"description": "Tolerance for modified LLE method",
},
"neighbors_algorithm": {
"enum": ["auto", "brute", "kd_tree", "ball_tree"],
"default": "auto",
"description": "algorithm to use for nearest neighbors search, passed to neighbors.NearestNeighbors instance",
},
"random_state": {
"anyOf": [
{"type": "integer"},
{"laleType": "numpy.random.RandomState"},
{"enum": [None]},
],
"default": None,
"description": "If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`",
},
"n_jobs": {
"anyOf": [{"type": "integer"}, {"enum": [None]}],
"default": 1,
"description": "The number of parallel jobs to run",
},
},
},
{
"description": "hessian_tol, only used if method == 'hessian'",
"anyOf": [
{"type": "object", "properties": {"hessian_tol": {"enum": [0.0001]}}},
{"type": "object", "properties": {"method": {"enum": ["hessian"]}}},
],
},
{
"description": "modified_tol, only used if method == 'modified'",
"anyOf": [
{"type": "object", "properties": {"modified_tol": {"enum": [1e-12]}}},
{"type": "object", "properties": {"method": {"enum": ["modified"]}}},
],
},
{
"description": "for method='hessian', n_neighbors must be greater than [n_components * (n_components + 3) / 2]",
"anyOf": [
{"type": "object", "properties": {"method": {"enum": ["standard"]}}},
{
"type": "object",
"properties": {"method": {"not": {"enum": ["hessian"]}}},
},
{
"XXX TODO XXX": "self.n_neighbors > self.n_components + self.n_components * (self.n_components + 1) // 2"
},
],
},
{
"description": "A sparse matrix was passed, but dense data is required. Use X.toarray() to convert to a dense numpy array.) ",
"type": "object",
"laleNot": "X/isSparse",
},
],
}
_input_fit_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Compute the embedding vectors for data X",
"type": "object",
"required": ["X", "y"],
"properties": {
"X": {
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
"description": "training set.",
},
"y": {},
},
}
_input_transform_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Transform new points into embedding space.",
"type": "object",
"required": ["X"],
"properties": {
"X": {"type": "array", "items": {"type": "array", "items": {"type": "number"}}}
},
}
_output_transform_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Transform new points into embedding space.",
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Combined schema for expected data and hyperparameters.",
"documentation_url": "https://scikit-learn.org/0.20/modules/generated/sklearn.manifold.LocallyLinearEmbedding#sklearn-manifold-locallylinearembedding",
"import_from": "sklearn.manifold",
"type": "object",
"tags": {"pre": [], "op": ["transformer"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_transform": _input_transform_schema,
"output_transform": _output_transform_schema,
},
}
LocallyLinearEmbedding = make_operator(_LocallyLinearEmbeddingImpl, _combined_schemas)
set_docstrings(LocallyLinearEmbedding)
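# --- Illustrative usage sketch (documentation aid, not part of the generated
# wrapper); the toy point cloud and hyperparameter choices are assumptions.
if __name__ == "__main__":
    import numpy as np

    train_X = np.random.RandomState(42).randn(20, 4)
    trained = LocallyLinearEmbedding(n_neighbors=5, n_components=2).fit(train_X)
    print(trained.transform(train_X).shape)  # (20, 2) embedding coordinates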
| 8,633 | 39.15814 | 262 |
py
|
lale
|
lale-master/lale/lib/autogen/multi_task_lasso.py
|
from numpy import inf, nan
from packaging import version
from sklearn.linear_model import MultiTaskLasso as Op
import lale.operators
from lale.docstrings import set_docstrings
from lale.operators import make_operator
class _MultiTaskLassoImpl:
def __init__(self, **hyperparams):
self._hyperparams = hyperparams
self._wrapped_model = Op(**self._hyperparams)
def fit(self, X, y=None):
if y is not None:
self._wrapped_model.fit(X, y)
else:
self._wrapped_model.fit(X)
return self
def predict(self, X):
return self._wrapped_model.predict(X)
_hyperparams_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "inherited docstring for MultiTaskLasso Multi-task Lasso model trained with L1/L2 mixed-norm as regularizer.",
"allOf": [
{
"type": "object",
"required": [
"alpha",
"fit_intercept",
"normalize",
"copy_X",
"max_iter",
"tol",
"warm_start",
"random_state",
"selection",
],
"relevantToOptimizer": [
"alpha",
"fit_intercept",
"normalize",
"copy_X",
"max_iter",
"tol",
],
"additionalProperties": False,
"properties": {
"alpha": {
"type": "number",
"minimumForOptimizer": 1e-10,
"maximumForOptimizer": 1.0,
"distribution": "loguniform",
"default": 1.0,
"description": "Constant that multiplies the L1/L2 term",
},
"fit_intercept": {
"type": "boolean",
"default": True,
"description": "whether to calculate the intercept for this model",
},
"normalize": {
"type": "boolean",
"default": False,
"description": "This parameter is ignored when ``fit_intercept`` is set to False",
},
"copy_X": {
"type": "boolean",
"default": True,
"description": "If ``True``, X will be copied; else, it may be overwritten.",
},
"max_iter": {
"type": "integer",
"minimumForOptimizer": 10,
"maximumForOptimizer": 1000,
"distribution": "uniform",
"default": 1000,
"description": "The maximum number of iterations",
},
"tol": {
"type": "number",
"minimumForOptimizer": 1e-08,
"maximumForOptimizer": 0.01,
"default": 0.0001,
"description": "The tolerance for the optimization: if the updates are smaller than ``tol``, the optimization code checks the dual gap for optimality and continues until it is smaller than ``tol``.",
},
"warm_start": {
"type": "boolean",
"default": False,
"description": "When set to ``True``, reuse the solution of the previous call to fit as initialization, otherwise, just erase the previous solution",
},
"random_state": {
"anyOf": [
{"type": "integer"},
{"laleType": "numpy.random.RandomState"},
{"enum": [None]},
],
"default": None,
"description": "The seed of the pseudo random number generator that selects a random feature to update",
},
"selection": {
"type": "string",
"default": "cyclic",
"description": "If set to 'random', a random coefficient is updated every iteration rather than looping over features sequentially by default",
},
},
}
],
}
_input_fit_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Fit MultiTaskElasticNet model with coordinate descent",
"type": "object",
"required": ["X", "y"],
"properties": {
"X": {
"laleType": "Any",
"XXX TODO XXX": "ndarray, shape (n_samples, n_features)",
"description": "Data",
},
"y": {
"laleType": "Any",
"XXX TODO XXX": "ndarray, shape (n_samples, n_tasks)",
"description": "Target",
},
},
}
_input_predict_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Predict using the linear model",
"type": "object",
"required": ["X"],
"properties": {
"X": {
"anyOf": [
{
"type": "array",
"items": {"laleType": "Any", "XXX TODO XXX": "item type"},
"XXX TODO XXX": "array_like or sparse matrix, shape (n_samples, n_features)",
},
{
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
},
],
"description": "Samples.",
}
},
}
_output_predict_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Returns predicted values.",
"type": "array",
"items": {"type": "number"},
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Combined schema for expected data and hyperparameters.",
"documentation_url": "https://scikit-learn.org/0.20/modules/generated/sklearn.linear_model.MultiTaskLasso#sklearn-linear_model-multitasklasso",
"import_from": "sklearn.linear_model",
"type": "object",
"tags": {"pre": [], "op": ["estimator"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
},
}
MultiTaskLasso = make_operator(_MultiTaskLassoImpl, _combined_schemas)
if lale.operators.sklearn_version >= version.Version("1.2"):
# old: https://scikit-learn.org/1.1/modules/generated/sklearn.linear_model.MultiTaskLasso.html
# new: https://scikit-learn.org/1.2/modules/generated/sklearn.linear_model.MultiTaskLasso.html
MultiTaskLasso = MultiTaskLasso.customize_schema(normalize=None)
set_docstrings(MultiTaskLasso)
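# A minimal usage sketch (illustrative addition, not part of the autogen
# output): fits the operator on synthetic multi-task data; alpha=0.5 and the
# data shapes are assumptions for the demo, not recommendations.
if __name__ == "__main__":
    import numpy as np

    rng = np.random.RandomState(0)
    X = rng.rand(20, 4)  # 20 samples, 4 features
    Y = rng.rand(20, 2)  # 2 regression tasks per sample
    trained = MultiTaskLasso(alpha=0.5).fit(X, Y)
    print(trained.predict(X).shape)  # expected: (20, 2)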
| 6,854 | 36.664835 | 219 |
py
|
lale
|
lale-master/lale/lib/autogen/kernel_pca.py
|
from numpy import inf, nan
from sklearn.decomposition import KernelPCA as Op
from lale.docstrings import set_docstrings
from lale.operators import make_operator
class _KernelPCAImpl:
def __init__(self, **hyperparams):
self._hyperparams = hyperparams
self._wrapped_model = Op(**self._hyperparams)
def fit(self, X, y=None):
if y is not None:
self._wrapped_model.fit(X, y)
else:
self._wrapped_model.fit(X)
return self
def transform(self, X):
return self._wrapped_model.transform(X)
_hyperparams_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "inherited docstring for KernelPCA Kernel Principal component analysis (KPCA)",
"allOf": [
{
"type": "object",
"required": [
"n_components",
"kernel",
"gamma",
"degree",
"coef0",
"kernel_params",
"alpha",
"fit_inverse_transform",
"eigen_solver",
"tol",
"max_iter",
"remove_zero_eig",
"random_state",
"copy_X",
"n_jobs",
],
"relevantToOptimizer": [
"n_components",
"kernel",
"degree",
"coef0",
"alpha",
"eigen_solver",
"tol",
"max_iter",
"remove_zero_eig",
"copy_X",
],
"additionalProperties": False,
"properties": {
"n_components": {
"anyOf": [
{
"type": "integer",
"minimumForOptimizer": 2,
"maximumForOptimizer": 256,
"distribution": "uniform",
},
{"enum": [None]},
],
"default": None,
"description": "Number of components",
},
"kernel": {
"enum": [
"linear",
"poly",
"rbf",
"sigmoid",
"cosine",
"precomputed",
],
"default": "linear",
"description": "Kernel",
},
"gamma": {
"XXX TODO XXX": "float, default=1/n_features",
"description": "Kernel coefficient for rbf, poly and sigmoid kernels",
"enum": [None],
"default": None,
},
"degree": {
"type": "integer",
"minimumForOptimizer": 2,
"maximumForOptimizer": 3,
"distribution": "uniform",
"default": 3,
"description": "Degree for poly kernels",
},
"coef0": {
"type": "number",
"minimumForOptimizer": 0.0,
"maximumForOptimizer": 1.0,
"distribution": "uniform",
"default": 1,
"description": "Independent term in poly and sigmoid kernels",
},
"kernel_params": {
"XXX TODO XXX": "mapping of string to any, default=None",
"description": "Parameters (keyword arguments) and values for kernel passed as callable object",
"enum": [None],
"default": None,
},
"alpha": {
"anyOf": [
{"type": "integer", "forOptimizer": False},
{
"type": "number",
"minimumForOptimizer": 1e-10,
"maximumForOptimizer": 1.0,
"distribution": "loguniform",
},
],
"default": 1.0,
"description": "Hyperparameter of the ridge regression that learns the inverse transform (when fit_inverse_transform=True).",
},
"fit_inverse_transform": {
"type": "boolean",
"default": False,
"description": "Learn the inverse transform for non-precomputed kernels",
},
"eigen_solver": {
"enum": ["auto", "dense", "arpack"],
"default": "auto",
"description": "Select eigensolver to use",
},
"tol": {
"type": "number",
"minimumForOptimizer": 1e-08,
"maximumForOptimizer": 0.01,
"default": 0,
"description": "Convergence tolerance for arpack",
},
"max_iter": {
"anyOf": [
{
"type": "integer",
"minimumForOptimizer": 10,
"maximumForOptimizer": 1000,
"distribution": "uniform",
},
{"enum": [None]},
],
"default": None,
"description": "Maximum number of iterations for arpack",
},
"remove_zero_eig": {
"type": "boolean",
"default": False,
"description": "If True, then all components with zero eigenvalues are removed, so that the number of components in the output may be < n_components (and sometimes even zero due to numerical instability)",
},
"random_state": {
"anyOf": [
{"type": "integer"},
{"laleType": "numpy.random.RandomState"},
{"enum": [None]},
],
"default": None,
"description": "If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`",
},
"copy_X": {
"type": "boolean",
"default": True,
"description": "If True, input X is copied and stored by the model in the `X_fit_` attribute",
},
"n_jobs": {
"anyOf": [{"type": "integer"}, {"enum": [None]}],
"default": 1,
"description": "The number of parallel jobs to run",
},
},
},
{
"description": "Cannot fit_inverse_transform with a precomputed kernel.",
"anyOf": [
{
"type": "object",
"properties": {"fit_inverse_transform": {"enum": [False]}},
},
{
"type": "object",
"properties": {"kernel": {"not": {"enum": ["precomputed"]}}},
},
],
},
],
}
_input_fit_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Fit the model from data in X.",
"type": "object",
"required": ["X"],
"properties": {
"X": {
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
"description": "Training vector, where n_samples in the number of samples and n_features is the number of features.",
}
},
}
_input_transform_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Transform X.",
"type": "object",
"required": ["X"],
"properties": {
"X": {"type": "array", "items": {"type": "array", "items": {"type": "number"}}}
},
}
_output_transform_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Transform X.",
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Combined schema for expected data and hyperparameters.",
"documentation_url": "https://scikit-learn.org/0.20/modules/generated/sklearn.decomposition.KernelPCA#sklearn-decomposition-kernelpca",
"import_from": "sklearn.decomposition",
"type": "object",
"tags": {"pre": [], "op": ["transformer"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_transform": _input_transform_schema,
"output_transform": _output_transform_schema,
},
}
KernelPCA = make_operator(_KernelPCAImpl, _combined_schemas)
set_docstrings(KernelPCA)
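# A minimal usage sketch (illustrative addition, not part of the autogen
# output): projects synthetic data onto two kernel principal components;
# the rbf kernel and n_components=2 are demo assumptions.
if __name__ == "__main__":
    import numpy as np

    X = np.random.RandomState(0).rand(20, 4)
    trained = KernelPCA(n_components=2, kernel="rbf").fit(X)
    print(trained.transform(X).shape)  # expected: (20, 2)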
| 9,282 | 37.201646 | 262 |
py
|
lale
|
lale-master/lale/lib/autogen/elastic_net_cv.py
|
from numpy import inf, nan
from packaging import version
from sklearn.linear_model import ElasticNetCV as Op
from lale.docstrings import set_docstrings
from lale.operators import make_operator, sklearn_version
class _ElasticNetCVImpl:
def __init__(self, **hyperparams):
self._hyperparams = hyperparams
self._wrapped_model = Op(**self._hyperparams)
def fit(self, X, y=None):
if y is not None:
self._wrapped_model.fit(X, y)
else:
self._wrapped_model.fit(X)
return self
def predict(self, X):
return self._wrapped_model.predict(X)
_hyperparams_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "inherited docstring for ElasticNetCV Elastic Net model with iterative fitting along a regularization path.",
"allOf": [
{
"type": "object",
"required": [
"l1_ratio",
"eps",
"n_alphas",
"alphas",
"fit_intercept",
"normalize",
"precompute",
"max_iter",
"tol",
"cv",
"copy_X",
"verbose",
"n_jobs",
"positive",
"random_state",
"selection",
],
"relevantToOptimizer": [
"l1_ratio",
"eps",
"n_alphas",
"alpha",
"fit_intercept",
"normalize",
"precompute",
"max_iter",
"tol",
"cv",
"copy_X",
"positive",
"selection",
],
"additionalProperties": False,
"properties": {
"l1_ratio": {
"XXX TODO XXX": "float or array of floats, optional",
"description": "float between 0 and 1 passed to ElasticNet (scaling between l1 and l2 penalties)",
"type": "number",
"minimumForOptimizer": 0.0,
"maximumForOptimizer": 1.0,
"distribution": "uniform",
"default": 0.5,
},
"eps": {
"type": "number",
"minimumForOptimizer": 0.001,
"maximumForOptimizer": 0.1,
"distribution": "loguniform",
"default": 0.001,
"description": "Length of the path",
},
"n_alphas": {
"type": "integer",
"minimumForOptimizer": 100,
"maximumForOptimizer": 101,
"distribution": "uniform",
"default": 100,
"description": "Number of alphas along the regularization path, used for each l1_ratio.",
},
"alphas": {
"anyOf": [
{
"type": "array",
"items": {"laleType": "Any", "XXX TODO XXX": "item type"},
"XXX TODO XXX": "numpy array, optional",
},
{"enum": [None]},
],
"default": None,
"description": "List of alphas where to compute the models",
},
"fit_intercept": {
"type": "boolean",
"default": True,
"description": "whether to calculate the intercept for this model",
},
"normalize": {
"type": "boolean",
"default": False,
"description": "This parameter is ignored when ``fit_intercept`` is set to False",
},
"precompute": {
"anyOf": [
{
"type": "array",
"items": {"laleType": "Any", "XXX TODO XXX": "item type"},
"XXX TODO XXX": "True | False | 'auto' | array-like",
"forOptimizer": False,
},
{"enum": ["auto"]},
],
"default": "auto",
"description": "Whether to use a precomputed Gram matrix to speed up calculations",
},
"max_iter": {
"type": "integer",
"minimumForOptimizer": 10,
"maximumForOptimizer": 1000,
"distribution": "uniform",
"default": 1000,
"description": "The maximum number of iterations",
},
"tol": {
"type": "number",
"minimumForOptimizer": 1e-08,
"maximumForOptimizer": 0.01,
"default": 0.0001,
"description": "The tolerance for the optimization: if the updates are smaller than ``tol``, the optimization code checks the dual gap for optimality and continues until it is smaller than ``tol``.",
},
"cv": {
"description": """Cross-validation as integer or as object that has a split function.
The fit method performs cross validation on the input dataset per
trial, and uses the mean cross validation performance for optimization.
This behavior is also impacted by handle_cv_failure flag.
If integer: number of folds in sklearn.model_selection.StratifiedKFold.
If object with split function: generator yielding (train, test) splits
as arrays of indices. Can use any of the iterators from
https://scikit-learn.org/stable/modules/cross_validation.html#cross-validation-iterators.""",
"anyOf": [
{
"type": "integer",
"minimum": 1,
"default": 5,
"minimumForOptimizer": 3,
"maximumForOptimizer": 4,
"distribution": "uniform",
},
{"laleType": "Any", "forOptimizer": False},
],
},
"copy_X": {
"type": "boolean",
"default": True,
"description": "If ``True``, X will be copied; else, it may be overwritten.",
},
"verbose": {
"anyOf": [{"type": "boolean"}, {"type": "integer"}],
"default": 0,
"description": "Amount of verbosity.",
},
"n_jobs": {
"anyOf": [{"type": "integer"}, {"enum": [None]}],
"default": 1,
"description": "Number of CPUs to use during the cross validation",
},
"positive": {
"type": "boolean",
"default": False,
"description": "When set to ``True``, forces the coefficients to be positive.",
},
"random_state": {
"anyOf": [
{"type": "integer"},
{"laleType": "numpy.random.RandomState"},
{"enum": [None]},
],
"default": None,
"description": "The seed of the pseudo random number generator that selects a random feature to update",
},
"selection": {
"enum": ["random", "cyclic"],
"default": "cyclic",
"description": "If set to 'random', a random coefficient is updated every iteration rather than looping over features sequentially by default",
},
},
},
{
"description": "From /linear_model/_coordinate_descent.py:None:_alpha_grid, Exception: raise ValueError( "
"Automatic alpha grid generation is not supported for l1_ratio=0. Please supply a grid by providing your estimator with the appropriate `alphas=` argument.",
"anyOf": [
{"type": "object", "properties": {"alphas": {"not": {"enum": [None]}}}},
{"type": "object", "properties": {"l1_ratio": {"not": {"enum": [0]}}}},
],
},
],
}
_input_fit_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Fit linear model with coordinate descent",
"type": "object",
"required": ["X", "y"],
"properties": {
"X": {
"laleType": "Any",
"XXX TODO XXX": "{array-like}, shape (n_samples, n_features)",
"description": "Training data",
},
"y": {
"anyOf": [
{"type": "array", "items": {"type": "number"}},
{
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
},
],
"description": "Target values",
},
},
}
_input_predict_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Predict using the linear model",
"type": "object",
"required": ["X"],
"properties": {
"X": {
"anyOf": [
{
"type": "array",
"items": {"laleType": "Any", "XXX TODO XXX": "item type"},
"XXX TODO XXX": "array_like or sparse matrix, shape (n_samples, n_features)",
},
{
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
},
],
"description": "Samples.",
}
},
}
_output_predict_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Returns predicted values.",
"type": "array",
"items": {"type": "number"},
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Combined schema for expected data and hyperparameters.",
"documentation_url": "https://scikit-learn.org/0.20/modules/generated/sklearn.linear_model.ElasticNetCV#sklearn-linear_model-elasticnetcv",
"import_from": "sklearn.linear_model",
"type": "object",
"tags": {"pre": [], "op": ["estimator", "regressor"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
},
}
ElasticNetCV = make_operator(_ElasticNetCVImpl, _combined_schemas)
if sklearn_version >= version.Version("1.2"):
# new: "https://scikit-learn.org/1.2/modules/generated/sklearn.linear_model.ElasticNetCV#sklearn-linear_model-elasticnetcv"
ElasticNetCV = ElasticNetCV.customize_schema(
normalize=None,
set_as_available=True,
)
set_docstrings(ElasticNetCV)
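# A minimal usage sketch (illustrative addition, not part of the autogen
# output): runs the cross-validated fit on synthetic regression data; cv=3
# and the toy coefficients are assumptions for the demo.
if __name__ == "__main__":
    import numpy as np

    rng = np.random.RandomState(0)
    X = rng.rand(30, 4)
    y = X @ np.array([1.0, 2.0, 0.0, -1.0]) + 0.01 * rng.rand(30)
    trained = ElasticNetCV(cv=3).fit(X, y)
    print(trained.predict(X).shape)  # expected: (30,)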
| 11,493 | 39.329825 | 219 |
py
|
lale
|
lale-master/lale/lib/autogen/mini_batch_k_means.py
|
from numpy import inf, nan
from sklearn.cluster import MiniBatchKMeans as Op
from lale.docstrings import set_docstrings
from lale.operators import make_operator
class _MiniBatchKMeansImpl:
def __init__(self, **hyperparams):
self._hyperparams = hyperparams
self._wrapped_model = Op(**self._hyperparams)
def fit(self, X, y=None):
if y is not None:
self._wrapped_model.fit(X, y)
else:
self._wrapped_model.fit(X)
return self
def transform(self, X):
return self._wrapped_model.transform(X)
def predict(self, X):
return self._wrapped_model.predict(X)
_hyperparams_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "inherited docstring for MiniBatchKMeans Mini-Batch K-Means clustering",
"allOf": [
{
"type": "object",
"required": [
"n_clusters",
"init",
"max_iter",
"batch_size",
"verbose",
"compute_labels",
"random_state",
"tol",
"max_no_improvement",
"init_size",
"n_init",
"reassignment_ratio",
],
"relevantToOptimizer": [
"n_clusters",
"init",
"max_iter",
"batch_size",
"compute_labels",
"tol",
"max_no_improvement",
"n_init",
],
"additionalProperties": False,
"properties": {
"n_clusters": {
"type": "integer",
"minimumForOptimizer": 2,
"maximumForOptimizer": 8,
"distribution": "uniform",
"default": 8,
"description": "The number of clusters to form as well as the number of centroids to generate.",
},
"init": {
"anyOf": [
{"enum": ["k-means++", "random"]},
{"laleType": "callable", "forOptimizer": False},
],
"default": "k-means++",
"description": "Method for initialization, defaults to 'k-means++': 'k-means++' : selects initial cluster centers for k-mean clustering in a smart way to speed up convergence",
},
"max_iter": {
"type": "integer",
"minimumForOptimizer": 10,
"maximumForOptimizer": 1000,
"distribution": "uniform",
"default": 100,
"description": "Maximum number of iterations over the complete dataset before stopping independently of any early stopping criterion heuristics.",
},
"batch_size": {
"type": "integer",
"minimumForOptimizer": 3,
"maximumForOptimizer": 128,
"distribution": "uniform",
"default": 100,
"description": "Size of the mini batches.",
},
"verbose": {
"anyOf": [{"type": "boolean"}, {"type": "integer"}],
"default": 0,
"description": "Verbosity mode.",
},
"compute_labels": {
"type": "boolean",
"default": True,
"description": "Compute label assignment and inertia for the complete dataset once the minibatch optimization has converged in fit.",
},
"random_state": {
"anyOf": [
{"type": "integer"},
{"laleType": "numpy.random.RandomState"},
{"enum": [None]},
],
"default": None,
"description": "Determines random number generation for centroid initialization and random reassignment",
},
"tol": {
"type": "number",
"minimumForOptimizer": 1e-08,
"maximumForOptimizer": 0.01,
"default": 0.0,
"description": "Control early stopping based on the relative center changes as measured by a smoothed, variance-normalized of the mean center squared position changes",
},
"max_no_improvement": {
"type": "integer",
"minimumForOptimizer": 10,
"maximumForOptimizer": 11,
"distribution": "uniform",
"default": 10,
"description": "Control early stopping based on the consecutive number of mini batches that does not yield an improvement on the smoothed inertia",
},
"init_size": {
"XXX TODO XXX": "int, optional, default: 3 * batch_size",
"description": "Number of samples to randomly sample for speeding up the initialization (sometimes at the expense of accuracy): the only algorithm is initialized by running a batch KMeans on a random subset of the data",
"enum": [None],
"default": None,
},
"n_init": {
"type": "integer",
"minimumForOptimizer": 3,
"maximumForOptimizer": 10,
"distribution": "uniform",
"default": 3,
"description": "Number of random initializations that are tried",
},
"reassignment_ratio": {
"type": "number",
"default": 0.01,
"description": "Control the fraction of the maximum number of counts for a center to be reassigned",
},
},
},
{
"XXX TODO XXX": "Parameter: init_size > only algorithm is initialized by running a batch kmeans on a random subset of the data"
},
{
"XXX TODO XXX": "Parameter: n_init > only run once, using the best of the n_init initializations as measured by inertia"
},
],
}
_input_fit_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Compute the centroids on X by chunking it into mini-batches.",
"type": "object",
"required": ["X", "y"],
"properties": {
"X": {
"anyOf": [
{
"type": "array",
"items": {"laleType": "Any", "XXX TODO XXX": "item type"},
"XXX TODO XXX": "array-like or sparse matrix, shape=(n_samples, n_features)",
},
{
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
},
],
"description": "Training instances to cluster",
},
"y": {
"description": "not used, present here for API consistency by convention."
},
"sample_weight": {
"anyOf": [{"type": "array", "items": {"type": "number"}}, {"enum": [None]}],
"default": None,
"description": "The weights for each observation in X",
},
},
}
_input_transform_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Transform X to a cluster-distance space.",
"type": "object",
"required": ["X"],
"properties": {
"X": {
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
"description": "New data to transform.",
}
},
}
_output_transform_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "X transformed in the new space.",
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
}
_input_predict_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Predict the closest cluster each sample in X belongs to.",
"type": "object",
"required": ["X"],
"properties": {
"X": {
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
"description": "New data to predict.",
},
"sample_weight": {
"anyOf": [{"type": "array", "items": {"type": "number"}}, {"enum": [None]}],
"default": None,
"description": "The weights for each observation in X",
},
},
}
_output_predict_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Index of the cluster each sample belongs to.",
"type": "array",
"items": {"type": "number"},
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Combined schema for expected data and hyperparameters.",
"documentation_url": "https://scikit-learn.org/0.20/modules/generated/sklearn.cluster.MiniBatchKMeans#sklearn-cluster-minibatchkmeans",
"import_from": "sklearn.cluster",
"type": "object",
"tags": {"pre": [], "op": ["transformer", "estimator"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_transform": _input_transform_schema,
"output_transform": _output_transform_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
},
}
MiniBatchKMeans = make_operator(_MiniBatchKMeansImpl, _combined_schemas)
set_docstrings(MiniBatchKMeans)
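# A minimal usage sketch (illustrative addition, not part of the autogen
# output): clusters two synthetic blobs; n_clusters=2 and random_state=0 are
# demo assumptions.
if __name__ == "__main__":
    import numpy as np

    rng = np.random.RandomState(0)
    X = np.vstack([rng.rand(10, 2), rng.rand(10, 2) + 5.0])  # two blobs
    trained = MiniBatchKMeans(n_clusters=2, random_state=0).fit(X)
    print(trained.predict(X))          # cluster index per sample
    print(trained.transform(X).shape)  # distances to centroids: (20, 2)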
| 9,839 | 39 | 240 |
py
|
lale
|
lale-master/lale/lib/autogen/nearest_centroid.py
|
from numpy import inf, nan
from sklearn.neighbors import NearestCentroid as Op
from lale.docstrings import set_docstrings
from lale.operators import make_operator
class _NearestCentroidImpl:
def __init__(self, **hyperparams):
self._hyperparams = hyperparams
self._wrapped_model = Op(**self._hyperparams)
def fit(self, X, y=None):
if y is not None:
self._wrapped_model.fit(X, y)
else:
self._wrapped_model.fit(X)
return self
def predict(self, X):
return self._wrapped_model.predict(X)
_hyperparams_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "inherited docstring for NearestCentroid Nearest centroid classifier.",
"allOf": [
{
"type": "object",
"required": ["metric", "shrink_threshold"],
"relevantToOptimizer": ["metric", "shrink_threshold"],
"additionalProperties": False,
"properties": {
"metric": {
"anyOf": [
{"laleType": "callable", "forOptimizer": False},
{
"enum": [
"cityblock",
"cosine",
"euclidean",
"l1",
"l2",
"manhattan",
"braycurtis",
"canberra",
"chebyshev",
"correlation",
"dice",
"hamming",
"jaccard",
"kulsinski",
"mahalanobis",
"minkowski",
"rogerstanimoto",
"russellrao",
"seuclidean",
"sokalmichener",
"sokalsneath",
"sqeuclidean",
"yule",
]
},
],
"default": "euclidean",
"description": "The metric to use when calculating distance between instances in a feature array",
},
"shrink_threshold": {
"anyOf": [
{
"type": "number",
"minimumForOptimizer": 0.0,
"maximumForOptimizer": 1.0,
"distribution": "uniform",
},
{"enum": [None]},
],
"default": None,
"description": "Threshold for shrinking centroids to remove features.",
},
},
},
{
"description": "threshold shrinking not supported for sparse input",
"anyOf": [
{"type": "object", "laleNot": "X/isSparse"},
{
"type": "object",
"properties": {"shrink_threshold": {"enum": [None]}},
},
],
},
],
}
_input_fit_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Fit the NearestCentroid model according to the given training data.",
"type": "object",
"required": ["X", "y"],
"properties": {
"X": {
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
"description": "Training vector, where n_samples is the number of samples and n_features is the number of features",
},
"y": {
"type": "array",
"items": {"type": "number"},
"description": "Target values (integers)",
},
},
}
_input_predict_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Perform classification on an array of test vectors X.",
"type": "object",
"required": ["X"],
"properties": {
"X": {"type": "array", "items": {"type": "array", "items": {"type": "number"}}}
},
}
_output_predict_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Perform classification on an array of test vectors X.",
"type": "array",
"items": {"type": "number"},
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Combined schema for expected data and hyperparameters.",
"documentation_url": "https://scikit-learn.org/0.20/modules/generated/sklearn.neighbors.NearestCentroid#sklearn-neighbors-nearestcentroid",
"import_from": "sklearn.neighbors",
"type": "object",
"tags": {"pre": [], "op": ["estimator", "classifier"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
},
}
NearestCentroid = make_operator(_NearestCentroidImpl, _combined_schemas)
set_docstrings(NearestCentroid)
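# A minimal usage sketch (illustrative addition, not part of the autogen
# output): classifies two well-separated synthetic blobs; the euclidean
# metric restates the schema default.
if __name__ == "__main__":
    import numpy as np

    rng = np.random.RandomState(0)
    X = np.vstack([rng.rand(10, 2), rng.rand(10, 2) + 5.0])
    y = np.array([0] * 10 + [1] * 10)
    trained = NearestCentroid(metric="euclidean").fit(X, y)
    print(trained.predict(X))  # expected: ten 0s followed by ten 1s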
| 5,357 | 35.951724 | 143 |
py
|
lale
|
lale-master/lale/lib/autogen/dictionary_learning.py
|
from numpy import inf, nan
from sklearn.decomposition import DictionaryLearning as Op
from lale.docstrings import set_docstrings
from lale.operators import make_operator
class _DictionaryLearningImpl:
def __init__(self, **hyperparams):
self._hyperparams = hyperparams
self._wrapped_model = Op(**self._hyperparams)
def fit(self, X, y=None):
if y is not None:
self._wrapped_model.fit(X, y)
else:
self._wrapped_model.fit(X)
return self
def transform(self, X):
return self._wrapped_model.transform(X)
_hyperparams_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "inherited docstring for DictionaryLearning Dictionary learning",
"allOf": [
{
"type": "object",
"required": [
"n_components",
"alpha",
"max_iter",
"tol",
"fit_algorithm",
"transform_algorithm",
"transform_n_nonzero_coefs",
"transform_alpha",
"n_jobs",
"code_init",
"dict_init",
"verbose",
"split_sign",
"random_state",
"positive_code",
"positive_dict",
],
"relevantToOptimizer": [
"n_components",
"alpha",
"max_iter",
"tol",
"fit_algorithm",
"transform_algorithm",
],
"additionalProperties": False,
"properties": {
"n_components": {
"anyOf": [
{
"type": "integer",
"minimumForOptimizer": 2,
"maximumForOptimizer": 256,
"distribution": "uniform",
},
{"enum": [None]},
],
"default": None,
"description": "number of dictionary elements to extract",
},
"alpha": {
"type": "number",
"minimumForOptimizer": 1e-10,
"maximumForOptimizer": 1.0,
"distribution": "loguniform",
"default": 1,
"description": "sparsity controlling parameter",
},
"max_iter": {
"type": "integer",
"minimumForOptimizer": 10,
"maximumForOptimizer": 1000,
"distribution": "uniform",
"default": 1000,
"description": "maximum number of iterations to perform",
},
"tol": {
"type": "number",
"minimumForOptimizer": 1e-08,
"maximumForOptimizer": 0.01,
"default": 1e-08,
"description": "tolerance for numerical error",
},
"fit_algorithm": {
"enum": ["lars", "cd"],
"default": "lars",
"description": "lars: uses the least angle regression method to solve the lasso problem (linear_model.lars_path) cd: uses the coordinate descent method to compute the Lasso solution (linear_model.Lasso)",
},
"transform_algorithm": {
"enum": ["lasso_lars", "lasso_cd", "lars", "omp", "threshold"],
"default": "omp",
"description": "Algorithm used to transform the data lars: uses the least angle regression method (linear_model.lars_path) lasso_lars: uses Lars to compute the Lasso solution lasso_cd: uses the coordinate descent method to compute the Lasso solution (linear_model.Lasso)",
},
"transform_n_nonzero_coefs": {
"XXX TODO XXX": "int, ``0.1 * n_features`` by default",
"description": "Number of nonzero coefficients to target in each column of the solution",
"enum": [None],
"default": None,
},
"transform_alpha": {
"anyOf": [{"type": "number"}, {"enum": [None]}],
"default": None,
"description": "If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the penalty applied to the L1 norm",
},
"n_jobs": {
"anyOf": [{"type": "integer"}, {"enum": [None]}],
"default": 1,
"description": "Number of parallel jobs to run",
},
"code_init": {
"anyOf": [
{
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
},
{"enum": [None]},
],
"default": None,
"description": "initial value for the code, for warm restart",
},
"dict_init": {
"anyOf": [
{
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
},
{"enum": [None]},
],
"default": None,
"description": "initial values for the dictionary, for warm restart",
},
"verbose": {
"type": "boolean",
"default": False,
"description": "To control the verbosity of the procedure.",
},
"split_sign": {
"type": "boolean",
"default": False,
"description": "Whether to split the sparse feature vector into the concatenation of its negative part and its positive part",
},
"random_state": {
"anyOf": [
{"type": "integer"},
{"laleType": "numpy.random.RandomState"},
{"enum": [None]},
],
"default": None,
"description": "If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`.",
},
"positive_code": {
"type": "boolean",
"default": False,
"description": "Whether to enforce positivity when finding the code",
},
"positive_dict": {
"type": "boolean",
"default": False,
"description": "Whether to enforce positivity when finding the dictionary ",
},
},
},
{
"XXX TODO XXX": "Parameter: transform_n_nonzero_coefs > only used by algorithm='lars' and algorithm='omp' and is overridden by alpha in the omp case"
},
],
}
_input_fit_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Fit the model from data in X.",
"type": "object",
"required": ["X", "y"],
"properties": {
"X": {
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
"description": "Training vector, where n_samples in the number of samples and n_features is the number of features.",
},
"y": {},
},
}
_input_transform_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Encode the data as a sparse combination of the dictionary atoms.",
"type": "object",
"required": ["X"],
"properties": {
"X": {
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
"description": "Test data to be transformed, must have the same number of features as the data used to train the model.",
}
},
}
_output_transform_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Transformed data",
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Combined schema for expected data and hyperparameters.",
"documentation_url": "https://scikit-learn.org/0.20/modules/generated/sklearn.decomposition.DictionaryLearning#sklearn-decomposition-dictionarylearning",
"import_from": "sklearn.decomposition",
"type": "object",
"tags": {"pre": [], "op": ["transformer"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_transform": _input_transform_schema,
"output_transform": _output_transform_schema,
},
}
DictionaryLearning = make_operator(_DictionaryLearningImpl, _combined_schemas)
set_docstrings(DictionaryLearning)
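# A minimal usage sketch (illustrative addition, not part of the autogen
# output): learns a small dictionary on synthetic data; n_components=4 and
# max_iter=10 are assumptions chosen to keep the demo fast.
if __name__ == "__main__":
    import numpy as np

    X = np.random.RandomState(0).rand(20, 6)
    trained = DictionaryLearning(n_components=4, max_iter=10).fit(X)
    print(trained.transform(X).shape)  # sparse codes: (20, 4)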
| 9,423 | 40.333333 | 292 |
py
|
lale
|
lale-master/lale/lib/autogen/sparse_pca.py
|
from numpy import inf, nan
from sklearn.decomposition import SparsePCA as Op
from lale.docstrings import set_docstrings
from lale.operators import make_operator
class _SparsePCAImpl:
def __init__(self, **hyperparams):
self._hyperparams = hyperparams
self._wrapped_model = Op(**self._hyperparams)
def fit(self, X, y=None):
if y is not None:
self._wrapped_model.fit(X, y)
else:
self._wrapped_model.fit(X)
return self
def transform(self, X):
return self._wrapped_model.transform(X)
_hyperparams_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "inherited docstring for SparsePCA Sparse Principal Components Analysis (SparsePCA)",
"allOf": [
{
"type": "object",
"required": [
"n_components",
"alpha",
"ridge_alpha",
"max_iter",
"tol",
"method",
"n_jobs",
"U_init",
"V_init",
"verbose",
"random_state",
],
"relevantToOptimizer": [
"n_components",
"alpha",
"max_iter",
"tol",
"method",
],
"additionalProperties": False,
"properties": {
"n_components": {
"anyOf": [
{
"type": "integer",
"minimumForOptimizer": 2,
"maximumForOptimizer": 256,
"distribution": "uniform",
},
{"enum": [None]},
],
"default": None,
"description": "Number of sparse atoms to extract.",
},
"alpha": {
"type": "number",
"minimumForOptimizer": 1e-10,
"maximumForOptimizer": 1.0,
"distribution": "loguniform",
"default": 1,
"description": "Sparsity controlling parameter",
},
"ridge_alpha": {
"type": "number",
"default": 0.01,
"description": "Amount of ridge shrinkage to apply in order to improve conditioning when calling the transform method.",
},
"max_iter": {
"type": "integer",
"minimumForOptimizer": 10,
"maximumForOptimizer": 1000,
"distribution": "uniform",
"default": 1000,
"description": "Maximum number of iterations to perform.",
},
"tol": {
"type": "number",
"minimumForOptimizer": 1e-08,
"maximumForOptimizer": 0.01,
"default": 1e-08,
"description": "Tolerance for the stopping condition.",
},
"method": {
"enum": ["lars", "cd"],
"default": "lars",
"description": "lars: uses the least angle regression method to solve the lasso problem (linear_model.lars_path) cd: uses the coordinate descent method to compute the Lasso solution (linear_model.Lasso)",
},
"n_jobs": {
"anyOf": [{"type": "integer"}, {"enum": [None]}],
"default": 1,
"description": "Number of parallel jobs to run",
},
"U_init": {
"anyOf": [
{
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
},
{"enum": [None]},
],
"default": None,
"description": "Initial values for the loadings for warm restart scenarios.",
},
"V_init": {
"anyOf": [
{
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
},
{"enum": [None]},
],
"default": None,
"description": "Initial values for the components for warm restart scenarios.",
},
"verbose": {
"anyOf": [{"type": "integer"}, {"type": "boolean"}],
"default": False,
"description": "Controls the verbosity; the higher, the more messages",
},
"random_state": {
"anyOf": [
{"type": "integer"},
{"laleType": "numpy.random.RandomState"},
{"enum": [None]},
],
"default": None,
"description": "If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`.",
},
},
},
{
"description": "A sparse matrix was passed, but dense data is required. Use X.toarray() to convert to a dense numpy array.",
"type": "object",
"laleNot": "X/isSparse",
},
],
}
_input_fit_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Fit the model from data in X.",
"type": "object",
"required": ["X", "y"],
"properties": {
"X": {
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
"description": "Training vector, where n_samples in the number of samples and n_features is the number of features.",
},
"y": {},
},
}
_input_transform_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Least Squares projection of the data onto the sparse components.",
"type": "object",
"required": ["X"],
"properties": {
"X": {
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
"description": "Test data to be transformed, must have the same number of features as the data used to train the model.",
},
"ridge_alpha": {
"type": "number",
"default": 0.01,
"description": "Amount of ridge shrinkage to apply in order to improve conditioning",
},
},
}
_output_transform_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Transformed data.",
"laleType": "Any",
"XXX TODO XXX": "X_new array, shape (n_samples, n_components)",
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Combined schema for expected data and hyperparameters.",
"documentation_url": "https://scikit-learn.org/0.20/modules/generated/sklearn.decomposition.SparsePCA#sklearn-decomposition-sparsepca",
"import_from": "sklearn.decomposition",
"type": "object",
"tags": {"pre": [], "op": ["transformer"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_transform": _input_transform_schema,
"output_transform": _output_transform_schema,
},
}
SparsePCA = make_operator(_SparsePCAImpl, _combined_schemas)
set_docstrings(SparsePCA)
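# A minimal usage sketch (illustrative addition, not part of the autogen
# output): extracts two sparse components from synthetic data; n_components=2
# and max_iter=20 are assumptions chosen to keep the demo fast.
if __name__ == "__main__":
    import numpy as np

    X = np.random.RandomState(0).rand(20, 6)
    trained = SparsePCA(n_components=2, max_iter=20).fit(X)
    print(trained.transform(X).shape)  # expected: (20, 2)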
| 7,885 | 37.847291 | 263 |
py
|
lale
|
lale-master/lale/lib/autogen/fast_ica.py
|
from numpy import inf, nan
from sklearn.decomposition import FastICA as Op
from lale.docstrings import set_docstrings
from lale.operators import make_operator
class _FastICAImpl:
def __init__(self, **hyperparams):
self._hyperparams = hyperparams
self._wrapped_model = Op(**self._hyperparams)
def fit(self, X, y=None):
if y is not None:
self._wrapped_model.fit(X, y)
else:
self._wrapped_model.fit(X)
return self
def transform(self, X):
return self._wrapped_model.transform(X)
_hyperparams_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "inherited docstring for FastICA FastICA: a fast algorithm for Independent Component Analysis.",
"allOf": [
{
"type": "object",
"required": [
"n_components",
"algorithm",
"whiten",
"fun",
"fun_args",
"max_iter",
"tol",
"w_init",
"random_state",
],
"relevantToOptimizer": [
"n_components",
"algorithm",
"whiten",
"fun",
"max_iter",
"tol",
],
"additionalProperties": False,
"properties": {
"n_components": {
"anyOf": [
{
"type": "integer",
"minimumForOptimizer": 2,
"maximumForOptimizer": 256,
"distribution": "uniform",
},
{"enum": [None]},
],
"default": None,
"description": "Number of components to use",
},
"algorithm": {
"enum": ["parallel", "deflation"],
"default": "parallel",
"description": "Apply parallel or deflational algorithm for FastICA.",
},
"whiten": {
"type": "boolean",
"default": True,
"description": "If whiten is false, the data is already considered to be whitened, and no whitening is performed.",
},
"fun": {
"XXX TODO XXX": "string or function, optional. Default: 'logcosh'",
"description": "The functional form of the G function used in the approximation to neg-entropy",
"enum": ["cube", "exp", "logcosh"],
"default": "logcosh",
},
"fun_args": {
"XXX TODO XXX": "dictionary, optional",
"description": "Arguments to send to the functional form",
"enum": [None],
"default": None,
},
"max_iter": {
"type": "integer",
"minimum": 1,
"minimumForOptimizer": 10,
"maximumForOptimizer": 1000,
"distribution": "uniform",
"default": 200,
"description": "Maximum number of iterations during fit.",
},
"tol": {
"type": "number",
"minimumForOptimizer": 1e-08,
"maximumForOptimizer": 0.01,
"default": 0.0001,
"description": "Tolerance on update at each iteration.",
},
"w_init": {
"XXX TODO XXX": "None of an (n_components, n_components) ndarray",
"description": "The mixing matrix to be used to initialize the algorithm.",
"enum": [None],
"default": None,
},
"random_state": {
"anyOf": [
{"type": "integer"},
{"laleType": "numpy.random.RandomState"},
{"enum": [None]},
],
"default": None,
"description": "If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`.",
},
},
}
],
}
_input_fit_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Fit the model to X.",
"type": "object",
"required": ["X", "y"],
"properties": {
"X": {
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
"description": "Training data, where n_samples is the number of samples and n_features is the number of features.",
},
"y": {},
},
}
_input_transform_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Recover the sources from X (apply the unmixing matrix).",
"type": "object",
"required": ["X"],
"properties": {
"X": {
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
"description": "Data to transform, where n_samples is the number of samples and n_features is the number of features.",
},
"y": {"laleType": "Any", "XXX TODO XXX": "(ignored)", "description": ""},
"copy": {
"laleType": "Any",
"XXX TODO XXX": "bool (optional)",
"description": "If False, data passed to fit are overwritten",
},
},
}
_output_transform_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Recover the sources from X (apply the unmixing matrix).",
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Combined schema for expected data and hyperparameters.",
"documentation_url": "https://scikit-learn.org/0.20/modules/generated/sklearn.decomposition.FastICA#sklearn-decomposition-fastica",
"import_from": "sklearn.decomposition",
"type": "object",
"tags": {"pre": [], "op": ["transformer"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_transform": _input_transform_schema,
"output_transform": _output_transform_schema,
},
}
FastICA = make_operator(_FastICAImpl, _combined_schemas)
set_docstrings(FastICA)
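# A minimal usage sketch (illustrative addition, not part of the autogen
# output): unmixes a synthetic two-source signal. The mixing matrix and
# n_components/random_state are demo assumptions; the schema's boolean
# `whiten` tracks scikit-learn 0.20, so a compatible version is assumed.
if __name__ == "__main__":
    import numpy as np

    rng = np.random.RandomState(0)
    S = rng.standard_normal((200, 2))           # independent sources
    X = S @ np.array([[1.0, 0.5], [0.5, 1.0]])  # mixed observations
    trained = FastICA(n_components=2, random_state=0).fit(X)
    print(trained.transform(X).shape)  # recovered sources: (200, 2)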
| 6,789 | 37.361582 | 263 |
py
|
lale
|
lale-master/lale/lib/autogen/additive_chi2_sampler.py
|
from numpy import inf, nan
from sklearn.kernel_approximation import AdditiveChi2Sampler as Op
from lale.docstrings import set_docstrings
from lale.operators import make_operator
class _AdditiveChi2SamplerImpl:
def __init__(self, **hyperparams):
self._hyperparams = hyperparams
self._wrapped_model = Op(**self._hyperparams)
def fit(self, X, y=None):
if y is not None:
self._wrapped_model.fit(X, y)
else:
self._wrapped_model.fit(X)
return self
def transform(self, X):
return self._wrapped_model.transform(X)
_hyperparams_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "inherited docstring for AdditiveChi2Sampler Approximate feature map for additive chi2 kernel.",
"allOf": [
{
"type": "object",
"required": ["sample_steps", "sample_interval"],
"relevantToOptimizer": ["sample_steps", "sample_interval"],
"additionalProperties": False,
"properties": {
"sample_steps": {
"type": "integer",
"minimumForOptimizer": 1,
"maximumForOptimizer": 5,
"distribution": "uniform",
"default": 2,
"description": "Gives the number of (complex) sampling points.",
},
"sample_interval": {
"anyOf": [
{
"type": "number",
"minimumForOptimizer": 0.1,
"maximumForOptimizer": 1.0,
"distribution": "uniform",
},
{"enum": [None]},
],
"default": None,
"description": "Sampling interval",
},
},
},
{
"description": "From /kernel_approximation.py:AdditiveChi2Sampler:fit, Exception: raise ValueError( 'If "
"sample_steps is not in [1, 2, 3], you need to provide sample_interval') ",
"anyOf": [
{
"type": "object",
"properties": {"sample_interval": {"not": {"enum": [None]}}},
},
{"type": "object", "properties": {"sample_steps": {"enum": [1, 2, 3]}}},
],
},
],
}
_input_fit_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Set the parameters",
"type": "object",
"required": ["X"],
"properties": {
"X": {
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
"description": "Training data, where n_samples in the number of samples and n_features is the number of features.",
}
},
}
_input_transform_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Apply approximate feature map to X.",
"type": "object",
"required": ["X"],
"properties": {
"X": {"type": "array", "items": {"type": "array", "items": {"type": "number"}}}
},
}
_output_transform_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Whether the return value is an array of sparse matrix depends on the type of the input X.",
"laleType": "Any",
"XXX TODO XXX": "{array, sparse matrix}, shape = (n_samples, n_features * (2*sample_steps + 1))",
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Combined schema for expected data and hyperparameters.",
"documentation_url": "https://scikit-learn.org/0.20/modules/generated/sklearn.kernel_approximation.AdditiveChi2Sampler#sklearn-kernel_approximation-additivechi2sampler",
"import_from": "sklearn.kernel_approximation",
"type": "object",
"tags": {"pre": [], "op": ["transformer"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_transform": _input_transform_schema,
"output_transform": _output_transform_schema,
},
}
AdditiveChi2Sampler = make_operator(_AdditiveChi2SamplerImpl, _combined_schemas)
set_docstrings(AdditiveChi2Sampler)
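# A minimal usage sketch (illustrative addition, not part of the autogen
# output): expands non-negative synthetic features (the chi2 map requires
# X >= 0); sample_steps=2 restates the schema default.
if __name__ == "__main__":
    import numpy as np

    X = np.random.RandomState(0).rand(20, 3)  # already non-negative
    trained = AdditiveChi2Sampler(sample_steps=2).fit(X)
    # output has n_features * (2 * sample_steps + 1) = 15 columns
    print(trained.transform(X).shape)  # expected: (20, 15)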
| 4,362 | 36.93913 | 173 |
py
|
lale
|
lale-master/lale/lib/autogen/mini_batch_dictionary_learning.py
|
from numpy import inf, nan
from packaging import version
from sklearn.decomposition import MiniBatchDictionaryLearning as Op
from lale.docstrings import set_docstrings
from lale.operators import make_operator, sklearn_version
class _MiniBatchDictionaryLearningImpl:
def __init__(self, **hyperparams):
self._hyperparams = hyperparams
self._wrapped_model = Op(**self._hyperparams)
def fit(self, X, y=None):
if y is not None:
self._wrapped_model.fit(X, y)
else:
self._wrapped_model.fit(X)
return self
def transform(self, X):
return self._wrapped_model.transform(X)
_hyperparams_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "inherited docstring for MiniBatchDictionaryLearning Mini-batch dictionary learning",
"allOf": [
{
"type": "object",
"required": [
"n_components",
"alpha",
"n_iter",
"fit_algorithm",
"n_jobs",
"batch_size",
"shuffle",
"dict_init",
"transform_algorithm",
"transform_n_nonzero_coefs",
"transform_alpha",
"verbose",
"split_sign",
"random_state",
"positive_code",
"positive_dict",
],
"relevantToOptimizer": [
"n_components",
"alpha",
"n_iter",
"fit_algorithm",
"batch_size",
"shuffle",
"transform_algorithm",
],
"additionalProperties": False,
"properties": {
"n_components": {
"anyOf": [
{
"type": "integer",
"minimumForOptimizer": 2,
"maximumForOptimizer": 256,
"distribution": "uniform",
},
{"enum": [None]},
],
"default": None,
"description": "number of dictionary elements to extract",
},
"alpha": {
"type": "number",
"minimumForOptimizer": 1e-10,
"maximumForOptimizer": 1.0,
"distribution": "loguniform",
"default": 1,
"description": "sparsity controlling parameter",
},
"n_iter": {
"type": "integer",
"minimumForOptimizer": 5,
"maximumForOptimizer": 1000,
"distribution": "uniform",
"default": 1000,
"description": "total number of iterations to perform",
},
"fit_algorithm": {
"enum": ["lars", "cd"],
"default": "lars",
"description": "lars: uses the least angle regression method to solve the lasso problem (linear_model.lars_path) cd: uses the coordinate descent method to compute the Lasso solution (linear_model.Lasso)",
},
"n_jobs": {
"anyOf": [{"type": "integer"}, {"enum": [None]}],
"default": 1,
"description": "Number of parallel jobs to run",
},
"batch_size": {
"type": "integer",
"minimumForOptimizer": 3,
"maximumForOptimizer": 128,
"distribution": "uniform",
"default": 3,
"description": "number of samples in each mini-batch",
},
"shuffle": {
"type": "boolean",
"default": True,
"description": "whether to shuffle the samples before forming batches",
},
"dict_init": {
"anyOf": [
{
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
},
{"enum": [None]},
],
"default": None,
"description": "initial value of the dictionary for warm restart scenarios",
},
"transform_algorithm": {
"enum": ["lasso_lars", "lasso_cd", "lars", "omp", "threshold"],
"default": "omp",
"description": "Algorithm used to transform the data",
},
"transform_n_nonzero_coefs": {
"XXX TODO XXX": "int, ``0.1 * n_features`` by default",
"description": "Number of nonzero coefficients to target in each column of the solution",
"enum": [None],
"default": None,
},
"transform_alpha": {
"anyOf": [{"type": "number"}, {"enum": [None]}],
"default": None,
"description": "If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the penalty applied to the L1 norm",
},
"verbose": {
"type": "boolean",
"default": False,
"description": "To control the verbosity of the procedure.",
},
"split_sign": {
"type": "boolean",
"default": False,
"description": "Whether to split the sparse feature vector into the concatenation of its negative part and its positive part",
},
"random_state": {
"anyOf": [
{"type": "integer"},
{"laleType": "numpy.random.RandomState"},
{"enum": [None]},
],
"default": None,
"description": "If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`.",
},
"positive_code": {
"type": "boolean",
"default": False,
"description": "Whether to enforce positivity when finding the code",
},
"positive_dict": {
"type": "boolean",
"default": False,
"description": "Whether to enforce positivity when finding the dictionary",
},
},
},
{
"XXX TODO XXX": "Parameter: transform_n_nonzero_coefs > only used by algorithm='lars' and algorithm='omp' and is overridden by alpha in the omp case"
},
{
"description": "From /decomposition/_dict_learning.py:None:_check_positive_coding, Exception: raise ValueError(\"Positive constraint not supported for '{}' coding method.\" .format(method)) ",
"anyOf": [
{"type": "object", "properties": {"positive_code": {"enum": [False]}}},
{
"type": "object",
"properties": {"fit_algorithm": {"not": {"enum": ["omp", "lars"]}}},
},
],
},
],
}
_input_fit_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Fit the model from data in X.",
"type": "object",
"required": ["X", "y"],
"properties": {
"X": {
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
"description": "Training vector, where n_samples in the number of samples and n_features is the number of features.",
},
"y": {},
},
}
_input_transform_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Encode the data as a sparse combination of the dictionary atoms.",
"type": "object",
"required": ["X"],
"properties": {
"X": {
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
"description": "Test data to be transformed, must have the same number of features as the data used to train the model.",
}
},
}
_output_transform_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Transformed data",
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Combined schema for expected data and hyperparameters.",
"documentation_url": "https://scikit-learn.org/0.20/modules/generated/sklearn.decomposition.MiniBatchDictionaryLearning#sklearn-decomposition-minibatchdictionarylearning",
"import_from": "sklearn.decomposition",
"type": "object",
"tags": {"pre": [], "op": ["transformer"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_transform": _input_transform_schema,
"output_transform": _output_transform_schema,
},
}
MiniBatchDictionaryLearning = make_operator(
_MiniBatchDictionaryLearningImpl, _combined_schemas
)
if sklearn_version >= version.Version("0.22"):
# old: https://scikit-learn.org/0.20/modules/generated/sklearn.decomposition.MiniBatchDictionaryLearning#sklearn-decomposition-minibatchdictionarylearning
# new: https://scikit-learn.org/0.22/modules/generated/sklearn.decomposition.MiniBatchDictionaryLearning#sklearn-decomposition-minibatchdictionarylearning
MiniBatchDictionaryLearning = MiniBatchDictionaryLearning.customize_schema(
transform_max_iter={
"type": "integer",
"minimumForOptimizer": 100,
"maximumForOptimizer": 2000,
"distribution": "uniform",
"default": 1000,
"description": "Maximum number of iterations to perform if algorithm='lasso_cd' or 'lasso_lars'",
},
set_as_available=True,
)
set_docstrings(MiniBatchDictionaryLearning)
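# A minimal usage sketch (illustrative addition, not part of the autogen
# output): learns a small dictionary in mini-batches. This schema tracks the
# scikit-learn 0.20 signature (e.g. `n_iter`), so a scikit-learn version that
# still accepts those arguments is assumed; n_components/n_iter are demo values.
if __name__ == "__main__":
    import numpy as np

    X = np.random.RandomState(0).rand(30, 6)
    trained = MiniBatchDictionaryLearning(n_components=4, n_iter=50).fit(X)
    print(trained.transform(X).shape)  # sparse codes: (30, 4)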
| 10,544 | 40.679842 | 263 |
py
|
lale
|
lale-master/lale/lib/autogen/plssvd.py
|
from numpy import inf, nan
from sklearn.cross_decomposition import PLSSVD as Op
from lale.docstrings import set_docstrings
from lale.operators import make_operator
class _PLSSVDImpl:
def __init__(self, **hyperparams):
self._hyperparams = hyperparams
self._wrapped_model = Op(**self._hyperparams)
def fit(self, X, y=None):
if y is not None:
self._wrapped_model.fit(X, y)
else:
self._wrapped_model.fit(X)
return self
def transform(self, X):
return self._wrapped_model.transform(X)
_hyperparams_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "inherited docstring for PLSSVD Partial Least Square SVD",
"allOf": [
{
"type": "object",
"required": ["n_components", "scale", "copy"],
"relevantToOptimizer": ["n_components", "scale", "copy"],
"additionalProperties": False,
"properties": {
"n_components": {
"type": "integer",
"minimumForOptimizer": 2,
"maximumForOptimizer": 256,
"distribution": "uniform",
"default": 2,
"description": "Number of components to keep.",
},
"scale": {
"type": "boolean",
"default": True,
"description": "Whether to scale X and Y.",
},
"copy": {
"type": "boolean",
"default": True,
"description": "Whether to copy X and Y, or perform in-place computations.",
},
},
}
],
}
_input_fit_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Fit model to data.",
"type": "object",
"required": ["X"],
"properties": {
"X": {
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
"description": "Training vectors, where n_samples is the number of samples and n_features is the number of predictors.",
},
"Y": {
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
"description": "Target vectors, where n_samples is the number of samples and n_targets is the number of response variables.",
},
},
}
_input_transform_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Apply the dimension reduction learned on the train data.",
"type": "object",
"required": ["X"],
"properties": {
"X": {
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
"description": "Training vectors, where n_samples is the number of samples and n_features is the number of predictors.",
},
"Y": {
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
"description": "Target vectors, where n_samples is the number of samples and n_targets is the number of response variables.",
},
},
}
_output_transform_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Apply the dimension reduction learned on the train data.",
"laleType": "Any",
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Combined schema for expected data and hyperparameters.",
"documentation_url": "https://scikit-learn.org/0.20/modules/generated/sklearn.cross_decomposition.PLSSVD#sklearn-cross_decomposition-plssvd",
"import_from": "sklearn.cross_decomposition",
"type": "object",
"tags": {"pre": [], "op": ["transformer"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_transform": _input_transform_schema,
"output_transform": _output_transform_schema,
},
}
PLSSVD = make_operator(_PLSSVDImpl, _combined_schemas)
set_docstrings(PLSSVD)
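# Editorial usage sketch (not part of the autogenerated file): a minimal,
# hedged demo of the wrapper above. The synthetic shapes (20 samples, 5
# features, 2 targets) and n_components=2 are illustrative assumptions.
if __name__ == "__main__":
    import numpy as np

    X_demo = np.random.rand(20, 5)
    Y_demo = np.random.rand(20, 2)
    trained = PLSSVD(n_components=2).fit(X_demo, Y_demo)
    # transform projects X onto the learned SVD directions
    print(trained.transform(X_demo).shape)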
| 4,157 | 35.473684 | 145 |
py
|
lale
|
lale-master/lale/lib/autogen/multi_label_binarizer.py
|
from numpy import inf, nan
from sklearn.preprocessing import MultiLabelBinarizer as Op
from lale.docstrings import set_docstrings
from lale.operators import make_operator
class _MultiLabelBinarizerImpl:
def __init__(self, **hyperparams):
self._hyperparams = hyperparams
self._wrapped_model = Op(**self._hyperparams)
def fit(self, X, y=None):
if y is not None:
self._wrapped_model.fit(X, y)
else:
self._wrapped_model.fit(X)
return self
def transform(self, X):
return self._wrapped_model.transform(X)
_hyperparams_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "inherited docstring for MultiLabelBinarizer Transform between iterable of iterables and a multilabel format",
"allOf": [
{
"type": "object",
"required": ["classes", "sparse_output"],
"relevantToOptimizer": ["sparse_output"],
"additionalProperties": False,
"properties": {
"classes": {
"XXX TODO XXX": "array-like of shape [n_classes] (optional)",
"description": "Indicates an ordering for the class labels",
"enum": [None],
"default": None,
},
"sparse_output": {
"type": "boolean",
"default": False,
"description": "Set to true if output binary array is desired in CSR sparse format",
},
},
}
],
}
_input_fit_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Fit the label sets binarizer, storing `classes_`",
"type": "object",
"required": ["y"],
"properties": {
"y": {
"laleType": "Any",
"XXX TODO XXX": "iterable of iterables",
"description": "A set of labels (any orderable and hashable object) for each sample",
}
},
}
_input_transform_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Transform the given label sets",
"type": "object",
"required": ["y"],
"properties": {
"y": {
"laleType": "Any",
"XXX TODO XXX": "iterable of iterables",
"description": "A set of labels (any orderable and hashable object) for each sample",
}
},
}
_output_transform_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "A matrix such that `y_indicator[i, j] = 1` iff `classes_[j]` is in `y[i]`, and 0 otherwise.",
"laleType": "Any",
"XXX TODO XXX": "array or CSR matrix, shape (n_samples, n_classes)",
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Combined schema for expected data and hyperparameters.",
"documentation_url": "https://scikit-learn.org/0.20/modules/generated/sklearn.preprocessing.MultiLabelBinarizer#sklearn-preprocessing-multilabelbinarizer",
"import_from": "sklearn.preprocessing",
"type": "object",
"tags": {"pre": [], "op": ["transformer"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_transform": _input_transform_schema,
"output_transform": _output_transform_schema,
},
}
MultiLabelBinarizer = make_operator(_MultiLabelBinarizerImpl, _combined_schemas)
set_docstrings(MultiLabelBinarizer)
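# Editorial usage sketch (not part of the autogenerated file): a hedged demo;
# the label sets below are illustrative assumptions. Note that this
# transformer consumes label sets rather than a feature matrix.
if __name__ == "__main__":
    demo_labels = [{"news", "sports"}, {"sports"}, {"news", "finance"}]
    trained = MultiLabelBinarizer().fit(demo_labels)
    # each row is the indicator vector of one label set
    print(trained.transform(demo_labels))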
| 3,524 | 34.969388 | 159 |
py
|
lale
|
lale-master/lale/lib/autogen/factor_analysis.py
|
import sklearn
from numpy import inf, nan
from packaging import version
from sklearn.decomposition import FactorAnalysis as Op
from lale.docstrings import set_docstrings
from lale.operators import make_operator, sklearn_version
class _FactorAnalysisImpl:
def __init__(self, **hyperparams):
self._hyperparams = hyperparams
self._wrapped_model = Op(**self._hyperparams)
def fit(self, X, y=None):
if y is not None:
self._wrapped_model.fit(X, y)
else:
self._wrapped_model.fit(X)
return self
def transform(self, X):
return self._wrapped_model.transform(X)
_hyperparams_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "inherited docstring for FactorAnalysis Factor Analysis (FA)",
"allOf": [
{
"type": "object",
"required": [
"n_components",
"tol",
"copy",
"max_iter",
"noise_variance_init",
"svd_method",
"iterated_power",
"random_state",
],
"relevantToOptimizer": [
"tol",
"copy",
"svd_method",
"iterated_power",
],
"additionalProperties": False,
"properties": {
"n_components": {
"anyOf": [
{
"type": "integer",
"minimun": 1,
"laleMaximum": "X/items/maxItems",
"minimumForOptimizer": 2,
"maximumForOptimizer": 256,
"distribution": "uniform",
},
{"enum": [None]},
],
"default": None,
"description": "Dimensionality of latent space, the number of components of ``X`` that are obtained after ``transform``",
},
"tol": {
"type": "number",
"minimumForOptimizer": 1e-08,
"maximumForOptimizer": 0.01,
"default": 0.01,
"description": "Stopping tolerance for EM algorithm.",
},
"copy": {
"type": "boolean",
"default": True,
"description": "Whether to make a copy of X",
},
"max_iter": {
"type": "integer",
"minimumForOptimizer": 10,
"maximumForOptimizer": 1000,
"distribution": "uniform",
"default": 1000,
"description": "Maximum number of iterations.",
},
"noise_variance_init": {
"XXX TODO XXX": "None | array, shape=(n_features,)",
"description": "The initial guess of the noise variance for each feature",
"enum": [None],
"default": None,
},
"svd_method": {
"enum": ["lapack", "randomized"],
"default": "randomized",
"description": "Which SVD method to use",
},
"iterated_power": {
"type": "integer",
"minimumForOptimizer": 3,
"maximumForOptimizer": 4,
"distribution": "uniform",
"default": 3,
"description": "Number of iterations for the power method",
},
"random_state": {
"anyOf": [
{"type": "integer"},
{"laleType": "numpy.random.RandomState"},
{"enum": [None]},
],
"default": 0,
"description": "If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`",
},
},
},
{
"XXX TODO XXX": "Parameter: iterated_power > only used if svd_method equals 'randomized'"
},
{
"XXX TODO XXX": "Parameter: random_state > only used when svd_method equals 'randomized'"
},
{
"description": "(‘random_state’ only used when svd_method equals ‘randomized’) From /utils/validation.py:None:check_random_state, Exception: raise ValueError( '%r cannot be used to seed a numpy.random.RandomState instance' % seed) ",
"anyOf": [
{"type": "object", "properties": {"svd_method": {"enum": ["lapack"]}}},
{
"type": "object",
"properties": {"svd_method": {"not": {"enum": ["randomized"]}}},
},
{"type": "object", "properties": {"random_state": {"enum": [None]}}},
{"XXX TODO XXX": "self.random_state is np.random"},
{
"XXX TODO XXX": "isinstance(self.random_state, np.random.RandomState)"
},
],
},
{
"description": "A sparse matrix was passed, but dense data is required. Use X.toarray() to convert to a dense numpy array.",
"type": "object",
"laleNot": "X/isSparse",
},
],
}
_input_fit_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Fit the FactorAnalysis model to X using EM",
"type": "object",
"required": ["X", "y"],
"properties": {
"X": {
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
"description": "Training data.",
},
"y": {},
},
}
_input_transform_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Apply dimensionality reduction to X using the model.",
"type": "object",
"required": ["X"],
"properties": {
"X": {
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
"description": "Training data.",
}
},
}
_output_transform_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "The latent variables of X.",
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Combined schema for expected data and hyperparameters.",
"documentation_url": "https://scikit-learn.org/0.20/modules/generated/sklearn.decomposition.FactorAnalysis#sklearn-decomposition-factoranalysis",
"import_from": "sklearn.decomposition",
"type": "object",
"tags": {"pre": [], "op": ["transformer"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_transform": _input_transform_schema,
"output_transform": _output_transform_schema,
},
}
FactorAnalysis = make_operator(_FactorAnalysisImpl, _combined_schemas)
if sklearn_version >= version.Version("0.24"):
# old: https://scikit-learn.org/0.20/modules/generated/sklearn.decomposition.FactorAnalysis#sklearn-decomposition-factoranalysis
# new: https://scikit-learn.org/0.24/modules/generated/sklearn.decomposition.FactorAnalysis#sklearn-decomposition-factoranalysis
FactorAnalysis = FactorAnalysis.customize_schema(
rotation={
"enum": ["varimax", "quartimax", None],
"default": None,
"description": "if not None, apply the indicated rotation. Currently, varimax and quartimax are implemented.",
},
set_as_available=True,
)
set_docstrings(FactorAnalysis)
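# Editorial usage sketch (not part of the autogenerated file): a hedged demo
# of the wrapper above; the synthetic data and n_components=2 are
# illustrative assumptions.
if __name__ == "__main__":
    import numpy as np

    X_demo = np.random.rand(30, 6)
    trained = FactorAnalysis(n_components=2).fit(X_demo)
    # the latent representation has one column per component
    print(trained.transform(X_demo).shape)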
| 8,084 | 38.439024 | 262 |
py
|
lale
|
lale-master/lale/lib/autogen/incremental_pca.py
|
from numpy import inf, nan
from sklearn.decomposition import IncrementalPCA as Op
from lale.docstrings import set_docstrings
from lale.operators import make_operator
class _IncrementalPCAImpl:
def __init__(self, **hyperparams):
self._hyperparams = hyperparams
self._wrapped_model = Op(**self._hyperparams)
def fit(self, X, y=None):
if y is not None:
self._wrapped_model.fit(X, y)
else:
self._wrapped_model.fit(X)
return self
def transform(self, X):
return self._wrapped_model.transform(X)
_hyperparams_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "inherited docstring for IncrementalPCA Incremental principal components analysis (IPCA).",
"allOf": [
{
"type": "object",
"required": ["n_components", "whiten", "copy", "batch_size"],
"relevantToOptimizer": ["n_components", "whiten", "copy", "batch_size"],
"additionalProperties": False,
"properties": {
"n_components": {
"anyOf": [
{
"type": "integer",
"minimumForOptimizer": 2,
"maximumForOptimizer": 256,
"distribution": "uniform",
},
{"enum": [None]},
],
"default": None,
"description": "Number of components to keep",
},
"whiten": {
"type": "boolean",
"default": False,
"description": "When True (False by default) the ``components_`` vectors are divided by ``n_samples`` times ``components_`` to ensure uncorrelated outputs with unit component-wise variances",
},
"copy": {
"type": "boolean",
"default": True,
"description": "If False, X will be overwritten",
},
"batch_size": {
"anyOf": [
{
"type": "integer",
"minimumForOptimizer": 3,
"maximumForOptimizer": 128,
"distribution": "uniform",
},
{"enum": [None]},
],
"default": None,
"description": "The number of samples to use for each batch",
},
},
},
{"XXX TODO XXX": "Parameter: batch_size > only used when calling fit"},
],
}
_input_fit_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Fit the model with X, using minibatches of size batch_size.",
"type": "object",
"required": ["X", "y"],
"properties": {
"X": {
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
"description": "Training data, where n_samples is the number of samples and n_features is the number of features.",
},
"y": {},
},
}
_input_transform_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Apply dimensionality reduction to X.",
"type": "object",
"required": ["X"],
"properties": {
"X": {
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
"description": "New data, where n_samples is the number of samples and n_features is the number of features.",
}
},
}
_output_transform_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Apply dimensionality reduction to X.",
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Combined schema for expected data and hyperparameters.",
"documentation_url": "https://scikit-learn.org/0.20/modules/generated/sklearn.decomposition.IncrementalPCA#sklearn-decomposition-incrementalpca",
"import_from": "sklearn.decomposition",
"type": "object",
"tags": {"pre": [], "op": ["transformer"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_transform": _input_transform_schema,
"output_transform": _output_transform_schema,
},
}
IncrementalPCA = make_operator(_IncrementalPCAImpl, _combined_schemas)
set_docstrings(IncrementalPCA)
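# Editorial usage sketch (not part of the autogenerated file): a hedged demo
# of the wrapper above; the synthetic data, n_components=3, and batch_size=10
# are illustrative assumptions (batch_size must be >= n_components).
if __name__ == "__main__":
    import numpy as np

    X_demo = np.random.rand(50, 8)
    trained = IncrementalPCA(n_components=3, batch_size=10).fit(X_demo)
    print(trained.transform(X_demo).shape)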
| 4,712 | 36.704 | 211 |
py
|
lale
|
lale-master/lale/lib/autogen/logistic_regression_cv.py
|
from numpy import inf, nan
from sklearn.linear_model import LogisticRegressionCV as Op
from lale.docstrings import set_docstrings
from lale.operators import make_operator
class _LogisticRegressionCVImpl:
def __init__(self, **hyperparams):
self._hyperparams = hyperparams
self._wrapped_model = Op(**self._hyperparams)
def fit(self, X, y=None):
if y is not None:
self._wrapped_model.fit(X, y)
else:
self._wrapped_model.fit(X)
return self
def predict(self, X):
return self._wrapped_model.predict(X)
def predict_proba(self, X):
return self._wrapped_model.predict_proba(X)
def decision_function(self, X):
return self._wrapped_model.decision_function(X)
_hyperparams_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "inherited docstring for LogisticRegressionCV Logistic Regression CV (aka logit, MaxEnt) classifier.",
"allOf": [
{
"type": "object",
"required": [
"Cs",
"fit_intercept",
"cv",
"dual",
"penalty",
"scoring",
"solver",
"tol",
"max_iter",
"class_weight",
"n_jobs",
"verbose",
"refit",
"intercept_scaling",
"multi_class",
"random_state",
],
"relevantToOptimizer": [
"Cs",
"fit_intercept",
"cv",
"dual",
"penalty",
"scoring",
"solver",
"tol",
"max_iter",
"multi_class",
],
"additionalProperties": False,
"properties": {
"Cs": {
"XXX TODO XXX": "list of floats | int",
"description": "Each of the values in Cs describes the inverse of regularization strength",
"type": "integer",
"minimumForOptimizer": 10,
"maximumForOptimizer": 11,
"distribution": "uniform",
"default": 10,
},
"fit_intercept": {
"type": "boolean",
"default": True,
"description": "Specifies if a constant (a.k.a",
},
"cv": {
"description": """Cross-validation as integer or as object that has a split function.
The fit method performs cross validation on the input dataset for per
trial, and uses the mean cross validation performance for optimization.
This behavior is also impacted by handle_cv_failure flag.
If integer: number of folds in sklearn.model_selection.StratifiedKFold.
If object with split function: generator yielding (train, test) splits
as arrays of indices. Can use any of the iterators from
https://scikit-learn.org/stable/modules/cross_validation.html#cross-validation-iterators.""",
"anyOf": [
{
"type": "integer",
"minimum": 1,
"default": 5,
"minimumForOptimizer": 3,
"maximumForOptimizer": 4,
"distribution": "uniform",
},
{"laleType": "Any", "forOptimizer": False},
],
},
"dual": {
"type": "boolean",
"default": False,
"description": "Dual or primal formulation",
},
"penalty": {
"XXX TODO XXX": "str, 'l1' or 'l2'",
"description": "Used to specify the norm used in the penalization",
"enum": ["l1", "l2"],
"default": "l2",
},
"scoring": {
"anyOf": [
{"laleType": "callable", "forOptimizer": False},
{"enum": ["accuracy", None]},
],
"default": None,
"description": "A string (see model evaluation documentation) or a scorer callable object / function with signature ``scorer(estimator, X, y)``",
},
"solver": {
"enum": ["newton-cg", "lbfgs", "liblinear", "sag", "saga"],
"default": "lbfgs",
"description": "Algorithm to use in the optimization problem",
},
"tol": {
"type": "number",
"minimumForOptimizer": 1e-08,
"maximumForOptimizer": 0.01,
"default": 0.0001,
"description": "Tolerance for stopping criteria.",
},
"max_iter": {
"type": "integer",
"minimumForOptimizer": 10,
"maximumForOptimizer": 1000,
"distribution": "uniform",
"default": 100,
"description": "Maximum number of iterations of the optimization algorithm.",
},
"class_weight": {
"XXX TODO XXX": "dict or 'balanced', optional",
"description": "Weights associated with classes in the form ``{class_label: weight}``",
"enum": ["balanced"],
"default": "balanced",
},
"n_jobs": {
"anyOf": [{"type": "integer"}, {"enum": [None]}],
"default": 1,
"description": "Number of CPU cores used during the cross-validation loop",
},
"verbose": {
"type": "integer",
"default": 0,
"description": "For the 'liblinear', 'sag' and 'lbfgs' solvers set verbose to any positive number for verbosity.",
},
"refit": {
"type": "boolean",
"default": True,
"description": "If set to True, the scores are averaged across all folds, and the coefs and the C that corresponds to the best score is taken, and a final refit is done using these parameters",
},
"intercept_scaling": {
"type": "number",
"default": 1.0,
"description": "Useful only when the solver 'liblinear' is used and self.fit_intercept is set to True",
},
"multi_class": {
"enum": ["ovr", "multinomial", "auto"],
"default": "ovr",
"description": "If the option chosen is 'ovr', then a binary problem is fit for each label",
},
"random_state": {
"anyOf": [
{"type": "integer"},
{"laleType": "numpy.random.RandomState"},
{"enum": [None]},
],
"default": None,
"description": "If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`.",
},
},
},
{
"XXX TODO XXX": "Parameter: dual > only implemented for l2 penalty with liblinear solver"
},
{"XXX TODO XXX": "Parameter: penalty > only l2 penalties"},
{
"XXX TODO XXX": "Parameter: solver > only 'newton-cg', 'sag', 'saga' and 'lbfgs' handle multinomial loss; 'liblinear' is limited to one-versus-rest schemes"
},
{
"XXX TODO XXX": "Parameter: intercept_scaling > only when the solver 'liblinear' is used and self"
},
],
}
_input_fit_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Fit the model according to the given training data.",
"type": "object",
"required": ["X", "y"],
"properties": {
"X": {
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
"description": "Training vector, where n_samples is the number of samples and n_features is the number of features.",
},
"y": {
"type": "array",
"items": {"type": "number"},
"description": "Target vector relative to X.",
},
"sample_weight": {
"laleType": "Any",
"XXX TODO XXX": "array-like, shape (n_samples,) optional",
"description": "Array of weights that are assigned to individual samples",
},
},
}
_input_predict_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Predict class labels for samples in X.",
"type": "object",
"required": ["X"],
"properties": {
"X": {
"anyOf": [
{
"type": "array",
"items": {"laleType": "Any", "XXX TODO XXX": "item type"},
"XXX TODO XXX": "array_like or sparse matrix, shape (n_samples, n_features)",
},
{
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
},
],
"description": "Samples.",
}
},
}
_output_predict_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Predicted class label per sample.",
"type": "array",
"items": {"type": "number"},
}
_input_predict_proba_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Probability estimates.",
"type": "object",
"required": ["X"],
"properties": {
"X": {"type": "array", "items": {"type": "array", "items": {"type": "number"}}}
},
}
_output_predict_proba_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Returns the probability of the sample for each class in the model, where classes are ordered as they are in ``self.classes_``.",
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
}
_input_decision_function_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Predict confidence scores for samples.",
"type": "object",
"required": ["X"],
"properties": {
"X": {
"anyOf": [
{
"type": "array",
"items": {"laleType": "Any", "XXX TODO XXX": "item type"},
"XXX TODO XXX": "array_like or sparse matrix, shape (n_samples, n_features)",
},
{
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
},
],
"description": "Samples.",
}
},
}
_output_decision_function_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Confidence scores per (sample, class) combination",
"laleType": "Any",
"XXX TODO XXX": "array, shape=(n_samples,) if n_classes == 2 else (n_samples, n_classes)",
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Combined schema for expected data and hyperparameters.",
"documentation_url": "https://scikit-learn.org/0.20/modules/generated/sklearn.linear_model.LogisticRegressionCV#sklearn-linear_model-logisticregressioncv",
"import_from": "sklearn.linear_model",
"type": "object",
"tags": {"pre": [], "op": ["estimator"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
"input_predict_proba": _input_predict_proba_schema,
"output_predict_proba": _output_predict_proba_schema,
"input_decision_function": _input_decision_function_schema,
"output_decision_function": _output_decision_function_schema,
},
}
LogisticRegressionCV = make_operator(_LogisticRegressionCVImpl, _combined_schemas)
set_docstrings(LogisticRegressionCV)
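# Editorial usage sketch (not part of the autogenerated file): a hedged demo
# of the wrapper above; the synthetic two-class data and the choices cv=3,
# max_iter=200 are illustrative assumptions.
if __name__ == "__main__":
    import numpy as np

    X_demo = np.random.rand(40, 4)
    y_demo = np.array([0] * 20 + [1] * 20)
    trained = LogisticRegressionCV(cv=3, max_iter=200).fit(X_demo, y_demo)
    print(trained.predict(X_demo)[:5])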
| 12,844 | 40.302251 | 263 |
py
|
lale
|
lale-master/lale/lib/autogen/lasso_lars_ic.py
|
from numpy import inf, nan
from sklearn.linear_model import LassoLarsIC as Op
from lale.docstrings import set_docstrings
from lale.operators import make_operator
class _LassoLarsICImpl:
def __init__(self, **hyperparams):
self._hyperparams = hyperparams
self._wrapped_model = Op(**self._hyperparams)
def fit(self, X, y=None):
if y is not None:
self._wrapped_model.fit(X, y)
else:
self._wrapped_model.fit(X)
return self
def predict(self, X):
return self._wrapped_model.predict(X)
_hyperparams_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "inherited docstring for LassoLarsIC Lasso model fit with Lars using BIC or AIC for model selection",
"allOf": [
{
"type": "object",
"required": [
"criterion",
"fit_intercept",
"verbose",
"normalize",
"precompute",
"max_iter",
"eps",
"copy_X",
"positive",
],
"relevantToOptimizer": [
"criterion",
"fit_intercept",
"normalize",
"precompute",
"max_iter",
"eps",
"copy_X",
"positive",
],
"additionalProperties": False,
"properties": {
"criterion": {
"enum": ["bic", "aic"],
"default": "aic",
"description": "The type of criterion to use.",
},
"fit_intercept": {
"type": "boolean",
"default": True,
"description": "whether to calculate the intercept for this model",
},
"verbose": {
"anyOf": [{"type": "boolean"}, {"type": "integer"}],
"default": False,
"description": "Sets the verbosity amount",
},
"normalize": {
"type": "boolean",
"default": True,
"description": "This parameter is ignored when ``fit_intercept`` is set to False",
},
"precompute": {
"anyOf": [
{
"type": "array",
"items": {"laleType": "Any", "XXX TODO XXX": "item type"},
"XXX TODO XXX": "True | False | 'auto' | array-like",
"forOptimizer": False,
},
{"type": "boolean"},
{"enum": ["auto"]},
],
"default": "auto",
"description": "Whether to use a precomputed Gram matrix to speed up calculations",
},
"max_iter": {
"type": "integer",
"minimumForOptimizer": 10,
"maximumForOptimizer": 1000,
"distribution": "uniform",
"default": 500,
"description": "Maximum number of iterations to perform",
},
"eps": {
"type": "number",
"minimumForOptimizer": 0.001,
"maximumForOptimizer": 0.1,
"distribution": "loguniform",
"default": 2.220446049250313e-16,
"description": "The machine-precision regularization in the computation of the Cholesky diagonal factors",
},
"copy_X": {
"type": "boolean",
"default": True,
"description": "If True, X will be copied; else, it may be overwritten.",
},
"positive": {
"type": "boolean",
"default": False,
"description": "Restrict coefficients to be >= 0",
},
},
},
{
"XXX TODO XXX": "Parameter: positive > only coefficients up to the smallest alpha value (alphas_[alphas_ > 0"
},
{
"description": "X cannot be None if Gram is not NoneUse lars_path_gram to avoid passing X and y.) ",
"anyOf": [
{"XXX TODO XXX": "input X is not None"},
{"type": "object", "properties": {"precompute": {"enum": [None]}}},
],
},
{
"description": "From /linear_model/_least_angle.py:None:_lars_path_solver, Exception: raise ValueError('X and Gram cannot both be unspecified.') ",
"anyOf": [
{
"type": "object",
"properties": {"precompute": {"not": {"enum": [None, False]}}},
},
{"XXX TODO XXX": "input X is not None"},
],
},
],
}
_input_fit_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Fit the model using X, y as training data.",
"type": "object",
"required": ["X", "y"],
"properties": {
"X": {
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
"description": "training data.",
},
"y": {
"type": "array",
"items": {"type": "number"},
"description": "target values",
},
"copy_X": {
"type": "boolean",
"default": True,
"description": "If ``True``, X will be copied; else, it may be overwritten.",
},
},
}
_input_predict_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Predict using the linear model",
"type": "object",
"required": ["X"],
"properties": {
"X": {
"anyOf": [
{
"type": "array",
"items": {"laleType": "Any", "XXX TODO XXX": "item type"},
"XXX TODO XXX": "array_like or sparse matrix, shape (n_samples, n_features)",
},
{
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
},
],
"description": "Samples.",
}
},
}
_output_predict_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Returns predicted values.",
"type": "array",
"items": {"type": "number"},
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Combined schema for expected data and hyperparameters.",
"documentation_url": "https://scikit-learn.org/0.20/modules/generated/sklearn.linear_model.LassoLarsIC#sklearn-linear_model-lassolarsic",
"import_from": "sklearn.linear_model",
"type": "object",
"tags": {"pre": [], "op": ["estimator", "regressor"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
},
}
LassoLarsIC = make_operator(_LassoLarsICImpl, _combined_schemas)
set_docstrings(LassoLarsIC)
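# Editorial usage sketch (not part of the autogenerated file): a hedged demo
# of the wrapper above; the synthetic linear data and criterion="bic" are
# illustrative assumptions.
if __name__ == "__main__":
    import numpy as np

    X_demo = np.random.rand(30, 5)
    y_demo = X_demo @ np.arange(1.0, 6.0)
    trained = LassoLarsIC(criterion="bic").fit(X_demo, y_demo)
    print(trained.predict(X_demo)[:5])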
| 7,452 | 35.356098 | 159 |
py
|
lale
|
lale-master/lale/lib/autogen/k_bins_discretizer.py
|
import sklearn
from numpy import inf, nan
from packaging import version
from sklearn.preprocessing import KBinsDiscretizer as Op
from lale.docstrings import set_docstrings
from lale.operators import make_operator, sklearn_version
class _KBinsDiscretizerImpl:
def __init__(self, **hyperparams):
self._hyperparams = hyperparams
self._wrapped_model = Op(**self._hyperparams)
def fit(self, X, y=None):
if y is not None:
self._wrapped_model.fit(X, y)
else:
self._wrapped_model.fit(X)
return self
def transform(self, X):
return self._wrapped_model.transform(X)
_hyperparams_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "inherited docstring for KBinsDiscretizer Bin continuous data into intervals.",
"allOf": [
{
"type": "object",
"required": ["n_bins", "encode", "strategy"],
"relevantToOptimizer": ["encode", "strategy"],
"additionalProperties": False,
"properties": {
"n_bins": {
"anyOf": [
{"type": "integer"},
{"type": "array", "items": {"type": "number"}},
],
"default": 5,
"description": "The number of bins to produce",
},
"encode": {
"enum": ["onehot", "onehot-dense", "ordinal"],
"default": "onehot",
"description": "Method used to encode the transformed result",
},
"strategy": {
"enum": ["uniform", "quantile", "kmeans"],
"default": "quantile",
"description": "Strategy used to define the widths of the bins",
},
},
},
{
"description": "A sparse matrix was passed, but dense data is required. Use X.toarray() to convert to a dense numpy array. ",
"type": "object",
"laleNot": "X/isSparse",
},
],
}
_input_fit_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Fits the estimator.",
"type": "object",
"required": ["X", "y"],
"properties": {
"X": {
"laleType": "Any",
"XXX TODO XXX": "numeric array-like, shape (n_samples, n_features)",
"description": "Data to be discretized.",
},
"y": {"laleType": "Any", "XXX TODO XXX": "ignored"},
},
}
_input_transform_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Discretizes the data.",
"type": "object",
"required": ["X"],
"properties": {
"X": {
"laleType": "Any",
"XXX TODO XXX": "numeric array-like, shape (n_samples, n_features)",
"description": "Data to be discretized.",
}
},
}
_output_transform_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Data in the binned space.",
"laleType": "Any",
"XXX TODO XXX": "numeric array-like or sparse matrix",
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Combined schema for expected data and hyperparameters.",
"documentation_url": "https://scikit-learn.org/0.20/modules/generated/sklearn.preprocessing.KBinsDiscretizer#sklearn-preprocessing-kbinsdiscretizer",
"import_from": "sklearn.preprocessing",
"type": "object",
"tags": {"pre": [], "op": ["transformer"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_transform": _input_transform_schema,
"output_transform": _output_transform_schema,
},
}
KBinsDiscretizer = make_operator(_KBinsDiscretizerImpl, _combined_schemas)
if sklearn_version >= version.Version("0.24"):
# old: https://scikit-learn.org/0.20/modules/generated/sklearn.preprocessing.KBinsDiscretizer#sklearn-preprocessing-kbinsdiscretizer
# new: https://scikit-learn.org/0.24/modules/generated/sklearn.preprocessing.KBinsDiscretizer#sklearn-preprocessing-kbinsdiscretizer
KBinsDiscretizer = KBinsDiscretizer.customize_schema(
dtype={
"XXX TODO XXX": "dtype{np.float32, np.float64}, default=None",
"laleType": "Any",
"default": None,
},
set_as_available=True,
)
set_docstrings(KBinsDiscretizer)
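# Editorial usage sketch (not part of the autogenerated file): a hedged demo
# of the wrapper above; the synthetic data and the choices n_bins=3,
# encode="ordinal" are illustrative assumptions.
if __name__ == "__main__":
    import numpy as np

    X_demo = np.random.rand(25, 3)
    trained = KBinsDiscretizer(n_bins=3, encode="ordinal").fit(X_demo)
    # each feature is mapped to an integer bin index in [0, n_bins)
    print(trained.transform(X_demo)[:3])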
| 4,544 | 35.36 | 153 |
py
|
lale
|
lale-master/lale/lib/autogen/__init__.py
|
""" Lale autogen schemas
The JSON schemas of the operators defined in this module were automatically generated from the source code of 115 scikit-learn operators.
The resulting schemas are all valid and usable to build Lale pipelines.
The following paper describes the schema extractor::
@InProceedings{baudart_et_al_2020,
title = "Mining Documentation to Extract Hyperparameter Schemas",
author = "Baudart, Guillaume and Kirchner, Peter and Hirzel, Martin and Kate, Kiran",
booktitle = "ICML Workshop on Automated Machine Learning (AutoML@ICML)",
year = 2020,
url = "https://arxiv.org/abs/2006.16984" }
"""
from lale.lib.sklearn.ada_boost_classifier import AdaBoostClassifier
from lale.lib.sklearn.ada_boost_regressor import AdaBoostRegressor
from lale.lib.sklearn.decision_tree_classifier import DecisionTreeClassifier
from lale.lib.sklearn.decision_tree_regressor import DecisionTreeRegressor
from lale.lib.sklearn.extra_trees_classifier import ExtraTreesClassifier
from lale.lib.sklearn.extra_trees_regressor import ExtraTreesRegressor
from lale.lib.sklearn.function_transformer import FunctionTransformer
from lale.lib.sklearn.gaussian_nb import GaussianNB
from lale.lib.sklearn.gradient_boosting_classifier import GradientBoostingClassifier
from lale.lib.sklearn.gradient_boosting_regressor import GradientBoostingRegressor
from lale.lib.sklearn.isomap import Isomap
from lale.lib.sklearn.k_means import KMeans
from lale.lib.sklearn.k_neighbors_classifier import KNeighborsClassifier
from lale.lib.sklearn.k_neighbors_regressor import KNeighborsRegressor
from lale.lib.sklearn.linear_regression import LinearRegression
from lale.lib.sklearn.linear_svc import LinearSVC
from lale.lib.sklearn.linear_svr import LinearSVR
from lale.lib.sklearn.logistic_regression import LogisticRegression
from lale.lib.sklearn.min_max_scaler import MinMaxScaler
from lale.lib.sklearn.missing_indicator import MissingIndicator
from lale.lib.sklearn.mlp_classifier import MLPClassifier
from lale.lib.sklearn.multinomial_nb import MultinomialNB
from lale.lib.sklearn.nmf import NMF
from lale.lib.sklearn.normalizer import Normalizer
from lale.lib.sklearn.nystroem import Nystroem
from lale.lib.sklearn.one_hot_encoder import OneHotEncoder
from lale.lib.sklearn.ordinal_encoder import OrdinalEncoder
from lale.lib.sklearn.passive_aggressive_classifier import PassiveAggressiveClassifier
from lale.lib.sklearn.pca import PCA
from lale.lib.sklearn.polynomial_features import PolynomialFeatures
from lale.lib.sklearn.quadratic_discriminant_analysis import (
QuadraticDiscriminantAnalysis,
)
from lale.lib.sklearn.quantile_transformer import QuantileTransformer
from lale.lib.sklearn.random_forest_classifier import RandomForestClassifier
from lale.lib.sklearn.random_forest_regressor import RandomForestRegressor
from lale.lib.sklearn.ridge import Ridge
from lale.lib.sklearn.ridge_classifier import RidgeClassifier
from lale.lib.sklearn.robust_scaler import RobustScaler
from lale.lib.sklearn.sgd_classifier import SGDClassifier
from lale.lib.sklearn.sgd_regressor import SGDRegressor
from lale.lib.sklearn.simple_imputer import SimpleImputer
from lale.lib.sklearn.standard_scaler import StandardScaler
from lale.lib.sklearn.svc import SVC
from lale.lib.sklearn.svr import SVR
from .additive_chi2_sampler import AdditiveChi2Sampler
from .ard_regression import ARDRegression
from .bayesian_ridge import BayesianRidge
from .bernoulli_nb import BernoulliNB
from .bernoulli_rbm import BernoulliRBM
from .binarizer import Binarizer
from .birch import Birch
from .calibrated_classifier_cv import CalibratedClassifierCV
from .cca import CCA
from .complement_nb import ComplementNB
from .dictionary_learning import DictionaryLearning
from .elastic_net import ElasticNet
from .elastic_net_cv import ElasticNetCV
from .factor_analysis import FactorAnalysis
from .fast_ica import FastICA
from .gaussian_process_classifier import GaussianProcessClassifier
from .gaussian_process_regressor import GaussianProcessRegressor
from .gaussian_random_projection import GaussianRandomProjection
from .huber_regressor import HuberRegressor
from .incremental_pca import IncrementalPCA
from .k_bins_discretizer import KBinsDiscretizer
from .kernel_pca import KernelPCA
from .kernel_ridge import KernelRidge
from .label_binarizer import LabelBinarizer
from .label_encoder import LabelEncoder
from .label_propagation import LabelPropagation
from .label_spreading import LabelSpreading
from .lars import Lars
from .lars_cv import LarsCV
from .lasso import Lasso
from .lasso_cv import LassoCV
from .lasso_lars import LassoLars
from .lasso_lars_cv import LassoLarsCV
from .lasso_lars_ic import LassoLarsIC
from .latent_dirichlet_allocation import LatentDirichletAllocation
from .linear_discriminant_analysis import LinearDiscriminantAnalysis
from .locally_linear_embedding import LocallyLinearEmbedding
from .logistic_regression_cv import LogisticRegressionCV
from .max_abs_scaler import MaxAbsScaler
from .mini_batch_dictionary_learning import MiniBatchDictionaryLearning
from .mini_batch_k_means import MiniBatchKMeans
from .mini_batch_sparse_pca import MiniBatchSparsePCA
from .mlp_regressor import MLPRegressor
from .multi_label_binarizer import MultiLabelBinarizer
from .multi_task_elastic_net import MultiTaskElasticNet
from .multi_task_elastic_net_cv import MultiTaskElasticNetCV
from .multi_task_lasso import MultiTaskLasso
from .multi_task_lasso_cv import MultiTaskLassoCV
from .nearest_centroid import NearestCentroid
from .nu_svc import NuSVC
from .nu_svr import NuSVR
from .orthogonal_matching_pursuit import OrthogonalMatchingPursuit
from .orthogonal_matching_pursuit_cv import OrthogonalMatchingPursuitCV
from .passive_aggressive_regressor import PassiveAggressiveRegressor
from .perceptron import Perceptron
from .pls_canonical import PLSCanonical
from .pls_regression import PLSRegression
from .plssvd import PLSSVD
from .power_transformer import PowerTransformer
from .radius_neighbors_classifier import RadiusNeighborsClassifier
from .radius_neighbors_regressor import RadiusNeighborsRegressor
from .random_trees_embedding import RandomTreesEmbedding
from .ransac_regressor import RANSACRegressor
from .rbf_sampler import RBFSampler
from .ridge_classifier_cv import RidgeClassifierCV
from .ridge_cv import RidgeCV
from .skewed_chi2_sampler import SkewedChi2Sampler
from .sparse_pca import SparsePCA
from .sparse_random_projection import SparseRandomProjection
from .theil_sen_regressor import TheilSenRegressor
from .transformed_target_regressor import TransformedTargetRegressor
from .truncated_svd import TruncatedSVD
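# Editorial usage sketch (not part of the autogenerated module): the operators
# imported above compose into Lale pipelines via the >> combinator; the
# concrete choice of PCA >> LogisticRegression below is an illustrative
# assumption, as is the synthetic data.
#
# import numpy as np
# from lale.lib.autogen import PCA, LogisticRegression
# X, y = np.random.rand(30, 4), np.array([0] * 15 + [1] * 15)
# trained = (PCA(n_components=2) >> LogisticRegression()).fit(X, y)
# predictions = trained.predict(X)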
| 6,623 | 48.066667 | 137 |
py
|
lale
|
lale-master/lale/lib/autogen/multi_task_elastic_net.py
|
from numpy import inf, nan
from packaging import version
from sklearn.linear_model import MultiTaskElasticNet as Op
import lale.operators
from lale.docstrings import set_docstrings
from lale.operators import make_operator
class _MultiTaskElasticNetImpl:
def __init__(self, **hyperparams):
self._hyperparams = hyperparams
self._wrapped_model = Op(**self._hyperparams)
def fit(self, X, y=None):
if y is not None:
self._wrapped_model.fit(X, y)
else:
self._wrapped_model.fit(X)
return self
def predict(self, X):
return self._wrapped_model.predict(X)
_hyperparams_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "inherited docstring for MultiTaskElasticNet Multi-task ElasticNet model trained with L1/L2 mixed-norm as regularizer",
"allOf": [
{
"type": "object",
"required": [
"alpha",
"l1_ratio",
"fit_intercept",
"normalize",
"copy_X",
"max_iter",
"tol",
"warm_start",
"random_state",
"selection",
],
"relevantToOptimizer": [
"alpha",
"fit_intercept",
"normalize",
"copy_X",
"max_iter",
"tol",
],
"additionalProperties": False,
"properties": {
"alpha": {
"type": "number",
"minimumForOptimizer": 1e-10,
"maximumForOptimizer": 1.0,
"distribution": "loguniform",
"default": 1.0,
"description": "Constant that multiplies the L1/L2 term",
},
"l1_ratio": {
"type": "number",
"default": 0.5,
"description": "The ElasticNet mixing parameter, with 0 < l1_ratio <= 1",
},
"fit_intercept": {
"type": "boolean",
"default": True,
"description": "whether to calculate the intercept for this model",
},
"normalize": {
"type": "boolean",
"default": False,
"description": "This parameter is ignored when ``fit_intercept`` is set to False",
},
"copy_X": {
"type": "boolean",
"default": True,
"description": "If ``True``, X will be copied; else, it may be overwritten.",
},
"max_iter": {
"type": "integer",
"minimumForOptimizer": 10,
"maximumForOptimizer": 1000,
"distribution": "uniform",
"default": 1000,
"description": "The maximum number of iterations",
},
"tol": {
"type": "number",
"minimumForOptimizer": 1e-08,
"maximumForOptimizer": 0.01,
"default": 0.0001,
"description": "The tolerance for the optimization: if the updates are smaller than ``tol``, the optimization code checks the dual gap for optimality and continues until it is smaller than ``tol``.",
},
"warm_start": {
"type": "boolean",
"default": False,
"description": "When set to ``True``, reuse the solution of the previous call to fit as initialization, otherwise, just erase the previous solution",
},
"random_state": {
"anyOf": [
{"type": "integer"},
{"laleType": "numpy.random.RandomState"},
{"enum": [None]},
],
"default": None,
"description": "The seed of the pseudo random number generator that selects a random feature to update",
},
"selection": {
"enum": ["cyclic", "random"],
"default": "cyclic",
"description": "If set to 'random', a random coefficient is updated every iteration rather than looping over features sequentially by default",
},
},
}
],
}
_input_fit_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Fit MultiTaskElasticNet model with coordinate descent",
"type": "object",
"required": ["X", "y"],
"properties": {
"X": {
"laleType": "Any",
"XXX TODO XXX": "ndarray, shape (n_samples, n_features)",
"description": "Data",
},
"y": {
"laleType": "Any",
"XXX TODO XXX": "ndarray, shape (n_samples, n_tasks)",
"description": "Target",
},
},
}
_input_predict_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Predict using the linear model",
"type": "object",
"required": ["X"],
"properties": {
"X": {
"anyOf": [
{
"type": "array",
"items": {"laleType": "Any", "XXX TODO XXX": "item type"},
"XXX TODO XXX": "array_like or sparse matrix, shape (n_samples, n_features)",
},
{
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
},
],
"description": "Samples.",
}
},
}
_output_predict_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Returns predicted values.",
"type": "array",
"items": {"type": "number"},
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Combined schema for expected data and hyperparameters.",
"documentation_url": "https://scikit-learn.org/0.20/modules/generated/sklearn.linear_model.MultiTaskElasticNet#sklearn-linear_model-multitaskelasticnet",
"import_from": "sklearn.linear_model",
"type": "object",
"tags": {"pre": [], "op": ["estimator", "regressor"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
},
}
MultiTaskElasticNet = make_operator(_MultiTaskElasticNetImpl, _combined_schemas)
if lale.operators.sklearn_version >= version.Version("1.2"):
# old: https://scikit-learn.org/1.1/modules/generated/sklearn.linear_model.MultiTaskElasticNet.html
# new: https://scikit-learn.org/1.2/modules/generated/sklearn.linear_model.MultiTaskElasticNet.html
MultiTaskElasticNet = MultiTaskElasticNet.customize_schema(normalize=None)
set_docstrings(MultiTaskElasticNet)
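# Editorial usage sketch (not part of the autogenerated file): a hedged demo
# of the wrapper above; the synthetic data (two tasks) and alpha=0.1 are
# illustrative assumptions.
if __name__ == "__main__":
    import numpy as np

    X_demo = np.random.rand(30, 5)
    Y_demo = np.random.rand(30, 2)  # one column per task
    trained = MultiTaskElasticNet(alpha=0.1).fit(X_demo, Y_demo)
    print(trained.predict(X_demo).shape)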
| 7,188 | 37.239362 | 219 |
py
|
lale
|
lale-master/lale/lib/autogen/gaussian_process_classifier.py
|
from numpy import inf, nan
from sklearn.gaussian_process import GaussianProcessClassifier as Op
from lale.docstrings import set_docstrings
from lale.operators import make_operator
class _GaussianProcessClassifierImpl:
def __init__(self, **hyperparams):
self._hyperparams = hyperparams
self._wrapped_model = Op(**self._hyperparams)
def fit(self, X, y=None):
if y is not None:
self._wrapped_model.fit(X, y)
else:
self._wrapped_model.fit(X)
return self
def predict(self, X):
return self._wrapped_model.predict(X)
def predict_proba(self, X):
return self._wrapped_model.predict_proba(X)
_hyperparams_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "inherited docstring for GaussianProcessClassifier Gaussian process classification (GPC) based on Laplace approximation.",
"allOf": [
{
"type": "object",
"required": [
"kernel",
"optimizer",
"n_restarts_optimizer",
"max_iter_predict",
"warm_start",
"copy_X_train",
"random_state",
"multi_class",
"n_jobs",
],
"relevantToOptimizer": [
"optimizer",
"n_restarts_optimizer",
"max_iter_predict",
"multi_class",
],
"additionalProperties": False,
"properties": {
"kernel": {
"XXX TODO XXX": "kernel object",
"description": "The kernel specifying the covariance function of the GP",
"enum": [None],
"default": None,
},
"optimizer": {
"anyOf": [
{"laleType": "callable", "forOptimizer": False},
{"enum": ["fmin_l_bfgs_b"]},
],
"default": "fmin_l_bfgs_b",
"description": "Can either be one of the internally supported optimizers for optimizing the kernel's parameters, specified by a string, or an externally defined optimizer passed as a callable",
},
"n_restarts_optimizer": {
"type": "integer",
"minimumForOptimizer": 0,
"maximumForOptimizer": 1,
"distribution": "uniform",
"default": 0,
"description": "The number of restarts of the optimizer for finding the kernel's parameters which maximize the log-marginal likelihood",
},
"max_iter_predict": {
"type": "integer",
"minimumForOptimizer": 100,
"maximumForOptimizer": 101,
"distribution": "uniform",
"default": 100,
"description": "The maximum number of iterations in Newton's method for approximating the posterior during predict",
},
"warm_start": {
"type": "boolean",
"default": False,
"description": "If warm-starts are enabled, the solution of the last Newton iteration on the Laplace approximation of the posterior mode is used as initialization for the next call of _posterior_mode()",
},
"copy_X_train": {
"type": "boolean",
"default": True,
"description": "If True, a persistent copy of the training data is stored in the object",
},
"random_state": {
"anyOf": [
{"type": "integer"},
{"laleType": "numpy.random.RandomState"},
{"enum": [None]},
],
"default": None,
"description": "The generator used to initialize the centers",
},
"multi_class": {
"XXX TODO XXX": "string, default",
"description": "Specifies how multi-class classification problems are handled",
"enum": ["one_vs_one", "one_vs_rest"],
"default": "one_vs_rest",
},
"n_jobs": {
"anyOf": [{"type": "integer"}, {"enum": [None]}],
"default": 1,
"description": "The number of jobs to use for the computation",
},
},
}
],
}
_input_fit_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Fit Gaussian process classification model",
"type": "object",
"required": ["X", "y"],
"properties": {
"X": {
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
"description": "Training data",
},
"y": {
"type": "array",
"items": {"type": "number"},
"description": "Target values, must be binary",
},
},
}
_input_predict_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Perform classification on an array of test vectors X.",
"type": "object",
"required": ["X"],
"properties": {
"X": {"type": "array", "items": {"type": "array", "items": {"type": "number"}}}
},
}
_output_predict_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Predicted target values for X, values are from ``classes_``",
"type": "array",
"items": {"type": "number"},
}
_input_predict_proba_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Return probability estimates for the test vector X.",
"type": "object",
"required": ["X"],
"properties": {
"X": {"type": "array", "items": {"type": "array", "items": {"type": "number"}}}
},
}
_output_predict_proba_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Returns the probability of the samples for each class in the model",
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Combined schema for expected data and hyperparameters.",
"documentation_url": "https://scikit-learn.org/0.20/modules/generated/sklearn.gaussian_process.GaussianProcessClassifier#sklearn-gaussian_process-gaussianprocessclassifier",
"import_from": "sklearn.gaussian_process",
"type": "object",
"tags": {"pre": [], "op": ["estimator", "classifier"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
"input_predict_proba": _input_predict_proba_schema,
"output_predict_proba": _output_predict_proba_schema,
},
}
GaussianProcessClassifier = make_operator(
_GaussianProcessClassifierImpl, _combined_schemas
)
set_docstrings(GaussianProcessClassifier)
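# Editorial usage sketch (not part of the autogenerated file): a hedged demo
# of the wrapper above with default hyperparameters; the synthetic two-class
# data is an illustrative assumption.
if __name__ == "__main__":
    import numpy as np

    X_demo = np.random.rand(20, 3)
    y_demo = np.array([0] * 10 + [1] * 10)
    trained = GaussianProcessClassifier().fit(X_demo, y_demo)
    print(trained.predict_proba(X_demo)[:3])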
| 7,336 | 38.659459 | 223 |
py
|
lale
|
lale-master/lale/lib/autogen/nu_svr.py
|
from numpy import inf, nan
from sklearn.svm import NuSVR as Op
from lale.docstrings import set_docstrings
from lale.operators import make_operator
class _NuSVRImpl:
def __init__(self, **hyperparams):
self._hyperparams = hyperparams
self._wrapped_model = Op(**self._hyperparams)
def fit(self, X, y=None):
if y is not None:
self._wrapped_model.fit(X, y)
else:
self._wrapped_model.fit(X)
return self
def predict(self, X):
return self._wrapped_model.predict(X)
_hyperparams_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "inherited docstring for NuSVR Nu Support Vector Regression.",
"allOf": [
{
"type": "object",
"required": [
"nu",
"C",
"kernel",
"degree",
"gamma",
"coef0",
"shrinking",
"tol",
"cache_size",
"verbose",
"max_iter",
],
"relevantToOptimizer": [
"kernel",
"degree",
"gamma",
"shrinking",
"tol",
"cache_size",
"max_iter",
],
"additionalProperties": False,
"properties": {
"nu": {
"type": "number",
"default": 0.5,
"description": "An upper bound on the fraction of training errors and a lower bound of the fraction of support vectors",
},
"C": {
"type": "number",
"default": 1.0,
"description": "Penalty parameter C of the error term.",
},
"kernel": {
"enum": ["linear", "poly", "precomputed", "sigmoid", "rbf"],
"default": "rbf",
"description": "Specifies the kernel type to be used in the algorithm",
},
"degree": {
"type": "integer",
"minimumForOptimizer": 2,
"maximumForOptimizer": 3,
"distribution": "uniform",
"default": 3,
"description": "Degree of the polynomial kernel function ('poly')",
},
"gamma": {
"anyOf": [
{"type": "number", "forOptimizer": False},
{"enum": ["scale", "auto"]},
],
"default": "scale",
"description": "Kernel coefficient for 'rbf', 'poly' and 'sigmoid'",
},
"coef0": {
"type": "number",
"default": 0.0,
"description": "Independent term in kernel function",
},
"shrinking": {
"type": "boolean",
"default": True,
"description": "Whether to use the shrinking heuristic.",
},
"tol": {
"type": "number",
"minimumForOptimizer": 1e-08,
"maximumForOptimizer": 0.01,
"default": 0.001,
"description": "Tolerance for stopping criterion.",
},
"cache_size": {
"type": "number",
"minimumForOptimizer": 0.0,
"maximumForOptimizer": 1.0,
"distribution": "uniform",
"default": 200,
"description": "Specify the size of the kernel cache (in MB).",
},
"verbose": {
"type": "boolean",
"default": False,
"description": "Enable verbose output",
},
"max_iter": {
"XXX TODO XXX": "int, optional (default=-1)",
"description": "Hard limit on iterations within solver, or -1 for no limit.",
"type": "integer",
"minimumForOptimizer": 10,
"maximumForOptimizer": 1000,
"distribution": "uniform",
"default": (-1),
},
},
},
{"XXX TODO XXX": "Parameter: coef0 > only significant in 'poly' and 'sigmoid'"},
],
}
_input_fit_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Fit the SVM model according to the given training data.",
"type": "object",
"required": ["X", "y"],
"properties": {
"X": {
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
"description": "Training vectors, where n_samples is the number of samples and n_features is the number of features",
},
"y": {
"type": "array",
"items": {"type": "number"},
"description": "Target values (class labels in classification, real numbers in regression)",
},
"sample_weight": {
"type": "array",
"items": {"type": "number"},
"description": "Per-sample weights",
},
},
}
_input_predict_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Perform regression on samples in X.",
"type": "object",
"required": ["X"],
"properties": {
"X": {
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
"description": 'For kernel="precomputed", the expected shape of X is (n_samples_test, n_samples_train).',
}
},
}
_output_predict_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Perform regression on samples in X.",
"type": "array",
"items": {"type": "number"},
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Combined schema for expected data and hyperparameters.",
"documentation_url": "https://scikit-learn.org/0.20/modules/generated/sklearn.svm.NuSVR#sklearn-svm-nusvr",
"import_from": "sklearn.svm",
"type": "object",
"tags": {"pre": [], "op": ["estimator", "regressor"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
},
}
NuSVR = make_operator(_NuSVRImpl, _combined_schemas)
set_docstrings(NuSVR)
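# Editorial usage sketch (not part of the autogenerated file): a hedged demo
# of the wrapper above with default hyperparameters; the synthetic regression
# data is an illustrative assumption.
if __name__ == "__main__":
    import numpy as np

    X_demo = np.random.rand(30, 4)
    y_demo = np.random.rand(30)
    trained = NuSVR().fit(X_demo, y_demo)
    print(trained.predict(X_demo)[:5])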
| 6,778 | 35.058511 | 140 |
py
|
lale
|
lale-master/lale/lib/autogen/elastic_net.py
|
from numpy import inf, nan
from packaging import version
from sklearn.linear_model import ElasticNet as Op
import lale
from lale.docstrings import set_docstrings
from lale.operators import make_operator, sklearn_version
class _ElasticNetImpl:
def __init__(self, **hyperparams):
self._hyperparams = hyperparams
self._wrapped_model = Op(**self._hyperparams)
def fit(self, X, y=None):
if y is not None:
self._wrapped_model.fit(X, y)
else:
self._wrapped_model.fit(X)
return self
def predict(self, X):
return self._wrapped_model.predict(X)
_hyperparams_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "inherited docstring for ElasticNet Linear regression with combined L1 and L2 priors as regularizer.",
"allOf": [
{
"type": "object",
"required": [
"alpha",
"l1_ratio",
"fit_intercept",
"normalize",
"precompute",
"max_iter",
"copy_X",
"tol",
"warm_start",
"positive",
"random_state",
"selection",
],
"relevantToOptimizer": [
"alpha",
"fit_intercept",
"normalize",
"max_iter",
"copy_X",
"tol",
"positive",
"selection",
],
"additionalProperties": False,
"properties": {
"alpha": {
"type": "number",
"minimumForOptimizer": 1e-10,
"maximumForOptimizer": 1.0,
"distribution": "loguniform",
"default": 1.0,
"description": "Constant that multiplies the penalty terms",
},
"l1_ratio": {
"type": "number",
"default": 0.5,
"description": "The ElasticNet mixing parameter, with ``0 <= l1_ratio <= 1``",
},
"fit_intercept": {
"type": "boolean",
"default": True,
"description": "Whether the intercept should be estimated or not",
},
"normalize": {
"type": "boolean",
"default": False,
"description": "This parameter is ignored when ``fit_intercept`` is set to False",
},
"precompute": {
"anyOf": [
{
"type": "array",
"items": {"laleType": "Any", "XXX TODO XXX": "item type"},
"XXX TODO XXX": "True | False | array-like",
},
{"type": "boolean"},
],
"default": False,
"description": "Whether to use a precomputed Gram matrix to speed up calculations",
},
"max_iter": {
"type": "integer",
"minimumForOptimizer": 10,
"maximumForOptimizer": 1000,
"distribution": "uniform",
"default": 1000,
"description": "The maximum number of iterations",
},
"copy_X": {
"type": "boolean",
"default": True,
"description": "If ``True``, X will be copied; else, it may be overwritten.",
},
"tol": {
"type": "number",
"minimumForOptimizer": 1e-08,
"maximumForOptimizer": 0.01,
"default": 0.0001,
"description": "The tolerance for the optimization: if the updates are smaller than ``tol``, the optimization code checks the dual gap for optimality and continues until it is smaller than ``tol``.",
},
"warm_start": {
"type": "boolean",
"default": False,
"description": "When set to ``True``, reuse the solution of the previous call to fit as initialization, otherwise, just erase the previous solution",
},
"positive": {
"type": "boolean",
"default": False,
"description": "When set to ``True``, forces the coefficients to be positive.",
},
"random_state": {
"anyOf": [
{"type": "integer"},
{"laleType": "numpy.random.RandomState"},
{"enum": [None]},
],
"default": None,
"description": "The seed of the pseudo random number generator that selects a random feature to update",
},
"selection": {
"enum": ["random", "cyclic"],
"default": "cyclic",
"description": "If set to 'random', a random coefficient is updated every iteration rather than looping over features sequentially by default",
},
},
}
],
}
_input_fit_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Fit model with coordinate descent.",
"type": "object",
"required": ["X", "y"],
"properties": {
"X": {
"laleType": "Any",
"XXX TODO XXX": "ndarray or scipy.sparse matrix, (n_samples, n_features)",
"description": "Data",
},
"y": {
"laleType": "Any",
"XXX TODO XXX": "ndarray, shape (n_samples,) or (n_samples, n_targets)",
"description": "Target",
},
"check_input": {
"type": "boolean",
"default": True,
"description": "Allow to bypass several input checking",
},
},
}
_input_predict_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Predict using the linear model",
"type": "object",
"required": ["X"],
"properties": {
"X": {
"anyOf": [
{
"type": "array",
"items": {"laleType": "Any", "XXX TODO XXX": "item type"},
"XXX TODO XXX": "array_like or sparse matrix, shape (n_samples, n_features)",
},
{
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
},
],
"description": "Samples.",
}
},
}
_output_predict_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Returns predicted values.",
"type": "array",
"items": {"type": "number"},
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Combined schema for expected data and hyperparameters.",
"documentation_url": "https://scikit-learn.org/0.20/modules/generated/sklearn.linear_model.ElasticNet#sklearn-linear_model-elasticnet",
"import_from": "sklearn.linear_model",
"type": "object",
"tags": {"pre": [], "op": ["estimator", "regressor"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
},
}
ElasticNet = make_operator(_ElasticNetImpl, _combined_schemas)
if sklearn_version >= version.Version("1.2"):
# new: "https://scikit-learn.org/1.2/modules/generated/sklearn.linear_model.ElasticNet#sklearn-linear_model-elasticnet"
ElasticNet = ElasticNet.customize_schema(
normalize=None,
set_as_available=True,
)
set_docstrings(ElasticNet)
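# Hedged usage sketch, not part of the generated module: it shows how the
# wrapped operator is typically configured, fit, and queried through the lale
# API. The names `X_demo`/`y_demo` and all hyperparameter values are
# illustrative assumptions, not values mandated by the schema.
if __name__ == "__main__":
    import numpy as np

    rng = np.random.RandomState(0)
    X_demo = rng.rand(20, 3)  # 20 samples, 3 features
    y_demo = rng.rand(20)  # continuous regression target
    trained = ElasticNet(alpha=0.5, l1_ratio=0.3).fit(X_demo, y_demo)
    print(trained.predict(X_demo)[:5])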
lale-master/lale/lib/autogen/random_trees_embedding.py
import typing
import sklearn
from numpy import inf, nan
from packaging import version
from sklearn.ensemble import RandomTreesEmbedding as Op
from lale.docstrings import set_docstrings
from lale.operators import PlannedIndividualOp, make_operator, sklearn_version
class _RandomTreesEmbeddingImpl:
def __init__(self, **hyperparams):
self._hyperparams = hyperparams
self._wrapped_model = Op(**self._hyperparams)
def fit(self, X, y=None):
if y is not None:
self._wrapped_model.fit(X, y)
else:
self._wrapped_model.fit(X)
return self
def transform(self, X):
return self._wrapped_model.transform(X)
_hyperparams_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "inherited docstring for RandomTreesEmbedding An ensemble of totally random trees.",
"allOf": [
{
"type": "object",
"required": [
"n_estimators",
"max_depth",
"min_samples_split",
"min_samples_leaf",
"min_weight_fraction_leaf",
"max_leaf_nodes",
"min_impurity_decrease",
"min_impurity_split",
"sparse_output",
"n_jobs",
"random_state",
"verbose",
"warm_start",
],
"relevantToOptimizer": [
"n_estimators",
"max_depth",
"min_samples_split",
"min_samples_leaf",
"sparse_output",
],
"additionalProperties": False,
"properties": {
"n_estimators": {
"type": "integer",
"minimumForOptimizer": 10,
"maximumForOptimizer": 100,
"distribution": "uniform",
"default": 10,
"description": "Number of trees in the forest",
},
"max_depth": {
"type": "integer",
"minimumForOptimizer": 3,
"maximumForOptimizer": 5,
"distribution": "uniform",
"default": 5,
"description": "The maximum depth of each tree",
},
"min_samples_split": {
"anyOf": [
{"type": "integer", "forOptimizer": False},
{
"type": "number",
"minimumForOptimizer": 0.01,
"maximumForOptimizer": 0.5,
"distribution": "uniform",
},
],
"default": 2,
"description": "The minimum number of samples required to split an internal node: - If int, then consider `min_samples_split` as the minimum number",
},
"min_samples_leaf": {
"anyOf": [
{"type": "integer", "forOptimizer": False},
{
"type": "number",
"minimumForOptimizer": 0.01,
"maximumForOptimizer": 0.5,
"distribution": "uniform",
},
],
"default": 1,
"description": "The minimum number of samples required to be at a leaf node",
},
"min_weight_fraction_leaf": {
"type": "number",
"default": 0.0,
"description": "The minimum weighted fraction of the sum total of weights (of all the input samples) required to be at a leaf node",
},
"max_leaf_nodes": {
"anyOf": [{"type": "integer"}, {"enum": [None]}],
"default": None,
"description": "Grow trees with ``max_leaf_nodes`` in best-first fashion",
},
"min_impurity_decrease": {
"type": "number",
"default": 0.0,
"description": "A node will be split if this split induces a decrease of the impurity greater than or equal to this value",
},
"min_impurity_split": {
"anyOf": [{"type": "number"}, {"enum": [None]}],
"default": None,
"description": "Threshold for early stopping in tree growth",
},
"sparse_output": {
"type": "boolean",
"default": True,
"description": "Whether or not to return a sparse CSR matrix, as default behavior, or to return a dense array compatible with dense pipeline operators.",
},
"n_jobs": {
"anyOf": [{"type": "integer"}, {"enum": [None]}],
"default": 1,
"description": "The number of jobs to run in parallel for both `fit` and `predict`",
},
"random_state": {
"anyOf": [
{"type": "integer"},
{"laleType": "numpy.random.RandomState"},
{"enum": [None]},
],
"default": None,
"description": "If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`.",
},
"verbose": {
"type": "integer",
"default": 0,
"description": "Controls the verbosity when fitting and predicting.",
},
"warm_start": {
"type": "boolean",
"default": False,
"description": "When set to ``True``, reuse the solution of the previous call to fit and add more estimators to the ensemble, otherwise, just fit a whole new forest",
},
},
},
{
"XXX TODO XXX": "Parameter: min_samples_leaf > only be considered if it leaves at least min_samples_leaf training samples in each of the left and right branches"
},
],
}
_input_fit_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Fit estimator.",
"type": "object",
"required": ["X"],
"properties": {
"X": {
"anyOf": [
{
"type": "array",
"items": {"laleType": "Any", "XXX TODO XXX": "item type"},
"XXX TODO XXX": "array-like or sparse matrix, shape=(n_samples, n_features)",
},
{
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
},
],
"description": "The input samples",
},
"sample_weight": {
"anyOf": [{"type": "array", "items": {"type": "number"}}, {"enum": [None]}],
"description": "Sample weights",
},
},
}
_input_transform_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Transform dataset.",
"type": "object",
"required": ["X"],
"properties": {
"X": {
"anyOf": [
{
"type": "array",
"items": {"laleType": "Any", "XXX TODO XXX": "item type"},
"XXX TODO XXX": "array-like or sparse matrix, shape=(n_samples, n_features)",
},
{
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
},
],
"description": "Input data to be transformed",
}
},
}
_output_transform_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Transformed dataset.",
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Combined schema for expected data and hyperparameters.",
"documentation_url": "https://scikit-learn.org/0.20/modules/generated/sklearn.ensemble.RandomTreesEmbedding#sklearn-ensemble-randomtreesembedding",
"import_from": "sklearn.ensemble",
"type": "object",
"tags": {"pre": [], "op": ["transformer"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_transform": _input_transform_schema,
"output_transform": _output_transform_schema,
},
}
RandomTreesEmbedding = make_operator(_RandomTreesEmbeddingImpl, _combined_schemas)
if sklearn_version >= version.Version("1.0"):
# old: https://scikit-learn.org/0.24/modules/generated/sklearn.ensemble.RandomTreesEmbedding.html
# new: https://scikit-learn.org/1.0/modules/generated/sklearn.ensemble.RandomTreesEmbedding.html
RandomTreesEmbedding = typing.cast(
PlannedIndividualOp,
RandomTreesEmbedding.customize_schema(
min_impurity_split=None,
),
)
set_docstrings(RandomTreesEmbedding)
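# Hedged usage sketch, not part of the generated module: transforming
# illustrative synthetic data with the wrapped transformer. `X_demo` and the
# hyperparameter values are assumptions for demonstration only.
if __name__ == "__main__":
    import numpy as np

    rng = np.random.RandomState(0)
    X_demo = rng.rand(30, 4)
    trained = RandomTreesEmbedding(n_estimators=10, max_depth=3).fit(X_demo)
    # sparse_output=True by default, so the result is a sparse CSR matrix
    print(trained.transform(X_demo).shape)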
lale-master/lale/lib/autogen/lars.py
from numpy import inf, nan
from sklearn.linear_model import Lars as Op
from lale.docstrings import set_docstrings
from lale.operators import make_operator
class _LarsImpl:
def __init__(self, **hyperparams):
self._hyperparams = hyperparams
self._wrapped_model = Op(**self._hyperparams)
def fit(self, X, y=None):
if y is not None:
self._wrapped_model.fit(X, y)
else:
self._wrapped_model.fit(X)
return self
def predict(self, X):
return self._wrapped_model.predict(X)
_hyperparams_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "inherited docstring for Lars Least Angle Regression model a.k.a. LAR",
"allOf": [
{
"type": "object",
"required": [
"fit_intercept",
"verbose",
"normalize",
"precompute",
"n_nonzero_coefs",
"eps",
"copy_X",
"fit_path",
"jitter",
"random_state",
],
"relevantToOptimizer": [
"fit_intercept",
"normalize",
"precompute",
"n_nonzero_coefs",
"eps",
"copy_X",
"fit_path",
],
"additionalProperties": False,
"properties": {
"fit_intercept": {
"type": "boolean",
"default": True,
"description": "Whether to calculate the intercept for this model",
},
"verbose": {
"anyOf": [{"type": "boolean"}, {"type": "integer"}],
"default": False,
"description": "Sets the verbosity amount",
},
"normalize": {
"type": "boolean",
"default": True,
"description": "This parameter is ignored when ``fit_intercept`` is set to False",
},
"precompute": {
"anyOf": [
{
"type": "array",
"items": {"laleType": "Any", "XXX TODO XXX": "item type"},
"XXX TODO XXX": "True | False | 'auto' | array-like",
"forOptimizer": False,
},
{"enum": ["auto"]},
],
"default": "auto",
"description": "Whether to use a precomputed Gram matrix to speed up calculations",
},
"n_nonzero_coefs": {
"type": "integer",
"minimumForOptimizer": 500,
"maximumForOptimizer": 501,
"distribution": "uniform",
"default": 500,
"description": "Target number of non-zero coefficients",
},
"eps": {
"type": "number",
"minimumForOptimizer": 0.001,
"maximumForOptimizer": 0.1,
"distribution": "loguniform",
"default": 2.220446049250313e-16,
"description": "The machine-precision regularization in the computation of the Cholesky diagonal factors",
},
"copy_X": {
"type": "boolean",
"default": True,
"description": "If ``True``, X will be copied; else, it may be overwritten.",
},
"fit_path": {
"type": "boolean",
"default": True,
"description": "If True the full path is stored in the ``coef_path_`` attribute",
},
"jitter": {
"anyOf": [
{"type": "number"},
{"enum": [None]},
],
"default": None,
"description": "Upper bound on a uniform noise parameter to be added to the y values, to satisfy the model’s assumption of one-at-a-time computations",
},
"random_state": {
"anyOf": [
{"type": "integer"},
{"laleType": "numpy.random.RandomState"},
{"enum": [None]},
],
"default": None,
"description": "The seed of the pseudo random number generator to use when shuffling the data",
},
},
}
],
}
_input_fit_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Fit the model using X, y as training data.",
"type": "object",
"required": ["X", "y"],
"properties": {
"X": {
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
"description": "Training data.",
},
"y": {
"anyOf": [
{"type": "array", "items": {"type": "number"}},
{
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
},
],
"description": "Target values.",
},
"Xy": {
"laleType": "Any",
"XXX TODO XXX": "array-like, shape (n_samples,) or (n_samples, n_targets), optional",
"description": "Xy = np.dot(X.T, y) that can be precomputed",
},
},
}
_input_predict_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Predict using the linear model",
"type": "object",
"required": ["X"],
"properties": {
"X": {
"anyOf": [
{
"type": "array",
"items": {"laleType": "Any", "XXX TODO XXX": "item type"},
"XXX TODO XXX": "array_like or sparse matrix, shape (n_samples, n_features)",
},
{
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
},
],
"description": "Samples.",
}
},
}
_output_predict_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Returns predicted values.",
"type": "array",
"items": {"type": "number"},
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Combined schema for expected data and hyperparameters.",
"documentation_url": "https://scikit-learn.org/0.20/modules/generated/sklearn.linear_model.Lars#sklearn-linear_model-lars",
"import_from": "sklearn.linear_model",
"type": "object",
"tags": {"pre": [], "op": ["estimator", "regressor"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
},
}
Lars = make_operator(_LarsImpl, _combined_schemas)
set_docstrings(Lars)
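# Hedged usage sketch, not part of the generated module: fitting the wrapped
# Lars operator on an illustrative noiseless linear problem. `X_demo`/`y_demo`
# and the hyperparameter value are assumptions for demonstration only.
if __name__ == "__main__":
    import numpy as np

    rng = np.random.RandomState(0)
    X_demo = rng.rand(25, 3)
    y_demo = X_demo @ np.array([1.0, -2.0, 0.5])  # exact linear target
    trained = Lars(n_nonzero_coefs=2).fit(X_demo, y_demo)
    print(trained.predict(X_demo)[:3])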
lale-master/lale/lib/autogen/ridge_cv.py
from numpy import inf, nan
from packaging import version
from sklearn.linear_model import RidgeCV as Op
import lale
from lale.docstrings import set_docstrings
from lale.operators import make_operator, sklearn_version
class _RidgeCVImpl:
def __init__(self, **hyperparams):
self._hyperparams = hyperparams
self._wrapped_model = Op(**self._hyperparams)
def fit(self, X, y=None):
if y is not None:
self._wrapped_model.fit(X, y)
else:
self._wrapped_model.fit(X)
return self
def predict(self, X):
return self._wrapped_model.predict(X)
_hyperparams_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "inherited docstring for RidgeCV Ridge regression with built-in cross-validation.",
"allOf": [
{
"type": "object",
"required": [
"alphas",
"fit_intercept",
"normalize",
"scoring",
"cv",
"gcv_mode",
"store_cv_values",
],
"relevantToOptimizer": [
"fit_intercept",
"normalize",
"scoring",
"cv",
"gcv_mode",
"store_cv_values",
],
"additionalProperties": False,
"properties": {
"alphas": {
"type": "array",
"items": {"type": "number"},
"default": [0.1, 1.0, 10.0],
"description": "Array of alpha values to try",
},
"fit_intercept": {
"type": "boolean",
"default": True,
"description": "Whether to calculate the intercept for this model",
},
"normalize": {
"type": "boolean",
"default": False,
"description": "This parameter is ignored when ``fit_intercept`` is set to False",
},
"scoring": {
"anyOf": [
{"laleType": "callable", "forOptimizer": False},
{"enum": ["accuracy", None]},
],
"default": None,
"description": "A string (see model evaluation documentation) or a scorer callable object / function with signature ``scorer(estimator, X, y)``.",
},
"cv": {
"description": """Cross-validation as integer or as object that has a split function.
The fit method performs cross validation on the input dataset for per
trial, and uses the mean cross validation performance for optimization.
This behavior is also impacted by handle_cv_failure flag.
If integer: number of folds in sklearn.model_selection.StratifiedKFold.
If object with split function: generator yielding (train, test) splits
as arrays of indices. Can use any of the iterators from
https://scikit-learn.org/stable/modules/cross_validation.html#cross-validation-iterators.""",
"anyOf": [
{
"type": "integer",
"minimum": 1,
"default": 5,
"minimumForOptimizer": 3,
"maximumForOptimizer": 4,
"distribution": "uniform",
},
{"laleType": "Any", "forOptimizer": False},
],
},
"gcv_mode": {
"enum": [None, "auto", "svd", "eigen"],
"default": None,
"description": "Flag indicating which strategy to use when performing Generalized Cross-Validation",
},
"store_cv_values": {
"type": "boolean",
"default": False,
"description": "Flag indicating if the cross-validation values corresponding to each alpha should be stored in the ``cv_values_`` attribute (see below)",
},
},
},
{
"XXX TODO XXX": "Parameter: store_cv_values > only compatible with cv=none (i"
},
],
}
_input_fit_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Fit Ridge regression model",
"type": "object",
"required": ["X", "y"],
"properties": {
"X": {
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
"description": "Training data",
},
"y": {
"anyOf": [
{"type": "array", "items": {"type": "number"}},
{
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
},
],
"description": "Target values",
},
"sample_weight": {
"anyOf": [
{"type": "number"},
{"type": "array", "items": {"type": "number"}},
],
"description": "Sample weight",
},
},
}
_input_predict_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Predict using the linear model",
"type": "object",
"required": ["X"],
"properties": {
"X": {
"anyOf": [
{
"type": "array",
"items": {"laleType": "Any", "XXX TODO XXX": "item type"},
"XXX TODO XXX": "array_like or sparse matrix, shape (n_samples, n_features)",
},
{
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
},
],
"description": "Samples.",
}
},
}
_output_predict_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Returns predicted values.",
"type": "array",
"items": {"type": "number"},
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Combined schema for expected data and hyperparameters.",
"documentation_url": "https://scikit-learn.org/0.20/modules/generated/sklearn.linear_model.RidgeCV#sklearn-linear_model-ridgecv",
"import_from": "sklearn.linear_model",
"type": "object",
"tags": {"pre": [], "op": ["estimator"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
},
}
RidgeCV = make_operator(_RidgeCVImpl, _combined_schemas)
if sklearn_version >= version.Version("1.2"):
    # old: https://scikit-learn.org/1.1/modules/generated/sklearn.linear_model.RidgeCV.html
    # new: https://scikit-learn.org/1.2/modules/generated/sklearn.linear_model.RidgeCV.html
RidgeCV = RidgeCV.customize_schema(
normalize=None,
set_as_available=True,
)
set_docstrings(RidgeCV)
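# Hedged usage sketch, not part of the generated module: cross-validated ridge
# regression over a small alpha grid. The data and the grid values are
# illustrative assumptions only.
if __name__ == "__main__":
    import numpy as np

    rng = np.random.RandomState(0)
    X_demo = rng.rand(30, 2)
    y_demo = 3.0 * X_demo[:, 0] - X_demo[:, 1]
    trained = RidgeCV(alphas=[0.1, 1.0, 10.0]).fit(X_demo, y_demo)
    print(trained.predict(X_demo)[:3])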
lale-master/lale/lib/autogen/transformed_target_regressor.py
from numpy import inf, nan
from sklearn.compose import TransformedTargetRegressor as Op
from lale.docstrings import set_docstrings
from lale.operators import make_operator
class _TransformedTargetRegressorImpl:
def __init__(self, **hyperparams):
self._hyperparams = hyperparams
self._wrapped_model = Op(**self._hyperparams)
def fit(self, X, y=None):
if y is not None:
self._wrapped_model.fit(X, y)
else:
self._wrapped_model.fit(X)
return self
def predict(self, X):
return self._wrapped_model.predict(X)
_hyperparams_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "inherited docstring for TransformedTargetRegressor Meta-estimator to regress on a transformed target.",
"allOf": [
{
"type": "object",
"required": [
"regressor",
"transformer",
"func",
"inverse_func",
"check_inverse",
],
"relevantToOptimizer": [],
"additionalProperties": False,
"properties": {
"regressor": {
"XXX TODO XXX": "object, default=LinearRegression()",
"description": "Regressor object such as derived from ``RegressorMixin``",
"enum": [None],
"default": None,
},
"transformer": {
"anyOf": [{"type": "object"}, {"enum": [None]}],
"default": None,
"description": "Estimator object such as derived from ``TransformerMixin``",
},
"func": {
"XXX TODO XXX": "function, optional",
"description": "Function to apply to ``y`` before passing to ``fit``",
"enum": [None],
"default": None,
},
"inverse_func": {
"XXX TODO XXX": "function, optional",
"description": "Function to apply to the prediction of the regressor",
"enum": [None],
"default": None,
},
"check_inverse": {
"type": "boolean",
"default": True,
"description": "Whether to check that ``transform`` followed by ``inverse_transform`` or ``func`` followed by ``inverse_func`` leads to the original targets.",
},
},
},
{
"description": "transformer' and functions 'func'/'inverse_func' cannot both be set.",
"anyOf": [
{"type": "object", "properties": {"transformer": {"enum": [None]}}},
{
"allOf": [
{"type": "object", "properties": {"func": {"enum": [None]}}},
{
"type": "object",
"properties": {"inverse_func": {"enum": [None]}},
},
]
},
],
},
{
"description": "When 'func' is provided, 'inverse_func' must also be provided",
"anyOf": [
{
"allOf": [
{
"type": "object",
"properties": {"transformer": {"not": {"enum": [None]}}},
},
{
"anyOf": [
{
"type": "object",
"properties": {"func": {"not": {"enum": [None]}}},
},
{
"type": "object",
"properties": {
"inverse_func": {"not": {"enum": [None]}}
},
},
]
},
]
},
{
"type": "object",
"properties": {"transformer": {"not": {"enum": [None]}}},
},
{"type": "object", "properties": {"func": {"enum": [None]}}},
{
"type": "object",
"properties": {"inverse_func": {"not": {"enum": [None]}}},
},
],
},
],
}
_input_fit_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Fit the model according to the given training data.",
"type": "object",
"required": ["X", "y"],
"properties": {
"X": {
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
"description": "Training vector, where n_samples is the number of samples and n_features is the number of features.",
},
"y": {
"type": "array",
"items": {"type": "number"},
"description": "Target values.",
},
"sample_weight": {
"laleType": "Any",
"XXX TODO XXX": "array-like, shape (n_samples,) optional",
"description": "Array of weights that are assigned to individual samples",
},
},
}
_input_predict_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Predict using the base regressor, applying inverse.",
"type": "object",
"required": ["X"],
"properties": {
"X": {
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
"description": "Samples.",
}
},
}
_output_predict_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Predicted values.",
"type": "array",
"items": {"type": "number"},
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Combined schema for expected data and hyperparameters.",
"documentation_url": "https://scikit-learn.org/0.20/modules/generated/sklearn.compose.TransformedTargetRegressor#sklearn-compose-transformedtargetregressor",
"import_from": "sklearn.compose",
"type": "object",
"tags": {"pre": [], "op": ["estimator", "regressor"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
},
}
TransformedTargetRegressor = make_operator(
_TransformedTargetRegressorImpl, _combined_schemas
)
set_docstrings(TransformedTargetRegressor)
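# Hedged usage sketch, not part of the generated module. Note that this
# autogen schema pins `regressor`/`func`/`inverse_func` to None, so the sketch
# relies on the sklearn defaults (identity transform around LinearRegression).
# The data is an illustrative assumption only.
if __name__ == "__main__":
    import numpy as np

    rng = np.random.RandomState(0)
    X_demo = rng.rand(40, 2)
    y_demo = X_demo[:, 0] + 0.5 * X_demo[:, 1]
    trained = TransformedTargetRegressor().fit(X_demo, y_demo)
    print(trained.predict(X_demo)[:3])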
lale-master/lale/lib/autogen/pls_regression.py
from numpy import inf, nan
from sklearn.cross_decomposition import PLSRegression as Op
from lale.docstrings import set_docstrings
from lale.operators import make_operator
class _PLSRegressionImpl:
def __init__(self, **hyperparams):
self._hyperparams = hyperparams
self._wrapped_model = Op(**self._hyperparams)
def fit(self, X, y=None):
if y is not None:
self._wrapped_model.fit(X, y)
else:
self._wrapped_model.fit(X)
return self
def transform(self, X):
return self._wrapped_model.transform(X)
def predict(self, X):
return self._wrapped_model.predict(X)
_hyperparams_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "inherited docstring for PLSRegression PLS regression",
"allOf": [
{
"type": "object",
"required": ["n_components", "scale", "max_iter", "tol", "copy"],
"relevantToOptimizer": ["n_components", "scale", "max_iter", "tol", "copy"],
"additionalProperties": False,
"properties": {
"n_components": {
"type": "integer",
"minimumForOptimizer": 2,
"maximumForOptimizer": 256,
"distribution": "uniform",
"default": 2,
"description": "Number of components to keep.",
},
"scale": {
"type": "boolean",
"default": True,
"description": "whether to scale the data",
},
"max_iter": {
"XXX TODO XXX": "an integer, (default 500)",
"description": 'the maximum number of iterations of the NIPALS inner loop (used only if algorithm="nipals")',
"type": "integer",
"minimumForOptimizer": 10,
"maximumForOptimizer": 1000,
"distribution": "uniform",
"default": 500,
},
"tol": {
"XXX TODO XXX": "non-negative real",
"description": "Tolerance used in the iterative algorithm default 1e-06.",
"type": "number",
"minimumForOptimizer": 1e-08,
"maximumForOptimizer": 0.01,
"default": 1e-06,
},
"copy": {
"type": "boolean",
"default": True,
"description": "Whether the deflation should be done on a copy",
},
},
},
{"XXX TODO XXX": 'Parameter: max_iter > only if algorithm="nipals")'},
],
}
_input_fit_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Fit model to data.",
"type": "object",
"required": ["X"],
"properties": {
"X": {
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
"description": "Training vectors, where n_samples is the number of samples and n_features is the number of predictors.",
},
"Y": {
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
"description": "Target vectors, where n_samples is the number of samples and n_targets is the number of response variables.",
},
},
}
_input_transform_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Apply the dimension reduction learned on the train data.",
"type": "object",
"required": ["X"],
"properties": {
"X": {
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
"description": "Training vectors, where n_samples is the number of samples and n_features is the number of predictors.",
},
"Y": {
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
"description": "Target vectors, where n_samples is the number of samples and n_targets is the number of response variables.",
},
"copy": {
"type": "boolean",
"default": True,
"description": "Whether to copy X and Y, or perform in-place normalization.",
},
},
}
_output_transform_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Apply the dimension reduction learned on the train data.",
"laleType": "Any",
"XXX TODO XXX": "x_scores if Y is not given, (x_scores, y_scores) otherwise.",
}
_input_predict_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Apply the dimension reduction learned on the train data.",
"type": "object",
"required": ["X"],
"properties": {
"X": {
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
"description": "Training vectors, where n_samples is the number of samples and n_features is the number of predictors.",
},
"copy": {
"type": "boolean",
"default": True,
"description": "Whether to copy X and Y, or perform in-place normalization.",
},
},
}
_output_predict_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Apply the dimension reduction learned on the train data.",
"laleType": "Any",
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Combined schema for expected data and hyperparameters.",
"documentation_url": "https://scikit-learn.org/0.20/modules/generated/sklearn.cross_decomposition.PLSRegression#sklearn-cross_decomposition-plsregression",
"import_from": "sklearn.cross_decomposition",
"type": "object",
"tags": {"pre": [], "op": ["transformer", "estimator"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_transform": _input_transform_schema,
"output_transform": _output_transform_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
},
}
PLSRegression = make_operator(_PLSRegressionImpl, _combined_schemas)
set_docstrings(PLSRegression)
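# Hedged usage sketch, not part of the generated module: PLS with a
# two-dimensional response matrix. `X_demo`/`Y_demo` and n_components=2 are
# illustrative assumptions only.
if __name__ == "__main__":
    import numpy as np

    rng = np.random.RandomState(0)
    X_demo = rng.rand(30, 5)
    Y_demo = rng.rand(30, 2)
    trained = PLSRegression(n_components=2).fit(X_demo, Y_demo)
    print(trained.transform(X_demo).shape)  # x_scores, (30, 2)
    print(trained.predict(X_demo).shape)  # (30, 2)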
lale-master/lale/lib/autogen/gaussian_random_projection.py
from numpy import inf, nan
from sklearn.random_projection import GaussianRandomProjection as Op
from lale.docstrings import set_docstrings
from lale.operators import make_operator
class _GaussianRandomProjectionImpl:
def __init__(self, **hyperparams):
self._hyperparams = hyperparams
self._wrapped_model = Op(**self._hyperparams)
def fit(self, X, y=None):
if y is not None:
self._wrapped_model.fit(X, y)
else:
self._wrapped_model.fit(X)
return self
def transform(self, X):
return self._wrapped_model.transform(X)
_hyperparams_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "inherited docstring for GaussianRandomProjection Reduce dimensionality through Gaussian random projection",
"allOf": [
{
"type": "object",
"required": ["n_components", "eps", "random_state"],
"relevantToOptimizer": ["n_components", "eps"],
"additionalProperties": False,
"properties": {
"n_components": {
"XXX TODO XXX": "int or 'auto', optional (default = 'auto')",
"description": "Dimensionality of the target projection space",
"anyOf": [
{
"type": "integer",
"minimumForOptimizer": 2,
"maximumForOptimizer": 256,
"distribution": "uniform",
},
{"enum": ["auto"]},
],
"default": "auto",
},
"eps": {
"XXX TODO XXX": "strictly positive float, optional (default=0.1)",
"description": "Parameter to control the quality of the embedding according to the Johnson-Lindenstrauss lemma when n_components is set to 'auto'",
"type": "number",
"minimumForOptimizer": 0.001,
"maximumForOptimizer": 0.1,
"distribution": "loguniform",
"default": 0.1,
},
"random_state": {
"anyOf": [
{"type": "integer"},
{"laleType": "numpy.random.RandomState"},
{"enum": [None]},
],
"default": None,
"description": "Control the pseudo random number generator used to generate the matrix at fit time",
},
},
},
{
"description": "eps=%f and n_samples=%d lead to a target dimension of %d which is larger than the original space with n_features=%d' % (self.eps, n_samples, `self.n_components_`, n_features) ",
"anyOf": [
{
"type": "object",
"properties": {"n_components": {"not": {"enum": ["auto"]}}},
},
{
"XXX TODO XXX": "johnson_lindenstrauss_min_dim(n_samples=X.shape[0], eps=self.eps) <= 0"
},
{
"XXX TODO XXX": "johnson_lindenstrauss_min_dim(n_samples=X.shape[0], eps=self.eps) <= X.shape[1]"
},
],
},
],
}
_input_fit_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Generate a sparse random projection matrix",
"type": "object",
"required": ["X", "y"],
"properties": {
"X": {
"anyOf": [
{
"type": "array",
"items": {"laleType": "Any", "XXX TODO XXX": "item type"},
"XXX TODO XXX": "numpy array or scipy.sparse of shape [n_samples, n_features]",
},
{
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
},
],
"description": "Training set: only the shape is used to find optimal random matrix dimensions based on the theory referenced in the afore mentioned papers.",
},
"y": {"laleType": "Any", "XXX TODO XXX": "", "description": "Ignored"},
},
}
_input_transform_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Project the data by using matrix product with the random matrix",
"type": "object",
"required": ["X"],
"properties": {
"X": {
"anyOf": [
{
"type": "array",
"items": {"laleType": "Any", "XXX TODO XXX": "item type"},
"XXX TODO XXX": "numpy array or scipy.sparse of shape [n_samples, n_features]",
},
{
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
},
],
"description": "The input data to project into a smaller dimensional space.",
}
},
}
_output_transform_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Projected array.",
"anyOf": [
{
"type": "array",
"items": {"laleType": "Any", "XXX TODO XXX": "item type"},
"XXX TODO XXX": "numpy array or scipy sparse of shape [n_samples, n_components]",
},
{"type": "array", "items": {"type": "array", "items": {"type": "number"}}},
],
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Combined schema for expected data and hyperparameters.",
"documentation_url": "https://scikit-learn.org/0.20/modules/generated/sklearn.random_projection.GaussianRandomProjection#sklearn-random_projection-gaussianrandomprojection",
"import_from": "sklearn.random_projection",
"type": "object",
"tags": {"pre": [], "op": ["transformer"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_transform": _input_transform_schema,
"output_transform": _output_transform_schema,
},
}
GaussianRandomProjection = make_operator(
_GaussianRandomProjectionImpl, _combined_schemas
)
set_docstrings(GaussianRandomProjection)
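# Hedged usage sketch, not part of the generated module: projecting a wide
# illustrative matrix down to an explicit n_components, sidestepping the
# 'auto' Johnson-Lindenstrauss sizing (which needs many samples). All values
# are assumptions for demonstration only.
if __name__ == "__main__":
    import numpy as np

    rng = np.random.RandomState(0)
    X_demo = rng.rand(50, 100)
    trained = GaussianRandomProjection(n_components=10, random_state=0).fit(X_demo)
    print(trained.transform(X_demo).shape)  # (50, 10)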
lale-master/lale/lib/autogen/perceptron.py
from numpy import inf, nan
from sklearn.linear_model import Perceptron as Op
from lale.docstrings import set_docstrings
from lale.operators import make_operator
class _PerceptronImpl:
def __init__(self, **hyperparams):
self._hyperparams = hyperparams
self._wrapped_model = Op(**self._hyperparams)
def fit(self, X, y=None):
if y is not None:
self._wrapped_model.fit(X, y)
else:
self._wrapped_model.fit(X)
return self
def predict(self, X):
return self._wrapped_model.predict(X)
def decision_function(self, X):
return self._wrapped_model.decision_function(X)
_hyperparams_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "inherited docstring for Perceptron Perceptron",
"allOf": [
{
"type": "object",
"required": [
"penalty",
"alpha",
"fit_intercept",
"max_iter",
"tol",
"shuffle",
"verbose",
"eta0",
"n_jobs",
"random_state",
"early_stopping",
"validation_fraction",
"n_iter_no_change",
"class_weight",
"warm_start",
],
"relevantToOptimizer": [
"alpha",
"fit_intercept",
"max_iter",
"tol",
"shuffle",
"eta0",
],
"additionalProperties": False,
"properties": {
"penalty": {
"XXX TODO XXX": "None, 'l2' or 'l1' or 'elasticnet'",
"description": "The penalty (aka regularization term) to be used",
"enum": [None],
"default": None,
},
"alpha": {
"type": "number",
"minimumForOptimizer": 1e-10,
"maximumForOptimizer": 1.0,
"distribution": "loguniform",
"default": 0.0001,
"description": "Constant that multiplies the regularization term if regularization is used",
},
"fit_intercept": {
"type": "boolean",
"default": True,
"description": "Whether the intercept should be estimated or not",
},
"max_iter": {
"anyOf": [
{
"type": "integer",
"minimumForOptimizer": 10,
"maximumForOptimizer": 1000,
"distribution": "uniform",
},
{"enum": [None]},
],
"default": None,
"description": "The maximum number of passes over the training data (aka epochs)",
},
"tol": {
"anyOf": [
{
"type": "number",
"minimumForOptimizer": 1e-08,
"maximumForOptimizer": 0.01,
},
{"enum": [None]},
],
"default": None,
"description": "The stopping criterion",
},
"shuffle": {
"type": "boolean",
"default": True,
"description": "Whether or not the training data should be shuffled after each epoch.",
},
"verbose": {
"type": "integer",
"default": 0,
"description": "The verbosity level",
},
"eta0": {
"type": "number",
"minimumForOptimizer": 0.01,
"maximumForOptimizer": 1.0,
"distribution": "loguniform",
"default": 1.0,
"description": "Constant by which the updates are multiplied",
},
"n_jobs": {
"anyOf": [{"type": "integer"}, {"enum": [None]}],
"default": 1,
"description": "The number of CPUs to use to do the OVA (One Versus All, for multi-class problems) computation",
},
"random_state": {
"anyOf": [
{"type": "integer"},
{"laleType": "numpy.random.RandomState"},
{"enum": [None]},
],
"default": None,
"description": "The seed of the pseudo random number generator to use when shuffling the data",
},
"early_stopping": {
"type": "boolean",
"default": False,
"description": "Whether to use early stopping to terminate training when validation",
},
"validation_fraction": {
"type": "number",
"default": 0.1,
"description": "The proportion of training data to set aside as validation set for early stopping",
},
"n_iter_no_change": {
"type": "integer",
"default": 5,
"description": "Number of iterations with no improvement to wait before early stopping",
},
"class_weight": {
"XXX TODO XXX": 'dict, {class_label: weight} or "balanced" or None, optional',
"description": "Preset for the class_weight fit parameter",
"enum": ["balanced"],
"default": "balanced",
},
"warm_start": {
"type": "boolean",
"default": False,
"description": "When set to True, reuse the solution of the previous call to fit as initialization, otherwise, just erase the previous solution",
},
},
},
{
"XXX TODO XXX": "Parameter: max_iter > only impacts the behavior in the fit method, and not the partial_fit"
},
{
"description": "validation_fraction, only used if early_stopping is true",
"anyOf": [
{
"type": "object",
"properties": {"validation_fraction": {"enum": [0.1]}},
},
{"type": "object", "properties": {"early_stopping": {"enum": [True]}}},
],
},
],
}
_input_fit_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Fit linear model with Stochastic Gradient Descent.",
"type": "object",
"required": ["X", "y"],
"properties": {
"X": {
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
"description": "Training data",
},
"y": {
"type": "array",
"items": {"type": "number"},
"description": "Target values",
},
"coef_init": {
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
"description": "The initial coefficients to warm-start the optimization.",
},
"intercept_init": {
"type": "array",
"items": {"type": "number"},
"description": "The initial intercept to warm-start the optimization.",
},
"sample_weight": {
"anyOf": [{"type": "array", "items": {"type": "number"}}, {"enum": [None]}],
"default": None,
"description": "Weights applied to individual samples",
},
},
}
_input_predict_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Predict class labels for samples in X.",
"type": "object",
"required": ["X"],
"properties": {
"X": {
"anyOf": [
{
"type": "array",
"items": {"laleType": "Any", "XXX TODO XXX": "item type"},
"XXX TODO XXX": "array_like or sparse matrix, shape (n_samples, n_features)",
},
{
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
},
],
"description": "Samples.",
}
},
}
_output_predict_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Predicted class label per sample.",
"type": "array",
"items": {"type": "number"},
}
_input_decision_function_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Predict confidence scores for samples.",
"type": "object",
"required": ["X"],
"properties": {
"X": {
"anyOf": [
{
"type": "array",
"items": {"laleType": "Any", "XXX TODO XXX": "item type"},
"XXX TODO XXX": "array_like or sparse matrix, shape (n_samples, n_features)",
},
{
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
},
],
"description": "Samples.",
}
},
}
_output_decision_function_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Confidence scores per (sample, class) combination",
"laleType": "Any",
"XXX TODO XXX": "array, shape=(n_samples,) if n_classes == 2 else (n_samples, n_classes)",
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Combined schema for expected data and hyperparameters.",
"documentation_url": "https://scikit-learn.org/0.20/modules/generated/sklearn.linear_model.Perceptron#sklearn-linear_model-perceptron",
"import_from": "sklearn.linear_model",
"type": "object",
"tags": {"pre": [], "op": ["estimator"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
"input_decision_function": _input_decision_function_schema,
"output_decision_function": _output_decision_function_schema,
},
}
Perceptron = make_operator(_PerceptronImpl, _combined_schemas)
set_docstrings(Perceptron)
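# Hedged usage sketch, not part of the generated module: a toy binary
# classification problem. The threshold rule and hyperparameter values are
# illustrative assumptions only.
if __name__ == "__main__":
    import numpy as np

    rng = np.random.RandomState(0)
    X_demo = rng.rand(40, 3)
    y_demo = (X_demo[:, 0] > 0.5).astype(int)
    trained = Perceptron(max_iter=100, tol=0.001).fit(X_demo, y_demo)
    print(trained.predict(X_demo)[:5])
    print(trained.decision_function(X_demo)[:5])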
lale-master/lale/lib/autogen/mini_batch_sparse_pca.py
from numpy import inf, nan
from sklearn.decomposition import MiniBatchSparsePCA as Op
from lale.docstrings import set_docstrings
from lale.operators import make_operator
class _MiniBatchSparsePCAImpl:
def __init__(self, **hyperparams):
self._hyperparams = hyperparams
self._wrapped_model = Op(**self._hyperparams)
def fit(self, X, y=None):
if y is not None:
self._wrapped_model.fit(X, y)
else:
self._wrapped_model.fit(X)
return self
def transform(self, X):
return self._wrapped_model.transform(X)
_hyperparams_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "inherited docstring for MiniBatchSparsePCA Mini-batch Sparse Principal Components Analysis",
"allOf": [
{
"type": "object",
"required": [
"n_components",
"alpha",
"ridge_alpha",
"n_iter",
"callback",
"batch_size",
"verbose",
"shuffle",
"n_jobs",
"method",
"random_state",
],
"relevantToOptimizer": [
"n_components",
"alpha",
"n_iter",
"batch_size",
"shuffle",
"method",
],
"additionalProperties": False,
"properties": {
"n_components": {
"anyOf": [
{
"type": "integer",
"minimumForOptimizer": 2,
"maximumForOptimizer": 256,
"distribution": "uniform",
},
{"enum": [None]},
],
"default": None,
"description": "number of sparse atoms to extract",
},
"alpha": {
"type": "integer",
"minimumForOptimizer": 1,
"maximumForOptimizer": 2,
"distribution": "uniform",
"default": 1,
"description": "Sparsity controlling parameter",
},
"ridge_alpha": {
"type": "number",
"default": 0.01,
"description": "Amount of ridge shrinkage to apply in order to improve conditioning when calling the transform method.",
},
"n_iter": {
"type": "integer",
"minimumForOptimizer": 5,
"maximumForOptimizer": 1000,
"distribution": "uniform",
"default": 100,
"description": "number of iterations to perform for each mini batch",
},
"callback": {
"anyOf": [{"laleType": "callable"}, {"enum": [None]}],
"default": None,
"description": "callable that gets invoked every five iterations",
},
"batch_size": {
"type": "integer",
"minimumForOptimizer": 3,
"maximumForOptimizer": 128,
"distribution": "uniform",
"default": 3,
"description": "the number of features to take in each mini batch",
},
"verbose": {
"anyOf": [{"type": "integer"}, {"type": "boolean"}],
"default": False,
"description": "Controls the verbosity; the higher, the more messages",
},
"shuffle": {
"type": "boolean",
"default": True,
"description": "whether to shuffle the data before splitting it in batches",
},
"n_jobs": {
"anyOf": [{"type": "integer"}, {"enum": [None]}],
"default": 1,
"description": "Number of parallel jobs to run",
},
"method": {
"enum": ["lars", "cd"],
"default": "lars",
"description": "lars: uses the least angle regression method to solve the lasso problem (linear_model.lars_path) cd: uses the coordinate descent method to compute the Lasso solution (linear_model.Lasso)",
},
"random_state": {
"anyOf": [
{"type": "integer"},
{"laleType": "numpy.random.RandomState"},
{"enum": [None]},
],
"default": None,
"description": "If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`.",
},
},
},
{
"description": "A sparse matrix was passed, but dense data is required. Use X.toarray() to convert to a dense numpy array.",
"type": "object",
"laleNot": "X/isSparse",
},
],
}
_input_fit_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Fit the model from data in X.",
"type": "object",
"required": ["X", "y"],
"properties": {
"X": {
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
"description": "Training vector, where n_samples in the number of samples and n_features is the number of features.",
},
"y": {},
},
}
_input_transform_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Least Squares projection of the data onto the sparse components.",
"type": "object",
"required": ["X"],
"properties": {
"X": {
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
"description": "Test data to be transformed, must have the same number of features as the data used to train the model.",
},
"ridge_alpha": {
"type": "number",
"default": 0.01,
"description": "Amount of ridge shrinkage to apply in order to improve conditioning",
},
},
}
_output_transform_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Transformed data.",
"laleType": "Any",
"XXX TODO XXX": "X_new array, shape (n_samples, n_components)",
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Combined schema for expected data and hyperparameters.",
"documentation_url": "https://scikit-learn.org/0.20/modules/generated/sklearn.decomposition.MiniBatchSparsePCA#sklearn-decomposition-minibatchsparsepca",
"import_from": "sklearn.decomposition",
"type": "object",
"tags": {"pre": [], "op": ["transformer"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_transform": _input_transform_schema,
"output_transform": _output_transform_schema,
},
}
MiniBatchSparsePCA = make_operator(_MiniBatchSparsePCAImpl, _combined_schemas)
set_docstrings(MiniBatchSparsePCA)
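# Hedged usage sketch, not part of the generated module: extracting three
# sparse components from illustrative dense data (the schema constraint above
# rejects sparse input). All values are assumptions for demonstration only.
if __name__ == "__main__":
    import numpy as np

    rng = np.random.RandomState(0)
    X_demo = rng.rand(30, 8)
    trained = MiniBatchSparsePCA(n_components=3, random_state=0).fit(X_demo)
    print(trained.transform(X_demo).shape)  # (30, 3)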
lale-master/lale/lib/autogen/calibrated_classifier_cv.py
import sklearn
from numpy import inf, nan
from packaging import version
from sklearn.calibration import CalibratedClassifierCV as Op
from lale.docstrings import set_docstrings
from lale.operators import make_operator, sklearn_version
class _CalibratedClassifierCVImpl:
def __init__(self, **hyperparams):
self._hyperparams = hyperparams
self._wrapped_model = Op(**self._hyperparams)
def fit(self, X, y=None):
if y is not None:
self._wrapped_model.fit(X, y)
else:
self._wrapped_model.fit(X)
return self
def predict(self, X):
return self._wrapped_model.predict(X)
def predict_proba(self, X):
return self._wrapped_model.predict_proba(X)
_hyperparams_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "inherited docstring for CalibratedClassifierCV Probability calibration with isotonic regression or sigmoid.",
"allOf": [
{
"type": "object",
"required": ["base_estimator", "method", "cv"],
"relevantToOptimizer": ["method", "cv"],
"additionalProperties": False,
"properties": {
"base_estimator": {
"XXX TODO XXX": "instance BaseEstimator",
"description": "The classifier whose output decision function needs to be calibrated to offer more accurate predict_proba outputs",
"enum": [None],
"default": None,
},
"method": {
"XXX TODO XXX": "'sigmoid' or 'isotonic'",
"description": "The method to use for calibration",
"enum": ["sigmoid", "isotonic"],
"default": "sigmoid",
},
"cv": {
"description": """Cross-validation as integer or as object that has a split function.
The fit method performs cross validation on the input dataset for per
trial, and uses the mean cross validation performance for optimization.
This behavior is also impacted by handle_cv_failure flag.
If integer: number of folds in sklearn.model_selection.StratifiedKFold.
If object with split function: generator yielding (train, test) splits
as arrays of indices. Can use any of the iterators from
https://scikit-learn.org/stable/modules/cross_validation.html#cross-validation-iterators.""",
"anyOf": [
{
"type": "integer",
"minimum": 1,
"default": 5,
"minimumForOptimizer": 3,
"maximumForOptimizer": 4,
"distribution": "uniform",
},
{"laleType": "Any", "forOptimizer": False},
{"enum": [None]},
{"enum": ["prefit"]},
],
"default": None,
},
},
}
],
}
_input_fit_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Fit the calibrated model",
"type": "object",
"required": ["X", "y"],
"properties": {
"X": {
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
"description": "Training data.",
},
"y": {
"type": "array",
"items": {"type": "number"},
"description": "Target values.",
},
"sample_weight": {
"anyOf": [{"type": "array", "items": {"type": "number"}}, {"enum": [None]}],
"description": "Sample weights",
},
},
}
_input_predict_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Predict the target of new samples. Can be different from the",
"type": "object",
"required": ["X"],
"properties": {
"X": {
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
"description": "The samples.",
}
},
}
_output_predict_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "The predicted class.",
"type": "array",
"items": {"type": "number"},
}
_input_predict_proba_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Posterior probabilities of classification",
"type": "object",
"required": ["X"],
"properties": {
"X": {
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
"description": "The samples.",
}
},
}
_output_predict_proba_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "The predicted probas.",
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Combined schema for expected data and hyperparameters.",
"documentation_url": "https://scikit-learn.org/0.20/modules/generated/sklearn.calibration.CalibratedClassifierCV#sklearn-calibration-calibratedclassifiercv",
"import_from": "sklearn.calibration",
"type": "object",
"tags": {"pre": [], "op": ["estimator", "classifier"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
"input_predict_proba": _input_predict_proba_schema,
"output_predict_proba": _output_predict_proba_schema,
},
}
CalibratedClassifierCV = make_operator(_CalibratedClassifierCVImpl, _combined_schemas)
if sklearn_version >= version.Version("0.24"):
# old: https://scikit-learn.org/0.20/modules/generated/sklearn.calibration.CalibratedClassifierCV#sklearn-calibration-calibratedclassifiercv
# new: https://scikit-learn.org/0.24/modules/generated/sklearn.calibration.CalibratedClassifierCV#sklearn-calibration-calibratedclassifiercv
CalibratedClassifierCV = CalibratedClassifierCV.customize_schema(
n_jobs={
"description": "Number of jobs to run in parallel.",
"anyOf": [
{
"description": "1 unless in joblib.parallel_backend context.",
"enum": [None],
},
{"description": "Use all processors.", "enum": [-1]},
{
"description": "Number of jobs to run in parallel.",
"type": "integer",
"minimum": 1,
},
],
"default": None,
},
set_as_available=True,
)
CalibratedClassifierCV = CalibratedClassifierCV.customize_schema(
ensemble={
"type": "boolean",
"default": True,
"description": "Determines how the calibrator is fitted when cv is not 'prefit'. Ignored if cv='prefit",
},
set_as_available=True,
)
set_docstrings(CalibratedClassifierCV)
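# Hedged usage sketch, not part of the generated module: calibrating the
# default base estimator with 3-fold cross validation on a toy binary
# problem. The data and settings are illustrative assumptions only.
if __name__ == "__main__":
    import numpy as np

    rng = np.random.RandomState(0)
    X_demo = rng.rand(60, 3)
    y_demo = (X_demo[:, 0] + X_demo[:, 1] > 1.0).astype(int)
    trained = CalibratedClassifierCV(method="sigmoid", cv=3).fit(X_demo, y_demo)
    print(trained.predict_proba(X_demo)[:3])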
lale-master/lale/lib/autogen/label_spreading.py
from numpy import inf, nan
from sklearn.semi_supervised import LabelSpreading as Op
from lale.docstrings import set_docstrings
from lale.operators import make_operator
class _LabelSpreadingImpl:
def __init__(self, **hyperparams):
self._hyperparams = hyperparams
self._wrapped_model = Op(**self._hyperparams)
def fit(self, X, y=None):
if y is not None:
self._wrapped_model.fit(X, y)
else:
self._wrapped_model.fit(X)
return self
def predict(self, X):
return self._wrapped_model.predict(X)
def predict_proba(self, X):
return self._wrapped_model.predict_proba(X)
_hyperparams_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "inherited docstring for LabelSpreading LabelSpreading model for semi-supervised learning",
"allOf": [
{
"type": "object",
"required": [
"kernel",
"gamma",
"n_neighbors",
"alpha",
"max_iter",
"tol",
"n_jobs",
],
"relevantToOptimizer": [
"kernel",
"gamma",
"n_neighbors",
"alpha",
"max_iter",
"tol",
],
"additionalProperties": False,
"properties": {
"kernel": {
"anyOf": [
{"enum": ["knn", "rbf"]},
{"laleType": "callable", "forOptimizer": False},
],
"default": "rbf",
"description": "String identifier for kernel function to use or the kernel function itself",
},
"gamma": {
"type": "number",
"minimumForOptimizer": 0,
"maximumForOptimizer": 20,
"distribution": "uniform",
"default": 20,
"description": "parameter for rbf kernel",
},
"n_neighbors": {
"XXX TODO XXX": "integer > 0",
"description": "parameter for knn kernel",
"type": "integer",
"minimumForOptimizer": 5,
"maximumForOptimizer": 20,
"distribution": "uniform",
"default": 7,
},
"alpha": {
"type": "number",
"minimumForOptimizer": 1e-10,
"maximumForOptimizer": 1.0,
"distribution": "loguniform",
"default": 0.2,
"description": "Clamping factor",
},
"max_iter": {
"type": "integer",
"minimumForOptimizer": 10,
"maximumForOptimizer": 1000,
"distribution": "uniform",
"default": 30,
"description": "maximum number of iterations allowed",
},
"tol": {
"type": "number",
"minimumForOptimizer": 1e-08,
"maximumForOptimizer": 0.01,
"default": 0.001,
"description": "Convergence tolerance: threshold to consider the system at steady state",
},
"n_jobs": {
"anyOf": [{"type": "integer"}, {"enum": [None]}],
"default": 1,
"description": "The number of parallel jobs to run",
},
},
},
{
"XXX TODO XXX": "Parameter: kernel > only 'rbf' and 'knn' strings are valid inputs"
},
],
}
_input_fit_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Fit a semi-supervised label propagation model based",
"type": "object",
"required": ["X", "y"],
"properties": {
"X": {
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
"description": "A {n_samples by n_samples} size matrix will be created from this",
},
"y": {
"type": "array",
"items": {"type": "number"},
"description": "n_labeled_samples (unlabeled points are marked as -1) All unlabeled samples will be transductively assigned labels",
},
},
}
_input_predict_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Performs inductive inference across the model.",
"type": "object",
"required": ["X"],
"properties": {
"X": {"type": "array", "items": {"type": "array", "items": {"type": "number"}}}
},
}
_output_predict_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Predictions for input data",
"type": "array",
"items": {"type": "number"},
}
_input_predict_proba_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Predict probability for each possible outcome.",
"type": "object",
"required": ["X"],
"properties": {
"X": {"type": "array", "items": {"type": "array", "items": {"type": "number"}}}
},
}
_output_predict_proba_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Normalized probability distributions across class labels",
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Combined schema for expected data and hyperparameters.",
"documentation_url": "https://scikit-learn.org/0.20/modules/generated/sklearn.semi_supervised.LabelSpreading#sklearn-semi_supervised-labelspreading",
"import_from": "sklearn.semi_supervised",
"type": "object",
"tags": {"pre": [], "op": ["estimator", "classifier"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
"input_predict_proba": _input_predict_proba_schema,
"output_predict_proba": _output_predict_proba_schema,
},
}
LabelSpreading = make_operator(_LabelSpreadingImpl, _combined_schemas)
set_docstrings(LabelSpreading)
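# Hedged usage sketch, not part of the generated module: semi-supervised
# learning where every third point is unlabeled (marked -1, per the fit
# schema). The data and hyperparameter values are illustrative assumptions.
if __name__ == "__main__":
    import numpy as np

    rng = np.random.RandomState(0)
    X_demo = rng.rand(30, 2)
    y_demo = (X_demo[:, 0] > 0.5).astype(int)
    y_demo[::3] = -1  # -1 marks unlabeled samples
    trained = LabelSpreading(kernel="knn", n_neighbors=5).fit(X_demo, y_demo)
    print(trained.predict(X_demo)[:10])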
lale-master/lale/lib/autogen/radius_neighbors_classifier.py
from numpy import inf, nan
from sklearn.neighbors import RadiusNeighborsClassifier as Op
from lale.docstrings import set_docstrings
from lale.operators import make_operator
class _RadiusNeighborsClassifierImpl:
def __init__(self, **hyperparams):
self._hyperparams = hyperparams
self._wrapped_model = Op(**self._hyperparams)
def fit(self, X, y=None):
if y is not None:
self._wrapped_model.fit(X, y)
else:
self._wrapped_model.fit(X)
return self
def predict(self, X):
return self._wrapped_model.predict(X)
_hyperparams_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "inherited docstring for RadiusNeighborsClassifier Classifier implementing a vote among neighbors within a given radius",
"allOf": [
{
"type": "object",
"required": [
"radius",
"weights",
"algorithm",
"leaf_size",
"p",
"metric",
"outlier_label",
"metric_params",
"n_jobs",
],
"relevantToOptimizer": [
"weights",
"algorithm",
"leaf_size",
"p",
"metric",
"outlier_label",
],
"additionalProperties": False,
"properties": {
"radius": {
"type": "number",
"default": 1.0,
"description": "Range of parameter space to use by default for :meth:`radius_neighbors` queries.",
},
"weights": {
"anyOf": [
{"laleType": "callable", "forOptimizer": False},
{"enum": ["distance", "uniform"]},
],
"default": "uniform",
"description": "weight function used in prediction",
},
"algorithm": {
"enum": ["auto", "ball_tree", "kd_tree", "brute"],
"default": "auto",
"description": "Algorithm used to compute the nearest neighbors: - 'ball_tree' will use :class:`BallTree` - 'kd_tree' will use :class:`KDTree` - 'brute' will use a brute-force search",
},
"leaf_size": {
"type": "integer",
"minimumForOptimizer": 30,
"maximumForOptimizer": 31,
"distribution": "uniform",
"default": 30,
"description": "Leaf size passed to BallTree or KDTree",
},
"p": {
"type": "integer",
"minimumForOptimizer": 1,
"maximumForOptimizer": 3,
"distribution": "uniform",
"default": 2,
"description": "Power parameter for the Minkowski metric",
},
"metric": {
"anyOf": [
{"laleType": "callable", "forOptimizer": False},
{
"enum": [
"euclidean",
"manhattan",
"minkowski",
"precomputed",
]
},
],
"default": "minkowski",
"description": "the distance metric to use for the tree",
},
"outlier_label": {
"anyOf": [
{"type": "integer", "forOptimizer": False},
{"enum": ["most_frequent"]},
{"enum": [None]},
],
"default": None,
"description": "Label, which is given for outlier samples (samples with no neighbors on given radius)",
},
"metric_params": {
"anyOf": [{"type": "object"}, {"enum": [None]}],
"default": None,
"description": "Additional keyword arguments for the metric function.",
},
"n_jobs": {
"anyOf": [{"type": "integer"}, {"enum": [None]}],
"default": None,
"description": "The number of parallel jobs to run for neighbors search",
},
},
}
],
}
_input_fit_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Fit the model using X as training data and y as target values",
"type": "object",
"required": ["X", "y"],
"properties": {
"X": {
"type": "array",
"items": {"laleType": "Any", "XXX TODO XXX": "item type"},
"XXX TODO XXX": "{array-like, sparse matrix, BallTree, KDTree}",
"description": "Training data",
},
"y": {
"type": "array",
"items": {"laleType": "Any", "XXX TODO XXX": "item type"},
"XXX TODO XXX": "{array-like, sparse matrix}",
"description": "Target values of shape = [n_samples] or [n_samples, n_outputs]",
},
},
}
_input_predict_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Predict the class labels for the provided data",
"type": "object",
"required": ["X"],
"properties": {
"X": {
"laleType": "Any",
"XXX TODO XXX": "array-like, shape (n_query, n_features), or (n_query, n_indexed) if metric == 'precomputed'",
"description": "Test samples.",
}
},
}
_output_predict_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Class labels for each data sample.",
"anyOf": [
{"type": "array", "items": {"type": "number"}},
{"type": "array", "items": {"type": "array", "items": {"type": "number"}}},
],
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Combined schema for expected data and hyperparameters.",
"documentation_url": "https://scikit-learn.org/0.20/modules/generated/sklearn.neighbors.RadiusNeighborsClassifier#sklearn-neighbors-radiusneighborsclassifier",
"import_from": "sklearn.neighbors",
"type": "object",
"tags": {"pre": [], "op": ["estimator", "classifier"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
},
}
RadiusNeighborsClassifier = make_operator(
_RadiusNeighborsClassifierImpl, _combined_schemas
)
set_docstrings(RadiusNeighborsClassifier)
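# Usage sketch: an illustrative example of the operator above, assuming
# scikit-learn is available. The radius and outlier_label values are
# arbitrary; an explicit outlier_label avoids errors for query points
# with no neighbors inside the radius.
if __name__ == "__main__":
    from sklearn.datasets import load_iris

    train_X, train_y = load_iris(return_X_y=True)
    trained = RadiusNeighborsClassifier(radius=10.0, outlier_label=0).fit(
        train_X, train_y
    )
    print(trained.predict(train_X[:5]))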
| 6,997 | 37.240437 | 205 |
py
|
lale
|
lale-master/lale/lib/autogen/label_encoder.py
|
from numpy import inf, nan
from sklearn.preprocessing import LabelEncoder as Op
from lale.docstrings import set_docstrings
from lale.operators import make_operator
class _LabelEncoderImpl:
def __init__(self, **hyperparams):
self._hyperparams = hyperparams
self._wrapped_model = Op(**self._hyperparams)
def fit(self, X, y=None):
if y is not None:
self._wrapped_model.fit(X, y)
else:
self._wrapped_model.fit(X)
return self
def transform(self, X):
return self._wrapped_model.transform(X)
_hyperparams_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "inherited docstring for LabelEncoder Encode labels with value between 0 and n_classes-1.",
"allOf": [
{
"type": "object",
"relevantToOptimizer": [],
"additionalProperties": False,
"properties": {},
}
],
}
_input_fit_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Fit label encoder",
"type": "object",
"required": ["y"],
"properties": {
"y": {
"type": "array",
"items": {"type": "number"},
"description": "Target values.",
}
},
}
_input_transform_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Transform labels to normalized encoding.",
"type": "object",
"required": ["y"],
"properties": {
"y": {
"type": "array",
"items": {"type": "number"},
"description": "Target values.",
}
},
}
_output_transform_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Transform labels to normalized encoding.",
"type": "array",
"items": {"type": "number"},
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Combined schema for expected data and hyperparameters.",
"documentation_url": "https://scikit-learn.org/0.20/modules/generated/sklearn.preprocessing.LabelEncoder#sklearn-preprocessing-labelencoder",
"import_from": "sklearn.preprocessing",
"type": "object",
"tags": {"pre": [], "op": ["transformer"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_transform": _input_transform_schema,
"output_transform": _output_transform_schema,
},
}
LabelEncoder = make_operator(_LabelEncoderImpl, _combined_schemas)
set_docstrings(LabelEncoder)
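# Usage sketch: an illustrative round trip through the operator above.
# Note that the wrapper forwards its first positional argument to
# sklearn's LabelEncoder.fit, which expects the label vector itself;
# the toy labels below are arbitrary.
if __name__ == "__main__":
    trained = LabelEncoder().fit([1, 2, 2, 6])
    print(trained.transform([1, 1, 2, 6]))  # e.g. [0 1 1 2]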
| 2,606 | 29.670588 | 145 |
py
|
lale
|
lale-master/lale/lib/autogen/passive_aggressive_regressor.py
|
from numpy import inf, nan
from sklearn.linear_model import PassiveAggressiveRegressor as Op
from lale.docstrings import set_docstrings
from lale.operators import make_operator
class _PassiveAggressiveRegressorImpl:
def __init__(self, **hyperparams):
self._hyperparams = hyperparams
self._wrapped_model = Op(**self._hyperparams)
def fit(self, X, y=None):
if y is not None:
self._wrapped_model.fit(X, y)
else:
self._wrapped_model.fit(X)
return self
def predict(self, X):
return self._wrapped_model.predict(X)
_hyperparams_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "inherited docstring for PassiveAggressiveRegressor Passive Aggressive Regressor",
"allOf": [
{
"type": "object",
"required": [
"C",
"fit_intercept",
"max_iter",
"tol",
"early_stopping",
"validation_fraction",
"n_iter_no_change",
"shuffle",
"verbose",
"loss",
"epsilon",
"random_state",
"warm_start",
"average",
],
"relevantToOptimizer": [
"fit_intercept",
"max_iter",
"tol",
"shuffle",
"loss",
"epsilon",
],
"additionalProperties": False,
"properties": {
"C": {
"type": "number",
"default": 1.0,
"description": "Maximum step size (regularization)",
},
"fit_intercept": {
"type": "boolean",
"default": True,
"description": "Whether the intercept should be estimated or not",
},
"max_iter": {
"type": "integer",
"minimumForOptimizer": 10,
"maximumForOptimizer": 1000,
"distribution": "uniform",
"default": 1000,
"description": "The maximum number of passes over the training data (aka epochs)",
},
"tol": {
"anyOf": [
{
"type": "number",
"minimumForOptimizer": 1e-08,
"maximumForOptimizer": 0.01,
},
{"enum": [None]},
],
"default": 1e-3,
"description": "The stopping criterion",
},
"early_stopping": {
"type": "boolean",
"default": False,
"description": "Whether to use early stopping to terminate training when validation",
},
"validation_fraction": {
"type": "number",
"default": 0.1,
"description": "The proportion of training data to set aside as validation set for early stopping",
},
"n_iter_no_change": {
"type": "integer",
"default": 5,
"description": "Number of iterations with no improvement to wait before early stopping",
},
"shuffle": {
"type": "boolean",
"default": True,
"description": "Whether or not the training data should be shuffled after each epoch.",
},
"verbose": {
"type": "integer",
"default": 0,
"description": "The verbosity level",
},
"loss": {
"enum": [
"huber",
"squared_epsilon_insensitive",
"squared_loss",
"epsilon_insensitive",
],
"default": "epsilon_insensitive",
"description": "The loss function to be used: epsilon_insensitive: equivalent to PA-I in the reference paper",
},
"epsilon": {
"type": "number",
"minimumForOptimizer": 1e-08,
"maximumForOptimizer": 1.35,
"distribution": "loguniform",
"default": 0.1,
"description": "If the difference between the current prediction and the correct label is below this threshold, the model is not updated.",
},
"random_state": {
"anyOf": [
{"type": "integer"},
{"laleType": "numpy.random.RandomState"},
{"enum": [None]},
],
"default": None,
"description": "The seed of the pseudo random number generator to use when shuffling the data",
},
"warm_start": {
"type": "boolean",
"default": False,
"description": "When set to True, reuse the solution of the previous call to fit as initialization, otherwise, just erase the previous solution",
},
"average": {
"anyOf": [{"type": "boolean"}, {"type": "integer"}],
"default": False,
"description": "When set to True, computes the averaged SGD weights and stores the result in the ``coef_`` attribute",
},
},
},
{
"XXX TODO XXX": "Parameter: max_iter > only impacts the behavior in the fit method, and not the partial_fit"
},
{
"description": "validation_fraction, only used if early_stopping is true",
"anyOf": [
{
"type": "object",
"properties": {"validation_fraction": {"enum": [0.1]}},
},
{"type": "object", "properties": {"early_stopping": {"enum": [True]}}},
],
},
],
}
_input_fit_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Fit linear model with Passive Aggressive algorithm.",
"type": "object",
"required": ["X", "y"],
"properties": {
"X": {
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
"description": "Training data",
},
"y": {
"type": "array",
"items": {"type": "number"},
"description": "Target values",
},
"coef_init": {
"type": "array",
"items": {"type": "number"},
"description": "The initial coefficients to warm-start the optimization.",
},
"intercept_init": {
"type": "array",
"items": {"type": "number"},
"description": "The initial intercept to warm-start the optimization.",
},
},
}
_input_predict_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Predict using the linear model",
"type": "object",
"required": ["X"],
"properties": {
"X": {"type": "array", "items": {"type": "array", "items": {"type": "number"}}}
},
}
_output_predict_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Predicted target values per element in X.",
"type": "array",
"items": {"type": "number"},
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Combined schema for expected data and hyperparameters.",
"documentation_url": "https://scikit-learn.org/0.20/modules/generated/sklearn.linear_model.PassiveAggressiveRegressor#sklearn-linear_model-passiveaggressiveregressor",
"import_from": "sklearn.linear_model",
"type": "object",
"tags": {"pre": [], "op": ["estimator", "regressor"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
},
}
PassiveAggressiveRegressor = make_operator(
_PassiveAggressiveRegressorImpl, _combined_schemas
)
set_docstrings(PassiveAggressiveRegressor)
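# Usage sketch: an illustrative fit/predict example for the operator
# above, assuming scikit-learn is available; the synthetic data and the
# hyperparameter values are arbitrary.
if __name__ == "__main__":
    from sklearn.datasets import make_regression

    train_X, train_y = make_regression(n_samples=100, n_features=4, random_state=0)
    trained = PassiveAggressiveRegressor(max_iter=100, tol=1e-3).fit(train_X, train_y)
    print(trained.predict(train_X[:3]))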
| 8,652 | 37.118943 | 171 |
py
|
lale
|
lale-master/lale/lib/autogen/lars_cv.py
|
from numpy import inf, nan
from sklearn.linear_model import LarsCV as Op
from lale.docstrings import set_docstrings
from lale.operators import make_operator
class _LarsCVImpl:
def __init__(self, **hyperparams):
self._hyperparams = hyperparams
self._wrapped_model = Op(**self._hyperparams)
def fit(self, X, y=None):
if y is not None:
self._wrapped_model.fit(X, y)
else:
self._wrapped_model.fit(X)
return self
def predict(self, X):
return self._wrapped_model.predict(X)
_hyperparams_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "inherited docstring for LarsCV Cross-validated Least Angle Regression model.",
"allOf": [
{
"type": "object",
"required": [
"fit_intercept",
"verbose",
"max_iter",
"normalize",
"precompute",
"cv",
"max_n_alphas",
"n_jobs",
"eps",
"copy_X",
],
"relevantToOptimizer": [
"fit_intercept",
"max_iter",
"normalize",
"precompute",
"cv",
"max_n_alphas",
"eps",
"copy_X",
],
"additionalProperties": False,
"properties": {
"fit_intercept": {
"type": "boolean",
"default": True,
"description": "whether to calculate the intercept for this model",
},
"verbose": {
"anyOf": [{"type": "boolean"}, {"type": "integer"}],
"default": False,
"description": "Sets the verbosity amount",
},
"max_iter": {
"type": "integer",
"minimumForOptimizer": 10,
"maximumForOptimizer": 1000,
"distribution": "uniform",
"default": 500,
"description": "Maximum number of iterations to perform.",
},
"normalize": {
"type": "boolean",
"default": True,
"description": "This parameter is ignored when ``fit_intercept`` is set to False",
},
"precompute": {
"anyOf": [
{
"type": "array",
"items": {"laleType": "Any", "XXX TODO XXX": "item type"},
"XXX TODO XXX": "True | False | 'auto' | array-like",
"forOptimizer": False,
},
{"enum": ["auto"]},
],
"default": "auto",
"description": "Whether to use a precomputed Gram matrix to speed up calculations",
},
"cv": {
"description": """Cross-validation as integer or as object that has a split function.
                The fit method performs cross validation on the input dataset per
                trial, and uses the mean cross validation performance for optimization.
This behavior is also impacted by handle_cv_failure flag.
If integer: number of folds in sklearn.model_selection.StratifiedKFold.
If object with split function: generator yielding (train, test) splits
as arrays of indices. Can use any of the iterators from
https://scikit-learn.org/stable/modules/cross_validation.html#cross-validation-iterators.""",
"anyOf": [
{
"type": "integer",
"minimum": 1,
"default": 5,
"minimumForOptimizer": 3,
"maximumForOptimizer": 4,
"distribution": "uniform",
},
{"laleType": "Any", "forOptimizer": False},
{"enum": [None]},
],
"default": None,
},
"max_n_alphas": {
"type": "integer",
"minimumForOptimizer": 1000,
"maximumForOptimizer": 1001,
"distribution": "uniform",
"default": 1000,
"description": "The maximum number of points on the path used to compute the residuals in the cross-validation",
},
"n_jobs": {
"anyOf": [{"type": "integer"}, {"enum": [None]}],
"default": 1,
"description": "Number of CPUs to use during the cross validation",
},
"eps": {
"type": "number",
"minimumForOptimizer": 0.001,
"maximumForOptimizer": 0.1,
"distribution": "loguniform",
"default": 2.220446049250313e-16,
"description": "The machine-precision regularization in the computation of the Cholesky diagonal factors",
},
"copy_X": {
"type": "boolean",
"default": True,
"description": "If ``True``, X will be copied; else, it may be overwritten.",
},
},
},
{"XXX TODO XXX": "Parameter: precompute > only subsets of x"},
],
}
_input_fit_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Fit the model using X, y as training data.",
"type": "object",
"required": ["X", "y"],
"properties": {
"X": {
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
"description": "Training data.",
},
"y": {
"type": "array",
"items": {"type": "number"},
"description": "Target values.",
},
},
}
_input_predict_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Predict using the linear model",
"type": "object",
"required": ["X"],
"properties": {
"X": {
"anyOf": [
{
"type": "array",
"items": {"laleType": "Any", "XXX TODO XXX": "item type"},
"XXX TODO XXX": "array_like or sparse matrix, shape (n_samples, n_features)",
},
{
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
},
],
"description": "Samples.",
}
},
}
_output_predict_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Returns predicted values.",
"type": "array",
"items": {"type": "number"},
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Combined schema for expected data and hyperparameters.",
"documentation_url": "https://scikit-learn.org/0.20/modules/generated/sklearn.linear_model.LarsCV#sklearn-linear_model-larscv",
"import_from": "sklearn.linear_model",
"type": "object",
"tags": {"pre": [], "op": ["estimator", "regressor"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
},
}
LarsCV = make_operator(_LarsCVImpl, _combined_schemas)
set_docstrings(LarsCV)
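# Usage sketch: an illustrative example of the operator above, assuming
# scikit-learn is available; the synthetic data and the cv value are
# arbitrary.
if __name__ == "__main__":
    from sklearn.datasets import make_regression

    train_X, train_y = make_regression(
        n_samples=200, n_features=10, noise=4.0, random_state=0
    )
    trained = LarsCV(cv=5).fit(train_X, train_y)
    print(trained.predict(train_X[:3]))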
| 7,982 | 37.565217 | 132 |
py
|
lale
|
lale-master/lale/lib/snapml/snap_decision_tree_classifier.py
|
# Copyright 2019,2021 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
import snapml # type: ignore
snapml_installed = True
except ImportError:
snapml_installed = False
import lale.datasets.data_schemas
import lale.docstrings
import lale.operators
class _SnapDecisionTreeClassifierImpl:
def __init__(self, **hyperparams):
assert (
snapml_installed
), """Your Python environment does not have snapml installed. Install using: pip install snapml"""
self._wrapped_model = snapml.SnapDecisionTreeClassifier(**hyperparams)
def fit(self, X, y, **fit_params):
X = lale.datasets.data_schemas.strip_schema(X)
y = lale.datasets.data_schemas.strip_schema(y)
self._wrapped_model.fit(X, y, **fit_params)
return self
def predict(self, X, **predict_params):
X = lale.datasets.data_schemas.strip_schema(X)
return self._wrapped_model.predict(X, **predict_params)
def predict_proba(self, X, **predict_proba_params):
X = lale.datasets.data_schemas.strip_schema(X)
return self._wrapped_model.predict_proba(X, **predict_proba_params)
_hyperparams_schema = {
"description": "Hyperparameter schema.",
"allOf": [
{
"description": "This first sub-object lists all constructor arguments with their types, one at a time, omitting cross-argument constraints.",
"type": "object",
"relevantToOptimizer": ["max_depth", "max_features", "hist_nbins"],
"additionalProperties": False,
"properties": {
"criterion": {
"enum": ["gini"],
"default": "gini",
"description": "Function to measure the quality of a split.",
},
"splitter": {
"enum": ["best"],
"default": "best",
"description": "The strategy used to choose the split at each node.",
},
"max_depth": {
"anyOf": [
{
"type": "integer",
"minimum": 1,
"minimumForOptimizer": 3,
"maximumForOptimizer": 5,
},
{
"enum": [None],
"description": "Nodes are expanded until all leaves are pure or until all leaves contain less than min_samples_leaf samples.",
},
],
"default": None,
"description": "The maximum depth of the tree.",
},
"min_samples_leaf": {
"anyOf": [
{
"type": "integer",
"minimum": 1,
"forOptimizer": False,
"laleMaximum": "X/maxItems", # number of rows
"description": "Consider min_samples_leaf as the minimum number.",
},
{
"type": "number",
"minimum": 0.0,
"exclusiveMinimum": True,
"maximum": 0.5,
"description": "min_samples_leaf is a fraction and ceil(min_samples_leaf * n_samples) are the minimum number of samples for each node.",
},
],
"default": 1,
"description": "The minimum number of samples required to be at a leaf node.",
},
"max_features": {
"anyOf": [
{
"type": "integer",
"minimum": 1,
"forOptimizer": False,
"laleMaximum": "X/items/maxItems", # number of columns
"description": "Consider max_features features at each split.",
},
{
"type": "number",
"minimum": 0.0,
"exclusiveMinimum": True,
"maximum": 1.0,
"minimumForOptimizer": 0.1,
"maximumForOptimizer": 0.9,
"distribution": "uniform",
"description": "max_features is a fraction and int(max_features * n_features) features are considered at each split.",
},
{"enum": ["auto", "sqrt", "log2", None]},
],
"default": None,
"description": "The number of features to consider when looking for the best split.",
},
"random_state": {
"description": "Seed of pseudo-random number generator.",
"anyOf": [
{
"description": "RandomState used by np.random",
"enum": [None],
},
{"description": "Explicit seed.", "type": "integer"},
],
"default": None,
},
"n_jobs": {
"type": "integer",
"minimum": 1,
"default": 1,
"description": "Number of CPU threads to use.",
},
"use_histograms": {
"type": "boolean",
"default": True,
"description": "Use histogram-based splits rather than exact splits.",
},
"hist_nbins": {
"type": "integer",
"default": 256,
"minimum": 1,
"maximum": 256,
"minimumForOptimizer": 16,
"maximumForOptimizer": 256,
"description": "Number of histogram bins.",
},
"use_gpu": {
"type": "boolean",
"default": False,
"description": "Use GPU acceleration (only supported for histogram-based splits).",
},
"gpu_id": {
"type": "integer",
"default": 0,
"description": "Device ID of the GPU which will be used when GPU acceleration is enabled.",
},
"verbose": {
"type": "boolean",
"default": False,
"description": "If True, it prints debugging information while training. Warning: this will increase the training time. For performance evaluation, use verbose=False.",
},
},
},
{
"description": "GPU only supported for histogram-based splits.",
"anyOf": [
{"type": "object", "properties": {"use_gpu": {"enum": [False]}}},
{"type": "object", "properties": {"use_histograms": {"enum": [True]}}},
],
},
],
}
_input_fit_schema = {
"description": "Build a decision tree from the training set (X, y).",
"type": "object",
"required": ["X", "y"],
"properties": {
"X": {
"type": "array",
"description": "The outer array is over samples aka rows.",
"items": {
"type": "array",
"description": "The inner array is over features aka columns.",
"items": {"type": "number"},
},
},
"y": {
"description": "The classes.",
"anyOf": [
{"type": "array", "items": {"type": "number"}},
{"type": "array", "items": {"type": "string"}},
{"type": "array", "items": {"type": "boolean"}},
],
},
"sample_weight": {
"anyOf": [
{"type": "array", "items": {"type": "number"}},
{"enum": [None], "description": "Samples are equally weighted."},
],
"description": "Sample weights.",
"default": None,
},
},
}
_input_predict_schema = {
"type": "object",
"required": ["X"],
"properties": {
"X": {
"type": "array",
"description": "The outer array is over samples aka rows.",
"items": {
"type": "array",
"description": "The inner array is over features aka columns.",
"items": {"type": "number"},
},
},
"n_jobs": {
"type": "integer",
"minimum": 0,
"default": 0,
"description": "Number of threads used to run inference. By default inference runs with maximum number of available threads.",
},
},
}
_output_predict_schema = {
"description": "The predicted classes.",
"anyOf": [
{"type": "array", "items": {"type": "number"}},
{"type": "array", "items": {"type": "string"}},
{"type": "array", "items": {"type": "boolean"}},
],
}
_input_predict_proba_schema = {
"type": "object",
"properties": {
"X": {
"type": "array",
"description": "The outer array is over samples aka rows.",
"items": {
"type": "array",
"description": "The inner array is over features aka columns.",
"items": {"type": "number"},
},
},
"n_jobs": {
"type": "integer",
"minimum": 0,
"default": 0,
"description": "Number of threads used to run inference. By default inference runs with maximum number of available threads..",
},
},
}
_output_predict_proba_schema = {
"type": "array",
"description": "The outer array is over samples aka rows.",
"items": {
"type": "array",
"description": "The inner array contains probabilities corresponding to each class.",
"items": {"type": "number"},
},
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """`Decision tree classifier`_ from `Snap ML`_. It can be used for binary classification problems.
.. _`Decision tree classifier`: https://snapml.readthedocs.io/en/latest/#snapml.DecisionTreeClassifier
.. _`Snap ML`: https://www.zurich.ibm.com/snapml/
""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.snapml.snap_decision_tree_classifier.html",
"import_from": "snapml",
"type": "object",
"tags": {"pre": [], "op": ["estimator", "classifier"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
"input_predict_proba": _input_predict_proba_schema,
"output_predict_proba": _output_predict_proba_schema,
},
}
SnapDecisionTreeClassifier = lale.operators.make_operator(
_SnapDecisionTreeClassifierImpl, _combined_schemas
)
lale.docstrings.set_docstrings(SnapDecisionTreeClassifier)
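# Usage sketch: an illustrative fit/predict example for the operator
# above; it only runs when the optional snapml package is installed.
# The synthetic data and the max_depth value are arbitrary.
if __name__ == "__main__" and snapml_installed:
    import numpy as np

    rng = np.random.RandomState(0)
    train_X = rng.rand(100, 4).astype(np.float32)
    train_y = (train_X[:, 0] > 0.5).astype(np.float32)
    trained = SnapDecisionTreeClassifier(max_depth=4).fit(train_X, train_y)
    print(trained.predict(train_X[:5]))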
| 11,989 | 37.928571 | 188 |
py
|
lale
|
lale-master/lale/lib/snapml/snap_random_forest_regressor.py
|
# Copyright 2019,2021 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
try:
import snapml # type: ignore
snapml_version = version.parse(getattr(snapml, "__version__"))
except ImportError:
snapml_version = None
import lale.datasets.data_schemas
import lale.docstrings
import lale.operators
class _SnapRandomForestRegressorImpl:
def __init__(self, **hyperparams):
assert (
snapml_version is not None
), """Your Python environment does not have snapml installed. Install using: pip install snapml"""
if (
snapml_version <= version.Version("1.7.8")
and "compress_trees" in hyperparams
):
del hyperparams["compress_trees"]
if hyperparams.get("gpu_ids", None) is None:
hyperparams["gpu_ids"] = [0]
self._wrapped_model = snapml.SnapRandomForestRegressor(**hyperparams)
def fit(self, X, y, **fit_params):
X = lale.datasets.data_schemas.strip_schema(X)
y = lale.datasets.data_schemas.strip_schema(y)
self._wrapped_model.fit(X, y, **fit_params)
return self
def predict(self, X, **predict_params):
X = lale.datasets.data_schemas.strip_schema(X)
return self._wrapped_model.predict(X, **predict_params)
_hyperparams_schema = {
"description": "Hyperparameter schema.",
"allOf": [
{
"description": "This first sub-object lists all constructor arguments with their types, one at a time, omitting cross-argument constraints.",
"type": "object",
"relevantToOptimizer": ["n_estimators", "max_depth", "max_features"],
"additionalProperties": False,
"properties": {
"n_estimators": {
"type": "integer",
"minimum": 1,
"minimumForOptimizer": 10,
"maximumForOptimizer": 100,
"default": 10,
"description": "The number of trees in the forest.",
},
"criterion": {
"enum": ["mse"],
"default": "mse",
"description": "Function to measure the quality of a split.",
},
"max_depth": {
"anyOf": [
{
"type": "integer",
"minimum": 1,
"minimumForOptimizer": 3,
"maximumForOptimizer": 5,
},
{
"enum": [None],
"description": "Nodes are expanded until all leaves are pure or until all leaves contain less than min_samples_leaf samples.",
},
],
"default": None,
"description": "The maximum depth of the tree.",
},
"min_samples_leaf": {
"anyOf": [
{
"type": "integer",
"minimum": 1,
"forOptimizer": False,
"laleMaximum": "X/maxItems", # number of rows
"description": "Consider min_samples_leaf as the minimum number.",
},
{
"type": "number",
"minimum": 0.0,
"exclusiveMinimum": True,
"maximum": 0.5,
"description": "min_samples_leaf is a fraction and ceil(min_samples_leaf * n_samples) are the minimum number of samples for each node.",
},
],
"default": 1,
"description": "The minimum number of samples required to be at a leaf node.",
},
"max_features": {
"anyOf": [
{
"type": "integer",
"minimum": 1,
"forOptimizer": False,
"laleMaximum": "X/items/maxItems", # number of columns
"description": "Consider max_features features at each split.",
},
{
"type": "number",
"minimum": 0.0,
"exclusiveMinimum": True,
"maximum": 1.0,
"minimumForOptimizer": 0.1,
"maximumForOptimizer": 0.9,
"distribution": "uniform",
"description": "max_features is a fraction and int(max_features * n_features) features are considered at each split.",
},
{"enum": ["auto", "sqrt", "log2", None]},
],
"default": "auto",
"description": "The number of features to consider when looking for the best split.",
},
"bootstrap": {
"type": "boolean",
"default": True,
"description": "Whether bootstrap samples are used when building trees.",
},
"n_jobs": {
"type": "integer",
"minimum": 1,
"default": 1,
"description": "Number of CPU threads to use.",
},
"random_state": {
"description": "Seed of pseudo-random number generator.",
"anyOf": [
{
"description": "RandomState used by np.random",
"enum": [None],
},
{"description": "Explicit seed.", "type": "integer"},
],
"default": None,
},
"verbose": {
"type": "boolean",
"default": False,
"description": "If True, it prints debugging information while training. Warning: this will increase the training time. For performance evaluation, use verbose=False.",
},
"use_histograms": {
"type": "boolean",
"default": False,
"description": "Use histogram-based splits rather than exact splits.",
},
"hist_nbins": {
"type": "integer",
"default": 256,
"description": "Number of histogram bins.",
},
"use_gpu": {
"type": "boolean",
"default": False,
"description": "Use GPU acceleration (only supported for histogram-based splits).",
},
"gpu_ids": {
"anyOf": [
{"description": "Use [0].", "enum": [None]},
{"type": "array", "items": {"type": "integer"}},
],
"default": None,
"description": "Device IDs of the GPUs which will be used when GPU acceleration is enabled.",
},
},
},
{
"description": "GPU only supported for histogram-based splits.",
"anyOf": [
{"type": "object", "properties": {"use_gpu": {"enum": [False]}}},
{"type": "object", "properties": {"use_histograms": {"enum": [True]}}},
],
},
],
}
_input_fit_schema = {
"description": "Build a forest of trees from the training set (X, y).",
"type": "object",
"required": ["X", "y"],
"properties": {
"X": {
"type": "array",
"description": "The outer array is over samples aka rows.",
"items": {
"type": "array",
"description": "The inner array is over features aka columns.",
"items": {"type": "number"},
},
},
"y": {
"description": "The regression target.",
"anyOf": [
{"type": "array", "items": {"type": "number"}},
],
},
"sample_weight": {
"anyOf": [
{"type": "array", "items": {"type": "number"}},
{"enum": [None], "description": "Samples are equally weighted."},
],
"description": "Sample weights.",
"default": None,
},
},
}
_input_predict_schema = {
"type": "object",
"required": ["X"],
"properties": {
"X": {
"type": "array",
"description": "The outer array is over samples aka rows.",
"items": {
"type": "array",
"description": "The inner array is over features aka columns.",
"items": {"type": "number"},
},
},
"n_jobs": {
"type": "integer",
"minimum": 0,
"default": 0,
"description": "Number of threads used to run inference. By default inference runs with maximum number of available threads.",
},
},
}
_output_predict_schema = {
"description": "The predicted values.",
"anyOf": [
{"type": "array", "items": {"type": "number"}},
],
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """`Random forest regressor`_ from `Snap ML`_.
.. _`Random forest regressor`: https://snapml.readthedocs.io/en/latest/#snapml.RandomForestRegressor
.. _`Snap ML`: https://www.zurich.ibm.com/snapml/
""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.snapml.snap_random_forest_regressor.html",
"import_from": "snapml",
"type": "object",
"tags": {"pre": [], "op": ["estimator", "regressor"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
},
}
SnapRandomForestRegressor = lale.operators.make_operator(
_SnapRandomForestRegressorImpl, _combined_schemas
)
if snapml_version is not None and snapml_version > version.Version("1.7.8"): # type: ignore # noqa
from lale.schemas import Bool
SnapRandomForestRegressor = SnapRandomForestRegressor.customize_schema(
compress_trees=Bool(
desc="""Compress trees after training for fast inference.""",
default=False,
forOptimizer=False,
),
set_as_available=True,
)
lale.docstrings.set_docstrings(SnapRandomForestRegressor)
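# Usage sketch: an illustrative example of the operator above; it only
# runs when the optional snapml package is installed. The synthetic
# regression data and the n_estimators value are arbitrary.
if __name__ == "__main__" and snapml_version is not None:
    import numpy as np

    rng = np.random.RandomState(0)
    train_X = rng.rand(100, 4).astype(np.float32)
    train_y = 2.0 * train_X[:, 0] + train_X[:, 1]
    trained = SnapRandomForestRegressor(n_estimators=10).fit(train_X, train_y)
    print(trained.predict(train_X[:5]))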
| 11,528 | 37.949324 | 188 |
py
|
lale
|
lale-master/lale/lib/snapml/batched_tree_ensemble_classifier.py
|
# Copyright 2022 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
import snapml # type: ignore
snapml_installed = True
except ImportError:
snapml_installed = False
import pandas as pd
import lale.datasets.data_schemas
import lale.docstrings
import lale.operators
def _ensure_numpy(data):
if isinstance(data, (pd.DataFrame, pd.Series)):
return data.to_numpy()
return lale.datasets.data_schemas.strip_schema(data)
class _BatchedTreeEnsembleClassifierImpl:
def __init__(self, **hyperparams):
assert (
snapml_installed
), """Your Python environment does not have snapml installed. Install using: pip install snapml"""
if hyperparams.get("base_ensemble", None) is None:
from snapml import SnapBoostingMachineClassifier
hyperparams["base_ensemble"] = SnapBoostingMachineClassifier()
self._wrapped_model = snapml.BatchedTreeEnsembleClassifier(**hyperparams)
def fit(self, X, y, **fit_params):
X = _ensure_numpy(X)
y = _ensure_numpy(y)
self._wrapped_model.fit(X, y, **fit_params)
return self
def predict(self, X, **predict_params):
X = _ensure_numpy(X)
return self._wrapped_model.predict(X, **predict_params)
def predict_proba(self, X, **predict_proba_params):
X = _ensure_numpy(X)
return self._wrapped_model.predict_proba(X, **predict_proba_params)
def partial_fit(self, X, y, **fit_params):
X = _ensure_numpy(X)
y = _ensure_numpy(y)
self._wrapped_model.partial_fit(X, y, **fit_params)
return self
_hyperparams_schema = {
"description": "Hyperparameter schema.",
"allOf": [
{
"description": "This first object lists all constructor arguments with their types, but omits constraints for conditional hyperparameters.",
"type": "object",
"additionalProperties": True,
"relevantToOptimizer": [],
"properties": {},
}
],
}
_input_fit_schema = {
"description": "Fit the base ensemble without batching.",
"type": "object",
"required": ["X", "y"],
"properties": {
"X": {
"type": "array",
"description": "The outer array is over samples aka rows.",
"items": {
"type": "array",
"description": "The inner array is over features aka columns.",
"items": {"type": "number"},
},
},
"y": {
"description": "The classes.",
"anyOf": [
{"type": "array", "items": {"type": "number"}},
{"type": "array", "items": {"type": "string"}},
{"type": "array", "items": {"type": "boolean"}},
],
},
"sample_weight": {
"anyOf": [
{"type": "array", "items": {"type": "number"}},
{"enum": [None], "description": "Samples are equally weighted."},
],
"description": "Sample weights.",
"default": None,
},
},
}
_input_predict_schema = {
"type": "object",
"required": ["X"],
"properties": {
"X": {
"type": "array",
"description": "The outer array is over samples aka rows.",
"items": {
"type": "array",
"description": "The inner array is over features aka columns.",
"items": {"type": "number"},
},
},
},
}
_output_predict_schema = {
"description": "The predicted classes.",
"anyOf": [
{"type": "array", "items": {"type": "number"}},
{"type": "array", "items": {"type": "string"}},
{"type": "array", "items": {"type": "boolean"}},
],
}
_input_predict_proba_schema = {
"type": "object",
"properties": {
"X": {
"type": "array",
"description": "The outer array is over samples aka rows.",
"items": {
"type": "array",
"description": "The inner array is over features aka columns.",
"items": {"type": "number"},
},
},
},
}
_output_predict_proba_schema = {
"type": "array",
"description": "The outer array is over samples aka rows.",
"items": {
"type": "array",
"description": "The inner array contains probabilities corresponding to each class.",
"items": {"type": "number"},
},
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """`Batched Tree Ensemble Classifier`_ from `Snap ML`_.
.. _`Batched Tree Ensemble Classifier`: https://snapml.readthedocs.io/en/latest/batched_tree_ensembles.html
.. _`Snap ML`: https://www.zurich.ibm.com/snapml/
""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.snapml.batched_tree_ensemble_classifier.html",
"import_from": "snapml",
"type": "object",
"tags": {"pre": [], "op": ["estimator", "classifier"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
"input_predict_proba": _input_predict_proba_schema,
"output_predict_proba": _output_predict_proba_schema,
},
}
BatchedTreeEnsembleClassifier = lale.operators.make_operator(
_BatchedTreeEnsembleClassifierImpl, _combined_schemas
)
lale.docstrings.set_docstrings(BatchedTreeEnsembleClassifier)
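# Usage sketch: an illustrative example of the operator above; it only
# runs when the optional snapml package is installed. fit trains the
# default boosting-machine base ensemble in one shot; partial_fit (see
# the implementation above) would consume the data batch by batch
# instead. The synthetic data below is arbitrary.
if __name__ == "__main__" and snapml_installed:
    import numpy as np

    rng = np.random.RandomState(0)
    train_X = rng.rand(200, 4).astype(np.float32)
    train_y = (train_X[:, 0] > 0.5).astype(np.float32)
    trained = BatchedTreeEnsembleClassifier().fit(train_X, train_y)
    print(trained.predict_proba(train_X[:3]))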
| 6,110 | 31.333333 | 152 |
py
|
lale
|
lale-master/lale/lib/snapml/snap_boosting_machine_regressor.py
|
# Copyright 2019,2021 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
try:
import snapml # type: ignore
snapml_version = version.parse(getattr(snapml, "__version__"))
except ImportError:
snapml_version = None
import lale.datasets.data_schemas
import lale.docstrings
import lale.operators
class _SnapBoostingMachineRegressorImpl:
def __init__(self, **hyperparams):
assert (
snapml_version is not None
), """Your Python environment does not have snapml installed. Install using: pip install snapml"""
self._wrapped_model = snapml.SnapBoostingMachineRegressor(**hyperparams)
def fit(self, X, y, **fit_params):
X = lale.datasets.data_schemas.strip_schema(X)
y = lale.datasets.data_schemas.strip_schema(y)
self._wrapped_model.fit(X, y, **fit_params)
return self
def predict(self, X, **predict_params):
X = lale.datasets.data_schemas.strip_schema(X)
return self._wrapped_model.predict(X, **predict_params)
_hyperparams_schema = {
"description": "Hyperparameter schema.",
"allOf": [
{
"description": "This first sub-object lists all constructor arguments with their types, one at a time, omitting cross-argument constraints.",
"type": "object",
"relevantToOptimizer": [
"num_round",
"learning_rate",
"min_max_depth",
"max_max_depth",
],
"additionalProperties": False,
"properties": {
"num_round": {
"type": "integer",
"minimum": 1,
"minimumForOptimizer": 100,
"maximumForOptimizer": 1000,
"default": 100,
"description": "Number of boosting iterations.",
},
"objective": {
"enum": ["mse", "cross_entropy"],
"default": "mse",
"description": "Training objective.",
},
"learning_rate": {
"type": "number",
"minimum": 0.0,
"exclusiveMinimum": True,
"minimumForOptimizer": 0.01,
"maximumForOptimizer": 0.3,
"distribution": "uniform",
"default": 0.1,
"description": "Learning rate / shrinkage factor.",
},
"random_state": {
"type": "integer",
"default": 0,
"description": "Random seed.",
},
"colsample_bytree": {
"type": "number",
"minimum": 0.0,
"exclusiveMinimum": True,
"maximum": 1.0,
"default": 1.0,
"description": "Fraction of feature columns used at each boosting iteration.",
},
"subsample": {
"type": "number",
"minimum": 0.0,
"exclusiveMinimum": True,
"maximum": 1.0,
"default": 1.0,
"description": "Fraction of training examples used at each boosting iteration.",
},
"verbose": {
"type": "boolean",
"default": False,
"description": "Print off information during training.",
},
"lambda_l2": {
"type": "number",
"minimum": 0.0,
"default": 0.0,
"description": "L2-reguralization penalty used during tree-building.",
},
"early_stopping_rounds": {
"type": "integer",
"minimum": 1,
"default": 10,
"description": "When a validation set is provided, training will stop if the validation loss does not increase after a fixed number of rounds.",
},
"compress_trees": {
"type": "boolean",
"default": False,
"description": "Compress trees after training for fast inference.",
},
"base_score": {
"anyOf": [
{
"type": "number",
},
{"enum": [None]},
],
"default": None,
"description": "Base score to initialize boosting algorithm. If None then the algorithm will initialize the base score to be the the logit of the probability of the positive class.",
},
"max_depth": {
"anyOf": [
{
"type": "integer",
"minimum": 1,
},
{"enum": [None]},
],
"default": None,
"description": "If set, will set min_max_depth = max_depth = max_max_depth",
},
"min_max_depth": {
"type": "integer",
"minimum": 1,
"minimumForOptimizer": 1,
"maximumForOptimizer": 5,
"default": 1,
"description": "Minimum max_depth of trees in the ensemble.",
},
"max_max_depth": {
"type": "integer",
"minimum": 1,
"minimumForOptimizer": 5,
"maximumForOptimizer": 10,
"default": 5,
"description": "Maximum max_depth of trees in the ensemble.",
},
"n_jobs": {
"type": "integer",
"minimum": 1,
"default": 1,
"description": "Number of threads to use during training.",
},
"use_histograms": {
"type": "boolean",
"default": True,
"description": "Use histograms to accelerate tree-building.",
},
"hist_nbins": {
"type": "integer",
"default": 256,
"description": "Number of histogram bins.",
},
"use_gpu": {
"type": "boolean",
"default": False,
"description": "Use GPU for tree-building.",
},
"gpu_id": {
"type": "integer",
"default": 0,
"description": "Device ID for GPU to use during training.",
},
"tree_select_probability": {
"type": "number",
"minimum": 0.0,
"maximum": 1.0,
"default": 1.0,
"description": "Probability of selecting a tree (rather than a kernel ridge regressor) at each boosting iteration.",
},
"regularizer": {
"type": "number",
"minimum": 0.0,
"default": 1.0,
"description": "L2-regularization penality for the kernel ridge regressor.",
},
"fit_intercept": {
"type": "boolean",
"default": False,
"description": "Include intercept term in the kernel ridge regressor.",
},
"gamma": {
"type": "number",
"minimum": 0.0,
"default": 1.0,
"description": "Guassian kernel parameter.",
},
"n_components": {
"type": "integer",
"minimum": 1,
"default": 10,
"description": "Number of components in the random projection.",
},
},
},
{
"description": "GPU only supported for histogram-based splits.",
"anyOf": [
{"type": "object", "properties": {"use_gpu": {"enum": [False]}}},
{"type": "object", "properties": {"use_histograms": {"enum": [True]}}},
],
},
],
}
_input_fit_schema = {
"description": "Build a boosted ensemble from the training set (X, y).",
"type": "object",
"required": ["X", "y"],
"properties": {
"X": {
"type": "array",
"description": "The outer array is over samples aka rows.",
"items": {
"type": "array",
"description": "The inner array is over features aka columns.",
"items": {"type": "number"},
},
},
"y": {
"description": "The regression target.",
"anyOf": [
{"type": "array", "items": {"type": "number"}},
],
},
"sample_weight": {
"anyOf": [
{"type": "array", "items": {"type": "number"}},
{"enum": [None], "description": "Samples are equally weighted."},
],
"description": "Sample weights.",
"default": None,
},
"X_val": {
"anyOf": [
{
"type": "array",
"description": "The outer array is over validation samples aka rows.",
"items": {
"type": "array",
"description": "The inner array is over features aka columns.",
"items": {"type": "number"},
},
},
{"enum": [None], "description": "No validation set provided."},
],
"default": None,
},
"y_val": {
"anyOf": [
{"type": "array", "items": {"type": "number"}},
{"enum": [None], "description": "No validation set provided."},
],
"description": "The validation regression target.",
"default": None,
},
"sample_weight_val": {
"anyOf": [
{"type": "array", "items": {"type": "number"}},
{
"enum": [None],
"description": "Validation samples are equally weighted.",
},
],
"description": "Validation sample weights.",
"default": None,
},
},
}
_input_predict_schema = {
"type": "object",
"required": ["X"],
"properties": {
"X": {
"type": "array",
"description": "The outer array is over samples aka rows.",
"items": {
"type": "array",
"description": "The inner array is over features aka columns.",
"items": {"type": "number"},
},
},
"n_jobs": {
"type": "integer",
"minimum": 1,
"default": 1,
"description": "Number of threads used to run inference.",
},
},
}
_output_predict_schema = {
"description": "The predicted values.",
"anyOf": [
{"type": "array", "items": {"type": "number"}},
],
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """`Boosting machine Regressor`_ from `Snap ML`_.
.. _`Boosting machine Regressor`: https://snapml.readthedocs.io/en/latest/#snapml.BoostingMachineRegressor
.. _`Snap ML`: https://www.zurich.ibm.com/snapml/
""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.snapml.snap_boosting_machine_regressor.html",
"import_from": "snapml",
"type": "object",
"tags": {"pre": [], "op": ["estimator", "regressor"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
},
}
SnapBoostingMachineRegressor = lale.operators.make_operator(
_SnapBoostingMachineRegressorImpl, _combined_schemas
)
if snapml_version is not None and snapml_version >= version.Version("1.12"):
SnapBoostingMachineRegressor = SnapBoostingMachineRegressor.customize_schema(
max_delta_step={
"description": """Regularization term to ensure numerical stability.""",
"anyOf": [
{
"type": "number",
"minimum": 0.0,
},
{"enum": [None]},
],
"default": 0.0,
}
)
lale.docstrings.set_docstrings(SnapBoostingMachineRegressor)
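# Usage sketch: an illustrative example of the operator above; it only
# runs when the optional snapml package is installed. The synthetic data
# and the num_round value are arbitrary.
if __name__ == "__main__" and snapml_version is not None:
    import numpy as np

    rng = np.random.RandomState(0)
    train_X = rng.rand(150, 5).astype(np.float32)
    train_y = train_X @ np.arange(1.0, 6.0, dtype=np.float32)
    trained = SnapBoostingMachineRegressor(num_round=50).fit(train_X, train_y)
    print(trained.predict(train_X[:5]))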
| 13,618 | 36.210383 | 202 |
py
|
lale
|
lale-master/lale/lib/snapml/snap_random_forest_classifier.py
|
# Copyright 2019,2021 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
try:
import snapml # type: ignore
snapml_version = version.parse(getattr(snapml, "__version__"))
except ImportError:
snapml_version = None
import lale.datasets.data_schemas
import lale.docstrings
import lale.operators
class _SnapRandomForestClassifierImpl:
def __init__(self, **hyperparams):
assert (
snapml_version is not None
), """Your Python environment does not have snapml installed. Install using: pip install snapml"""
if (
snapml_version <= version.Version("1.7.8")
and "compress_trees" in hyperparams
):
del hyperparams["compress_trees"]
if hyperparams.get("gpu_ids", None) is None:
hyperparams["gpu_ids"] = [0]
self._wrapped_model = snapml.SnapRandomForestClassifier(**hyperparams)
def fit(self, X, y, **fit_params):
X = lale.datasets.data_schemas.strip_schema(X)
y = lale.datasets.data_schemas.strip_schema(y)
self._wrapped_model.fit(X, y, **fit_params)
return self
def predict(self, X, **predict_params):
X = lale.datasets.data_schemas.strip_schema(X)
return self._wrapped_model.predict(X, **predict_params)
def predict_proba(self, X, **predict_proba_params):
X = lale.datasets.data_schemas.strip_schema(X)
return self._wrapped_model.predict_proba(X, **predict_proba_params)
_hyperparams_schema = {
"description": "Hyperparameter schema.",
"allOf": [
{
"description": "This first sub-object lists all constructor arguments with their types, one at a time, omitting cross-argument constraints.",
"type": "object",
"relevantToOptimizer": ["n_estimators", "max_depth", "max_features"],
"additionalProperties": False,
"properties": {
"n_estimators": {
"type": "integer",
"minimum": 1,
"minimumForOptimizer": 10,
"maximumForOptimizer": 100,
"default": 10,
"description": "The number of trees in the forest.",
},
"criterion": {
"enum": ["gini"],
"default": "gini",
"description": "Function to measure the quality of a split.",
},
"max_depth": {
"anyOf": [
{
"type": "integer",
"minimum": 1,
"minimumForOptimizer": 3,
"maximumForOptimizer": 5,
},
{
"enum": [None],
"description": "Nodes are expanded until all leaves are pure or until all leaves contain less than min_samples_leaf samples.",
},
],
"default": None,
"description": "The maximum depth of the tree.",
},
"min_samples_leaf": {
"anyOf": [
{
"type": "integer",
"minimum": 1,
"forOptimizer": False,
"laleMaximum": "X/maxItems", # number of rows
"description": "Consider min_samples_leaf as the minimum number.",
},
{
"type": "number",
"minimum": 0.0,
"exclusiveMinimum": True,
"maximum": 0.5,
"description": "min_samples_leaf is a fraction and ceil(min_samples_leaf * n_samples) are the minimum number of samples for each node.",
},
],
"default": 1,
"description": "The minimum number of samples required to be at a leaf node.",
},
"max_features": {
"anyOf": [
{
"type": "integer",
"minimum": 1,
"forOptimizer": False,
"laleMaximum": "X/items/maxItems", # number of columns
"description": "Consider max_features features at each split.",
},
{
"type": "number",
"minimum": 0.0,
"exclusiveMinimum": True,
"maximum": 1.0,
"minimumForOptimizer": 0.1,
"maximumForOptimizer": 0.9,
"distribution": "uniform",
"description": "max_features is a fraction and int(max_features * n_features) features are considered at each split.",
},
{"enum": ["auto", "sqrt", "log2", None]},
],
"default": "auto",
"description": "The number of features to consider when looking for the best split.",
},
"bootstrap": {
"type": "boolean",
"default": True,
"description": "Whether bootstrap samples are used when building trees.",
},
"n_jobs": {
"type": "integer",
"minimum": 1,
"default": 1,
"description": "Number of CPU threads to use.",
},
"random_state": {
"description": "Seed of pseudo-random number generator.",
"anyOf": [
{
"description": "RandomState used by np.random",
"enum": [None],
},
{"description": "Explicit seed.", "type": "integer"},
],
"default": None,
},
"verbose": {
"type": "boolean",
"default": False,
"description": "If True, it prints debugging information while training. Warning: this will increase the training time. For performance evaluation, use verbose=False.",
},
"use_histograms": {
"type": "boolean",
"default": False,
"description": "Use histogram-based splits rather than exact splits.",
},
"hist_nbins": {
"type": "integer",
"default": 256,
"description": "Number of histogram bins.",
},
"use_gpu": {
"type": "boolean",
"default": False,
"description": "Use GPU acceleration (only supported for histogram-based splits).",
},
"gpu_ids": {
"anyOf": [
{"description": "Use [0].", "enum": [None]},
{"type": "array", "items": {"type": "integer"}},
],
"default": None,
"description": "Device IDs of the GPUs which will be used when GPU acceleration is enabled.",
},
},
},
{
"description": "GPU only supported for histogram-based splits.",
"anyOf": [
{"type": "object", "properties": {"use_gpu": {"enum": [False]}}},
{"type": "object", "properties": {"use_histograms": {"enum": [True]}}},
],
},
],
}
_input_fit_schema = {
"description": "Build a forest of trees from the training set (X, y).",
"type": "object",
"required": ["X", "y"],
"properties": {
"X": {
"type": "array",
"description": "The outer array is over samples aka rows.",
"items": {
"type": "array",
"description": "The inner array is over features aka columns.",
"items": {"type": "number"},
},
},
"y": {
"description": "The classes.",
"anyOf": [
{"type": "array", "items": {"type": "number"}},
{"type": "array", "items": {"type": "string"}},
{"type": "array", "items": {"type": "boolean"}},
],
},
"sample_weight": {
"anyOf": [
{"type": "array", "items": {"type": "number"}},
{"enum": [None], "description": "Samples are equally weighted."},
],
"description": "Sample weights.",
"default": None,
},
},
}
_input_predict_schema = {
"type": "object",
"required": ["X"],
"properties": {
"X": {
"type": "array",
"description": "The outer array is over samples aka rows.",
"items": {
"type": "array",
"description": "The inner array is over features aka columns.",
"items": {"type": "number"},
},
},
"n_jobs": {
"type": "integer",
"minimum": 0,
"default": 0,
"description": "Number of threads used to run inference. By default inference runs with maximum number of available threads.",
},
},
}
_output_predict_schema = {
"description": "The predicted classes.",
"anyOf": [
{"type": "array", "items": {"type": "number"}},
{"type": "array", "items": {"type": "string"}},
{"type": "array", "items": {"type": "boolean"}},
],
}
_input_predict_proba_schema = {
"type": "object",
"properties": {
"X": {
"type": "array",
"description": "The outer array is over samples aka rows.",
"items": {
"type": "array",
"description": "The inner array is over features aka columns.",
"items": {"type": "number"},
},
},
"n_jobs": {
"type": "integer",
"minimum": 0,
"default": 0,
"description": "Number of threads used to run inference. By default inference runs with maximum number of available threads..",
},
},
}
_output_predict_proba_schema = {
"type": "array",
"description": "The outer array is over samples aka rows.",
"items": {
"type": "array",
"description": "The inner array contains probabilities corresponding to each class.",
"items": {"type": "number"},
},
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """`Random forest classifier`_ from `Snap ML`_. It can be used for binary classification problems.
.. _`Random forest classifier`: https://snapml.readthedocs.io/en/latest/#snapml.RandomForestClassifier
.. _`Snap ML`: https://www.zurich.ibm.com/snapml/
""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.snapml.snap_random_forest_classifier.html",
"import_from": "snapml",
"type": "object",
"tags": {"pre": [], "op": ["estimator", "classifier"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
"input_predict_proba": _input_predict_proba_schema,
"output_predict_proba": _output_predict_proba_schema,
},
}
SnapRandomForestClassifier = lale.operators.make_operator(
_SnapRandomForestClassifierImpl, _combined_schemas
)
if snapml_version is not None and snapml_version > version.Version("1.7.8"): # type: ignore # noqa
from lale.schemas import Bool
SnapRandomForestClassifier = SnapRandomForestClassifier.customize_schema(
compress_trees=Bool(
desc="""Compress trees after training for fast inference.""",
default=False,
forOptimizer=False,
),
set_as_available=True,
)
lale.docstrings.set_docstrings(SnapRandomForestClassifier)
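# Usage sketch: an illustrative example of the operator above; it only
# runs when the optional snapml package is installed. The synthetic data
# and the n_estimators value are arbitrary.
if __name__ == "__main__" and snapml_version is not None:
    import numpy as np

    rng = np.random.RandomState(0)
    train_X = rng.rand(120, 4).astype(np.float32)
    train_y = (train_X[:, 0] + train_X[:, 1] > 1.0).astype(np.float32)
    trained = SnapRandomForestClassifier(n_estimators=25).fit(train_X, train_y)
    print(trained.predict_proba(train_X[:3]))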
| 13,096 | 37.863501 | 188 |
py
|
lale
|
lale-master/lale/lib/snapml/batched_tree_ensemble_regressor.py
|
# Copyright 2022 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
import snapml # type: ignore
snapml_installed = True
except ImportError:
snapml_installed = False
import pandas as pd
import lale.datasets.data_schemas
import lale.docstrings
import lale.operators
def _ensure_numpy(data):
    # Snap ML expects plain numpy inputs: convert pandas objects and strip
    # any lale schema metadata from other array-likes.
    if isinstance(data, (pd.DataFrame, pd.Series)):
        return data.to_numpy()
    return lale.datasets.data_schemas.strip_schema(data)
class _BatchedTreeEnsembleRegressorImpl:
def __init__(self, **hyperparams):
assert (
snapml_installed
), """Your Python environment does not have snapml installed. Install using: pip install snapml"""
if hyperparams.get("base_ensemble") is None:
from snapml import SnapBoostingMachineRegressor
hyperparams["base_ensemble"] = SnapBoostingMachineRegressor()
self._wrapped_model = snapml.BatchedTreeEnsembleRegressor(**hyperparams)
def fit(self, X, y, **fit_params):
X = _ensure_numpy(X)
y = _ensure_numpy(y)
self._wrapped_model.fit(X, y, **fit_params)
return self
def predict(self, X, **predict_params):
X = _ensure_numpy(X)
return self._wrapped_model.predict(X, **predict_params)
def predict_proba(self, X, **predict_proba_params):
X = _ensure_numpy(X)
return self._wrapped_model.predict_proba(X, **predict_proba_params)
def partial_fit(self, X, y, **fit_params):
X = _ensure_numpy(X)
y = _ensure_numpy(y)
self._wrapped_model.partial_fit(X, y, **fit_params)
return self
_hyperparams_schema = {
"description": "Hyperparameter schema.",
"allOf": [
{
"description": "This first object lists all constructor arguments with their types, but omits constraints for conditional hyperparameters.",
"type": "object",
"additionalProperties": True,
"relevantToOptimizer": [],
"properties": {},
}
],
}
_input_fit_schema = {
"description": "Fit the base ensemble without batching.",
"type": "object",
"required": ["X", "y"],
"properties": {
"X": {
"type": "array",
"description": "The outer array is over samples aka rows.",
"items": {
"type": "array",
"description": "The inner array is over features aka columns.",
"items": {"type": "number"},
},
},
"y": {
"description": "The classes.",
"anyOf": [
{"type": "array", "items": {"type": "number"}},
{"type": "array", "items": {"type": "string"}},
{"type": "array", "items": {"type": "boolean"}},
],
},
"sample_weight": {
"anyOf": [
{"type": "array", "items": {"type": "number"}},
{"enum": [None], "description": "Samples are equally weighted."},
],
"description": "Sample weights.",
"default": None,
},
},
}
_input_predict_schema = {
"type": "object",
"required": ["X"],
"properties": {
"X": {
"type": "array",
"description": "The outer array is over samples aka rows.",
"items": {
"type": "array",
"description": "The inner array is over features aka columns.",
"items": {"type": "number"},
},
},
},
}
_output_predict_schema = {
"description": "The predicted classes.",
"anyOf": [
{"type": "array", "items": {"type": "number"}},
{"type": "array", "items": {"type": "string"}},
{"type": "array", "items": {"type": "boolean"}},
],
}
_input_predict_proba_schema = {
"type": "object",
"properties": {
"X": {
"type": "array",
"description": "The outer array is over samples aka rows.",
"items": {
"type": "array",
"description": "The inner array is over features aka columns.",
"items": {"type": "number"},
},
},
},
}
_output_predict_proba_schema = {
"type": "array",
"description": "The outer array is over samples aka rows.",
"items": {
"type": "array",
"description": "The inner array contains probabilities corresponding to each class.",
"items": {"type": "number"},
},
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """`Batched Tree Ensemble Regressor`_ from `Snap ML`_.
.. _`Batched Tree Ensemble Regressor`: https://snapml.readthedocs.io/en/latest/batched_tree_ensembles.html
.. _`Snap ML`: https://www.zurich.ibm.com/snapml/
""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.snapml.batched_tree_ensemble_regressor.html",
"import_from": "snapml",
"type": "object",
"tags": {"pre": [], "op": ["estimator", "regressor"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
"input_predict_proba": _input_predict_proba_schema,
"output_predict_proba": _output_predict_proba_schema,
},
}
BatchedTreeEnsembleRegressor = lale.operators.make_operator(
_BatchedTreeEnsembleRegressorImpl, _combined_schemas
)
lale.docstrings.set_docstrings(BatchedTreeEnsembleRegressor)
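# A minimal usage sketch, assuming snapml and scikit-learn are installed;
# the synthetic data below is illustrative only.
if __name__ == "__main__":
    from sklearn.datasets import make_regression
    X, y = make_regression(n_samples=200, n_features=5, random_state=0)
    # fit trains the base ensemble in one shot; partial_fit (see the
    # implementation above) supports incremental training on batches.
    trained = BatchedTreeEnsembleRegressor().fit(X, y)
    print(trained.predict(X[:5]))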
| 6,093 | 31.243386 | 152 |
py
|
lale
|
lale-master/lale/lib/snapml/__init__.py
|
# Copyright 2020,2021 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Schema-enhanced versions of the operators from `Snap ML`_ to enable hyperparameter tuning.
.. _`Snap ML`: https://www.zurich.ibm.com/snapml/
Operators
=========
Classifiers:
* lale.lib.snapml. `BatchedTreeEnsembleClassifier`_
* lale.lib.snapml. `SnapBoostingMachineClassifier`_
* lale.lib.snapml. `SnapDecisionTreeClassifier`_
* lale.lib.snapml. `SnapLogisticRegression`_
* lale.lib.snapml. `SnapRandomForestClassifier`_
* lale.lib.snapml. `SnapSVMClassifier`_
Regressors:
* lale.lib.snapml. `BatchedTreeEnsembleRegressor`_
* lale.lib.snapml. `SnapBoostingMachineRegressor`_
* lale.lib.snapml. `SnapDecisionTreeRegressor`_
* lale.lib.snapml. `SnapLinearRegression`_
* lale.lib.snapml. `SnapRandomForestRegressor`_
.. _`BatchedTreeEnsembleClassifier`: lale.lib.snapml.batched_tree_ensemble_classifier.html
.. _`BatchedTreeEnsembleRegressor`: lale.lib.snapml.batched_tree_ensemble_regressor.html
.. _`SnapBoostingMachineClassifier`: lale.lib.snapml.snap_boosting_machine_classifier.html
.. _`SnapBoostingMachineRegressor`: lale.lib.snapml.snap_boosting_machine_regressor.html
.. _`SnapDecisionTreeClassifier`: lale.lib.snapml.snap_decision_tree_classifier.html
.. _`SnapDecisionTreeRegressor`: lale.lib.snapml.snap_decision_tree_regressor.html
.. _`SnapLinearRegression`: lale.lib.snapml.snap_linear_regression.html
.. _`SnapLogisticRegression`: lale.lib.snapml.snap_logistic_regression.html
.. _`SnapRandomForestClassifier`: lale.lib.snapml.snap_random_forest_classifier.html
.. _`SnapRandomForestRegressor`: lale.lib.snapml.snap_random_forest_regressor.html
.. _`SnapSVMClassifier`: lale.lib.snapml.snap_svm_classifier.html
"""
from lale import register_lale_wrapper_modules
from .batched_tree_ensemble_classifier import (
BatchedTreeEnsembleClassifier as BatchedTreeEnsembleClassifier,
)
from .batched_tree_ensemble_regressor import (
BatchedTreeEnsembleRegressor as BatchedTreeEnsembleRegressor,
)
from .snap_boosting_machine_classifier import (
SnapBoostingMachineClassifier as SnapBoostingMachineClassifier,
)
from .snap_boosting_machine_regressor import (
SnapBoostingMachineRegressor as SnapBoostingMachineRegressor,
)
from .snap_decision_tree_classifier import (
SnapDecisionTreeClassifier as SnapDecisionTreeClassifier,
)
from .snap_decision_tree_regressor import (
SnapDecisionTreeRegressor as SnapDecisionTreeRegressor,
)
from .snap_linear_regression import SnapLinearRegression as SnapLinearRegression
from .snap_logistic_regression import SnapLogisticRegression as SnapLogisticRegression
from .snap_random_forest_classifier import (
SnapRandomForestClassifier as SnapRandomForestClassifier,
)
from .snap_random_forest_regressor import (
SnapRandomForestRegressor as SnapRandomForestRegressor,
)
from .snap_svm_classifier import SnapSVMClassifier as SnapSVMClassifier
# Note: all imports should be done as
# from .xxx import XXX as XXX
# this ensures that pyright considers them to be publicly available
# and not private imports (this affects lale users that use pyright)
register_lale_wrapper_modules(__name__)
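# A minimal usage sketch, assuming snapml and scikit-learn are installed:
# any of the operators above can be composed into a lale pipeline with
# the >> combinator; the hyperparameter choices are illustrative only.
if __name__ == "__main__":
    from sklearn.datasets import load_breast_cancer
    from lale.lib.sklearn import PCA
    X, y = load_breast_cancer(return_X_y=True)
    pipeline = PCA(n_components=5) >> SnapRandomForestClassifier()
    trained = pipeline.fit(X, y)
    print(trained.predict(X)[:5])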
| 3,652 | 40.044944 | 90 |
py
|
lale
|
lale-master/lale/lib/snapml/snap_svm_classifier.py
|
# Copyright 2019,2021 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
try:
import snapml # type: ignore
snapml_version = version.parse(getattr(snapml, "__version__"))
except ImportError:
snapml_version = None
import lale.datasets.data_schemas
import lale.docstrings
import lale.operators
class _SnapSVMClassifierImpl:
def __init__(self, **hyperparams):
assert (
snapml_version is not None
), """Your Python environment does not have snapml installed. Install using: pip install snapml"""
if snapml_version <= version.Version("1.8.0") and "loss" in hyperparams:
del hyperparams["loss"]
if hyperparams.get("device_ids", None) is None:
hyperparams["device_ids"] = [0]
self._wrapped_model = snapml.SnapSVMClassifier(**hyperparams)
def fit(self, X, y, **fit_params):
X = lale.datasets.data_schemas.strip_schema(X)
y = lale.datasets.data_schemas.strip_schema(y)
self._wrapped_model.fit(X, y, **fit_params)
return self
def predict(self, X, **predict_params):
X = lale.datasets.data_schemas.strip_schema(X)
return self._wrapped_model.predict(X, **predict_params)
def decision_function(self, X, **decision_function_params):
X = lale.datasets.data_schemas.strip_schema(X)
return self._wrapped_model.decision_function(X, **decision_function_params)
_hyperparams_schema = {
"description": "Hyperparameter schema.",
"allOf": [
{
"description": "This first sub-object lists all constructor arguments with their types, one at a time, omitting cross-argument constraints.",
"type": "object",
"relevantToOptimizer": [
"fit_intercept",
"regularizer",
"max_iter",
"kernel",
"gamma",
"n_components",
],
"additionalProperties": False,
"properties": {
"max_iter": {
"type": "integer",
"minimum": 1,
"minimumForOptimizer": 10,
"maximumForOptimizer": 1000,
"default": 100,
"description": "Maximum number of iterations used by the solver to converge.",
},
"regularizer": {
"type": "number",
"minimum": 0.0,
"default": 1.0,
"exclusiveMinimum": True,
"minimumForOptimizer": 1.0,
"maximumForOptimizer": 100.0,
"distribution": "uniform",
"description": "Larger regularization values imply stronger regularization.",
},
"use_gpu": {
"type": "boolean",
"default": False,
"description": "Use GPU Acceleration.",
},
"device_ids": {
"anyOf": [
{"description": "Use [0].", "enum": [None]},
{"type": "array", "items": {"type": "integer"}},
],
"default": None,
"description": "Device IDs of the GPUs which will be used when GPU acceleration is enabled.",
},
"class_weight": {
"enum": ["balanced", None],
"default": None,
"description": "If set to 'balanced' samples weights will be applied to account for class imbalance, otherwise no sample weights will be used.",
},
"verbose": {
"type": "boolean",
"default": False,
"description": "If True, it prints the training cost, one per iteration. Warning: this will increase the training time. For performance evaluation, use verbose=False.",
},
"n_jobs": {
"type": "integer",
"minimum": 1,
"default": 1,
"description": "The number of threads used for running the training. The value of this parameter should be a multiple of 32 if the training is performed on GPU (use_gpu=True).",
},
"tol": {
"type": "number",
"minimum": 0.0,
"default": 0.001,
"exclusiveMinimum": True,
"description": "The tolerance parameter. Training will finish when maximum change in model coefficients is less than tol.",
},
"generate_training_history": {
"enum": ["summary", "full", None],
"default": None,
"description": "Determines the level of summary statistics that are generated during training.",
},
"fit_intercept": {
"type": "boolean",
"default": True,
"description": "Add bias term -- note, may affect speed of convergence, especially for sparse datasets.",
},
"intercept_scaling": {
"type": "number",
"minimum": 0.0,
"default": 1.0,
"exclusiveMinimum": True,
"description": "Scaling of bias term. The inclusion of a bias term is implemented by appending an additional feature to the dataset. This feature has a constant value, that can be set using this parameter.",
},
"normalize": {
"type": "boolean",
"default": True,
"description": "Normalize rows of dataset (recommended for fast convergence).",
},
"kernel": {
"enum": ["rbf", "linear"],
"default": "rbf",
"description": "Approximate feature map of a specified kernel function.",
},
"gamma": {
"type": "number",
"minimum": 0.0,
"default": 1.0,
"exclusiveMinimum": True,
"minimumForOptimizer": 0.01,
"maximumForOptimizer": 100.0,
"distribution": "uniform",
"description": "Parameter of RBF kernel: exp(-gamma * x^2).",
},
"n_components": {
"type": "integer",
"minimum": 1,
"default": 100,
"minimumForOptimizer": 10,
"maximumForOptimizer": 200,
"description": "Dimensionality of the feature space when approximating a kernel function.",
},
"random_state": {
"description": "Seed of pseudo-random number generator.",
"anyOf": [
{
"description": "RandomState used by np.random",
"enum": [None],
},
{"description": "Explicit seed.", "type": "integer"},
],
"default": None,
},
},
},
],
}
_input_fit_schema = {
"description": "Fit the model according to the given train dataset.",
"type": "object",
"required": ["X", "y"],
"properties": {
"X": {
"type": "array",
"description": "The outer array is over samples aka rows.",
"items": {
"type": "array",
"description": "The inner array is over features aka columns.",
"items": {"type": "number"},
},
},
"y": {
"description": "The classes.",
"anyOf": [
{"type": "array", "items": {"type": "number"}},
{"type": "array", "items": {"type": "string"}},
{"type": "array", "items": {"type": "boolean"}},
],
},
},
}
_input_predict_schema = {
"type": "object",
"required": ["X"],
"properties": {
"X": {
"type": "array",
"description": "The outer array is over samples aka rows.",
"items": {
"type": "array",
"description": "The inner array is over features aka columns.",
"items": {"type": "number"},
},
},
"n_jobs": {
"type": "integer",
"minimum": 0,
"default": 0,
"description": "Number of threads used to run inference. By default inference runs with maximum number of available threads.",
},
},
}
_output_predict_schema = {
"description": "The predicted classes.",
"anyOf": [
{"type": "array", "items": {"type": "number"}},
{"type": "array", "items": {"type": "string"}},
{"type": "array", "items": {"type": "boolean"}},
],
}
_input_decision_function_schema = {
"type": "object",
"properties": {
"X": {
"type": "array",
"description": "The outer array is over samples aka rows.",
"items": {
"type": "array",
"description": "The inner array is over features aka columns.",
"items": {"type": "number"},
},
},
"n_jobs": {
"type": "integer",
"minimum": 0,
"default": 0,
"description": "Number of threads used to run inference. By default inference runs with maximum number of available threads.",
},
},
}
_output_decision_function_schema = {
"type": "array",
"description": "The outer array is over samples aka rows.",
"items": {
"type": "array",
"description": "The inner array contains confidence scores corresponding to each class.",
"items": {"type": "number"},
},
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """`Support Vector Machine`_ from `Snap ML`_.
.. _`Support Vector Machine`: https://snapml.readthedocs.io/en/latest/#snapml.SupportVectorMachine
.. _`Snap ML`: https://www.zurich.ibm.com/snapml/
""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.snapml.snap_support_vector_machine.html",
"import_from": "snapml",
"type": "object",
"tags": {"pre": [], "op": ["estimator", "classifier"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
"input_decision_function": _input_decision_function_schema,
"output_decision_function": _output_decision_function_schema,
},
}
SnapSVMClassifier = lale.operators.make_operator(
_SnapSVMClassifierImpl, _combined_schemas
)
if snapml_version is not None and snapml_version > version.Version("1.8.0"): # type: ignore # noqa
from lale.schemas import Enum
SnapSVMClassifier = SnapSVMClassifier.customize_schema(
loss=Enum(
desc="""The loss function that will be used for training.""",
values=["hinge", "squared_hinge"],
default="hinge",
forOptimizer=True,
),
set_as_available=True,
)
lale.docstrings.set_docstrings(SnapSVMClassifier)
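# A minimal usage sketch, assuming snapml and scikit-learn are installed;
# the kernel and feature-map size below are illustrative only.
if __name__ == "__main__":
    from sklearn.datasets import load_breast_cancer
    X, y = load_breast_cancer(return_X_y=True)
    trained = SnapSVMClassifier(kernel="rbf", n_components=50).fit(X, y)
    print(trained.predict(X)[:5])
    # Confidence scores from the wrapped decision_function.
    print(trained.decision_function(X)[:2])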
| 12,175 | 37.531646 | 227 |
py
|
lale
|
lale-master/lale/lib/snapml/snap_decision_tree_regressor.py
|
# Copyright 2019,2021 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
import snapml # type: ignore
snapml_installed = True
except ImportError:
snapml_installed = False
import lale.datasets.data_schemas
import lale.docstrings
import lale.operators
class _SnapDecisionTreeRegressorImpl:
def __init__(self, **hyperparams):
assert (
snapml_installed
), """Your Python environment does not have snapml installed. Install using: pip install snapml"""
self._wrapped_model = snapml.SnapDecisionTreeRegressor(**hyperparams)
def fit(self, X, y, **fit_params):
X = lale.datasets.data_schemas.strip_schema(X)
y = lale.datasets.data_schemas.strip_schema(y)
self._wrapped_model.fit(X, y, **fit_params)
return self
def predict(self, X, **predict_params):
X = lale.datasets.data_schemas.strip_schema(X)
return self._wrapped_model.predict(X, **predict_params)
_hyperparams_schema = {
"description": "Hyperparameter schema.",
"allOf": [
{
"description": "This first sub-object lists all constructor arguments with their types, one at a time, omitting cross-argument constraints.",
"type": "object",
"relevantToOptimizer": ["max_depth", "max_features", "hist_nbins"],
"additionalProperties": False,
"properties": {
"criterion": {
"enum": ["mse"],
"default": "mse",
"description": "Function to measure the quality of a split.",
},
"splitter": {
"enum": ["best"],
"default": "best",
"description": "The strategy used to choose the split at each node.",
},
"max_depth": {
"anyOf": [
{
"type": "integer",
"minimum": 1,
"minimumForOptimizer": 3,
"maximumForOptimizer": 5,
},
{
"enum": [None],
"description": "Nodes are expanded until all leaves are pure or until all leaves contain less than min_samples_leaf samples.",
},
],
"default": None,
"description": "The maximum depth of the tree.",
},
"min_samples_leaf": {
"anyOf": [
{
"type": "integer",
"minimum": 1,
"forOptimizer": False,
"laleMaximum": "X/maxItems", # number of rows
"description": "Consider min_samples_leaf as the minimum number.",
},
{
"type": "number",
"minimum": 0.0,
"exclusiveMinimum": True,
"maximum": 0.5,
"description": "min_samples_leaf is a fraction and ceil(min_samples_leaf * n_samples) are the minimum number of samples for each node.",
},
],
"default": 1,
"description": "The minimum number of samples required to be at a leaf node.",
},
"max_features": {
"anyOf": [
{
"type": "integer",
"minimum": 1,
"forOptimizer": False,
"laleMaximum": "X/items/maxItems", # number of columns
"description": "Consider max_features features at each split.",
},
{
"type": "number",
"minimum": 0.0,
"exclusiveMinimum": True,
"maximum": 1.0,
"minimumForOptimizer": 0.1,
"maximumForOptimizer": 0.9,
"distribution": "uniform",
"description": "max_features is a fraction and int(max_features * n_features) features are considered at each split.",
},
{"enum": ["auto", "sqrt", "log2", None]},
],
"default": None,
"description": "The number of features to consider when looking for the best split.",
},
"random_state": {
"description": "Seed of pseudo-random number generator.",
"anyOf": [
{
"description": "RandomState used by np.random",
"enum": [None],
},
{"description": "Explicit seed.", "type": "integer"},
],
"default": None,
},
"n_jobs": {
"type": "integer",
"minimum": 1,
"default": 1,
"description": "Number of CPU threads to use.",
},
"use_histograms": {
"type": "boolean",
"default": True,
"description": "Use histogram-based splits rather than exact splits.",
},
"hist_nbins": {
"type": "integer",
"default": 256,
"minimum": 1,
"maximum": 256,
"minimumForOptimizer": 16,
"maximumForOptimizer": 256,
"description": "Number of histogram bins.",
},
"use_gpu": {
"type": "boolean",
"default": False,
"description": "Use GPU acceleration (only supported for histogram-based splits).",
},
"gpu_id": {
"type": "integer",
"default": 0,
"description": "Device ID of the GPU which will be used when GPU acceleration is enabled.",
},
"verbose": {
"type": "boolean",
"default": False,
"description": "If True, it prints debugging information while training. Warning: this will increase the training time. For performance evaluation, use verbose=False.",
},
},
},
{
"description": "GPU only supported for histogram-based splits.",
"anyOf": [
{"type": "object", "properties": {"use_gpu": {"enum": [False]}}},
{"type": "object", "properties": {"use_histograms": {"enum": [True]}}},
],
},
],
}
_input_fit_schema = {
"description": "Build a decision tree from the training set (X, y).",
"type": "object",
"required": ["X", "y"],
"properties": {
"X": {
"type": "array",
"description": "The outer array is over samples aka rows.",
"items": {
"type": "array",
"description": "The inner array is over features aka columns.",
"items": {"type": "number"},
},
},
"y": {
"description": "The regression target.",
"anyOf": [
{"type": "array", "items": {"type": "number"}},
],
},
"sample_weight": {
"anyOf": [
{"type": "array", "items": {"type": "number"}},
{"enum": [None], "description": "Samples are equally weighted."},
],
"description": "Sample weights.",
"default": None,
},
},
}
_input_predict_schema = {
"type": "object",
"required": ["X"],
"properties": {
"X": {
"type": "array",
"description": "The outer array is over samples aka rows.",
"items": {
"type": "array",
"description": "The inner array is over features aka columns.",
"items": {"type": "number"},
},
},
"n_jobs": {
"type": "integer",
"minimum": 0,
"default": 0,
"description": "Number of threads used to run inference. By default inference runs with maximum number of available threads.",
},
},
}
_output_predict_schema = {
"description": "The predicted values.",
"anyOf": [
{"type": "array", "items": {"type": "number"}},
],
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """`Decision tree Regressor`_ from `Snap ML`_.
.. _`Decision tree Regressor`: https://snapml.readthedocs.io/en/latest/#snapml.DecisionTreeRegressor
.. _`Snap ML`: https://www.zurich.ibm.com/snapml/
""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.snapml.snap_decision_tree_regressor.html",
"import_from": "snapml",
"type": "object",
"tags": {"pre": [], "op": ["estimator", "regressor"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
},
}
SnapDecisionTreeRegressor = lale.operators.make_operator(
_SnapDecisionTreeRegressorImpl, _combined_schemas
)
lale.docstrings.set_docstrings(SnapDecisionTreeRegressor)
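# A minimal usage sketch, assuming snapml and scikit-learn are installed;
# the synthetic data and hyperparameters below are illustrative only.
if __name__ == "__main__":
    from sklearn.datasets import make_regression
    X, y = make_regression(n_samples=100, n_features=4, random_state=0)
    # Histogram-based splits (the default) with a reduced bin count.
    trained = SnapDecisionTreeRegressor(max_depth=5, hist_nbins=64).fit(X, y)
    print(trained.predict(X[:5]))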
| 10,424 | 37.899254 | 188 |
py
|
lale
|
lale-master/lale/lib/snapml/snap_linear_regression.py
|
# Copyright 2019,2021 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
import snapml # type: ignore
snapml_installed = True
except ImportError:
snapml_installed = False
import lale.datasets.data_schemas
import lale.docstrings
import lale.operators
class _SnapLinearRegressionImpl:
def __init__(self, **hyperparams):
assert (
snapml_installed
), """Your Python environment does not have snapml installed. Install using: pip install snapml"""
if hyperparams.get("device_ids", None) is None:
hyperparams["device_ids"] = []
self._wrapped_model = snapml.LinearRegression(**hyperparams)
def fit(self, X, y, **fit_params):
X = lale.datasets.data_schemas.strip_schema(X)
y = lale.datasets.data_schemas.strip_schema(y)
self._wrapped_model.fit(X, y, **fit_params)
return self
def predict(self, X, **predict_params):
X = lale.datasets.data_schemas.strip_schema(X)
return self._wrapped_model.predict(X, **predict_params)
_hyperparams_schema = {
"description": "Hyperparameter schema.",
"allOf": [
{
"description": "This first sub-object lists all constructor arguments with their types, one at a time, omitting cross-argument constraints.",
"type": "object",
"relevantToOptimizer": ["fit_intercept", "regularizer", "max_iter"],
"additionalProperties": False,
"properties": {
"max_iter": {
"type": "integer",
"minimum": 1,
"minimumForOptimizer": 10,
"maximumForOptimizer": 1000,
"default": 100,
"description": "Maximum number of iterations used by the solver to converge.",
},
"regularizer": {
"type": "number",
"minimum": 0.0,
"default": 1.0,
"exclusiveMinimum": True,
"minimumForOptimizer": 1.0,
"maximumForOptimizer": 100.0,
"distribution": "uniform",
"description": "Larger regularization values imply stronger regularization.",
},
"use_gpu": {
"type": "boolean",
"default": False,
"description": "Use GPU Acceleration.",
},
"device_ids": {
"anyOf": [
{"description": "Use [0].", "enum": [None]},
{"type": "array", "items": {"type": "integer"}},
],
"default": None,
"description": "Device IDs of the GPUs which will be used when GPU acceleration is enabled.",
},
"dual": {
"type": "boolean",
"default": True,
"description": "Use dual formulation (rather than primal).",
},
"verbose": {
"type": "boolean",
"default": False,
"description": "If True, it prints the training cost, one per iteration. Warning: this will increase the training time. For performance evaluation, use verbose=False.",
},
"n_jobs": {
"type": "integer",
"minimum": 1,
"default": 1,
"description": "The number of threads used for running the training. The value of this parameter should be a multiple of 32 if the training is performed on GPU (use_gpu=True).",
},
"penalty": {
"enum": ["l1", "l2"],
"default": "l2",
"description": "The regularization / penalty type. Possible values are 'l2' for L2 regularization (LinearRegression) or 'l1' for L1 regularization (SparseLinearRegression). L1 regularization is possible only for the primal optimization problem (dual=False).",
},
"tol": {
"type": "number",
"minimum": 0.0,
"default": 0.001,
"exclusiveMinimum": True,
"description": "The tolerance parameter. Training will finish when maximum change in model coefficients is less than tol.",
},
"generate_training_history": {
"enum": ["summary", "full", None],
"default": None,
"description": "Determines the level of summary statistics that are generated during training.",
},
"privacy": {
"type": "boolean",
"default": False,
"description": "Train the model using a differentially private algorithm.",
},
"eta": {
"type": "number",
"minimum": 0.0,
"default": 0.3,
"exclusiveMinimum": True,
"description": "Learning rate for the differentially private training algorithm.",
},
"batch_size": {
"type": "integer",
"minimum": 1,
"default": 100,
"description": "Mini-batch size for the differentially private training algorithm.",
},
"privacy_epsilon": {
"type": "number",
"minimum": 0.0,
"default": 10.0,
"exclusiveMinimum": True,
"description": "Target privacy gaurantee. Learned model will be (privacy_epsilon, 0.01)-private.",
},
"grad_clip": {
"type": "number",
"minimum": 0.0,
"default": 1.0,
"description": "Gradient clipping parameter for the differentially private training algorithm.",
},
"fit_intercept": {
"type": "boolean",
"default": False,
"description": "Add bias term -- note, may affect speed of convergence, especially for sparse datasets.",
},
"intercept_scaling": {
"type": "number",
"minimum": 0.0,
"default": 1.0,
"exclusiveMinimum": True,
"description": "Scaling of bias term. The inclusion of a bias term is implemented by appending an additional feature to the dataset. This feature has a constant value, that can be set using this parameter.",
},
"normalize": {
"type": "boolean",
"default": False,
"description": "Normalize rows of dataset (recommended for fast convergence).",
},
"kernel": {
"enum": ["rbf", "linear"],
"default": "linear",
"description": "Approximate feature map of a specified kernel function.",
},
"gamma": {
"type": "number",
"minimum": 0.0,
"default": 1.0,
"exclusiveMinimum": True,
"description": "Parameter of RBF kernel: exp(-gamma * x^2).",
},
"n_components": {
"type": "integer",
"minimum": 1,
"default": 100,
"description": "Dimensionality of the feature space when approximating a kernel function.",
},
"random_state": {
"description": "Seed of pseudo-random number generator.",
"anyOf": [
{
"description": "RandomState used by np.random",
"enum": [None],
},
{"description": "Explicit seed.", "type": "integer"},
],
"default": None,
},
},
},
{
"description": "L1 regularization is supported only for primal optimization problems.",
"anyOf": [
{"type": "object", "properties": {"penalty": {"enum": ["l2"]}}},
{"type": "object", "properties": {"dual": {"enum": [False]}}},
],
},
{
"description": "Privacy only supported for primal objective functions.",
"anyOf": [
{"type": "object", "properties": {"privacy": {"enum": [False]}}},
{"type": "object", "properties": {"dual": {"enum": [False]}}},
],
},
{
"description": "Privacy only supported for L2-regularized objective functions.",
"anyOf": [
{"type": "object", "properties": {"privacy": {"enum": [False]}}},
{"type": "object", "properties": {"penalty": {"enum": ["l2"]}}},
],
},
{
"description": "Privacy not supported with fit_intercept=True.",
"anyOf": [
{"type": "object", "properties": {"privacy": {"enum": [False]}}},
{"type": "object", "properties": {"fit_intercept": {"enum": [False]}}},
],
},
],
}
_input_fit_schema = {
"description": "Fit the model according to the given train dataset.",
"type": "object",
"required": ["X", "y"],
"properties": {
"X": {
"type": "array",
"description": "The outer array is over samples aka rows.",
"items": {
"type": "array",
"description": "The inner array is over features aka columns.",
"items": {"type": "number"},
},
},
"y": {
"description": "The regression target.",
"anyOf": [
{"type": "array", "items": {"type": "number"}},
],
},
},
}
_input_predict_schema = {
"type": "object",
"required": ["X"],
"properties": {
"X": {
"type": "array",
"description": "The outer array is over samples aka rows.",
"items": {
"type": "array",
"description": "The inner array is over features aka columns.",
"items": {"type": "number"},
},
},
"n_jobs": {
"type": "integer",
"minimum": 0,
"default": 0,
"description": "Number of threads used to run inference. By default inference runs with maximum number of available threads.",
},
},
}
_output_predict_schema = {
"description": "The predicted values.",
"anyOf": [
{"type": "array", "items": {"type": "number"}},
],
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """`Linear Regression`_ from `Snap ML`_.
.. _`Linear Regression`: https://snapml.readthedocs.io/en/latest/#snapml.LinearRegression
.. _`Snap ML`: https://www.zurich.ibm.com/snapml/
""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.snapml.snap_linear_regression.html",
"import_from": "snapml",
"type": "object",
"tags": {"pre": [], "op": ["estimator", "classifier"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
},
}
SnapLinearRegression = lale.operators.make_operator(
_SnapLinearRegressionImpl, _combined_schemas
)
lale.docstrings.set_docstrings(SnapLinearRegression)
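# A minimal usage sketch, assuming snapml and scikit-learn are installed;
# the hyperparameter choices below are illustrative only.
if __name__ == "__main__":
    from sklearn.datasets import make_regression
    X, y = make_regression(n_samples=100, n_features=4, random_state=0)
    # L1 regularization requires the primal formulation (dual=False),
    # per the cross-argument constraint in the schema above.
    trained = SnapLinearRegression(penalty="l1", dual=False).fit(X, y)
    print(trained.predict(X[:5]))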
| 12,547 | 39.74026 | 279 |
py
|