# repo: lale
# file: lale-master/lale/lib/snapml/snap_boosting_machine_classifier.py
# Copyright 2019,2021 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
try:
import snapml # type: ignore
snapml_version = version.parse(getattr(snapml, "__version__"))
except ImportError:
snapml_version = None
import lale.datasets.data_schemas
import lale.docstrings
import lale.operators
class _SnapBoostingMachineClassifierImpl:
def __init__(self, **hyperparams):
assert (
snapml_version is not None
), """Your Python environment does not have snapml installed. Install using: pip install snapml"""
if (
snapml_version > version.Version("1.7.8")
and hyperparams.get("gpu_ids", None) is None
):
hyperparams["gpu_ids"] = [0]
self._wrapped_model = snapml.SnapBoostingMachineClassifier(**hyperparams)
def fit(self, X, y, **fit_params):
X = lale.datasets.data_schemas.strip_schema(X)
y = lale.datasets.data_schemas.strip_schema(y)
self._wrapped_model.fit(X, y, **fit_params)
return self
def predict(self, X, **predict_params):
X = lale.datasets.data_schemas.strip_schema(X)
return self._wrapped_model.predict(X, **predict_params)
def predict_proba(self, X, **predict_proba_params):
X = lale.datasets.data_schemas.strip_schema(X)
return self._wrapped_model.predict_proba(X, **predict_proba_params)
_hyperparams_schema = {
"description": "Hyperparameter schema.",
"allOf": [
{
"description": "This first sub-object lists all constructor arguments with their types, one at a time, omitting cross-argument constraints.",
"type": "object",
"required": [
"num_round",
"learning_rate",
"random_state",
"colsample_bytree",
"subsample",
"verbose",
"lambda_l2",
"early_stopping_rounds",
"compress_trees",
"base_score",
"class_weight",
"max_depth",
"min_max_depth",
"max_max_depth",
"n_jobs",
"use_histograms",
"hist_nbins",
"use_gpu",
"gpu_id",
"tree_select_probability",
"regularizer",
"fit_intercept",
"gamma",
"n_components",
],
"relevantToOptimizer": [
"num_round",
"learning_rate",
"min_max_depth",
"max_max_depth",
],
"additionalProperties": False,
"properties": {
"num_round": {
"type": "integer",
"minimum": 1,
"minimumForOptimizer": 100,
"maximumForOptimizer": 1000,
"default": 100,
"description": "Number of boosting iterations.",
},
"learning_rate": {
"type": "number",
"minimum": 0.0,
"exclusiveMinimum": True,
"minimumForOptimizer": 0.01,
"maximumForOptimizer": 0.3,
"distribution": "uniform",
"default": 0.1,
"description": "Learning rate / shrinkage factor.",
},
"random_state": {
"type": "integer",
"default": 0,
"description": "Random seed.",
},
"colsample_bytree": {
"type": "number",
"minimum": 0.0,
"exclusiveMinimum": True,
"maximum": 1.0,
"default": 1.0,
"description": "Fraction of feature columns used at each boosting iteration.",
},
"subsample": {
"type": "number",
"minimum": 0.0,
"exclusiveMinimum": True,
"maximum": 1.0,
"default": 1.0,
"description": "Fraction of training examples used at each boosting iteration.",
},
"verbose": {
"type": "boolean",
"default": False,
"description": "Print off information during training.",
},
"lambda_l2": {
"type": "number",
"minimum": 0.0,
"default": 0.0,
"description": "L2-reguralization penalty used during tree-building.",
},
"early_stopping_rounds": {
"type": "integer",
"minimum": 1,
"default": 10,
"description": "When a validation set is provided, training will stop if the validation loss does not increase after a fixed number of rounds.",
},
"compress_trees": {
"type": "boolean",
"default": False,
"description": "Compress trees after training for fast inference.",
},
"base_score": {
"anyOf": [
{
"type": "number",
},
{"enum": [None]},
],
"default": None,
"description": "Base score to initialize boosting algorithm. If None then the algorithm will initialize the base score to be the the logit of the probability of the positive class.",
},
"class_weight": {
"enum": ["balanced", None],
"default": None,
"description": "If set to 'balanced' samples weights will be applied to account for class imbalance, otherwise no sample weights will be used.",
},
"max_depth": {
"anyOf": [
{
"type": "integer",
"minimum": 1,
},
{"enum": [None]},
],
"default": None,
"description": "If set, will set min_max_depth = max_depth = max_max_depth",
},
"min_max_depth": {
"type": "integer",
"minimum": 1,
"minimumForOptimizer": 1,
"maximumForOptimizer": 5,
"default": 1,
"description": "Minimum max_depth of trees in the ensemble.",
},
"max_max_depth": {
"type": "integer",
"minimum": 1,
"minimumForOptimizer": 5,
"maximumForOptimizer": 10,
"default": 5,
"description": "Maximum max_depth of trees in the ensemble.",
},
"n_jobs": {
"type": "integer",
"minimum": 1,
"default": 1,
"description": "Number of threads to use during training.",
},
"use_histograms": {
"type": "boolean",
"default": True,
"description": "Use histograms to accelerate tree-building.",
},
"hist_nbins": {
"type": "integer",
"default": 256,
"description": "Number of histogram bins.",
},
"use_gpu": {
"type": "boolean",
"default": False,
"description": "Use GPU for tree-building.",
},
"gpu_id": {
"type": "integer",
"default": 0,
"description": "Device ID for GPU to use during training.",
},
"tree_select_probability": {
"type": "number",
"minimum": 0.0,
"maximum": 1.0,
"default": 1.0,
"description": "Probability of selecting a tree (rather than a kernel ridge regressor) at each boosting iteration.",
},
"regularizer": {
"type": "number",
"minimum": 0.0,
"default": 1.0,
"description": "L2-regularization penality for the kernel ridge regressor.",
},
"fit_intercept": {
"type": "boolean",
"default": False,
"description": "Include intercept term in the kernel ridge regressor.",
},
"gamma": {
"type": "number",
"minimum": 0.0,
"default": 1.0,
"description": "Guassian kernel parameter.",
},
"n_components": {
"type": "integer",
"minimum": 1,
"default": 10,
"description": "Number of components in the random projection.",
},
},
},
{
"description": "GPU only supported for histogram-based splits.",
"anyOf": [
{"type": "object", "properties": {"use_gpu": {"enum": [False]}}},
{"type": "object", "properties": {"use_histograms": {"enum": [True]}}},
],
},
],
}
_input_fit_schema = {
"description": "Build a boosted ensemble from the training set (X, y).",
"type": "object",
"required": ["X", "y"],
"properties": {
"X": {
"type": "array",
"description": "The outer array is over samples aka rows.",
"items": {
"type": "array",
"description": "The inner array is over features aka columns.",
"items": {"type": "number"},
},
},
"y": {
"description": "The classes.",
"anyOf": [
{"type": "array", "items": {"type": "number"}},
{"type": "array", "items": {"type": "string"}},
{"type": "array", "items": {"type": "boolean"}},
],
},
"sample_weight": {
"anyOf": [
{"type": "array", "items": {"type": "number"}},
{"enum": [None], "description": "Samples are equally weighted."},
],
"description": "Sample weights.",
"default": None,
},
"X_val": {
"anyOf": [
{
"type": "array",
"description": "The outer array is over validation samples aka rows.",
"items": {
"type": "array",
"description": "The inner array is over features aka columns.",
"items": {"type": "number"},
},
},
{"enum": [None], "description": "No validation set provided."},
],
"default": None,
},
"y_val": {
"anyOf": [
{"type": "array", "items": {"type": "number"}},
{"type": "array", "items": {"type": "string"}},
{"type": "array", "items": {"type": "boolean"}},
{"enum": [None], "description": "No validation set provided."},
],
"description": "The validation classes.",
"default": None,
},
"sample_weight_val": {
"anyOf": [
{"type": "array", "items": {"type": "number"}},
{
"enum": [None],
"description": "Validation samples are equally weighted.",
},
],
"description": "Validation sample weights.",
"default": None,
},
},
}
_input_predict_schema = {
"type": "object",
"required": ["X"],
"properties": {
"X": {
"type": "array",
"description": "The outer array is over samples aka rows.",
"items": {
"type": "array",
"description": "The inner array is over features aka columns.",
"items": {"type": "number"},
},
},
"n_jobs": {
"type": "integer",
"minimum": 1,
"default": 1,
"description": "Number of threads used to run inference.",
},
},
}
_output_predict_schema = {
"description": "The predicted classes.",
"anyOf": [
{"type": "array", "items": {"type": "number"}},
{"type": "array", "items": {"type": "string"}},
{"type": "array", "items": {"type": "boolean"}},
],
}
_input_predict_proba_schema = {
"type": "object",
"properties": {
"X": {
"type": "array",
"description": "The outer array is over samples aka rows.",
"items": {
"type": "array",
"description": "The inner array is over features aka columns.",
"items": {"type": "number"},
},
},
"n_jobs": {
"type": "integer",
"minimum": 1,
"default": 1,
"description": "Number of threads used to run inference.",
},
},
}
_output_predict_proba_schema = {
"type": "array",
"description": "The outer array is over samples aka rows.",
"items": {
"type": "array",
"description": "The inner array contains probabilities corresponding to each class.",
"items": {"type": "number"},
},
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """`Boosting machine classifier`_ from `Snap ML`_. It can be used for binary classification problems.
.. _`Boosting machine classifier`: https://snapml.readthedocs.io/en/latest/#snapml.BoostingMachineClassifier
.. _`Snap ML`: https://www.zurich.ibm.com/snapml/
""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.snapml.snap_boosting_machine_classifier.html",
"import_from": "snapml",
"type": "object",
"tags": {"pre": [], "op": ["estimator", "classifier"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
"input_predict_proba": _input_predict_proba_schema,
"output_predict_proba": _output_predict_proba_schema,
},
}
SnapBoostingMachineClassifier = lale.operators.make_operator(
_SnapBoostingMachineClassifierImpl, _combined_schemas
)
if snapml_version is not None and snapml_version > version.Version("1.7.8"): # type: ignore # noqa
SnapBoostingMachineClassifier = SnapBoostingMachineClassifier.customize_schema(
gpu_id=None,
gpu_ids={
"description": "Device IDs of the GPUs which will be used when GPU acceleration is enabled.",
"anyOf": [
{"type": "array", "items": {"type": "integer"}},
{"enum": [None], "description": "Use [0]."},
],
"default": None,
"forOptimizer": False,
},
set_as_available=True,
)
lale.docstrings.set_docstrings(SnapBoostingMachineClassifier)
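# Usage sketch (added for illustration, not part of the original module): how
# the operator defined above is typically used. Assumes snapml and
# scikit-learn are installed; the dataset and hyperparameter values are
# illustrative only.
if __name__ == "__main__":
    from sklearn.datasets import load_breast_cancer
    from sklearn.model_selection import train_test_split

    X, y = load_breast_cancer(return_X_y=True)
    train_X, test_X, train_y, test_y = train_test_split(X, y, random_state=42)
    # Bind hyperparameters to obtain a trainable operator, then use it like a
    # scikit-learn estimator.
    trainable = SnapBoostingMachineClassifier(num_round=100, learning_rate=0.1)
    trained = trainable.fit(train_X, train_y)
    predictions = trained.predict(test_X)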
# repo: lale
# file: lale-master/lale/lib/snapml/snap_logistic_regression.py
# Copyright 2019,2021 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
import snapml # type: ignore
snapml_installed = True
except ImportError:
snapml_installed = False
import lale.datasets.data_schemas
import lale.docstrings
import lale.operators
class _SnapLogisticRegressionImpl:
def __init__(self, **hyperparams):
assert (
snapml_installed
), """Your Python environment does not have snapml installed. Install using: pip install snapml"""
if hyperparams.get("device_ids", None) is None:
hyperparams["device_ids"] = []
self._wrapped_model = snapml.SnapLogisticRegression(**hyperparams)
def fit(self, X, y, **fit_params):
X = lale.datasets.data_schemas.strip_schema(X)
y = lale.datasets.data_schemas.strip_schema(y)
self._wrapped_model.fit(X, y, **fit_params)
return self
def predict(self, X, **predict_params):
X = lale.datasets.data_schemas.strip_schema(X)
return self._wrapped_model.predict(X, **predict_params)
def predict_proba(self, X, **predict_proba_params):
X = lale.datasets.data_schemas.strip_schema(X)
return self._wrapped_model.predict_proba(X, **predict_proba_params)
_hyperparams_schema = {
"description": "Hyperparameter schema.",
"allOf": [
{
"description": "This first sub-object lists all constructor arguments with their types, one at a time, omitting cross-argument constraints.",
"type": "object",
"relevantToOptimizer": [
"fit_intercept",
"regularizer",
"max_iter",
],
"additionalProperties": False,
"properties": {
"max_iter": {
"type": "integer",
"minimum": 1,
"minimumForOptimizer": 10,
"maximumForOptimizer": 1000,
"default": 100,
"description": "Maximum number of iterations used by the solver to converge.",
},
"regularizer": {
"type": "number",
"minimum": 0.0,
"default": 1.0,
"exclusiveMinimum": True,
"minimumForOptimizer": 1.0,
"maximumForOptimizer": 100.0,
"distribution": "uniform",
"description": "Larger regularization values imply stronger regularization.",
},
"use_gpu": {
"type": "boolean",
"default": False,
"description": "Use GPU Acceleration.",
},
"device_ids": {
"anyOf": [
{"description": "Use [0].", "enum": [None]},
{"type": "array", "items": {"type": "integer"}},
],
"default": None,
"description": "Device IDs of the GPUs which will be used when GPU acceleration is enabled.",
},
"class_weight": {
"enum": ["balanced", None],
"default": None,
"description": "If set to 'balanced' samples weights will be applied to account for class imbalance, otherwise no sample weights will be used.",
},
"dual": {
"type": "boolean",
"default": True,
"description": "Use dual formulation (rather than primal).",
},
"verbose": {
"type": "boolean",
"default": False,
"description": "If True, it prints the training cost, one per iteration. Warning: this will increase the training time. For performance evaluation, use verbose=False.",
},
"n_jobs": {
"type": "integer",
"minimum": 1,
"default": 1,
"description": "The number of threads used for running the training. The value of this parameter should be a multiple of 32 if the training is performed on GPU (use_gpu=True).",
},
"penalty": {
"enum": ["l1", "l2"],
"default": "l2",
"description": "The regularization / penalty type. Possible values are 'l2' for L2 regularization (LogisticRegression) or 'l1' for L1 regularization (SparseLogisticRegression). L1 regularization is possible only for the primal optimization problem (dual=False).",
},
"tol": {
"type": "number",
"minimum": 0.0,
"default": 0.001,
"exclusiveMinimum": True,
"description": "The tolerance parameter. Training will finish when maximum change in model coefficients is less than tol.",
},
"generate_training_history": {
"enum": ["summary", "full", None],
"default": None,
"description": "Determines the level of summary statistics that are generated during training.",
},
"privacy": {
"type": "boolean",
"default": False,
"description": "Train the model using a differentially private algorithm.",
},
"eta": {
"type": "number",
"minimum": 0.0,
"default": 0.3,
"exclusiveMinimum": True,
"description": "Learning rate for the differentially private training algorithm.",
},
"batch_size": {
"type": "integer",
"minimum": 1,
"default": 100,
"description": "Mini-batch size for the differentially private training algorithm.",
},
"privacy_epsilon": {
"type": "number",
"minimum": 0.0,
"default": 10.0,
"exclusiveMinimum": True,
"description": "Target privacy gaurantee. Learned model will be (privacy_epsilon, 0.01)-private.",
},
"grad_clip": {
"type": "number",
"minimum": 0.0,
"default": 1.0,
"description": "Gradient clipping parameter for the differentially private training algorithm.",
},
"fit_intercept": {
"type": "boolean",
"default": True,
"transient": "alwaysPrint", # since default differs from signature
"description": "Add bias term -- note, may affect speed of convergence, especially for sparse datasets.",
},
"intercept_scaling": {
"type": "number",
"minimum": 0.0,
"default": 1.0,
"exclusiveMinimum": True,
"description": "Scaling of bias term. The inclusion of a bias term is implemented by appending an additional feature to the dataset. This feature has a constant value, that can be set using this parameter.",
},
"normalize": {
"type": "boolean",
"default": True,
"transient": "alwaysPrint", # since default differs from signature
"description": "Normalize rows of dataset (recommended for fast convergence).",
},
"kernel": {
"enum": ["rbf", "linear"],
"default": "linear",
"description": "Approximate feature map of a specified kernel function.",
},
"gamma": {
"type": "number",
"minimum": 0.0,
"default": 1.0,
"exclusiveMinimum": True,
"description": "Parameter of RBF kernel: exp(-gamma * x^2).",
},
"n_components": {
"type": "integer",
"minimum": 1,
"default": 100,
"description": "Dimensionality of the feature space when approximating a kernel function.",
},
"random_state": {
"description": "Seed of pseudo-random number generator.",
"anyOf": [
{
"description": "RandomState used by np.random",
"enum": [None],
},
{"description": "Explicit seed.", "type": "integer"},
],
"default": None,
},
},
},
{
"description": "L1 regularization is supported only for primal optimization problems.",
"anyOf": [
{"type": "object", "properties": {"penalty": {"enum": ["l2"]}}},
{"type": "object", "properties": {"dual": {"enum": [False]}}},
],
},
{
"description": "Privacy only supported for primal objective functions.",
"anyOf": [
{"type": "object", "properties": {"privacy": {"enum": [False]}}},
{"type": "object", "properties": {"dual": {"enum": [False]}}},
],
},
{
"description": "Privacy only supported for L2-regularized objective functions.",
"anyOf": [
{"type": "object", "properties": {"privacy": {"enum": [False]}}},
{"type": "object", "properties": {"penalty": {"enum": ["l2"]}}},
],
},
{
"description": "Privacy not supported with fit_intercept=True.",
"anyOf": [
{"type": "object", "properties": {"privacy": {"enum": [False]}}},
{"type": "object", "properties": {"fit_intercept": {"enum": [False]}}},
],
},
],
}
_input_fit_schema = {
"description": "Fit the model according to the given train dataset.",
"type": "object",
"required": ["X", "y"],
"properties": {
"X": {
"type": "array",
"description": "The outer array is over samples aka rows.",
"items": {
"type": "array",
"description": "The inner array is over features aka columns.",
"items": {"type": "number"},
},
},
"y": {
"description": "The classes.",
"anyOf": [
{"type": "array", "items": {"type": "number"}},
{"type": "array", "items": {"type": "string"}},
{"type": "array", "items": {"type": "boolean"}},
],
},
},
}
_input_predict_schema = {
"type": "object",
"required": ["X"],
"properties": {
"X": {
"type": "array",
"description": "The outer array is over samples aka rows.",
"items": {
"type": "array",
"description": "The inner array is over features aka columns.",
"items": {"type": "number"},
},
},
"n_jobs": {
"type": "integer",
"minimum": 0,
"default": 0,
"description": "Number of threads used to run inference. By default inference runs with maximum number of available threads.",
},
},
}
_output_predict_schema = {
"description": "The predicted classes.",
"anyOf": [
{"type": "array", "items": {"type": "number"}},
{"type": "array", "items": {"type": "string"}},
{"type": "array", "items": {"type": "boolean"}},
],
}
_input_predict_proba_schema = {
"type": "object",
"properties": {
"X": {
"type": "array",
"description": "The outer array is over samples aka rows.",
"items": {
"type": "array",
"description": "The inner array is over features aka columns.",
"items": {"type": "number"},
},
},
"n_jobs": {
"type": "integer",
"minimum": 0,
"default": 0,
"description": "Number of threads used to run inference. By default inference runs with maximum number of available threads.",
},
},
}
_output_predict_proba_schema = {
"type": "array",
"description": "The outer array is over samples aka rows.",
"items": {
"type": "array",
"description": "The inner array contains probabilities corresponding to each class.",
"items": {"type": "number"},
},
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """`Logistic Regression`_ from `Snap ML`_.
.. _`Logistic Regression`: https://snapml.readthedocs.io/en/latest/#snapml.LogisticRegression
.. _`Snap ML`: https://www.zurich.ibm.com/snapml/
""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.snapml.snap_logistic_regression.html",
"import_from": "snapml",
"type": "object",
"tags": {"pre": [], "op": ["estimator", "classifier"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
"input_predict_proba": _input_predict_proba_schema,
"output_predict_proba": _output_predict_proba_schema,
},
}
SnapLogisticRegression = lale.operators.make_operator(
_SnapLogisticRegressionImpl, _combined_schemas
)
lale.docstrings.set_docstrings(SnapLogisticRegression)
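# Usage sketch (added for illustration, not part of the original module):
# composing the operator above into a lale pipeline with the >> combinator.
# Assumes snapml and scikit-learn are installed; the choice of StandardScaler
# and the hyperparameter values are illustrative only.
if __name__ == "__main__":
    from sklearn.datasets import load_breast_cancer
    from sklearn.model_selection import train_test_split

    from lale.lib.sklearn import StandardScaler

    X, y = load_breast_cancer(return_X_y=True)
    train_X, test_X, train_y, test_y = train_test_split(X, y, random_state=42)
    pipeline = StandardScaler() >> SnapLogisticRegression(regularizer=10.0, max_iter=200)
    trained = pipeline.fit(train_X, train_y)
    probabilities = trained.predict_proba(test_X)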
# repo: lale
# file: lale-master/lale/lib/lightgbm/lgbm_classifier.py
# Copyright 2019-2022 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import lale.docstrings
import lale.helpers
import lale.operators
try:
import lightgbm
import lightgbm.sklearn
lightgbm_installed = True
except ImportError:
lightgbm_installed = False
if TYPE_CHECKING:
import lightgbm # type: ignore
class _LGBMClassifierImpl:
def __init__(self, **hyperparams):
assert lightgbm_installed, """Your Python environment does not have lightgbm installed. You can install it with
pip install lightgbm
or with
pip install 'lale[full]'"""
self._hyperparams = hyperparams
self._wrapped_model = lightgbm.sklearn.LGBMClassifier(**hyperparams)
def fit(self, X, y=None, **fit_params):
if X.shape[0] * self._wrapped_model.subsample < 1.0:
self._wrapped_model.subsample = 1.001 / X.shape[0]
try:
self._wrapped_model.fit(X, y, **fit_params)
except Exception as e:
raise RuntimeError(str(self._hyperparams)) from e
return self
def partial_fit(self, X, y, **fit_params):
fit_params = lale.helpers.dict_without(fit_params, "classes")
if self._wrapped_model.__sklearn_is_fitted__():
booster = self._wrapped_model.booster_
fit_params = {**fit_params, "init_model": booster}
return self.fit(X, y, **fit_params)
def predict(self, X, **predict_params):
return self._wrapped_model.predict(X, **predict_params)
def predict_proba(self, X):
return self._wrapped_model.predict_proba(X)
def score(self, X, y):
from sklearn.metrics import accuracy_score
y_pred = self.predict(X)
return accuracy_score(y, y_pred)
_hyperparams_schema = {
"description": "LightGBM classifier. (https://lightgbm.readthedocs.io/en/latest/Python-API.html#scikit-learn-api)",
"allOf": [
{
"type": "object",
"required": [
"boosting_type",
"max_depth",
"learning_rate",
"n_estimators",
"min_child_samples",
"subsample",
"subsample_freq",
],
"relevantToOptimizer": [
"boosting_type",
"num_leaves",
"learning_rate",
"n_estimators",
"min_child_weight",
"min_child_samples",
"subsample",
"subsample_freq",
"colsample_bytree",
"reg_alpha",
"reg_lambda",
],
"additionalProperties": False,
"properties": {
"boosting_type": {
"anyOf": [
{
"enum": ["gbdt"],
"description": "Traditional Gradient Boosting Decision Tree.",
},
{
"enum": ["dart"],
"description": "Dropouts meet Multiple Additive Regression Trees.",
},
{
"enum": ["goss"],
"forOptimizer": False,
"description": "Gradient-based One-Side Sampling.",
},
{
"enum": ["rf"],
"forOptimizer": False,
"description": "Random Forest.",
},
],
"default": "gbdt",
},
"num_leaves": {
"anyOf": [
{"type": "integer", "forOptimizer": False},
{"enum": [2, 4, 8, 32, 64, 128, 16]},
],
"default": 31,
"description": "Maximum tree leaves for base learners",
},
"max_depth": {
"anyOf": [
{
"type": "integer",
"minimumForOptimizer": 3,
"maximumForOptimizer": 5,
}
],
"default": -1,
"description": "Maximum tree depth for base learners, <=0 means no limit",
},
"learning_rate": {
"type": "number",
"minimumForOptimizer": 0.02,
"maximumForOptimizer": 1.0,
"distribution": "loguniform",
"default": 0.1,
"description": "Boosting learning rate.",
},
"n_estimators": {
"type": "integer",
"minimumForOptimizer": 50,
"maximumForOptimizer": 1000,
"distribution": "uniform",
"default": 200,
"description": "Number of boosted trees to fit.",
},
"subsample_for_bin": {
"type": "integer",
"default": 200000,
"description": "Number of samples for constructing bins.",
},
"objective": {
"anyOf": [
{"type": "object"},
{"enum": ["binary", "multiclass", None]},
],
"default": None,
"description": "Specify the learning task and the corresponding learning objective or a custom objective function to be used",
},
"class_weight": {
"anyOf": [{"type": "object"}, {"enum": ["balanced", None]}],
"default": None,
"description": "Weights associated with classes",
},
"min_split_gain": {
"type": "number",
"default": 0.0,
"description": "Minimum loss reduction required to make a further partition on a leaf node of the tree.",
},
"min_child_weight": {
"type": "number",
"minimumForOptimizer": 0.0001,
"maximumForOptimizer": 0.01,
"default": 1e-3,
"description": "Minimum sum of instance weight (hessian) needed in a child (leaf).",
},
"min_child_samples": {
"type": "integer",
"minimumForOptimizer": 5,
"maximumForOptimizer": 30,
"distribution": "uniform",
"default": 20,
"description": "Minimum number of data needed in a child (leaf).",
},
"subsample": {
"type": "number",
"minimum": 0.0,
"maximum": 1.0,
"exclusiveMinimum": True,
"minimumForOptimizer": 0.01,
"maximumForOptimizer": 1.0,
"distribution": "uniform",
"default": 1.0,
"description": "Subsample ratio of the training instance.",
},
"subsample_freq": {
"type": "integer",
"minimumForOptimizer": 0,
"maximumForOptimizer": 5,
"distribution": "uniform",
"default": 0,
"description": "Frequence of subsample, <=0 means no enable.",
},
"colsample_bytree": {
"type": "number",
"default": 1.0,
"minimumForOptimizer": 0.01,
"maximumForOptimizer": 1.0,
"description": "Subsample ratio of columns when constructing each tree.",
},
"reg_alpha": {
"type": "number",
"minimumForOptimizer": 0.0,
"maximumForOptimizer": 1.0,
"default": 0.0,
"description": "L1 regularization term on weights.",
},
"reg_lambda": {
"type": "number",
"minimumForOptimizer": 0.0,
"maximumForOptimizer": 1.0,
"default": 0.0,
"description": "L2 regularization term on weights.",
},
"random_state": {
"anyOf": [
{"type": "integer"},
{"laleType": "numpy.random.RandomState"},
{"enum": [None]},
],
"default": None,
"description": "Random number seed. If None, default seeds in C++ code will be used.",
},
"n_jobs": {
"type": "integer",
"default": -1,
"description": "Number of parallel threads.",
},
"silent": {
"type": "boolean",
"default": True,
"description": "Whether to print messages while running boosting.",
},
"importance_type": {
"enum": ["split", "gain"],
"default": "split",
"description": "The type of feature importance to be filled into `feature_importances_`.",
},
},
},
{
"description": "boosting_type `rf` needs bagging (which means subsample_freq > 0 and subsample < 1.0)",
"anyOf": [
{
"type": "object",
"properties": {"boosting_type": {"not": {"enum": ["rf"]}}},
},
{
"allOf": [
{
"type": "object",
"properties": {"subsample_freq": {"not": {"enum": [0]}}},
},
{
"type": "object",
"properties": {"subsample": {"not": {"enum": [1.0]}}},
},
]
},
],
},
{
"description": "boosting_type `goss` cannot use bagging (which means subsample_freq = 0 and subsample = 1.0)",
"anyOf": [
{
"type": "object",
"properties": {"boosting_type": {"not": {"enum": ["goss"]}}},
},
{
"type": "object",
"properties": {"subsample_freq": {"enum": [0]}},
},
{
"type": "object",
"properties": {"subsample": {"enum": [1.0]}},
},
],
},
],
}
_input_fit_schema = {
"description": "Build a lightgbm model from the training set (X, y).",
"type": "object",
"required": ["X", "y"],
"properties": {
"X": {
"type": "array",
"items": {
"type": "array",
"items": {"type": "number"},
},
"description": "The input samples. Internally, it will be converted to",
},
"y": {
"anyOf": [
{"type": "array", "items": {"type": "number"}},
{"type": "array", "items": {"type": "string"}},
{"type": "array", "items": {"type": "boolean"}},
],
"description": "Labels",
},
"sample_weight": {
"anyOf": [
{
"type": "array",
"items": {"type": "number"},
},
{"enum": [None]},
],
"default": None,
"description": "Weights of training data.",
},
"init_score": {
"anyOf": [
{
"type": "array",
"items": {"type": "number"},
},
{"enum": [None]},
],
"default": None,
"description": "Init score of training data.",
},
"group": {"default": None, "description": "Group data of training data."},
"eval_set": {
"default": None,
"description": "A list of (X, y) tuple pairs to use as validation sets.",
},
"eval_names": {"default": None, "description": "Names of eval_set."},
"eval_sample_weight": {"default": None, "description": "Weights of eval data."},
"eval_class_weight": {
"anyOf": [{"type": "array", "items": {"type": "number"}}, {"enum": [None]}],
"default": None,
"description": "Class weights of eval data.",
},
"eval_init_score": {"default": None, "description": "Init score of eval data."},
"eval_group": {"default": None, "description": "Group data of eval data."},
"eval_metric": {
"anyOf": [
{"type": "array", "items": {"type": "string"}},
{"enum": ["logloss", None]},
{"laleType": "callable"},
],
"default": None,
"description": "string, list of strings, callable or None, optional (default=None).",
},
"early_stopping_rounds": {
"anyOf": [{"type": "integer"}, {"enum": [None]}],
"default": None,
"description": "Activates early stopping. The model will train until the validation score stops improving.",
},
"verbose": {
"anyOf": [{"type": "boolean"}, {"type": "integer"}],
"default": True,
"description": "Requires at least one evaluation data.",
},
"feature_name": {
"anyOf": [
{"type": "array", "items": {"type": "string"}},
{"enum": ["auto"]},
],
"default": "auto",
"description": "Feature names. If ‘auto’ and data is pandas DataFrame, data columns names are used.",
},
"categorical_feature": {
"anyOf": [
{
"type": "array",
"items": {"anyOf": [{"type": "string"}, {"type": "integer"}]},
},
{"enum": ["auto"]},
],
"default": "auto",
"description": "Categorical features. If list of int, interpreted as indices. If list of strings, interpreted as feature names.",
},
"callbacks": {
"anyOf": [{"type": "array", "items": {"type": "object"}}, {"enum": [None]}],
"default": None,
"description": "List of callback functions that are applied at each iteration. ",
},
},
}
_input_predict_schema = {
"description": "Return the predicted value for each sample.",
"additionalProperties": False,
"type": "object",
"properties": {
"X": {
"type": "array",
"items": {
"type": "array",
"items": {"type": "number"},
},
"description": " Input features matrix.",
},
"raw_score": {
"type": "boolean",
"default": False,
"description": "Whether to predict raw scores.",
},
"num_iteration": {
"anyOf": [{"type": "integer"}, {"enum": [None]}],
"default": None,
"description": "Limit number of iterations in the prediction.",
},
"pred_leaf": {
"type": "boolean",
"default": False,
"description": "Whether to predict leaf index.",
},
"pred_contrib": {
"type": "boolean",
"default": False,
"description": "Whether to predict feature contributions.",
},
},
}
_output_predict_schema = {
"description": "Return the predicted value for each sample.",
"anyOf": [
{"type": "array", "items": {"type": "number"}},
{"type": "array", "items": {"type": "string"}},
{"type": "array", "items": {"type": "boolean"}},
],
}
_input_predict_proba_schema = {
"description": "Return the predicted probability for each class for each sample.",
"type": "object",
"properties": {
"X": {
"type": "array",
"items": {
"type": "array",
"items": {"type": "number"},
},
"description": " Input features matrix.",
},
"raw_score": {
"type": "boolean",
"default": False,
"description": "Whether to predict raw scores.",
},
"num_iteration": {
"anyOf": [{"type": "integer"}, {"enum": [None]}],
"default": None,
"description": "Limit number of iterations in the prediction.",
},
"pred_leaf": {
"type": "boolean",
"default": False,
"description": "Whether to predict leaf index.",
},
"pred_contrib": {
"type": "boolean",
"default": False,
"description": "Whether to predict feature contributions.",
},
},
}
_output_predict_proba_schema = {
"description": "Return the predicted probability for each class for each sample.",
"type": "array",
"items": {
"type": "array",
"items": {"type": "number"},
},
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Combined schema for expected data and hyperparameters.",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.lightgbm.lgbm_classifier.html",
"import_from": "lightgbm.sklearn",
"type": "object",
"tags": {"pre": [], "op": ["estimator", "classifier"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_partial_fit": _input_fit_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
"input_predict_proba": _input_predict_proba_schema,
"output_predict_proba": _output_predict_proba_schema,
},
}
LGBMClassifier = lale.operators.make_operator(_LGBMClassifierImpl, _combined_schemas)
if lightgbm_installed:
from packaging import version
lightgbm_version = version.parse(getattr(lightgbm, "__version__"))
if lightgbm_version >= version.Version("3.3.0"):
# https://lightgbm.readthedocs.io/en/latest/pythonapi/lightgbm.LGBMClassifier.html#lightgbm.LGBMClassifier
LGBMClassifier = LGBMClassifier.customize_schema(
silent={
"description": "Whether to print messages while running boosting.",
"anyOf": [{"enum": ["warn"]}, {"type": "boolean"}],
"default": "warn",
},
set_as_available=True,
)
lale.docstrings.set_docstrings(LGBMClassifier)
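# Usage sketch (added for illustration, not part of the original module):
# tuning the operator above with lale's Hyperopt wrapper, which searches the
# ranges declared under relevantToOptimizer in the hyperparameter schema.
# Assumes lightgbm, hyperopt, and scikit-learn are installed; max_evals, cv,
# and the dataset are illustrative only.
if __name__ == "__main__":
    from sklearn.datasets import load_breast_cancer
    from sklearn.model_selection import train_test_split

    from lale.lib.lale import Hyperopt

    X, y = load_breast_cancer(return_X_y=True)
    train_X, test_X, train_y, test_y = train_test_split(X, y, random_state=42)
    # Passing the planned operator (no hyperparameters bound) lets Hyperopt
    # pick values within the schema's optimizer bounds.
    optimizer = Hyperopt(estimator=LGBMClassifier, max_evals=10, cv=3)
    trained = optimizer.fit(train_X, train_y)
    predictions = trained.predict(test_X)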
# repo: lale
# file: lale-master/lale/lib/lightgbm/__init__.py
# Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Scikit-learn compatible wrappers for LightGBM_ along with schemas to enable hyperparameter tuning.
.. _LightGBM: https://www.microsoft.com/en-us/research/project/lightgbm/
Operators:
==========
* `LGBMClassifier`_
* `LGBMRegressor`_
.. _`LGBMClassifier`: lale.lib.lightgbm.lgbm_classifier.html
.. _`LGBMRegressor`: lale.lib.lightgbm.lgbm_regressor.html
"""
from lale import register_lale_wrapper_modules
from .lgbm_classifier import LGBMClassifier as LGBMClassifier
from .lgbm_regressor import LGBMRegressor as LGBMRegressor
# Note: all imports should be done as
# from .xxx import XXX as XXX
# this ensures that pyright considers them to be publicly available
# and not private imports (this affects lale users that use pyright)
register_lale_wrapper_modules(__name__)
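# Usage note (added for illustration, not part of the original module): the
# wrappers re-exported above are imported from this package rather than from
# lightgbm directly, so that the attached schemas are available for validation
# and tuning. Assumes lightgbm is installed; the hyperparameter values below
# are illustrative only.
#
#     from lale.lib.lightgbm import LGBMClassifier, LGBMRegressor
#     clf = LGBMClassifier(n_estimators=100, learning_rate=0.1)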
# repo: lale
# file: lale-master/lale/lib/lightgbm/lgbm_regressor.py
# Copyright 2019-2022 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import lale.docstrings
import lale.helpers
import lale.operators
try:
import lightgbm
import lightgbm.sklearn
lightgbm_installed = True
except ImportError:
lightgbm_installed = False
if TYPE_CHECKING:
import lightgbm # type: ignore
class _LGBMRegressorImpl:
def __init__(self, **hyperparams):
assert lightgbm_installed, """Your Python environment does not have lightgbm installed. You can install it with
pip install lightgbm
or with
pip install 'lale[full]'"""
self._hyperparams = hyperparams
self._wrapped_model = lightgbm.sklearn.LGBMRegressor(**self._hyperparams)
def fit(self, X, y=None, **fit_params):
try:
self._wrapped_model.fit(X, y, **fit_params)
except Exception as e:
raise RuntimeError(str(self._hyperparams)) from e
return self
def partial_fit(self, X, y, **fit_params):
fit_params = lale.helpers.dict_without(fit_params, "classes")
if self._wrapped_model.__sklearn_is_fitted__():
booster = self._wrapped_model.booster_
fit_params = {**fit_params, "init_model": booster}
return self.fit(X, y, **fit_params)
def predict(self, X, **predict_params):
return self._wrapped_model.predict(X, **predict_params)
def score(self, X, y):
from sklearn.metrics import r2_score
y_pred = self.predict(X)
return r2_score(y, y_pred)
_hyperparams_schema = {
"description": "LightGBM classifier. (https://lightgbm.readthedocs.io/en/latest/Python-API.html#scikit-learn-api)",
"allOf": [
{
"type": "object",
"relevantToOptimizer": [
"boosting_type",
"num_leaves",
"learning_rate",
"n_estimators",
"min_child_weight",
"min_child_samples",
"subsample",
"subsample_freq",
"colsample_bytree",
"reg_alpha",
"reg_lambda",
],
"additionalProperties": False,
"properties": {
"boosting_type": {
"anyOf": [
{
"enum": ["gbdt"],
"description": "Traditional Gradient Boosting Decision Tree.",
},
{
"enum": ["dart"],
"description": "Dropouts meet Multiple Additive Regression Trees.",
},
{
"enum": ["goss"],
"forOptimizer": False,
"description": "Gradient-based One-Side Sampling.",
},
{
"enum": ["rf"],
"forOptimizer": False,
"description": "Random Forest.",
},
],
"default": "gbdt",
},
"num_leaves": {
"anyOf": [
{"type": "integer", "forOptimizer": False},
{"enum": [2, 4, 8, 32, 64, 128, 16]},
],
"default": 31,
"description": "Maximum tree leaves for base learners",
},
"max_depth": {
"anyOf": [
{
"type": "integer",
"minimumForOptimizer": 3,
"maximumForOptimizer": 5,
}
],
"default": -1,
"description": "Maximum tree depth for base learners, <=0 means no limit",
},
"learning_rate": {
"type": "number",
"minimumForOptimizer": 0.02,
"maximumForOptimizer": 1.0,
"distribution": "loguniform",
"default": 0.1,
"description": "Boosting learning rate.",
},
"n_estimators": {
"type": "integer",
"minimumForOptimizer": 50,
"maximumForOptimizer": 1000,
"distribution": "uniform",
"default": 200,
"description": "Number of boosted trees to fit.",
},
"subsample_for_bin": {
"type": "integer",
"default": 200000,
"description": "Number of samples for constructing bins.",
},
"objective": {
"anyOf": [{"type": "object"}, {"enum": ["regression", None]}],
"default": None,
"description": "Specify the learning task and the corresponding learning objective or a custom objective function to be used",
},
"class_weight": { # Should not apply to regression, but documentation includes it.
"anyOf": [{"type": "object"}, {"enum": ["balanced", None]}],
"default": None,
"description": "Weights associated with classes",
},
"min_split_gain": {
"type": "number",
"default": 0.0,
"description": "Minimum loss reduction required to make a further partition on a leaf node of the tree.",
},
"min_child_weight": {
"type": "number",
"minimumForOptimizer": 0.0001,
"maximumForOptimizer": 0.01,
"default": 1e-3,
"description": "Minimum sum of instance weight (hessian) needed in a child (leaf).",
},
"min_child_samples": {
"type": "integer",
"minimumForOptimizer": 5,
"maximumForOptimizer": 30,
"distribution": "uniform",
"default": 20,
"description": "Minimum number of data needed in a child (leaf).",
},
"subsample": {
"type": "number",
"minimumForOptimizer": 0.01,
"maximumForOptimizer": 1.0,
"distribution": "uniform",
"default": 1.0,
"description": "Subsample ratio of the training instance.",
},
"subsample_freq": {
"type": "integer",
"minimumForOptimizer": 0,
"maximumForOptimizer": 5,
"distribution": "uniform",
"default": 0,
"description": "Frequence of subsample, <=0 means no enable.",
},
"colsample_bytree": {
"type": "number",
"default": 1.0,
"minimumForOptimizer": 0.01,
"maximumForOptimizer": 1.0,
"description": "Subsample ratio of columns when constructing each tree.",
},
"reg_alpha": {
"type": "number",
"minimumForOptimizer": 0.0,
"maximumForOptimizer": 1.0,
"default": 0.0,
"description": "L1 regularization term on weights.",
},
"reg_lambda": {
"type": "number",
"minimumForOptimizer": 0.0,
"maximumForOptimizer": 1.0,
"default": 0.0,
"description": "L2 regularization term on weights.",
},
"random_state": {
"anyOf": [
{"type": "integer"},
{"laleType": "numpy.random.RandomState"},
{"enum": [None]},
],
"default": None,
"description": "Random number seed. If None, default seeds in C++ code will be used.",
},
"n_jobs": {
"type": "integer",
"default": -1,
"description": "Number of parallel threads.",
},
"silent": {
"type": "boolean",
"default": True,
"description": "Whether to print messages while running boosting.",
},
"importance_type": {
"enum": ["split", "gain"],
"default": "split",
"description": "The type of feature importance to be filled into `feature_importances_`.",
},
},
},
{
"description": "boosting_type `rf` needs bagging (which means subsample_freq > 0 and subsample < 1.0)",
"anyOf": [
{
"type": "object",
"properties": {"boosting_type": {"not": {"enum": ["rf"]}}},
},
{
"allOf": [
{
"type": "object",
"properties": {"subsample_freq": {"not": {"enum": [0]}}},
},
{
"type": "object",
"properties": {"subsample": {"not": {"enum": [1.0]}}},
},
]
},
],
},
{
"description": "boosting_type `goss` cannot use bagging (which means subsample_freq = 0 and subsample = 1.0)",
"anyOf": [
{
"type": "object",
"properties": {"boosting_type": {"not": {"enum": ["goss"]}}},
},
{
"type": "object",
"properties": {"subsample_freq": {"enum": [0]}},
},
{
"type": "object",
"properties": {"subsample": {"enum": [1.0]}},
},
],
},
],
}
_input_fit_schema = {
"description": "Build a lightgbm model from the training set (X, y).",
"type": "object",
"required": ["X", "y"],
"properties": {
"X": {
"type": "array",
"items": {
"type": "array",
"items": {"type": "number"},
},
"description": "The input samples. Internally, it will be converted to",
},
"y": {
"type": "array",
"items": {"type": "number"},
"description": "Target values real numbers",
},
"sample_weight": {
"anyOf": [
{
"type": "array",
"items": {"type": "number"},
},
{"enum": [None]},
],
"default": None,
"description": "Weights of training data.",
},
"init_score": {
"anyOf": [
{
"type": "array",
"items": {"type": "number"},
},
{"enum": [None]},
],
"default": None,
"description": "Init score of training data.",
},
"group": {"default": None, "description": "Group data of training data."},
"eval_set": {
"default": None,
"description": "A list of (X, y) tuple pairs to use as validation sets.",
},
"eval_names": {"default": None, "description": "Names of eval_set."},
"eval_sample_weight": {"default": None, "description": "Weights of eval data."},
"eval_class_weight": {
"anyOf": [{"type": "array", "items": {"type": "number"}}, {"enum": [None]}],
"default": None,
"description": "Class weights of eval data.",
},
"eval_init_score": {"default": None, "description": "Init score of eval data."},
"eval_group": {"default": None, "description": "Group data of eval data."},
"eval_metric": {
"anyOf": [
{"type": "array", "items": {"type": "string"}},
{"enum": ["l2", None]},
{"laleType": "callable"},
],
"default": None,
"description": "string, list of strings, callable or None, optional (default=None).",
},
"early_stopping_rounds": {
"anyOf": [{"type": "integer"}, {"enum": [None]}],
"default": None,
"description": "Activates early stopping. The model will train until the validation score stops improving.",
},
"verbose": {
"anyOf": [{"type": "boolean"}, {"type": "integer"}],
"default": True,
"description": "Requires at least one evaluation data.",
},
"feature_name": {
"anyOf": [
{"type": "array", "items": {"type": "string"}},
{"enum": ["auto"]},
],
"default": "auto",
"description": "Feature names. If ‘auto’ and data is pandas DataFrame, data columns names are used.",
},
"categorical_feature": {
"anyOf": [
{
"type": "array",
"items": {"anyOf": [{"type": "string"}, {"type": "integer"}]},
},
{"enum": ["auto"]},
],
"default": "auto",
"description": "Categorical features. If list of int, interpreted as indices. If list of strings, interpreted as feature names.",
},
"callbacks": {
"anyOf": [{"type": "array", "items": {"type": "object"}}, {"enum": [None]}],
"default": None,
"description": "List of callback functions that are applied at each iteration. ",
},
},
}
_input_predict_schema = {
"description": "Return the predicted value for each sample.",
"type": "object",
"properties": {
"X": {
"type": "array",
"items": {
"type": "array",
"items": {"type": "number"},
},
"description": " Input features matrix.",
},
"raw_score": {
"type": "boolean",
"default": False,
"description": "Whether to predict raw scores.",
},
"num_iteration": {
"anyOf": [{"type": "integer"}, {"enum": [None]}],
"default": None,
"description": "Limit number of iterations in the prediction.",
},
"pred_leaf": {
"type": "boolean",
"default": False,
"description": "Whether to predict leaf index.",
},
"pred_contrib": {
"type": "boolean",
"default": False,
"description": "Whether to predict feature contributions.",
},
},
}
_output_predict_schema = {
"description": "Return the predicted value for each sample.",
"type": "array",
"items": {"type": "number"},
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Combined schema for expected data and hyperparameters.",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.lightgbm.lgbm_regressor.html",
"import_from": "lightgbm.sklearn",
"type": "object",
"tags": {"pre": [], "op": ["estimator", "regressor"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_partial_fit": _input_fit_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
},
}
LGBMRegressor = lale.operators.make_operator(_LGBMRegressorImpl, _combined_schemas)
if lightgbm_installed:
from packaging import version
lightgbm_version = version.parse(getattr(lightgbm, "__version__"))
if lightgbm_version >= version.Version("3.3.0"):
# https://lightgbm.readthedocs.io/en/latest/pythonapi/lightgbm.LGBMRegressor.html
LGBMRegressor = LGBMRegressor.customize_schema(
silent={
"description": "Whether to print messages while running boosting.",
"anyOf": [{"enum": ["warn"]}, {"type": "boolean"}],
"default": "warn",
},
set_as_available=True,
)
lale.docstrings.set_docstrings(LGBMRegressor)
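# Usage sketch (added for illustration, not part of the original module):
# fitting the regressor above on a toy dataset and scoring it. Assumes
# lightgbm and scikit-learn are installed; the dataset and hyperparameter
# values are illustrative only.
if __name__ == "__main__":
    from sklearn.datasets import load_diabetes
    from sklearn.metrics import r2_score
    from sklearn.model_selection import train_test_split

    X, y = load_diabetes(return_X_y=True)
    train_X, test_X, train_y, test_y = train_test_split(X, y, random_state=42)
    trainable = LGBMRegressor(n_estimators=200, learning_rate=0.1, num_leaves=31)
    trained = trainable.fit(train_X, train_y)
    print("held-out R^2:", r2_score(test_y, trained.predict(test_X)))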
# repo: lale
# file: lale-master/lale/lib/xgboost/xgb_regressor.py
# Copyright 2019-2022 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import numpy as np
import pandas as pd
from packaging import version
import lale.docstrings
import lale.helpers
import lale.operators
import lale.schemas
from ._common_schemas import schema_silent
try:
import xgboost # type: ignore
xgboost_version = version.parse(getattr(xgboost, "__version__"))
except ImportError:
xgboost_version = None
if TYPE_CHECKING:
import xgboost # type: ignore
# xgboost does not like column names with some characters (which are legal in pandas)
# so we encode them
def _rename_one_feature(name):
mapping = {"[": "[", "]": "]", "<": "<"}
for old, new in mapping.items():
name = name.replace(old, new)
return name
def _rename_all_features(X):
if not isinstance(X, pd.DataFrame):
return X
mapped = [_rename_one_feature(f) for f in X.columns]
if list(X.columns) == mapped:
return X
return pd.DataFrame(data=X, columns=mapped)
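# Illustration (added comment, not in the original source): with the full-width
# mapping above, a pandas column name such as "age[years]" is rewritten to
# "age［years］" before being handed to xgboost, while names without "[", "]",
# or "<" pass through unchanged.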
class _XGBRegressorImpl:
_wrapped_model: xgboost.XGBRegressor
@classmethod
def validate_hyperparams(cls, **hyperparams):
assert (
xgboost_version is not None
), """Your Python environment does not have xgboost installed. You can install it with
pip install xgboost
or with
pip install 'lale[full]'"""
def __init__(self, **hyperparams):
self.validate_hyperparams(**hyperparams)
self._wrapped_model = xgboost.XGBRegressor(**hyperparams)
def fit(self, X, y, **fit_params):
renamed_X = _rename_all_features(X)
self._wrapped_model.fit(renamed_X, y, **fit_params)
return self
def partial_fit(self, X, y, **fit_params):
fit_params = lale.helpers.dict_without(fit_params, "classes")
if self._wrapped_model.__sklearn_is_fitted__():
booster = self._wrapped_model.get_booster()
fit_params = {**fit_params, "xgb_model": booster}
return self.fit(X, y, **fit_params)
def predict(self, X, **predict_params):
renamed_X = _rename_all_features(X)
result = self._wrapped_model.predict(renamed_X, **predict_params)
return result
def score(self, X, y):
from sklearn.metrics import r2_score
y_pred = self.predict(X)
return r2_score(y, y_pred)
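# Note on partial_fit (added comment, not in the original source): the method
# above implements incremental training by handing the already-fitted booster
# back to fit() as xgb_model, so each call continues boosting from the previous
# ensemble instead of retraining from scratch. A hypothetical mini-batch loop:
#
#     trainable = XGBRegressor(n_estimators=50)
#     for batch_X, batch_y in batches:  # "batches" is an illustrative iterable
#         trainable = trainable.partial_fit(batch_X, batch_y)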
_hyperparams_schema = {
"description": "Hyperparameter schema for a Lale wrapper for XGBoost.",
"allOf": [
{
"description": "This first sub-object lists all constructor arguments with their "
"types, one at a time, omitting cross-argument constraints.",
"type": "object",
"additionalProperties": False,
"required": [
"max_depth",
"learning_rate",
"n_estimators",
"verbosity",
"objective",
"booster",
"tree_method",
"n_jobs",
"gamma",
"min_child_weight",
"max_delta_step",
"subsample",
"colsample_bytree",
"colsample_bylevel",
"colsample_bynode",
"reg_alpha",
"reg_lambda",
"scale_pos_weight",
"base_score",
"random_state",
"missing",
],
"relevantToOptimizer": [
"max_depth",
"learning_rate",
"n_estimators",
"gamma",
"min_child_weight",
"subsample",
"reg_alpha",
"reg_lambda",
],
"properties": {
"max_depth": {
"description": "Maximum tree depth for base learners.",
"type": "integer",
"default": 4,
"minimum": 0,
"distribution": "uniform",
"minimumForOptimizer": 1,
"maximumForOptimizer": 7,
},
"learning_rate": {
"description": """Boosting learning rate (xgb's "eta")""",
"type": "number",
"default": 0.1,
"distribution": "loguniform",
"minimumForOptimizer": 0.02,
"maximumForOptimizer": 1,
},
"n_estimators": {
"description": "Number of trees to fit.",
"type": "integer",
"default": 200,
"minimumForOptimizer": 50,
"maximumForOptimizer": 1000,
},
"verbosity": {
"description": "The degree of verbosity.",
"type": "integer",
"default": 1,
"minimum": 0,
"maximum": 3,
},
"silent": schema_silent,
"objective": {
"description": "Specify the learning task and the corresponding "
"learning objective or a custom objective function to be used.",
"anyOf": [
{
"enum": [
"reg:linear",
"reg:logistic",
"reg:gamma",
"reg:tweedie",
]
},
{"laleType": "callable"},
],
"default": "reg:linear",
},
"booster": {
"description": "Specify which booster to use.",
"enum": ["gbtree", "gblinear", "dart"],
"default": "gbtree",
},
"tree_method": {
"description": """Specify which tree method to use.
Defaults to auto. If this parameter is set to default, XGBoost will choose the most conservative option available.
Refer to https://xgboost.readthedocs.io/en/latest/parameter.html. """,
"enum": ["auto", "exact", "approx", "hist", "gpu_hist"],
"default": "auto",
},
"n_jobs": {
"type": "integer",
"description": "Number of parallel threads used to run xgboost. (replaces ``nthread``)",
"default": 1,
},
"nthread": {
"anyOf": [{"type": "integer"}, {"enum": [None]}],
"default": None,
"description": "Number of parallel threads used to run xgboost. Deprecated, please use n_jobs",
},
"gamma": {
"type": "number",
"description": "Minimum loss reduction required to make a further partition on a leaf node of the tree.",
"default": 0,
"minimum": 0,
"maximumForOptimizer": 1.0,
},
"min_child_weight": {
"type": "integer",
"description": "Minimum sum of instance weight(hessian) needed in a child.",
"default": 10,
"distribution": "uniform",
"minimumForOptimizer": 2,
"maximumForOptimizer": 20,
},
"max_delta_step": {
"type": "integer",
"description": "Maximum delta step we allow each tree's weight estimation to be.",
"default": 0,
},
"subsample": {
"type": "number",
"description": "Subsample ratio of the training instance.",
"default": 1,
"minimum": 0,
"exclusiveMinimum": True,
"distribution": "uniform",
"minimumForOptimizer": 0.01,
"maximumForOptimizer": 1.0,
},
"colsample_bytree": {
"type": "number",
"description": "Subsample ratio of columns when constructing each tree.",
"default": 1,
"minimum": 0,
"exclusiveMinimum": True,
"maximum": 1,
"distribution": "uniform",
"minimumForOptimizer": 0.1,
"maximumForOptimizer": 1.0,
},
"colsample_bylevel": {
"type": "number",
"description": "Subsample ratio of columns for each split, in each level.",
"default": 1,
"minimum": 0,
"exclusiveMinimum": True,
"maximum": 1,
"distribution": "uniform",
"minimumForOptimizer": 0.1,
"maximumForOptimizer": 1.0,
},
"colsample_bynode": {
"type": "number",
"description": "Subsample ratio of columns for each split.",
"default": 1,
"minimum": 0,
"exclusiveMinimum": True,
"maximum": 1,
},
"reg_alpha": {
"type": "number",
"description": "L1 regularization term on weights",
"default": 0,
"distribution": "uniform",
"minimumForOptimizer": 0,
"maximumForOptimizer": 1,
},
"reg_lambda": {
"type": "number",
"description": "L2 regularization term on weights",
"default": 1,
"distribution": "uniform",
"minimumForOptimizer": 0.1,
"maximumForOptimizer": 1,
},
"scale_pos_weight": {
"type": "number",
"description": "Balancing of positive and negative weights.",
"default": 1,
},
"base_score": {
"type": "number",
"description": "The initial prediction score of all instances, global bias.",
"default": 0.5,
},
"random_state": {
"type": "integer",
"description": "Random number seed. (replaces seed)",
"default": 0,
},
"missing": {
"anyOf": [
{
"type": "number",
},
{
"enum": [None],
},
],
"default": None,
"description": "Value in the data which needs to be present as a missing value. If"
" If None, defaults to np.nan.",
},
"importance_type": {
"enum": [
"gain",
"weight",
"cover",
"total_gain",
"total_cover",
None,
],
"default": "gain",
"description": "The feature importance type for the `feature_importances_` property.",
},
"seed": {
"default": None,
"description": "deprecated and replaced with random_state, but adding to be backward compatible. ",
},
},
}
],
}
_input_fit_schema = {
"description": "Fit gradient boosting classifier",
"type": "object",
"required": ["X", "y"],
"properties": {
"X": {
"type": "array",
"items": {
"type": "array",
"items": {"type": "number"},
},
"description": "Feature matrix",
},
"y": {
"type": "array",
"items": {"type": "number"},
"description": "Labels",
},
"sample_weight": {
"anyOf": [
{
"type": "array",
"items": {"type": "number"},
},
{"enum": [None]},
],
"description": "Weight for each instance",
"default": None,
},
"eval_set": {
"anyOf": [
{
"type": "array",
},
{
"enum": [None],
},
],
"default": None,
"description": "A list of (X, y) pairs to use as a validation set for",
},
"sample_weight_eval_set": {
"anyOf": [
{
"type": "array",
},
{
"enum": [None],
},
],
"default": None,
"description": "A list of the form [L_1, L_2, ..., L_n], where each L_i is a list of",
},
"eval_metric": {
"anyOf": [
{"type": "array", "items": {"type": "string"}},
{"type": "string"},
{"enum": [None]},
{"type": "object"},
],
"default": None,
"description": "If a str, should be a built-in evaluation metric to use. See",
},
"early_stopping_rounds": {
"anyOf": [
{
"type": "integer",
},
{
"enum": [None],
},
],
"default": None,
"description": "Activates early stopping. Validation error needs to decrease at",
},
"verbose": {
"type": "boolean",
"description": "If `verbose` and an evaluation set is used, writes the evaluation",
"default": True,
},
"xgb_model": {
"anyOf": [{"type": "string"}, {"enum": [None]}],
"description": "file name of stored xgb model or 'Booster' instance Xgb model to be",
"default": None,
},
"callbacks": {
"anyOf": [{"type": "array", "items": {"type": "object"}}, {"enum": [None]}],
"default": None,
"description": "List of callback functions that are applied at each iteration. ",
},
},
}
_input_predict_schema = {
"description": "Predict with `data`.",
"type": "object",
"required": ["X"],
"properties": {
"X": {
"type": "array",
"items": {
"type": "array",
"items": {"type": "number"},
},
"description": "The dmatrix storing the input.",
},
"output_margin": {
"type": "boolean",
"default": False,
"description": "Whether to output the raw untransformed margin value.",
},
"ntree_limit": {
"anyOf": [{"type": "integer"}, {"enum": [None]}],
"description": "Limit number of trees in the prediction; defaults to best_ntree_limit if defined",
},
"validate_features": {
"type": "boolean",
"default": True,
"description": "When this is True, validate that the Booster's and data's feature_names are identical.",
},
},
}
_output_predict_schema = {
"description": "Output data schema for predictions (target class labels).",
"type": "array",
"items": {"type": "number"},
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """`XGBRegressor`_ gradient boosted decision trees.
.. _`XGBRegressor`: https://xgboost.readthedocs.io/en/latest/python/python_api.html#xgboost.XGBRegressor
""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.xgboost.xgb_regressor.html",
"import_from": "xgboost",
"tags": {"pre": [], "op": ["estimator", "regressor"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_partial_fit": _input_fit_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
},
}
XGBRegressor: lale.operators.PlannedIndividualOp
XGBRegressor = lale.operators.make_operator(_XGBRegressorImpl, _combined_schemas)
if xgboost_version is not None and xgboost_version >= version.Version("0.90"):
# page 58 of https://readthedocs.org/projects/xgboost/downloads/pdf/release_0.90/
XGBRegressor = XGBRegressor.customize_schema(
objective=lale.schemas.JSON(
{
"description": "Specify the learning task and the corresponding learning objective or a custom objective function to be used.",
"anyOf": [
{
"enum": [
"reg:linear",
"reg:logistic",
"reg:gamma",
"reg:tweedie",
"reg:squarederror",
]
},
{"laleType": "callable"},
],
"default": "reg:linear",
}
),
set_as_available=True,
)
if xgboost_version is not None and xgboost_version >= version.Version("1.3"):
# https://xgboost.readthedocs.io/en/latest/python/python_api.html#module-xgboost.sklearn
XGBRegressor = XGBRegressor.customize_schema(
monotone_constraints={
"description": "Constraint of variable monotonicity.",
"anyOf": [{"enum": [None]}, {"type": "string"}],
"default": None,
},
interaction_constraints={
"description": "Constraints for interaction representing permitted interactions. The constraints must be specified in the form of a nest list, e.g. [[0, 1], [2, 3, 4]], where each inner list is a group of indices of features that are allowed to interact with each other.",
"anyOf": [{"enum": [None]}, {"type": "string"}],
"default": None,
},
num_parallel_tree={
"description": "Used for boosting random forest.",
"anyOf": [{"enum": [None]}, {"type": "integer"}],
"default": None,
},
validate_parameters={
"description": "Give warnings for unknown parameter.",
"anyOf": [{"enum": [None]}, {"type": "boolean"}, {"type": "integer"}],
"default": None,
},
gpu_id={
"description": "Device ordinal.",
"anyOf": [
{"type": "integer"},
{"enum": [None]},
],
"default": None,
},
max_depth={
"description": "Maximum tree depth for base learners.",
"anyOf": [
{
"type": "integer",
"minimum": 0,
"distribution": "uniform",
"minimumForOptimizer": 1,
"maximumForOptimizer": 7,
},
{"enum": [None], "forOptimizer": False},
],
"default": None,
},
learning_rate={
"description": """Boosting learning rate (xgb's "eta")""",
"anyOf": [
{
"type": "number",
"distribution": "loguniform",
"minimumForOptimizer": 0.02,
"maximumForOptimizer": 1,
},
{"enum": [None], "forOptimizer": False},
],
"default": None,
},
booster={
"description": "Specify which booster to use.",
"enum": ["gbtree", "gblinear", "dart", None],
"default": None,
},
tree_method={
"description": """Specify which tree method to use.
Default to auto. If this parameter is set to default, XGBoost will choose the most conservative option available.
Refer to https://xgboost.readthedocs.io/en/latest/parameter.html. """,
"enum": ["auto", "exact", "approx", "hist", "gpu_hist", None],
"default": None,
},
gamma={
"description": "Minimum loss reduction required to make a further partition on a leaf node of the tree.",
"anyOf": [
{
"type": "number",
"minimum": 0,
"maximumForOptimizer": 1.0,
},
{"enum": [None], "forOptimizer": False},
],
"default": None,
},
min_child_weight={
"description": "Minimum sum of instance weight(hessian) needed in a child.",
"anyOf": [
{
"type": "integer",
"distribution": "uniform",
"minimumForOptimizer": 2,
"maximumForOptimizer": 20,
},
{"enum": [None], "forOptimizer": False},
],
"default": None,
},
max_delta_step={
"description": "Maximum delta step we allow each tree's weight estimation to be.",
"anyOf": [{"enum": [None]}, {"type": "integer"}],
"default": None,
},
subsample={
"description": "Subsample ratio of the training instance.",
"anyOf": [
{
"type": "number",
"minimum": 0,
"exclusiveMinimum": True,
"distribution": "uniform",
"minimumForOptimizer": 0.01,
"maximumForOptimizer": 1.0,
},
{"enum": [None], "forOptimizer": False},
],
"default": None,
},
colsample_bytree={
"description": "Subsample ratio of columns when constructing each tree.",
"anyOf": [
{
"type": "number",
"minimum": 0,
"exclusiveMinimum": True,
"maximum": 1,
"distribution": "uniform",
"minimumForOptimizer": 0.1,
"maximumForOptimizer": 1.0,
},
{"enum": [None], "forOptimizer": False},
],
"default": None,
},
colsample_bylevel={
"description": "Subsample ratio of columns for each split, in each level.",
"anyOf": [
{
"type": "number",
"minimum": 0,
"exclusiveMinimum": True,
"maximum": 1,
"distribution": "uniform",
"minimumForOptimizer": 0.1,
"maximumForOptimizer": 1.0,
},
{"enum": [None], "forOptimizer": False},
],
"default": None,
},
colsample_bynode={
"description": "Subsample ratio of columns for each split.",
"anyOf": [
{
"type": "number",
"minimum": 0,
"exclusiveMinimum": True,
"maximum": 1,
},
{"enum": [None], "forOptimizer": False},
],
"default": None,
},
reg_alpha={
"description": "L1 regularization term on weights",
"anyOf": [
{
"type": "number",
"distribution": "uniform",
"minimumForOptimizer": 0,
"maximumForOptimizer": 1,
},
{"enum": [None], "forOptimizer": False},
],
"default": None,
},
reg_lambda={
"description": "L2 regularization term on weights",
"anyOf": [
{
"type": "number",
"distribution": "uniform",
"minimumForOptimizer": 0.1,
"maximumForOptimizer": 1,
},
{"enum": [None], "forOptimizer": False},
],
"default": None,
},
scale_pos_weight={
"description": "Balancing of positive and negative weights.",
"anyOf": [
{"type": "number"},
{"enum": [None], "forOptimizer": False},
],
"default": None,
},
base_score={
"description": "The initial prediction score of all instances, global bias.",
"anyOf": [
{"type": "number"},
{"enum": [None], "forOptimizer": False},
],
"default": None,
},
missing={
"anyOf": [
{
"type": "number",
},
{
"enum": [None, np.NaN],
},
],
"default": np.NaN,
"description": "Value in the data which needs to be present as a missing value. If"
" If None, defaults to np.nan.",
},
verbosity={
"description": "The degree of verbosity.",
"anyOf": [{"type": "integer"}, {"enum": [None]}],
"default": None,
"minimum": 0,
"maximum": 3,
},
set_as_available=True,
)
if xgboost_version is not None and xgboost_version >= version.Version("1.5"):
# https://xgboost.readthedocs.io/en/latest/python/python_api.html#module-xgboost.sklearn
XGBRegressor = XGBRegressor.customize_schema(
enable_categorical={
"type": "boolean",
"description": """Experimental support for categorical data.
Do not set to true unless you are interested in development.
Only valid when gpu_hist and dataframe are used.""",
"default": False,
},
predictor={
"anyOf": [{"type": "string"}, {"enum": [None]}],
"description": """Force XGBoost to use specific predictor,
available choices are [cpu_predictor, gpu_predictor].""",
"default": None,
},
set_as_available=True,
)
if xgboost_version is not None and xgboost_version >= version.Version("1.6"):
# https://xgboost.readthedocs.io/en/latest/python/python_api.html#module-xgboost.sklearn
XGBRegressor = XGBRegressor.customize_schema(
max_leaves={
"description": """Maximum number of leaves; 0 indicates no limit.""",
"anyOf": [
{"type": "integer"},
{"enum": [None], "forOptimizer": False},
],
"default": None,
},
max_bin={
"description": """If using histogram-based algorithm, maximum number of bins per feature.""",
"anyOf": [
{"type": "integer"},
{"enum": [None], "forOptimizer": False},
],
"default": None,
},
grow_policy={
"description": """Tree growing policy.
            0 or depthwise: favor splitting at nodes closest to the root, i.e. grow depth-wise.
1 or lossguide: favor splitting at nodes with highest loss change.""",
"enum": [0, 1, "depthwise", "lossguide", None],
"default": None,
},
sampling_method={
"description": """Sampling method. Used only by gpu_hist tree method.
- uniform: select random training instances uniformly.
            - gradient_based: select random training instances with higher probability when the gradient and hessian are larger. (cf. CatBoost)""",
            "enum": ["uniform", "gradient_based", None],
"default": None,
},
max_cat_to_onehot={
"description": """A threshold for deciding whether XGBoost should use
one-hot encoding based split for categorical data.""",
"anyOf": [
{"type": "integer"},
{"enum": [None]},
],
"default": None,
},
eval_metric={
"description": """Metric used for monitoring the training result and early stopping.""",
"anyOf": [
{"type": "string"},
{"type": "array", "items": {"type": "string"}},
{"type": "array", "items": {"laleType": "callable"}},
{"enum": [None]},
],
"default": None,
},
early_stopping_rounds={
"description": """Activates early stopping.
Validation metric needs to improve at least once in every early_stopping_rounds round(s)
to continue training.""",
"anyOf": [
{"type": "integer"},
{"enum": [None]},
],
"default": None,
},
callbacks={
"description": """List of callback functions that are applied at end of each iteration.
It is possible to use predefined callbacks by using Callback API.""",
"anyOf": [
{"type": "array", "items": {"laleType": "callable"}},
{"enum": [None]},
],
"default": None,
},
n_jobs={
"anyOf": [{"type": "integer"}, {"enum": [None]}],
"description": "Number of parallel threads used to run xgboost. (replaces ``nthread``)",
"default": 1,
},
random_state={
"anyOf": [{"type": "integer"}, {"enum": [None]}],
"description": "Random number seed. (replaces seed)",
"default": 0,
},
)
lale.docstrings.set_docstrings(XGBRegressor)
| 30,974 | 35.963007 | 284 |
py
|
lale
|
lale-master/lale/lib/xgboost/xgb_classifier.py
|
# Copyright 2019-2022 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import TYPE_CHECKING
import numpy as np
import pandas as pd
from packaging import version
import lale.docstrings
import lale.helpers
import lale.operators
import lale.schemas
from ._common_schemas import schema_silent
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=FutureWarning)
try:
import xgboost # type: ignore
xgboost_version = version.parse(getattr(xgboost, "__version__"))
except ImportError:
xgboost_version = None
if TYPE_CHECKING:
import xgboost # type: ignore
# xgboost does not like column names with some characters (which are legal in pandas)
# so we encode them
def _rename_one_feature(name):
mapping = {"[": "[", "]": "]", "<": "<"}
for old, new in mapping.items():
name = name.replace(old, new)
return name
def _rename_all_features(X):
if not isinstance(X, pd.DataFrame):
return X
mapped = [_rename_one_feature(f) for f in X.columns]
if list(X.columns) == mapped:
return X
return pd.DataFrame(data=X, columns=mapped)
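# A hedged illustration, not part of the library API: the helpers above only
# rewrite pandas column names that contain characters XGBoost rejects.
# For example (the column name below is made up):
#     df = pd.DataFrame({"price[usd]<2020": [1.0, 2.0]})
#     renamed = _rename_all_features(df)
#     # renamed carries the sanitized column name from _rename_one_feature;
#     # DataFrames whose names need no change are returned as-is.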
class _XGBClassifierImpl:
_wrapped_model: xgboost.XGBClassifier
@classmethod
def validate_hyperparams(cls, **hyperparams):
assert (
xgboost_version is not None
), """Your Python environment does not have xgboost installed. You can install it with
pip install xgboost
or with
pip install 'lale[full]'"""
def __init__(self, **hyperparams):
self.validate_hyperparams(**hyperparams)
self._wrapped_model = xgboost.XGBClassifier(**hyperparams)
def fit(self, X, y, **fit_params):
renamed_X = _rename_all_features(X)
assert xgboost_version is not None
if (
xgboost_version >= version.Version("1.3.0")
and "eval_metric" not in fit_params
):
# set eval_metric explicitly to avoid spurious warning
fit_params = {"eval_metric": "logloss", **fit_params}
with warnings.catch_warnings():
if fit_params.get("use_label_encoder", True):
warnings.filterwarnings("ignore", category=UserWarning)
warnings.filterwarnings("ignore", category=FutureWarning)
self._wrapped_model.fit(renamed_X, y, **fit_params)
return self
def partial_fit(self, X, y, **fit_params):
fit_params = lale.helpers.dict_without(fit_params, "classes")
if self._wrapped_model.__sklearn_is_fitted__():
booster = self._wrapped_model.get_booster()
fit_params = {**fit_params, "xgb_model": booster}
return self.fit(X, y, **fit_params)
def predict(self, X, **predict_params):
renamed_X = _rename_all_features(X)
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=FutureWarning)
result = self._wrapped_model.predict(renamed_X, **predict_params)
return result
def predict_proba(self, X):
return self._wrapped_model.predict_proba(X)
def score(self, X, y):
from sklearn.metrics import accuracy_score
y_pred = self.predict(X)
return accuracy_score(y, y_pred)
_hyperparams_schema = {
"description": "Hyperparameter schema for a Lale wrapper for XGBoost.",
"allOf": [
{
"description": "This first sub-object lists all constructor arguments with their "
"types, one at a time, omitting cross-argument constraints.",
"type": "object",
"additionalProperties": False,
"required": [
"max_depth",
"learning_rate",
"n_estimators",
"verbosity",
"objective",
"booster",
"tree_method",
"n_jobs",
"gamma",
"min_child_weight",
"max_delta_step",
"subsample",
"colsample_bytree",
"colsample_bylevel",
"colsample_bynode",
"reg_alpha",
"reg_lambda",
"scale_pos_weight",
"base_score",
"random_state",
"missing",
],
"relevantToOptimizer": [
"gamma",
"max_depth",
"learning_rate",
"n_estimators",
"min_child_weight",
"subsample",
"reg_alpha",
"reg_lambda",
],
"properties": {
"max_depth": {
"description": "Maximum tree depth for base learners.",
"type": "integer",
"default": 4,
"minimum": 0,
"distribution": "uniform",
"minimumForOptimizer": 1,
"maximumForOptimizer": 7,
},
"learning_rate": {
"description": "Boosting learning rate (xgb’s “eta”)",
"type": "number",
"default": 0.1,
"distribution": "loguniform",
"minimumForOptimizer": 0.02,
"maximumForOptimizer": 1,
},
"n_estimators": {
"description": "Number of trees to fit.",
"type": "integer",
"default": 100,
"minimumForOptimizer": 50,
"maximumForOptimizer": 1000,
},
"verbosity": {
"description": "The degree of verbosity.",
"anyOf": [{"type": "integer"}, {"enum": [None]}],
"default": 1,
"minimum": 0,
"maximum": 3,
},
"objective": {
"description": "Specify the learning task and the corresponding "
"learning objective or a custom objective function to be used.",
"anyOf": [
{
"enum": [
"binary:logistic",
"binary:logitraw",
"binary:hinge",
"multi:softprob",
"multi:softmax",
]
},
{"laleType": "callable"},
],
"default": "binary:logistic",
},
"booster": {
"description": "Specify which booster to use.",
"enum": ["gbtree", "gblinear", "dart"],
"default": "gbtree",
},
"tree_method": {
"description": """Specify which tree method to use.
Default to auto. If this parameter is set to default, XGBoost will choose the most conservative option available.
Refer to https://xgboost.readthedocs.io/en/latest/parameter.html. """,
"enum": ["auto", "exact", "approx", "hist", "gpu_hist"],
"default": "auto",
},
"n_jobs": {
"anyOf": [{"type": "integer"}, {"enum": [None]}],
"description": "Number of parallel threads used to run xgboost. (replaces ``nthread``)",
"default": 1,
},
"nthread": {
"anyOf": [{"type": "integer"}, {"enum": [None]}],
"default": None,
"description": "Number of parallel threads used to run xgboost. Deprecated, please use n_jobs",
},
"gamma": {
"type": "number",
"description": "Minimum loss reduction required to make a further partition on a leaf node of the tree.",
"default": 0,
"minimum": 0,
"maximumForOptimizer": 1.0,
},
"min_child_weight": {
"type": "integer",
"description": "Minimum sum of instance weight(hessian) needed in a child.",
"default": 10,
"distribution": "uniform",
"minimumForOptimizer": 2,
"maximumForOptimizer": 20,
},
"max_delta_step": {
"type": "integer",
"description": "Maximum delta step we allow each tree's weight estimation to be.",
"default": 0,
},
"subsample": {
"type": "number",
"description": "Subsample ratio of the training instance.",
"default": 1,
"minimum": 0,
"exclusiveMinimum": True,
"distribution": "uniform",
"minimumForOptimizer": 0.01,
"maximumForOptimizer": 1.0,
},
"colsample_bytree": {
"type": "number",
"description": "Subsample ratio of columns when constructing each tree.",
"default": 1,
"minimum": 0,
"exclusiveMinimum": True,
"maximum": 1,
"distribution": "uniform",
"minimumForOptimizer": 0.1,
"maximumForOptimizer": 1.0,
},
"colsample_bylevel": {
"type": "number",
"description": "Subsample ratio of columns for each split, in each level.",
"default": 1,
"minimum": 0,
"exclusiveMinimum": True,
"maximum": 1,
"distribution": "uniform",
"minimumForOptimizer": 0.1,
"maximumForOptimizer": 1.0,
},
"colsample_bynode": {
"type": "number",
"description": "Subsample ratio of columns for each split.",
"default": 1,
"minimum": 0,
"exclusiveMinimum": True,
"maximum": 1,
},
"reg_alpha": {
"type": "number",
"description": "L1 regularization term on weights",
"default": 0,
"distribution": "uniform",
"minimumForOptimizer": 0.0,
"maximumForOptimizer": 1.0,
},
"reg_lambda": {
"type": "number",
"description": "L2 regularization term on weights",
"default": 1,
"distribution": "uniform",
"minimumForOptimizer": 0.1,
"maximumForOptimizer": 1.0,
},
"scale_pos_weight": {
"anyOf": [
{
"type": "number",
},
{
"enum": [None],
},
],
"description": "Balancing of positive and negative weights.",
"default": 1,
},
"base_score": {
"anyOf": [
{
"type": "number",
},
{
"enum": [None],
},
],
"description": "The initial prediction score of all instances, global bias.",
"default": 0.5,
},
"random_state": {
"anyOf": [
{
"type": "integer",
},
{
"enum": [None],
},
],
"description": "Random number seed. (replaces seed)",
"default": 0,
},
"missing": {
"anyOf": [
{
"type": "number",
},
{
"enum": [None],
},
],
"default": None,
"description": "Value in the data which needs to be present as a missing value. If"
" If None, defaults to np.nan.",
},
"silent": schema_silent,
"seed": {
"default": None,
"description": "deprecated and replaced with random_state, but adding to be backward compatible. ",
},
},
}
],
}
_input_fit_schema = {
"description": "Fit gradient boosting classifier",
"type": "object",
"required": ["X", "y"],
"properties": {
"X": {
"type": "array",
"items": {
"type": "array",
"items": {"type": "number"},
},
"description": "Feature matrix",
},
"y": {
"anyOf": [
{"type": "array", "items": {"type": "number"}},
{"type": "array", "items": {"type": "string"}},
{"type": "array", "items": {"type": "boolean"}},
],
"description": "Labels",
},
"sample_weight": {
"anyOf": [
{
"type": "array",
"items": {"type": "number"},
},
{"enum": [None]},
],
"description": "Weight for each instance",
"default": None,
},
"eval_set": {
"anyOf": [
{
"type": "array",
},
{
"enum": [None],
},
],
"default": None,
"description": "A list of (X, y) pairs to use as a validation set for",
},
"sample_weight_eval_set": {
"anyOf": [
{
"type": "array",
},
{
"enum": [None],
},
],
"default": None,
"description": "A list of the form [L_1, L_2, ..., L_n], where each L_i is a list of",
},
"eval_metric": {
"anyOf": [
{"type": "array", "items": {"type": "string"}},
{"type": "string"},
{"enum": [None]},
{"type": "object"},
],
"default": None,
"description": "If a str, should be a built-in evaluation metric to use. See",
},
"early_stopping_rounds": {
"anyOf": [
{
"type": "integer",
},
{
"enum": [None],
},
],
"default": None,
"description": "Activates early stopping. Validation error needs to decrease at",
},
"verbose": {
"type": "boolean",
"description": "If `verbose` and an evaluation set is used, writes the evaluation",
"default": True,
},
"xgb_model": {
"anyOf": [{"type": "string"}, {"enum": [None]}],
"description": "file name of stored xgb model or 'Booster' instance Xgb model to be",
"default": None,
},
"callbacks": {
"anyOf": [{"type": "array", "items": {"type": "object"}}, {"enum": [None]}],
"default": None,
"description": "List of callback functions that are applied at each iteration. ",
},
},
}
_input_predict_schema = {
"description": "Predict with `data`.",
"type": "object",
"required": ["X"],
"properties": {
"X": {
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
"description": "The dmatrix storing the input.",
},
"output_margin": {
"type": "boolean",
"default": False,
"description": "Whether to output the raw untransformed margin value.",
},
"ntree_limit": {
"anyOf": [{"type": "integer"}, {"enum": [None]}],
"description": "Limit number of trees in the prediction; defaults to best_ntree_limit if defined",
},
"validate_features": {
"type": "boolean",
"default": True,
"description": "When this is True, validate that the Booster's and data's feature_names are identical.",
},
},
}
_output_predict_schema = {
"description": "Predicted class label per sample.",
"anyOf": [
{"type": "array", "items": {"type": "number"}},
{"type": "array", "items": {"type": "string"}},
{"type": "array", "items": {"type": "boolean"}},
],
}
_input_predict_proba_schema = {
"type": "object",
"required": ["X"],
"properties": {
"X": {"type": "array", "items": {"type": "array", "items": {"type": "number"}}}
},
}
_output_predict_proba_schema = {
"description": "Probability of the sample for each class in the model.",
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """`XGBClassifier`_ gradient boosted decision trees.
.. _`XGBClassifier`: https://xgboost.readthedocs.io/en/latest/python/python_api.html#xgboost.XGBClassifier
""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.xgboost.xgb_classifier.html",
"import_from": "xgboost",
"tags": {"pre": [], "op": ["estimator", "classifier"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_partial_fit": _input_fit_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
"input_predict_proba": _input_predict_proba_schema,
"output_predict_proba": _output_predict_proba_schema,
},
}
XGBClassifier: lale.operators.PlannedIndividualOp
XGBClassifier = lale.operators.make_operator(_XGBClassifierImpl, _combined_schemas)
if xgboost_version is not None and xgboost_version >= version.Version("0.90"):
# page 58 of https://readthedocs.org/projects/xgboost/downloads/pdf/release_0.90/
XGBClassifier = XGBClassifier.customize_schema(
objective=lale.schemas.JSON(
{
"description": "Specify the learning task and the corresponding learning objective or a custom objective function to be used.",
"anyOf": [
{
"enum": [
"binary:hinge",
"binary:logistic",
"binary:logitraw",
"multi:softmax",
"multi:softprob",
]
},
{"laleType": "callable"},
],
"default": "binary:logistic",
}
),
set_as_available=True,
)
if xgboost_version is not None and xgboost_version >= version.Version("1.3"):
# https://xgboost.readthedocs.io/en/latest/python/python_api.html#module-xgboost.sklearn
XGBClassifier = XGBClassifier.customize_schema(
monotone_constraints={
"description": "Constraint of variable monotonicity.",
"anyOf": [{"enum": [None]}, {"type": "string"}],
"default": None,
},
interaction_constraints={
"description": "Constraints for interaction representing permitted interactions. The constraints must be specified in the form of a nest list, e.g. [[0, 1], [2, 3, 4]], where each inner list is a group of indices of features that are allowed to interact with each other.",
"anyOf": [{"enum": [None]}, {"type": "string"}],
"default": None,
},
num_parallel_tree={
"description": "Used for boosting random forest.",
"anyOf": [{"enum": [None]}, {"type": "integer"}],
"default": None,
},
validate_parameters={
"description": "Give warnings for unknown parameter.",
"anyOf": [{"enum": [None]}, {"type": "boolean"}, {"type": "integer"}],
"default": None,
},
gpu_id={
"description": "Device ordinal.",
"anyOf": [
{"type": "integer"},
{"enum": [None]},
],
"default": None,
},
max_depth={
"description": "Maximum tree depth for base learners.",
"anyOf": [
{
"type": "integer",
"minimum": 0,
"distribution": "uniform",
"minimumForOptimizer": 1,
"maximumForOptimizer": 7,
},
{"enum": [None], "forOptimizer": False},
],
"default": None,
},
learning_rate={
"description": """Boosting learning rate (xgb's "eta")""",
"anyOf": [
{
"type": "number",
"distribution": "loguniform",
"minimumForOptimizer": 0.02,
"maximumForOptimizer": 1,
},
{"enum": [None], "forOptimizer": False},
],
"default": None,
},
booster={
"description": "Specify which booster to use.",
"enum": ["gbtree", "gblinear", "dart", None],
"default": None,
},
tree_method={
"description": """Specify which tree method to use.
Default to auto. If this parameter is set to default, XGBoost will choose the most conservative option available.
Refer to https://xgboost.readthedocs.io/en/latest/parameter.html. """,
"enum": ["auto", "exact", "approx", "hist", "gpu_hist", None],
"default": None,
},
gamma={
"description": "Minimum loss reduction required to make a further partition on a leaf node of the tree.",
"anyOf": [
{
"type": "number",
"minimum": 0,
"maximumForOptimizer": 1.0,
},
{"enum": [None], "forOptimizer": False},
],
"default": None,
},
min_child_weight={
"description": "Minimum sum of instance weight(hessian) needed in a child.",
"anyOf": [
{
"type": "integer",
"distribution": "uniform",
"minimumForOptimizer": 2,
"maximumForOptimizer": 20,
},
{"enum": [None], "forOptimizer": False},
],
"default": None,
},
max_delta_step={
"description": "Maximum delta step we allow each tree's weight estimation to be.",
"anyOf": [{"enum": [None]}, {"type": "integer"}],
"default": None,
},
subsample={
"description": "Subsample ratio of the training instance.",
"anyOf": [
{
"type": "number",
"minimum": 0,
"exclusiveMinimum": True,
"distribution": "uniform",
"minimumForOptimizer": 0.01,
"maximumForOptimizer": 1.0,
},
{"enum": [None], "forOptimizer": False},
],
"default": None,
},
colsample_bytree={
"description": "Subsample ratio of columns when constructing each tree.",
"anyOf": [
{
"type": "number",
"minimum": 0,
"exclusiveMinimum": True,
"maximum": 1,
"distribution": "uniform",
"minimumForOptimizer": 0.1,
"maximumForOptimizer": 1.0,
},
{"enum": [None], "forOptimizer": False},
],
"default": None,
},
colsample_bylevel={
"description": "Subsample ratio of columns for each split, in each level.",
"anyOf": [
{
"type": "number",
"minimum": 0,
"exclusiveMinimum": True,
"maximum": 1,
"distribution": "uniform",
"minimumForOptimizer": 0.1,
"maximumForOptimizer": 1.0,
},
{"enum": [None], "forOptimizer": False},
],
"default": None,
},
colsample_bynode={
"description": "Subsample ratio of columns for each split.",
"anyOf": [
{
"type": "number",
"minimum": 0,
"exclusiveMinimum": True,
"maximum": 1,
},
{"enum": [None], "forOptimizer": False},
],
"default": None,
},
reg_alpha={
"description": "L1 regularization term on weights",
"anyOf": [
{
"type": "number",
"distribution": "uniform",
"minimumForOptimizer": 0,
"maximumForOptimizer": 1,
},
{"enum": [None], "forOptimizer": False},
],
"default": None,
},
reg_lambda={
"description": "L2 regularization term on weights",
"anyOf": [
{
"type": "number",
"distribution": "uniform",
"minimumForOptimizer": 0.1,
"maximumForOptimizer": 1,
},
{"enum": [None], "forOptimizer": False},
],
"default": None,
},
scale_pos_weight={
"description": "Balancing of positive and negative weights.",
"anyOf": [
{"type": "number"},
{"enum": [None], "forOptimizer": False},
],
"default": None,
},
base_score={
"description": "The initial prediction score of all instances, global bias.",
"anyOf": [
{"type": "number"},
{"enum": [None], "forOptimizer": False},
],
"default": None,
},
importance_type={
"description": "The feature importance type for the `feature_importances_` property.",
"enum": ["gain", "weight", "cover", "total_gain", "total_cover", None],
"default": "gain",
},
use_label_encoder={
"description": """(Deprecated) Use the label encoder from scikit-learn to encode the labels.
For new code, we recommend that you set this parameter to False.""",
"type": "boolean",
"default": True,
},
missing={
"anyOf": [
{
"type": "number",
},
{
"enum": [None, np.NaN],
},
],
"default": np.NaN,
"description": "Value in the data which needs to be present as a missing value. If"
" If None, defaults to np.nan.",
},
verbosity={
"description": "The degree of verbosity.",
"anyOf": [{"type": "integer"}, {"enum": [None]}],
"default": None,
"minimum": 0,
"maximum": 3,
},
set_as_available=True,
)
if xgboost_version is not None and xgboost_version >= version.Version("1.5"):
# https://xgboost.readthedocs.io/en/latest/python/python_api.html#module-xgboost.sklearn
XGBClassifier = XGBClassifier.customize_schema(
enable_categorical={
"type": "boolean",
"description": """Experimental support for categorical data.
Do not set to true unless you are interested in development.
Only valid when gpu_hist and dataframe are used.""",
"default": False,
},
predictor={
"anyOf": [{"type": "string"}, {"enum": [None]}],
"description": """Force XGBoost to use specific predictor,
available choices are [cpu_predictor, gpu_predictor].""",
"default": None,
},
set_as_available=True,
)
if xgboost_version is not None and xgboost_version >= version.Version("1.6"):
# https://xgboost.readthedocs.io/en/latest/python/python_api.html#module-xgboost.sklearn
XGBClassifier = XGBClassifier.customize_schema(
use_label_encoder={
"description": """(Deprecated) Use the label encoder from scikit-learn to encode the labels.
For new code, we recommend that you set this parameter to False.""",
"type": "boolean",
"default": False,
},
max_leaves={
"description": """Maximum number of leaves; 0 indicates no limit.""",
"anyOf": [
{"type": "integer"},
{"enum": [None], "forOptimizer": False},
],
"default": None,
},
max_bin={
"description": """If using histogram-based algorithm, maximum number of bins per feature.""",
"anyOf": [
{"type": "integer"},
{"enum": [None], "forOptimizer": False},
],
"default": None,
},
grow_policy={
"description": """Tree growing policy.
            0 or depthwise: favor splitting at nodes closest to the root, i.e. grow depth-wise.
1 or lossguide: favor splitting at nodes with highest loss change.""",
"enum": [0, 1, "depthwise", "lossguide", None],
"default": None,
},
sampling_method={
"description": """Sampling method. Used only by gpu_hist tree method.
- uniform: select random training instances uniformly.
            - gradient_based: select random training instances with higher probability when the gradient and hessian are larger. (cf. CatBoost)""",
            "enum": ["uniform", "gradient_based", None],
"default": None,
},
max_cat_to_onehot={
"description": """A threshold for deciding whether XGBoost should use
one-hot encoding based split for categorical data.""",
"anyOf": [
{"type": "integer"},
{"enum": [None]},
],
"default": None,
},
eval_metric={
"description": """Metric used for monitoring the training result and early stopping.""",
"anyOf": [
{"type": "string"},
{"type": "array", "items": {"type": "string"}},
{"type": "array", "items": {"laleType": "callable"}},
{"enum": [None]},
],
"default": None,
},
early_stopping_rounds={
"description": """Activates early stopping.
Validation metric needs to improve at least once in every early_stopping_rounds round(s)
to continue training.""",
"anyOf": [
{"type": "integer"},
{"enum": [None]},
],
"default": None,
},
callbacks={
"description": """List of callback functions that are applied at end of each iteration.
It is possible to use predefined callbacks by using Callback API.""",
"anyOf": [
{"type": "array", "items": {"laleType": "callable"}},
{"enum": [None]},
],
"default": None,
},
)
lale.docstrings.set_docstrings(XGBClassifier)
| 33,369 | 36.326622 | 284 |
py
|
lale
|
lale-master/lale/lib/xgboost/_common_schemas.py
|
# Copyright 2021 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from lale.type_checking import JSON_TYPE
schema_silent: JSON_TYPE = {
"anyOf": [
{"type": "boolean"},
{"enum": [None]},
],
"default": None,
"description": "Deprecated and replaced with verbosity, but adding to be backward compatible.",
}
| 847 | 32.92 | 99 |
py
|
lale
|
lale-master/lale/lib/xgboost/__init__.py
|
# Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Scikit-learn compatible wrappers for XGBoost_ along with schemas to enable hyperparameter tuning.
.. _XGBoost: https://xgboost.readthedocs.io/en/latest/
Operators:
==========
* `XGBClassifier`_
* `XGBRegressor`_
.. _`XGBClassifier`: lale.lib.xgboost.xgb_classifier.html
.. _`XGBRegressor`: lale.lib.xgboost.xgb_regressor.html
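A minimal hedged usage sketch (the data variables are illustrative):
>>> from lale.lib.xgboost import XGBClassifier
>>> trained = XGBClassifier(n_estimators=50).fit(train_X, train_y)
>>> predictions = trained.predict(test_X)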
"""
from lale import register_lale_wrapper_modules
from .xgb_classifier import XGBClassifier as XGBClassifier
from .xgb_regressor import XGBRegressor as XGBRegressor
# Note: all imports should be done as
# from .xxx import XXX as XXX
# this ensures that pyright considers them to be publicly available
# and not private imports (this affects lale users that use pyright)
register_lale_wrapper_modules(__name__)
| 1,325 | 31.341463 | 97 |
py
|
lale
|
lale-master/lale/lib/lale/observing.py
|
# Copyright 2019-2022 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from functools import wraps
import lale.docstrings
import lale.operators
logger = logging.getLogger(__name__)
# since we want the LoggingObserver
logger.setLevel(logging.INFO)
def observe(f):
@wraps(f)
def wrapper(self, *args, **kwds):
name = f.__name__
self.startObserving(name, *args, **kwds)
try:
ret = f(self, *args, **kwds)
self.endObserving(name, ret)
except BaseException as e:
self.failObserving(name, e)
raise
return ret
return wrapper
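# A hedged sketch of the decorator's effect (the method name "fit" is illustrative):
# a decorated call runs self.startObserving("fit", ...), then the wrapped method,
# then self.endObserving("fit", result) on success, or self.failObserving("fit", e)
# followed by re-raising on failure. Observers see these as start_fit / end_fit /
# fail_fit calls, built from the prefixes defined just below.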
start_prefix = "start_"
end_prefix = "end_"
fail_prefix = "fail_"
class _ObservingImpl:
def __init__(self, op=None, observer=None):
if observer is not None and isinstance(observer, type):
# if we are given a class name, instantiate it
observer = observer()
self._hyperparams = {"op": op, "observer": observer}
def getOp(self):
return self._hyperparams["op"]
def getObserver(self):
return self._hyperparams["observer"]
def _observe(self, methodName, *args, **kwargs):
o = self.getObserver()
if o is not None:
m = getattr(o, methodName, None)
if m is not None:
m(self.getOp(), *args, **kwargs)
def startObserving(self, methodName, *args, **kwargs):
self._observe(f"{start_prefix}{methodName}", *args, **kwargs)
def endObserving(self, methodName, *args, **kwargs):
self._observe(f"{end_prefix}{methodName}", *args, **kwargs)
def failObserving(self, methodName, e: BaseException):
self._observe(f"{fail_prefix}{methodName}", e)
@observe
def transform(self, X, y=None):
        # The @observe decorator already reports the end of this call; invoking
        # endObserving again here would unbalance observers such as
        # LoggingObserver, so just delegate to the wrapped operator.
        return self.getOp().transform(X, y=y)
@observe
def transform_schema(self, s_X):
return self.getOp().transform_schema(s_X)
@observe
def input_schema_fit(self):
return self.getOp().input_schema_fit()
@observe
def predict(self, X, **predict_params):
return self.getOp().predict(X, **predict_params)
@observe
def predict_proba(self, X):
        return self.getOp().predict_proba(X)
@observe
def fit(self, X, y=None, **fit_params):
self._hyperparams["op"] = self.getOp().fit(X, y=y, **fit_params)
return self
_hyperparams_schema = {
"description": "Hyperparameter schema for the identity Higher Order Operator, which wraps another operator and runs it as usual",
"allOf": [
{
"description": "This first object lists all constructor arguments with their types, but omits constraints for conditional hyperparameters",
"type": "object",
"additionalProperties": False,
"relevantToOptimizer": ["op"],
"properties": {
"op": {"laleType": "operator"},
"observer": {"laleType": "Any"},
},
}
],
}
# TODO: can we surface the base op input/output schema?
_input_fit_schema = {
"description": "Input data schema for training identity.",
"type": "object",
"required": ["X"],
"additionalProperties": False,
"properties": {"X": {}},
}
_input_predict_transform_schema = (
{ # TODO: separate predict vs. predict_proba vs. transform
"description": "Input data schema for transformations using identity.",
"type": "object",
"required": ["X", "y"],
"additionalProperties": False,
"properties": {"X": {}, "y": {}},
}
)
_output_schema = { # TODO: separate predict vs. predict_proba vs. transform
"description": "Output data schema for transformations using identity.",
"laleType": "Any",
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """This should functionally be identical to the identity wrapper, except that it calls methods on the observer (if they exist) before and after calls to the underlying wrapper. This is similar to aspect-oriented programming. See also Tee, which provides a simpler method for observing/logging data.""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.lale.identity.html",
"import_from": "lale.lib.lale",
"type": "object",
"tags": {"pre": [], "op": ["estimator", "transformer"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_predict": _input_predict_transform_schema,
"output_predict": _output_schema,
"input_predict_proba": _input_predict_transform_schema,
"output_predict_proba": _output_schema,
"input_transform": _input_predict_transform_schema,
"output_transform": _output_schema,
},
}
Observing = lale.operators.make_operator(_ObservingImpl, _combined_schemas)
lale.docstrings.set_docstrings(Observing)
class LoggingObserver:
"""An observer that logs everything.
This is also useful for debugging, since you can set breakpoints here
"""
_indent: int
def __init__(self):
self._indent = 0
def __getattr__(self, prop: str):
if prop.startswith("_"):
raise AttributeError
if prop.startswith(start_prefix):
suffix = prop[len(start_prefix) :]
def startfun(*args, **kwargs):
if logger.isEnabledFor(logging.INFO):
s: str = " " * self._indent
s += f"[observing({suffix})->] "
s += ",".join((str(x) for x in args))
if len(args) > 0 and len(kwargs) > 0:
s += ", "
for k, v in kwargs.items():
s += f"{k}->{v}"
logger.info(s)
self._indent += 1
return startfun
elif prop.startswith(end_prefix):
suffix = prop[len(end_prefix) :]
def endfun(*args, **kwargs):
assert self._indent > 0
self._indent -= 1
if logger.isEnabledFor(logging.INFO):
s: str = " " * self._indent
s += f"[<-observed({suffix})] "
s += ",".join((str(x) for x in args))
for k, v in kwargs.items():
s += f"{k}->{v}"
logger.info(s)
return endfun
elif prop.startswith(fail_prefix):
suffix = prop[len(fail_prefix) :]
def failfun(*args, **kwargs):
assert self._indent > 0
self._indent -= 1
if logger.isEnabledFor(logging.INFO):
s: str = " " * self._indent
s += f"[!error!<-observed({suffix})] "
s += ",".join((str(x) for x in args))
for k, v in kwargs.items():
s += f"{k}->{v}"
logger.info(s)
return failfun
else:
logger.debug(f"trying to observe {prop}, which is not a start or stop")
return None
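# A hedged end-to-end sketch (the data variable names are illustrative):
#     from lale.lib.sklearn import LogisticRegression
#     observed = Observing(op=LogisticRegression(), observer=LoggingObserver)
#     trained = observed.fit(train_X, train_y)
#     predictions = trained.predict(test_X)
# With logging configured at INFO level, LoggingObserver brackets each call with
# "[observing(...)->]" and "[<-observed(...)]" messages.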
| 7,733 | 32.480519 | 322 |
py
|
lale
|
lale-master/lale/lib/lale/identity_wrapper.py
|
# Copyright 2019-2022 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import lale.docstrings
import lale.operators
class _IdentityWrapperImpl:
# This should be equivalent to:
# the underlying operator:
# IdentityWrapper(op) should behave the same as op
def __init__(self, op=None):
self._hyperparams = {"op": op}
def getOp(self):
op = self._hyperparams["op"]
assert op is not None
return op
def transform(self, X, y=None):
return self.getOp().transform(X, y=y)
def transform_schema(self, s_X):
return self.getOp().transform_schema(s_X)
def input_schema_fit(self):
return self.getOp().input_schema_fit()
def predict(self, X, **predict_params):
return self.getOp().predict(X, **predict_params)
def predict_proba(self, X):
        return self.getOp().predict_proba(X)
def fit(self, X, y=None, **fit_params):
self._hyperparams["op"] = self.getOp().fit(X, y=y, **fit_params)
return self
# def get_feature_names(self, input_features=None):
# if input_features is not None:
# return list(input_features)
# elif self._feature_names is not None:
# return self._feature_names
# else:
# raise ValueError('Can only call get_feature_names on a trained operator. Please call fit to get a trained operator.')
_hyperparams_schema = {
"description": "Hyperparameter schema for the identity Higher Order Operator, which wraps another operator and runs it as usual",
"allOf": [
{
"description": "This first object lists all constructor arguments with their types, but omits constraints for conditional hyperparameters",
"type": "object",
"additionalProperties": False,
"relevantToOptimizer": ["op"],
"properties": {"op": {"laleType": "operator"}},
}
],
}
# TODO: can we surface the base op input/output schema?
_input_fit_schema = {
"description": "Input data schema for training identity.",
"type": "object",
"required": ["X"],
"additionalProperties": False,
"properties": {"X": {}},
}
_input_predict_transform_schema = (
{ # TODO: separate predict vs. predict_proba vs. transform
"description": "Input data schema for transformations using identity.",
"type": "object",
"required": ["X", "y"],
"additionalProperties": False,
"properties": {"X": {}, "y": {}},
}
)
_output_schema = { # TODO: separate predict vs. predict_proba vs. transform
"description": "Output data schema for transformations using identity.",
"laleType": "Any",
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Combined schema for expected data and hyperparameters.",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.lale.identity.html",
"import_from": "lale.lib.lale",
"type": "object",
"tags": {"pre": [], "op": ["estimator", "transformer"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_predict": _input_predict_transform_schema,
"output_predict": _output_schema,
"input_predict_proba": _input_predict_transform_schema,
"output_predict_proba": _output_schema,
"input_transform": _input_predict_transform_schema,
"output_transform": _output_schema,
},
}
IdentityWrapper = lale.operators.make_operator(_IdentityWrapperImpl, _combined_schemas)
lale.docstrings.set_docstrings(IdentityWrapper)
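# A hedged sketch of the intended equivalence (PCA and the data names are
# illustrative choices, not requirements):
#     from lale.lib.sklearn import PCA
#     wrapped = IdentityWrapper(op=PCA())
#     # wrapped.fit(train_X).transform(train_X) should give the same result as
#     # PCA().fit(train_X).transform(train_X), since every call is forwarded.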
| 4,161 | 33.97479 | 151 |
py
|
lale
|
lale-master/lale/lib/lale/smac.py
|
# Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import time
import traceback
import warnings
import numpy as np
from sklearn.metrics import check_scoring, log_loss
from sklearn.model_selection import check_cv, train_test_split
import lale.docstrings
import lale.helpers
import lale.operators
import lale.sklearn_compat
from lale.helpers import cross_val_score_track_trials
from lale.lib._common_schemas import (
schema_best_score_single,
schema_cv,
schema_estimator,
schema_max_opt_time,
schema_scoring_single,
)
from lale.lib.sklearn import LogisticRegression
try:
# Import ConfigSpace and different types of parameters
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=FutureWarning)
from smac.configspace import ConfigurationSpace
# Import SMAC-utilities
from smac.facade.smac_facade import SMAC as orig_SMAC
from smac.scenario.scenario import Scenario
from smac.tae.execute_ta_run import BudgetExhaustedException
from lale.search.lale_smac import ( # pylint:disable=wrong-import-position,ungrouped-imports
get_smac_space,
lale_op_smac_tae,
lale_trainable_op_from_config,
)
smac_installed = True
except ImportError:
smac_installed = False
logger = logging.getLogger(__name__)
class _SMACImpl:
def __init__(
self,
*,
estimator=None,
scoring=None,
best_score=0.0,
cv=5,
handle_cv_failure=False,
max_evals=50,
max_opt_time=None,
lale_num_grids=None,
):
assert smac_installed, """Your Python environment does not have smac installed. You can install it with
pip install smac<=0.10.0
or with
pip install 'lale[full]'"""
self.max_evals = max_evals
if estimator is None:
self.estimator = LogisticRegression()
else:
self.estimator = estimator
self.scoring = scoring
if self.scoring is None:
is_clf = self.estimator.is_classifier()
if is_clf:
self.scoring = "accuracy"
else:
self.scoring = "r2"
self.best_score = best_score
self.handle_cv_failure = handle_cv_failure
self.cv = cv
self.max_opt_time = max_opt_time
self.lale_num_grids = lale_num_grids
self.trials = None
def fit(self, X_train, y_train, **fit_params):
data_schema = lale.helpers.fold_schema(
X_train, y_train, self.cv, self.estimator.is_classifier()
)
self.search_space: ConfigurationSpace = get_smac_space(
self.estimator, lale_num_grids=self.lale_num_grids, data_schema=data_schema
)
# Scenario object
scenario_options = {
"run_obj": "quality", # optimize quality (alternatively runtime)
"runcount-limit": self.max_evals, # maximum function evaluations
"cs": self.search_space, # configuration space
"deterministic": "true",
"abort_on_first_run_crash": False,
}
if self.max_opt_time is not None:
scenario_options["wallclock_limit"] = self.max_opt_time
self.scenario = Scenario(scenario_options)
self.cv = check_cv(
self.cv, y=y_train, classifier=self.estimator.is_classifier()
)
def smac_train_test(trainable, X_train, y_train):
try:
cv_score, logloss, execution_time = cross_val_score_track_trials(
trainable, X_train, y_train, cv=self.cv, scoring=self.scoring
)
logger.debug("Successful trial of SMAC")
except BaseException as e:
# If there is any error in cross validation, use the score based on a random train-test split as the evaluation criterion
if self.handle_cv_failure:
(
X_train_part,
X_validation,
y_train_part,
y_validation,
) = train_test_split(X_train, y_train, test_size=0.20)
start = time.time()
trained = trainable.fit(X_train_part, y_train_part, **fit_params)
scorer = check_scoring(trainable, scoring=self.scoring)
cv_score = scorer(trained, X_validation, y_validation)
execution_time = time.time() - start
y_pred_proba = trained.predict_proba(X_validation)
try:
logloss = log_loss(y_true=y_validation, y_pred=y_pred_proba)
except BaseException:
logloss = 0
logger.debug("Warning, log loss cannot be computed")
else:
logger.debug(f"Error {e} with pipeline:{trainable.to_json()}")
raise e
return cv_score, logloss, execution_time
def f(trainable):
return_dict = {}
try:
score, logloss, execution_time = smac_train_test(
trainable, X_train=X_train, y_train=y_train
)
return_dict = {
"loss": self.best_score - score,
"time": execution_time,
"log_loss": logloss,
}
except BaseException as e:
logger.warning(
f"Exception caught in SMACCV:{type(e)}, {traceback.format_exc()}, SMAC will set a cost_for_crash to MAXINT."
)
raise e
return return_dict["loss"]
try:
smac = orig_SMAC(
scenario=self.scenario,
rng=np.random.RandomState(42),
tae_runner=lale_op_smac_tae(self.estimator, f),
)
incumbent = smac.optimize()
self.trials = smac.get_runhistory()
trainable = lale_trainable_op_from_config(self.estimator, incumbent)
# get the trainable corresponding to the best params and train it on the entire training dataset.
trained = trainable.fit(X_train, y_train, **fit_params)
self._best_estimator = trained
except BudgetExhaustedException:
logger.warning(
"Maximum alloted optimization time exceeded. Optimization exited prematurely"
)
except BaseException as e:
logger.warning(f"Error during optimization: {e}")
self._best_estimator = None
return self
def predict(self, X_eval, **predict_params):
warnings.filterwarnings("ignore")
trained = self._best_estimator
if trained is None:
logger.warning(
"Could not get trained best estimator when predicting using SMACCV:{}, the error is"
)
return None
try:
predictions = trained.predict(X_eval, **predict_params)
except ValueError as e:
logger.warning(
f"ValueError in predicting using SMACCV:{trained}, the error is:{e}"
)
predictions = None
return predictions
def get_trials(self):
"""Returns the trials i.e. RunHistory object.
Returns
-------
smac.runhistory.runhistory.RunHistory
RunHistory of all the trials executed during the optimization i.e. fit method of SMACCV.
"""
return self.trials
def get_pipeline(
self, pipeline_name=None, astype: lale.helpers.astype_type = "lale"
):
if pipeline_name is not None:
raise NotImplementedError("Cannot get pipeline by name yet.")
result = getattr(self, "_best_estimator", None)
if result is None or astype == "lale":
return result
assert astype == "sklearn", astype
# TODO: should this try and return an actual sklearn pipeline?
return result
_hyperparams_schema = {
"allOf": [
{
"type": "object",
"required": [
"estimator",
"max_evals",
"cv",
"handle_cv_failure",
"max_opt_time",
"lale_num_grids",
],
"relevantToOptimizer": ["estimator"],
"additionalProperties": False,
"properties": {
"estimator": schema_estimator,
"scoring": schema_scoring_single,
"best_score": schema_best_score_single,
"cv": schema_cv,
"handle_cv_failure": {
"description": """How to deal with cross validation failure for a trial.
                    If True, continue the trial by doing an 80-20 percent train-validation
split of the dataset input to fit and report the score on the
validation part. If False, terminate the trial with FAIL status.""",
"type": "boolean",
"default": False,
},
"max_evals": {
"type": "integer",
"minimum": 1,
"default": 50,
"description": "Number of trials of SMAC search i.e. runcount_limit of SMAC.",
},
"max_opt_time": schema_max_opt_time,
"lale_num_grids": {
"anyOf": [
{"description": "If not set, keep all grids.", "enum": [None]},
{
"description": "Fraction of grids to keep.",
"type": "number",
"minimum": 0.0,
"exclusiveMinimum": True,
"maximum": 1.0,
"exclusiveMaximum": True,
},
{
"description": "Number of grids to keep.",
"type": "integer",
"minimum": 1,
},
],
"default": None,
},
},
}
]
}
_input_fit_schema = {
"type": "object",
"required": ["X", "y"],
"properties": {
"X": {
"type": "array",
"items": {
"anyOf": [
{"type": "array", "items": {"type": ["number", "string"]}},
{"type": "string"},
]
},
},
"y": {"type": "array", "items": {"type": "number"}},
},
}
_input_predict_schema = {
"type": "object",
"properties": {
"X": {
"type": "array",
"items": {
"anyOf": [
{"type": "array", "items": {"type": ["number", "string"]}},
{"type": "string"},
]
},
}
},
}
_output_predict_schema = {"type": "array", "items": {"type": "number"}}
_combined_schemas = {
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.lale.smac.html",
"import_from": "lale.lib.lale",
"description": """SMAC_, the optimizer used inside auto-weka and auto-sklearn.
.. _SMAC: https://github.com/automl/SMAC3
Examples
--------
>>> from sklearn.metrics import make_scorer, f1_score, accuracy_score
>>> lr = LogisticRegression()
>>> clf = SMAC(estimator=lr, scoring='accuracy', cv=5)
>>> from sklearn import datasets
>>> diabetes = datasets.load_diabetes()
>>> X = diabetes.data[:150]
>>> y = diabetes.target[:150]
>>> trained = clf.fit(X, y)
>>> predictions = trained.predict(X)
Other scoring metrics:
>>> clf = SMAC(estimator=lr, scoring=make_scorer(f1_score, average='macro'), cv=3, max_evals=2)
""",
"type": "object",
"tags": {"pre": [], "op": ["estimator"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
},
}
SMAC = lale.operators.make_operator(_SMACImpl, _combined_schemas)
lale.docstrings.set_docstrings(SMAC)
| 12,786 | 33.937158 | 137 |
py
|
lale
|
lale-master/lale/lib/lale/optimize_last.py
|
# Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import Any, Dict, Optional
import lale.docstrings
import lale.operators
import lale.pretty_print
from lale.lib._common_schemas import schema_estimator
from lale.lib.lale.optimize_suffix import OptimizeSuffix
logger = logging.getLogger(__name__)
logger.setLevel(logging.ERROR)
class _OptimizeLastImpl:
_suffix_optimizer: lale.operators.Operator
def __init__(
self,
estimator: Optional[lale.operators.TrainedOperator] = None,
last_optimizer: Optional[lale.operators.Operator] = None,
optimizer_args=None,
**kwargs
):
if estimator is None:
last_estimator = None
lale_prefix = None
elif isinstance(estimator, lale.operators.TrainedIndividualOp):
lale_prefix = None
last_estimator = estimator.clone()
else:
assert isinstance(estimator, lale.operators.TrainedPipeline)
steps = estimator.steps_list()
num_steps = len(steps)
if num_steps == 0:
last_estimator = None
else:
last_estimator = estimator.steps_list()[-1].clone()
lale_prefix = estimator.remove_last()
self._suffix_optimizer = OptimizeSuffix(
prefix=lale_prefix,
suffix=last_estimator,
optimizer=last_optimizer,
optimizer_args=optimizer_args,
**kwargs
)
def __getattr__(self, item):
return getattr(self._suffix_optimizer.shallow_impl, item)
def fit(self, X_train, y_train=None, **kwargs):
self._suffix_optimizer = self._suffix_optimizer.fit(X_train, y_train, **kwargs)
return self
def predict(self, X_eval, **predict_params):
return self._suffix_optimizer.predict(X_eval, **predict_params)
_hyperparams_schema = {
"allOf": [
{
"type": "object",
"required": [
"estimator",
"last_optimizer",
],
"relevantToOptimizer": [],
"additionalProperties": True,
"properties": {
"estimator": schema_estimator,
"last_optimizer": {
"description": "Lale optimizer.\nIf (default) None is specified, Hyperopt is used.",
"anyOf": [
{"laleType": "operator", "not": {"enum": [None]}},
{"enum": [None]},
],
"default": None,
},
"optimizer_args": {
"description": "Parameters to be passed to the optimizer",
"anyOf": [
{"type": "object"},
{"enum": [None]},
],
"default": None,
},
},
}
]
}
_input_fit_schema = {
"type": "object",
"required": ["X", "y"],
"properties": {"X": {}, "y": {}},
}
_input_predict_schema = {"type": "object", "required": ["X"], "properties": {"X": {}}}
_output_predict_schema: Dict[str, Any] = {}
_combined_schemas = {
"description": """OptimizeLast is a wrapper around other optimizers, which runs the given optimizer
against the suffix, after transforming the data according to the prefix, and then stitches the result together into
a single trained pipeline.
Examples
--------
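A minimal sketch (assuming numeric features ``X`` and labels ``y`` are already
defined, and that the Hyperopt optimizer is a reasonable choice for the last step):

>>> from lale.lib.sklearn import PCA, LogisticRegression
>>> from lale.lib.lale import OptimizeLast, Hyperopt
>>> trained_pipeline = (PCA() >> LogisticRegression()).fit(X, y)
>>> opt = OptimizeLast(estimator=trained_pipeline,
...                    last_optimizer=Hyperopt,
...                    optimizer_args={"max_evals": 5})
>>> trained = opt.fit(X, y)
>>> predictions = trained.predict(X)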
""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.lale.optimize_last.html",
"import_from": "lale.lib.lale",
"type": "object",
"tags": {"pre": [], "op": ["estimator"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
},
}
OptimizeLast = lale.operators.make_operator(_OptimizeLastImpl, _combined_schemas)
lale.docstrings.set_docstrings(OptimizeLast)
| 4,559 | 31.571429 | 115 |
py
|
lale
|
lale-master/lale/lib/lale/no_op.py
|
# Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import lale.docstrings
import lale.operators
class _NoOpImpl:
def __init__(self):
pass
def transform(self, X):
return X
def transform_schema(self, s_X):
"""Used internally by Lale for type-checking downstream operators."""
return s_X
_hyperparams_schema = {
"allOf": [
{
"description": "This first sub-object lists all constructor arguments with their "
"types, one at a time, omitting cross-argument constraints, if any.",
"type": "object",
"additionalProperties": False,
"relevantToOptimizer": [],
"properties": {},
}
]
}
_input_transform_schema = {
"type": "object",
"required": ["X"],
"additionalProperties": False,
"properties": {
"X": {
"description": "Features; no restrictions on data type.",
"laleType": "Any",
}
},
}
_output_transform_schema = {
"description": "Features; no restrictions on data type.",
"laleType": "Any",
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Passes the data through unchanged.",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.lale.no_op.html",
"import_from": "lale.lib.lale",
"type": "object",
"tags": {"pre": [], "op": ["transformer"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_transform": _input_transform_schema,
"output_transform": _output_transform_schema,
},
}
NoOp = lale.operators.make_operator(_NoOpImpl, _combined_schemas)
lale.docstrings.set_docstrings(NoOp)
| 2,267 | 27.708861 | 98 |
py
|
lale
|
lale-master/lale/lib/lale/grid_search_cv.py
|
# Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Dict, Optional
import lale.docstrings
import lale.helpers
import lale.lib.sklearn
import lale.operators
import lale.search.lale_grid_search_cv
import lale.sklearn_compat
from lale.lib._common_schemas import (
schema_cv,
schema_estimator,
schema_max_opt_time,
schema_scoring_single,
)
from .observing import Observing
func_timeout_installed = False
try:
from func_timeout import FunctionTimedOut, func_timeout
func_timeout_installed = True
except ImportError:
pass
class _GridSearchCVImpl:
_best_estimator: Optional[lale.operators.TrainedOperator] = None
def __init__(
self,
*,
estimator=None,
scoring=None,
cv=5,
verbose=0,
n_jobs=None,
lale_num_samples=None,
lale_num_grids=None,
param_grid=None,
pgo=None,
observer=None,
max_opt_time=None,
):
if observer is not None and isinstance(observer, type):
# if we are given a class name, instantiate it
observer = observer()
if scoring is None:
if estimator is None:
is_clf = True # Since we will use LogisticRegression
else:
is_clf = estimator.is_classifier()
if is_clf:
scoring = "accuracy"
else:
scoring = "r2"
self._hyperparams = {
"estimator": estimator,
"cv": cv,
"verbose": verbose,
"scoring": scoring,
"n_jobs": n_jobs,
"lale_num_samples": lale_num_samples,
"lale_num_grids": lale_num_grids,
"pgo": pgo,
"hp_grid": param_grid,
"observer": observer,
"max_opt_time": max_opt_time,
}
def fit(self, X, y, **fit_params):
if self._hyperparams["estimator"] is None:
op = lale.lib.sklearn.LogisticRegression
else:
op = self._hyperparams["estimator"]
observed_op = op
obs = self._hyperparams["observer"]
# We always create an observer.
# Otherwise, we can have a problem with PlannedOperators
# (that are not trainable):
# GridSearchCV checks if a fit method is present before
# configuring the operator, and our planned operators
# don't have a fit method
# Observing always has a fit method, and so solves this problem.
observed_op = Observing(op=op, observer=obs)
hp_grid = self._hyperparams["hp_grid"]
data_schema = {}
try:
data_schema = lale.helpers.fold_schema(
X, y, self._hyperparams["cv"], op.is_classifier()
)
except BaseException: # Not all data types are handled by fold_schema
pass
if hp_grid is None:
hp_grid = lale.search.lale_grid_search_cv.get_parameter_grids(
observed_op,
num_samples=self._hyperparams["lale_num_samples"],
num_grids=self._hyperparams["lale_num_grids"],
pgo=self._hyperparams["pgo"],
data_schema=data_schema,
)
else:
# if hp_grid is specified manually, we need to add a level of nesting
# since we are wrapping it in an observer
if isinstance(hp_grid, list):
hp_grid = lale.helpers.nest_all_HPparams("op", hp_grid)
else:
assert isinstance(hp_grid, dict)
hp_grid = lale.helpers.nest_HPparams("op", hp_grid)
if not hp_grid and isinstance(op, lale.operators.IndividualOp):
hp_grid = [
lale.search.lale_grid_search_cv.get_defaults_as_param_grid(observed_op) # type: ignore
]
be: lale.operators.TrainableOperator
if hp_grid:
if obs is not None:
impl = observed_op._impl # type: ignore
impl.startObserving(
"optimize",
hp_grid=hp_grid,
op=op,
num_samples=self._hyperparams["lale_num_samples"],
num_grids=self._hyperparams["lale_num_grids"],
pgo=self._hyperparams["pgo"],
)
try:
self.grid = lale.search.lale_grid_search_cv.get_lale_gridsearchcv_op(
observed_op,
hp_grid,
cv=self._hyperparams["cv"],
verbose=self._hyperparams["verbose"],
scoring=self._hyperparams["scoring"],
n_jobs=self._hyperparams["n_jobs"],
)
if self._hyperparams["max_opt_time"] is not None:
if func_timeout_installed:
try:
func_timeout(
self._hyperparams["max_opt_time"], self.grid.fit, (X, y)
)
except FunctionTimedOut as exc:
raise BaseException("GridSearchCV timed out.") from exc
else:
raise ValueError(
f"""max_opt_time is set to {self._hyperparams["max_opt_time"]} but the Python package
required for timeouts is not installed. Please install `func_timeout` using `pip install func_timeout`
or set max_opt_time to None."""
)
else:
self.grid.fit(X, y, **fit_params)
be = self.grid.best_estimator_
except BaseException as e:
if obs is not None:
assert isinstance(obs, Observing) # type: ignore
impl = observed_op.shallow_impl # type: ignore
impl.failObserving("optimize", e)
raise
impl = None
if isinstance(be, lale.operators.Operator):
impl = be._impl_instance()
if impl is not None:
assert isinstance(be, Observing) # type: ignore
be = impl.getOp()
if obs is not None:
obs_impl = observed_op._impl # type: ignore
obs_impl.endObserving("optimize", best=be)
else:
assert isinstance(op, lale.operators.TrainableOperator)
be = op
self._best_estimator = be.fit(X, y, **fit_params)
return self
def predict(self, X, **predict_params):
assert self._best_estimator is not None
return self._best_estimator.predict(X, **predict_params)
def get_pipeline(
self, pipeline_name=None, astype: lale.helpers.astype_type = "lale"
):
if pipeline_name is not None:
raise NotImplementedError("Cannot get pipeline by name yet.")
result = self._best_estimator
if result is None or astype == "lale":
return result
assert astype == "sklearn", astype
# TODO: should this try and return an actual sklearn pipeline?
return result
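# A usage sketch (pipeline and argument values are hypothetical, assuming PCA and
# LogisticRegression are imported from lale.lib.sklearn): exhaustively tune a small
# planned pipeline while keeping only part of the generated grids to bound the search:
#   GridSearchCV(estimator=PCA >> LogisticRegression, cv=3, lale_num_grids=5).fit(X, y)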
_hyperparams_schema = {
"allOf": [
{
"type": "object",
"required": [
"estimator",
"cv",
"verbose",
"scoring",
"n_jobs",
"lale_num_samples",
"lale_num_grids",
"pgo",
"max_opt_time",
],
"relevantToOptimizer": ["estimator"],
"additionalProperties": False,
"properties": {
"estimator": schema_estimator,
"scoring": schema_scoring_single,
"cv": schema_cv,
"verbose": {
"description": "Controls the verbosity: the higher, the more messages.",
"type": "integer",
"default": 0,
},
"n_jobs": {
"description": "Number of jobs to run in parallel.",
"anyOf": [
{
"description": "1 unless in joblib.parallel_backend context.",
"enum": [None],
},
{"description": "Use all processors.", "enum": [-1]},
{
"description": "Number of jobs to run in parallel.",
"type": "integer",
"minimum": 1,
},
],
"default": None,
},
"lale_num_samples": {
"description": "How many samples to draw when discretizing a continuous hyperparameter.",
"anyOf": [
{"type": "integer", "minimum": 1},
{
"description": "lale.search.lale_grid_search_cv.DEFAULT_SAMPLES_PER_DISTRIBUTION",
"enum": [None],
},
],
"default": None,
},
"lale_num_grids": {
"description": "How many top-level disjuncts to explore.",
"anyOf": [
{"description": "If not set, keep all grids.", "enum": [None]},
{
"description": "Fraction of grids to keep.",
"type": "number",
"minimum": 0.0,
"exclusiveMinimum": True,
"maximum": 1.0,
"exclusiveMaximum": True,
},
{
"description": "Number of grids to keep.",
"type": "integer",
"minimum": 1,
},
],
"default": None,
},
"param_grid": {
"anyOf": [
{"enum": [None], "description": "Generated automatically."},
{
"description": "Dictionary of hyperparameter ranges in the grid."
},
],
"default": None,
},
"pgo": {
"anyOf": [{"description": "lale.search.PGO"}, {"enum": [None]}],
"default": None,
},
"observer": {
"laleType": "Any",
"default": None,
"description": "a class or object with callbacks for observing the state of the optimization",
},
"max_opt_time": schema_max_opt_time,
},
}
]
}
_input_fit_schema = {
"type": "object",
"required": ["X", "y"],
"properties": {"X": {}, "y": {}},
}
_input_predict_schema = {"type": "object", "required": ["X"], "properties": {"X": {}}}
_output_predict_schema: Dict[str, Any] = {}
_combined_schemas = {
"description": """GridSearchCV_ performs an exhaustive search over a discretized space.
.. _GridSearchCV: https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.GridSearchCV.html""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.lale.hyperopt_classifier.html",
"import_from": "lale.lib.lale",
"type": "object",
"tags": {"pre": [], "op": ["estimator"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
},
}
GridSearchCV = lale.operators.make_operator(_GridSearchCVImpl, _combined_schemas)
lale.docstrings.set_docstrings(GridSearchCV)
| 12,631 | 36.262537 | 130 |
py
|
lale
|
lale-master/lale/lib/lale/auto_pipeline.py
|
# Copyright 2020 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import warnings
from typing import Optional
import hyperopt
import pandas as pd
import sklearn.metrics
import sklearn.model_selection
import lale.docstrings
import lale.helpers
import lale.operators
from lale.lib._common_schemas import (
schema_best_score_single,
schema_cv,
schema_max_opt_time,
schema_scoring_single,
)
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=FutureWarning)
try:
import xgboost # noqa: F401
xgboost_installed = True
except ImportError:
xgboost_installed = False
try:
import lightgbm.sklearn # noqa: F401
lightgbm_installed = True
except ImportError:
lightgbm_installed = False
def auto_prep(X):
from lale.lib.lale import ConcatFeatures, Project, categorical
from lale.lib.sklearn import OneHotEncoder, SimpleImputer
n_cols = X.shape[1]
n_cats = len(categorical()(X))
prep_num = SimpleImputer(strategy="mean")
prep_cat = SimpleImputer(strategy="most_frequent") >> OneHotEncoder(
handle_unknown="ignore"
)
if n_cats == 0:
result = prep_num
elif n_cats == n_cols:
result = prep_cat
else:
result = (
(
Project(columns={"type": "number"}, drop_columns=categorical())
>> prep_num
)
& (Project(columns=categorical()) >> prep_cat)
) >> ConcatFeatures
return result
def auto_gbt(prediction_type):
if prediction_type == "regression":
if xgboost_installed:
from lale.lib.xgboost import XGBRegressor
return XGBRegressor(verbosity=0)
elif lightgbm_installed:
from lale.lib.lightgbm import LGBMRegressor
return LGBMRegressor()
else:
from lale.lib.sklearn import GradientBoostingRegressor
return GradientBoostingRegressor()
else:
assert prediction_type in ["binary", "multiclass", "classification"]
if xgboost_installed:
from lale.lib.xgboost import XGBClassifier
return XGBClassifier(verbosity=0)
elif lightgbm_installed:
from lale.lib.lightgbm import LGBMClassifier
return LGBMClassifier()
else:
from lale.lib.sklearn import GradientBoostingClassifier
return GradientBoostingClassifier()
class _AutoPipelineImpl:
_summary: Optional[pd.DataFrame]
def __init__(
self,
*,
prediction_type="classification",
scoring=None,
best_score=0.0,
verbose=False,
max_evals=100,
max_opt_time=600.0,
max_eval_time=120.0,
cv=5,
):
self.prediction_type = prediction_type
self.max_opt_time = max_opt_time
self.max_eval_time = max_eval_time
self.max_evals = max_evals
self.verbose = verbose
if scoring is None:
scoring = "r2" if prediction_type == "regression" else "accuracy"
self.scoring = scoring
self._scorer = sklearn.metrics.get_scorer(scoring)
self.best_score = best_score
self._summary = None
self.cv = cv
def _try_and_add(self, name, trainable, X, y):
assert name not in self._pipelines
if self._name_of_best is not None:
if time.time() > self._start_fit + self.max_opt_time:
return
with warnings.catch_warnings():
warnings.simplefilter("ignore")
cv = sklearn.model_selection.check_cv(
cv=self.cv, classifier=(self.prediction_type != "regression")
)
(
cv_score,
logloss,
execution_time,
) = lale.helpers.cross_val_score_track_trials(
trainable, X, y, self.scoring, cv
)
loss = self.best_score - cv_score
if self._name_of_best is None or (
self._summary is None or loss < self._summary.at[self._name_of_best, "loss"]
):
self._name_of_best = name
record = {
"name": name,
"loss": loss,
"time": execution_time,
"log_loss": logloss,
"status": hyperopt.STATUS_OK,
}
singleton_summary = pd.DataFrame.from_records([record], index="name")
if self._summary is None:
self._summary = singleton_summary
else:
self._summary = pd.concat([self._summary, singleton_summary])
if name == self._name_of_best:
self._pipelines[name] = trainable.fit(X, y)
else:
self._pipelines[name] = trainable
def _fit_dummy(self, X, y):
from lale.lib.sklearn import DummyClassifier, DummyRegressor
if self.prediction_type == "regression":
trainable = DummyRegressor()
else:
trainable = DummyClassifier()
self._try_and_add("dummy", trainable, X, y)
def _fit_gbt_num(self, X, y):
from lale.lib.lale import Project
from lale.lib.sklearn import SimpleImputer
gbt = auto_gbt(self.prediction_type)
trainable = (
Project(columns={"type": "number"}) >> SimpleImputer(strategy="mean") >> gbt
)
self._try_and_add("gbt_num", trainable, X, y)
def _fit_gbt_all(self, X, y):
prep = auto_prep(X)
gbt = auto_gbt(self.prediction_type)
trainable = prep >> gbt
self._try_and_add("gbt_all", trainable, X, y)
def _fit_hyperopt(self, X, y):
from lale.lib.lale import Hyperopt, NoOp
from lale.lib.sklearn import (
PCA,
DecisionTreeClassifier,
DecisionTreeRegressor,
KNeighborsClassifier,
KNeighborsRegressor,
MinMaxScaler,
RandomForestClassifier,
RandomForestRegressor,
RobustScaler,
SelectKBest,
SGDClassifier,
SGDRegressor,
StandardScaler,
)
remaining_time = self.max_opt_time - (time.time() - self._start_fit)
if remaining_time <= 0:
return
prep = auto_prep(X)
scale = MinMaxScaler | StandardScaler | RobustScaler | NoOp
reduce_dims = PCA | SelectKBest | NoOp
gbt = auto_gbt(self.prediction_type)
if self.prediction_type == "regression":
estim_trees = gbt | DecisionTreeRegressor | RandomForestRegressor
estim_notree = SGDRegressor | KNeighborsRegressor
else:
estim_trees = gbt | DecisionTreeClassifier | RandomForestClassifier
estim_notree = SGDClassifier | KNeighborsClassifier
model_trees = reduce_dims >> estim_trees
model_notree = scale >> reduce_dims >> estim_notree
planned = prep >> (model_trees | model_notree)
prior_evals = self._summary.shape[0] if self._summary is not None else 0
trainable = Hyperopt(
estimator=planned,
max_evals=self.max_evals - prior_evals,
scoring=self.scoring,
best_score=self.best_score,
max_opt_time=remaining_time,
max_eval_time=self.max_eval_time,
verbose=self.verbose,
show_progressbar=False,
cv=self.cv,
)
trained = trainable.fit(X, y)
# The static types are not currently smart enough to verify
# that the conditionally defined summary method is actually present
# But it must be, since the hyperopt impl type provides it
summary: pd.DataFrame = trained.summary() # type: ignore
if list(summary.status) == ["new"]:
return # only one trial and that one timed out
best_trial = trained._impl._trials.best_trial
if "loss" in best_trial["result"]:
if (
self._summary is None
or best_trial["result"]["loss"]
< self._summary.at[self._name_of_best, "loss"]
):
self._name_of_best = f'p{best_trial["tid"]}'
if self._summary is None:
self._summary = summary
else:
self._summary = pd.concat([self._summary, summary])
for name in summary.index:
assert name not in self._pipelines
if summary.at[name, "status"] == hyperopt.STATUS_OK:
self._pipelines[name] = trained.get_pipeline(name)
def fit(self, X, y):
self._start_fit = time.time()
self._name_of_best = None
self._summary = None
self._pipelines = {}
self._fit_dummy(X, y)
self._fit_gbt_num(X, y)
self._fit_gbt_all(X, y)
self._fit_hyperopt(X, y)
return self
def predict(self, X, **predict_params):
best_pipeline = self._pipelines[self._name_of_best]
result = best_pipeline.predict(X, **predict_params)
return result
def summary(self):
"""Table summarizing the trial results (name, tid, loss, time, log_loss, status).
Returns
-------
result : DataFrame"""
if self._summary is not None:
self._summary.sort_values(by="loss", inplace=True)
return self._summary
def get_pipeline(
self,
pipeline_name: Optional[str] = None,
astype: lale.helpers.astype_type = "lale",
):
"""Retrieve one of the trials.
Parameters
----------
pipeline_name : union type, default None
- string
Key for table returned by summary(), return a trainable pipeline.
- None
When not specified, return the best trained pipeline found.
astype : 'lale' or 'sklearn', default 'lale'
Type of resulting pipeline.
Returns
-------
result : Trained operator if best, trainable operator otherwise."""
if pipeline_name is None:
pipeline_name = self._name_of_best
result = self._pipelines[pipeline_name]
if result is None or astype == "lale":
return result
assert astype == "sklearn", astype
return result.export_to_sklearn_pipeline()
_hyperparams_schema = {
"allOf": [
{
"type": "object",
"required": [
"prediction_type",
"scoring",
"max_evals",
"max_opt_time",
"max_eval_time",
"cv",
],
"relevantToOptimizer": [],
"additionalProperties": False,
"properties": {
"prediction_type": {
"description": "The kind of learning problem.",
"enum": ["binary", "multiclass", "classification", "regression"],
"default": "classification",
},
"scoring": schema_scoring_single,
"best_score": schema_best_score_single,
"verbose": {
"description": """Whether to print errors from each of the trials if any.
This is also logged using logger.warning in Hyperopt.""",
"type": "boolean",
"default": False,
},
"max_evals": {
"description": "Number of trials of Hyperopt search.",
"type": "integer",
"minimum": 1,
"default": 100,
},
"max_opt_time": {
**schema_max_opt_time,
"default": 600.0,
},
"max_eval_time": {
"description": "Maximum time in seconds for each evaluation.",
"anyOf": [
{"type": "number", "minimum": 0.0, "exclusiveMinimum": True},
{"description": "No runtime bound.", "enum": [None]},
],
"default": 120.0,
},
"cv": schema_cv,
},
}
]
}
_input_fit_schema = {
"type": "object",
"required": ["X", "y"],
"properties": {
"X": {
"type": "array",
"items": {"type": "array", "items": {"laleType": "Any"}},
},
"y": {
"anyOf": [
{"type": "array", "items": {"type": "number"}},
{"type": "array", "items": {"type": "string"}},
{"type": "array", "items": {"type": "boolean"}},
]
},
},
}
_input_predict_schema = {
"type": "object",
"required": ["X"],
"properties": {
"X": {"type": "array", "items": {"type": "array", "items": {"laleType": "Any"}}}
},
}
_output_predict_schema = {
"anyOf": [
{"type": "array", "items": {"type": "number"}},
{"type": "array", "items": {"type": "string"}},
{"type": "array", "items": {"type": "boolean"}},
]
}
_combined_schemas = {
"description": """Automatically find a pipeline for a dataset.
This is a high-level entry point to get an initial trained pipeline
without having to specify your own planned pipeline first. It is
designed to be simple at the expense of not offering much control.
For an example, see `demo_auto_pipeline.ipynb`_.
.. _`demo_auto_pipeline.ipynb`: https://nbviewer.jupyter.org/github/IBM/lale/blob/master/examples/demo_auto_pipeline.ipynb
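A minimal usage sketch (assuming ``train_X``, ``train_y``, and ``test_X`` are
already loaded as feature and label arrays):

>>> from lale.lib.lale import AutoPipeline
>>> auto = AutoPipeline(prediction_type="classification", max_evals=5, max_opt_time=60.0)
>>> trained = auto.fit(train_X, train_y)
>>> predictions = trained.predict(test_X)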
""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.lale.auto_pipeline.html",
"import_from": "lale.lib.lale",
"type": "object",
"tags": {"pre": [], "op": ["estimator"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
},
}
AutoPipeline = lale.operators.make_operator(_AutoPipelineImpl, _combined_schemas)
lale.docstrings.set_docstrings(AutoPipeline)
| 14,599 | 32.87471 | 122 |
py
|
lale
|
lale-master/lale/lib/lale/sample_based_voting.py
|
# Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pandas as pd
import lale.docstrings
import lale.operators
class _SampleBasedVotingImpl:
def __init__(self, hyperparams=None):
self._hyperparams = hyperparams
self.end_index_list = None
def set_meta_data(self, meta_data_dict):
if "end_index_list" in meta_data_dict.keys():
self.end_index_list = meta_data_dict["end_index_list"]
def transform(self, X, end_index_list=None):
if end_index_list is None:
end_index_list = (
self.end_index_list
) # in case the end_index_list was set as meta_data
if end_index_list is None:
return X
else:
voted_labels = []
prev_index = 0
            if not isinstance(X, np.ndarray):
                if isinstance(X, list):
                    X = np.array(X)
                elif isinstance(X, pd.DataFrame):
                    X = X.to_numpy()  # as_matrix() was removed from pandas
            for index in end_index_list:
                labels = X[prev_index:index]
                values, counts = np.unique(labels, return_counts=True)
                ind = np.argmax(
                    counts
                )  # If two labels are tied for the majority, this picks the first one.
                voted_labels.append(values[ind])
                prev_index = index
            return np.array(voted_labels)
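# A small worked sketch of the voting behavior (values are hypothetical): with
# X = ["a", "a", "b", "b", "b", "c"] and end_index_list = [2, 5, 6], transform
# groups X into X[0:2], X[2:5], and X[5:6] and returns array(["a", "b", "c"]),
# i.e. one majority label per group.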
_hyperparams_schema = {
"allOf": [
{
"description": "This first object lists all constructor arguments with their types, but omits constraints for conditional hyperparameters",
"type": "object",
"additionalProperties": False,
"relevantToOptimizer": [],
"properties": {},
}
]
}
_input_transform_schema = {
"description": "Input data schema for transformations using NoOp.",
"type": "object",
"required": ["X"],
"additionalProperties": False,
"properties": {
"X": {
"description": "Labels from the previous component in a pipeline.",
"type": "array",
"items": {"laleType": "Any"},
},
"end_index_list": {
"laleType": "Any",
"description": "For each output label to be produced, end_index_list is supposed to contain the index of the last element corresponding to the original input.",
},
},
}
_output_transform_schema = {"type": "array", "items": {"laleType": "Any"}}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """Treat the input as labels and use the end_index_list to produce labels using voting. Note that here, X contains the label and no y is accepted.""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.lale.sample_based_voting.html",
"import_from": "lale.lib.lale",
"type": "object",
"tags": {"pre": [], "op": ["transformer"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_transform": _input_transform_schema,
"output_transform": _output_transform_schema,
},
}
SampleBasedVoting = lale.operators.make_operator(
_SampleBasedVotingImpl, _combined_schemas
)
lale.docstrings.set_docstrings(SampleBasedVoting)
| 3,790 | 33.779817 | 172 |
py
|
lale
|
lale-master/lale/lib/lale/both.py
|
# Copyright 2019-2022 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import lale.docstrings
import lale.operators
from lale.lib.lale.no_op import NoOp
class _BothImpl:
# This should be equivalent to:
# (op1 >> op2) | (op2 >> op1)
# but with a smaller search space
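    # For example (a sketch, assuming MinMaxScaler, PCA, and LogisticRegression are
    # imported from lale.lib.sklearn):
    #   Both(op1=MinMaxScaler(), op2=PCA()) >> LogisticRegression()
    # lets an optimizer choose the order of the two preprocessing steps through the
    # single categorical hyperparameter `order`, instead of searching the two
    # alternative pipelines separately.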
def __init__(self, op1, op2, order: str = "forward"):
self._hyperparams = {"order": order, "op1": op1, "op2": op2}
self._pipeline = None
def getPipeline(self):
if self._pipeline is not None:
return self._pipeline
params = self._hyperparams
op1 = params.get("op1", None)
if op1 is None:
op1 = NoOp
op2 = params.get("op2", None)
if op2 is None:
op2 = NoOp
if params["order"] == "backward":
self._pipeline = op2 >> op1
else:
self._pipeline = op1 >> op2
return self._pipeline
def transform(self, X, y=None):
return self.getPipeline().transform(X, y=y)
# def transform_schema(self, s_X):
# return self.getPipeline().transform_schema(s_X)
def predict(self, X, **predict_params):
return self.getPipeline().predict(X, **predict_params)
def predict_proba(self, X):
return self.getPipeline().predict_proba(X)
def fit(self, X, y=None, **fit_params):
self._pipeline = self.getPipeline().fit(X, y=y, **fit_params)
return self
# def get_feature_names(self, input_features=None):
# if input_features is not None:
# return list(input_features)
# elif self._feature_names is not None:
# return self._feature_names
# else:
# raise ValueError('Can only call get_feature_names on a trained operator. Please call fit to get a trained operator.')
_hyperparams_schema = {
"description": "Hyperparameter schema for the both Higher Order Operator, which wraps another operator and runs it a given number of times",
"allOf": [
{
"description": "This first object lists all constructor arguments with their types, but omits constraints for conditional hyperparameters",
"type": "object",
"additionalProperties": False,
"relevantToOptimizer": ["order", "op1", "op2"],
"properties": {
"order": {"enum": ["forward", "backward"], "default": "forward"},
"op1": {"laleType": "operator"},
"op2": {"laleType": "operator"},
},
}
],
}
# TODO: can we surface the base op input/output schema?
_input_fit_schema = {
"description": "Input data schema for training both.",
"type": "object",
"required": ["X"],
"additionalProperties": False,
"properties": {"X": {}},
}
_input_predict_transform_schema = (
{ # TODO: separate predict vs. predict_proba vs. transform
"description": "Input data schema for transformations using both.",
"type": "object",
"required": ["X", "y"],
"additionalProperties": False,
"properties": {"X": {}, "y": {}},
}
)
_output_schema = { # TODO: separate predict vs. predict_proba vs. transform
"description": "Output data schema for transformations using both.",
"laleType": "Any",
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Combined schema for expected data and hyperparameters.",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.lale.both.html",
"import_from": "lale.lib.lale",
"type": "object",
"tags": {"pre": [], "op": ["estimator", "transformer"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_predict": _input_predict_transform_schema,
"output_predict": _output_schema,
"input_predict_proba": _input_predict_transform_schema,
"output_predict_proba": _output_schema,
"input_transform": _input_predict_transform_schema,
"output_transform": _output_schema,
},
}
Both = lale.operators.make_operator(_BothImpl, _combined_schemas)
lale.docstrings.set_docstrings(Both)
| 4,710 | 34.421053 | 151 |
py
|
lale
|
lale-master/lale/lib/lale/topk_voting_classifier.py
|
# Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import logging
from typing import Any, Dict, Optional
from hyperopt import STATUS_OK
import lale.docstrings
import lale.helpers
import lale.operators
from lale.lib.lale import Hyperopt
logger = logging.getLogger(__name__)
class _TopKVotingClassifierImpl:
args_to_optimizer: Dict[str, Any]
def __init__(
self,
estimator=None,
optimizer=None,
args_to_optimizer: Optional[Dict[str, Any]] = None,
k=10,
):
self.estimator = estimator
if self.estimator is None:
raise ValueError("Estimator is a required argument.")
if optimizer is None:
self.optimizer = Hyperopt
else:
self.optimizer = optimizer
if args_to_optimizer is None:
self.args_to_optimizer = {}
else:
self.args_to_optimizer = args_to_optimizer
self.k = k
def fit(self, X_train, y_train, **fit_params):
from lale.lib.sklearn import VotingClassifier
optimizer_instance = self.optimizer(
estimator=self.estimator, **self.args_to_optimizer
)
trained_optimizer1 = optimizer_instance.fit(X_train, y_train, **fit_params)
results = trained_optimizer1.summary()
results = results[
results["status"] == STATUS_OK
] # Consider only successful trials
results = results.sort_values(by=["loss"], axis=0)
k = min(self.k, results.shape[0])
top_k_pipelines = results.iloc[0:k]
pipeline_tuples = []
for pipeline_name in top_k_pipelines.index:
pipeline_instance = trained_optimizer1.get_pipeline(pipeline_name)
pipeline_tuple = (pipeline_name, pipeline_instance)
pipeline_tuples.append(pipeline_tuple)
voting = VotingClassifier(estimators=pipeline_tuples)
args_to_optimizer = copy.copy(self.args_to_optimizer)
try:
del args_to_optimizer["max_evals"]
except KeyError:
pass
args_to_optimizer[
"max_evals"
] = 1 # Currently, voting classifier has no useful hyperparameters to tune.
optimizer_instance2 = self.optimizer(estimator=voting, **args_to_optimizer)
trained_optimizer2 = optimizer_instance2.fit(X_train, y_train, **fit_params)
self._best_estimator = trained_optimizer2.get_pipeline()
return self
def predict(self, X_eval, **predict_params):
import warnings
warnings.filterwarnings("ignore")
if self._best_estimator is None:
raise ValueError(
"Can not predict as the best estimator is None. Either an attempt to call `predict` "
"before calling `fit` or all the trials during `fit` failed."
)
trained = self._best_estimator
try:
predictions = trained.predict(X_eval, **predict_params)
except ValueError as e:
logger.warning(
f"ValueError in predicting using Hyperopt:{trained}, the error is:{e}"
)
predictions = None
return predictions
def get_pipeline(
self, pipeline_name=None, astype: lale.helpers.astype_type = "lale"
):
"""Retrieve one of the trials.
Parameters
----------
pipeline_name : None
astype : 'lale' or 'sklearn', default 'lale'
Type of resulting pipeline.
Returns
-------
result : Trained operator if best, trainable operator otherwise."""
assert pipeline_name is None
result = self._best_estimator
if result is None or astype == "lale":
return result
assert astype == "sklearn", astype
assert isinstance(result, lale.operators.BasePipeline)
return result.export_to_sklearn_pipeline()
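# A usage sketch (pipeline and argument values are hypothetical, assuming PCA,
# LogisticRegression, and KNeighborsClassifier are imported from lale.lib.sklearn):
# build a voting ensemble from the 3 best pipelines that Hyperopt finds:
#   ensemble = TopKVotingClassifier(
#       estimator=PCA >> (LogisticRegression | KNeighborsClassifier),
#       args_to_optimizer={"max_evals": 20, "cv": 3},
#       k=3,
#   ).fit(train_X, train_y)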
_hyperparams_schema = {
"allOf": [
{
"type": "object",
"required": ["estimator"],
"relevantToOptimizer": [],
"additionalProperties": False,
"properties": {
"estimator": {
"description": "Planned Lale individual operator or pipeline.",
"anyOf": [
{"laleType": "operator", "not": {"enum": [None]}},
{"enum": [None]},
],
"default": None,
},
"optimizer": {
"description": """Optimizer class to be used during the two stages of optimization.
Default of None uses Hyperopt internally. Currently, only Hyperopt is supported as an optimizer.""",
"anyOf": [
{"laleType": "operator", "not": {"enum": [None]}},
{"enum": [None]},
],
"default": None,
},
"args_to_optimizer": {
"description": """Dictionary of keyword arguments required to be used for the given optimizer
as applicable for the given task. For example, max_evals, cv, scoring etc. for Hyperopt.
If None, default values for the optimizer would be used.""",
"anyOf": [
{"type": "object"}, # Python dictionary
{"enum": [None]},
],
"default": None,
},
"k": {
"description": """Number of top pipelines to be used for the voting ensemble. If the number of
successful trials of the optimizer are less than k, the ensemble will use
only successful trials.""",
"type": "integer",
"minimum": 1,
"default": 10,
},
},
}
]
}
_input_fit_schema = {
"type": "object",
"required": ["X", "y"],
"properties": {"X": {}, "y": {}},
}
_input_predict_schema = {"type": "object", "required": ["X"], "properties": {"X": {}}}
_output_predict_schema: Dict[str, Any] = {}
_combined_schemas = {
"description": """This operator creates a voting ensemble from top k performing pipelines from the given planned pipeline.""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.lale.topk_voting_classifier.html",
"import_from": "lale.lib.lale",
"type": "object",
"tags": {"pre": [], "op": ["estimator"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
},
}
TopKVotingClassifier = lale.operators.make_operator(
_TopKVotingClassifierImpl, _combined_schemas
)
lale.docstrings.set_docstrings(TopKVotingClassifier)
| 7,513 | 35.653659 | 130 |
py
|
lale
|
lale-master/lale/lib/lale/tee.py
|
# Copyright 2021 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import lale.docstrings
import lale.operators
class _TeeImpl:
def __init__(self, listener=None):
self._listener = listener
def transform(self, X, y=None):
if self._listener is not None:
self._listener(X, y)
return X
def transform_schema(self, s_X):
"""Used internally by Lale for type-checking downstream operators."""
return s_X
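# A usage sketch (the listener shown is hypothetical, assuming LogisticRegression is
# imported from lale.lib.sklearn): log the shape of the data flowing through a
# pipeline without changing it, e.g.
#   Tee(listener=lambda X, y: print(getattr(X, "shape", None))) >> LogisticRegression()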
_hyperparams_schema = {
"allOf": [
{
"description": "This first sub-object lists all constructor arguments with their "
"types, one at a time, omitting cross-argument constraints, if any.",
"type": "object",
"additionalProperties": False,
"relevantToOptimizer": [],
"properties": {
"listener": {
"anyOf": [
{
"description": "A callable (lambda, method, class that implements __call__, ...)"
"that accepts to arguments: X and y (which may be None). When transform"
"is called on this operator, the callable will be passed the given"
"X and y values",
"laleType": "callable",
},
{
"description": "No listener. Causes this operator to behave like NoOp.",
"enum": [None],
},
]
}
},
}
]
}
_input_transform_schema = {
"type": "object",
"required": ["X"],
"additionalProperties": False,
"properties": {
"X": {
"description": "Features; no restrictions on data type.",
"laleType": "Any",
}
},
}
_output_transform_schema = {
"description": "Features; no restrictions on data type.",
"laleType": "Any",
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Passes the data through unchanged (like NoOp), first giving it to an listener. Useful for debugging and logging."
"Similar to Observing, which provides a higher order operator with more comprehensive abilities.",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.lale.tee.html",
"import_from": "lale.lib.lale",
"type": "object",
"tags": {"pre": [], "op": ["transformer"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_transform": _input_transform_schema,
"output_transform": _output_transform_schema,
},
}
Tee = lale.operators.make_operator(_TeeImpl, _combined_schemas)
lale.docstrings.set_docstrings(Tee)
| 3,330 | 32.989796 | 134 |
py
|
lale
|
lale-master/lale/lib/lale/concat_features.py
|
# Copyright 2022 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from lale.lib.rasl import ConcatFeatures as RaslConcatFeatures
from lale.lib.rasl.concat_features import _ConcatFeaturesImpl as _RaslConcatFeaturesImpl
_ConcatFeaturesImpl = _RaslConcatFeaturesImpl
ConcatFeatures = RaslConcatFeatures
| 812 | 37.714286 | 88 |
py
|
lale
|
lale-master/lale/lib/lale/halving_grid_search_cv.py
|
# Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Dict, Optional
import numpy as np
import lale.docstrings
import lale.helpers
import lale.lib.sklearn
import lale.operators
import lale.search.lale_grid_search_cv
from lale.lib._common_schemas import (
schema_cv,
schema_estimator,
schema_max_opt_time,
schema_scoring_single,
)
from .observing import Observing
func_timeout_installed = False
try:
from func_timeout import FunctionTimedOut, func_timeout
func_timeout_installed = True
except ImportError:
pass
class _HalvingGridSearchCVImpl:
_best_estimator: Optional[lale.operators.TrainedOperator] = None
def __init__(
self,
*,
estimator=None,
scoring=None,
cv=5,
verbose=0,
param_grid=None,
factor=3,
resource="n_strings",
max_resources="auto",
min_resources="exhaust",
aggressive_elimination=False,
refit=True,
error_score=np.nan,
return_train_score=False,
random_state=None,
n_jobs=None,
lale_num_samples=None,
lale_num_grids=None,
pgo=None,
observer=None,
max_opt_time=None,
):
if observer is not None and isinstance(observer, type):
# if we are given a class name, instantiate it
observer = observer()
if scoring is None:
if estimator is None:
is_clf = True
else:
is_clf = estimator.is_classifier()
if is_clf:
scoring = "accuracy"
else:
scoring = "r2"
self._hyperparams = {
"estimator": estimator,
"factor": factor,
"resource": resource,
"max_resources": max_resources,
"min_resources": min_resources,
"aggressive_elimination": aggressive_elimination,
"cv": cv,
"scoring": scoring,
"refit": refit,
"error_score": error_score,
"return_train_score": return_train_score,
"random_state": random_state,
"n_jobs": n_jobs,
"verbose": verbose,
"lale_num_samples": lale_num_samples,
"lale_num_grids": lale_num_grids,
"pgo": pgo,
"hp_grid": param_grid,
"observer": observer,
"max_opt_time": max_opt_time,
}
def fit(self, X, y, **fit_params):
if self._hyperparams["estimator"] is None:
op = lale.lib.sklearn.LogisticRegression
else:
op = self._hyperparams["estimator"]
observed_op = op
obs = self._hyperparams["observer"]
# We always create an observer.
# Otherwise, we can have a problem with PlannedOperators
# (that are not trainable):
# GridSearchCV checks if a fit method is present before
# configuring the operator, and our planned operators
# don't have a fit method
# Observing always has a fit method, and so solves this problem.
observed_op = Observing(op=op, observer=obs)
hp_grid = self._hyperparams["hp_grid"]
data_schema = lale.helpers.fold_schema(
X, y, self._hyperparams["cv"], op.is_classifier()
)
if hp_grid is None:
hp_grid = lale.search.lale_grid_search_cv.get_parameter_grids(
observed_op,
num_samples=self._hyperparams["lale_num_samples"],
num_grids=self._hyperparams["lale_num_grids"],
pgo=self._hyperparams["pgo"],
data_schema=data_schema,
)
else:
# if hp_grid is specified manually, we need to add a level of nesting
# since we are wrapping it in an observer
if isinstance(hp_grid, list):
hp_grid = lale.helpers.nest_all_HPparams("op", hp_grid)
else:
assert isinstance(hp_grid, dict)
hp_grid = lale.helpers.nest_HPparams("op", hp_grid)
if not hp_grid and isinstance(op, lale.operators.IndividualOp):
hp_grid = [
lale.search.lale_grid_search_cv.get_defaults_as_param_grid(observed_op) # type: ignore
]
be: lale.operators.TrainableOperator
if hp_grid:
if obs is not None:
impl = observed_op._impl # type: ignore
impl.startObserving(
"optimize",
hp_grid=hp_grid,
op=op,
num_samples=self._hyperparams["lale_num_samples"],
num_grids=self._hyperparams["lale_num_grids"],
pgo=self._hyperparams["pgo"],
)
try:
# explicitly require this experimental feature
from sklearn.experimental import enable_halving_search_cv # noqa
import sklearn.model_selection # isort: skip
self.grid = sklearn.model_selection.HalvingGridSearchCV(
observed_op,
hp_grid,
cv=self._hyperparams["cv"],
scoring=self._hyperparams["scoring"],
n_jobs=self._hyperparams["n_jobs"],
)
if self._hyperparams["max_opt_time"] is not None:
if func_timeout_installed:
try:
func_timeout(
self._hyperparams["max_opt_time"], self.grid.fit, (X, y)
)
except FunctionTimedOut as exc:
raise BaseException(
"HalvingGridSearchCV timed out."
) from exc
else:
raise ValueError(
f"""max_opt_time is set to {self._hyperparams["max_opt_time"]} but the Python package
required for timeouts is not installed. Please install `func_timeout` using `pip install func_timeout`
or set max_opt_time to None."""
)
else:
self.grid.fit(X, y, **fit_params)
be = self.grid.best_estimator_
except BaseException as e:
if obs is not None:
assert isinstance(observed_op, Observing) # type: ignore
impl = observed_op.shallow_impl # type: ignore
impl.failObserving("optimize", e)
raise
impl = None
if isinstance(be, lale.operators.Operator):
impl = be._impl_instance()
if impl is not None:
assert isinstance(be, Observing) # type: ignore
be = impl.getOp()
if obs is not None:
obs_impl = observed_op._impl # type: ignore
obs_impl.endObserving("optimize", best=be)
else:
assert isinstance(op, lale.operators.TrainableOperator)
be = op
self._best_estimator = be.fit(X, y, **fit_params)
return self
def predict(self, X, **predict_params):
assert self._best_estimator is not None
return self._best_estimator.predict(X, **predict_params)
def get_pipeline(
self, pipeline_name=None, astype: lale.helpers.astype_type = "lale"
):
if pipeline_name is not None:
raise NotImplementedError("Cannot get pipeline by name yet.")
result = self._best_estimator
if result is None or astype == "lale":
return result
assert astype == "sklearn", astype
return result
_hyperparams_schema = {
"allOf": [
{
"type": "object",
"required": [
"estimator",
"cv",
"scoring",
"n_jobs",
"lale_num_samples",
"lale_num_grids",
"pgo",
"max_opt_time",
],
"relevantToOptimizer": ["estimator"],
"additionalProperties": False,
"properties": {
"estimator": schema_estimator,
"scoring": schema_scoring_single,
"cv": schema_cv,
"verbose": {
"description": "Controls the verbosity: the higher, the more messages.",
"type": "integer",
"minimum": 0,
"default": 0,
},
"factor": {
"description": """The `halving` parameter, which determines the proportion of candidates
that are selected for each subsequent iteration. For example, factor=3 means that only one third of the candidates are selected.""",
"type": "number",
"minimum": 1,
"exclusiveMinimum": True,
"minimumForOptimizer": 2,
"maximumForOptimizer": 5,
"default": 3,
},
"resource": {
"description": """Defines the resource that increases with each iteration.
By default, the resource is the number of samples.
It can also be set to any parameter of the base estimator that accepts positive integer values, e.g. ‘n_iterations’ or ‘n_estimators’ for a gradient boosting estimator.""",
"type": "string",
"default": "n_samples",
},
"max_resources": {
"description": "The maximum amount of resource that any candidate is allowed to use for a given iteration.",
"anyOf": [
{"enum": ["auto"]},
{
"forOptimizer": False,
"type": "integer",
"minimum": 1,
},
],
"default": "auto",
},
"min_resources": {
"description": "The minimum amount of resource that any candidate is allowed to use for a given iteration",
"anyOf": [
{
"description": "A heuristic that sets r0 to a small value",
"enum": ["smallest"],
},
{
"description": "Sets r0 such that the last iteration uses as much resources as possible",
"enum": ["exhaust"],
},
{
"forOptimizer": False,
"type": "integer",
"minimum": 1,
},
],
"default": "exhaust",
},
"aggressive_elimination": {
"description": "Enable aggresive elimination when there aren't enough resources to reduce the remaining candidates to at most factor after the last iteration",
"type": "boolean",
"default": False,
},
"refit": {
"description": "Refit an estimator using the best found parameters on the whole dataset.",
"type": "boolean",
"default": True,
},
"error_score": {
"description": "Value to assign to the score if an error occurs in estimator fitting.",
"anyOf": [
{"description": "Raise the error", "enum": ["raise"]},
{"enum": [np.nan]},
{"type": "number", "forOptimizer": False},
],
"default": np.nan,
},
"return_train_score": {
"description": "Include training scores",
"type": "boolean",
"default": False,
},
"random_state": {
"description": "Pseudo random number generator state used for subsampling the dataset when resources != 'n_samples'. Ignored otherwise.",
"anyOf": [
{
"description": "RandomState used by np.random",
"enum": [None],
},
{
"description": "Use the provided random state, only affecting other users of that same random state instance.",
"laleType": "numpy.random.RandomState",
},
{"description": "Explicit seed.", "type": "integer"},
],
"default": None,
},
"n_jobs": {
"description": "Number of jobs to run in parallel.",
"anyOf": [
{
"description": "1 unless in joblib.parallel_backend context.",
"enum": [None],
},
{"description": "Use all processors.", "enum": [-1]},
{
"description": "Number of jobs to run in parallel.",
"type": "integer",
"minimum": 1,
},
],
"default": None,
},
"lale_num_samples": {
"description": "How many samples to draw when discretizing a continuous hyperparameter.",
"anyOf": [
{"type": "integer", "minimum": 1},
{
"description": "lale.search.lale_grid_search_cv.DEFAULT_SAMPLES_PER_DISTRIBUTION",
"enum": [None],
},
],
"default": None,
},
"lale_num_grids": {
"description": "How many top-level disjuncts to explore.",
"anyOf": [
{"description": "If not set, keep all grids.", "enum": [None]},
{
"description": "Fraction of grids to keep.",
"type": "number",
"minimum": 0.0,
"exclusiveMinimum": True,
"maximum": 1.0,
"exclusiveMaximum": True,
},
{
"description": "Number of grids to keep.",
"type": "integer",
"minimum": 1,
},
],
"default": None,
},
"param_grid": {
"anyOf": [
{"enum": [None], "description": "Generated automatically."},
{
"description": "Dictionary of hyperparameter ranges in the grid."
},
],
"default": None,
},
"pgo": {
"anyOf": [{"description": "lale.search.PGO"}, {"enum": [None]}],
"default": None,
},
"observer": {
"laleType": "Any",
"default": None,
"description": "a class or object with callbacks for observing the state of the optimization",
},
"max_opt_time": schema_max_opt_time,
},
},
{
"description": "max_resources is set to 'auto' if and only if resource is set to 'n_samples'"
"penalty with the liblinear solver.",
"oneOf": [
{"type": "object", "properties": {"resource": {"enum": ["n_samples"]}}},
{
"type": "object",
"properties": {
"max_resources": {"not": {"enum": ["auto"]}},
},
},
],
},
]
}
_input_fit_schema = {
"type": "object",
"required": ["X", "y"],
"properties": {"X": {}, "y": {}},
}
_input_predict_schema = {"type": "object", "required": ["X"], "properties": {"X": {}}}
_output_predict_schema: Dict[str, Any] = {}
_combined_schemas = {
"description": """GridSearchCV_ performs an exhaustive search over a discretized space.
.. _GridSearchCV: https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.HalvingGridSearchCV.html""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.lale.halving_grid_search_cv.html",
"import_from": "lale.lib.lale",
"type": "object",
"tags": {"pre": [], "op": ["estimator"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
},
}
HalvingGridSearchCV = lale.operators.make_operator(
_HalvingGridSearchCVImpl, _combined_schemas
)
lale.docstrings.set_docstrings(HalvingGridSearchCV)
| 18,162 | 38.570806 | 179 |
py
|
lale
|
lale-master/lale/lib/lale/__init__.py
|
# Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Lale operators with schemas.
Operators
=========
Estimators:
* lale.lib.lale. `AutoPipeline`_
* lale.lib.lale. `GridSearchCV`_
* lale.lib.lale. `HalvingGridSearchCV`_
* lale.lib.lale. `Hyperopt`_
* lale.lib.lale. `OptimizeLast`_
* lale.lib.lale. `OptimizeSuffix`_
* lale.lib.lale. `SMAC`_
* lale.lib.lale. `TopKVotingClassifier`_
Transformers:
* lale.lib.rasl. `Aggregate`_
* lale.lib.rasl. `Alias`_
* lale.lib.rasl. `Batching`_
* lale.lib.rasl. `ConcatFeatures`_
* lale.lib.rasl. `Filter`_
* lale.lib.rasl. `GroupBy`_
* lale.lib.rasl. `Join`_
* lale.lib.rasl. `Map`_
* lale.lib.lale. `NoOp`_
* lale.lib.rasl. `OrderBy`_
* lale.lib.rasl. `Project`_
* lale.lib.rasl. `Relational`_
* lale.lib.lale. `SampleBasedVoting`_
* lale.lib.rasl. `Scan`_
* lale.lib.rasl. `SplitXy`_
* lale.lib.lale. `Tee`_
Estimators and transformers:
* lale.lib.lale. `Both`_
* lale.lib.lale. `IdentityWrapper`_
* lale.lib.lale. `Observing`_
.. _`AutoPipeline`: lale.lib.lale.auto_pipeline.html
.. _`GridSearchCV`: lale.lib.lale.grid_search_cv.html
.. _`HalvingGridSearchCV`: lale.lib.lale.halving_grid_search_cv.html
.. _`Hyperopt`: lale.lib.lale.hyperopt.html
.. _`OptimizeLast`: lale.lib.lale.optimize_last.html
.. _`OptimizeSuffix`: lale.lib.lale.optimize_suffix.html
.. _`TopKVotingClassifier`: lale.lib.lale.topk_voting_classifier.html
.. _`SMAC`: lale.lib.lale.smac.html
.. _`Batching`: lale.lib.rasl.batching.html
.. _`ConcatFeatures`: lale.lib.rasl.concat_features.html
.. _`NoOp`: lale.lib.lale.no_op.html
.. _`Project`: lale.lib.rasl.project.html
.. _`SampleBasedVoting`: lale.lib.lale.sample_based_voting.html
.. _`Aggregate`: lale.lib.rasl.aggregate.html
.. _`Filter`: lale.lib.rasl.filter.html
.. _`GroupBy`: lale.lib.rasl.group_by.html
.. _`Map`: lale.lib.rasl.map.html
.. _`OrderBy`: lale.lib.rasl.orderby.html
.. _`Join`: lale.lib.rasl.join.html
.. _`Alias`: lale.lib.rasl.alias.html
.. _`Scan`: lale.lib.rasl.scan.html
.. _`SplitXy`: lale.lib.rasl.split_xy.html
.. _`Relational`: lale.lib.rasl.relational.html
.. _`Both`: lale.lib.lale.both.html
.. _`IdentityWrapper`: lale.lib.lale.identity_wrapper.html
.. _`Observing`: lale.lib.lale.observing.html
.. _`Tee`: lale.lib.lale.tee.html
Functions:
==========
* lale.lib.lale. `categorical`_
* lale.lib.lale. `date_time`_
* SparkExplainer. `spark_explainer`_
.. _`categorical`: lale.lib.rasl.functions.html#lale.lib.rasl.functions.categorical
.. _`date_time`: lale.lib.rasl.functions.html#lale.lib.rasl.functions.date_time
.. _`spark_explainer`: lale.lib.rasl.spark_explainer.html
"""
# Note: all imports should be done as
# from .xxx import XXX as XXX
# this ensures that pyright considers them to be publicly available
# and not private imports (this affects lale users that use pyright)
from lale.lib.rasl import Aggregate as Aggregate
from lale.lib.rasl import Alias as Alias
from lale.lib.rasl import Batching as Batching
from lale.lib.rasl import ConcatFeatures as ConcatFeatures
from lale.lib.rasl import Filter as Filter
from lale.lib.rasl import GroupBy as GroupBy
from lale.lib.rasl import Join as Join
from lale.lib.rasl import Map as Map
from lale.lib.rasl import OrderBy as OrderBy
from lale.lib.rasl import Project as Project
from lale.lib.rasl import Relational as Relational
from lale.lib.rasl import Scan as Scan
from lale.lib.rasl import SplitXy as SplitXy
from lale.lib.rasl import categorical as categorical
from lale.lib.rasl import date_time as date_time
from lale.lib.rasl import spark_explainer as spark_explainer
# estimators
from .auto_pipeline import AutoPipeline as AutoPipeline
# estimators and transformers
from .both import Both as Both
# functions
from .grid_search_cv import GridSearchCV as GridSearchCV
from .halving_grid_search_cv import HalvingGridSearchCV as HalvingGridSearchCV
from .hyperopt import Hyperopt as Hyperopt
from .identity_wrapper import IdentityWrapper as IdentityWrapper
from .no_op import NoOp as NoOp
from .observing import Observing as Observing
from .optimize_last import OptimizeLast as OptimizeLast
from .optimize_suffix import OptimizeSuffix as OptimizeSuffix
from .sample_based_voting import SampleBasedVoting as SampleBasedVoting
from .smac import SMAC as SMAC
from .tee import Tee as Tee
from .topk_voting_classifier import TopKVotingClassifier as TopKVotingClassifier
| 4,867 | 34.021583 | 83 |
py
|
lale
|
lale-master/lale/lib/lale/time_series_transformer.py
|
# Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import os
import sys
import warnings
import numpy as np
from scipy.signal import resample
from sklearn import preprocessing
import lale.docstrings
import lale.operators
sys.path.append(os.getcwd())
seizure_type_data = collections.namedtuple(
"seizure_type_data", ["seizure_type", "data"]
)
# Some of the classes and modules have been taken from https://github.com/MichaelHills/seizure-detection
class Slice:
"""
Job: Take a slice of the data on the last axis.
    Note: Slice(x, y) takes indices x through y inclusive on the last axis (the stop is stored internally as y + 1 and used as a normal Python slice bound).
"""
def __init__(self, start, stop):
self.start = start
self.stop = stop + 1
def get_name(self):
return f"slice{self.start:d}-{self.stop:d}"
def apply(self, data):
        s = [slice(None)] * data.ndim
        s[-1] = slice(self.start, self.stop)
        return data[tuple(s)]
class Magnitude:
"""
Job: Take magnitudes of Complex data
"""
def get_name(self):
return "mag"
def apply(self, data):
return np.absolute(data)
class Log10:
"""
Apply Log10
"""
def get_name(self):
return "log10"
def apply(self, data):
# 10.0 * log10(re * re + im * im)
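        # Non-positive entries would break log10, so replace them: first set them to
        # the max (so they do not affect np.min below), then to 0.1 * the smallest
        # remaining (positive) value.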
indices = np.where(data <= 0)
data[indices] = np.max(data)
data[indices] = np.min(data) * 0.1
return np.log10(data)
class Pipeline:
"""
A Pipeline is an object representing the data transformations to make
on the input data, finally outputting extracted features.
pipeline: List of transforms to apply one by one to the input data
"""
def __init__(self, pipeline):
self.transforms = pipeline
names = [t.get_name() for t in self.transforms]
self.name = "empty" if len(names) == 0 else "_".join(names)
def get_name(self):
return self.name
def apply(self, data):
for transform in self.transforms:
data = transform.apply(data)
return data
class FFT:
"""
Apply Fast Fourier Transform to the last axis.
"""
def get_name(self):
return "fft"
def apply(self, data):
axis = data.ndim - 1
return np.fft.rfft(data, axis=axis)
class Resample:
"""
Resample time-series data.
"""
def __init__(self, sample_rate):
self.f = sample_rate
def get_name(self):
return f"resample{self.f:d}"
def apply(self, data):
axis = data.ndim - 1
if data.shape[-1] > self.f:
return resample(data, self.f, axis=axis)
return data
class StandardizeLast:
"""
Scale across the last axis.
"""
def get_name(self):
return "standardize-last"
def apply(self, data):
return preprocessing.scale(data, axis=data.ndim - 1)
class StandardizeFirst:
"""
Scale across the first axis.
"""
def get_name(self):
return "standardize-first"
def apply(self, data):
return preprocessing.scale(data, axis=0)
class CorrelationMatrix:
"""
Calculate correlation coefficients matrix across all EEG channels.
"""
def get_name(self):
return "correlation-matrix"
def apply(self, data):
return np.corrcoef(data)
class Eigenvalues:
"""
Take eigenvalues of a matrix, and sort them by magnitude in order to
make them useful as features (as they have no inherent order).
"""
def get_name(self):
return "eigenvalues"
def apply(self, data):
w, _v = np.linalg.eig(data)
w = np.absolute(w)
w.sort()
return w
# Take the upper right triangle of a matrix
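# e.g., for a 3x3 matrix this returns [m[0, 1], m[0, 2], m[1, 2]]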
def upper_right_triangle(matrix):
accum = []
for i in range(matrix.shape[0]):
for j in range(i + 1, matrix.shape[1]):
accum.append(matrix[i, j])
return np.array(accum)
class FreqCorrelation:
"""
Correlation in the frequency domain. First take FFT with (start, end) slice options,
    then calculate correlation coefficients on the FFT output, followed by calculating
    eigenvalues of the correlation coefficient matrix.
The output features are (fft, upper_right_diagonal(correlation_coefficients), eigenvalues)
Features can be selected/omitted using the constructor arguments.
"""
def __init__(
self, start, end, scale_option, with_fft=False, with_corr=True, with_eigen=True
):
self.start = start
self.end = end
self.scale_option = scale_option
self.with_fft = with_fft
self.with_corr = with_corr
self.with_eigen = with_eigen
assert scale_option in ("first_axis", "last_axis", "none")
assert with_corr or with_eigen
def get_name(self):
selections = []
if not self.with_corr:
selections.append("nocorr")
if not self.with_eigen:
selections.append("noeig")
if len(selections) > 0:
selection_str = "-" + "-".join(selections)
else:
selection_str = ""
return f"freq-correlation-{self.start:d}-{self.end:d}-{'withfft' if self.with_fft else 'nofft'}-{self.scale_option}{selection_str}"
def apply(self, data):
data1 = FFT().apply(data)
data1 = Slice(self.start, self.end).apply(data1)
data1 = Magnitude().apply(data1)
data1 = Log10().apply(data1)
data2 = data1
if self.scale_option == "first_axis":
data2 = StandardizeFirst().apply(data2)
elif self.scale_option == "last_axis":
data2 = StandardizeLast().apply(data2)
data2 = CorrelationMatrix().apply(data2)
out = []
if self.with_corr:
ur = upper_right_triangle(data2)
out.append(ur)
if self.with_eigen:
w = Eigenvalues().apply(data2)
out.append(w)
if self.with_fft:
data1 = data1.ravel()
out.append(data1)
for d in out:
assert d.ndim == 1
return np.concatenate(out, axis=0)
class TimeCorrelation:
"""
    Correlation in the time domain. First downsample the data, then calculate correlation coefficients,
    followed by calculating eigenvalues of the correlation coefficient matrix.
The output features are (upper_right_diagonal(correlation_coefficients), eigenvalues)
Features can be selected/omitted using the constructor arguments.
"""
def __init__(self, max_hz, scale_option, with_corr=True, with_eigen=True):
self.max_hz = max_hz
self.scale_option = scale_option
self.with_corr = with_corr
self.with_eigen = with_eigen
assert scale_option in ("first_axis", "last_axis", "none")
assert with_corr or with_eigen
def get_name(self):
selections = []
if not self.with_corr:
selections.append("nocorr")
if not self.with_eigen:
selections.append("noeig")
if len(selections) > 0:
selection_str = "-" + "-".join(selections)
else:
selection_str = ""
return f"time-correlation-r{self.max_hz:d}-{self.scale_option}{selection_str}"
def apply(self, data):
# so that correlation matrix calculation doesn't crash
for ch in data:
if np.all(ch == 0.0):
ch[-1] += 0.00001
data1 = data
if data1.shape[1] > self.max_hz:
data1 = Resample(self.max_hz).apply(data1)
if self.scale_option == "first_axis":
data1 = StandardizeFirst().apply(data1)
elif self.scale_option == "last_axis":
data1 = StandardizeLast().apply(data1)
data1 = CorrelationMatrix().apply(data1)
out = []
if self.with_corr:
ur = upper_right_triangle(data1)
out.append(ur)
if self.with_eigen:
w = Eigenvalues().apply(data1)
out.append(w)
for d in out:
assert d.ndim == 1
return np.concatenate(out, axis=0)
class FFTWithTimeFreqCorrelation:
"""
Combines FFT with time and frequency correlation, taking both correlation coefficients and eigenvalues.
"""
def __init__(self, start, end, max_hz, scale_option):
self.start = start
self.end = end
self.max_hz = max_hz
self.scale_option = scale_option
assert scale_option in ("first_axis", "last_axis", "none")
def get_name(self):
return f"fft-with-time-freq-corr-{self.start:d}-{self.end:d}-r{self.max_hz:d}-{self.scale_option}"
def apply(self, data):
data1 = TimeCorrelation(self.max_hz, self.scale_option).apply(data)
data2 = FreqCorrelation(
self.start, self.end, self.scale_option, with_fft=True
).apply(data)
assert data1.ndim == data2.ndim
return np.concatenate((data1, data2), axis=data1.ndim - 1)
class _TimeFreqEigenVectorsImpl:
def __init__(
self,
window_length=1,
window_step=0.5,
fft_min_freq=1,
fft_max_freq=24,
sampling_frequency=250,
):
self.window_length = window_length
self.window_step = window_step
self.fft_min_freq = fft_min_freq
self.fft_max_freq = fft_max_freq
self.sampling_frequency = sampling_frequency
def transform(self, X, y=None):
warnings.filterwarnings("ignore")
pipeline = Pipeline(
[
FFTWithTimeFreqCorrelation(
self.fft_min_freq,
self.fft_max_freq,
self.sampling_frequency,
"first_axis",
)
]
)
X_transformed = []
y_transformed = np.empty((0))
self.end_index_list = (
[]
) # This is the list of end indices for samples generated per seizure
# The transformation map is just a list of indices corresponding to the last sample generated by each time-series.
for i in range(len(X)): # pylint:disable=consider-using-enumerate
seizure_data = X[i]
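            # With the defaults (window_length=1, window_step=0.5, sampling_frequency=250),
            # each window spans 250 samples and successive windows start 125 samples apart.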
start, step = 0, int(np.floor(self.window_step * self.sampling_frequency))
stop = start + int(np.floor(self.window_length * self.sampling_frequency))
fft_data = []
while stop < seizure_data.shape[1]:
signal_window = seizure_data[:, start:stop]
fft_window = pipeline.apply(signal_window)
fft_data.append(fft_window)
start, stop = start + step, stop + step
X_transformed.extend(fft_data)
if y is not None:
seizure_label = y[i]
labels_for_all_seizure_samples = np.full(len(fft_data), seizure_label)
y_transformed = np.hstack(
(y_transformed, labels_for_all_seizure_samples)
)
previous_element = self.end_index_list[i - 1] if (i - 1) >= 0 else 0
self.end_index_list.append(previous_element + len(fft_data))
X_transformed = np.array(X_transformed)
if y is None:
y_transformed = None
return X_transformed, y_transformed
def get_transform_meta_output(self):
if self.end_index_list is not None:
return {"end_index_list": self.end_index_list}
else:
raise ValueError(
"Must call transform before trying to access its meta output."
)
_hyperparams_schema = {
"description": "TODO",
"allOf": [
{
"type": "object",
"additionalProperties": False,
"required": [
"window_length",
"window_step",
"fft_min_freq",
"fft_max_freq",
"sampling_frequency",
],
"relevantToOptimizer": ["window_length", "window_step", "fft_max_freq"],
"properties": {
"window_length": {
"type": "number",
"default": 1,
"description": "TODO",
"minimumForOptimizer": 0.25,
"maximumForOptimizer": 2,
"distribution": "uniform",
},
"window_step": {
"type": "number",
"default": 0.5,
"description": "TODO",
"minimumForOptimizer": 0.25,
"maximumForOptimizer": 1, # TODO: This is $data->window_length once $data is implemented
"distribution": "uniform",
},
"fft_min_freq": {
"type": "integer",
"default": 1,
"description": "TODO",
},
"fft_max_freq": {
"type": "integer",
"default": 24,
"description": "TODO",
"minimumForOptimizer": 2,
"maximumForOptimizer": 30, # TODO: This is $data->sampling_frequency/2 once $data is implemented
"distribution": "uniform",
},
"sampling_frequency": {
"type": "integer",
"default": 250,
"description": "TODO",
},
},
}
# TODO: Any constraints on hyper-parameter combinations?
],
}
_input_transform_schema = {
"description": "Input format for data passed to the transform method.",
"type": "object",
"required": ["X", "y"],
"properties": {
"X": {
"type": "array",
"items": {
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
},
"description": "The input data to complete.",
},
"y": {
"type": "array",
"items": {"anyOf": [{"type": "integer"}, {"type": "string"}]},
},
},
}
_output_transform_schema = {
"description": "The input data to complete.",
"type": "array", # This is actually a tuple of X and y
"items": {"type": "array"},
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Combined schema for expected data and hyperparameters.",
"type": "object",
"tags": {"pre": [], "op": ["transformer"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_transform": _input_transform_schema,
"output_transform": _output_transform_schema,
},
}
TimeFreqEigenVectors = lale.operators.make_operator(
_TimeFreqEigenVectorsImpl, _combined_schemas
)
lale.docstrings.set_docstrings(TimeFreqEigenVectors)
| 15,407 | 28.976654 | 139 |
py
|
lale
|
lale-master/lale/lib/lale/hyperopt.py
|
# Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import logging
import multiprocessing
import sys
import time
import traceback
import warnings
from typing import Any, Dict, Optional
import hyperopt
import numpy as np
import pandas as pd
from hyperopt.exceptions import AllTrialsFailed
from sklearn.metrics import check_scoring, log_loss
from sklearn.model_selection import check_cv, train_test_split
import lale.docstrings
import lale.helpers
import lale.operators
import lale.pretty_print
from lale.helpers import (
create_instance_from_hyperopt_search_space,
cross_val_score_track_trials,
)
from lale.lib._common_schemas import (
schema_best_score_single,
schema_cv,
schema_estimator,
schema_max_opt_time,
schema_scoring_single,
)
from lale.lib.sklearn import LogisticRegression
from lale.search.op2hp import hyperopt_search_space
from lale.search.PGO import PGO
SEED = 42
logger = logging.getLogger(__name__)
logger.setLevel(logging.ERROR)
class _HyperoptImpl:
def __init__(
self,
*,
estimator=None,
scoring=None,
best_score=0.0,
args_to_scorer=None,
cv=5,
handle_cv_failure=False,
verbose=False,
show_progressbar=True,
algo="tpe",
max_evals=50,
frac_evals_with_defaults=0,
max_opt_time=None,
max_eval_time=None,
pgo: Optional[PGO] = None,
):
self.max_evals = max_evals
if estimator is None:
self.estimator = LogisticRegression()
else:
self.estimator = estimator
if frac_evals_with_defaults > 0:
self.evals_with_defaults = int(frac_evals_with_defaults * max_evals)
else:
self.evals_with_defaults = 0
self.algo = algo
self.scoring = scoring
if self.scoring is None:
is_clf = self.estimator.is_classifier()
if is_clf:
self.scoring = "accuracy"
else:
self.scoring = "r2"
self.best_score = best_score
self.handle_cv_failure = handle_cv_failure
self.cv = cv
self._trials = hyperopt.Trials()
self._default_trials = hyperopt.Trials()
self.max_opt_time = max_opt_time
self.max_eval_time = max_eval_time
self.pgo = pgo
self.show_progressbar = show_progressbar
if args_to_scorer is not None:
self.args_to_scorer = args_to_scorer
else:
self.args_to_scorer = {}
self.verbose = verbose
def _summarize_statuses(self):
status_list = self._trials.statuses()
status_hist = {}
for status in status_list:
status_hist[status] = 1 + status_hist.get(status, 0)
if hyperopt.STATUS_FAIL in status_hist:
print(
f"{status_hist[hyperopt.STATUS_FAIL]} out of {len(status_list)} trials failed, call summary() for details."
)
if not self.verbose:
print("Run with verbose=True to see per-trial exceptions.")
def fit(self, X_train, y_train, X_valid=None, y_valid=None, **fit_params):
opt_start_time = time.time()
is_clf = self.estimator.is_classifier()
if X_valid is not None:
assert (
self.cv is None
), "cv should be None when using X_valid to pass validation dataset."
else:
self.cv = check_cv(self.cv, y=y_train, classifier=is_clf)
try:
data_schema = lale.helpers.fold_schema(X_train, y_train, self.cv, is_clf)
except (
BaseException
): # we may not always be able to extract schema for the given data format.
data_schema = None
self.search_space = hyperopt.hp.choice(
"meta_model",
[
hyperopt_search_space(
self.estimator, pgo=self.pgo, data_schema=data_schema
)
],
)
# Create a search space with default hyperparameters for all trainable parts of the pipeline.
# This search space is used for `frac_evals_with_defaults` fraction of the total trials.
try:
self.search_space_with_defaults = hyperopt.hp.choice(
"meta_model",
[
hyperopt_search_space(
self.estimator.freeze_trainable(),
pgo=self.pgo,
data_schema=data_schema,
)
],
)
except Exception:
logger.warning(
"Exception caught during generation of default search space, setting frac_evals_with_defaults to zero."
)
self.evals_with_defaults = 0
def merge_trials(trials1, trials2):
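            # Offset the trial ids (tids) of trials2 by the largest tid in trials1
            # so they remain unique after being inserted into trials1.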
max_tid = max(trial["tid"] for trial in trials1.trials)
for trial in trials2:
tid = trial["tid"] + max_tid + 1
hyperopt_trial = hyperopt.Trials().new_trial_docs(
tids=[None], specs=[None], results=[None], miscs=[None]
)
hyperopt_trial[0] = trial
hyperopt_trial[0]["tid"] = tid
hyperopt_trial[0]["misc"]["tid"] = tid
for key in hyperopt_trial[0]["misc"]["idxs"].keys():
hyperopt_trial[0]["misc"]["idxs"][key] = [tid]
trials1.insert_trial_docs(hyperopt_trial)
trials1.refresh()
return trials1
def get_final_trained_estimator(params, X_train, y_train):
warnings.filterwarnings("ignore")
trainable = create_instance_from_hyperopt_search_space(
self.estimator, params
)
if trainable is None:
return None
else:
trained = trainable.fit(X_train, y_train, **fit_params)
return trained
def f(params):
current_time = time.time()
if (self.max_opt_time is not None) and (
(current_time - opt_start_time) > self.max_opt_time
):
# if max optimization time set, and we have crossed it, exit optimization completely
sys.exit(0)
if self.max_eval_time:
                # Run hyperopt in a subprocess that can be interrupted
manager = multiprocessing.Manager()
proc_dict: Dict[str, Any] = manager.dict() # type: ignore
p = multiprocessing.Process(
target=_proc_train_test,
args=(
self,
params,
X_train,
y_train,
X_valid,
y_valid,
fit_params,
proc_dict,
),
)
p.start()
p.join(self.max_eval_time)
if p.is_alive():
p.terminate()
p.join()
logger.warning(
f"Maximum alloted evaluation time exceeded. with hyperparams: {params}, setting status to FAIL"
)
proc_dict["status"] = hyperopt.STATUS_FAIL
if "status" not in proc_dict:
logger.warning("Corrupted results, setting status to FAIL")
proc_dict["status"] = hyperopt.STATUS_FAIL
else:
proc_dict = {}
_proc_train_test(
self,
params,
X_train,
y_train,
X_valid,
y_valid,
fit_params,
proc_dict,
)
return proc_dict
algo = getattr(hyperopt, self.algo)
# Search in the search space with defaults
if self.evals_with_defaults > 0:
try:
hyperopt.fmin(
f,
self.search_space_with_defaults,
algo=algo.suggest,
max_evals=self.evals_with_defaults,
trials=self._default_trials,
rstate=np.random.RandomState(SEED),
show_progressbar=self.show_progressbar,
)
except SystemExit:
logger.warning(
"Maximum alloted optimization time exceeded. Optimization exited prematurely"
)
except AllTrialsFailed as exc:
self._best_estimator = None
if hyperopt.STATUS_OK not in self._trials.statuses():
raise ValueError(
"Error from hyperopt, none of the trials succeeded."
) from exc
try:
hyperopt.fmin(
f,
self.search_space,
algo=algo.suggest,
max_evals=self.max_evals - self.evals_with_defaults,
trials=self._trials,
rstate=np.random.RandomState(SEED),
show_progressbar=self.show_progressbar,
)
except SystemExit:
logger.warning(
"Maximum alloted optimization time exceeded. Optimization exited prematurely"
)
except AllTrialsFailed as exc:
self._best_estimator = None
if hyperopt.STATUS_OK not in self._trials.statuses():
self._summarize_statuses()
raise ValueError(
"Error from hyperopt, none of the trials succeeded."
) from exc
self._trials = merge_trials(self._trials, self._default_trials)
if self.show_progressbar:
self._summarize_statuses()
try:
best_trial = self._trials.best_trial
val_loss = self._trials.best_trial["result"]["loss"]
if len(self._default_trials) > 0:
default_val_loss = self._default_trials.best_trial["result"]["loss"]
if default_val_loss < val_loss:
best_trial = self._default_trials.best_trial
best_params = best_trial["result"]["params"]
logger.info(
f"best score: {self.best_score - self._trials.average_best_error():.1%}\nbest hyperparams found using {self.max_evals} hyperopt trials: {best_params}"
)
trained = get_final_trained_estimator(best_params, X_train, y_train)
self._best_estimator = trained
except BaseException as e:
logger.warning(
f"Unable to extract the best parameters from optimization, the error: {e}"
)
self._best_estimator = None
return self
def predict(self, X_eval, **predict_params):
warnings.filterwarnings("ignore")
if self._best_estimator is None:
raise ValueError(
"Can not predict as the best estimator is None. Either an attempt to call `predict` "
"before calling `fit` or all the trials during `fit` failed."
)
trained = self._best_estimator
try:
predictions = trained.predict(X_eval, **predict_params)
except ValueError as e:
logger.warning(
f"ValueError in predicting using Hyperopt:{trained}, the error is:{e}"
)
predictions = None
return predictions
def summary(self):
"""Table summarizing the trial results (ID, loss, time, log_loss, status).
Returns
-------
result : DataFrame"""
def make_record(trial_dict):
return {
"name": f'p{trial_dict["tid"]}',
"tid": trial_dict["tid"],
"loss": trial_dict["result"].get("loss", float("nan")),
"time": trial_dict["result"].get("time", float("nan")),
"log_loss": trial_dict["result"].get("log_loss", float("nan")),
"status": trial_dict["result"]["status"],
}
records = [make_record(td) for td in self._trials.trials]
result = pd.DataFrame.from_records(records, index="name")
return result
def get_pipeline(
self,
pipeline_name: Optional[str] = None,
astype: lale.helpers.astype_type = "lale",
):
"""Retrieve one of the trials.
Parameters
----------
pipeline_name : union type, default None
- string
Key for table returned by summary(), return a trainable pipeline.
- None
When not specified, return the best trained pipeline found.
astype : 'lale' or 'sklearn', default 'lale'
Type of resulting pipeline.
Returns
-------
result : Trained operator if best, trainable operator otherwise."""
best_name = None
if self._best_estimator is not None:
best_name = f'p{self._trials.best_trial["tid"]}'
if pipeline_name is None:
pipeline_name = best_name
if pipeline_name == best_name:
result = getattr(self, "_best_estimator", None)
else:
assert pipeline_name is not None
tid = int(pipeline_name[1:])
params = self._trials.trials[tid]["result"]["params"]
result = create_instance_from_hyperopt_search_space(self.estimator, params)
if result is None or astype == "lale":
return result
assert astype == "sklearn", astype
return result.export_to_sklearn_pipeline()
def _hyperopt_train_test(
hyperopt_impl, params, X_train, y_train, X_valid, y_valid, fit_params
):
warnings.filterwarnings("ignore")
trainable = create_instance_from_hyperopt_search_space(
hyperopt_impl.estimator, params
)
if hyperopt_impl.cv is not None:
try:
cv_score, logloss, execution_time = cross_val_score_track_trials(
trainable,
X_train,
y_train,
cv=hyperopt_impl.cv,
scoring=hyperopt_impl.scoring,
args_to_scorer=hyperopt_impl.args_to_scorer,
**fit_params,
)
logger.debug(f"Successful trial of hyperopt with hyperparameters:{params}")
except BaseException as e:
# If there is any error in cross validation, use the score based on a random train-test split as the evaluation criterion
if hyperopt_impl.handle_cv_failure and trainable is not None:
(
X_train_part,
X_validation,
y_train_part,
y_validation,
) = train_test_split(X_train, y_train, test_size=0.20)
# remove cv params from fit_params
if "args_to_cv" in fit_params.keys():
del fit_params["args_to_cv"]
start = time.time()
trained = trainable.fit(X_train_part, y_train_part, **fit_params)
scorer = check_scoring(trainable, scoring=hyperopt_impl.scoring)
cv_score = scorer(
trained, X_validation, y_validation, **hyperopt_impl.args_to_scorer
)
execution_time = time.time() - start
y_pred_proba = trained.predict_proba(X_validation)
try:
logloss = log_loss(y_true=y_validation, y_pred=y_pred_proba)
except BaseException:
logloss = 0
logger.debug("Warning, log loss cannot be computed")
else:
logger.debug(e)
if trainable is None:
logger.debug(
f"Error {e} with uncreatable pipeline with parameters:{lale.pretty_print.hyperparams_to_string(params)}"
)
else:
logger.debug(f"Error {e} with pipeline:{trainable.to_json()}")
raise e
else:
assert X_valid is not None, "X_valid needs to be passed when cv is None."
# remove cv params from fit_params
if "args_to_cv" in fit_params.keys():
del fit_params["args_to_cv"]
start = time.time()
trained = trainable.fit(X_train, y_train, **fit_params)
scorer = check_scoring(trainable, scoring=hyperopt_impl.scoring)
cv_score = scorer(trained, X_valid, y_valid, **hyperopt_impl.args_to_scorer)
execution_time = time.time() - start
try:
y_pred_proba = trained.predict_proba(X_valid)
logloss = log_loss(y_true=y_valid, y_pred=y_pred_proba)
except BaseException:
logloss = 0
logger.debug("Warning, log loss cannot be computed")
return cv_score, logloss, execution_time
def _proc_train_test(
hyperopt_impl, params, X_train, y_train, X_valid, y_valid, fit_params, return_dict
):
return_dict["params"] = copy.deepcopy(params)
try:
score, logloss, execution_time = _hyperopt_train_test(
hyperopt_impl,
params,
X_train=X_train,
y_train=y_train,
X_valid=X_valid,
y_valid=y_valid,
fit_params=fit_params,
)
return_dict["loss"] = hyperopt_impl.best_score - score
return_dict["time"] = execution_time
return_dict["log_loss"] = logloss
return_dict["status"] = hyperopt.STATUS_OK
except BaseException as e:
exception_type = f"{type(e).__module__}.{type(e).__name__}"
try:
trainable = create_instance_from_hyperopt_search_space(
hyperopt_impl.estimator, params
)
if trainable is None:
trial_info = f"hyperparams: {params}"
else:
trial_info = (
f'pipeline: """{trainable.pretty_print(show_imports=False)}"""'
)
except BaseException:
trial_info = f"hyperparams: {params}"
error_msg = f"Exception caught in Hyperopt: {exception_type}, {traceback.format_exc()}with {trial_info}"
logger.warning(error_msg + ", setting status to FAIL")
return_dict["status"] = hyperopt.STATUS_FAIL
return_dict["error_msg"] = error_msg
if hyperopt_impl.verbose:
print(return_dict["error_msg"])
_hyperparams_schema = {
"allOf": [
{
"type": "object",
"required": [
"estimator",
"max_evals",
"cv",
"handle_cv_failure",
"max_opt_time",
"pgo",
"show_progressbar",
],
"relevantToOptimizer": ["estimator", "max_evals", "cv"],
"additionalProperties": False,
"properties": {
"estimator": schema_estimator,
"scoring": schema_scoring_single,
"best_score": schema_best_score_single,
"args_to_scorer": {
"anyOf": [
{"type": "object"}, # Python dictionary
{"enum": [None]},
],
"description": "A dictionary of additional keyword arguments to pass to the scorer. Used for cases where the scorer has a signature such as ``scorer(estimator, X, y, **kwargs)``.",
"default": None,
},
"cv": schema_cv,
"handle_cv_failure": {
"description": """How to deal with cross validation failure for a trial.
If True, continue the trial by doing an 80-20 percent train-validation
split of the dataset input to fit and report the score on the
validation part. If False, terminate the trial with FAIL status.""",
"type": "boolean",
"default": False,
},
"verbose": {
"description": """Whether to print errors from each of the trials if any.
This is also logged using logger.warning.""",
"type": "boolean",
"default": False,
},
"show_progressbar": {
"description": "Display progress bar during optimization.",
"type": "boolean",
"default": True,
},
"algo": {
"description": "Algorithm for searching the space.",
"anyOf": [
{
"enum": ["tpe"],
"description": "tree-structured Parzen estimator: https://proceedings.neurips.cc/paper/2011/hash/86e8f7ab32cfd12577bc2619bc635690-Abstract.html",
},
{"enum": ["atpe"], "description": "adaptive TPE"},
{"enum": ["rand"], "description": "random search"},
{
"enum": ["anneal"],
"description": "variant on random search that takes some advantage of a smooth response surface",
},
],
"default": "tpe",
},
"max_evals": {
"description": "Number of trials of Hyperopt search.",
"type": "integer",
"minimum": 1,
"default": 50,
},
"frac_evals_with_defaults": {
"description": """Sometimes, using default values of hyperparameters works quite well.
This value would allow a fraction of the trials to use default values. Hyperopt searches the entire search space
for (1-frac_evals_with_defaults) fraction of max_evals.""",
"type": "number",
"minimum": 0.0,
"default": 0,
},
"max_opt_time": schema_max_opt_time,
"max_eval_time": {
"description": "Maximum amout of time in seconds for each evaluation.",
"anyOf": [
{"type": "number", "minimum": 0.0},
{"description": "No runtime bound.", "enum": [None]},
],
"default": None,
},
"pgo": {
"anyOf": [{"description": "lale.search.PGO"}, {"enum": [None]}],
"default": None,
},
},
}
]
}
_input_fit_schema = {
"type": "object",
"required": ["X", "y"],
"properties": {"X": {}, "y": {}},
}
_input_predict_schema = {"type": "object", "required": ["X"], "properties": {"X": {}}}
_output_predict_schema: Dict[str, Any] = {}
_combined_schemas = {
"description": """Hyperopt_ is a popular open-source Bayesian optimizer.
.. _Hyperopt: https://github.com/hyperopt/hyperopt
Examples
--------
>>> from lale.lib.sklearn import LogisticRegression as LR
>>> clf = Hyperopt(estimator=LR, cv=3, max_evals=5)
>>> from sklearn import datasets
>>> diabetes = datasets.load_diabetes()
>>> X = diabetes.data[:150]
>>> y = diabetes.target[:150]
>>> trained = clf.fit(X, y)
>>> predictions = trained.predict(X)
Other scoring metrics:
>>> from sklearn.metrics import make_scorer, f1_score
>>> clf = Hyperopt(estimator=LR,
... scoring=make_scorer(f1_score, average='macro'), cv=3, max_evals=5)
""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.lale.hyperopt.html",
"import_from": "lale.lib.lale",
"type": "object",
"tags": {"pre": [], "op": ["estimator"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
},
}
Hyperopt = lale.operators.make_operator(_HyperoptImpl, _combined_schemas)
lale.docstrings.set_docstrings(Hyperopt)
| 24,787 | 37.431008 | 200 |
py
|
lale
|
lale-master/lale/lib/lale/optimize_suffix.py
|
# Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import Any, Dict, Optional
import pandas as pd
import lale.docstrings
import lale.helpers
import lale.operators
import lale.pretty_print
from lale.lib.lale.hyperopt import Hyperopt
from lale.lib.sklearn import LogisticRegression
logger = logging.getLogger(__name__)
logger.setLevel(logging.ERROR)
class _OptimizeSuffix:
_prefix: Optional[lale.operators.TrainedOperator]
_optimizer: lale.operators.Operator
def __init__(
self,
prefix: Optional[lale.operators.TrainedOperator] = None,
suffix: Optional[lale.operators.Operator] = None,
optimizer: Optional[lale.operators.PlannedIndividualOp] = None,
optimizer_args=None,
**kwargs
):
self._prefix = prefix
_suffix: lale.operators.Operator
if suffix is None:
_suffix = LogisticRegression()
else:
_suffix = suffix
_optimizer: lale.operators.PlannedIndividualOp
if optimizer is None:
_optimizer = Hyperopt
else:
_optimizer = optimizer
if optimizer_args is None:
_optimizer_args = kwargs
else:
_optimizer_args = {**optimizer_args, **kwargs}
self._optimizer = _optimizer(estimator=_suffix, **_optimizer_args)
def fit(self, X_train, y_train=None, **kwargs):
# Transform the input data using transformation steps in pipeline
if self._prefix:
X_train_transformed = self._prefix.transform(X_train)
if isinstance(X_train, pd.DataFrame):
X_train_transformed = pd.DataFrame(
data=X_train_transformed, index=X_train.index
)
else:
X_train_transformed = X_train
self._optimizer = self._optimizer.fit(X_train_transformed, y_train, **kwargs)
return self
def add_suffix(
self, suffix: lale.operators.TrainedOperator
) -> lale.operators.TrainedOperator:
"""Given a trained suffix, adds it to the prefix to give a trained pipeline"""
trained: lale.operators.TrainedOperator
if self._prefix is None:
trained = suffix
else:
trained = self._prefix >> suffix
assert isinstance(trained, lale.operators.TrainedOperator)
return trained
def predict(self, X_eval, **predict_params):
if self._prefix is None:
X_input = X_eval
else:
X_input = self._prefix.transform(X_eval)
return self._optimizer.predict(X_input, **predict_params)
def summary(self, **kwargs):
return self._optimizer.summary(**kwargs)
def get_pipeline(
self,
pipeline_name: Optional[str] = None,
astype: lale.helpers.astype_type = "lale",
**kwargs
):
"""Retrieve one of the trials.
Parameters
----------
pipeline_name : union type, default None
- string
Key for table returned by summary(), return a trainable pipeline.
- None
When not specified, return the best trained pipeline found.
astype : 'lale' or 'sklearn', default 'lale'
Type of resulting pipeline.
kwargs :
additional arguments to pass to the underlying optimizer
Returns
-------
result : Trained operator if best, trainable operator otherwise."""
result = self.add_suffix(
self._optimizer.get_pipeline(
pipeline_name=pipeline_name, astype=astype, **kwargs
)
)
if result is None or astype == "lale":
return result
assert astype == "sklearn", astype
return result.export_to_sklearn_pipeline()
_hyperparams_schema = {
"allOf": [
{
"type": "object",
"required": [
"prefix",
"suffix",
"optimizer",
],
"relevantToOptimizer": [],
"additionalProperties": True,
"properties": {
"prefix": {
"description": "Trained Lale operator or pipeline,\nby default None.",
"anyOf": [
{"laleType": "operator", "not": {"enum": [None]}},
{"enum": [None]},
],
"default": None,
},
"suffix": {
"description": "Lale operator or pipeline, which is to be optimized.\nIf (default) None is specified, LogisticRegression is used.",
"anyOf": [
{"laleType": "operator", "not": {"enum": [None]}},
{"enum": [None]},
],
"default": None,
},
"optimizer": {
"description": "Lale optimizer.\nIf (default) None is specified, Hyperopt is used.",
"anyOf": [
{"laleType": "operator", "not": {"enum": [None]}},
{"enum": [None]},
],
"default": None,
},
"optimizer_args": {
"description": "Parameters to be passed to the optimizer",
"anyOf": [
{"type": "object"},
{"enum": [None]},
],
"default": None,
},
},
}
]
}
_input_fit_schema = {
"type": "object",
"required": ["X", "y"],
"properties": {"X": {}, "y": {}},
}
_input_predict_schema = {"type": "object", "required": ["X"], "properties": {"X": {}}}
_output_predict_schema: Dict[str, Any] = {}
_combined_schemas = {
"description": """OptimizeSuffix is a wrapper around other optimizers, which runs the given optimizer
against the suffix, after transforming the data according to the prefix, and then stitches the result together into
a single trained pipeline.
Examples
--------
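A minimal usage sketch (illustrative only; PCA, LogisticRegression, the iris dataset,
and the extra keyword arguments, which are forwarded to the default Hyperopt optimizer,
are assumptions rather than requirements):
>>> from lale.lib.sklearn import PCA, LogisticRegression as LR
>>> from sklearn import datasets
>>> X, y = datasets.load_iris(return_X_y=True)
>>> prefix = PCA(n_components=2).fit(X, y)
>>> opt = OptimizeSuffix(prefix=prefix, suffix=LR, cv=3, max_evals=5)
>>> trained = opt.fit(X, y)
>>> predictions = trained.predict(X)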
""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.lale.optimize_suffix.html",
"import_from": "lale.lib.lale",
"type": "object",
"tags": {"pre": [], "op": ["estimator"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
},
}
OptimizeSuffix = lale.operators.make_operator(_OptimizeSuffix, _combined_schemas)
lale.docstrings.set_docstrings(OptimizeSuffix)
| 7,229 | 31.421525 | 151 |
py
|
lale
|
lale-master/lale/lib/aif360/_mystic_util.py
|
from typing import Dict, Set
import numpy as np
from mystic.coupler import and_
from mystic.penalty import quadratic_equality
from mystic.solvers import diffev2
def parse_solver_soln(n_flat, group_mapping):
sorted_osize_keys = sorted(group_mapping.keys())
mapped_nsize_tups = list(zip(sorted_osize_keys, n_flat))
mapped_nsize_dict = dict(mapped_nsize_tups)
nsizes = {g2: int(mapped_nsize_dict[g1]) for g1, g2 in group_mapping.items()}
return nsizes
def _calculate_di_ratios(
sizes: Dict[str, int], favorable_labels: Set[int], symmetric=False
):
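    # Illustration with hypothetical sizes and favorable_labels == {1}:
    # sizes {"00": 40, "01": 10, "10": 30, "11": 30} give the "0*" group a
    # favorable rate of 10/50 = 0.2 and the "1*" group 30/60 = 0.5, so di == [0.4].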
group_mapping = {k: k for k in sizes.keys()}
di = []
num_prot_attr = len(list(group_mapping.keys())[0]) - 1
for pa in range(num_prot_attr):
disadv_grp = [x for x in group_mapping.keys() if x[pa] == "0"]
adv_grp = [x for x in group_mapping.keys() if x[pa] == "1"]
disadv_grp_adv_cls = [x for x in disadv_grp if int(x[-1]) in favorable_labels]
disadv_grp_adv_cls_ct = sum(sizes[x] for x in disadv_grp_adv_cls)
disadv_grp_disadv_cls = [
x for x in disadv_grp if int(x[-1]) not in favorable_labels
]
disadv_grp_disadv_cls_ct = sum(sizes[x] for x in disadv_grp_disadv_cls)
adv_grp_disadv_cls = [x for x in adv_grp if int(x[-1]) not in favorable_labels]
adv_grp_disadv_cls_ct = sum(sizes[x] for x in adv_grp_disadv_cls)
adv_grp_adv_cls = [x for x in adv_grp if int(x[-1]) in favorable_labels]
adv_grp_adv_cls_ct = sum(sizes[x] for x in adv_grp_adv_cls)
calc_di = (
(disadv_grp_adv_cls_ct) / (disadv_grp_adv_cls_ct + disadv_grp_disadv_cls_ct)
) / ((adv_grp_adv_cls_ct) / (adv_grp_adv_cls_ct + adv_grp_disadv_cls_ct))
if calc_di > 1 and symmetric:
calc_di = 1 / calc_di
di.append(calc_di)
return di
def _get_sorted_class_counts(sizes: Dict[str, int]):
# get class counts
class_count_dict = {}
for k, v in sizes.items():
c = k[-1]
if c not in class_count_dict:
class_count_dict[c] = 0
class_count_dict[c] += v
sorted_by_count = sorted(class_count_dict.items(), key=lambda x: x[1])
return sorted_by_count
def _calculate_ci_ratios(sizes: Dict[str, int]):
# sorting by class count ensures that ci ratios will be <= 1
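    # Illustration with hypothetical sizes {"00": 10, "01": 30, "10": 20, "11": 40}:
    # the class counts are {0: 30, 1: 70}, so ci == [30 / 70].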
sorted_by_count = _get_sorted_class_counts(sizes)
ci = []
for i in range(len(sorted_by_count) - 1):
ci.append(sorted_by_count[i][1] / sorted_by_count[i + 1][1])
return ci
def obtain_solver_info(
osizes: Dict[str, int],
imbalance_repair_level: float,
bias_repair_level: float,
favorable_labels: Set[int],
):
oci = _calculate_ci_ratios(osizes)
sorted_by_count = _get_sorted_class_counts(osizes)
# if any class reordering has happened, update the group mapping and favorable_labels (for di calculations) accordingly
class_mapping = {old: new for new, (old, _) in enumerate(sorted_by_count)}
group_mapping = {k: k for k in osizes.keys()}
for old, new in class_mapping.items():
if int(old) in favorable_labels:
favorable_labels.remove(int(old))
favorable_labels.add(int(new))
old_groups = [x for x in group_mapping.keys() if x[-1] == old]
for g in old_groups:
group_mapping[g] = group_mapping[g][:-1] + str(new)
mapped_osizes = {k1: osizes[k2] for k1, k2 in group_mapping.items()}
# calculate di ratios and invert if needed
num_prot_attr = len(list(group_mapping.keys())[0]) - 1
odi = _calculate_di_ratios(mapped_osizes, favorable_labels)
for pa in range(num_prot_attr):
calc_di = odi[pa]
if calc_di > 1:
odi[pa] = 1 / calc_di
disadv_grp = [x for x in group_mapping.keys() if x[pa] == "0"]
adv_grp = [x for x in group_mapping.keys() if x[pa] == "1"]
for g in disadv_grp:
group_mapping[g] = (
group_mapping[g][0:pa] + "1" + group_mapping[g][pa + 1 :]
)
for g in adv_grp:
group_mapping[g] = (
group_mapping[g][0:pa] + "0" + group_mapping[g][pa + 1 :]
)
# recompute mapping based on any flipping of protected attribute values
mapped_osizes = {k1: osizes[k2] for k1, k2 in group_mapping.items()}
sorted_osizes = [x[1] for x in sorted(mapped_osizes.items(), key=lambda x: x[0])]
# construct variables for solver
o_flat = np.array(sorted_osizes)
oci_vec = np.array(oci).reshape(-1, 1)
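    # A repair level of 0 keeps the observed ratio unchanged; a repair level of 1
    # moves it all the way to 1 (full class balance / disparate impact parity).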
nci_vec = oci_vec + imbalance_repair_level * (1 - oci_vec)
odi_vec = np.array(odi).reshape(-1, 1)
ndi_vec = odi_vec + bias_repair_level * (1 - odi_vec)
return group_mapping, o_flat, nci_vec, ndi_vec
def construct_ci_penalty(A, C, n_ci, i):
def condition(x):
reshape_list = []
for _ in range(A):
reshape_list.append(2)
reshape_list.append(C)
ndx = np.array(x).reshape(reshape_list)
return (
np.sum(ndx[(slice(None),) * A + (i,)])
/ np.sum(ndx[(slice(None),) * A + (i + 1,)])
) - n_ci[i, 0]
@quadratic_equality(condition, k=1e6, h=10)
def penalty(x):
return 0
return penalty
def create_ci_penalties(n_ci, n_di):
C = n_ci.shape[0] + 1
A = n_di.shape[0]
ci_penalties = []
# specify C-1 class imbalance constraints as heavy penalties
for i in range(C - 1):
penalty = construct_ci_penalty(A, C, n_ci, i)
ci_penalties.append(penalty)
return ci_penalties
def construct_di_penalty(A, C, n_di, F, i):
def condition(x):
reshape_list = []
for _ in range(A):
reshape_list.append(2)
reshape_list.append(C)
ndx = np.array(x).reshape(reshape_list)
di_ratio_top = np.sum(
ndx[(slice(None),) * i + (0,) + (slice(None),) * (A - i - 1) + (tuple(F),)]
) / np.sum(
ndx[
(slice(None),) * i
+ (0,)
+ (slice(None),) * (A - i - 1)
+ (slice(None),)
]
)
di_ratio_bottom = np.sum(
ndx[(slice(None),) * i + (1,) + (slice(None),) * (A - i - 1) + (tuple(F),)]
) / np.sum(
ndx[
(slice(None),) * i
+ (1,)
+ (slice(None),) * (A - i - 1)
+ (slice(None),)
]
)
return (di_ratio_top / di_ratio_bottom) - n_di[i, 0]
@quadratic_equality(condition, k=1e6, h=10)
def penalty(x):
return 0
return penalty
def create_di_penalties(n_ci, n_di, F):
C = n_ci.shape[0] + 1
A = n_di.shape[0]
di_penalties = []
    # specify A disparate impact ratio constraints as heavy penalties
for i in range(A):
penalty = construct_di_penalty(A, C, n_di, F, i)
di_penalties.append(penalty)
return di_penalties
def calc_oversample_soln(o_flat, F, n_ci, n_di):
# integer constraint
ints = np.round
# minimize sum of new number of examples
def cost(x):
return np.sum(x)
    # specify observed example counts as lower bounds and the maximum observed count as the upper bound
bounds = [(x, max(o_flat)) for x in o_flat]
# combine all penalties
ci_penalties = create_ci_penalties(n_ci, n_di)
di_penalties = create_di_penalties(n_ci, n_di, F)
all_penalties = and_(*ci_penalties, *di_penalties)
# integer constraint
constraint = ints
# pass to solver
result = diffev2(
cost,
x0=o_flat,
bounds=bounds,
constraints=constraint,
penalty=all_penalties,
full_output=False,
disp=False,
npop=50,
gtol=100,
)
return result
def calc_undersample_soln(o_flat, F, n_ci, n_di):
# integer constraint
ints = np.round
# minimize negative sum of new number of examples (equivalent to maximizing positive sum)
def cost(x):
return -np.sum(x)
# specify observed example counts as upper bounds and minimum observed count as lower bounds
bounds = [(min(o_flat), x) for x in o_flat]
# combine all penalties
ci_penalties = create_ci_penalties(n_ci, n_di)
di_penalties = create_di_penalties(n_ci, n_di, F)
all_penalties = and_(*ci_penalties, *di_penalties)
# integer constraint
constraint = ints
# pass to solver
result = diffev2(
cost,
x0=o_flat,
bounds=bounds,
constraints=constraint,
penalty=all_penalties,
full_output=False,
disp=False,
npop=50,
gtol=100,
)
return result
def calc_mixedsample_soln(o_flat, F, n_ci, n_di):
# integer constraint
ints = np.round
# minimize sum of absolute value of differences from original numbers of examples
def cost(x):
return np.sum(np.abs(x - o_flat))
# specify minimum and maximum observed counts as lower bounds and upper bounds
bounds = [(min(o_flat), max(o_flat)) for _ in o_flat]
# combine all penalties
ci_penalties = create_ci_penalties(n_ci, n_di)
di_penalties = create_di_penalties(n_ci, n_di, F)
all_penalties = and_(*ci_penalties, *di_penalties)
# integer constraint
constraint = ints
# pass to solver
result = diffev2(
cost,
x0=o_flat,
bounds=bounds,
constraints=constraint,
penalty=all_penalties,
full_output=False,
disp=False,
npop=50,
gtol=100,
)
return result
| 9,559 | 30.551155 | 123 |
py
|
lale
|
lale-master/lale/lib/aif360/bagging_orbis_classifier.py
|
# Copyright 2023 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
import pandas as pd
import sklearn.preprocessing
import lale.docstrings
import lale.lib.sklearn
import lale.operators
from lale.lib.imblearn._common_schemas import _hparam_n_jobs, _hparam_random_state
from .orbis import Orbis
from .orbis import _hyperparams_schema as orbis_hyperparams_schema
from .util import (
_categorical_fairness_properties,
_categorical_input_predict_proba_schema,
_categorical_input_predict_schema,
_categorical_output_predict_proba_schema,
_categorical_output_predict_schema,
_categorical_supervised_input_fit_schema,
_validate_fairness_info,
)
def _orbis_schema(hparam):
return orbis_hyperparams_schema["allOf"][0]["properties"][hparam]
class _BaggingOrbisClassifierImpl:
def __init__(
self,
*,
favorable_labels,
protected_attributes,
unfavorable_labels=None,
redact=True,
preparation=None,
estimator=None,
n_estimators=10,
imbalance_repair_level=0.8,
bias_repair_level=0.8,
combine="keep_separate",
sampling_strategy="mixed",
replacement=False,
n_jobs=None,
random_state=None,
):
assert unfavorable_labels is None, "not yet implemented"
self.fairness_info = {
"favorable_labels": favorable_labels,
"protected_attributes": protected_attributes,
"unfavorable_labels": unfavorable_labels,
}
_validate_fairness_info(**self.fairness_info, check_schema=False)
self.redact = redact
self.preparation = preparation
self.estimator = estimator
self.n_estimators = n_estimators
self.imbalance_repair_level = imbalance_repair_level
self.bias_repair_level = bias_repair_level
self.combine = combine
self.sampling_strategy = sampling_strategy
self.sampler_hparams = {
"replacement": replacement,
"n_jobs": n_jobs,
"random_state": random_state,
}
def fit(self, X, y):
assert isinstance(X, pd.DataFrame), "not yet implemented"
# preemptively encode labels before BaggingClassifier does so
self.lab_enc = sklearn.preprocessing.LabelEncoder().fit(y)
fav_labels = list(
self.lab_enc.transform(self.fairness_info["favorable_labels"])
)
if self.estimator is None:
final_est = lale.lib.sklearn.DecisionTreeClassifier()
else:
final_est = self.estimator
if self.preparation is None:
prep_and_est = final_est
else:
prep_and_est = self.preparation >> final_est
orbis = Orbis(
favorable_labels=fav_labels,
protected_attributes=self.fairness_info["protected_attributes"],
estimator=prep_and_est,
redact=self.redact,
imbalance_repair_level=self.imbalance_repair_level,
bias_repair_level=self.bias_repair_level,
combine=self.combine,
sampling_strategy=self.sampling_strategy,
**self.sampler_hparams,
)
def _repair_dtypes(inner_X): # for some reason BaggingClassifier spoils dtypes
d = {
col: pd.Series(inner_X[col], index=inner_X.index, dtype=typ, name=col)
for col, typ in X.dtypes.items()
}
return pd.DataFrame(d)
repair_dtypes = lale.lib.sklearn.FunctionTransformer(func=_repair_dtypes)
trainable_ensemble = lale.lib.sklearn.BaggingClassifier(
base_estimator=repair_dtypes >> orbis,
n_estimators=self.n_estimators,
n_jobs=self.sampler_hparams["n_jobs"],
random_state=self.sampler_hparams["random_state"],
)
encoded_y = pd.Series(self.lab_enc.transform(y), index=y.index)
self.trained_ensemble = trainable_ensemble.fit(X, encoded_y)
return self
def predict(self, X, **predict_params):
with warnings.catch_warnings():
# Bagging calls predict_proba on the trainable instead of the result of fit
warnings.simplefilter("ignore", category=DeprecationWarning)
encoded_y = self.trained_ensemble.predict(X, **predict_params)
return self.lab_enc.inverse_transform(encoded_y)
def predict_proba(self, X, **predict_params):
with warnings.catch_warnings():
# Bagging calls predict_proba on the trainable instead of the result of fit
warnings.simplefilter("ignore", category=DeprecationWarning)
result = self.trained_ensemble.predict_proba(X, **predict_params)
return result
_hyperparams_schema = {
"description": "Hyperparameter schema.",
"allOf": [
{
"type": "object",
"additionalProperties": False,
"required": [*_categorical_fairness_properties.keys()],
"relevantToOptimizer": [
"n_estimators",
"imbalance_repair_level",
"bias_repair_level",
],
"properties": {
**_categorical_fairness_properties,
"redact": {
"description": "Whether to redact protected attributes before data preparation (recommended) or not.",
"type": "boolean",
"default": True,
},
"preparation": {
"description": "Transformer, which may be an individual operator or a sub-pipeline.",
"anyOf": [
{"laleType": "operator"},
{"description": "NoOp", "enum": [None]},
],
"default": None,
},
"estimator": {
"description": "The nested classifier to fit on balanced subsets of the data.",
"anyOf": [
{"laleType": "operator"},
{"enum": [None], "description": "DecisionTreeClassifier"},
],
"default": None,
},
"n_estimators": {
"description": "The number of base estimators in the ensemble.",
"type": "integer",
"minimumForOptimizer": 10,
"maximumForOptimizer": 100,
"distribution": "uniform",
"default": 10,
},
"imbalance_repair_level": _orbis_schema("imbalance_repair_level"),
"bias_repair_level": _orbis_schema("bias_repair_level"),
"combine": _orbis_schema("combine"),
"sampling_strategy": _orbis_schema("sampling_strategy"),
"replacement": {
"description": "Whether under-sampling is with or without replacement.",
"type": "boolean",
"default": False,
},
"n_jobs": _hparam_n_jobs,
"random_state": _hparam_random_state,
},
},
{
"description": "When sampling_strategy is minimum or maximum, both repair levels must be 1.",
"anyOf": [
{
"type": "object",
"properties": {
"sampling_strategy": {"not": {"enum": ["minimum", "maximum"]}}
},
},
{
"type": "object",
"properties": {
"imbalance_repair_level": {"enum": [1]},
"bias_repair_level": {"enum": [1]},
},
},
],
},
],
}
_combined_schemas = {
"description": """Experimental BaggingOrbisClassifier in-estimator fairness mitigator.
Work in progress and subject to change; only supports pandas DataFrame so far.
Bagging ensemble classifier, where each inner classifier gets trained
on a subset of the data that has been balanced with `Orbis`_.
Unlike other mitigators in `lale.lib.aif360`, this mitigator does not
come from AIF360.
.. _`Orbis`: https://lale.readthedocs.io/en/latest/modules/lale.lib.aif360.orbis.html#lale.lib.aif360.orbis.Orbis
""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.aif360.bagging_orbis_classifier.html#lale.lib.aif360.bagging_orbis_classifier.BaggingOrbisClassifier",
"type": "object",
"tags": {"pre": [], "op": ["estimator", "classifier"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _categorical_supervised_input_fit_schema,
"input_predict": _categorical_input_predict_schema,
"output_predict": _categorical_output_predict_schema,
"input_predict_proba": _categorical_input_predict_proba_schema,
"output_predict_proba": _categorical_output_predict_proba_schema,
},
}
BaggingOrbisClassifier = lale.operators.make_operator(
_BaggingOrbisClassifierImpl, _combined_schemas
)
lale.docstrings.set_docstrings(BaggingOrbisClassifier)
| 9,739 | 38.274194 | 183 |
py
|
lale
|
lale-master/lale/lib/aif360/redacting.py
|
# Copyright 2020, 2021 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pandas as pd
import lale.docstrings
import lale.operators
from .util import (
_categorical_fairness_properties,
_categorical_input_transform_schema,
_categorical_output_transform_schema,
_categorical_unsupervised_input_fit_schema,
)
def _redaction_value(column_values):
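    # e.g., for column values [3, 7, 7, 2] this returns 7, the most frequent value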
unique_values, unique_counts = np.unique(column_values, return_counts=True)
most_frequent_index = np.argmax(unique_counts)
most_frequent_value = unique_values[most_frequent_index]
return most_frequent_value
class _RedactingImpl:
def __init__(
self, *, favorable_labels, protected_attributes, unfavorable_labels=None
):
self.prot_attr_names = [pa["feature"] for pa in protected_attributes]
def fit(self, X, y=None):
if isinstance(X, pd.DataFrame):
self.redaction_values = {
pa: _redaction_value(X[pa]) for pa in self.prot_attr_names
}
elif isinstance(X, np.ndarray):
self.redaction_values = {
pa: _redaction_value(X[:, pa]) for pa in self.prot_attr_names
}
else:
raise TypeError(f"unexpected type {type(X)}")
return self
def transform(self, X):
if isinstance(X, pd.DataFrame):
def get_reduction_values(name):
return lambda val: self.redaction_values[name]
new_columns = [
(
X[name].map(get_reduction_values(name))
if name in self.redaction_values
else X[name]
)
for name in X.columns
]
result = pd.concat(new_columns, axis=1)
elif isinstance(X, np.ndarray):
result = X.copy()
for column, value in self.redaction_values.items():
result[:, column].fill(value)
else:
raise TypeError(f"unexpected type {type(X)}")
return result
def transform_schema(self, s_X):
"""Used internally by Lale for type-checking downstream operators."""
return s_X
_input_fit_schema = _categorical_unsupervised_input_fit_schema
_input_transform_schema = _categorical_input_transform_schema
_output_transform_schema = _categorical_output_transform_schema
_hyperparams_schema = {
"description": "Hyperparameter schema.",
"allOf": [
{
"type": "object",
"additionalProperties": False,
"required": [
"favorable_labels",
"protected_attributes",
"unfavorable_labels",
],
"relevantToOptimizer": [],
"properties": {
"favorable_labels": {
"description": "Ignored.",
"laleType": "Any",
},
"protected_attributes": _categorical_fairness_properties[
"protected_attributes"
],
"unfavorable_labels": {
"description": "Ignored.",
"laleType": "Any",
},
},
}
],
}
_combined_schemas = {
"description": """Redacting preprocessor for fairness mitigation.
This sets all the protected attributes to constants,
using the most frequent value in the column.
This operator is used internally by various lale.lib.aif360 metrics
and mitigators, so you often do not need to use it directly yourself.
""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.aif360.redacting.html#lale.lib.aif360.redacting.Redacting",
"import_from": "lale.lib.aif360",
"type": "object",
"tags": {"pre": [], "op": ["transformer"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_transform": _input_transform_schema,
"output_transform": _output_transform_schema,
},
}
Redacting = lale.operators.make_operator(_RedactingImpl, _combined_schemas)
lale.docstrings.set_docstrings(Redacting)
| 4,665 | 32.328571 | 140 |
py
|
lale
|
lale-master/lale/lib/aif360/gerry_fair_classifier.py
|
# Copyright 2020-2023 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import aif360.algorithms.inprocessing
import sklearn.linear_model
import lale.docstrings
import lale.operators
from .util import (
_BaseInEstimatorImpl,
_categorical_fairness_properties,
_categorical_input_predict_proba_schema,
_categorical_input_predict_schema,
_categorical_output_predict_proba_schema,
_categorical_output_predict_schema,
_categorical_supervised_input_fit_schema,
)
class _GerryFairClassifierImpl(_BaseInEstimatorImpl):
def __init__(
self,
*,
favorable_labels,
protected_attributes,
unfavorable_labels=None,
redact=True,
preparation=None,
**hyperparams,
):
predictor = hyperparams.get("predictor", None)
if predictor is None:
predictor = sklearn.linear_model.LinearRegression()
if isinstance(predictor, lale.operators.Operator):
if isinstance(predictor, lale.operators.IndividualOp):
predictor = predictor._impl_instance()._wrapped_model
else:
raise ValueError(
"If predictor is a Lale operator, it needs to be an individual operator."
)
hyperparams["predictor"] = predictor
mitigator = aif360.algorithms.inprocessing.GerryFairClassifier(**hyperparams)
super().__init__(
favorable_labels=favorable_labels,
protected_attributes=protected_attributes,
unfavorable_labels=unfavorable_labels,
redact=redact,
preparation=preparation,
mitigator=mitigator,
)
_input_fit_schema = _categorical_supervised_input_fit_schema
_input_predict_schema = _categorical_input_predict_schema
_output_predict_schema = _categorical_output_predict_schema
_input_predict_proba_schema = _categorical_input_predict_proba_schema
_output_predict_proba_schema = _categorical_output_predict_proba_schema
_hyperparams_schema = {
"description": "Hyperparameter schema.",
"allOf": [
{
"description": "This first sub-object lists all constructor arguments with their types, one at a time, omitting cross-argument constraints.",
"type": "object",
"additionalProperties": False,
"required": [
*_categorical_fairness_properties.keys(),
"redact",
"preparation",
"C",
"printflag",
"heatmapflag",
"heatmap_iter",
"heatmap_path",
"max_iters",
"gamma",
"fairness_def",
"predictor",
],
"relevantToOptimizer": ["C", "max_iters", "gamma", "fairness_def"],
"properties": {
**_categorical_fairness_properties,
"redact": {
"description": "Whether to redact protected attributes before data preparation (recommended) or not.",
"type": "boolean",
"default": True,
},
"preparation": {
"description": "Transformer, which may be an individual operator or a sub-pipeline.",
"anyOf": [
{"laleType": "operator"},
{"description": "lale.lib.lale.NoOp", "enum": [None]},
],
"default": None,
},
"C": {
"description": "Maximum L1 norm for the dual variables.",
"type": "number",
"default": 10,
"minimumForOptimizer": 0.03125,
"maximumForOptimizer": 32768,
},
"printflag": {
"description": "Print output flag.",
"type": "boolean",
"default": False,
},
"heatmapflag": {
"description": "Save heatmaps every heatmap_iter flag.",
"type": "boolean",
"default": False,
},
"heatmap_iter": {
"description": "Save heatmaps every heatmap_iter.",
"type": "integer",
"minimum": 1,
"default": 10,
},
"heatmap_path": {
"description": "Save heatmaps path.",
"type": "string",
"default": ".",
},
"max_iters": {
"description": "Time horizon for the fictitious play dynamic.",
"type": "integer",
"minimum": 1,
"default": 10,
"distribution": "loguniform",
"maximumForOptimizer": 1000,
},
"gamma": {
"description": "Fairness approximation parameter.",
"type": "number",
"minimum": 0.0,
"exclusiveMinimum": True,
"default": 0.01,
"distribution": "loguniform",
"minimumForOptimizer": 0.001,
"maximumForOptimizer": 1.0,
},
"fairness_def": {
"description": "Fairness notion.",
"enum": ["FP", "FN"],
"default": "FP",
},
"predictor": {
"description": "Hypothesis class for the learner.",
"anyOf": [
{
"description": "Supports LR, SVM, KR, Trees.",
"laleType": "operator",
},
{
"description": "sklearn.linear_model.LinearRegression",
"enum": [None],
},
],
"default": None,
},
},
},
],
}
_combined_schemas = {
"description": """`GerryFairClassifier`_ in-estimator fairness mitigator. Attempts to learn classifiers that are fair with respect to rich subgroups (`Kearns et al. 2018`_, `Kearns et al. 2019`_). Rich subgroups are defined by (linear) functions over the sensitive attributes, and fairness notions are statistical: false positive, false negative, and statistical parity rates. This implementation uses a max of two regressions as a cost-sensitive classification oracle, and supports linear regression, support vector machines, decision trees, and kernel regression.
.. _`GerryFairClassifier`: https://aif360.readthedocs.io/en/latest/modules/generated/aif360.algorithms.inprocessing.GerryFairClassifier.html
.. _`Kearns et al. 2018`: http://proceedings.mlr.press/v80/kearns18a.html
.. _`Kearns et al. 2019`: https://doi.org/10.1145/3287560.3287592
""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.aif360.gerry_fair_classifier.html#lale.lib.aif360.gerry_fair_classifier.GerryFairClassifier",
"import_from": "aif360.sklearn.inprocessing",
"type": "object",
"tags": {"pre": [], "op": ["estimator", "classifier"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
"input_predict_proba": _input_predict_proba_schema,
"output_predict_proba": _output_predict_proba_schema,
},
}
GerryFairClassifier = lale.operators.make_operator(
_GerryFairClassifierImpl, _combined_schemas
)
lale.docstrings.set_docstrings(GerryFairClassifier)
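# A minimal usage sketch (illustrative only; the fairness_info below assumes a dataset
# with a binary "sex" feature whose reference group is encoded as 1):
#
#     clf = GerryFairClassifier(
#         favorable_labels=[1],
#         protected_attributes=[{"feature": "sex", "reference_group": [1]}],
#     )
#     trained = clf.fit(train_X, train_y)
#     predictions = trained.predict(test_X)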
| 8,383 | 40.098039 | 569 |
py
|
lale
|
lale-master/lale/lib/aif360/adversarial_debiasing.py
|
# Copyright 2020-2023 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import io
import os
import uuid
import packaging.version
import lale.docstrings
import lale.operators
# suppress spurious warnings from TensorFlow that are caused by
# indirectly importing it via aif360.algorithms.inprocessing
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
import aif360.algorithms.inprocessing # noqa:E402 # pylint:disable=wrong-import-position,wrong-import-order
from .util import ( # noqa:E402 # pylint:disable=wrong-import-position,wrong-import-order
_BaseInEstimatorImpl,
_categorical_fairness_properties,
_categorical_input_predict_proba_schema,
_categorical_input_predict_schema,
_categorical_output_predict_proba_schema,
_categorical_output_predict_schema,
_categorical_supervised_input_fit_schema,
)
try:
import tensorflow as tf
tensorflow_installed = True
except ImportError:
tensorflow_installed = False
class _AdversarialDebiasingImpl(_BaseInEstimatorImpl):
def __init__( # pylint:disable=super-init-not-called
self,
*,
favorable_labels,
protected_attributes,
unfavorable_labels=None,
redact=True,
preparation=None,
scope_name="adversarial_debiasing",
verbose=0,
**hyperparams,
):
assert tensorflow_installed, """Your Python environment does not have tensorflow installed. You can install it with
pip install tensorflow
or with
pip install 'lale[full]'"""
tf_version = packaging.version.parse(getattr(tf, "__version__"))
assert packaging.version.Version("1.13.1") <= tf_version, tf_version
self.scope_name = scope_name
self.protected_attributes = protected_attributes
self.favorable_labels = favorable_labels
self.unfavorable_labels = unfavorable_labels
self.redact = redact
self.preparation = preparation
self.verbose = verbose
self.hyperparams = hyperparams
def fit(self, X, y=None):
tf.compat.v1.disable_eager_execution()
tf.compat.v1.reset_default_graph()
if self.hyperparams.get("sess", None) is None:
self.hyperparams["sess"] = tf.compat.v1.Session()
prot_attr_names = [pa["feature"] for pa in self.protected_attributes]
unprivileged_groups = [{name: 0 for name in prot_attr_names}]
privileged_groups = [{name: 1 for name in prot_attr_names}]
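        # The mitigator is constructed lazily here in fit; appending a fresh UUID to the
        # scope name keeps TensorFlow variable scopes unique across repeated fits.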
mitigator = aif360.algorithms.inprocessing.AdversarialDebiasing(
unprivileged_groups=unprivileged_groups,
privileged_groups=privileged_groups,
scope_name=self.scope_name + str(uuid.uuid4()),
**self.hyperparams,
)
super().__init__(
favorable_labels=self.favorable_labels,
protected_attributes=self.protected_attributes,
unfavorable_labels=self.unfavorable_labels,
redact=self.redact,
preparation=self.preparation,
mitigator=mitigator,
)
if self.verbose == 0:
with contextlib.redirect_stdout(io.StringIO()):
super().fit(X, y)
else:
super().fit(X, y)
return self
_input_fit_schema = _categorical_supervised_input_fit_schema
_input_predict_schema = _categorical_input_predict_schema
_output_predict_schema = _categorical_output_predict_schema
_input_predict_proba_schema = _categorical_input_predict_proba_schema
_output_predict_proba_schema = _categorical_output_predict_proba_schema
_hyperparams_schema = {
"description": "Hyperparameter schema.",
"allOf": [
{
"description": "This first sub-object lists all constructor arguments with their types, one at a time, omitting cross-argument constraints.",
"type": "object",
"additionalProperties": False,
"required": [
*_categorical_fairness_properties.keys(),
"redact",
"preparation",
"scope_name",
"sess",
"seed",
"adversary_loss_weight",
"num_epochs",
"batch_size",
"classifier_num_hidden_units",
"debias",
"verbose",
],
"relevantToOptimizer": [
"adversary_loss_weight",
"num_epochs",
"batch_size",
"classifier_num_hidden_units",
],
"properties": {
**_categorical_fairness_properties,
"redact": {
"description": "Whether to redact protected attributes before data preparation (recommended) or not.",
"type": "boolean",
"default": True,
},
"preparation": {
"description": "Transformer, which may be an individual operator or a sub-pipeline.",
"anyOf": [
{"laleType": "operator"},
{"description": "lale.lib.lale.NoOp", "enum": [None]},
],
"default": None,
},
"scope_name": {
"description": "Scope name for the tenforflow variables. A unique alpha-numeric suffix is added to this value.",
"type": "string",
"default": "adversarial_debiasing",
},
"sess": {
"description": "TensorFlow session.",
"anyOf": [
{
"description": "User-provided session object.",
"laleType": "Any",
},
{
"description": "Create a session for the user.",
"enum": [None],
},
],
"default": None,
},
"seed": {
"description": "Seed to make `predict` repeatable.",
"anyOf": [{"type": "integer"}, {"enum": [None]}],
"default": None,
},
"adversary_loss_weight": {
"description": "Hyperparameter that chooses the strength of the adversarial loss.",
"type": "number",
"default": 0.1,
"distribution": "loguniform",
"minimumForOptimizer": 0.03125,
"maximumForOptimizer": 32768,
},
"num_epochs": {
"description": "Number of training epochs.",
"type": "integer",
"minimum": 1,
"default": 50,
"distribution": "loguniform",
"minimumForOptimizer": 5,
"maximumForOptimizer": 500,
},
"batch_size": {
"description": "Batch size.",
"type": "integer",
"minimum": 1,
"default": 128,
"distribution": "loguniform",
"minimumForOptimizer": 4,
"maximumForOptimizer": 512,
},
"classifier_num_hidden_units": {
"description": "Number of hidden units in the classifier model.",
"type": "integer",
"minimum": 1,
"default": 200,
"distribution": "loguniform",
"minimumForOptimizer": 16,
"maximumForOptimizer": 1024,
},
"debias": {
"description": "Learn a classifier with or without debiasing.",
"type": "boolean",
"default": True,
},
"verbose": {
"description": "If zero, then no output.",
"type": "integer",
"default": 0,
},
},
},
],
}
_combined_schemas = {
"description": """`AdversarialDebiasing`_ in-estimator fairness mitigator. Learns a classifier to maximize prediction accuracy and simultaneously reduce an adversary's ability to determine the protected attribute from the predictions (`Zhang et al. 2018`_). This approach leads to a fair classifier as the predictions cannot carry any group discrimination information that the adversary can exploit. Implemented based on TensorFlow.
.. _`AdversarialDebiasing`: https://aif360.readthedocs.io/en/latest/modules/generated/aif360.algorithms.inprocessing.AdversarialDebiasing.html
.. _`Zhang et al. 2018`: https://doi.org/10.1145/3278721.3278779
""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.aif360.adversarial_debiasing.html#lale.lib.aif360.adversarial_debiasing.AdversarialDebiasing",
"import_from": "aif360.sklearn.inprocessing",
"type": "object",
"tags": {"pre": [], "op": ["estimator", "classifier"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
"input_predict_proba": _input_predict_proba_schema,
"output_predict_proba": _output_predict_proba_schema,
},
}
AdversarialDebiasing = lale.operators.make_operator(
_AdversarialDebiasingImpl, _combined_schemas
)
lale.docstrings.set_docstrings(AdversarialDebiasing)
| 10,214 | 39.216535 | 436 |
py
|
lale
|
lale-master/lale/lib/aif360/_suppress_aif360_warnings.py
|
# Copyright 2019-2023 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
_logger = logging.getLogger()
_old_log_level = _logger.getEffectiveLevel()
_logger.setLevel(level=logging.ERROR)
# the following triggers spurious AIF360 warning "No module named 'fairlearn'":
import aif360.algorithms.inprocessing # isort:skip # noqa:E402,F401 # pylint:disable=wrong-import-position,wrong-import-order
# the following triggers spurious AIF360 warning "No module named 'tempeh'":
import aif360.datasets # isort:skip # noqa:E402,F401 # pylint:disable=wrong-import-position,wrong-import-order
_logger.setLevel(_old_log_level)
dummy = "dummy"
| 1,161 | 37.733333 | 127 |
py
|
lale
|
lale-master/lale/lib/aif360/prejudice_remover.py
|
# Copyright 2020-2023 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import aif360.algorithms.inprocessing
import lale.docstrings
import lale.operators
from .util import (
_BaseInEstimatorImpl,
_categorical_fairness_properties,
_categorical_input_predict_proba_schema,
_categorical_input_predict_schema,
_categorical_output_predict_proba_schema,
_categorical_output_predict_schema,
_categorical_supervised_input_fit_schema,
)
class _PrejudiceRemoverImpl(_BaseInEstimatorImpl):
def __init__(
self,
*,
favorable_labels,
protected_attributes,
unfavorable_labels=None,
redact=True,
preparation=None,
**hyperparams,
):
mitigator = aif360.algorithms.inprocessing.PrejudiceRemover(**hyperparams)
super().__init__(
favorable_labels=favorable_labels,
protected_attributes=protected_attributes,
unfavorable_labels=unfavorable_labels,
redact=redact,
preparation=preparation,
mitigator=mitigator,
)
_input_fit_schema = _categorical_supervised_input_fit_schema
_input_predict_schema = _categorical_input_predict_schema
_output_predict_schema = _categorical_output_predict_schema
_input_predict_proba_schema = _categorical_input_predict_proba_schema
_output_predict_proba_schema = _categorical_output_predict_proba_schema
_hyperparams_schema = {
"description": "Hyperparameter schema.",
"allOf": [
{
"description": "This first sub-object lists all constructor arguments with their types, one at a time, omitting cross-argument constraints.",
"type": "object",
"additionalProperties": False,
"required": [
*_categorical_fairness_properties.keys(),
"redact",
"preparation",
"eta",
],
"relevantToOptimizer": ["eta"],
"properties": {
**_categorical_fairness_properties,
"redact": {
"description": "Whether to redact protected attributes before data preparation (recommended) or not.",
"type": "boolean",
"default": True,
},
"preparation": {
"description": "Transformer, which may be an individual operator or a sub-pipeline.",
"anyOf": [
{"laleType": "operator"},
{"description": "lale.lib.lale.NoOp", "enum": [None]},
],
"default": None,
},
"eta": {
"description": "Fairness penalty parameter.",
"type": "number",
"minimum": 0.0,
"exclusiveMinimum": True,
"default": 1.0,
"minimumForOptimizer": 0.03125,
"maximumForOptimizer": 32768,
},
},
},
],
}
_combined_schemas = {
"description": """`PrejudiceRemover`_ in-estimator fairness mitigator. Adds a discrimination-aware regularization term to the learning objective (`Kamishima et al. 2012`_).
.. _`PrejudiceRemover`: https://aif360.readthedocs.io/en/latest/modules/generated/aif360.algorithms.inprocessing.PrejudiceRemover.html
.. _`Kamishima et al. 2012`: https://doi.org/10.1007/978-3-642-33486-3_3
""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.aif360.prejudice_remover.html#lale.lib.aif360.prejudice_remover.PrejudiceRemover",
"import_from": "aif360.sklearn.inprocessing",
"type": "object",
"tags": {"pre": [], "op": ["estimator", "classifier"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
"input_predict_proba": _input_predict_proba_schema,
"output_predict_proba": _output_predict_proba_schema,
},
}
PrejudiceRemover = lale.operators.make_operator(
_PrejudiceRemoverImpl, _combined_schemas
)
lale.docstrings.set_docstrings(PrejudiceRemover)
| 4,777 | 36.328125 | 176 |
py
|
lale
|
lale-master/lale/lib/aif360/orbis.py
|
# Copyright 2023 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import Dict, Set
import imblearn.over_sampling
import imblearn.under_sampling
import numpy as np
import pandas as pd
import sklearn.preprocessing
from numpy.testing import assert_allclose
import lale.docstrings
import lale.lib.lale
import lale.operators
from lale.lib.imblearn._common_schemas import (
_hparam_n_jobs,
_hparam_n_neighbors,
_hparam_random_state,
)
from ._mystic_util import (
_calculate_ci_ratios,
_calculate_di_ratios,
calc_mixedsample_soln,
calc_oversample_soln,
calc_undersample_soln,
obtain_solver_info,
parse_solver_soln,
)
from .protected_attributes_encoder import ProtectedAttributesEncoder
from .redacting import Redacting
from .util import (
_categorical_fairness_properties,
_categorical_input_predict_proba_schema,
_categorical_input_predict_schema,
_categorical_output_predict_proba_schema,
_categorical_output_predict_schema,
_categorical_supervised_input_fit_schema,
_validate_fairness_info,
)
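# Build one combined "diaeresis" label per row by concatenating the encoded protected
# attributes with the encoded class label (for example, group "01" and class "1" yield
# "011"), so that resampling can target each group-by-class intersection separately.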
def _make_diaeresis(X, y, fairness_info, combine):
prot_attr_enc = ProtectedAttributesEncoder(
**fairness_info, remainder="drop", combine=combine
)
encoded_X = prot_attr_enc.transform(X)
lab_enc = sklearn.preprocessing.LabelEncoder().fit(y)
encoded_y = pd.Series(lab_enc.transform(y), index=y.index)
encoded_Xy = pd.concat([encoded_X, encoded_y], axis=1, ignore_index=True)
diaeresis_y = encoded_Xy.apply(
lambda row: "".join([str(v) for v in row]), axis=1
).rename("diaeresis_y")
assert X.shape[0] == diaeresis_y.shape[0]
fav_set = set(lab_enc.transform(fairness_info["favorable_labels"]))
return diaeresis_y, fav_set
# This method assumes we have at most 9 classes and binary protected attributes
# (should revisit if these assumptions change)
def _orbis_pick_sizes(
osizes: Dict[str, int],
imbalance_repair_level: float,
bias_repair_level: float,
favorable_labels: Set[int],
sampling_strategy: str,
) -> Dict[str, int]:
if sampling_strategy in ["minimum", "maximum"]:
assert imbalance_repair_level == 1, imbalance_repair_level
assert bias_repair_level == 1, bias_repair_level
if sampling_strategy == "minimum":
one_size_fits_all = min(osizes.values())
else:
one_size_fits_all = max(osizes.values())
nsizes = {k: one_size_fits_all for k in osizes.keys()}
else:
group_mapping, o_flat, nci_vec, ndi_vec = obtain_solver_info(
osizes, imbalance_repair_level, bias_repair_level, favorable_labels
)
if sampling_strategy == "under":
n_flat = calc_undersample_soln(o_flat, favorable_labels, nci_vec, ndi_vec)
elif sampling_strategy == "over":
n_flat = calc_oversample_soln(o_flat, favorable_labels, nci_vec, ndi_vec)
elif sampling_strategy == "mixed":
n_flat = calc_mixedsample_soln(o_flat, favorable_labels, nci_vec, ndi_vec)
else:
assert False, f"unexpected sampling_strategy {sampling_strategy}"
nsizes = parse_solver_soln(n_flat, group_mapping)
obtained_ci = np.array(_calculate_ci_ratios(nsizes)).reshape(-1, 1)
obtained_di = np.array(
_calculate_di_ratios(nsizes, favorable_labels, symmetric=True)
).reshape(-1, 1)
assert_allclose(obtained_ci, nci_vec, rtol=0.05)
assert_allclose(obtained_di, ndi_vec, rtol=0.05)
return nsizes
def _orbis_resample(X, y, diaeresis_y, osizes, nsizes, sampler_hparams):
# concat y so we can get it back out without needing an inverse to diaeresis
# concat diaeresis_y so we can filter on it after resampling
Xyy = pd.concat([X, y, diaeresis_y], axis=1)
# under-sample entire data, then keep only shrunk labels
under_sizes = {k: min(ns, osizes[k]) for k, ns in nsizes.items()}
under_hparams = {
**{
h: v
for h, v in sampler_hparams.items()
if h not in ["k_neighbors", "n_jobs"]
},
"sampling_strategy": under_sizes,
}
under_op = imblearn.under_sampling.RandomUnderSampler(**under_hparams)
under_Xyy_all, _ = under_op.fit_resample(Xyy, diaeresis_y)
shrunk_labels = [k for k, ns in nsizes.items() if ns < osizes[k]]
under_Xyy = under_Xyy_all[under_Xyy_all.iloc[:, -1].isin(shrunk_labels)]
# over-sample entire data, then keep only not-shrunk labels
over_sizes = {k: max(ns, osizes[k]) for k, ns in nsizes.items()}
over_hparams = {
**{h: v for h, v in sampler_hparams.items() if h not in ["replacement"]},
"sampling_strategy": over_sizes,
}
cats_mask = [not np.issubdtype(typ, np.number) for typ in Xyy.dtypes]
if all(cats_mask): # all nominal -> use SMOTEN
over_op = imblearn.over_sampling.SMOTEN(**over_hparams)
elif not any(cats_mask): # all continuous -> use vanilla SMOTE
over_op = imblearn.over_sampling.SMOTE(**over_hparams)
else: # mix of nominal and continuous -> use SMOTENC
over_op = imblearn.over_sampling.SMOTENC(
categorical_features=cats_mask, **over_hparams
)
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=UserWarning)
over_Xyy_all, _ = over_op.fit_resample(Xyy, diaeresis_y)
not_shrunk_labels = [k for k in nsizes if k not in shrunk_labels]
over_Xyy = over_Xyy_all[over_Xyy_all.iloc[:, -1].isin(not_shrunk_labels)]
# combine and use sample(frac=1) to randomize the order of instances
assert set(over_Xyy.iloc[:, -1].unique()).isdisjoint(under_Xyy.iloc[:, -1])
resampled_Xyy = pd.concat([under_Xyy, over_Xyy], axis=0).sample(frac=1)
resampled_X = resampled_Xyy.iloc[:, :-2]
resampled_y = resampled_Xyy.iloc[:, -2]
return resampled_X, resampled_y
class _OrbisImpl:
def __init__(
self,
*,
favorable_labels,
protected_attributes,
estimator,
unfavorable_labels=None,
redact=True,
imbalance_repair_level=0.8,
bias_repair_level=0.8,
combine="keep_separate",
sampling_strategy="mixed",
**sampler_hparams,
):
self.fairness_info = {
"favorable_labels": favorable_labels,
"protected_attributes": protected_attributes,
"unfavorable_labels": unfavorable_labels,
}
_validate_fairness_info(**self.fairness_info, check_schema=False)
self.estimator = estimator
self.redact = redact
self.imbalance_repair_level = imbalance_repair_level
self.bias_repair_level = bias_repair_level
self.combine = combine
self.sampling_strategy = sampling_strategy
self.sampler_hparams = sampler_hparams
@property
def classes_(self):
return self.estimator.classes_
def fit(self, X, y):
assert isinstance(X, pd.DataFrame), "not yet implemented"
assert X.shape[0] == y.shape[0], (X.shape, y.shape)
if not isinstance(y, pd.Series):
y = pd.Series(y, index=X.index, name="y")
diaeresis_y, fav_set = _make_diaeresis(X, y, self.fairness_info, self.combine)
osizes = diaeresis_y.value_counts().sort_index().to_dict()
nsizes = _orbis_pick_sizes(
osizes,
self.imbalance_repair_level,
self.bias_repair_level,
fav_set,
self.sampling_strategy,
)
resampled_X, resampled_y = _orbis_resample(
X, y, diaeresis_y, osizes, nsizes, self.sampler_hparams
)
if self.redact:
self.redacting = Redacting(**self.fairness_info).fit(resampled_X)
else:
self.redacting = lale.lib.lale.NoOp
redacted_X = self.redacting.transform(resampled_X)
self.estimator = self.estimator.fit(redacted_X, resampled_y)
return self
def predict(self, X, **predict_params):
redacted_X = self.redacting.transform(X)
result = self.estimator.predict(redacted_X, **predict_params)
return result
def predict_proba(self, X, **predict_params):
redacted_X = self.redacting.transform(X)
result = self.estimator.predict_proba(redacted_X, **predict_params)
return result
_hyperparams_schema = {
"description": "Hyperparameter schema.",
"allOf": [
{
"type": "object",
"additionalProperties": False,
"required": [*_categorical_fairness_properties.keys(), "estimator"],
"relevantToOptimizer": ["imbalance_repair_level", "bias_repair_level"],
"properties": {
**_categorical_fairness_properties,
"estimator": {
"description": "Nested classifier.",
"laleType": "operator",
},
"redact": {
"description": "Whether to redact protected attributes before data preparation (recommended) or not.",
"type": "boolean",
"default": True,
},
"imbalance_repair_level": {
"description": "How much to repair for class imbalance (0 means original imbalance, 1 means perfect balance).",
"type": "number",
"minimum": 0.0,
"maximum": 1.0,
"default": 0.8,
},
"bias_repair_level": {
"description": "How much to repair for group bias (0 means original bias, 1 means perfect fairness).",
"type": "number",
"minimum": 0.0,
"maximum": 1.0,
"default": 0.8,
},
"combine": {
"description": "How to handle the case when there is more than one protected attribute.",
"enum": ["keep_separate", "and", "or", "error"],
"default": "keep_separate",
},
"sampling_strategy": {
"enum": ["under", "over", "mixed", "minimum", "maximum"],
"description": """How to change the intersection sizes.
Possible choices are:
- ``'under'``: under-sample large intersections to desired repair levels;
- ``'over'``: over-sample small intersections to desired repair levels;
- ``'mixed'``: mix under- with over-sampling while keeping sizes similar to original;
- ``'minimum'``: under-sample everything to the size of the smallest intersection;
- ``'maximum'``: over-sample everything to the size of the largest intersection.""",
"default": "mixed",
},
"replacement": {
"description": "Whether under-sampling is with or without replacement.",
"type": "boolean",
"default": False,
},
"n_jobs": _hparam_n_jobs,
"random_state": _hparam_random_state,
"k_neighbors": {
**_hparam_n_neighbors,
"description": "Number of nearest neighbours to use to construct synthetic samples.",
"default": 5,
},
},
},
{
"description": "When sampling_strategy is minimum or maximum, both repair levels must be 1.",
"anyOf": [
{
"type": "object",
"properties": {
"sampling_strategy": {"not": {"enum": ["minimum", "maximum"]}}
},
},
{
"type": "object",
"properties": {
"imbalance_repair_level": {"enum": [1]},
"bias_repair_level": {"enum": [1]},
},
},
],
},
],
}
_combined_schemas = {
"description": """Experimental Orbis (Oversampling to Repair Bias and Imbalance Simultaneously) pre-estimator fairness mitigator.
Work in progress and subject to change; only supports pandas DataFrame so far.
Uses `SMOTE`_ and `RandomUnderSampler`_ to resample not only for
repairing class imbalance, but also group bias.
Internally, this works by replacing class labels by the cross product
of classes and groups, then changing the sizes of the new
intersections to achieve the desired repair levels.
Unlike other mitigators in `lale.lib.aif360`, this mitigator does not
come from AIF360.
.. _`RandomUnderSampler`: https://imbalanced-learn.org/stable/references/generated/imblearn.under_sampling.RandomUnderSampler.html
.. _`SMOTE`: https://imbalanced-learn.org/stable/references/generated/imblearn.over_sampling.SMOTE.html
""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.aif360.orbis.html#lale.lib.aif360.orbis.Orbis",
"type": "object",
"tags": {"pre": [], "op": ["estimator", "classifier"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _categorical_supervised_input_fit_schema,
"input_predict": _categorical_input_predict_schema,
"output_predict": _categorical_output_predict_schema,
"input_predict_proba": _categorical_input_predict_proba_schema,
"output_predict_proba": _categorical_output_predict_proba_schema,
},
}
Orbis = lale.operators.make_operator(_OrbisImpl, _combined_schemas)
lale.docstrings.set_docstrings(Orbis)
| 14,096 | 40.219298 | 133 |
py
|
lale
|
lale-master/lale/lib/aif360/calibrated_eq_odds_postprocessing.py
|
# Copyright 2019-2023 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import aif360.algorithms.postprocessing
import lale.docstrings
import lale.operators
from .util import (
_BasePostEstimatorImpl,
_categorical_fairness_properties,
_categorical_input_predict_proba_schema,
_categorical_input_predict_schema,
_categorical_output_predict_proba_schema,
_categorical_output_predict_schema,
_categorical_supervised_input_fit_schema,
)
class _CalibratedEqOddsPostprocessingImpl(_BasePostEstimatorImpl):
def __init__(
self,
*,
favorable_labels,
protected_attributes,
unfavorable_labels=None,
estimator,
redact=True,
**hyperparams,
):
prot_attr_names = [pa["feature"] for pa in protected_attributes]
unprivileged_groups = [{name: 0 for name in prot_attr_names}]
privileged_groups = [{name: 1 for name in prot_attr_names}]
mitigator = aif360.algorithms.postprocessing.CalibratedEqOddsPostprocessing(
unprivileged_groups=unprivileged_groups,
privileged_groups=privileged_groups,
**hyperparams,
)
super().__init__(
favorable_labels=favorable_labels,
protected_attributes=protected_attributes,
unfavorable_labels=unfavorable_labels,
estimator=estimator,
redact=redact,
mitigator=mitigator,
)
_input_fit_schema = _categorical_supervised_input_fit_schema
_input_predict_schema = _categorical_input_predict_schema
_output_predict_schema = _categorical_output_predict_schema
_input_predict_proba_schema = _categorical_input_predict_proba_schema
_output_predict_proba_schema = _categorical_output_predict_proba_schema
_hyperparams_schema = {
"description": "Hyperparameter schema.",
"allOf": [
{
"description": "This first sub-object lists all constructor arguments with their "
"types, one at a time, omitting cross-argument constraints.",
"type": "object",
"additionalProperties": False,
"required": [
*_categorical_fairness_properties.keys(),
"estimator",
"redact",
"cost_constraint",
"seed",
],
"relevantToOptimizer": ["cost_constraint"],
"properties": {
**_categorical_fairness_properties,
"estimator": {
"description": "Nested supervised learning operator for which to mitigate fairness.",
"laleType": "operator",
},
"redact": {
"description": "Whether to redact protected attributes before data preparation (recommended) or not.",
"type": "boolean",
"default": True,
},
"cost_constraint": {
"enum": ["fpr", "fnr", "weighted"],
"default": "weighted",
},
"seed": {
"description": "Seed to make `predict` repeatable.",
"anyOf": [{"type": "integer"}, {"enum": [None]}],
"default": None,
},
},
}
],
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """`Calibrated equalized odds postprocessing`_ post-estimator fairness mitigator. Optimizes over calibrated classifier score outputs to find probabilities with which to change output labels with an equalized odds objective (`Pleiss et al. 2017`_).
.. _`Calibrated equalized odds postprocessing`: https://aif360.readthedocs.io/en/latest/modules/generated/aif360.algorithms.postprocessing.CalibratedEqOddsPostprocessing.html
.. _`Pleiss et al. 2017`: https://proceedings.neurips.cc/paper/2017/hash/b8b9c74ac526fffbeb2d39ab038d1cd7-Abstract.html
""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.aif360.calibrated_eq_odds_postprocessing.html#lale.lib.aif360.calibrated_eq_odds_postprocessing.CalibratedEqOddsPostprocessing",
"import_from": "aif360.algorithms.postprocessing",
"type": "object",
"tags": {"pre": [], "op": ["estimator", "classifier", "interpretable"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
"input_predict_proba": _input_predict_proba_schema,
"output_predict_proba": _output_predict_proba_schema,
},
}
CalibratedEqOddsPostprocessing = lale.operators.make_operator(
_CalibratedEqOddsPostprocessingImpl, _combined_schemas
)
lale.docstrings.set_docstrings(CalibratedEqOddsPostprocessing)
| 5,376 | 39.428571 | 266 |
py
|
lale
|
lale-master/lale/lib/aif360/optim_preproc.py
|
# Copyright 2019, 2020, 2021 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import aif360.algorithms.preprocessing
import lale.docstrings
import lale.operators
from .protected_attributes_encoder import ProtectedAttributesEncoder
from .redacting import Redacting
from .util import (
_categorical_fairness_properties,
_PandasToDatasetConverter,
dataset_to_pandas,
)
try:
    # imported only to check availability; noqa because flake8 would flag the unused import.
import cvxpy # noqa
cvxpy_installed = True
except ImportError:
cvxpy_installed = False
if cvxpy_installed:
import aif360.algorithms.preprocessing.optim_preproc_helpers.opt_tools # pylint:disable=ungrouped-imports
class _OptimPreprocImpl:
def __init__(
self,
*,
favorable_labels,
protected_attributes,
unfavorable_labels=None,
optimizer,
optim_options,
verbose=0,
seed=None,
):
assert cvxpy_installed, """Your Python environment does not have cvxpy installed. You can install it with
pip install 'cvxpy>=1.0'
or with
pip install 'lale[full]'"""
if optimizer is None:
optimizer = (
aif360.algorithms.preprocessing.optim_preproc_helpers.opt_tools.OptTools
)
self._hyperparams = {
"favorable_labels": favorable_labels,
"protected_attributes": protected_attributes,
"unfavorable_labels": unfavorable_labels,
"optimizer": optimizer,
"optim_options": optim_options,
"verbose": verbose,
"seed": seed,
}
fairness_info = {
"favorable_labels": favorable_labels,
"protected_attributes": protected_attributes,
"unfavorable_labels": unfavorable_labels,
}
self._prot_attr_enc = ProtectedAttributesEncoder(
**fairness_info,
remainder="passthrough",
)
prot_attr_names = [pa["feature"] for pa in protected_attributes]
self._unprivileged_groups = [{name: 0 for name in prot_attr_names}]
self._privileged_groups = [{name: 1 for name in prot_attr_names}]
self._pandas_to_dataset = _PandasToDatasetConverter(
favorable_label=1,
unfavorable_label=0,
protected_attribute_names=prot_attr_names,
)
self._redacting = Redacting(**fairness_info)
def _encode(self, X, y=None):
encoded_X, encoded_y = self._prot_attr_enc.transform_X_y(X, y)
result = self._pandas_to_dataset.convert(encoded_X, encoded_y)
return result
def fit(self, X, y):
self._wrapped_model = aif360.algorithms.preprocessing.OptimPreproc(
optimizer=self._hyperparams["optimizer"],
optim_options=self._hyperparams["optim_options"],
unprivileged_groups=self._unprivileged_groups,
privileged_groups=self._privileged_groups,
verbose=self._hyperparams["verbose"],
seed=self._hyperparams["seed"],
)
encoded_data = self._encode(X, y)
self._wrapped_model.fit(encoded_data)
self._redacting = self._redacting.fit(X)
return self
def transform(self, X):
encoded_data = self._encode(X)
remediated_data = self._wrapped_model.transform(encoded_data)
remediated_X, _ = dataset_to_pandas(remediated_data, return_only="X")
redacted_X = self._redacting.transform(remediated_X)
return redacted_X
_input_fit_schema = {
"type": "object",
"required": ["X", "y"],
"additionalProperties": False,
"properties": {
"X": {
"description": "Features; the outer array is over samples.",
"type": "array",
"items": {
"type": "array",
"items": {"anyOf": [{"type": "number"}, {"type": "string"}]},
},
},
"y": {
"description": "Target class labels; the array is over samples.",
"anyOf": [
{"type": "array", "items": {"type": "number"}},
{"type": "array", "items": {"type": "string"}},
],
},
},
}
_input_transform_schema = {
"description": "Input data schema for transform.",
"type": "object",
"required": ["X"],
"additionalProperties": False,
"properties": {
"X": {
"description": "Features; the outer array is over samples.",
"type": "array",
"items": {
"type": "array",
"items": {"anyOf": [{"type": "number"}, {"type": "string"}]},
},
}
},
}
_output_transform_schema = {
"description": "Output data schema for transform.",
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
}
_hyperparams_schema = {
"description": "Hyperparameter schema.",
"allOf": [
{
"type": "object",
"additionalProperties": False,
"required": [
*_categorical_fairness_properties.keys(),
"optimizer",
"optim_options",
"verbose",
"seed",
],
"relevantToOptimizer": [],
"properties": {
**_categorical_fairness_properties,
"optimizer": {
"description": "Optimizer class.",
"anyOf": [
{"description": "User-provided.", "laleType": "Any"},
{
"description": "Use `aif360.algorithms.preprocessing.optim_preproc_helpers.opt_tools.OptTools`.",
"enum": [None],
},
],
"default": None,
},
"optim_options": {
"description": "Options for optimization to estimate the transformation.",
"type": "object",
"patternProperties": {"^[A-Za-z_][A-Za-z_0-9]*$": {}},
"default": {},
},
"verbose": {
"description": "If zero, then no output.",
"type": "integer",
"default": 0,
},
"seed": {
"description": "Seed to make `transform` repeatable.",
"anyOf": [{"type": "integer"}, {"enum": [None]}],
"default": None,
},
},
}
],
}
_combined_schemas = {
"description": """Work-in-progress, not covered in successful test yet: `Optimized Preprocessing`_ pre-estimator fairness mitigator. Learns a probabilistic transformation that edits the features and labels in the data with group fairness, individual distortion, and data fidelity constraints and objectives (`Calmon et al. 2017`_).
.. _`Optimized Preprocessing`: https://aif360.readthedocs.io/en/latest/modules/generated/aif360.algorithms.preprocessing.OptimPreproc.html
.. _`Calmon et al. 2017`: https://proceedings.neurips.cc/paper/2017/hash/9a49a25d845a483fae4be7e341368e36-Abstract.html
""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.aif360.optim_preproc.html#lale.lib.aif360.optim_preproc.OptimPreproc",
"import_from": "aif360.algorithms.preprocessing",
"type": "object",
"tags": {"pre": ["~categoricals"], "op": ["transformer"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_transform": _input_transform_schema,
"output_transform": _output_transform_schema,
},
}
OptimPreproc = lale.operators.make_operator(_OptimPreprocImpl, _combined_schemas)
lale.docstrings.set_docstrings(OptimPreproc)
| 8,354 | 35.168831 | 335 |
py
|
lale
|
lale-master/lale/lib/aif360/disparate_impact_remover.py
|
# Copyright 2019-2023 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import typing
import aif360.algorithms.preprocessing
import numpy as np
import pandas as pd
import lale.docstrings
import lale.lib.lale
import lale.operators
from .protected_attributes_encoder import ProtectedAttributesEncoder
from .redacting import Redacting
from .util import (
_categorical_fairness_properties,
_categorical_input_transform_schema,
_categorical_supervised_input_fit_schema,
_numeric_output_transform_schema,
_validate_fairness_info,
)
class _DisparateImpactRemoverImpl:
def __init__(
self,
*,
favorable_labels,
protected_attributes,
unfavorable_labels=None,
redact=True,
preparation=None,
repair_level=1.0,
):
_validate_fairness_info(
favorable_labels, protected_attributes, unfavorable_labels, False
)
self.favorable_labels = favorable_labels
self.protected_attributes = protected_attributes
self.unfavorable_labels = unfavorable_labels
self.redact = redact
if preparation is None:
preparation = lale.lib.lale.NoOp
self.preparation = preparation
self.repair_level = repair_level
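    # Combine the redacted-and-prepared features with the single encoded protected
    # attribute column, because the AIF360 repairer is given the index of the sensitive
    # attribute inside the feature matrix it repairs.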
def _prep_and_encode(self, X, y=None):
prepared_X = self.redact_and_prep.transform(X, y)
encoded_X, _encoded_y = self.prot_attr_enc.transform_X_y(X, y)
assert isinstance(encoded_X, pd.DataFrame), type(encoded_X)
assert encoded_X.shape[1] == 1, encoded_X.columns
if isinstance(prepared_X, pd.DataFrame):
combined_attribute_names = list(prepared_X.columns) + [
name for name in encoded_X.columns if name not in prepared_X.columns
]
combined_columns = [
encoded_X[name] if name in encoded_X else prepared_X[name]
for name in combined_attribute_names
]
combined_X = pd.concat(combined_columns, axis=1)
sensitive_attribute = list(encoded_X.columns)[0]
else:
if isinstance(prepared_X, pd.DataFrame):
prepared_X = prepared_X.to_numpy()
assert isinstance(prepared_X, np.ndarray)
encoded_X = encoded_X.to_numpy()
assert isinstance(encoded_X, np.ndarray)
combined_X = np.concatenate([prepared_X, encoded_X], axis=1)
sensitive_attribute = combined_X.shape[1] - 1
return combined_X, sensitive_attribute
def fit(self, X, y=None):
fairness_info = {
"favorable_labels": self.favorable_labels,
"protected_attributes": self.protected_attributes,
"unfavorable_labels": self.unfavorable_labels,
}
if self.redact:
redacting = Redacting(**fairness_info)
trainable_redact_and_prep = redacting >> self.preparation
else:
trainable_redact_and_prep = self.preparation
assert isinstance(trainable_redact_and_prep, lale.operators.TrainablePipeline)
self.redact_and_prep = trainable_redact_and_prep.fit(X, y)
self.prot_attr_enc = ProtectedAttributesEncoder(
**fairness_info, remainder="drop", combine="and"
)
encoded_X, sensitive_attribute = self._prep_and_encode(X, y)
if isinstance(sensitive_attribute, str):
assert isinstance(encoded_X, pd.DataFrame)
enc = typing.cast(
pd.DataFrame, encoded_X
) # not sure why this cast is needed
features = enc.to_numpy().tolist()
index = enc.columns.to_list().index(sensitive_attribute)
else:
assert isinstance(encoded_X, np.ndarray)
features = encoded_X.tolist()
index = sensitive_attribute
# workaround for "Matplotlib is currently using agg, which is a non-GUI backend"
import matplotlib
old_matplotlib_use_function = matplotlib.use
matplotlib.use = lambda _: None
# since DisparateImpactRemover does not have separate fit and transform
di_remover = aif360.algorithms.preprocessing.DisparateImpactRemover(
repair_level=self.repair_level, sensitive_attribute=sensitive_attribute
)
matplotlib.use = old_matplotlib_use_function
self.mitigator = di_remover.Repairer(features, index, self.repair_level, False)
return self
def transform(self, X):
encoded_X, _ = self._prep_and_encode(X)
columns = None
if isinstance(encoded_X, pd.DataFrame):
enc = typing.cast(
pd.DataFrame, encoded_X
) # not sure why this cast is needed
features = enc.to_numpy().tolist()
columns = enc.columns
else:
assert isinstance(encoded_X, np.ndarray)
features = encoded_X.tolist()
mitigated_X = self.mitigator.repair(features)
if isinstance(encoded_X, pd.DataFrame):
result = pd.DataFrame(mitigated_X, index=X.index, columns=columns)
else:
result = np.array(mitigated_X)
return result
_input_fit_schema = _categorical_supervised_input_fit_schema
_input_transform_schema = _categorical_input_transform_schema
_output_transform_schema = _numeric_output_transform_schema
_hyperparams_schema = {
"description": "Hyperparameter schema.",
"allOf": [
{
"type": "object",
"additionalProperties": False,
"required": [
*_categorical_fairness_properties.keys(),
"redact",
"preparation",
"repair_level",
],
"relevantToOptimizer": ["repair_level"],
"properties": {
**_categorical_fairness_properties,
"redact": {
"description": "Whether to redact protected attributes before data preparation (recommended) or not.",
"type": "boolean",
"default": True,
},
"preparation": {
"description": "Transformer, which may be an individual operator or a sub-pipeline.",
"anyOf": [
{"laleType": "operator"},
{"description": "lale.lib.lale.NoOp", "enum": [None]},
],
"default": None,
},
"repair_level": {
"description": "Repair amount from 0 = none to 1 = full.",
"type": "number",
"minimum": 0,
"maximum": 1,
"default": 1,
},
},
}
],
}
_combined_schemas = {
"description": """`Disparate impact remover`_ pre-estimator fairness mitigator. Edits feature values to increase group fairness while preserving rank-ordering within groups (`Feldman et al. 2015`_). In the case of multiple protected attributes, the combined reference group is the intersection of the reference groups for each attribute.
.. _`Disparate impact remover`: https://aif360.readthedocs.io/en/latest/modules/generated/aif360.algorithms.preprocessing.DisparateImpactRemover.html
.. _`Feldman et al. 2015`: https://doi.org/10.1145/2783258.2783311
""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.aif360.disparate_impact_remover.html#lale.lib.aif360.disparate_impact_remover.DisparateImpactRemover",
"import_from": "aif360.algorithms.preprocessing",
"type": "object",
"tags": {"pre": ["~categoricals"], "op": ["transformer"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_transform": _input_transform_schema,
"output_transform": _output_transform_schema,
},
}
DisparateImpactRemover = lale.operators.make_operator(
_DisparateImpactRemoverImpl, _combined_schemas
)
lale.docstrings.set_docstrings(DisparateImpactRemover)
| 8,586 | 39.314554 | 341 |
py
|
lale
|
lale-master/lale/lib/aif360/reject_option_classification.py
|
# Copyright 2019-2023 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import aif360.algorithms.postprocessing
import lale.docstrings
import lale.operators
from .util import (
_BasePostEstimatorImpl,
_categorical_fairness_properties,
_categorical_input_predict_schema,
_categorical_output_predict_schema,
_categorical_supervised_input_fit_schema,
)
class _RejectOptionClassificationImpl(_BasePostEstimatorImpl):
def __init__(
self,
*,
favorable_labels,
protected_attributes,
unfavorable_labels=None,
estimator,
redact=True,
repair_level=None,
**hyperparams,
):
prot_attr_names = [pa["feature"] for pa in protected_attributes]
unprivileged_groups = [{name: 0 for name in prot_attr_names}]
privileged_groups = [{name: 1 for name in prot_attr_names}]
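        # A single repair_level overrides both metric bounds: for example, repair_level=0.8
        # yields metric_ub=0.2 and metric_lb=-0.2, so a higher repair level narrows the
        # allowed band around the fairness metric.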
if repair_level is not None:
hyperparams["metric_lb"] = -(1 - repair_level)
hyperparams["metric_ub"] = 1 - repair_level
mitigator = aif360.algorithms.postprocessing.RejectOptionClassification(
unprivileged_groups=unprivileged_groups,
privileged_groups=privileged_groups,
**hyperparams,
)
super().__init__(
favorable_labels=favorable_labels,
protected_attributes=protected_attributes,
unfavorable_labels=unfavorable_labels,
estimator=estimator,
redact=redact,
mitigator=mitigator,
)
def predict_proba(self, X):
raise NotImplementedError()
_input_fit_schema = _categorical_supervised_input_fit_schema
_input_predict_schema = _categorical_input_predict_schema
_output_predict_schema = _categorical_output_predict_schema
_hyperparams_schema = {
"description": "Hyperparameter schema.",
"allOf": [
{
"description": "This first sub-object lists all constructor arguments with their "
"types, one at a time, omitting cross-argument constraints.",
"type": "object",
"additionalProperties": False,
"required": [
*_categorical_fairness_properties.keys(),
"estimator",
"redact",
"low_class_thresh",
"high_class_thresh",
"num_class_thresh",
"num_ROC_margin",
"metric_name",
"metric_ub",
"metric_lb",
],
"relevantToOptimizer": ["metric_name"],
"properties": {
**_categorical_fairness_properties,
"estimator": {
"description": "Nested supervised learning operator for which to mitigate fairness.",
"laleType": "operator",
},
"redact": {
"description": "Whether to redact protected attributes before data preparation (recommended) or not.",
"type": "boolean",
"default": True,
},
"low_class_thresh": {
"description": "Smallest classification threshold to use in the optimization.",
"type": "number",
"minimum": 0.0,
"maximum": 1.0,
"default": 0.01,
},
"high_class_thresh": {
"description": "Highest classification threshold to use in the optimization.",
"type": "number",
"minimum": 0.0,
"maximum": 1.0,
"default": 0.99,
},
"num_class_thresh": {
"description": "Number of classification thresholds between low_class_thresh and high_class_thresh for the optimization search.",
"type": "integer",
"minimum": 1,
"default": 100,
},
"num_ROC_margin": {
"description": "Number of relevant ROC margins to be used in the optimization search.",
"type": "integer",
"minimum": 1,
"default": 50,
},
"metric_name": {
"description": "Name of the metric to use for the optimization.",
"enum": [
"Statistical parity difference",
"Average odds difference",
"Equal opportunity difference",
],
"default": "Statistical parity difference",
},
"metric_ub": {
"description": "Upper bound of constraint on the metric value.",
"type": "number",
"minimum": 0,
"default": 0.05,
"maximum": 1,
},
"metric_lb": {
"description": "Lower bound of constraint on the metric value.",
"type": "number",
"minimum": -1,
"default": -0.05,
"maximum": 0,
},
"repair_level": {
"description": "Repair amount from 0 = none to 1 = full.",
"anyOf": [
{
"description": "Keep metric_lb and metric_ub unchanged.",
"enum": [None],
},
{
"description": "Set metric_ub = 1 - repair_level and metric_lb = - metric_ub.",
"type": "number",
"minimum": 0,
"maximum": 1,
},
],
"default": None,
},
},
}
],
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """`Reject option classification`_ post-estimator fairness mitigator. Gives favorable outcomes to unpriviliged groups and unfavorable outcomes to priviliged groups in a confidence band around the decision boundary with the highest uncertainty (`Kamiran et al. 2012`_).
.. _`Reject option classification`: https://aif360.readthedocs.io/en/latest/modules/generated/aif360.algorithms.postprocessing.RejectOptionClassification.html
.. _`Kamiran et al. 2012`: https://doi.org/10.1109/ICDM.2012.45
""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.aif360.reject_option_classification.html#lale.lib.aif360.reject_option_classification.RejectOptionClassification",
"import_from": "aif360.algorithms.postprocessing",
"type": "object",
"tags": {"pre": [], "op": ["estimator", "classifier", "interpretable"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
},
}
RejectOptionClassification = lale.operators.make_operator(
_RejectOptionClassificationImpl, _combined_schemas
)
lale.docstrings.set_docstrings(RejectOptionClassification)
| 7,843 | 39.020408 | 287 |
py
|
lale
|
lale-master/lale/lib/aif360/datasets.py
|
# Copyright 2021-2023 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import typing
import urllib.request
from enum import Enum
import aif360
import aif360.datasets
import numpy as np
import pandas as pd
from aif360.algorithms.preprocessing.optim_preproc_helpers.data_preproc_functions import (
load_preproc_data_compas,
)
import lale.datasets
import lale.datasets.openml
import lale.lib.aif360.util
logger = logging.getLogger(__name__)
logger.setLevel(logging.ERROR)
def fetch_adult_df(preprocess: bool = False):
"""
Fetch the `adult`_ dataset from OpenML and add `fairness_info`.
It contains information about individuals from the 1994 U.S. census.
The prediction task is a binary classification on whether the
income of a person exceeds 50K a year. Without preprocessing,
the dataset has 48,842 rows and 14 columns. There are two
protected attributes, sex and race, and the disparate impact is
0.23. The data includes both categorical and numeric columns, and
has some missing values.
.. _`adult`: https://www.openml.org/d/179
Parameters
----------
preprocess : boolean, optional, default False
If True,
impute missing values;
encode protected attributes in X as 0 or 1 to indicate privileged groups;
encode labels in y as 0 or 1 to indicate favorable outcomes;
and apply one-hot encoding to any remaining features in X that
are categorical and not protected attributes.
Returns
-------
result : tuple
    - item 0: pandas DataFrame
        Features X, including both protected and non-protected attributes.
    - item 1: pandas Series
        Labels y.
    - item 2: fairness_info
JSON meta-data following the format understood by fairness metrics
and mitigation operators in `lale.lib.aif360`.
"""
(train_X, train_y), (test_X, test_y) = lale.datasets.openml.fetch(
"adult", "classification", astype="pandas", preprocess=preprocess
)
orig_X = pd.concat([train_X, test_X]).sort_index()
orig_y = pd.concat([train_y, test_y]).sort_index()
if preprocess:
sex = pd.Series(orig_X["sex_Male"] == 1, dtype=np.float64)
race = pd.Series(orig_X["race_White"] == 1, dtype=np.float64)
dropped_X = orig_X.drop(
labels=[
"race_Amer-Indian-Eskimo",
"race_Asian-Pac-Islander",
"race_Black",
"race_Other",
"race_White",
"sex_Female",
"sex_Male",
],
axis=1,
)
encoded_X = dropped_X.assign(sex=sex, race=race)
assert not encoded_X.isna().any().any()
assert not orig_y.isna().any().any()
fairness_info = {
"favorable_labels": [1],
"protected_attributes": [
{"feature": "sex", "reference_group": [1]},
{"feature": "race", "reference_group": [1]},
],
}
return encoded_X, orig_y, fairness_info
else:
fairness_info = {
"favorable_labels": [">50K"],
"protected_attributes": [
{"feature": "race", "reference_group": ["White"]},
{"feature": "sex", "reference_group": ["Male"]},
],
}
return orig_X, orig_y, fairness_info
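# Illustrative usage sketch (editor's addition, not part of the original
# module): fetch the preprocessed adult data and tabulate the group sizes
# induced by fairness_info. Assumes network access to OpenML.
def _example_fetch_adult_usage():
    X, y, fairness_info = fetch_adult_df(preprocess=True)
    print(fairness_info["protected_attributes"])
    groups = lale.lib.aif360.util.count_fairness_groups(X, y, **fairness_info)
    return groups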
def fetch_bank_df(preprocess: bool = False):
"""
Fetch the `bank-marketing`_ dataset from OpenML and add `fairness_info`.
    It contains information from marketing campaigns of a Portuguese
    bank. The prediction task is a binary classification on whether
    the client will subscribe to a term deposit. Without preprocessing,
    the dataset has 45,211 rows and 16 columns. There is one protected
    attribute, age, and the disparate impact is 0.84. The data
includes both categorical and numeric columns, with no missing
values.
.. _`bank-marketing`: https://www.openml.org/d/1461
Parameters
----------
preprocess : boolean, optional, default False
If True,
encode protected attributes in X as 0 or 1 to indicate privileged groups;
encode labels in y as 0 or 1 to indicate favorable outcomes;
and apply one-hot encoding to any remaining features in X that
are categorical and not protected attributes.
Returns
-------
result : tuple
- item 0: pandas Dataframe
Features X, including both protected and non-protected attributes.
- item 1: pandas Series
Labels y.
      - item 2: fairness_info
JSON meta-data following the format understood by fairness metrics
and mitigation operators in `lale.lib.aif360`.
"""
(train_X, train_y), (test_X, test_y) = lale.datasets.openml.fetch(
"bank-marketing", "classification", astype="pandas", preprocess=preprocess
)
orig_X = pd.concat([train_X, test_X]).sort_index()
orig_y = pd.concat([train_y, test_y]).sort_index().astype(np.float64)
column_map = {
"v1": "age",
"v2": "job",
"v3": "marital",
"v4": "education",
"v5": "default",
"v6": "balance",
"v7": "housing",
"v8": "loan",
"v9": "contact",
"v10": "day",
"v11": "month",
"v12": "duration",
"v13": "campaign",
"v14": "pdays",
"v15": "previous",
"v16": "poutcome",
}
if preprocess:
def map_col(col):
if col.find("_") == -1:
return column_map[col]
prefix, suffix = col.split("_")
return column_map[prefix] + "_" + suffix
orig_X.columns = [map_col(col) for col in orig_X.columns]
age = pd.Series(orig_X["age"] >= 25, dtype=np.float64)
encoded_X = orig_X.assign(age=age)
encoded_y = pd.Series(orig_y == 0, dtype=np.float64, name=orig_y.name)
fairness_info = {
"favorable_labels": [1],
"protected_attributes": [
{"feature": "age", "reference_group": [1]},
],
}
return encoded_X, encoded_y, fairness_info
else:
orig_X.columns = [column_map[col] for col in orig_X.columns]
fairness_info = {
"favorable_labels": [1],
"protected_attributes": [
{"feature": "age", "reference_group": [[25, 1000]]},
],
}
return orig_X, orig_y, fairness_info
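# Illustrative sketch (editor's addition, not part of the original module):
# in the preprocess=False case above, the range [[25, 1000]] marks ages from
# 25 to 1000 inclusive as the privileged group; the preprocess=True branch
# materializes the same information as a 0/1 "age" column.
def _example_bank_reference_group():
    X, _, fairness_info = fetch_bank_df(preprocess=False)
    low, high = fairness_info["protected_attributes"][0]["reference_group"][0]
    privileged = (X["age"] >= low) & (X["age"] <= high)
    return privileged.mean()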
def fetch_default_credit_df():
"""
Fetch the `Default of Credit Card Clients Dataset`_ from OpenML and add `fairness_info`.
It is a binary classification to predict whether the customer suffers
a default in the next month (1) or not (0).
The dataset has 30,000 rows and 24 columns, all numeric.
The protected attribute is sex and the disparate impact is 0.957.
.. _`Default of Credit Card Clients Dataset`: https://www.openml.org/d/43435
Returns
-------
result : tuple
- item 0: pandas Dataframe
Features X, including both protected and non-protected attributes.
- item 1: pandas Series
Labels y.
      - item 2: fairness_info
JSON meta-data following the format understood by fairness metrics
and mitigation operators in `lale.lib.aif360`.
"""
(train_X, train_y), (test_X, test_y) = lale.datasets.openml.fetch(
"Default-of-Credit-Card-Clients-Dataset",
"classification",
astype="pandas",
preprocess=False,
)
orig_X = pd.concat([train_X, test_X]).sort_index()
orig_y = pd.concat([train_y, test_y]).sort_index()
fairness_info = {
"favorable_labels": [0],
"protected_attributes": [
{"feature": "sex", "reference_group": [2]}, # female
],
}
return orig_X, orig_y, fairness_info
def fetch_heart_disease_df():
"""
Fetch the `heart-disease`_ dataset from OpenML and add `fairness_info`.
It is a binary classification to predict heart disease from the
Cleveland database, with 303 rows and 13 columns, all numeric.
The protected attribute is age and the disparate impact is 0.589.
.. _`heart-disease`: https://www.openml.org/d/43398
Returns
-------
result : tuple
- item 0: pandas Dataframe
Features X, including both protected and non-protected attributes.
- item 1: pandas Series
Labels y.
      - item 2: fairness_info
JSON meta-data following the format understood by fairness metrics
and mitigation operators in `lale.lib.aif360`.
"""
(train_X, train_y), (test_X, test_y) = lale.datasets.openml.fetch(
"heart-disease", "classification", astype="pandas", preprocess=False
)
orig_X = pd.concat([train_X, test_X]).sort_index()
orig_y = pd.concat([train_y, test_y]).sort_index()
fairness_info = {
"favorable_labels": [1],
"protected_attributes": [
{"feature": "age", "reference_group": [[0, 54]]},
],
}
return orig_X, orig_y, fairness_info
def fetch_law_school_df():
"""Fetch the `law school`_ dataset from OpenML and add `fairness_info`.
This function returns both X and y unchanged, since the dataset
was already binarized by the OpenML contributors, with the target
of predicting whether the GPA is greater than 3.
    The protected attribute is race1 and the disparate impact is 0.704.
The dataset has 20,800 rows and 11 columns (5 categorical and 6
numeric columns).
.. _`law school`: https://www.openml.org/d/43890
Returns
-------
result : tuple
- item 0: pandas Dataframe
Features X, including both protected and non-protected attributes.
- item 1: pandas Series
Labels y.
      - item 2: fairness_info
JSON meta-data following the format understood by fairness metrics
and mitigation operators in `lale.lib.aif360`.
"""
(train_X, train_y), (test_X, test_y) = lale.datasets.openml.fetch(
"law-school-admission-bianry",
"classification",
astype="pandas",
preprocess=False,
)
orig_X = pd.concat([train_X, test_X]).sort_index()
orig_y = pd.concat([train_y, test_y]).sort_index()
fairness_info = {
"favorable_labels": ["TRUE"],
"protected_attributes": [
{"feature": "race1", "reference_group": ["white"]},
],
}
return orig_X, orig_y, fairness_info
def fetch_nlsy_df():
"""
Fetch the `National Longitudinal Survey for the Youth (NLSY)`_ (also known as "University of Michigan Health and Retirement Study (HRS)") dataset from OpenML and add `fairness_info`.
It is a binary classification to predict whether the income at a
certain time exceeds a threshold, with 4,908 rows and 15 columns
(comprising 6 categorical and 9 numerical columns).
The protected attributes are age and gender and the disparate
impact is 0.668.
.. _`National Longitudinal Survey for the Youth (NLSY)`: https://www.openml.org/d/43892
Returns
-------
result : tuple
- item 0: pandas Dataframe
Features X, including both protected and non-protected attributes.
- item 1: pandas Series
Labels y.
      - item 2: fairness_info
JSON meta-data following the format understood by fairness metrics
and mitigation operators in `lale.lib.aif360`.
"""
(train_X, train_y), (test_X, test_y) = lale.datasets.openml.fetch(
"national-longitudinal-survey-binary",
"classification",
astype="pandas",
preprocess=False,
)
orig_X = pd.concat([train_X, test_X]).sort_index()
orig_y = pd.concat([train_y, test_y]).sort_index()
dropped_X = orig_X.drop(labels=["income96"], axis=1)
fairness_info = {
"favorable_labels": ["1"],
"protected_attributes": [
{"feature": "age", "reference_group": [[18, 120]]},
{"feature": "gender", "reference_group": ["Male"]},
],
}
return dropped_X, orig_y, fairness_info
def fetch_student_math_df():
"""
Fetch the `Student Performance (Math)`_ dataset from OpenML and add `fairness_info`.
    The original prediction target is an integer math grade from 1 to 20.
This function returns X unchanged but with a binarized version of
the target y, using 1 for values >=10 and 0 otherwise.
The two protected attributes are sex and age
and the disparate impact is 0.894.
The dataset has 395 rows and 32 columns,
including both categorical and numeric columns.
.. _`Student Performance (Math)`: https://www.openml.org/d/42352
Returns
-------
result : tuple
- item 0: pandas Dataframe
Features X, including both protected and non-protected attributes.
- item 1: pandas Series
Labels y.
      - item 2: fairness_info
JSON meta-data following the format understood by fairness metrics
and mitigation operators in `lale.lib.aif360`.
"""
(train_X, train_y), (test_X, test_y) = lale.datasets.openml.fetch(
"UCI-student-performance-mat", "regression", astype="pandas", preprocess=False
)
orig_X = pd.concat([train_X, test_X]).sort_index()
orig_y = pd.concat([train_y, test_y]).sort_index()
    encoded_y = pd.Series(orig_y >= 10, dtype=np.float64, name="g3_ge_10")
fairness_info = {
"favorable_labels": [1],
"protected_attributes": [
{"feature": "sex", "reference_group": ["F"]},
{"feature": "age", "reference_group": [[0, 17]]},
],
}
return orig_X, encoded_y, fairness_info
def fetch_student_por_df():
"""
Fetch the `Student Performance (Portuguese)`_ dataset from OpenML and add `fairness_info`.
    The original prediction target is an integer Portuguese grade from 1 to 20.
This function returns X unchanged but with a binarized version of
the target y, using 1 for values >=10 and 0 otherwise.
The two protected attributes are sex and age
and the disparate impact is 0.858.
The dataset has 649 rows and 32 columns,
including both categorical and numeric columns.
.. _`Student Performance (Portuguese)`: https://www.openml.org/d/42351
Returns
-------
result : tuple
- item 0: pandas Dataframe
Features X, including both protected and non-protected attributes.
- item 1: pandas Series
Labels y.
      - item 2: fairness_info
JSON meta-data following the format understood by fairness metrics
and mitigation operators in `lale.lib.aif360`.
"""
(train_X, train_y), (test_X, test_y) = lale.datasets.openml.fetch(
"UCI-student-performance-por", "regression", astype="pandas", preprocess=False
)
orig_X = pd.concat([train_X, test_X]).sort_index()
orig_y = pd.concat([train_y, test_y]).sort_index()
encoded_y = pd.Series(orig_y >= 10, dtype=np.float64, name="g3_ge_10")
fairness_info = {
"favorable_labels": [1],
"protected_attributes": [
{"feature": "sex", "reference_group": ["F"]},
{"feature": "age", "reference_group": [[0, 17]]},
],
}
return orig_X, encoded_y, fairness_info
def fetch_tae_df(preprocess: bool = False):
"""
Fetch the `tae`_ dataset from OpenML and add `fairness_info`.
    It contains information from teaching assistant (TA) evaluations
    at the University of Wisconsin--Madison.
    The prediction task is a classification on the type
    of rating a TA receives (1=Low, 2=Medium, 3=High). Without preprocessing,
    the dataset has 151 rows and 5 columns. There is one protected
    attribute, "whether_of_not_the_ta_is_a_native_english_speaker" [sic],
    and the disparate impact is 0.45. The data
    includes both categorical and numeric columns, with no missing
    values.
.. _`tae`: https://www.openml.org/d/48
Parameters
----------
preprocess : boolean or "y", optional, default False
If True,
encode protected attributes in X as 0 or 1 to indicate privileged group
("native_english_speaker");
encode labels in y as 0 or 1 to indicate favorable outcomes;
and apply one-hot encoding to any remaining features in X that
        are categorical and not protected attributes.
If "y", leave features X unchanged and only encode labels y as 0 or 1.
If False, encode neither features X nor labels y.
Returns
-------
result : tuple
- item 0: pandas Dataframe
Features X, including both protected and non-protected attributes.
- item 1: pandas Series
Labels y.
      - item 2: fairness_info
JSON meta-data following the format understood by fairness metrics
and mitigation operators in `lale.lib.aif360`.
"""
(train_X, train_y), (test_X, test_y) = lale.datasets.openml.fetch(
"tae", "classification", astype="pandas", preprocess=(preprocess is True)
)
orig_X = pd.concat([train_X, test_X]).sort_index().astype(np.float64)
orig_y = pd.concat([train_y, test_y]).sort_index().astype(np.float64)
if preprocess is True:
native_english_speaker = pd.Series(
orig_X["whether_of_not_the_ta_is_a_native_english_speaker_1"] == 1,
dtype=np.float64,
)
dropped_X = orig_X.drop(
labels=[
"whether_of_not_the_ta_is_a_native_english_speaker_1",
"whether_of_not_the_ta_is_a_native_english_speaker_2",
],
axis=1,
)
encoded_X = dropped_X.assign(native_english_speaker=native_english_speaker)
encoded_y = pd.Series(orig_y == 2, dtype=np.float64)
fairness_info = {
"favorable_labels": [1],
"protected_attributes": [
{"feature": "native_english_speaker", "reference_group": [1]},
],
}
return encoded_X, encoded_y, fairness_info
elif preprocess == "y":
encoded_y = pd.Series(orig_y == 2, dtype=np.float64)
fairness_info = {
"favorable_labels": [1],
"protected_attributes": [
{
"feature": "whether_of_not_the_ta_is_a_native_english_speaker",
"reference_group": [1],
},
],
}
return orig_X, encoded_y, fairness_info
else:
fairness_info = {
"favorable_labels": [3],
"protected_attributes": [
{
"feature": "whether_of_not_the_ta_is_a_native_english_speaker",
"reference_group": [1],
},
],
}
return orig_X, orig_y, fairness_info
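# Illustrative sketch (editor's addition, not part of the original module):
# the three values accepted by the `preprocess` argument of fetch_tae_df and
# what each returns. Assumes network access to OpenML.
def _example_fetch_tae_modes():
    raw_X, raw_y, raw_info = fetch_tae_df(preprocess=False)
    semi_X, semi_y, semi_info = fetch_tae_df(preprocess="y")
    enc_X, enc_y, enc_info = fetch_tae_df(preprocess=True)
    # False: original features and labels; "y": original features with 0/1
    # labels; True: encoded protected attribute and 0/1 labels
    return raw_X.shape, semi_X.shape, enc_X.shape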
def fetch_us_crime_df():
"""
Fetch the `us_crime`_ (also known as "communities and crime") dataset from OpenML and add `fairness_info`.
The original dataset has several columns with a large number of
missing values, which this function drops.
The binary protected attribute is blackgt6pct, which is derived by
thresholding racepctblack > 0.06 and dropping the original racepctblack.
    The binary target is derived by thresholding its original y >= 0.70.
The disparate impact is 0.888.
The resulting dataset has 1,994 rows and 102 columns,
all but one of which are numeric.
.. _`us_crime`: https://www.openml.org/d/315
Returns
-------
result : tuple
- item 0: pandas Dataframe
Features X, including both protected and non-protected attributes.
- item 1: pandas Series
Labels y.
      - item 2: fairness_info
JSON meta-data following the format understood by fairness metrics
and mitigation operators in `lale.lib.aif360`.
"""
(train_X, train_y), (test_X, test_y) = lale.datasets.openml.fetch(
"us_crime", "regression", astype="pandas", preprocess=False
)
orig_X = pd.concat([train_X, test_X]).sort_index()
orig_y = pd.concat([train_y, test_y]).sort_index()
blackgt6pct = orig_X.racepctblack > 0.06
to_drop = ["racepctblack"] + [c for c in orig_X.columns if orig_X[c].hasnans]
dropped_X = orig_X.drop(labels=to_drop, axis=1)
encoded_X = dropped_X.assign(blackgt6pct=blackgt6pct)
encoded_y = pd.Series(orig_y >= 0.7, name="crimegt70pct")
fairness_info = {
"favorable_labels": [0],
"protected_attributes": [{"feature": "blackgt6pct", "reference_group": [0]}],
}
return encoded_X, encoded_y, fairness_info
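# Illustrative sketch (editor's addition, not part of the original module):
# how the derived columns above relate to the raw data. blackgt6pct replaces
# the thresholded racepctblack column, and the favorable label 0 corresponds
# to communities below the 0.7 crime threshold.
def _example_us_crime_thresholds():
    X, y, fairness_info = fetch_us_crime_df()
    assert "racepctblack" not in X.columns and "blackgt6pct" in X.columns
    favorable_rate = (y == 0).mean()
    return favorable_rate, fairness_info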
# COMPAS HELPERS
def _get_compas_filename(violent_recidivism=False):
violent_tag = ""
if violent_recidivism:
violent_tag = "-violent"
filename = f"compas-scores-two-years{violent_tag}.csv"
return filename
def _get_compas_filepath(filename):
directory = os.path.join(
os.path.dirname(os.path.abspath(aif360.__file__)), "data", "raw", "compas"
)
return os.path.join(
directory,
filename,
)
def _try_download_compas(violent_recidivism=False):
filename = _get_compas_filename(violent_recidivism=violent_recidivism)
filepath = _get_compas_filepath(filename)
csv_exists = os.path.exists(filepath)
if not csv_exists:
# this request is to a string that begins with a hardcoded https url, so does not risk leaking local data
urllib.request.urlretrieve( # nosec
f"https://raw.githubusercontent.com/propublica/compas-analysis/master/{filename}",
filepath,
)
def _get_pandas_and_fairness_info_from_compas_dataset(dataset):
X, y = lale.lib.aif360.util.dataset_to_pandas(dataset)
assert X is not None
fairness_info = {
"favorable_labels": [0],
"protected_attributes": [
{"feature": "sex", "reference_group": [1]},
{"feature": "race", "reference_group": [1]},
],
}
return X, y, fairness_info
def _get_dataframe_from_compas_csv(violent_recidivism=False):
filename = _get_compas_filename(violent_recidivism=violent_recidivism)
filepath = _get_compas_filepath(filename)
df: typing.Any = None
try:
df = pd.read_csv(filepath, index_col="id", na_values=[])
except IOError as err:
# In practice should not get here because of the _try_download_compas call above, but adding failure logic just in case
logger.error(f"IOError: {err}")
logger.error("To use this class, please download the following file:")
logger.error(
"\n\thttps://raw.githubusercontent.com/propublica/compas-analysis/master/compas-scores-two-years.csv"
)
logger.error("\nand place it, as-is, in the folder:")
logger.error(f"\n\t{os.path.abspath(os.path.dirname(filepath))}\n")
import sys
sys.exit(1)
if violent_recidivism:
# violent recidivism dataset includes extra label column for some reason
df = pd.DataFrame(
df, columns=[x for x in df.columns.tolist() if x != "two_year_recid.1"]
).sort_index()
return df
def _perform_default_preprocessing(df):
return df[
(df.days_b_screening_arrest <= 30)
& (df.days_b_screening_arrest >= -30)
& (df.is_recid != -1)
& (df.c_charge_degree != "O")
& (df.score_text != "N/A")
]
def _perform_custom_preprocessing(df):
"""The custom pre-processing function is adapted from
https://github.com/fair-preprocessing/nips2017/blob/master/compas/code/Generate_Compas_Data.ipynb
"""
df = df[
[
"age",
"c_charge_degree",
"race",
"age_cat",
"score_text",
"sex",
"priors_count",
"days_b_screening_arrest",
"decile_score",
"is_recid",
"two_year_recid",
"c_jail_in",
"c_jail_out",
]
]
# Indices of data samples to keep
ix = df["days_b_screening_arrest"] <= 30
ix = (df["days_b_screening_arrest"] >= -30) & ix
ix = (df["is_recid"] != -1) & ix
ix = (df["c_charge_degree"] != "O") & ix
ix = (df["score_text"] != "N/A") & ix
df = df.loc[ix, :]
df["length_of_stay"] = (
pd.to_datetime(df["c_jail_out"]) - pd.to_datetime(df["c_jail_in"])
).apply(lambda x: x.days)
# Restrict races to African-American and Caucasian
dfcut = df.loc[
~df["race"].isin(["Native American", "Hispanic", "Asian", "Other"]), :
]
# Restrict the features to use
dfcutQ = dfcut[
[
"sex",
"race",
"age_cat",
"c_charge_degree",
"score_text",
"priors_count",
"is_recid",
"two_year_recid",
"length_of_stay",
]
].copy()
# Quantize priors count between 0, 1-3, and >3
def quantizePrior(x):
if x <= 0:
return "0"
elif 1 <= x <= 3:
return "1 to 3"
else:
return "More than 3"
# Quantize length of stay
def quantizeLOS(x):
if x <= 7:
return "<week"
        elif x <= 93:
return "<3months"
else:
return ">3 months"
    # Adjust age category labels
def adjustAge(x):
if x == "25 - 45":
return "25 to 45"
else:
return x
# Quantize score_text to MediumHigh
def quantizeScore(x):
if (x == "High") | (x == "Medium"):
return "MediumHigh"
else:
return x
def group_race(x):
if x == "Caucasian":
return 1.0
else:
return 0.0
dfcutQ["priors_count"] = dfcutQ["priors_count"].apply(quantizePrior)
dfcutQ["length_of_stay"] = dfcutQ["length_of_stay"].apply(quantizeLOS)
dfcutQ["score_text"] = dfcutQ["score_text"].apply(quantizeScore)
dfcutQ["age_cat"] = dfcutQ["age_cat"].apply(adjustAge)
# Recode sex and race
dfcutQ["sex"] = dfcutQ["sex"].replace({"Female": 1.0, "Male": 0.0})
dfcutQ["race"] = dfcutQ["race"].apply(group_race)
features = [
"two_year_recid",
"sex",
"race",
"age_cat",
"priors_count",
"c_charge_degree",
]
    # Keep only the selected feature columns
df = dfcutQ[features]
return df
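# Illustrative sketch (editor's addition, not part of the original module):
# running the custom preprocessing end to end on the raw ProPublica data.
# Assumes the csv was already fetched via _try_download_compas.
def _example_custom_preprocessing():
    _try_download_compas(violent_recidivism=False)
    df = _get_dataframe_from_compas_csv(violent_recidivism=False)
    processed = _perform_custom_preprocessing(df)
    # keeps two_year_recid, sex, race, age_cat, priors_count, c_charge_degree,
    # with sex and race recoded to 0/1 and priors_count quantized
    return processed.head()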
def _get_pandas_and_fairness_info_from_compas_csv(violent_recidivism=False):
df = _get_dataframe_from_compas_csv(violent_recidivism=violent_recidivism)
# preprocessing steps performed by ProPublica team, even in the preprocess=False case
df = _perform_default_preprocessing(df)
X = pd.DataFrame(
df, columns=[x for x in df.columns.tolist() if x != "two_year_recid"]
).sort_index()
y = pd.Series(
df["two_year_recid"], name="two_year_recid", dtype=np.float64
).sort_index()
fairness_info = {
"favorable_labels": [0],
"protected_attributes": [
{"feature": "sex", "reference_group": ["Female"]},
{"feature": "race", "reference_group": ["Caucasian"]},
],
}
return X, y, fairness_info
def fetch_compas_df(preprocess: bool = False):
"""
Fetch the `compas-two-years`_ dataset, also known as ProPublica recidivism, from GitHub and add `fairness_info`.
It contains information about individuals with a binary
classification for recidivism, indicating whether they were
re-arrested within two years after the first arrest. Without
preprocessing, the dataset has 6,172 rows and 51 columns. There
are two protected attributes, sex and race, and the disparate
impact is 0.75. The data includes numeric and categorical columns, with some
missing values.
.. _`compas-two-years`: https://github.com/propublica/compas-analysis
Parameters
----------
preprocess : boolean, optional, default False
If True,
encode protected attributes in X as 0 or 1 to indicate privileged groups
(1 if Female or Caucasian for the corresponding sex and race columns respectively);
and apply one-hot encoding to any remaining features in X that
are categorical and not protected attributes.
Returns
-------
result : tuple
- item 0: pandas Dataframe
Features X, including both protected and non-protected attributes.
- item 1: pandas Series
Labels y.
      - item 2: fairness_info
JSON meta-data following the format understood by fairness metrics
and mitigation operators in `lale.lib.aif360`.
"""
violent_recidivism = False
_try_download_compas(violent_recidivism=violent_recidivism)
if preprocess:
# Odd finding here: "Female" is a privileged class in the dataset, but the original
# COMPAS algorithm actually predicted worse outcomes for that class after controlling
# for other factors. Leaving it as "Female" for now (AIF360 does this by default as well)
# but potentially worthy of revisiting.
# See https://www.propublica.org/article/how-we-analyzed-the-compas-recidivism-algorithm
# and https://github.com/propublica/compas-analysis/blob/master/Compas%20Analysis.ipynb
# (hunch is that COMPAS was trained on more biased data that is not reproduced in ProPublica's dataset)
dataset = load_preproc_data_compas()
# above preprocessing results in a WARNING of "Missing Data: 5 rows removed from CompasDataset."
# unclear how to resolve at the moment
return _get_pandas_and_fairness_info_from_compas_dataset(dataset)
else:
return _get_pandas_and_fairness_info_from_compas_csv(
violent_recidivism=violent_recidivism
)
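# Illustrative usage sketch (editor's addition, not part of the original
# module): for COMPAS the favorable label is 0 (no recidivism), so metrics
# and mitigators should always be configured from fairness_info rather than
# assuming that 1 is the favorable outcome.
def _example_fetch_compas_usage():
    X, y, fairness_info = fetch_compas_df(preprocess=False)
    assert fairness_info["favorable_labels"] == [0]
    return X.shape, y.value_counts()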
def fetch_compas_violent_df(preprocess: bool = False):
"""
Fetch the `compas-two-years-violent`_ dataset, also known as ProPublica violent recidivism, from GitHub and add `fairness_info`.
It contains information about individuals with a binary
classification for violent recidivism, indicating whether they were
re-arrested within two years after the first arrest. Without
preprocessing, the dataset has 4,020 rows and 51 columns. There
are three protected attributes, sex, race, and age, and the disparate
impact is 0.85. The data includes numeric and categorical columns, with some
missing values.
.. _`compas-two-years-violent`: https://github.com/propublica/compas-analysis
Parameters
----------
preprocess : boolean, optional, default False
If True,
encode protected attributes in X as 0 or 1 to indicate privileged groups
(1 if Female, Caucasian, or at least 25 for the corresponding sex, race, and
age columns respectively);
and apply one-hot encoding to any remaining features in X that
are categorical and not protected attributes.
Returns
-------
result : tuple
- item 0: pandas Dataframe
Features X, including both protected and non-protected attributes.
- item 1: pandas Series
Labels y.
      - item 2: fairness_info
JSON meta-data following the format understood by fairness metrics
and mitigation operators in `lale.lib.aif360`.
"""
violent_recidivism = True
_try_download_compas(violent_recidivism=violent_recidivism)
if preprocess:
# Odd finding here: "Female" is a privileged class in the dataset, but the original
# COMPAS algorithm actually predicted worse outcomes for that class after controlling
# for other factors. Leaving it as "Female" for now (AIF360 does this by default as well)
# but potentially worthy of revisiting.
# See https://www.propublica.org/article/how-we-analyzed-the-compas-recidivism-algorithm
# and https://github.com/propublica/compas-analysis/blob/master/Compas%20Analysis.ipynb
# (hunch is that COMPAS was trained on more biased data that is not reproduced in ProPublica's dataset)
# Loading violent recidivism dataset using StandardDataset and default settings found in the CompasDataset
# class since AIF360 lacks a violent recidivism dataset implementation
df = _get_dataframe_from_compas_csv(violent_recidivism=violent_recidivism)
default_mappings = {
"label_maps": [{1.0: "Did recid.", 0.0: "No recid."}],
"protected_attribute_maps": [
{0.0: "Male", 1.0: "Female"},
{1.0: "Caucasian", 0.0: "Not Caucasian"},
],
}
dataset = aif360.datasets.StandardDataset(
df=df,
label_name="two_year_recid",
favorable_classes=[0],
protected_attribute_names=["sex", "race"],
privileged_classes=[[1.0], [1.0]],
categorical_features=["age_cat", "priors_count", "c_charge_degree"],
instance_weights_name=None,
features_to_keep=[
"sex",
"age_cat",
"race",
"priors_count",
"c_charge_degree",
"two_year_recid",
],
features_to_drop=[],
na_values=[],
custom_preprocessing=_perform_custom_preprocessing,
metadata=default_mappings,
)
# above preprocessing results in a WARNING of "Missing Data: 5 rows removed from StandardDataset."
# unclear how to resolve at the moment
return _get_pandas_and_fairness_info_from_compas_dataset(dataset)
else:
return _get_pandas_and_fairness_info_from_compas_csv(
violent_recidivism=violent_recidivism
)
def fetch_creditg_df(preprocess: bool = False):
"""
Fetch the `credit-g`_ dataset from OpenML and add `fairness_info`.
It contains information about individuals with a binary
classification into good or bad credit risks. Without
preprocessing, the dataset has 1,000 rows and 20 columns. There
    are two protected attributes, personal_status/sex and age, and the
disparate impact is 0.75. The data includes both categorical and
numeric columns, with no missing values.
.. _`credit-g`: https://www.openml.org/d/31
Parameters
----------
preprocess : boolean, optional, default False
If True,
encode protected attributes in X as 0 or 1 to indicate privileged groups;
encode labels in y as 0 or 1 to indicate favorable outcomes;
and apply one-hot encoding to any remaining features in X that
are categorical and not protected attributes.
Returns
-------
result : tuple
- item 0: pandas Dataframe
Features X, including both protected and non-protected attributes.
- item 1: pandas Series
Labels y.
      - item 2: fairness_info
JSON meta-data following the format understood by fairness metrics
and mitigation operators in `lale.lib.aif360`.
"""
(train_X, train_y), (test_X, test_y) = lale.datasets.openml.fetch(
"credit-g", "classification", astype="pandas", preprocess=preprocess
)
orig_X = pd.concat([train_X, test_X]).sort_index()
orig_y = pd.concat([train_y, test_y]).sort_index()
if preprocess:
sex = pd.Series(
(orig_X["personal_status_male div/sep"] == 1)
| (orig_X["personal_status_male mar/wid"] == 1)
| (orig_X["personal_status_male single"] == 1),
dtype=np.float64,
)
age = pd.Series(orig_X["age"] > 25, dtype=np.float64)
dropped_X = orig_X.drop(
labels=[
"personal_status_female div/dep/mar",
"personal_status_male div/sep",
"personal_status_male mar/wid",
"personal_status_male single",
],
axis=1,
)
encoded_X = dropped_X.assign(sex=sex, age=age)
fairness_info = {
"favorable_labels": [1],
"protected_attributes": [
{"feature": "sex", "reference_group": [1]},
{"feature": "age", "reference_group": [1]},
],
}
return encoded_X, orig_y, fairness_info
else:
fairness_info = {
"favorable_labels": ["good"],
"protected_attributes": [
{
"feature": "personal_status",
"reference_group": [
"male div/sep",
"male mar/wid",
"male single",
],
},
{"feature": "age", "reference_group": [[26, 1000]]},
],
}
return orig_X, orig_y, fairness_info
def fetch_ricci_df(preprocess: bool = False):
"""
Fetch the `ricci_vs_destefano`_ dataset from OpenML and add `fairness_info`.
It contains test scores for 2003 New Haven Fire Department
promotion exams with a binary classification into promotion or no
promotion. Without preprocessing, the dataset has 118 rows and 5
columns. There is one protected attribute, race, and the
disparate impact is 0.50. The data includes both categorical and
numeric columns, with no missing values.
.. _`ricci_vs_destefano`: https://www.openml.org/d/42665
Parameters
----------
preprocess : boolean, optional, default False
If True,
encode protected attributes in X as 0 or 1 to indicate privileged groups;
encode labels in y as 0 or 1 to indicate favorable outcomes;
and apply one-hot encoding to any remaining features in X that
are categorical and not protected attributes.
Returns
-------
result : tuple
- item 0: pandas Dataframe
Features X, including both protected and non-protected attributes.
- item 1: pandas Series
Labels y.
      - item 2: fairness_info
JSON meta-data following the format understood by fairness metrics
and mitigation operators in `lale.lib.aif360`.
"""
(train_X, train_y), (test_X, test_y) = lale.datasets.openml.fetch(
"ricci", "classification", astype="pandas", preprocess=preprocess
)
orig_X = pd.concat([train_X, test_X]).sort_index()
orig_y = pd.concat([train_y, test_y]).sort_index()
if preprocess:
race = pd.Series(orig_X["race_W"] == 1, dtype=np.float64)
dropped_X = orig_X.drop(labels=["race_B", "race_H", "race_W"], axis=1)
encoded_X = dropped_X.assign(race=race)
fairness_info = {
"favorable_labels": [1],
"protected_attributes": [{"feature": "race", "reference_group": [1]}],
}
return encoded_X, orig_y, fairness_info
else:
fairness_info = {
"favorable_labels": ["Promotion"],
"protected_attributes": [{"feature": "race", "reference_group": ["W"]}],
}
return orig_X, orig_y, fairness_info
def fetch_speeddating_df(preprocess: bool = False):
"""
Fetch the `SpeedDating`_ dataset from OpenML and add `fairness_info`.
It contains data gathered from participants in experimental speed dating events
from 2002-2004 with a binary classification into match or no
match. Without preprocessing, the dataset has 8378 rows and 122
columns. There are two protected attributes, whether the other candidate has the same
race and importance of having the same race, and the disparate impact
is 0.85. The data includes both categorical and
numeric columns, with some missing values.
.. _`SpeedDating`: https://www.openml.org/d/40536
Parameters
----------
preprocess : boolean, optional, default False
If True,
encode protected attributes in X as 0 or 1 to indicate privileged groups;
encode labels in y as 0 or 1 to indicate favorable outcomes;
and apply one-hot encoding to any remaining features in X that
are categorical and not protected attributes.
Returns
-------
result : tuple
- item 0: pandas Dataframe
Features X, including both protected and non-protected attributes.
- item 1: pandas Series
Labels y.
      - item 2: fairness_info
JSON meta-data following the format understood by fairness metrics
and mitigation operators in `lale.lib.aif360`.
"""
(train_X, train_y), (test_X, test_y) = lale.datasets.openml.fetch(
"SpeedDating", "classification", astype="pandas", preprocess=preprocess
)
orig_X = pd.concat([train_X, test_X]).sort_index()
orig_y = pd.concat([train_y, test_y]).sort_index()
if preprocess:
importance_same_race = pd.Series(
orig_X["importance_same_race"] >= 9, dtype=np.float64
)
samerace = pd.Series(orig_X["samerace_1"] == 1, dtype=np.float64)
# drop samerace-related columns
columns_to_drop = ["samerace_0", "samerace_1"]
# drop preprocessed columns
def preprocessed_column_filter(x: str):
return x.startswith("d_")
columns_to_drop.extend(
[x for x in orig_X.columns if preprocessed_column_filter(x)]
)
# drop has-null columns
columns_to_drop.extend(["has_null_0", "has_null_1"])
# drop decision columns
def decision_column_filter(x: str):
return x.startswith("decision")
columns_to_drop.extend([x for x in orig_X.columns if decision_column_filter(x)])
# drop field columns
def field_column_filter(x: str):
return x.startswith("field")
columns_to_drop.extend([x for x in orig_X.columns if field_column_filter(x)])
# drop wave column
columns_to_drop.append("wave")
dropped_X = orig_X.drop(labels=columns_to_drop, axis=1)
encoded_X = dropped_X.assign(
samerace=samerace, importance_same_race=importance_same_race
)
fairness_info = {
"favorable_labels": [1],
"protected_attributes": [
{"feature": "samerace", "reference_group": [1]},
{"feature": "importance_same_race", "reference_group": [1]},
],
}
return encoded_X, orig_y, fairness_info
else:
fairness_info = {
"favorable_labels": ["1"],
"protected_attributes": [
{"feature": "samerace", "reference_group": ["1"]},
{"feature": "importance_same_race", "reference_group": [[9, 1000]]},
],
}
return orig_X, orig_y, fairness_info
def _fetch_boston_housing_df(preprocess: bool = False):
"""
Fetch the `Boston housing`_ dataset from sklearn and add `fairness info`.
It contains data about housing values in the suburbs of Boston with various
features that can be used to perform regression. Without preprocessing,
the dataset has 506 rows and 14 columns. There is one protected attribute,
1000(Bk - 0.63)^2 where Bk is the proportion of Blacks by town, and the disparate
impact is 0.5. The data includes only numeric columns, with no missing values.
Hiding dataset from public consumption based on issues described at length `here`_
.. _`Boston housing`: https://scikit-learn.org/0.20/datasets/index.html#boston-house-prices-dataset
.. _`here`: https://medium.com/@docintangible/racist-data-destruction-113e3eff54a8
Parameters
----------
preprocess : boolean, optional, default False
If True,
encode protected attribute in X as 0 or 1 to indicate privileged groups.
Returns
-------
result : tuple
- item 0: pandas Dataframe
Features X, including both protected and non-protected attributes.
- item 1: pandas Series
Labels y.
      - item 2: fairness_info
JSON meta-data following the format understood by fairness metrics
and mitigation operators in `lale.lib.aif360`.
"""
(train_X, train_y), (test_X, test_y) = lale.datasets.boston_housing_df(
test_size=0.33
)
orig_X = pd.concat([train_X, test_X]).sort_index()
orig_y = pd.concat([train_y, test_y]).sort_index()
assert train_X is not None
black_median = np.median(train_X["B"])
label_median = np.median(train_y)
if preprocess:
# 1000(Bk - 0.63)^2 where Bk is the proportion of Blacks by town
B = pd.Series(orig_X["B"] > black_median, dtype=np.float64)
encoded_X = orig_X.assign(B=B)
fairness_info = {
"favorable_labels": [[-10000.0, label_median]],
"protected_attributes": [
{"feature": "B", "reference_group": [0]},
],
}
return encoded_X, orig_y, fairness_info
else:
fairness_info = {
"favorable_labels": [[-10000.0, label_median]],
"protected_attributes": [
# 1000(Bk - 0.63)^2 where Bk is the proportion of Blacks by town
{"feature": "B", "reference_group": [[0.0, black_median]]},
],
}
return orig_X, orig_y, fairness_info
def fetch_nursery_df(preprocess: bool = False):
"""
Fetch the `nursery`_ dataset from OpenML and add `fairness_info`.
It contains data gathered from applicants to public schools in
Ljubljana, Slovenia during a competitive time period.
Without preprocessing, the dataset has
12960 rows and 8 columns. There is one protected attribute, parents, and the
disparate impact is 0.46. The data has categorical columns (with
numeric ones if preprocessing is applied), with no missing values.
.. _`nursery`: https://www.openml.org/d/26
Parameters
----------
preprocess : boolean, optional, default False
If True,
encode protected attributes in X as 0 or 1 to indicate privileged groups
and apply one-hot encoding to any remaining features in X that
are categorical and not protected attributes.
Returns
-------
result : tuple
- item 0: pandas Dataframe
Features X, including both protected and non-protected attributes.
- item 1: pandas Series
Labels y.
      - item 2: fairness_info
JSON meta-data following the format understood by fairness metrics
and mitigation operators in `lale.lib.aif360`.
"""
(train_X, train_y), (test_X, test_y) = lale.datasets.openml.fetch(
"nursery", "classification", astype="pandas", preprocess=preprocess
)
orig_X = pd.concat([train_X, test_X]).sort_index()
orig_y = pd.concat([train_y, test_y]).sort_index()
if preprocess:
parents = pd.Series(orig_X["parents_usual"] == 0, dtype=np.float64)
dropped_X = orig_X.drop(
labels=[
"parents_great_pret",
"parents_pretentious",
"parents_usual",
],
axis=1,
)
encoded_X = dropped_X.assign(parents=parents)
# orig_y == 3 corresponds to "spec_prior"
encoded_y = pd.Series((orig_y == 3), dtype=np.float64)
fairness_info = {
"favorable_labels": [1],
"protected_attributes": [{"feature": "parents", "reference_group": [1]}],
}
return encoded_X, encoded_y, fairness_info
else:
fairness_info = {
"favorable_labels": ["spec_prior"],
"protected_attributes": [
{
"feature": "parents",
"reference_group": ["great_pret", "pretentious"],
}
],
}
return orig_X, orig_y, fairness_info
def fetch_titanic_df(preprocess: bool = False):
"""
Fetch the `Titanic`_ dataset from OpenML and add `fairness_info`.
It contains data gathered from passengers on the Titanic with a binary classification
into "survived" or "did not survive". Without preprocessing, the dataset has
1309 rows and 13 columns. There is one protected attribute, sex, and the
disparate impact is 0.26. The data includes both categorical and
numeric columns, with some missing values.
.. _`Titanic`: https://www.openml.org/d/40945
Parameters
----------
preprocess : boolean, optional, default False
If True,
encode protected attributes in X as 0 or 1 to indicate privileged groups;
and apply one-hot encoding to any remaining features in X that
are categorical and not protected attributes.
Returns
-------
result : tuple
- item 0: pandas Dataframe
Features X, including both protected and non-protected attributes.
- item 1: pandas Series
Labels y.
      - item 2: fairness_info
JSON meta-data following the format understood by fairness metrics
and mitigation operators in `lale.lib.aif360`.
"""
(train_X, train_y), (test_X, test_y) = lale.datasets.openml.fetch(
"titanic", "classification", astype="pandas", preprocess=preprocess
)
orig_X = pd.concat([train_X, test_X]).sort_index()
orig_y = pd.concat([train_y, test_y]).sort_index()
if preprocess:
sex = pd.Series(orig_X["sex_female"] == 1, dtype=np.float64)
columns_to_drop = ["sex_female", "sex_male"]
# drop more columns that turn into gigantic one-hot encodings otherwise, like name and cabin
def extra_categorical_columns_filter(c: str):
return (
c.startswith("name")
or c.startswith("ticket")
or c.startswith("cabin")
or c.startswith("home.dest")
)
columns_to_drop.extend(
[x for x in orig_X.columns if extra_categorical_columns_filter(x)]
)
dropped_X = orig_X.drop(labels=columns_to_drop, axis=1)
encoded_X = dropped_X.assign(sex=sex)
fairness_info = {
"favorable_labels": [1],
"protected_attributes": [
{"feature": "sex", "reference_group": [1]},
],
}
return encoded_X, orig_y, fairness_info
else:
fairness_info = {
"favorable_labels": ["1"],
"protected_attributes": [
{"feature": "sex", "reference_group": ["female"]},
],
}
return orig_X, orig_y, fairness_info
# MEPS HELPERS
class _MepsYear(Enum):
FY2015 = 15
FY2016 = 16
class _MepsPanel(Enum):
PANEL19 = 19
PANEL20 = 20
PANEL21 = 21
def _race(row):
if (row["HISPANX"] == 2) and (
row["RACEV2X"] == 1
): # non-Hispanic Whites are marked as WHITE; all others as NON-WHITE
return "White"
return "Non-White"
def _get_utilization_columns(fiscal_year):
return [
f"OBTOTV{fiscal_year.value}",
f"OPTOTV{fiscal_year.value}",
f"ERTOT{fiscal_year.value}",
f"IPNGTD{fiscal_year.value}",
f"HHTOTD{fiscal_year.value}",
]
def _get_total_utilization(row, fiscal_year):
cols = _get_utilization_columns(fiscal_year)
return sum((row[x] for x in cols))
def _should_drop_column(x, fiscal_year):
utilization_cols = set(_get_utilization_columns(fiscal_year))
return x in utilization_cols
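# Illustrative sketch (editor's addition, not part of the original module):
# how the helpers above combine into the binary UTILIZATION label computed by
# _fetch_meps_raw_df. The row below is a hypothetical record.
def _example_meps_utilization():
    fiscal_year = _MepsYear.FY2015
    row = {col: 3 for col in _get_utilization_columns(fiscal_year)}
    total = _get_total_utilization(row, fiscal_year)  # 5 columns of 3 -> 15
    return 1.0 if total >= 10.0 else 0.0  # mirrors the thresholding at 10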
def _fetch_meps_raw_df(panel, fiscal_year):
filename = ""
if fiscal_year == _MepsYear.FY2015:
assert panel in [_MepsPanel.PANEL19, _MepsPanel.PANEL20]
filename = "h181.csv"
elif fiscal_year == _MepsYear.FY2016:
assert panel == _MepsPanel.PANEL21
filename = "h192.csv"
else:
logger.error(f"Unexpected FiscalYear received: {fiscal_year}")
raise ValueError(f"Unexpected FiscalYear received: {fiscal_year}")
filepath = os.path.join(
os.path.dirname(os.path.abspath(aif360.__file__)),
"data",
"raw",
"meps",
filename,
)
df: typing.Any = None
try:
df = pd.read_csv(filepath, sep=",", na_values=[])
except IOError as err:
logger.error(f"IOError: {err}")
logger.error("To use this class, please follow the instructions found here:")
logger.error(
f"\n\t{'https://github.com/Trusted-AI/AIF360/tree/master/aif360/data/raw/meps'}\n"
)
logger.error(
f"\n to download and convert the data and place the final {filename} file, as-is, in the folder:"
)
logger.error(f"\n\t{os.path.abspath(os.path.dirname(filepath))}\n")
import sys
sys.exit(1)
df["RACEV2X"] = df.apply(_race, axis=1)
df = df.rename(columns={"RACEV2X": "RACE"})
df = df[df["PANEL"] == panel.value]
df["TOTEXP15"] = df.apply(
lambda row: _get_total_utilization(row, fiscal_year), axis=1
)
lessE = df["TOTEXP15"] < 10.0
df.loc[lessE, "TOTEXP15"] = 0.0
moreE = df["TOTEXP15"] >= 10.0
df.loc[moreE, "TOTEXP15"] = 1.0
df = df.rename(columns={"TOTEXP15": "UTILIZATION"})
columns_to_drop = set(
(x for x in df.columns.tolist() if _should_drop_column(x, fiscal_year))
)
df = df[sorted(set(df.columns.tolist()) - columns_to_drop, key=df.columns.get_loc)]
X = pd.DataFrame(
df, columns=[x for x in df.columns.tolist() if x != "UTILIZATION"]
).sort_index()
y = pd.Series(df["UTILIZATION"], name="UTILIZATION").sort_index()
fairness_info = {
"favorable_labels": [1],
"protected_attributes": [
{"feature": "RACE", "reference_group": ["White"]},
],
}
return X, y, fairness_info
def _get_pandas_and_fairness_info_from_meps_dataset(dataset):
X, y = lale.lib.aif360.util.dataset_to_pandas(dataset)
fairness_info = {
"favorable_labels": [1],
"protected_attributes": [
{"feature": "RACE", "reference_group": [1]},
],
}
return X, y, fairness_info
def fetch_meps_panel19_fy2015_df(preprocess: bool = False):
"""
Fetch a subset of the `MEPS`_ dataset from aif360 and add fairness info.
It contains information collected on a nationally representative sample
of the civilian noninstitutionalized population of the United States,
specifically reported medical expenditures and civilian demographics.
This dataframe corresponds to data from panel 19 from the year 2015.
Without preprocessing, the dataframe contains 16578 rows and 1825 columns.
(With preprocessing the dataframe contains 15830 rows and 138 columns.)
There is one protected attribute, race, and the disparate impact is 0.496
if preprocessing is not applied and 0.490 if preprocessing is applied.
The data includes numeric and categorical columns, with some missing values.
Note: in order to use this dataset, be sure to follow the instructions
found in the `AIF360 documentation`_ and accept the corresponding license agreement.
.. _`MEPS`: https://meps.ahrq.gov/mepsweb/data_stats/download_data_files_detail.jsp?cboPufNumber=HC-181
.. _`AIF360 documentation`: https://github.com/Trusted-AI/AIF360/tree/master/aif360/data/raw/meps
Parameters
----------
preprocess : boolean, optional, default False
If True,
encode protected attribute in X corresponding to race as 0 or 1
to indicate privileged groups;
        encode labels in y as 0 or 1 to indicate favorable outcomes;
rename columns that are panel or round-specific;
drop columns such as ID columns that are not relevant to the task at hand;
and drop rows where features are unknown.
Returns
-------
result : tuple
- item 0: pandas Dataframe
Features X, including both protected and non-protected attributes.
- item 1: pandas Series
Labels y.
      - item 2: fairness_info
JSON meta-data following the format understood by fairness metrics
and mitigation operators in `lale.lib.aif360`.
"""
if preprocess:
dataset = aif360.datasets.MEPSDataset19()
return _get_pandas_and_fairness_info_from_meps_dataset(dataset)
else:
return _fetch_meps_raw_df(_MepsPanel.PANEL19, _MepsYear.FY2015)
def fetch_meps_panel20_fy2015_df(preprocess: bool = False):
"""
Fetch a subset of the `MEPS`_ dataset from aif360 and add fairness info.
It contains information collected on a nationally representative sample
of the civilian noninstitutionalized population of the United States,
specifically reported medical expenditures and civilian demographics.
This dataframe corresponds to data from panel 20 from the year 2015.
Without preprocessing, the dataframe contains 18849 rows and 1825 columns.
(With preprocessing the dataframe contains 17570 rows and 138 columns.)
There is one protected attribute, race, and the disparate impact is 0.493
if preprocessing is not applied and 0.488 if preprocessing is applied.
The data includes numeric and categorical columns, with some missing values.
Note: in order to use this dataset, be sure to follow the instructions
found in the `AIF360 documentation`_ and accept the corresponding license agreement.
.. _`MEPS`: https://meps.ahrq.gov/mepsweb/data_stats/download_data_files_detail.jsp?cboPufNumber=HC-181
.. _`AIF360 documentation`: https://github.com/Trusted-AI/AIF360/tree/master/aif360/data/raw/meps
Parameters
----------
preprocess : boolean, optional, default False
If True,
encode protected attribute in X corresponding to race as 0 or 1
to indicate privileged groups;
        encode labels in y as 0 or 1 to indicate favorable outcomes;
rename columns that are panel or round-specific;
drop columns such as ID columns that are not relevant to the task at hand;
and drop rows where features are unknown.
Returns
-------
result : tuple
- item 0: pandas Dataframe
Features X, including both protected and non-protected attributes.
- item 1: pandas Series
Labels y.
      - item 2: fairness_info
JSON meta-data following the format understood by fairness metrics
and mitigation operators in `lale.lib.aif360`.
"""
if preprocess:
dataset = aif360.datasets.MEPSDataset20()
return _get_pandas_and_fairness_info_from_meps_dataset(dataset)
else:
return _fetch_meps_raw_df(_MepsPanel.PANEL20, _MepsYear.FY2015)
def fetch_meps_panel21_fy2016_df(preprocess: bool = False):
"""
Fetch a subset of the `MEPS`_ dataset from aif360 and add fairness info.
It contains information collected on a nationally representative sample
of the civilian noninstitutionalized population of the United States,
specifically reported medical expenditures and civilian demographics.
    This dataframe corresponds to data from panel 21 from the year 2016.
Without preprocessing, the dataframe contains 17052 rows and 1936 columns.
(With preprocessing the dataframe contains 15675 rows and 138 columns.)
There is one protected attribute, race, and the disparate impact is 0.462
if preprocessing is not applied and 0.451 if preprocessing is applied.
The data includes numeric and categorical columns, with some missing values.
Note: in order to use this dataset, be sure to follow the instructions
found in the `AIF360 documentation`_ and accept the corresponding license agreement.
.. _`MEPS`: https://meps.ahrq.gov/mepsweb/data_stats/download_data_files_detail.jsp?cboPufNumber=HC-181
.. _`AIF360 documentation`: https://github.com/Trusted-AI/AIF360/tree/master/aif360/data/raw/meps
Parameters
----------
preprocess : boolean, optional, default False
If True,
encode protected attribute in X corresponding to race as 0 or 1
to indicate privileged groups;
        encode labels in y as 0 or 1 to indicate favorable outcomes;
rename columns that are panel or round-specific;
drop columns such as ID columns that are not relevant to the task at hand;
and drop rows where features are unknown.
Returns
-------
result : tuple
- item 0: pandas Dataframe
Features X, including both protected and non-protected attributes.
- item 1: pandas Series
Labels y.
      - item 2: fairness_info
JSON meta-data following the format understood by fairness metrics
and mitigation operators in `lale.lib.aif360`.
"""
if preprocess:
dataset = aif360.datasets.MEPSDataset21()
return _get_pandas_and_fairness_info_from_meps_dataset(dataset)
else:
return _fetch_meps_raw_df(_MepsPanel.PANEL21, _MepsYear.FY2016)
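# Illustrative usage sketch (editor's addition, not part of the original
# module): the MEPS fetchers require the raw h181.csv / h192.csv files to be
# prepared according to the AIF360 instructions linked in the docstrings.
def _example_fetch_meps_usage():
    X, y, fairness_info = fetch_meps_panel19_fy2015_df(preprocess=False)
    return X.shape, y.mean(), fairness_info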
| 61,223 | 33.668177 | 186 |
py
|
lale
|
lale-master/lale/lib/aif360/eq_odds_postprocessing.py
|
# Copyright 2019-2023 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import aif360.algorithms.postprocessing
import lale.docstrings
import lale.operators
from .util import (
_BasePostEstimatorImpl,
_categorical_fairness_properties,
_categorical_input_predict_schema,
_categorical_output_predict_schema,
_categorical_supervised_input_fit_schema,
)
class _EqOddsPostprocessingImpl(_BasePostEstimatorImpl):
def __init__(
self,
*,
favorable_labels,
protected_attributes,
unfavorable_labels=None,
estimator,
redact=True,
**hyperparams,
):
prot_attr_names = [pa["feature"] for pa in protected_attributes]
unprivileged_groups = [{name: 0 for name in prot_attr_names}]
privileged_groups = [{name: 1 for name in prot_attr_names}]
mitigator = aif360.algorithms.postprocessing.EqOddsPostprocessing(
unprivileged_groups=unprivileged_groups,
privileged_groups=privileged_groups,
**hyperparams,
)
super().__init__(
favorable_labels=favorable_labels,
protected_attributes=protected_attributes,
unfavorable_labels=unfavorable_labels,
estimator=estimator,
redact=redact,
mitigator=mitigator,
)
def predict_proba(self, X):
raise NotImplementedError()
_input_fit_schema = _categorical_supervised_input_fit_schema
_input_predict_schema = _categorical_input_predict_schema
_output_predict_schema = _categorical_output_predict_schema
_hyperparams_schema = {
"description": "Hyperparameter schema.",
"allOf": [
{
"description": "This first sub-object lists all constructor arguments with their "
"types, one at a time, omitting cross-argument constraints.",
"type": "object",
"additionalProperties": False,
"required": [
*_categorical_fairness_properties.keys(),
"estimator",
"redact",
"seed",
],
"relevantToOptimizer": [],
"properties": {
**_categorical_fairness_properties,
"estimator": {
"description": "Nested supervised learning operator for which to mitigate fairness.",
"laleType": "operator",
},
"redact": {
"description": "Whether to redact protected attributes before data preparation (recommended) or not.",
"type": "boolean",
"default": True,
},
"seed": {
"description": "Seed to make `predict` repeatable.",
"anyOf": [{"type": "integer"}, {"enum": [None]}],
"default": None,
},
},
}
],
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """`Equalized odds postprocessing`_ post-estimator fairness mitigator. Solves a linear program to find probabilities with which to change output labels to optimize equalized odds (`Hardt et al. 2016`_, `Pleiss et al. 2017`_).
.. _`Equalized odds postprocessing`: https://aif360.readthedocs.io/en/latest/modules/generated/aif360.algorithms.postprocessing.EqOddsPostprocessing.html
.. _`Hardt et al. 2016`: https://papers.nips.cc/paper/2016/hash/9d2682367c3935defcb1f9e247a97c0d-Abstract.html
.. _`Pleiss et al. 2017`: https://proceedings.neurips.cc/paper/2017/hash/b8b9c74ac526fffbeb2d39ab038d1cd7-Abstract.html
""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.aif360.eq_odds_postprocessing.html#lale.lib.aif360.eq_odds_postprocessing.EqOddsPostprocessing",
"import_from": "aif360.algorithms.postprocessing",
"type": "object",
"tags": {"pre": [], "op": ["estimator", "classifier", "interpretable"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
},
}
EqOddsPostprocessing = lale.operators.make_operator(
_EqOddsPostprocessingImpl, _combined_schemas
)
lale.docstrings.set_docstrings(EqOddsPostprocessing)
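# Illustrative usage sketch (editor's addition, not part of the original
# module): the mitigator wraps a nested estimator and adjusts its predictions
# to optimize equalized odds. The fairness_info dictionary and the train/test
# variables below are hypothetical.
def _example_eq_odds_usage(train_X, train_y, test_X):
    from lale.lib.sklearn import LogisticRegression
    fairness_info = {
        "favorable_labels": [1],
        "protected_attributes": [{"feature": "sex", "reference_group": [1]}],
    }
    mitigator = EqOddsPostprocessing(
        estimator=LogisticRegression(), seed=42, **fairness_info
    )
    trained = mitigator.fit(train_X, train_y)
    return trained.predict(test_X)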
| 4,869 | 37.650794 | 244 |
py
|
lale
|
lale-master/lale/lib/aif360/util.py
|
# Copyright 2019-2022 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import logging
import sys
from typing import List, Optional, Tuple, Union, cast
import aif360.algorithms.postprocessing
import aif360.datasets
import aif360.metrics
import numpy as np
import pandas as pd
import sklearn.metrics
import sklearn.model_selection
import lale.datasets.data_schemas
import lale.datasets.openml
import lale.lib.lale
import lale.lib.rasl
from lale.datasets.data_schemas import add_schema_adjusting_n_rows
from lale.expressions import astype, it, sum # pylint:disable=redefined-builtin
from lale.helpers import GenSym, _ensure_pandas, randomstate_type
from lale.lib.dataframe import get_columns
from lale.lib.rasl import Aggregate, ConcatFeatures, Map
from lale.lib.rasl.metrics import MetricMonoid, MetricMonoidFactory
from lale.operators import TrainablePipeline, TrainedOperator
from lale.type_checking import JSON_TYPE, validate_schema_directly
if sys.version_info >= (3, 8):
from typing import Literal # raises a mypy error for <3.8
else:
from typing_extensions import Literal
logger = logging.getLogger(__name__)
logger.setLevel(logging.WARNING)
_FAV_LABELS_TYPE = List[Union[float, str, bool, List[float]]]
def dataset_to_pandas(
dataset, return_only: Literal["X", "y", "Xy"] = "Xy"
) -> Tuple[Optional[pd.DataFrame], Optional[pd.Series]]:
"""
Return pandas representation of the AIF360 dataset.
Parameters
----------
dataset : aif360.datasets.BinaryLabelDataset
AIF360 dataset to convert to a pandas representation.
return_only : 'Xy', 'X', or 'y'
Which part of features X or labels y to convert and return.
Returns
-------
result : tuple
- item 0: pandas Dataframe or None, features X
- item 1: pandas Series or None, labels y
"""
if "X" in return_only:
X = pd.DataFrame(dataset.features, columns=dataset.feature_names)
result_X = lale.datasets.data_schemas.add_schema(X)
assert isinstance(result_X, pd.DataFrame), type(result_X)
else:
result_X = None
if "y" in return_only:
y = pd.Series(dataset.labels.ravel(), name=dataset.label_names[0])
result_y = lale.datasets.data_schemas.add_schema(y)
assert isinstance(result_y, pd.Series), type(result_y)
else:
result_y = None
return result_X, result_y
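# Illustrative usage sketch (editor's addition, not part of the original
# module): round-tripping a small, hypothetical AIF360 BinaryLabelDataset
# through this helper.
def _example_dataset_to_pandas():
    df = pd.DataFrame(
        {
            "sex": [0.0, 1.0, 1.0, 0.0],
            "feat": [1.0, 2.0, 3.0, 4.0],
            "label": [0.0, 1.0, 1.0, 0.0],
        }
    )
    dataset = aif360.datasets.BinaryLabelDataset(
        df=df, label_names=["label"], protected_attribute_names=["sex"]
    )
    X, y = dataset_to_pandas(dataset, return_only="Xy")
    return X.shape, y.name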
def count_fairness_groups(
X: Union[pd.DataFrame, np.ndarray],
y: Union[pd.Series, np.ndarray],
favorable_labels: _FAV_LABELS_TYPE,
protected_attributes: List[JSON_TYPE],
unfavorable_labels: Optional[_FAV_LABELS_TYPE] = None,
) -> pd.DataFrame:
"""
Count size of each intersection of groups induced by the fairness info.
Parameters
----------
X : array
Features including protected attributes as numpy ndarray or pandas dataframe.
y : array
Labels as numpy ndarray or pandas series.
favorable_labels : array
Label values which are considered favorable (i.e. "positive").
protected_attributes : array
Features for which fairness is desired.
unfavorable_labels : array or None, default None
Label values which are considered unfavorable (i.e. "negative").
Returns
-------
result : pd.DataFrame
DataFrame with a multi-level index on the rows, where the first level
indicates the binarized outcome, and the remaining levels indicate the
binarized group membership according to the protected attributes.
Column "count" specifies the number of instances for each group.
Column "ratio" gives the ratio of the given outcome relative to the
total number of instances with any outcome but the same encoded
protected attributes.
"""
from lale.lib.aif360 import ProtectedAttributesEncoder
prot_attr_enc = ProtectedAttributesEncoder(
favorable_labels=favorable_labels,
protected_attributes=protected_attributes,
unfavorable_labels=unfavorable_labels,
remainder="drop",
)
encoded_X, encoded_y = prot_attr_enc.transform_X_y(X, y)
prot_attr_names = [pa["feature"] for pa in protected_attributes]
gensym = GenSym(set(prot_attr_names))
encoded_y = pd.Series(encoded_y, index=encoded_y.index, name=gensym("y_true"))
counts = pd.Series(data=1, index=encoded_y.index, name=gensym("count"))
enc = pd.concat([encoded_y, encoded_X, counts], axis=1)
grouped = enc.groupby([encoded_y.name] + prot_attr_names).count()
count_column = grouped["count"]
ratio_column = pd.Series(0.0, count_column.index, name="ratio")
for group, count in count_column.items():
comp_group = tuple(
1 - group[k] if k == 0 else group[k] for k in range(len(group))
)
comp_count = count_column[comp_group]
ratio = count / (count + comp_count)
ratio_column[group] = ratio
result = pd.DataFrame({"count": count_column, "ratio": ratio_column})
return result
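# Illustrative sketch of count_fairness_groups (the data and fairness info are
# hypothetical): for a single protected attribute "sex" the result has a
# two-level row index of (encoded outcome, encoded sex).
#
#   counts = count_fairness_groups(
#       X, y,
#       favorable_labels=["good"],
#       protected_attributes=[{"feature": "sex", "reference_group": ["male"]}],
#   )
#   counts.loc[(1, 0), "ratio"]  # fraction of favorable outcomes among rows whose encoded sex is 0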
_categorical_fairness_properties: JSON_TYPE = {
"favorable_labels": {
"description": 'Label values which are considered favorable (i.e. "positive").',
"type": "array",
"minItems": 1,
"items": {
"anyOf": [
{"description": "Numerical value.", "type": "number"},
{"description": "Literal string value.", "type": "string"},
{"description": "Boolean value.", "type": "boolean"},
{
"description": "Numeric range [a,b] from a to b inclusive.",
"type": "array",
"minItems": 2,
"maxItems": 2,
"items": {"type": "number"},
},
]
},
},
"protected_attributes": {
"description": "Features for which fairness is desired.",
"type": "array",
"minItems": 1,
"items": {
"type": "object",
"required": ["feature", "reference_group"],
"properties": {
"feature": {
"description": "Column name or column index.",
"anyOf": [{"type": "string"}, {"type": "integer"}],
},
"reference_group": {
"description": "Values or ranges that indicate being a member of the privileged group.",
"type": "array",
"minItems": 1,
"items": {
"anyOf": [
{"description": "Literal value.", "type": "string"},
{"description": "Numerical value.", "type": "number"},
{
"description": "Numeric range [a,b] from a to b inclusive.",
"type": "array",
"minItems": 2,
"maxItems": 2,
"items": {"type": "number"},
},
]
},
},
"monitored_group": {
"description": "Values or ranges that indicate being a member of the unprivileged group.",
"anyOf": [
{
"description": "If `monitored_group` is not explicitly specified, consider any values not captured by `reference_group` as monitored.",
"enum": [None],
},
{
"type": "array",
"minItems": 1,
"items": {
"anyOf": [
{"description": "Literal value.", "type": "string"},
{
"description": "Numerical value.",
"type": "number",
},
{
"description": "Numeric range [a,b] from a to b inclusive.",
"type": "array",
"minItems": 2,
"maxItems": 2,
"items": {"type": "number"},
},
]
},
},
],
"default": None,
},
},
},
},
"unfavorable_labels": {
"description": 'Label values which are considered unfavorable (i.e. "negative").',
"anyOf": [
{
"description": "If `unfavorable_labels` is not explicitly specified, consider any labels not captured by `favorable_labels` as unfavorable.",
"enum": [None],
},
{
"type": "array",
"minItems": 1,
"items": {
"anyOf": [
{"description": "Numerical value.", "type": "number"},
{"description": "Literal string value.", "type": "string"},
{"description": "Boolean value.", "type": "boolean"},
{
"description": "Numeric range [a,b] from a to b inclusive.",
"type": "array",
"minItems": 2,
"maxItems": 2,
"items": {"type": "number"},
},
],
},
},
],
"default": None,
},
}
FAIRNESS_INFO_SCHEMA = {
"type": "object",
"properties": _categorical_fairness_properties,
}
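# A sketch of fairness info that conforms to FAIRNESS_INFO_SCHEMA (the feature
# names, labels, and ranges are made up for illustration):
#
#   fairness_info = {
#       "favorable_labels": ["good", 1],
#       "protected_attributes": [
#           {"feature": "age", "reference_group": [[26, 75]]},
#           {"feature": "sex", "reference_group": ["male"]},
#       ],
#       "unfavorable_labels": None,
#   }
#   validate_schema_directly(fairness_info, FAIRNESS_INFO_SCHEMA)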
def _validate_fairness_info(
favorable_labels, protected_attributes, unfavorable_labels, check_schema
):
if check_schema:
validate_schema_directly(
{
"favorable_labels": favorable_labels,
"protected_attributes": protected_attributes,
"unfavorable_labels": unfavorable_labels,
},
FAIRNESS_INFO_SCHEMA,
)
def _check_ranges(base_name, name, groups):
for group in groups:
if isinstance(group, list):
if group[0] > group[1]:
if base_name is None:
logger.warning(f"range {group} in {name} has min>max")
else:
logger.warning(
f"range {group} in {name} of feature '{base_name}' has min>max"
)
def _check_overlaps(base_name, name1, groups1, name2, groups2):
for g1 in groups1:
for g2 in groups2:
overlap = False
if isinstance(g1, list):
if isinstance(g2, list):
overlap = g1[0] <= g2[0] <= g1[1] or g1[0] <= g2[1] <= g1[1]
else:
overlap = g1[0] <= g2 <= g1[1]
else:
if isinstance(g2, list):
overlap = g2[0] <= g1 <= g2[1]
else:
overlap = g1 == g2
if overlap:
s1 = f"'{g1}'" if isinstance(g1, str) else str(g1)
s2 = f"'{g2}'" if isinstance(g2, str) else str(g2)
if base_name is None:
logger.warning(
f"overlap between {name1} and {name2} on {s1} and {s2}"
)
else:
logger.warning(
f"overlap between {name1} and {name2} of feature '{base_name}' on {s1} and {s2}"
)
_check_ranges(None, "favorable labels", favorable_labels)
if unfavorable_labels is not None:
_check_ranges(None, "unfavorable labels", unfavorable_labels)
_check_overlaps(
None,
"favorable labels",
favorable_labels,
"unfavorable labels",
unfavorable_labels,
)
for attr in protected_attributes:
base_name = attr["feature"]
reference = attr["reference_group"]
_check_ranges(base_name, "reference group", reference)
monitored = attr.get("monitored_group", None)
if monitored is not None:
_check_ranges(base_name, "monitored group", monitored)
_check_overlaps(
base_name, "reference group", reference, "monitored group", monitored
)
class _PandasToDatasetConverter:
def __init__(self, favorable_label, unfavorable_label, protected_attribute_names):
self.favorable_label = favorable_label
self.unfavorable_label = unfavorable_label
self.protected_attribute_names = protected_attribute_names
def convert(self, X, y, probas=None):
assert isinstance(X, pd.DataFrame), type(X)
assert isinstance(y, pd.Series), type(y)
assert X.shape[0] == y.shape[0], f"X.shape {X.shape}, y.shape {y.shape}"
assert not X.isna().any().any(), f"X\n{X}\n"
assert not y.isna().any().any(), f"y\n{X}\n"
y_reindexed = pd.Series(data=y.values, index=X.index, name=y.name)
df = pd.concat([X, y_reindexed], axis=1)
assert df.shape[0] == X.shape[0], f"df.shape {df.shape}, X.shape {X.shape}"
assert not df.isna().any().any(), f"df\n{df}\nX\n{X}\ny\n{y}"
label_names = [y.name]
result = aif360.datasets.BinaryLabelDataset(
favorable_label=self.favorable_label,
unfavorable_label=self.unfavorable_label,
protected_attribute_names=self.protected_attribute_names,
df=df,
label_names=label_names,
)
if probas is not None:
pos_ind = 1 # TODO: is this always the case?
result.scores = probas[:, pos_ind].reshape(-1, 1)
return result
def _ensure_str(str_or_int: Union[str, int]) -> str:
return f"f{str_or_int}" if isinstance(str_or_int, int) else str_or_int
def _ndarray_to_series(data, name, index=None, dtype=None) -> pd.Series:
if isinstance(data, pd.Series):
return data
if isinstance(data, pd.DataFrame):
assert len(data.columns) == 1, data.columns
data = data[data.columns[0]]
result = pd.Series(data=data, index=index, dtype=dtype, name=_ensure_str(name))
schema = getattr(data, "json_schema", None)
if schema is not None:
result = lale.datasets.data_schemas.add_schema(result, schema)
return result
def _ndarray_to_dataframe(array) -> pd.DataFrame:
assert len(array.shape) == 2
column_names = None
schema = getattr(array, "json_schema", None)
if schema is not None:
column_schemas = schema.get("items", {}).get("items", None)
if isinstance(column_schemas, list):
column_names = [s.get("description", None) for s in column_schemas]
if column_names is None or None in column_names:
column_names = [_ensure_str(i) for i in range(array.shape[1])]
result = pd.DataFrame(array, columns=column_names)
if schema is not None:
result = lale.datasets.data_schemas.add_schema(result, schema)
return result
#####################################################################
# Mitigator base classes and common schemas
#####################################################################
class _BaseInEstimatorImpl:
def __init__(
self,
*,
favorable_labels,
protected_attributes,
unfavorable_labels,
redact,
preparation,
mitigator,
):
_validate_fairness_info(
favorable_labels, protected_attributes, unfavorable_labels, False
)
self.favorable_labels = favorable_labels
self.protected_attributes = protected_attributes
self.unfavorable_labels = unfavorable_labels
self.redact = redact
if preparation is None:
preparation = lale.lib.lale.NoOp
self.preparation = preparation
self.mitigator = mitigator
def _prep_and_encode(self, X, y=None):
prepared_X = self.redact_and_prep.transform(X, y)
encoded_X, encoded_y = self.prot_attr_enc.transform_X_y(X, y)
combined_attribute_names = list(prepared_X.columns) + [
name for name in encoded_X.columns if name not in prepared_X.columns
]
combined_columns = [
encoded_X[name] if name in encoded_X else prepared_X[name]
for name in combined_attribute_names
]
combined_X = pd.concat(combined_columns, axis=1)
result = self.pandas_to_dataset.convert(combined_X, encoded_y)
return result
def _decode(self, y):
assert isinstance(y, pd.Series)
assert len(self.favorable_labels) == 1 and len(self.not_favorable_labels) == 1
favorable, not_favorable = (
self.favorable_labels[0],
self.not_favorable_labels[0],
)
result = y.map(lambda label: favorable if label == 1 else not_favorable)
return result
def fit(self, X, y):
from lale.lib.aif360 import ProtectedAttributesEncoder, Redacting
fairness_info = {
"favorable_labels": self.favorable_labels,
"protected_attributes": self.protected_attributes,
"unfavorable_labels": self.unfavorable_labels,
}
redacting = Redacting(**fairness_info) if self.redact else lale.lib.lale.NoOp
trainable_redact_and_prep = redacting >> self.preparation
assert isinstance(trainable_redact_and_prep, TrainablePipeline)
self.redact_and_prep = trainable_redact_and_prep.fit(X, y)
self.prot_attr_enc = ProtectedAttributesEncoder(
**fairness_info,
remainder="drop",
)
prot_attr_names = [pa["feature"] for pa in self.protected_attributes]
self.pandas_to_dataset = _PandasToDatasetConverter(
favorable_label=1,
unfavorable_label=0,
protected_attribute_names=prot_attr_names,
)
encoded_data = self._prep_and_encode(X, y)
self.mitigator.fit(encoded_data)
self.classes_ = set(list(y))
self.not_favorable_labels = list(
self.classes_ - set(list(self.favorable_labels))
)
self.classes_ = np.array(list(self.classes_))
return self
def predict(self, X, **predict_params):
encoded_data = self._prep_and_encode(X)
result_data = self.mitigator.predict(encoded_data, **predict_params)
_, result_y = dataset_to_pandas(result_data, return_only="y")
decoded_y = self._decode(result_y)
return decoded_y
def predict_proba(self, X):
        # Note: this will break for GerryFairClassifier
encoded_data = self._prep_and_encode(X)
result_data = self.mitigator.predict(encoded_data)
favorable_probs = result_data.scores
all_probs = np.hstack([1 - favorable_probs, favorable_probs])
return all_probs
class _BasePostEstimatorImpl:
def __init__(
self,
*,
favorable_labels,
protected_attributes,
unfavorable_labels,
estimator,
redact,
mitigator,
):
_validate_fairness_info(
favorable_labels, protected_attributes, unfavorable_labels, True
)
self.favorable_labels = favorable_labels
self.protected_attributes = protected_attributes
self.unfavorable_labels = unfavorable_labels
self.estimator = estimator
self.redact = redact
self.mitigator = mitigator
def _decode(self, y):
assert isinstance(y, pd.Series), type(y)
assert len(self.favorable_labels) == 1, self.favorable_labels
assert len(self.not_favorable_labels) == 1, self.not_favorable_labels
favorable, not_favorable = (
self.favorable_labels[0],
self.not_favorable_labels[0],
)
result = y.map(lambda label: favorable if label == 1 else not_favorable)
return result
def fit(self, X, y):
from lale.lib.aif360 import ProtectedAttributesEncoder, Redacting
fairness_info = {
"favorable_labels": self.favorable_labels,
"protected_attributes": self.protected_attributes,
"unfavorable_labels": self.unfavorable_labels,
}
redacting = Redacting(**fairness_info) if self.redact else lale.lib.lale.NoOp
trainable_redact_and_estim = redacting >> self.estimator
assert isinstance(trainable_redact_and_estim, TrainablePipeline)
self.redact_and_estim = trainable_redact_and_estim.fit(X, y)
self.prot_attr_enc = ProtectedAttributesEncoder(
**fairness_info,
remainder="drop",
)
prot_attr_names = [pa["feature"] for pa in self.protected_attributes]
self.pandas_to_dataset = _PandasToDatasetConverter(
favorable_label=1,
unfavorable_label=0,
protected_attribute_names=prot_attr_names,
)
encoded_X, encoded_y = self.prot_attr_enc.transform_X_y(X, y)
self.y_dtype = encoded_y.dtype
self.y_name = encoded_y.name
predicted_y = self.redact_and_estim.predict(X)
predicted_y = _ndarray_to_series(predicted_y, self.y_name, X.index)
_, predicted_y = self.prot_attr_enc.transform_X_y(X, predicted_y)
predicted_probas = self.redact_and_estim.predict_proba(X)
dataset_true = self.pandas_to_dataset.convert(encoded_X, encoded_y)
dataset_pred = self.pandas_to_dataset.convert(
encoded_X, predicted_y, predicted_probas
)
self.mitigator = self.mitigator.fit(dataset_true, dataset_pred)
self.classes_ = set(list(y))
self.not_favorable_labels = list(
self.classes_ - set(list(self.favorable_labels))
)
self.classes_ = np.array(list(self.classes_))
return self
def predict(self, X):
predicted_y = self.redact_and_estim.predict(X)
predicted_probas = self.redact_and_estim.predict_proba(X)
predicted_y = _ndarray_to_series(predicted_y, self.y_name, X.index)
encoded_X, predicted_y = self.prot_attr_enc.transform_X_y(X, predicted_y)
dataset_pred = self.pandas_to_dataset.convert(
encoded_X, predicted_y, predicted_probas
)
dataset_out = self.mitigator.predict(dataset_pred)
_, result_y = dataset_to_pandas(dataset_out, return_only="y")
decoded_y = self._decode(result_y)
return decoded_y
def predict_proba(self, X):
predicted_y = self.redact_and_estim.predict(X)
predicted_probas = self.redact_and_estim.predict_proba(X)
predicted_y = _ndarray_to_series(predicted_y, self.y_name, X.index)
encoded_X, predicted_y = self.prot_attr_enc.transform_X_y(X, predicted_y)
dataset_pred = self.pandas_to_dataset.convert(
encoded_X, predicted_y, predicted_probas
)
dataset_out = self.mitigator.predict(dataset_pred)
favorable_probs = dataset_out.scores
all_probs = np.hstack([1 - favorable_probs, favorable_probs])
return all_probs
_categorical_supervised_input_fit_schema = {
"type": "object",
"required": ["X", "y"],
"additionalProperties": False,
"properties": {
"X": {
"description": "Features; the outer array is over samples.",
"type": "array",
"items": {
"type": "array",
"items": {"anyOf": [{"type": "number"}, {"type": "string"}]},
},
},
"y": {
"description": "Target class labels; the array is over samples.",
"anyOf": [
{"type": "array", "items": {"type": "number"}},
{"type": "array", "items": {"type": "string"}},
],
},
},
}
_categorical_unsupervised_input_fit_schema = {
"description": "Input data schema for training.",
"type": "object",
"required": ["X"],
"additionalProperties": False,
"properties": {
"X": {
"description": "Features; the outer array is over samples.",
"type": "array",
"items": {
"type": "array",
"items": {"anyOf": [{"type": "number"}, {"type": "string"}]},
},
},
"y": {"description": "Target values; the array is over samples."},
},
}
_categorical_input_predict_schema = {
"type": "object",
"required": ["X"],
"additionalProperties": False,
"properties": {
"X": {
"description": "Features; the outer array is over samples.",
"type": "array",
"items": {
"type": "array",
"items": {"anyOf": [{"type": "number"}, {"type": "string"}]},
},
}
},
}
_categorical_output_predict_schema = {
"description": "Predicted class label per sample.",
"anyOf": [
{"type": "array", "items": {"type": "number"}},
{"type": "array", "items": {"type": "string"}},
],
}
_categorical_input_predict_proba_schema = {
"type": "object",
"additionalProperties": False,
"required": ["X"],
"properties": {
"X": {
"description": "Features; the outer array is over samples.",
"type": "array",
"items": {
"type": "array",
"items": {"anyOf": [{"type": "number"}, {"type": "string"}]},
},
}
},
}
_categorical_output_predict_proba_schema = {
"description": "The class probabilities of the input samples",
"anyOf": [
{"type": "array", "items": {"laleType": "Any"}},
{"type": "array", "items": {"type": "array", "items": {"laleType": "Any"}}},
],
}
_categorical_input_transform_schema = {
"description": "Input data schema for transform.",
"type": "object",
"required": ["X"],
"additionalProperties": False,
"properties": {
"X": {
"description": "Features; the outer array is over samples.",
"type": "array",
"items": {
"type": "array",
"items": {"anyOf": [{"type": "number"}, {"type": "string"}]},
},
}
},
}
_categorical_output_transform_schema = {
"description": "Output data schema for reweighted features.",
"type": "array",
"items": {
"type": "array",
"items": {"anyOf": [{"type": "number"}, {"type": "string"}]},
},
}
_numeric_output_transform_schema = {
"description": "Output data schema for reweighted features.",
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
}
#####################################################################
# Metrics
#####################################################################
def _y_pred_series(
y_true: Union[pd.Series, np.ndarray, None],
y_pred: Union[pd.Series, np.ndarray],
X: Union[pd.DataFrame, np.ndarray],
) -> pd.Series:
if isinstance(y_pred, pd.Series):
return y_pred
assert y_true is not None
return _ndarray_to_series(
y_pred,
y_true.name if isinstance(y_true, pd.Series) else _ensure_str(X.shape[1]), # type: ignore
X.index if isinstance(X, pd.DataFrame) else None, # type: ignore
y_pred.dtype,
)
class _AIF360ScorerFactory:
_cached_pandas_to_dataset: Optional[_PandasToDatasetConverter]
def __init__(
self,
metric: str,
favorable_labels: _FAV_LABELS_TYPE,
protected_attributes: List[JSON_TYPE],
unfavorable_labels: Optional[_FAV_LABELS_TYPE],
):
_validate_fairness_info(
favorable_labels, protected_attributes, unfavorable_labels, True
)
if metric in ["disparate_impact", "statistical_parity_difference"]:
unfavorable_labels = None # not used and may confound AIF360
if hasattr(aif360.metrics.BinaryLabelDatasetMetric, metric):
self.kind = "BinaryLabelDatasetMetric"
elif hasattr(aif360.metrics.ClassificationMetric, metric):
self.kind = "ClassificationMetric"
else:
raise ValueError(f"unknown metric {metric}")
self.metric = metric
self.fairness_info = {
"favorable_labels": favorable_labels,
"protected_attributes": protected_attributes,
"unfavorable_labels": unfavorable_labels,
}
from lale.lib.aif360 import ProtectedAttributesEncoder
self.prot_attr_enc = ProtectedAttributesEncoder(
**self.fairness_info,
remainder="drop",
)
pas = protected_attributes
self.unprivileged_groups = [{_ensure_str(pa["feature"]): 0 for pa in pas}]
self.privileged_groups = [{_ensure_str(pa["feature"]): 1 for pa in pas}]
self._cached_pandas_to_dataset = None
def _pandas_to_dataset(self) -> _PandasToDatasetConverter:
if self._cached_pandas_to_dataset is None:
self._cached_pandas_to_dataset = _PandasToDatasetConverter(
favorable_label=1,
unfavorable_label=0,
protected_attribute_names=list(self.privileged_groups[0].keys()),
)
return self._cached_pandas_to_dataset
def score_data(
self,
y_true: Union[pd.Series, np.ndarray, None] = None,
y_pred: Union[pd.Series, np.ndarray, None] = None,
X: Union[pd.DataFrame, np.ndarray, None] = None,
) -> float:
assert y_pred is not None and X is not None
y_pred_orig = y_pred
y_pred = _y_pred_series(y_true, y_pred, X)
encoded_X, y_pred = self.prot_attr_enc.transform_X_y(X, y_pred)
try:
dataset_pred = self._pandas_to_dataset().convert(encoded_X, y_pred)
except ValueError as e:
raise ValueError(
"The data has unexpected labels given the fairness info: "
f"favorable labels {self.fairness_info['favorable_labels']}, "
f"unfavorable labels {self.fairness_info['unfavorable_labels']}, "
f"unique values in y_pred {set(y_pred_orig)}."
) from e
if self.kind == "BinaryLabelDatasetMetric":
fairness_metrics = aif360.metrics.BinaryLabelDatasetMetric(
dataset_pred, self.unprivileged_groups, self.privileged_groups
)
else:
assert self.kind == "ClassificationMetric"
assert y_pred is not None and y_true is not None
if not isinstance(y_true, pd.Series):
y_true = _ndarray_to_series(
y_true, y_pred.name, y_pred.index, y_pred_orig.dtype # type: ignore
)
_, y_true = self.prot_attr_enc.transform_X_y(X, y_true)
dataset_true = self._pandas_to_dataset().convert(encoded_X, y_true)
fairness_metrics = aif360.metrics.ClassificationMetric(
dataset_true,
dataset_pred,
self.unprivileged_groups,
self.privileged_groups,
)
method = getattr(fairness_metrics, self.metric)
result = method()
        if not np.isfinite(result):  # covers both NaN and +/-infinity
if 0 == fairness_metrics.num_positives(privileged=True):
logger.warning("there are 0 positives in the privileged group")
if 0 == fairness_metrics.num_positives(privileged=False):
logger.warning("there are 0 positives in the unprivileged group")
if 0 == fairness_metrics.num_instances(privileged=True):
logger.warning("there are 0 instances in the privileged group")
if 0 == fairness_metrics.num_instances(privileged=False):
logger.warning("there are 0 instances in the unprivileged group")
logger.warning(
f"The metric {self.metric} is ill-defined and returns {result}. Check your fairness configuration. The set of predicted labels is {set(y_pred_orig)}."
)
return result
def score_estimator(
self,
estimator: TrainedOperator,
X: Union[pd.DataFrame, np.ndarray],
y: Union[pd.Series, np.ndarray],
) -> float:
return self.score_data(y_true=y, y_pred=estimator.predict(X), X=X)
def __call__(
self,
estimator: TrainedOperator,
X: Union[pd.DataFrame, np.ndarray],
y: Union[pd.Series, np.ndarray],
) -> float:
return self.score_estimator(estimator, X, y)
_Batch_Xy = Tuple[pd.DataFrame, pd.Series]
_Batch_yyX = Tuple[Optional[pd.Series], pd.Series, pd.DataFrame]
class _DIorSPDData(MetricMonoid):
def __init__(
self, priv0_fav0: float, priv0_fav1: float, priv1_fav0: float, priv1_fav1: float
):
self.priv0_fav0 = priv0_fav0
self.priv0_fav1 = priv0_fav1
self.priv1_fav0 = priv1_fav0
self.priv1_fav1 = priv1_fav1
def combine(self, other: "_DIorSPDData") -> "_DIorSPDData":
return _DIorSPDData(
priv0_fav0=self.priv0_fav0 + other.priv0_fav0,
priv0_fav1=self.priv0_fav1 + other.priv0_fav1,
priv1_fav0=self.priv1_fav0 + other.priv1_fav0,
priv1_fav1=self.priv1_fav1 + other.priv1_fav1,
)
class _DIorSPDScorerFactory(_AIF360ScorerFactory):
def to_monoid(self, batch: _Batch_yyX) -> _DIorSPDData:
y_true, y_pred, X = batch
assert y_pred is not None and X is not None, batch
y_pred = _y_pred_series(y_true, y_pred, X)
encoded_X, y_pred = self.prot_attr_enc.transform_X_y(X, y_pred)
gensym = GenSym(set(_ensure_str(n) for n in get_columns(encoded_X)))
y_pred_name = gensym("y_pred")
y_pred = pd.DataFrame({y_pred_name: y_pred})
pa_names = self.privileged_groups[0].keys()
priv0 = functools.reduce(lambda a, b: a & b, (it[pa] == 0 for pa in pa_names))
priv1 = functools.reduce(lambda a, b: a & b, (it[pa] == 1 for pa in pa_names))
prd = it[y_pred_name]
map_op = Map(
columns={
"priv0_fav0": astype("int", priv0 & (prd == 0)),
"priv0_fav1": astype("int", priv0 & (prd == 1)),
"priv1_fav0": astype("int", priv1 & (prd == 0)),
"priv1_fav1": astype("int", priv1 & (prd == 1)),
}
)
agg_op = Aggregate(
columns={
"priv0_fav0": sum(it.priv0_fav0),
"priv0_fav1": sum(it.priv0_fav1),
"priv1_fav0": sum(it.priv1_fav0),
"priv1_fav1": sum(it.priv1_fav1),
}
)
pipeline = ConcatFeatures >> map_op >> agg_op
agg_df = _ensure_pandas(pipeline.transform([encoded_X, y_pred]))
return _DIorSPDData(
priv0_fav0=agg_df.at[0, "priv0_fav0"],
priv0_fav1=agg_df.at[0, "priv0_fav1"],
priv1_fav0=agg_df.at[0, "priv1_fav0"],
priv1_fav1=agg_df.at[0, "priv1_fav1"],
)
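# Sketch of the monoid protocol shared by the scorer factories in this module
# (the scorer, batches, and fairness info are hypothetical): each batch is
# reduced to a small summary with to_monoid, summaries are merged with combine,
# and the metric value is read off with from_monoid, so large datasets can be
# scored incrementally.
#
#   scorer = disparate_impact(**fairness_info)  # defined later in this module
#   m = scorer.to_monoid((y_true_1, y_pred_1, X_1))
#   m = m.combine(scorer.to_monoid((y_true_2, y_pred_2, X_2)))
#   value = scorer.from_monoid(m)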
class _AODorEODData(MetricMonoid):
def __init__(
self,
tru0_pred0_priv0: float,
tru0_pred0_priv1: float,
tru0_pred1_priv0: float,
tru0_pred1_priv1: float,
tru1_pred0_priv0: float,
tru1_pred0_priv1: float,
tru1_pred1_priv0: float,
tru1_pred1_priv1: float,
):
self.tru0_pred0_priv0 = tru0_pred0_priv0
self.tru0_pred0_priv1 = tru0_pred0_priv1
self.tru0_pred1_priv0 = tru0_pred1_priv0
self.tru0_pred1_priv1 = tru0_pred1_priv1
self.tru1_pred0_priv0 = tru1_pred0_priv0
self.tru1_pred0_priv1 = tru1_pred0_priv1
self.tru1_pred1_priv0 = tru1_pred1_priv0
self.tru1_pred1_priv1 = tru1_pred1_priv1
def combine(self, other: "_AODorEODData") -> "_AODorEODData":
return _AODorEODData(
tru0_pred0_priv0=self.tru0_pred0_priv0 + other.tru0_pred0_priv0,
tru0_pred0_priv1=self.tru0_pred0_priv1 + other.tru0_pred0_priv1,
tru0_pred1_priv0=self.tru0_pred1_priv0 + other.tru0_pred1_priv0,
tru0_pred1_priv1=self.tru0_pred1_priv1 + other.tru0_pred1_priv1,
tru1_pred0_priv0=self.tru1_pred0_priv0 + other.tru1_pred0_priv0,
tru1_pred0_priv1=self.tru1_pred0_priv1 + other.tru1_pred0_priv1,
tru1_pred1_priv0=self.tru1_pred1_priv0 + other.tru1_pred1_priv0,
tru1_pred1_priv1=self.tru1_pred1_priv1 + other.tru1_pred1_priv1,
)
class _AODorEODScorerFactory(_AIF360ScorerFactory):
def to_monoid(self, batch: _Batch_yyX) -> _AODorEODData:
y_true, y_pred, X = batch
assert y_pred is not None and X is not None, batch
y_pred = _y_pred_series(y_true, y_pred, X)
encoded_X, y_pred = self.prot_attr_enc.transform_X_y(X, y_pred)
gensym = GenSym(set(_ensure_str(n) for n in get_columns(encoded_X)))
y_true_name, y_pred_name = gensym("y_true"), gensym("y_pred")
y_pred = pd.DataFrame({y_pred_name: y_pred})
_, y_true = self.prot_attr_enc.transform_X_y(X, y_true)
y_true = pd.DataFrame({y_true_name: pd.Series(y_true, y_pred.index)})
pa_names = self.privileged_groups[0].keys()
priv0 = functools.reduce(lambda a, b: a & b, (it[pa] == 0 for pa in pa_names))
priv1 = functools.reduce(lambda a, b: a & b, (it[pa] == 1 for pa in pa_names))
tru, prd = it[y_true_name], it[y_pred_name]
map_op = Map(
columns={
"tru0_pred0_priv0": astype("int", (tru == 0) & (prd == 0) & priv0),
"tru0_pred0_priv1": astype("int", (tru == 0) & (prd == 0) & priv1),
"tru0_pred1_priv0": astype("int", (tru == 0) & (prd == 1) & priv0),
"tru0_pred1_priv1": astype("int", (tru == 0) & (prd == 1) & priv1),
"tru1_pred0_priv0": astype("int", (tru == 1) & (prd == 0) & priv0),
"tru1_pred0_priv1": astype("int", (tru == 1) & (prd == 0) & priv1),
"tru1_pred1_priv0": astype("int", (tru == 1) & (prd == 1) & priv0),
"tru1_pred1_priv1": astype("int", (tru == 1) & (prd == 1) & priv1),
}
)
agg_op = Aggregate(
columns={
"tru0_pred0_priv0": sum(it.tru0_pred0_priv0),
"tru0_pred0_priv1": sum(it.tru0_pred0_priv1),
"tru0_pred1_priv0": sum(it.tru0_pred1_priv0),
"tru0_pred1_priv1": sum(it.tru0_pred1_priv1),
"tru1_pred0_priv0": sum(it.tru1_pred0_priv0),
"tru1_pred0_priv1": sum(it.tru1_pred0_priv1),
"tru1_pred1_priv0": sum(it.tru1_pred1_priv0),
"tru1_pred1_priv1": sum(it.tru1_pred1_priv1),
}
)
pipeline = ConcatFeatures >> map_op >> agg_op
agg_df = _ensure_pandas(pipeline.transform([encoded_X, y_true, y_pred]))
return _AODorEODData(
tru0_pred0_priv0=agg_df.at[0, "tru0_pred0_priv0"],
tru0_pred0_priv1=agg_df.at[0, "tru0_pred0_priv1"],
tru0_pred1_priv0=agg_df.at[0, "tru0_pred1_priv0"],
tru0_pred1_priv1=agg_df.at[0, "tru0_pred1_priv1"],
tru1_pred0_priv0=agg_df.at[0, "tru1_pred0_priv0"],
tru1_pred0_priv1=agg_df.at[0, "tru1_pred0_priv1"],
tru1_pred1_priv0=agg_df.at[0, "tru1_pred1_priv0"],
tru1_pred1_priv1=agg_df.at[0, "tru1_pred1_priv1"],
)
_SCORER_DOCSTRING_ARGS = """
Parameters
----------
favorable_labels : array of union
Label values which are considered favorable (i.e. "positive").
- string
Literal value
- *or* number
Numerical value
- *or* array of numbers, >= 2 items, <= 2 items
Numeric range [a,b] from a to b inclusive.
protected_attributes : array of dict
Features for which fairness is desired.
- feature : string or integer
Column name or column index.
- reference_group : array of union
Values or ranges that indicate being a member of the privileged group.
- string
Literal value
- *or* number
Numerical value
- *or* array of numbers, >= 2 items, <= 2 items
Numeric range [a,b] from a to b inclusive.
- monitored_group : union type, default None
Values or ranges that indicate being a member of the unprivileged group.
- None
If `monitored_group` is not explicitly specified, consider any values not captured by `reference_group` as monitored.
- *or* array of union
- string
Literal value
- *or* number
Numerical value
- *or* array of numbers, >= 2 items, <= 2 items
Numeric range [a,b] from a to b inclusive.
unfavorable_labels : union type, default None
Label values which are considered unfavorable (i.e. "negative").
- None
If `unfavorable_labels` is not explicitly specified, consider any labels not captured by `favorable_labels` as unfavorable.
- *or* array of union
- string
Literal value
- *or* number
Numerical value
- *or* array of numbers, >= 2 items, <= 2 items
Numeric range [a,b] from a to b inclusive."""
_SCORER_DOCSTRING_RETURNS = """
Returns
-------
result : callable
Scorer that takes three arguments ``(estimator, X, y)`` and returns a
scalar number. Furthermore, besides being callable, the returned object
also has two methods, ``score_data(y_true, y_pred, X)`` for evaluating
datasets and ``score_estimator(estimator, X, y)`` for evaluating
estimators.
"""
_SCORER_DOCSTRING = _SCORER_DOCSTRING_ARGS + _SCORER_DOCSTRING_RETURNS
_BLENDED_SCORER_DOCSTRING = (
_SCORER_DOCSTRING_ARGS
+ """
fairness_weight : number, >=0, <=1, default=0.5
At the default weight of 0.5, the two metrics contribute equally to the blended result. Above 0.5, fairness influences the combination more, and below 0.5, fairness influences the combination less. In the extreme, at 1, the outcome is only determined by fairness, and at 0, the outcome ignores fairness.
"""
+ _SCORER_DOCSTRING_RETURNS
)
class _AccuracyAndSymmDIData(MetricMonoid):
def __init__(
self,
accuracy_data: lale.lib.rasl.metrics._AccuracyData,
symm_di_data: _DIorSPDData,
):
self.accuracy_data = accuracy_data
self.symm_di_data = symm_di_data
def combine(self, other: "_AccuracyAndSymmDIData") -> "_AccuracyAndSymmDIData":
return _AccuracyAndSymmDIData(
self.accuracy_data.combine(other.accuracy_data),
self.symm_di_data.combine(other.symm_di_data),
)
class _AccuracyAndDisparateImpact(MetricMonoidFactory[_AccuracyAndSymmDIData]):
def __init__(
self,
favorable_labels: _FAV_LABELS_TYPE,
protected_attributes: List[JSON_TYPE],
unfavorable_labels: Optional[_FAV_LABELS_TYPE],
fairness_weight: float,
):
if fairness_weight < 0.0 or fairness_weight > 1.0:
logger.warning(
f"invalid fairness_weight {fairness_weight}, setting it to 0.5"
)
fairness_weight = 0.5
self.accuracy_scorer = lale.lib.rasl.get_scorer("accuracy")
self.symm_di_scorer = symmetric_disparate_impact(
favorable_labels, protected_attributes, unfavorable_labels
)
self.fairness_weight = fairness_weight
def _blend_metrics(self, accuracy: float, symm_di: float) -> float:
if accuracy < 0.0 or accuracy > 1.0:
logger.warning(f"invalid accuracy {accuracy}, setting it to zero")
accuracy = 0.0
if symm_di < 0.0 or symm_di > 1.0 or np.isinf(symm_di) or np.isnan(symm_di):
logger.warning(f"invalid symm_di {symm_di}, setting it to zero")
symm_di = 0.0
result = (1 - self.fairness_weight) * accuracy + self.fairness_weight * symm_di
if result < 0.0 or result > 1.0:
logger.warning(
f"unexpected result {result} for accuracy {accuracy} and symm_di {symm_di}"
)
return result
def to_monoid(self, batch: _Batch_yyX) -> _AccuracyAndSymmDIData:
return _AccuracyAndSymmDIData(
self.accuracy_scorer.to_monoid(batch), self.symm_di_scorer.to_monoid(batch)
)
def from_monoid(self, monoid: _AccuracyAndSymmDIData) -> float:
accuracy = self.accuracy_scorer.from_monoid(monoid.accuracy_data)
symm_di = self.symm_di_scorer.from_monoid(monoid.symm_di_data)
return self._blend_metrics(accuracy, symm_di)
def score_data(
self,
y_true: Union[pd.Series, np.ndarray, None] = None,
y_pred: Union[pd.Series, np.ndarray, None] = None,
X: Union[pd.DataFrame, np.ndarray, None] = None,
) -> float:
assert y_true is not None and y_pred is not None and X is not None
accuracy = self.accuracy_scorer.score_data(y_true, y_pred, X)
symm_di = self.symm_di_scorer.score_data(y_true, y_pred, X)
return self._blend_metrics(accuracy, symm_di)
def score_estimator(
self,
estimator: TrainedOperator,
X: Union[pd.DataFrame, np.ndarray],
y: Union[pd.Series, np.ndarray],
) -> float:
accuracy = self.accuracy_scorer.score_estimator(estimator, X, y)
symm_di = self.symm_di_scorer.score_estimator(estimator, X, y)
return self._blend_metrics(accuracy, symm_di)
def __call__(
self,
estimator: TrainedOperator,
X: Union[pd.DataFrame, np.ndarray],
y: Union[pd.Series, np.ndarray],
) -> float:
return self.score_estimator(estimator, X, y)
def accuracy_and_disparate_impact(
favorable_labels: _FAV_LABELS_TYPE,
protected_attributes: List[JSON_TYPE],
unfavorable_labels: Optional[_FAV_LABELS_TYPE] = None,
fairness_weight: float = 0.5,
) -> _AccuracyAndDisparateImpact:
"""
Create a scikit-learn compatible blended scorer for `accuracy`_
and `symmetric disparate impact`_ given the fairness info.
The scorer is suitable for classification problems,
with higher resulting scores indicating better outcomes.
The result is a linear combination of accuracy and
symmetric disparate impact, and is between 0 and 1.
This metric can be used as the `scoring` argument
of an optimizer such as `Hyperopt`_, as shown in this `demo`_.
.. _`accuracy`: https://scikit-learn.org/stable/modules/generated/sklearn.metrics.accuracy_score.html
.. _`symmetric disparate impact`: lale.lib.aif360.util.html#lale.lib.aif360.util.symmetric_disparate_impact
.. _`Hyperopt`: lale.lib.lale.hyperopt.html#lale.lib.lale.hyperopt.Hyperopt
.. _`demo`: https://nbviewer.jupyter.org/github/IBM/lale/blob/master/examples/demo_aif360.ipynb
"""
return _AccuracyAndDisparateImpact(
favorable_labels, protected_attributes, unfavorable_labels, fairness_weight
)
accuracy_and_disparate_impact.__doc__ = (
str(accuracy_and_disparate_impact.__doc__) + _BLENDED_SCORER_DOCSTRING
)
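# Sketch of using the blended scorer as the `scoring` argument of an optimizer
# (the planned pipeline, data, and fairness info are hypothetical):
#
#   from lale.lib.lale import Hyperopt
#   combined_scorer = accuracy_and_disparate_impact(**fairness_info)
#   hyperopt = Hyperopt(estimator=planned_pipeline, scoring=combined_scorer, max_evals=10)
#   trained = hyperopt.fit(train_X, train_y)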
class _AverageOddsDifference(
_AODorEODScorerFactory, MetricMonoidFactory[_AODorEODData]
):
def __init__(
self,
favorable_labels: _FAV_LABELS_TYPE,
protected_attributes: List[JSON_TYPE],
unfavorable_labels: Optional[_FAV_LABELS_TYPE],
):
super().__init__(
"average_odds_difference",
favorable_labels,
protected_attributes,
unfavorable_labels,
)
def from_monoid(self, monoid: _AODorEODData) -> float:
fpr_priv0 = monoid.tru0_pred1_priv0 / np.float64(
monoid.tru0_pred1_priv0 + monoid.tru0_pred0_priv0
)
fpr_priv1 = monoid.tru0_pred1_priv1 / np.float64(
monoid.tru0_pred1_priv1 + monoid.tru0_pred0_priv1
)
tpr_priv0 = monoid.tru1_pred1_priv0 / np.float64(
monoid.tru1_pred1_priv0 + monoid.tru1_pred0_priv0
)
tpr_priv1 = monoid.tru1_pred1_priv1 / np.float64(
monoid.tru1_pred1_priv1 + monoid.tru1_pred0_priv1
)
return 0.5 * float(fpr_priv0 - fpr_priv1 + tpr_priv0 - tpr_priv1)
def average_odds_difference(
favorable_labels: _FAV_LABELS_TYPE,
protected_attributes: List[JSON_TYPE],
unfavorable_labels: Optional[_FAV_LABELS_TYPE] = None,
) -> _AverageOddsDifference:
r"""
Create a scikit-learn compatible `average odds difference`_ scorer
    given the fairness info. Average of the differences in false positive
    rate and true positive rate between the unprivileged and privileged
    groups.
.. math::
\tfrac{1}{2}\left[(\text{FPR}_{D = \text{unprivileged}} - \text{FPR}_{D = \text{privileged}}) + (\text{TPR}_{D = \text{unprivileged}} - \text{TPR}_{D = \text{privileged}})\right]
The ideal value of this metric is 0. A value of <0 implies higher
benefit for the privileged group and a value >0 implies higher
benefit for the unprivileged group. Fairness for this metric is
between -0.1 and 0.1.
.. _`average odds difference`: https://aif360.readthedocs.io/en/latest/modules/generated/aif360.metrics.ClassificationMetric.html#aif360.metrics.ClassificationMetric.average_odds_difference
"""
return _AverageOddsDifference(
favorable_labels,
protected_attributes,
unfavorable_labels,
)
average_odds_difference.__doc__ = (
str(average_odds_difference.__doc__) + _SCORER_DOCSTRING
)
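# Worked example for the from_monoid computation above (the rates are made up):
# with FPR 0.30 and TPR 0.60 for the unprivileged group and FPR 0.20 and
# TPR 0.80 for the privileged group, the metric is
# 0.5 * ((0.30 - 0.20) + (0.60 - 0.80)) = -0.05, which is within the [-0.1, 0.1]
# fairness range but slightly favors the privileged group.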
class _BalAccAndSymmDIData(MetricMonoid):
def __init__(
self,
bal_acc_data: lale.lib.rasl.metrics._BalancedAccuracyData,
symm_di_data: _DIorSPDData,
):
self.bal_acc_data = bal_acc_data
self.symm_di_data = symm_di_data
def combine(self, other: "_BalAccAndSymmDIData") -> "_BalAccAndSymmDIData":
return _BalAccAndSymmDIData(
self.bal_acc_data.combine(other.bal_acc_data),
self.symm_di_data.combine(other.symm_di_data),
)
class _BalancedAccuracyAndDisparateImpact(MetricMonoidFactory[_BalAccAndSymmDIData]):
def __init__(
self,
favorable_labels: _FAV_LABELS_TYPE,
protected_attributes: List[JSON_TYPE],
unfavorable_labels: Optional[_FAV_LABELS_TYPE],
fairness_weight: float,
):
if fairness_weight < 0.0 or fairness_weight > 1.0:
logger.warning(
f"invalid fairness_weight {fairness_weight}, setting it to 0.5"
)
fairness_weight = 0.5
self.bal_acc_scorer = lale.lib.rasl.get_scorer("balanced_accuracy")
self.symm_di_scorer = symmetric_disparate_impact(
favorable_labels, protected_attributes, unfavorable_labels
)
self.fairness_weight = fairness_weight
def _blend_metrics(self, bal_acc: float, symm_di: float) -> float:
if bal_acc < 0.0 or bal_acc > 1.0:
logger.warning(f"invalid bal_acc {bal_acc}, setting it to zero")
bal_acc = 0.0
if symm_di < 0.0 or symm_di > 1.0 or np.isinf(symm_di) or np.isnan(symm_di):
logger.warning(f"invalid symm_di {symm_di}, setting it to zero")
symm_di = 0.0
result = (1 - self.fairness_weight) * bal_acc + self.fairness_weight * symm_di
if result < 0.0 or result > 1.0:
logger.warning(
f"unexpected result {result} for bal_acc {bal_acc} and symm_di {symm_di}"
)
return result
def to_monoid(self, batch: _Batch_yyX) -> _BalAccAndSymmDIData:
return _BalAccAndSymmDIData(
self.bal_acc_scorer.to_monoid(batch), self.symm_di_scorer.to_monoid(batch)
)
def from_monoid(self, monoid: _BalAccAndSymmDIData) -> float:
bal_acc = self.bal_acc_scorer.from_monoid(monoid.bal_acc_data)
symm_di = self.symm_di_scorer.from_monoid(monoid.symm_di_data)
return self._blend_metrics(bal_acc, symm_di)
def score_data(
self,
y_true: Union[pd.Series, np.ndarray, None] = None,
y_pred: Union[pd.Series, np.ndarray, None] = None,
X: Union[pd.DataFrame, np.ndarray, None] = None,
) -> float:
assert y_true is not None and y_pred is not None and X is not None
bal_acc = self.bal_acc_scorer.score_data(y_true, y_pred, X)
symm_di = self.symm_di_scorer.score_data(y_true, y_pred, X)
return self._blend_metrics(bal_acc, symm_di)
def score_estimator(
self,
estimator: TrainedOperator,
X: Union[pd.DataFrame, np.ndarray],
y: Union[pd.Series, np.ndarray],
) -> float:
bal_acc = self.bal_acc_scorer.score_estimator(estimator, X, y)
symm_di = self.symm_di_scorer.score_estimator(estimator, X, y)
return self._blend_metrics(bal_acc, symm_di)
def __call__(
self,
estimator: TrainedOperator,
X: Union[pd.DataFrame, np.ndarray],
y: Union[pd.Series, np.ndarray],
) -> float:
return self.score_estimator(estimator, X, y)
def balanced_accuracy_and_disparate_impact(
favorable_labels: _FAV_LABELS_TYPE,
protected_attributes: List[JSON_TYPE],
unfavorable_labels: Optional[_FAV_LABELS_TYPE] = None,
fairness_weight: float = 0.5,
) -> _BalancedAccuracyAndDisparateImpact:
"""
Create a scikit-learn compatible blended scorer for `balanced accuracy`_
and `symmetric disparate impact`_ given the fairness info.
The scorer is suitable for classification problems,
with higher resulting scores indicating better outcomes.
    The result is a linear combination of balanced accuracy and
symmetric disparate impact, and is between 0 and 1.
This metric can be used as the `scoring` argument
of an optimizer such as `Hyperopt`_, as shown in this `demo`_.
.. _`balanced accuracy`: https://scikit-learn.org/stable/modules/generated/sklearn.metrics.balanced_accuracy_score.html
.. _`symmetric disparate impact`: lale.lib.aif360.util.html#lale.lib.aif360.util.symmetric_disparate_impact
.. _`Hyperopt`: lale.lib.lale.hyperopt.html#lale.lib.lale.hyperopt.Hyperopt
.. _`demo`: https://nbviewer.jupyter.org/github/IBM/lale/blob/master/examples/demo_aif360.ipynb
"""
return _BalancedAccuracyAndDisparateImpact(
favorable_labels, protected_attributes, unfavorable_labels, fairness_weight
)
balanced_accuracy_and_disparate_impact.__doc__ = (
str(balanced_accuracy_and_disparate_impact.__doc__) + _BLENDED_SCORER_DOCSTRING
)
class _DisparateImpact(_DIorSPDScorerFactory, MetricMonoidFactory[_DIorSPDData]):
def __init__(
self,
favorable_labels: _FAV_LABELS_TYPE,
protected_attributes: List[JSON_TYPE],
unfavorable_labels: Optional[_FAV_LABELS_TYPE],
):
super().__init__(
"disparate_impact",
favorable_labels,
protected_attributes,
unfavorable_labels,
)
def from_monoid(self, monoid: _DIorSPDData) -> float:
numerator = monoid.priv0_fav1 / np.float64(
monoid.priv0_fav0 + monoid.priv0_fav1
)
denominator = monoid.priv1_fav1 / np.float64(
monoid.priv1_fav0 + monoid.priv1_fav1
)
return float(numerator / denominator)
def disparate_impact(
favorable_labels: _FAV_LABELS_TYPE,
protected_attributes: List[JSON_TYPE],
unfavorable_labels: Optional[_FAV_LABELS_TYPE] = None,
) -> _DisparateImpact:
r"""
Create a scikit-learn compatible `disparate_impact`_ scorer given
the fairness info (`Feldman et al. 2015`_). Ratio of rate of
favorable outcome for the unprivileged group to that of the
privileged group.
.. math::
\frac{\text{Pr}(Y = \text{favorable} | D = \text{unprivileged})}
{\text{Pr}(Y = \text{favorable} | D = \text{privileged})}
In the case of multiple protected attributes,
`D=privileged` means all protected attributes of the sample have
corresponding privileged values in the reference group, and
`D=unprivileged` means all protected attributes of the sample have
corresponding unprivileged values in the monitored group.
The ideal value of this metric is 1. A value <1 implies a higher
benefit for the privileged group and a value >1 implies a higher
benefit for the unprivileged group. Fairness for this metric is
between 0.8 and 1.25.
.. _`disparate_impact`: https://aif360.readthedocs.io/en/latest/modules/generated/aif360.metrics.BinaryLabelDatasetMetric.html#aif360.metrics.BinaryLabelDatasetMetric.disparate_impact
.. _`Feldman et al. 2015`: https://doi.org/10.1145/2783258.2783311"""
return _DisparateImpact(favorable_labels, protected_attributes, unfavorable_labels)
disparate_impact.__doc__ = str(disparate_impact.__doc__) + _SCORER_DOCSTRING
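# Minimal usage sketch for the disparate impact scorer (the fairness info,
# trained estimator, and data are hypothetical):
#
#   di_scorer = disparate_impact(**fairness_info)
#   di_scorer(trained_estimator, test_X, test_y)  # same as score_estimator
#   di_scorer.score_data(y_true=test_y, y_pred=predictions, X=test_X)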
class _EqualOpportunityDifference(
_AODorEODScorerFactory, MetricMonoidFactory[_AODorEODData]
):
def __init__(
self,
favorable_labels: _FAV_LABELS_TYPE,
protected_attributes: List[JSON_TYPE],
unfavorable_labels: Optional[_FAV_LABELS_TYPE],
):
super().__init__(
"equal_opportunity_difference",
favorable_labels,
protected_attributes,
unfavorable_labels,
)
def from_monoid(self, monoid: _AODorEODData) -> float:
tpr_priv0 = monoid.tru1_pred1_priv0 / np.float64(
monoid.tru1_pred1_priv0 + monoid.tru1_pred0_priv0
)
tpr_priv1 = monoid.tru1_pred1_priv1 / np.float64(
monoid.tru1_pred1_priv1 + monoid.tru1_pred0_priv1
)
return tpr_priv0 - tpr_priv1 # type: ignore
def equal_opportunity_difference(
favorable_labels: _FAV_LABELS_TYPE,
protected_attributes: List[JSON_TYPE],
unfavorable_labels: Optional[_FAV_LABELS_TYPE] = None,
) -> _EqualOpportunityDifference:
r"""
Create a scikit-learn compatible `equal opportunity difference`_
scorer given the fairness info. Difference of true positive rates
between the unprivileged and the privileged groups. The true
positive rate is the ratio of true positives to the total number
of actual positives for a given group.
.. math::
\text{TPR}_{D = \text{unprivileged}} - \text{TPR}_{D = \text{privileged}}
The ideal value is 0. A value of <0 implies disparate benefit for the
privileged group and a value >0 implies disparate benefit for the
unprivileged group. Fairness for this metric is between -0.1 and 0.1.
.. _`equal opportunity difference`: https://aif360.readthedocs.io/en/latest/modules/generated/aif360.metrics.ClassificationMetric.html#aif360.metrics.ClassificationMetric.equal_opportunity_difference
"""
return _EqualOpportunityDifference(
favorable_labels,
protected_attributes,
unfavorable_labels,
)
equal_opportunity_difference.__doc__ = (
str(equal_opportunity_difference.__doc__) + _SCORER_DOCSTRING
)
class _F1AndSymmDIData(MetricMonoid):
def __init__(
self,
f1_data: lale.lib.rasl.metrics._F1Data,
symm_di_data: _DIorSPDData,
):
self.f1_data = f1_data
self.symm_di_data = symm_di_data
def combine(self, other: "_F1AndSymmDIData") -> "_F1AndSymmDIData":
return _F1AndSymmDIData(
self.f1_data.combine(other.f1_data),
self.symm_di_data.combine(other.symm_di_data),
)
class _F1AndDisparateImpact(MetricMonoidFactory[_F1AndSymmDIData]):
def __init__(
self,
favorable_labels: _FAV_LABELS_TYPE,
protected_attributes: List[JSON_TYPE],
unfavorable_labels: Optional[_FAV_LABELS_TYPE],
fairness_weight: float,
):
from lale.lib.aif360 import ProtectedAttributesEncoder
if fairness_weight < 0.0 or fairness_weight > 1.0:
logger.warning(
f"invalid fairness_weight {fairness_weight}, setting it to 0.5"
)
fairness_weight = 0.5
self.prot_attr_enc = ProtectedAttributesEncoder(
favorable_labels=favorable_labels,
protected_attributes=protected_attributes,
unfavorable_labels=unfavorable_labels,
remainder="drop",
)
self.f1_scorer = lale.lib.rasl.get_scorer("f1", pos_label=1)
self.symm_di_scorer = symmetric_disparate_impact(
favorable_labels, protected_attributes, unfavorable_labels
)
self.fairness_weight = fairness_weight
def _blend_metrics(self, f1: float, symm_di: float) -> float:
if f1 < 0.0 or f1 > 1.0:
logger.warning(f"invalid f1 {f1}, setting it to zero")
f1 = 0.0
if symm_di < 0.0 or symm_di > 1.0 or np.isinf(symm_di) or np.isnan(symm_di):
logger.warning(f"invalid symm_di {symm_di}, setting it to zero")
symm_di = 0.0
result = (1 - self.fairness_weight) * f1 + self.fairness_weight * symm_di
if result < 0.0 or result > 1.0:
logger.warning(
f"unexpected result {result} for f1 {f1} and symm_di {symm_di}"
)
return result
def _encode_batch(self, batch: _Batch_yyX) -> _Batch_yyX:
y_true, y_pred, X = batch
assert y_true is not None and y_pred is not None, batch
y_pred = _y_pred_series(y_true, y_pred, X)
_, enc_y_true = self.prot_attr_enc.transform_X_y(X, y_true)
_, enc_y_pred = self.prot_attr_enc.transform_X_y(X, y_pred)
return enc_y_true, enc_y_pred, X
def to_monoid(self, batch: _Batch_yyX) -> _F1AndSymmDIData:
return _F1AndSymmDIData(
self.f1_scorer.to_monoid(self._encode_batch(batch)),
self.symm_di_scorer.to_monoid(batch),
)
def from_monoid(self, monoid: _F1AndSymmDIData) -> float:
f1 = self.f1_scorer.from_monoid(monoid.f1_data)
symm_di = self.symm_di_scorer.from_monoid(monoid.symm_di_data)
return self._blend_metrics(f1, symm_di)
def score_data(
self,
y_true: Union[pd.Series, np.ndarray, None] = None,
y_pred: Union[pd.Series, np.ndarray, None] = None,
X: Union[pd.DataFrame, np.ndarray, None] = None,
) -> float:
assert y_true is not None and y_pred is not None and X is not None
enc_y_true, enc_y_pred, _ = self._encode_batch((y_true, y_pred, X))
f1 = self.f1_scorer.score_data(enc_y_true, enc_y_pred, X)
symm_di = self.symm_di_scorer.score_data(y_true, y_pred, X)
return self._blend_metrics(f1, symm_di)
def score_estimator(
self,
estimator: TrainedOperator,
X: Union[pd.DataFrame, np.ndarray],
y: Union[pd.Series, np.ndarray],
) -> float:
return self.score_data(y_true=y, y_pred=estimator.predict(X), X=X)
def __call__(
self,
estimator: TrainedOperator,
X: Union[pd.DataFrame, np.ndarray],
y: Union[pd.Series, np.ndarray],
) -> float:
return self.score_estimator(estimator, X, y)
def f1_and_disparate_impact(
favorable_labels: _FAV_LABELS_TYPE,
protected_attributes: List[JSON_TYPE],
unfavorable_labels: Optional[_FAV_LABELS_TYPE] = None,
fairness_weight: float = 0.5,
) -> _F1AndDisparateImpact:
"""
Create a scikit-learn compatible blended scorer for `f1`_
and `symmetric disparate impact`_ given the fairness info.
The scorer is suitable for classification problems,
with higher resulting scores indicating better outcomes.
The result is a linear combination of F1 and
symmetric disparate impact, and is between 0 and 1.
This metric can be used as the `scoring` argument
of an optimizer such as `Hyperopt`_, as shown in this `demo`_.
.. _`f1`: https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html
.. _`symmetric disparate impact`: lale.lib.aif360.util.html#lale.lib.aif360.util.symmetric_disparate_impact
.. _`Hyperopt`: lale.lib.lale.hyperopt.html#lale.lib.lale.hyperopt.Hyperopt
.. _`demo`: https://nbviewer.jupyter.org/github/IBM/lale/blob/master/examples/demo_aif360.ipynb
"""
return _F1AndDisparateImpact(
favorable_labels, protected_attributes, unfavorable_labels, fairness_weight
)
f1_and_disparate_impact.__doc__ = (
str(f1_and_disparate_impact.__doc__) + _BLENDED_SCORER_DOCSTRING
)
class _R2AndSymmDIData(MetricMonoid):
def __init__(
self,
r2_data: lale.lib.rasl.metrics._R2Data,
symm_di_data: _DIorSPDData,
):
self.r2_data = r2_data
self.symm_di_data = symm_di_data
def combine(self, other: "_R2AndSymmDIData") -> "_R2AndSymmDIData":
return _R2AndSymmDIData(
self.r2_data.combine(other.r2_data),
self.symm_di_data.combine(other.symm_di_data),
)
class _R2AndDisparateImpact(MetricMonoidFactory[_R2AndSymmDIData]):
def __init__(
self,
favorable_labels: _FAV_LABELS_TYPE,
protected_attributes: List[JSON_TYPE],
unfavorable_labels: Optional[_FAV_LABELS_TYPE],
fairness_weight: float,
):
if fairness_weight < 0.0 or fairness_weight > 1.0:
logger.warning(
f"invalid fairness_weight {fairness_weight}, setting it to 0.5"
)
fairness_weight = 0.5
self.r2_scorer = lale.lib.rasl.get_scorer("r2")
self.symm_di_scorer = symmetric_disparate_impact(
favorable_labels, protected_attributes, unfavorable_labels
)
self.fairness_weight = fairness_weight
def _blend_metrics(self, r2: float, symm_di: float) -> float:
if r2 > 1.0:
logger.warning(f"invalid r2 {r2}, setting it to float min")
r2 = cast(float, np.finfo(np.float32).min)
if symm_di < 0.0 or symm_di > 1.0 or np.isinf(symm_di) or np.isnan(symm_di):
logger.warning(f"invalid symm_di {symm_di}, setting it to zero")
symm_di = 0.0
pos_r2 = 1 / (2.0 - r2)
result = (1 - self.fairness_weight) * pos_r2 + self.fairness_weight * symm_di
if result < 0.0 or result > 1.0:
logger.warning(
f"unexpected result {result} for r2 {r2} and symm_di {symm_di}"
)
return result
def to_monoid(self, batch: _Batch_yyX) -> _R2AndSymmDIData:
return _R2AndSymmDIData(
self.r2_scorer.to_monoid(batch), self.symm_di_scorer.to_monoid(batch)
)
def from_monoid(self, monoid: _R2AndSymmDIData) -> float:
r2 = self.r2_scorer.from_monoid(monoid.r2_data)
symm_di = self.symm_di_scorer.from_monoid(monoid.symm_di_data)
return self._blend_metrics(r2, symm_di)
def score_data(
self,
y_true: Union[pd.Series, np.ndarray, None] = None,
y_pred: Union[pd.Series, np.ndarray, None] = None,
X: Union[pd.DataFrame, np.ndarray, None] = None,
) -> float:
assert y_true is not None and y_pred is not None and X is not None
r2 = self.r2_scorer.score_data(y_true, y_pred, X)
symm_di = self.symm_di_scorer.score_data(y_true, y_pred, X)
return self._blend_metrics(r2, symm_di)
def score_estimator(
self,
estimator: TrainedOperator,
X: Union[pd.DataFrame, np.ndarray],
y: Union[pd.Series, np.ndarray],
) -> float:
r2 = self.r2_scorer.score_estimator(estimator, X, y)
symm_di = self.symm_di_scorer.score_estimator(estimator, X, y)
return self._blend_metrics(r2, symm_di)
def __call__(
self,
estimator: TrainedOperator,
X: Union[pd.DataFrame, np.ndarray],
y: Union[pd.Series, np.ndarray],
) -> float:
return self.score_estimator(estimator, X, y)
def r2_and_disparate_impact(
favorable_labels: _FAV_LABELS_TYPE,
protected_attributes: List[JSON_TYPE],
unfavorable_labels: Optional[_FAV_LABELS_TYPE] = None,
fairness_weight: float = 0.5,
) -> _R2AndDisparateImpact:
"""
Create a scikit-learn compatible blended scorer for `R2 score`_
and `symmetric disparate impact`_ given the fairness info.
The scorer is suitable for regression problems,
with higher resulting scores indicating better outcomes.
It first scales R2, which might be negative, to be between 0 and 1.
Then, the result is a linear combination of the scaled R2 and
symmetric disparate impact, and is also between 0 and 1.
This metric can be used as the `scoring` argument
of an optimizer such as `Hyperopt`_.
.. _`R2 score`: https://scikit-learn.org/stable/modules/generated/sklearn.metrics.r2_score.html
.. _`symmetric disparate impact`: lale.lib.aif360.util.html#lale.lib.aif360.util.symmetric_disparate_impact
.. _`Hyperopt`: lale.lib.lale.hyperopt.html#lale.lib.lale.hyperopt.Hyperopt"""
return _R2AndDisparateImpact(
favorable_labels, protected_attributes, unfavorable_labels, fairness_weight
)
r2_and_disparate_impact.__doc__ = (
str(r2_and_disparate_impact.__doc__) + _BLENDED_SCORER_DOCSTRING
)
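# Worked example for the R2 scaling in _blend_metrics above (numbers made up):
# the scaled value is 1 / (2 - r2), so r2 = 1.0 maps to 1.0, r2 = 0.0 maps to
# 0.5, and r2 = -3.0 maps to 0.2; with fairness_weight 0.5 and a symmetric
# disparate impact of 0.9, an r2 of 0.0 blends to 0.5 * 0.5 + 0.5 * 0.9 = 0.7.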
class _StatisticalParityDifference(
_DIorSPDScorerFactory, MetricMonoidFactory[_DIorSPDData]
):
def __init__(
self,
favorable_labels: _FAV_LABELS_TYPE,
protected_attributes: List[JSON_TYPE],
unfavorable_labels: Optional[_FAV_LABELS_TYPE],
):
super().__init__(
"statistical_parity_difference",
favorable_labels,
protected_attributes,
unfavorable_labels,
)
def from_monoid(self, monoid: _DIorSPDData) -> float:
minuend = monoid.priv0_fav1 / np.float64(monoid.priv0_fav0 + monoid.priv0_fav1)
subtrahend = monoid.priv1_fav1 / np.float64(
monoid.priv1_fav0 + monoid.priv1_fav1
)
return float(minuend - subtrahend)
def statistical_parity_difference(
favorable_labels: _FAV_LABELS_TYPE,
protected_attributes: List[JSON_TYPE],
unfavorable_labels: Optional[_FAV_LABELS_TYPE] = None,
) -> _StatisticalParityDifference:
r"""
Create a scikit-learn compatible `statistical parity difference`_
    scorer given the fairness info. Difference between the rate of
    favorable outcomes received by the unprivileged group and the rate
    received by the privileged group.
.. math::
\text{Pr}(Y = \text{favorable} | D = \text{unprivileged})
- \text{Pr}(Y = \text{favorable} | D = \text{privileged})
The ideal value of this metric is 0. A value of <0 implies higher
benefit for the privileged group and a value >0 implies higher
benefit for the unprivileged group. Fairness for this metric is
between -0.1 and 0.1. For a discussion of potential issues with
this metric see (`Dwork et al. 2012`_).
.. _`statistical parity difference`: https://aif360.readthedocs.io/en/latest/modules/generated/aif360.metrics.BinaryLabelDatasetMetric.html#aif360.metrics.BinaryLabelDatasetMetric.statistical_parity_difference
.. _`Dwork et al. 2012`: https://doi.org/10.1145/2090236.2090255"""
return _StatisticalParityDifference(
favorable_labels,
protected_attributes,
unfavorable_labels,
)
statistical_parity_difference.__doc__ = (
str(statistical_parity_difference.__doc__) + _SCORER_DOCSTRING
)
class _SymmetricDisparateImpact(MetricMonoidFactory[_DIorSPDData]):
def __init__(
self,
favorable_labels: _FAV_LABELS_TYPE,
protected_attributes: List[JSON_TYPE],
unfavorable_labels: Optional[_FAV_LABELS_TYPE],
):
self.disparate_impact_scorer = disparate_impact(
favorable_labels, protected_attributes, unfavorable_labels
)
def _make_symmetric(self, disp_impact: float) -> float:
if np.isnan(disp_impact): # empty privileged or unprivileged groups
return disp_impact
if disp_impact <= 1.0:
return disp_impact
return 1.0 / disp_impact
def to_monoid(self, batch: _Batch_yyX) -> _DIorSPDData:
return self.disparate_impact_scorer.to_monoid(batch)
def from_monoid(self, monoid: _DIorSPDData) -> float:
return self._make_symmetric(self.disparate_impact_scorer.from_monoid(monoid))
def score_data(
self,
y_true: Union[pd.Series, np.ndarray, None] = None,
y_pred: Union[pd.Series, np.ndarray, None] = None,
X: Union[pd.DataFrame, np.ndarray, None] = None,
) -> float:
assert y_pred is not None and X is not None
disp_impact = self.disparate_impact_scorer.score_data(y_true, y_pred, X)
return self._make_symmetric(disp_impact)
def score_estimator(
self,
estimator: TrainedOperator,
X: Union[pd.DataFrame, np.ndarray],
y: Union[pd.Series, np.ndarray],
) -> float:
disp_impact = self.disparate_impact_scorer.score_estimator(estimator, X, y)
return self._make_symmetric(disp_impact)
def __call__(
self,
estimator: TrainedOperator,
X: Union[pd.DataFrame, np.ndarray],
y: Union[pd.Series, np.ndarray],
) -> float:
return self.score_estimator(estimator, X, y)
def symmetric_disparate_impact(
favorable_labels: _FAV_LABELS_TYPE,
protected_attributes: List[JSON_TYPE],
unfavorable_labels: Optional[_FAV_LABELS_TYPE] = None,
) -> _SymmetricDisparateImpact:
"""
Create a scikit-learn compatible scorer for symmetric `disparate impact`_ given the fairness info.
For disparate impact <= 1.0, return that value, otherwise return
its inverse. The result is between 0 and 1. The higher this
metric, the better, and the ideal value is 1. A value <1 implies
that either the privileged group or the unprivileged group is
receiving a disparate benefit.
.. _`disparate impact`: lale.lib.aif360.util.html#lale.lib.aif360.util.disparate_impact
"""
return _SymmetricDisparateImpact(
favorable_labels, protected_attributes, unfavorable_labels
)
symmetric_disparate_impact.__doc__ = (
str(symmetric_disparate_impact.__doc__) + _SCORER_DOCSTRING
)
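# Example for the symmetric variant (a minimal sketch with the same assumed
# names as above): because of _make_symmetric, a disparate impact of 0.8 and
# a disparate impact of 1.25 both map to a symmetric disparate impact of 0.8.
#
#   sym_di_scorer = symmetric_disparate_impact(**fairness_info)
#   score = sym_di_scorer(trained_pipeline, test_X, test_y)  # in [0, 1], ideal 1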
def theil_index(
favorable_labels: _FAV_LABELS_TYPE,
protected_attributes: List[JSON_TYPE],
unfavorable_labels: Optional[_FAV_LABELS_TYPE] = None,
) -> _AIF360ScorerFactory:
r"""
Create a scikit-learn compatible `Theil index`_ scorer given the
fairness info (`Speicher et al. 2018`_). Generalized entropy of
benefit for all individuals in the dataset, with alpha=1. Measures
the inequality in benefit allocation for individuals. With
:math:`b_i = \hat{y}_i - y_i + 1`:
.. math::
\mathcal{E}(\alpha) = \begin{cases}
\frac{1}{n \alpha (\alpha-1)}\sum_{i=1}^n\left[\left(\frac{b_i}{\mu}\right)^\alpha - 1\right],& \alpha \ne 0, 1,\\
\frac{1}{n}\sum_{i=1}^n\frac{b_{i}}{\mu}\ln\frac{b_{i}}{\mu},& \alpha=1,\\
-\frac{1}{n}\sum_{i=1}^n\ln\frac{b_{i}}{\mu},& \alpha=0.
\end{cases}
    A value of 0 implies perfect fairness. Fairness is indicated by
    lower scores; higher scores are problematic.
.. _`Theil index`: https://aif360.readthedocs.io/en/latest/modules/generated/aif360.metrics.ClassificationMetric.html#aif360.metrics.ClassificationMetric.theil_index
.. _`Speicher et al. 2018`: https://doi.org/10.1145/3219819.3220046"""
return _AIF360ScorerFactory(
"theil_index", favorable_labels, protected_attributes, unfavorable_labels
)
theil_index.__doc__ = str(theil_index.__doc__) + _SCORER_DOCSTRING
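# Worked illustration for theil_index (hedged, with tiny hypothetical numbers):
# with encoded labels y = [1, 0, 1] and predictions y_hat = [1, 1, 0], the
# benefits are b_i = y_hat_i - y_i + 1 = [1, 2, 0] with mean 1, so the Theil
# index is (1/3) * (1*ln(1) + 2*ln(2) + 0) ~= 0.46 (treating the 0*ln(0) term
# as 0); a perfectly accurate classifier gives b = [1, 1, 1] and an index of 0.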
#####################################################################
# Stratification
#####################################################################
def _column_for_stratification(
X: Union[pd.DataFrame, np.ndarray],
y: Union[pd.Series, np.ndarray],
favorable_labels: _FAV_LABELS_TYPE,
protected_attributes: List[JSON_TYPE],
unfavorable_labels: Optional[_FAV_LABELS_TYPE] = None,
) -> pd.Series:
from lale.lib.aif360 import ProtectedAttributesEncoder
prot_attr_enc = ProtectedAttributesEncoder(
favorable_labels=favorable_labels,
protected_attributes=protected_attributes,
unfavorable_labels=unfavorable_labels,
remainder="drop",
)
encoded_X, encoded_y = prot_attr_enc.transform_X_y(X, y)
df = pd.concat([encoded_X, encoded_y], axis=1)
def label_for_stratification(row):
return "".join(["T" if v == 1 else "F" if v == 0 else "N" for v in row])
result = df.apply(label_for_stratification, axis=1)
result.name = "stratify"
return result
def fair_stratified_train_test_split(
X,
y,
*arrays,
favorable_labels: _FAV_LABELS_TYPE,
protected_attributes: List[JSON_TYPE],
unfavorable_labels: Optional[_FAV_LABELS_TYPE] = None,
test_size: float = 0.25,
random_state: randomstate_type = None,
) -> Tuple:
"""
Splits X and y into random train and test subsets stratified by
labels and protected attributes.
    Behaves similarly to the `train_test_split`_ function from scikit-learn.
.. _`train_test_split`: https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html
Parameters
----------
X : array
Features including protected attributes as numpy ndarray or pandas dataframe.
y : array
Labels as numpy ndarray or pandas series.
*arrays : array
Sequence of additional arrays with same length as X and y.
favorable_labels : array
Label values which are considered favorable (i.e. "positive").
protected_attributes : array
Features for which fairness is desired.
unfavorable_labels : array or None, default None
Label values which are considered unfavorable (i.e. "negative").
test_size : float or int, default=0.25
If float, should be between 0.0 and 1.0 and represent the proportion of the dataset to include in the test split.
If int, represents the absolute number of test samples.
random_state : int, RandomState instance or None, default=None
Controls the shuffling applied to the data before applying the split.
Pass an integer for reproducible output across multiple function calls.
- None
RandomState used by numpy.random
- numpy.random.RandomState
Use the provided random state, only affecting other users of that same random state instance.
- integer
Explicit seed.
Returns
-------
result : tuple
- item 0: train_X
- item 1: test_X
- item 2: train_y
- item 3: test_y
- item 4+: Each argument in `*arrays`, if any, yields two items in the result, for the two splits of that array.
"""
_validate_fairness_info(
favorable_labels, protected_attributes, unfavorable_labels, True
)
stratify = _column_for_stratification(
X, y, favorable_labels, protected_attributes, unfavorable_labels
)
(
train_X,
test_X,
train_y,
test_y,
*arrays_splits,
) = sklearn.model_selection.train_test_split(
X, y, *arrays, test_size=test_size, random_state=random_state, stratify=stratify
)
if hasattr(X, "json_schema"):
train_X = add_schema_adjusting_n_rows(train_X, X.json_schema)
test_X = add_schema_adjusting_n_rows(test_X, X.json_schema)
if hasattr(y, "json_schema"):
train_y = add_schema_adjusting_n_rows(train_y, y.json_schema)
test_y = add_schema_adjusting_n_rows(test_y, y.json_schema)
return (train_X, test_X, train_y, test_y, *arrays_splits)
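# Example usage (a minimal sketch; ``X``, ``y``, and ``fairness_info`` are
# assumed to be defined elsewhere, with ``fairness_info`` holding the
# favorable_labels and protected_attributes arguments):
#
#   train_X, test_X, train_y, test_y = fair_stratified_train_test_split(
#       X, y, **fairness_info, test_size=0.33, random_state=42
#   )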
class FairStratifiedKFold:
"""
Stratified k-folds cross-validator by labels and protected attributes.
    Behaves similarly to the `StratifiedKFold`_ and `RepeatedStratifiedKFold`_
cross-validation iterators from scikit-learn.
This cross-validation object can be passed to the `cv` argument of
the `auto_configure`_ method.
.. _`StratifiedKFold`: https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.StratifiedKFold.html
.. _`RepeatedStratifiedKFold`: https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.RepeatedStratifiedKFold.html
.. _`auto_configure`: https://lale.readthedocs.io/en/latest/modules/lale.operators.html#lale.operators.PlannedOperator.auto_configure
"""
def __init__(
self,
*,
favorable_labels: _FAV_LABELS_TYPE,
protected_attributes: List[JSON_TYPE],
unfavorable_labels: Optional[_FAV_LABELS_TYPE] = None,
n_splits: int = 5,
n_repeats: int = 1,
shuffle: bool = False,
random_state=None,
):
"""
Parameters
----------
favorable_labels : array
Label values which are considered favorable (i.e. "positive").
protected_attributes : array
Features for which fairness is desired.
unfavorable_labels : array or None, default None
Label values which are considered unfavorable (i.e. "negative").
n_splits : integer, optional, default 5
Number of folds. Must be at least 2.
n_repeats : integer, optional, default 1
Number of times the cross-validator needs to be repeated.
When >1, this behaves like RepeatedStratifiedKFold.
shuffle : boolean, optional, default False
Whether to shuffle each class's samples before splitting into batches.
Ignored when n_repeats>1.
random_state : union type, not for optimizer, default None
When shuffle is True, random_state affects the ordering of the indices.
- None
RandomState used by np.random
- numpy.random.RandomState
Use the provided random state, only affecting other users of that same random state instance.
- integer
Explicit seed.
"""
_validate_fairness_info(
favorable_labels, protected_attributes, unfavorable_labels, True
)
self._fairness_info = {
"favorable_labels": favorable_labels,
"protected_attributes": protected_attributes,
"unfavorable_labels": unfavorable_labels,
}
if n_repeats == 1:
self._stratified_k_fold = sklearn.model_selection.StratifiedKFold(
n_splits=n_splits, shuffle=shuffle, random_state=random_state
)
else:
self._stratified_k_fold = sklearn.model_selection.RepeatedStratifiedKFold(
n_splits=n_splits, n_repeats=n_repeats, random_state=random_state
)
def get_n_splits(self, X=None, y=None, groups=None) -> int:
"""
The number of splitting iterations in the cross-validator.
Parameters
----------
X : Any
Always ignored, exists for compatibility.
y : Any
Always ignored, exists for compatibility.
groups : Any
Always ignored, exists for compatibility.
Returns
-------
integer
The number of splits.
"""
return self._stratified_k_fold.get_n_splits(X, y, groups)
def split(self, X, y, groups=None):
"""
Generate indices to split data into training and test set.
        Parameters
        ----------
        X : array **of** items : array **of** items : Any
Training data, including columns with the protected attributes.
y : union type
Target class labels; the array is over samples.
- array **of** items : float
- array **of** items : string
groups : Any
Always ignored, exists for compatibility.
Returns
        -------
result : tuple
- train
The training set indices for that split.
- test
The testing set indices for that split.
"""
stratify = _column_for_stratification(X, y, **self._fairness_info)
result = self._stratified_k_fold.split(X, stratify, groups)
return result
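# Example usage (a minimal sketch; the Hyperopt import, ``planned_pipeline``,
# ``train_X``, ``train_y``, and ``fairness_info`` are assumptions, not part of
# this module):
#
#   from lale.lib.lale import Hyperopt
#   fair_cv = FairStratifiedKFold(**fairness_info, n_splits=5)
#   best_found = planned_pipeline.auto_configure(
#       train_X, train_y, optimizer=Hyperopt, cv=fair_cv, max_evals=10
#   )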
| 85,188 | 36.511669 | 309 |
py
|
lale
|
lale-master/lale/lib/aif360/__init__.py
|
# Copyright 2019-2023 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Scikit-learn compatible wrappers for several operators and metrics from AIF360_ along with schemas to enable hyperparameter tuning, as well as functions for fetching fairness dataset.
.. _AIF360: https://github.com/IBM/AIF360
All operators and metrics in the Lale wrappers for AIF360 take two
arguments, `favorable_labels` and `protected_attributes`, collectively
referred to as *fairness info*. For example, the following code
indicates that the reference group comprises male values in the
`personal_status` attribute as well as values from 26 to 1000 in the
`age` attribute.
.. code:: Python
creditg_fairness_info = {
"favorable_labels": ["good"],
"protected_attributes": [
{
"feature": "personal_status",
"reference_group": [
"male div/sep", "male mar/wid", "male single",
],
},
{"feature": "age", "reference_group": [[26, 1000]]},
],
}
See the following notebooks for more detailed examples:
* https://nbviewer.jupyter.org/github/IBM/lale/blob/master/examples/demo_aif360.ipynb
* https://nbviewer.jupyter.org/github/IBM/watson-machine-learning-samples/blob/master/cloud/notebooks/python_sdk/experiments/autoai/Use%20Lale%20AIF360%20scorers%20to%20calculate%20and%20mitigate%20bias%20for%20credit%20risk%20AutoAI%20model.ipynb
Pre-Estimator Mitigation Operators:
===================================
* `DisparateImpactRemover`_
* `LFR`_
* `Orbis`_
* `Reweighing`_
In-Estimator Mitigation Operators:
==================================
* `AdversarialDebiasing`_
* `BaggingOrbisClassifier`_
* `GerryFairClassifier`_
* `MetaFairClassifier`_
* `PrejudiceRemover`_
Post-Estimator Mitigation Operators:
====================================
* `CalibratedEqOddsPostprocessing`_
* `EqOddsPostprocessing`_
* `RejectOptionClassification`_
Datasets:
=========
* `fetch_adult_df`_
* `fetch_bank_df`_
* `fetch_compas_df`_
* `fetch_compas_violent_df`_
* `fetch_creditg_df`_
* `fetch_default_credit_df`_
* `fetch_heart_disease_df`_
* `fetch_law_school_df`_
* `fetch_meps_panel19_fy2015_df`_
* `fetch_meps_panel20_fy2015_df`_
* `fetch_meps_panel21_fy2016_df`_
* `fetch_nlsy_df`_
* `fetch_nursery_df`_
* `fetch_ricci_df`_
* `fetch_speeddating_df`_
* `fetch_student_math_df`_
* `fetch_student_por_df`_
* `fetch_tae_df`_
* `fetch_titanic_df`_
* `fetch_us_crime_df`_
Metrics:
========
* `accuracy_and_disparate_impact`_
* `balanced_accuracy_and_disparate_impact`_
* `average_odds_difference`_
* `disparate_impact`_
* `equal_opportunity_difference`_
* `f1_and_disparate_impact`_
* `r2_and_disparate_impact`_
* `statistical_parity_difference`_
* `symmetric_disparate_impact`_
* `theil_index`_
Other Classes and Operators:
============================
* `FairStratifiedKFold`_
* `ProtectedAttributesEncoder`_
* `Redacting`_
Other Functions:
================
* `count_fairness_groups`_
* `dataset_to_pandas`_
* `fair_stratified_train_test_split`_
Mitigator Patterns:
===================
AIF360 provides three kinds of fairness mitigators, illustrated in the
following picture. *Pre-estimator* mitigators transform the data
before it gets to an estimator; *in-estimator* mitigators include
their own estimator; and *post-estimator* mitigators transform
predictions after those come back from an estimator.
.. image:: ../../docs/img/fairness_patterns.png
In the picture, italics indicate parameters of the pattern.
For example, consider the following code:
.. code:: Python
pipeline = LFR(
**fairness_info,
preparation=(
(Project(columns={"type": "string"}) >> OneHotEncoder(handle_unknown="ignore"))
& Project(columns={"type": "number"})
)
>> ConcatFeatures
) >> LogisticRegression(max_iter=1000)
In this example, the *mitigator* is LFR (which is pre-estimator), the
*estimator* is LogisticRegression, and the *preparation* is a
sub-pipeline that one-hot-encodes strings. If all features of the data
are numerical, then the preparation can be omitted. Internally, the
LFR higher-order operator uses two auxiliary operators, Redacting
and ProtectedAttributesEncoder. Redacting sets protected attributes
to a constant to prevent them from directly influencing
fairness-agnostic data preparation or estimators. And the
ProtectedAttributesEncoder encodes protected attributes and labels as
zero or one to simplify the task for the mitigator.
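As a second, purely illustrative sketch (reusing the assumed `fairness_info`
and imports from the example above), an in-estimator mitigator such as
PrejudiceRemover already contains its own classifier, so no separate
estimator is appended after it:
.. code:: Python
    pipeline = PrejudiceRemover(
        **fairness_info,
        preparation=(
            (Project(columns={"type": "string"}) >> OneHotEncoder(handle_unknown="ignore"))
            & Project(columns={"type": "number"})
        )
        >> ConcatFeatures
    )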
.. _`AdversarialDebiasing`: lale.lib.aif360.adversarial_debiasing.html#lale.lib.aif360.adversarial_debiasing.AdversarialDebiasing
.. _`BaggingOrbisClassifier`: lale.lib.aif360.bagging_orbis.html#lale.lib.aif360.bagging_orbis_classifier.BaggingOrbisClassifier
.. _`CalibratedEqOddsPostprocessing`: lale.lib.aif360.calibrated_eq_odds_postprocessing.html#lale.lib.aif360.calibrated_eq_odds_postprocessing.CalibratedEqOddsPostprocessing
.. _`DisparateImpactRemover`: lale.lib.aif360.disparate_impact_remover.html#lale.lib.aif360.disparate_impact_remover.DisparateImpactRemover
.. _`EqOddsPostprocessing`: lale.lib.aif360.eq_odds_postprocessing.html#lale.lib.aif360.eq_odds_postprocessing.EqOddsPostprocessing
.. _`FairStratifiedKFold`: lale.lib.aif360.util.html#lale.lib.aif360.util.FairStratifiedKFold
.. _`LFR`: lale.lib.aif360.lfr.html#lale.lib.aif360.lfr.LFR
.. _`GerryFairClassifier`: lale.lib.aif360.gerry_fair_classifier.html#lale.lib.aif360.gerry_fair_classifier.GerryFairClassifier
.. _`MetaFairClassifier`: lale.lib.aif360.meta_fair_classifier.html#lale.lib.aif360.meta_fair_classifier.MetaFairClassifier
.. _`OptimPreproc`: lale.lib.aif360.optim_preproc.html#lale.lib.aif360.optim_preproc.OptimPreproc
.. _`Orbis`: lale.lib.aif360.orbis.html#lale.lib.aif360.orbis.Orbis
.. _`PrejudiceRemover`: lale.lib.aif360.prejudice_remover.html#lale.lib.aif360.prejudice_remover.PrejudiceRemover
.. _`ProtectedAttributesEncoder`: lale.lib.aif360.protected_attributes_encoder.html#lale.lib.aif360.protected_attributes_encoder.ProtectedAttributesEncoder
.. _`Redacting`: lale.lib.aif360.redacting.html#lale.lib.aif360.redacting.Redacting
.. _`RejectOptionClassification`: lale.lib.aif360.reject_option_classification.html#lale.lib.aif360.reject_option_classification.RejectOptionClassification
.. _`Reweighing`: lale.lib.aif360.reweighing.html#lale.lib.aif360.reweighing.Reweighing
.. _`accuracy_and_disparate_impact`: lale.lib.aif360.util.html#lale.lib.aif360.util.accuracy_and_disparate_impact
.. _`average_odds_difference`: lale.lib.aif360.util.html#lale.lib.aif360.util.average_odds_difference
.. _`balanced_accuracy_and_disparate_impact`: lale.lib.aif360.util.html#lale.lib.aif360.util.balanced_accuracy_and_disparate_impact
.. _`count_fairness_groups`: lale.lib.aif360.util.html#lale.lib.aif360.util.count_fairness_groups
.. _`dataset_to_pandas`: lale.lib.aif360.util.html#lale.lib.aif360.util.dataset_to_pandas
.. _`disparate_impact`: lale.lib.aif360.util.html#lale.lib.aif360.util.disparate_impact
.. _`equal_opportunity_difference`: lale.lib.aif360.util.html#lale.lib.aif360.util.equal_opportunity_difference
.. _`f1_and_disparate_impact`: lale.lib.aif360.util.html#lale.lib.aif360.util.f1_and_disparate_impact
.. _`fair_stratified_train_test_split`: lale.lib.aif360.util.html#lale.lib.aif360.util.fair_stratified_train_test_split
.. _`fetch_adult_df`: lale.lib.aif360.datasets.html#lale.lib.aif360.datasets.fetch_adult_df
.. _`fetch_bank_df`: lale.lib.aif360.datasets.html#lale.lib.aif360.datasets.fetch_bank_df
.. _`fetch_compas_df`: lale.lib.aif360.datasets.html#lale.lib.aif360.datasets.fetch_compas_df
.. _`fetch_compas_violent_df`: lale.lib.aif360.datasets.html#lale.lib.aif360.datasets.fetch_compas_violent_df
.. _`fetch_creditg_df`: lale.lib.aif360.datasets.html#lale.lib.aif360.datasets.fetch_creditg_df
.. _`fetch_default_credit_df`: lale.lib.aif360.datasets.html#lale.lib.aif360.datasets.fetch_default_credit_df
.. _`fetch_heart_disease_df`: lale.lib.aif360.datasets.html#lale.lib.aif360.datasets.fetch_heart_disease_df
.. _`fetch_law_school_df`: lale.lib.aif360.datasets.html#lale.lib.aif360.datasets.fetch_law_school_df
.. _`fetch_meps_panel19_fy2015_df`: lale.lib.aif360.datasets.html#lale.lib.aif360.datasets.fetch_meps_panel19_fy2015_df
.. _`fetch_meps_panel20_fy2015_df`: lale.lib.aif360.datasets.html#lale.lib.aif360.datasets.fetch_meps_panel20_fy2015_df
.. _`fetch_meps_panel21_fy2016_df`: lale.lib.aif360.datasets.html#lale.lib.aif360.datasets.fetch_meps_panel21_fy2016_df
.. _`fetch_nlsy_df`: lale.lib.aif360.datasets.html#lale.lib.aif360.datasets.fetch_nlsy_df
.. _`fetch_nursery_df`: lale.lib.aif360.datasets.html#lale.lib.aif360.datasets.fetch_nursery_df
.. _`fetch_ricci_df`: lale.lib.aif360.datasets.html#lale.lib.aif360.datasets.fetch_ricci_df
.. _`fetch_speeddating_df`: lale.lib.aif360.datasets.html#lale.lib.aif360.datasets.fetch_speeddating_df
.. _`fetch_student_math_df`: lale.lib.aif360.datasets.html#lale.lib.aif360.datasets.fetch_student_math_df
.. _`fetch_student_por_df`: lale.lib.aif360.datasets.html#lale.lib.aif360.datasets.fetch_student_por_df
.. _`fetch_tae_df`: lale.lib.aif360.datasets.html#lale.lib.aif360.datasets.fetch_tae_df
.. _`fetch_titanic_df`: lale.lib.aif360.datasets.html#lale.lib.aif360.datasets.fetch_titanic_df
.. _`fetch_us_crime_df`: lale.lib.aif360.datasets.html#lale.lib.aif360.datasets.fetch_us_crime_df
.. _`r2_and_disparate_impact`: lale.lib.aif360.util.html#lale.lib.aif360.util.r2_and_disparate_impact
.. _`statistical_parity_difference`: lale.lib.aif360.util.html#lale.lib.aif360.util.statistical_parity_difference
.. _`symmetric_disparate_impact`: lale.lib.aif360.util.html#lale.lib.aif360.util.symmetric_disparate_impact
.. _`theil_index`: lale.lib.aif360.util.html#lale.lib.aif360.util.theil_index
"""
# Note: all imports should be done as
# from .xxx import XXX as XXX
# this ensures that pyright considers them to be publicly available
# and not private imports (this affects lale users that use pyright)
from ._suppress_aif360_warnings import dummy as _dummy_from_suppress_warnings
from .adversarial_debiasing import AdversarialDebiasing as AdversarialDebiasing
from .bagging_orbis_classifier import BaggingOrbisClassifier as BaggingOrbisClassifier
from .calibrated_eq_odds_postprocessing import (
CalibratedEqOddsPostprocessing as CalibratedEqOddsPostprocessing,
)
from .datasets import _fetch_boston_housing_df as _fetch_boston_housing_df
from .datasets import fetch_adult_df as fetch_adult_df
from .datasets import fetch_bank_df as fetch_bank_df
from .datasets import fetch_compas_df as fetch_compas_df
from .datasets import fetch_compas_violent_df as fetch_compas_violent_df
from .datasets import fetch_creditg_df as fetch_creditg_df
from .datasets import fetch_default_credit_df as fetch_default_credit_df
from .datasets import fetch_heart_disease_df as fetch_heart_disease_df
from .datasets import fetch_law_school_df as fetch_law_school_df
from .datasets import fetch_meps_panel19_fy2015_df as fetch_meps_panel19_fy2015_df
from .datasets import fetch_meps_panel20_fy2015_df as fetch_meps_panel20_fy2015_df
from .datasets import fetch_meps_panel21_fy2016_df as fetch_meps_panel21_fy2016_df
from .datasets import fetch_nlsy_df as fetch_nlsy_df
from .datasets import fetch_nursery_df as fetch_nursery_df
from .datasets import fetch_ricci_df as fetch_ricci_df
from .datasets import fetch_speeddating_df as fetch_speeddating_df
from .datasets import fetch_student_math_df as fetch_student_math_df
from .datasets import fetch_student_por_df as fetch_student_por_df
from .datasets import fetch_tae_df as fetch_tae_df
from .datasets import fetch_titanic_df as fetch_titanic_df
from .datasets import fetch_us_crime_df as fetch_us_crime_df
from .disparate_impact_remover import DisparateImpactRemover as DisparateImpactRemover
from .eq_odds_postprocessing import EqOddsPostprocessing as EqOddsPostprocessing
from .gerry_fair_classifier import GerryFairClassifier as GerryFairClassifier
from .lfr import LFR as LFR
from .meta_fair_classifier import MetaFairClassifier as MetaFairClassifier
from .optim_preproc import OptimPreproc as OptimPreproc
from .orbis import Orbis as Orbis
from .prejudice_remover import PrejudiceRemover as PrejudiceRemover
from .protected_attributes_encoder import (
ProtectedAttributesEncoder as ProtectedAttributesEncoder,
)
from .redacting import Redacting as Redacting
from .reject_option_classification import (
RejectOptionClassification as RejectOptionClassification,
)
from .reweighing import Reweighing as Reweighing
from .util import FAIRNESS_INFO_SCHEMA as FAIRNESS_INFO_SCHEMA
from .util import FairStratifiedKFold as FairStratifiedKFold
from .util import accuracy_and_disparate_impact as accuracy_and_disparate_impact
from .util import average_odds_difference as average_odds_difference
from .util import (
balanced_accuracy_and_disparate_impact as balanced_accuracy_and_disparate_impact,
)
from .util import count_fairness_groups as count_fairness_groups
from .util import dataset_to_pandas as dataset_to_pandas
from .util import disparate_impact as disparate_impact
from .util import equal_opportunity_difference as equal_opportunity_difference
from .util import f1_and_disparate_impact as f1_and_disparate_impact
from .util import fair_stratified_train_test_split as fair_stratified_train_test_split
from .util import r2_and_disparate_impact as r2_and_disparate_impact
from .util import statistical_parity_difference as statistical_parity_difference
from .util import symmetric_disparate_impact as symmetric_disparate_impact
from .util import theil_index as theil_index
| 14,123 | 51.505576 | 247 |
py
|
lale
|
lale-master/lale/lib/aif360/reweighing.py
|
# Copyright 2020-2023 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import aif360.algorithms.preprocessing
import lale.docstrings
import lale.lib.lale
import lale.operators
from .protected_attributes_encoder import ProtectedAttributesEncoder
from .redacting import Redacting
from .util import (
_categorical_fairness_properties,
_categorical_input_predict_schema,
_categorical_output_predict_schema,
_categorical_supervised_input_fit_schema,
_PandasToDatasetConverter,
_validate_fairness_info,
)
class _ReweighingImpl:
def __init__(
self,
*,
favorable_labels,
protected_attributes,
estimator,
unfavorable_labels=None,
redact=True
):
_validate_fairness_info(
favorable_labels, protected_attributes, unfavorable_labels, False
)
self.favorable_labels = favorable_labels
self.protected_attributes = protected_attributes
self.estimator = estimator
self.unfavorable_labels = unfavorable_labels
self.redact = redact
def fit(self, X, y):
fairness_info = {
"favorable_labels": self.favorable_labels,
"protected_attributes": self.protected_attributes,
"unfavorable_labels": self.unfavorable_labels,
}
prot_attr_enc = ProtectedAttributesEncoder(
**fairness_info,
remainder="drop",
)
encoded_X, encoded_y = prot_attr_enc.transform_X_y(X, y)
prot_attr_names = [pa["feature"] for pa in self.protected_attributes]
pandas_to_dataset = _PandasToDatasetConverter(
favorable_label=1,
unfavorable_label=0,
protected_attribute_names=prot_attr_names,
)
encoded_data = pandas_to_dataset.convert(encoded_X, encoded_y)
unprivileged_groups = [{name: 0 for name in prot_attr_names}]
privileged_groups = [{name: 1 for name in prot_attr_names}]
reweighing_trainable = aif360.algorithms.preprocessing.Reweighing(
unprivileged_groups=unprivileged_groups,
privileged_groups=privileged_groups,
)
reweighing_trained = reweighing_trainable.fit(encoded_data)
reweighted_data = reweighing_trained.transform(encoded_data)
sample_weight = reweighted_data.instance_weights
if self.redact:
redacting_trainable = Redacting(**fairness_info)
self.redacting = redacting_trainable.fit(X)
else:
self.redacting = lale.lib.lale.NoOp
redacted_X = self.redacting.transform(X)
if isinstance(self.estimator, lale.operators.TrainablePipeline):
trainable_prefix = self.estimator.remove_last()
trainable_suffix = self.estimator.get_last()
assert trainable_suffix is not None
trained_prefix = trainable_prefix.fit(X, y)
transformed_X = trained_prefix.transform(redacted_X)
trained_suffix = trainable_suffix.fit(
transformed_X, y, sample_weight=sample_weight
)
self.estimator = trained_prefix >> trained_suffix
else:
self.estimator = self.estimator.fit(
redacted_X, y, sample_weight=sample_weight
)
return self
def predict(self, X, **predict_params):
redacted_X = self.redacting.transform(X)
result = self.estimator.predict(redacted_X, **predict_params)
return result
_input_fit_schema = _categorical_supervised_input_fit_schema
_input_predict_schema = _categorical_input_predict_schema
_output_predict_schema = _categorical_output_predict_schema
_hyperparams_schema = {
"description": "Hyperparameter schema.",
"allOf": [
{
"type": "object",
"additionalProperties": False,
"required": [
*_categorical_fairness_properties.keys(),
"estimator",
"redact",
],
"relevantToOptimizer": [],
"properties": {
**_categorical_fairness_properties,
"estimator": {
"description": "Nested classifier, fit method must support sample_weight.",
"laleType": "operator",
},
"redact": {
"description": "Whether to redact protected attributes before data preparation (recommended) or not.",
"type": "boolean",
"default": True,
},
},
}
],
}
_combined_schemas = {
"description": """`Reweighing`_ pre-estimator fairness mitigator. Weights the examples in each (group, label) combination differently to ensure fairness before classification (`Kamiran and Calders 2012`_).
.. _`Reweighing`: https://aif360.readthedocs.io/en/latest/modules/generated/aif360.sklearn.preprocessing.Reweighing.html
.. _`Kamiran and Calders 2012`: https://doi.org/10.1007/s10115-011-0463-8
""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.aif360.reweighing.html#lale.lib.aif360.reweighing.Reweighing",
"import_from": "aif360.sklearn.preprocessing",
"type": "object",
"tags": {"pre": [], "op": ["estimator", "classifier"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
},
}
Reweighing = lale.operators.make_operator(_ReweighingImpl, _combined_schemas)
lale.docstrings.set_docstrings(Reweighing)
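# Example usage (a minimal sketch; the LogisticRegression import,
# ``fairness_info``, ``train_X``, ``train_y``, and ``test_X`` are assumptions):
#
#   from lale.lib.sklearn import LogisticRegression
#   trainable = Reweighing(
#       **fairness_info, estimator=LogisticRegression(max_iter=1000)
#   )
#   trained = trainable.fit(train_X, train_y)
#   predictions = trained.predict(test_X)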
| 6,151 | 37.21118 | 209 |
py
|
lale
|
lale-master/lale/lib/aif360/protected_attributes_encoder.py
|
# Copyright 2020-2022 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import Any, Dict, List, Union
import numpy as np
import pandas as pd
import lale.datasets.data_schemas
import lale.docstrings
import lale.operators
import lale.type_checking
from lale.helpers import GenSym
from .util import (
_categorical_fairness_properties,
_ensure_str,
_ndarray_to_dataframe,
_ndarray_to_series,
)
_hyperparams_schema = {
"allOf": [
{
"description": "This first object lists all constructor arguments with their types, but omits constraints for conditional hyperparameters.",
"type": "object",
"additionalProperties": False,
"required": ["protected_attributes"],
"relevantToOptimizer": [],
"properties": {
"favorable_labels": {
"anyOf": [
_categorical_fairness_properties["favorable_labels"],
{"enum": [None]},
],
"default": None,
},
"protected_attributes": {
"anyOf": [
_categorical_fairness_properties["protected_attributes"],
{"enum": [None]},
],
"default": None,
},
"unfavorable_labels": _categorical_fairness_properties[
"unfavorable_labels"
],
"remainder": {
"description": "Transformation for columns that were not specified in protected_attributes.",
"enum": ["passthrough", "drop"],
"default": "drop",
},
"return_X_y": {
"description": "Deprecated, use transform_X_y instead. If True, transform returns a tuple with X and y; otherwise, transform returns only X, not as a tuple.",
"type": "boolean",
"default": False,
},
"combine": {
"description": "How to handle the case when there is more than one protected attribute.",
"enum": ["keep_separate", "and", "or", "error"],
"default": "keep_separate",
},
},
},
{
"description": "If returning y, need to know how to encode it.",
"anyOf": [
{"type": "object", "properties": {"return_X_y": {"enum": [False]}}},
{
"type": "object",
"properties": {"favorable_labels": {"not": {"enum": [None]}}},
},
],
},
{
"description": "If combine is error, must have only one protected attribute.",
"anyOf": [
{
"type": "object",
"properties": {"combine": {"not": {"enum": ["error"]}}},
},
{
"type": "object",
"properties": {
"protected_attributes": {"type": "array", "maxItems": 1}
},
},
],
},
],
}
_input_transform_schema = {
"type": "object",
"required": ["X", "y"],
"additionalProperties": False,
"properties": {
"X": {
"description": "Features; the outer array is over samples.",
"type": "array",
"items": {
"type": "array",
"items": {"anyOf": [{"type": "number"}, {"type": "string"}]},
},
},
"y": {
"description": "Target labels.",
"anyOf": [
{
"type": "array",
"items": {"anyOf": [{"type": "number"}, {"type": "string"}]},
},
{"enum": [None]},
],
"default": None,
},
},
}
_output_transform_schema = {
"anyOf": [
{
"description": "If return_X_y is False, return X.",
"type": "array",
"items": {
"description": "This operator encodes protected attributes as `0`, `0.5`, or `1`. So if the remainder (non-protected attributes) is dropped, the output is numeric. Otherwise, the output may still contain non-numeric values.",
"type": "array",
"items": {"anyOf": [{"type": "number"}, {"type": "string"}]},
},
},
{
"description": "If return_X_y is True, return tuple of X and y.",
"type": "array",
"laleType": "tuple",
"items": [
{
"description": "X",
"type": "array",
"items": {
"type": "array",
"items": {"anyOf": [{"type": "number"}, {"type": "string"}]},
},
},
{
"description": "y",
"type": "array",
"items": {"anyOf": [{"type": "number"}, {"type": "string"}]},
},
],
},
],
}
_input_transform_X_y_schema = {
"type": "object",
"required": ["X", "y"],
"additionalProperties": False,
"properties": {
"X": {
"description": "Features; the outer array is over samples.",
"type": "array",
"items": {
"type": "array",
"items": {"anyOf": [{"type": "number"}, {"type": "string"}]},
},
},
"y": {
"description": "Target labels.",
"type": "array",
"items": {"anyOf": [{"type": "number"}, {"type": "string"}]},
},
},
}
_output_transform_X_y_schema = {
"type": "array",
"laleType": "tuple",
"items": [
{
"description": "X",
"type": "array",
"items": {
"type": "array",
"items": {"anyOf": [{"type": "number"}, {"type": "string"}]},
},
},
{
"description": "y",
"type": "array",
"items": {"anyOf": [{"type": "number"}, {"type": "string"}]},
},
],
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """Protected attributes encoder.
The `protected_attributes` argument describes each sensitive column by
a `feature` name or index and a `reference_group` list of values or
ranges. This transformer encodes protected attributes with values of
`0`, `0.5`, or `1` to indicate membership in the unprivileged, neither,
or privileged group, respectively. That encoding makes the
protected attributes suitable as input for downstream fairness
mitigation operators. This operator does not encode the remaining
(non-protected) attributes. A common usage is to encode non-protected
attributes with a separate data preparation pipeline and to perform a
feature union before piping the transformed data to downstream
operators that require numeric data.
This operator is used internally by various lale.lib.aif360 metrics
and mitigators, so you often do not need to use it directly yourself.
""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.aif360.protected_attributes_encoder.html#lale.lib.aif360.protected_attributes_encoder.ProtectedAttributesEncoder",
"import_from": "lale.lib.aif360",
"type": "object",
"tags": {"pre": ["categoricals"], "op": ["transformer"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_transform": _input_transform_schema,
"output_transform": _output_transform_schema,
"input_transform_X_y": _input_transform_X_y_schema,
"output_transform_X_y": _output_transform_X_y_schema,
},
}
def _dataframe_replace(dataframe, subst):
new_columns = [
subst.get(i, subst.get(name, dataframe.iloc[:, i]))
for i, name in enumerate(dataframe.columns)
]
result = pd.concat(new_columns, axis=1)
return result
def _group_flag(value, pos_groups, other_groups):
for group in pos_groups:
if value == group:
return 1
if isinstance(group, list) and group[0] <= value <= group[1]:
return 1
if other_groups is None:
return 0
for group in other_groups:
if value == group:
return 0
if isinstance(group, list) and group[0] <= value <= group[1]:
return 0
return 0.5 # neither positive nor other
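# Worked illustration of _group_flag (hedged, hypothetical groups): with
# pos_groups=["male"] and other_groups=["female"], the values "male", "female",
# and "other" are encoded as 1, 0, and 0.5, respectively. Numeric ranges work
# the same way, e.g. _group_flag(30, [[26, 1000]], None) returns 1.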
class _ProtectedAttributesEncoderImpl:
y_name: str
protected_attributes: List[Dict[str, Any]]
def __init__(
self,
*,
favorable_labels=None,
protected_attributes=None,
unfavorable_labels=None,
remainder="drop",
return_X_y=False,
combine="keep_separate",
):
self.favorable_labels = favorable_labels
if protected_attributes is None:
self.protected_attributes = []
else:
self.protected_attributes = protected_attributes
self.unfavorable_labels = unfavorable_labels
self.remainder = remainder
self.return_X_y = return_X_y
self.combine = combine
if return_X_y:
warnings.warn(
"Constructor argument return_X_y=True is deprecated, call method transform_X_y() instead.",
DeprecationWarning,
)
def _transform_X(self, X: Union[np.ndarray, pd.DataFrame]):
X_pd: pd.DataFrame
if isinstance(X, np.ndarray):
X_pd = _ndarray_to_dataframe(X)
else:
X_pd = X
assert isinstance(X_pd, pd.DataFrame), type(X_pd)
protected = {}
def flag_grouper(pos_groups, other_groups):
return lambda v: _group_flag(v, pos_groups, other_groups)
for prot_attr in self.protected_attributes:
feature = prot_attr["feature"]
pos_groups = prot_attr["reference_group"]
other_groups = prot_attr.get("monitored_group", None)
if isinstance(feature, str):
column = X_pd[feature]
else:
column = X_pd.iloc[:, feature]
series = column.apply(flag_grouper(pos_groups, other_groups))
protected[feature] = series
if self.combine in ["and", "or"]:
prot_attr_names = [
_ensure_str(pa["feature"]) for pa in self.protected_attributes
]
comb_df = pd.concat(list(protected.values()), axis=1)
if self.combine == "and":
comb_series = comb_df.min(axis=1)
elif self.combine == "or":
comb_series = comb_df.max(axis=1)
else:
assert False, self.combine
gensym = GenSym(set(X_pd.columns))
comb_name = gensym(f"_{self.combine}_".join(prot_attr_names))
comb_series.name = comb_name
protected = {comb_name: comb_series}
if self.remainder == "drop":
result_X = pd.concat(list(protected.values()), axis=1)
else:
result_X = _dataframe_replace(X_pd, protected)
s_X = lale.datasets.data_schemas.to_schema(X_pd)
s_result = self.transform_schema(s_X)
result_X = lale.datasets.data_schemas.add_schema(result_X, s_result)
return result_X
def _transform_y(self, result_X: pd.DataFrame, y):
assert self.favorable_labels is not None
if y is None:
assert hasattr(self, "y_name"), "must call transform with non-None y first"
result_y = pd.Series(
data=0.0, index=result_X.index, dtype=np.float64, name=self.y_name
)
else:
if isinstance(y, np.ndarray):
self.y_name = _ensure_str(result_X.shape[1])
series_y = _ndarray_to_series(y, self.y_name, result_X.index, y.dtype)
else:
series_y = y.squeeze() if isinstance(y, pd.DataFrame) else y
assert isinstance(series_y, pd.Series), type(series_y)
self.y_name = series_y.name
result_y = series_y.apply(
lambda v: _group_flag(v, self.favorable_labels, self.unfavorable_labels)
)
return result_y
def transform(self, X: Union[np.ndarray, pd.DataFrame], y=None):
result_X = self._transform_X(X)
if self.return_X_y:
result_y = self._transform_y(result_X, y)
return result_X, result_y
else:
return result_X
def transform_X_y(self, X: Union[np.ndarray, pd.DataFrame], y=None):
result_X = self._transform_X(X)
result_y = self._transform_y(result_X, y)
return result_X, result_y
def transform_schema(self, s_X):
"""Used internally by Lale for type-checking downstream operators."""
s_X = lale.datasets.data_schemas.to_schema(s_X)
if self.remainder == "drop" and self.combine == "keep_separate":
out_names = [pa["feature"] for pa in self.protected_attributes]
if all(isinstance(n, str) for n in out_names):
result = {
**s_X,
"items": {
"type": "array",
"minItems": len(out_names),
"maxItems": len(out_names),
"items": [
{"description": n, "enum": [0, 0.5, 1]} for n in out_names
],
},
}
else:
result = {
**s_X,
"items": {
"type": "array",
"minItems": len(out_names),
"maxItems": len(out_names),
"items": {"enum": [0, 0.5, 1]},
},
}
else:
result = {
"type": "array",
"items": {
"anyOf": [
{"type": "array", "items": {"type": "number"}},
{"type": "array", "items": {"type": "string"}},
],
},
}
return result
ProtectedAttributesEncoder = lale.operators.make_operator(
_ProtectedAttributesEncoderImpl,
_combined_schemas,
)
lale.docstrings.set_docstrings(ProtectedAttributesEncoder)
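# Example usage (a minimal sketch; ``fairness_info``, ``creditg_X``, and
# ``creditg_y`` are assumptions): encode the protected attributes and labels
# as 0, 0.5, or 1, dropping all other columns.
#
#   prot_attr_enc = ProtectedAttributesEncoder(**fairness_info, remainder="drop")
#   encoded_X, encoded_y = prot_attr_enc.transform_X_y(creditg_X, creditg_y)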
| 15,147 | 35.239234 | 241 |
py
|
lale
|
lale-master/lale/lib/aif360/meta_fair_classifier.py
|
# Copyright 2020-2023 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import aif360.algorithms.inprocessing
import lale.docstrings
import lale.operators
from .util import (
_BaseInEstimatorImpl,
_categorical_fairness_properties,
_categorical_input_predict_proba_schema,
_categorical_output_predict_proba_schema,
)
class _MetaFairClassifierImpl(_BaseInEstimatorImpl):
def __init__(
self,
*,
favorable_labels,
protected_attributes,
unfavorable_labels,
redact=True,
preparation=None,
**hyperparams,
):
prot_attr_names = [pa["feature"] for pa in protected_attributes]
mitigator = aif360.algorithms.inprocessing.MetaFairClassifier(
sensitive_attr=prot_attr_names[0],
**hyperparams,
)
super().__init__(
favorable_labels=favorable_labels,
protected_attributes=protected_attributes,
unfavorable_labels=unfavorable_labels,
redact=redact,
preparation=preparation,
mitigator=mitigator,
)
_input_fit_schema = {
"type": "object",
"required": ["X", "y"],
"additionalProperties": False,
"properties": {
"X": {
"description": "Features; the outer array is over samples.",
"type": "array",
"items": {
"type": "array",
"items": {"anyOf": [{"type": "number"}, {"type": "string"}]},
},
},
"y": {
"description": "Target class labels; the array is over samples.",
"anyOf": [
{"type": "array", "items": {"type": "number"}},
{"type": "array", "items": {"type": "string"}},
],
},
},
}
_input_predict_schema = {
"type": "object",
"required": ["X"],
"additionalProperties": False,
"properties": {
"X": {
"description": "Features; the outer array is over samples.",
"type": "array",
"items": {
"type": "array",
"items": {"anyOf": [{"type": "number"}, {"type": "string"}]},
},
}
},
}
_output_predict_schema = {
"description": "Predicted class label per sample.",
"anyOf": [
{"type": "array", "items": {"type": "number"}},
{"type": "array", "items": {"type": "string"}},
],
}
_input_predict_proba_schema = _categorical_input_predict_proba_schema
_output_predict_proba_schema = _categorical_output_predict_proba_schema
_hyperparams_schema = {
"description": "Hyperparameter schema.",
"allOf": [
{
"description": "This first sub-object lists all constructor arguments with their types, one at a time, omitting cross-argument constraints.",
"type": "object",
"additionalProperties": False,
"required": [
*_categorical_fairness_properties.keys(),
"redact",
"preparation",
"tau",
"type",
],
"relevantToOptimizer": ["tau", "type"],
"properties": {
**_categorical_fairness_properties,
"redact": {
"description": "Whether to redact protected attributes before data preparation (recommended) or not.",
"type": "boolean",
"default": True,
},
"preparation": {
"description": "Transformer, which may be an individual operator or a sub-pipeline.",
"anyOf": [
{"laleType": "operator"},
{"description": "lale.lib.lale.NoOp", "enum": [None]},
],
"default": None,
},
"tau": {
"description": "Fairness penalty parameter.",
"type": "number",
"minimum": 0.0,
"maximum": 1.0,
"default": 0.8,
},
"type": {
"description": "The type of fairness metric to be used.",
"anyOf": [
{
"description": "False discovery rate ratio.",
"enum": ["fdr"],
},
{
"description": "Statistical rate / disparate impact.",
"enum": ["sr"],
},
],
"default": "fdr",
},
},
},
],
}
_combined_schemas = {
"description": """`MetaFairClassifier`_ in-estimator fairness mitigator. Takes the fairness metric as part of the input and returns a classifier optimized with respect to that fairness metric (`Celis et al. 2019`_).
.. _`MetaFairClassifier`: https://aif360.readthedocs.io/en/latest/modules/generated/aif360.algorithms.inprocessing.MetaFairClassifier.html
.. _`Celis et al. 2019`: https://doi.org/10.1145/3287560.3287586
""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.aif360.meta_fair_classifier.html#lale.lib.aif360.meta_fair_classifier.MetaFairClassifier",
"import_from": "aif360.sklearn.inprocessing",
"type": "object",
"tags": {"pre": [], "op": ["estimator", "classifier"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
"input_predict_proba": _input_predict_proba_schema,
"output_predict_proba": _output_predict_proba_schema,
},
}
MetaFairClassifier = lale.operators.make_operator(
_MetaFairClassifierImpl, _combined_schemas
)
lale.docstrings.set_docstrings(MetaFairClassifier)
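# Example usage (a minimal sketch; ``fairness_info``, ``train_X``, ``train_y``,
# and ``test_X`` are assumptions): MetaFairClassifier is an in-estimator
# mitigator, so it already contains its own classifier.
#
#   trainable = MetaFairClassifier(**fairness_info, type="sr", tau=0.8)
#   trained = trainable.fit(train_X, train_y)
#   predictions = trained.predict(test_X)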
| 6,471 | 33.795699 | 219 |
py
|
lale
|
lale-master/lale/lib/aif360/lfr.py
|
# Copyright 2019-2023 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import aif360.algorithms.preprocessing
import pandas as pd
import lale.docstrings
import lale.lib.lale
import lale.operators
from .protected_attributes_encoder import ProtectedAttributesEncoder
from .redacting import Redacting
from .util import (
_categorical_fairness_properties,
_categorical_input_transform_schema,
_categorical_supervised_input_fit_schema,
_numeric_output_transform_schema,
_PandasToDatasetConverter,
_validate_fairness_info,
dataset_to_pandas,
)
class _LFRImpl:
def __init__(
self,
*,
favorable_labels,
protected_attributes,
unfavorable_labels=None,
redact=True,
preparation=None,
**hyperparams,
):
_validate_fairness_info(
favorable_labels, protected_attributes, unfavorable_labels, False
)
self.favorable_labels = favorable_labels
self.protected_attributes = protected_attributes
self.unfavorable_labels = unfavorable_labels
self.redact = redact
if preparation is None:
preparation = lale.lib.lale.NoOp
self.preparation = preparation
prot_attr_names = [pa["feature"] for pa in protected_attributes]
unprivileged_groups = [{name: 0 for name in prot_attr_names}]
privileged_groups = [{name: 1 for name in prot_attr_names}]
self.mitigator = aif360.algorithms.preprocessing.LFR(
unprivileged_groups=unprivileged_groups,
privileged_groups=privileged_groups,
**hyperparams,
)
def _prep_and_encode(self, X, y=None):
prepared_X = self.redact1_and_prep.transform(X, y)
encoded_X, encoded_y = self.prot_attr_enc.transform_X_y(X, y)
combined_attribute_names = list(prepared_X.columns) + [
name for name in encoded_X.columns if name not in prepared_X.columns
]
combined_columns = [
encoded_X[name] if name in encoded_X else prepared_X[name]
for name in combined_attribute_names
]
combined_X = pd.concat(combined_columns, axis=1)
result = self.pandas_to_dataset.convert(combined_X, encoded_y)
return result
def _mitigate(self, encoded_data):
mitigated_data = self.mitigator.transform(encoded_data)
mitigated_X, _ = dataset_to_pandas(mitigated_data, return_only="X")
return mitigated_X
def fit(self, X, y):
fairness_info = {
"favorable_labels": self.favorable_labels,
"protected_attributes": self.protected_attributes,
"unfavorable_labels": self.unfavorable_labels,
}
redacting = Redacting(**fairness_info) if self.redact else lale.lib.lale.NoOp
preparation = self.preparation
trainable_redact1_and_prep = redacting >> preparation
assert isinstance(trainable_redact1_and_prep, lale.operators.TrainablePipeline)
self.redact1_and_prep = trainable_redact1_and_prep.fit(X, y)
self.prot_attr_enc = ProtectedAttributesEncoder(
**fairness_info,
remainder="drop",
)
prot_attr_names = [pa["feature"] for pa in self.protected_attributes]
self.pandas_to_dataset = _PandasToDatasetConverter(
favorable_label=1,
unfavorable_label=0,
protected_attribute_names=prot_attr_names,
)
encoded_data = self._prep_and_encode(X, y)
self.mitigator.fit(encoded_data)
mitigated_X = self._mitigate(encoded_data)
self.redact2 = redacting.fit(mitigated_X)
return self
def transform(self, X):
encoded_data = self._prep_and_encode(X)
mitigated_X = self._mitigate(encoded_data)
redacted_X = self.redact2.transform(mitigated_X)
return redacted_X
_input_fit_schema = _categorical_supervised_input_fit_schema
_input_transform_schema = _categorical_input_transform_schema
_output_transform_schema = _numeric_output_transform_schema
_hyperparams_schema = {
"description": "Hyperparameter schema.",
"allOf": [
{
"type": "object",
"additionalProperties": False,
"required": [
*_categorical_fairness_properties.keys(),
"redact",
"preparation",
"k",
"Ax",
"Az",
"Ay",
"print_interval",
"verbose",
"seed",
],
"relevantToOptimizer": ["k", "Ax", "Az", "Ay"],
"properties": {
**_categorical_fairness_properties,
"redact": {
"description": "Whether to redact protected attributes before data preparation (recommended) or not.",
"type": "boolean",
"default": True,
},
"preparation": {
"description": "Transformer, which may be an individual operator or a sub-pipeline.",
"anyOf": [
{"laleType": "operator"},
{"description": "lale.lib.lale.NoOp", "enum": [None]},
],
"default": None,
},
"k": {
"description": "Number of prototypes.",
"type": "integer",
"minimum": 1,
"default": 5,
"maximumForOptimizer": 20,
},
"Ax": {
"description": "Input recontruction quality term weight.",
"type": "number",
"minimum": 0.0,
"default": 0.01,
"maximumForOptimizer": 100.0,
},
"Az": {
"description": "Fairness constraint term weight.",
"type": "number",
"minimum": 0.0,
"default": 1.0,
"maximumForOptimizer": 100.0,
},
"Ay": {
"description": "Output prediction error.",
"type": "number",
"minimum": 0.0,
"default": 50.0,
"maximumForOptimizer": 100.0,
},
"print_interval": {
"description": "Print optimization objective value every print_interval iterations.",
"type": "integer",
"minimum": 1,
"default": 250,
},
"verbose": {
"description": "If zero, then no output.",
"type": "integer",
"default": 0,
},
"seed": {
"description": "Seed to make `transform` repeatable.",
"anyOf": [{"type": "integer"}, {"enum": [None]}],
"default": None,
},
},
}
],
}
_combined_schemas = {
"description": """`LFR`_ (learning fair representations) pre-estimator fairness mitigator. Finds a latent representation that encodes the data well but obfuscates information about protected attributes (`Zemel et al. 2013`_).
.. _`LFR`: https://aif360.readthedocs.io/en/latest/modules/generated/aif360.algorithms.preprocessing.LFR.html
.. _`Zemel et al. 2013`: http://proceedings.mlr.press/v28/zemel13.html
""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.aif360.lfr.html#lale.lib.aif360.lfr.LFR",
"import_from": "aif360.algorithms.preprocessing",
"type": "object",
"tags": {"pre": ["~categoricals"], "op": ["transformer"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_transform": _input_transform_schema,
"output_transform": _output_transform_schema,
},
}
LFR = lale.operators.make_operator(_LFRImpl, _combined_schemas)
lale.docstrings.set_docstrings(LFR)
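# Example usage (a minimal sketch; the LogisticRegression import,
# ``fairness_info``, ``train_X``, and ``train_y`` are assumptions): LFR is a
# pre-estimator mitigator, so it is typically followed by a separate estimator.
#
#   from lale.lib.sklearn import LogisticRegression
#   pipeline = LFR(**fairness_info, k=5) >> LogisticRegression(max_iter=1000)
#   trained = pipeline.fit(train_X, train_y)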
| 8,662 | 37.162996 | 229 |
py
|
lale
|
lale-master/lale/lib/autoai_libs/numpy_permute_array.py
|
# Copyright 2020 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import autoai_libs.transformers.exportable
import lale.docstrings
import lale.operators
from ._common_schemas import _hparams_column_index_list
class _NumpyPermuteArrayImpl:
def __init__(self, **hyperparams):
self._wrapped_model = autoai_libs.transformers.exportable.NumpyPermuteArray(
**hyperparams
)
def fit(self, X, y=None):
self._wrapped_model.fit(X, y)
return self
def transform(self, X):
return self._wrapped_model.transform(X)
_hyperparams_schema = {
"allOf": [
{
"description": "This first object lists all constructor arguments with their types, but omits constraints for conditional hyperparameters.",
"type": "object",
"additionalProperties": False,
"required": ["permutation_indices", "axis"],
"relevantToOptimizer": [],
"properties": {
"permutation_indices": _hparams_column_index_list(
description="List of indexes based on which columns will be rearranged."
),
"axis": {
"anyOf": [
{"enum": [0, None], "description": "Permute along columns."},
{"enum": [1], "description": "Permute along rows."},
],
"default": None,
},
},
}
]
}
_input_fit_schema = {
"type": "object",
"required": ["X"],
"additionalProperties": False,
"properties": {
"X": { # Handles 1-D arrays as well
"anyOf": [
{"type": "array", "items": {"laleType": "Any"}},
{
"type": "array",
"items": {"type": "array", "items": {"laleType": "Any"}},
},
]
},
"y": {"laleType": "Any"},
},
}
_input_transform_schema = {
"type": "object",
"required": ["X"],
"additionalProperties": False,
"properties": {
"X": { # Handles 1-D arrays as well
"anyOf": [
{"type": "array", "items": {"laleType": "Any"}},
{
"type": "array",
"items": {"type": "array", "items": {"laleType": "Any"}},
},
]
}
},
}
_output_transform_schema = {
"description": "Features; the outer array is over samples.",
"anyOf": [
{"type": "array", "items": {"laleType": "Any"}},
{"type": "array", "items": {"type": "array", "items": {"laleType": "Any"}}},
],
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """Operator from `autoai_libs`_. Rearranges columns or rows of a numpy array based on a list of indices.
.. _`autoai_libs`: https://pypi.org/project/autoai-libs""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.autoai_libs.numpy_permute_array.html",
"import_from": "autoai_libs.transformers.exportable",
"type": "object",
"tags": {"pre": [], "op": ["transformer"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_transform": _input_transform_schema,
"output_transform": _output_transform_schema,
},
}
NumpyPermuteArray = lale.operators.make_operator(
_NumpyPermuteArrayImpl, _combined_schemas
)
lale.docstrings.set_docstrings(NumpyPermuteArray)
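# Example usage (a minimal sketch; the numpy import and the concrete data are
# assumptions): rearrange the columns of a 2-D array according to
# permutation_indices.
#
#   import numpy as np
#   X = np.array([[1, 2, 3], [4, 5, 6]])
#   op = NumpyPermuteArray(permutation_indices=[2, 0, 1], axis=0)
#   permuted = op.fit(X).transform(X)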
| 4,072 | 31.070866 | 152 |
py
|
lale
|
lale-master/lale/lib/autoai_libs/fs2.py
|
# Copyright 2020 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import autoai_libs.cognito.transforms.transform_utils
import lale.docstrings
import lale.operators
from ._common_schemas import (
_hparam_fs_cols_ids_must_keep,
_hparams_fs_additional_col_count_to_keep,
_hparams_fs_ptype,
)
class _FS2Impl:
def __init__(self, **hyperparams):
self._wrapped_model = autoai_libs.cognito.transforms.transform_utils.FS2(
**hyperparams
)
def fit(self, X, y=None):
self._wrapped_model.fit(X, y)
return self
def transform(self, X):
result = self._wrapped_model.transform(X)
try:
if hasattr(self, "column_names"):
self.column_names = [
self.column_names[i]
for i in self._wrapped_model.cols_to_keep_final_
]
if hasattr(self, "column_dtypes"):
self.column_dtypes = [
self.column_dtypes[i]
for i in self._wrapped_model.cols_to_keep_final_
]
except Exception: # nosec
pass
return result
def set_meta_data(self, meta_data_dict):
if "column_names" in meta_data_dict.keys():
self.column_names = meta_data_dict["column_names"]
if "column_dtypes" in meta_data_dict.keys():
self.column_dtypes = meta_data_dict["column_dtypes"]
def get_transform_meta_output(self):
return_dict = {}
if hasattr(self, "column_names"):
return_dict["column_names"] = self.column_names
if hasattr(self, "column_dtypes"):
return_dict["column_dtypes"] = self.column_dtypes
return return_dict
_hyperparams_schema = {
"allOf": [
{
"description": "This first object lists all constructor arguments with their types, but omits constraints for conditional hyperparameters.",
"type": "object",
"additionalProperties": False,
"required": [
"cols_ids_must_keep",
"additional_col_count_to_keep",
"ptype",
"eval_algo",
],
"relevantToOptimizer": [],
"properties": {
"cols_ids_must_keep": _hparam_fs_cols_ids_must_keep,
"additional_col_count_to_keep": _hparams_fs_additional_col_count_to_keep,
"ptype": _hparams_fs_ptype,
"eval_algo": {
"description": "A supervised model where fit() sets `feature_importances_`.",
"laleType": "Any",
"transient": "alwaysPrint", # since positional argument
"default": None,
},
},
}
]
}
_input_fit_schema = {
"type": "object",
"required": ["X", "y"],
"additionalProperties": False,
"properties": {
"X": {
"type": "array",
"items": {"type": "array", "items": {"laleType": "Any"}},
},
"y": {
"type": "array",
"items": {"laleType": "Any"},
"description": "Target values.",
},
},
}
_input_transform_schema = {
"type": "object",
"required": ["X"],
"additionalProperties": False,
"properties": {
"X": {"type": "array", "items": {"type": "array", "items": {"laleType": "Any"}}}
},
}
_output_transform_schema = {
"description": "Features; the outer array is over samples.",
"type": "array",
"items": {"type": "array", "items": {"laleType": "Any"}},
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """Operator from `autoai_libs`_. Feature selection, type 2.
.. _`autoai_libs`: https://pypi.org/project/autoai-libs""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.autoai_libs.fs2.html",
"import_from": "autoai_libs.cognito.transforms.transform_utils",
"type": "object",
"tags": {"pre": [], "op": ["transformer"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_transform": _input_transform_schema,
"output_transform": _output_transform_schema,
},
}
FS2 = lale.operators.make_operator(_FS2Impl, _combined_schemas)
lale.docstrings.set_docstrings(FS2)
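# Example sketch: minimal usage of FS2, assuming autoai_libs and scikit-learn are
# installed. Per the schema above, eval_algo is a supervised model whose fit()
# sets feature_importances_; passing an estimator instance and the toy data below
# are illustrative assumptions.
if __name__ == "__main__":
    import numpy as np
    from sklearn.tree import DecisionTreeClassifier
    _X = np.random.rand(20, 5)
    _y = np.random.randint(0, 2, 20)
    _trainable = FS2(
        cols_ids_must_keep=[0],
        additional_col_count_to_keep=2,
        ptype="classification",
        eval_algo=DecisionTreeClassifier(),
    )
    _trained = _trainable.fit(_X, _y)
    print(_trained.transform(_X).shape)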
| 4,922 | 31.82 | 152 |
py
|
lale
|
lale-master/lale/lib/autoai_libs/word2vec_transformer.py
|
# Copyright 2020 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import autoai_libs.transformers.text_transformers
import lale.docstrings
import lale.operators
from ._common_schemas import _hparam_activate_flag_features, _hparam_column_headers_list
# This is currently needed just to hide get_params so that lale does not call clone
# when doing a defensive copy
class _Word2VecTransformerImpl:
def __init__(self, **hyperparams):
if hyperparams.get("column_headers_list", None) is None:
hyperparams["column_headers_list"] = []
if hyperparams.get("text_processing_options", None) is None:
hyperparams["text_processing_options"] = []
self._wrapped_model = (
autoai_libs.transformers.text_transformers.Word2VecTransformer(
**hyperparams
)
)
def fit(self, X, y=None):
self._wrapped_model.fit(X, y)
return self
def transform(self, X):
return self._wrapped_model.transform(X)
_hyperparams_schema = {
"allOf": [
{
"description": """This transformer converts text columns in the dataset to its word2vec embedding vectors.
It then performs SVD on those vectors for dimensionality reduction.""",
"type": "object",
"additionalProperties": False,
"required": [
"output_dim",
"column_headers_list",
"svd_num_iter",
"drop_columns",
"activate_flag",
"min_count",
"text_columns",
"text_processing_options",
],
"relevantToOptimizer": [],
"properties": {
"output_dim": {
"description": "Number of numeric features generated per text column.",
"type": "integer",
"default": 30,
},
"column_headers_list": _hparam_column_headers_list(
description="""Column headers passed from autoai_core. The new feature's column headers are
appended to this."""
),
"svd_num_iter": {
"description": "Number of iterations for which svd was run.",
"type": "integer",
"default": 5,
},
"drop_columns": {
"description": "If true, drops text columns",
"type": "boolean",
"default": False,
},
"activate_flag": _hparam_activate_flag_features,
"min_count": {
"description": "Word2vec model ignores all the words whose frequency is less than this.",
"type": "integer",
"default": 5,
},
"text_columns": {
"description": "If passed, then word2vec features are applied to these columns.",
"anyOf": [
{"type": "array", "items": {"type": "string"}},
{"type": "array", "items": {"type": "integer"}},
{"enum": [None]},
],
"default": None,
},
"text_processing_options": {
"description": "The parameter values to initialize this transformer are passed through this dictionary.",
"anyOf": [
{"type": "object"},
{"enum": [None]},
],
"default": None,
},
},
}
]
}
_input_fit_schema = {
"type": "object",
"required": ["X"],
"additionalProperties": False,
"properties": {
"X": { # Handles 1-D arrays as well
"anyOf": [
{"type": "array", "items": {"laleType": "Any"}},
{
"type": "array",
"items": {"type": "array", "items": {"laleType": "Any"}},
},
]
},
"y": {"laleType": "Any"},
},
}
_input_transform_schema = {
"type": "object",
"required": ["X"],
"additionalProperties": False,
"properties": {
"X": { # Handles 1-D arrays as well
"anyOf": [
{"type": "array", "items": {"laleType": "Any"}},
{
"type": "array",
"items": {"type": "array", "items": {"laleType": "Any"}},
},
]
}
},
}
_output_transform_schema = {
"description": "Features; the outer array is over samples.",
"anyOf": [
{"type": "array", "items": {"laleType": "Any"}},
{"type": "array", "items": {"type": "array", "items": {"laleType": "Any"}}},
],
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """Operator from `autoai_libs`_. Converts text columns to numeric features using a combination of word2vec and SVD.
.. _`autoai_libs`: https://pypi.org/project/autoai-libs""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.autoai_libs.word2vec_transformer.html",
"import_from": "autoai_libs.transformers.text_transformers",
"type": "object",
"tags": {"pre": [], "op": ["transformer"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_transform": _input_transform_schema,
"output_transform": _output_transform_schema,
},
}
Word2VecTransformer = lale.operators.make_operator(
_Word2VecTransformerImpl, _combined_schemas
)
lale.docstrings.set_docstrings(Word2VecTransformer)
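# Example sketch: minimal usage of Word2VecTransformer, assuming autoai_libs and
# its word2vec dependencies are installed. The toy sentences, column index, and
# hyperparameter values are illustrative assumptions.
if __name__ == "__main__":
    import numpy as np
    _X = np.array(
        [
            ["the cat sat on the mat"],
            ["a dog chased the cat"],
            ["the mat was red"],
            ["the dog sat on the rug"],
        ],
        dtype=object,
    )
    _trainable = Word2VecTransformer(output_dim=2, text_columns=[0], min_count=1)
    _trained = _trainable.fit(_X)
    print(_trained.transform(_X).shape)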
| 6,288 | 34.732955 | 134 |
py
|
lale
|
lale-master/lale/lib/autoai_libs/numpy_column_selector.py
|
# Copyright 2020 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import autoai_libs.transformers.exportable
import pandas as pd
import lale.docstrings
import lale.operators
from ._common_schemas import _hparams_column_index_list
class _NumpyColumnSelectorImpl:
def __init__(self, **hyperparams):
self._wrapped_model = autoai_libs.transformers.exportable.NumpyColumnSelector(
**hyperparams
)
def fit(self, X, y=None):
if isinstance(X, pd.DataFrame):
X = X.to_numpy()
self._wrapped_model.fit(X, y)
return self
def transform(self, X):
if isinstance(X, pd.DataFrame):
X = X.to_numpy()
return self._wrapped_model.transform(X)
_hyperparams_schema = {
"allOf": [
{
"description": "This first object lists all constructor arguments with their types, but omits constraints for conditional hyperparameters.",
"type": "object",
"additionalProperties": False,
"required": ["columns"],
"relevantToOptimizer": [],
"properties": {
"columns": _hparams_column_index_list(
description="List of indices to select numpy columns."
),
},
}
]
}
_input_fit_schema = {
"type": "object",
"required": ["X"],
"additionalProperties": False,
"properties": {
"X": { # Handles 1-D arrays as well
"anyOf": [
{"type": "array", "items": {"laleType": "Any"}},
{
"type": "array",
"items": {"type": "array", "items": {"laleType": "Any"}},
},
]
},
"y": {"laleType": "Any"},
},
}
_input_transform_schema = {
"type": "object",
"required": ["X"],
"additionalProperties": False,
"properties": {
"X": { # Handles 1-D arrays as well
"anyOf": [
{"type": "array", "items": {"laleType": "Any"}},
{
"type": "array",
"items": {"type": "array", "items": {"laleType": "Any"}},
},
]
}
},
}
_output_transform_schema = {
"description": "Features; the outer array is over samples.",
"anyOf": [
{"type": "array", "items": {"laleType": "Any"}},
{"type": "array", "items": {"type": "array", "items": {"laleType": "Any"}}},
],
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """Operator from `autoai_libs`_. Selects a subset of columns of a numpy array.
.. _`autoai_libs`: https://pypi.org/project/autoai-libs""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.autoai_libs.numpy_column_selector.html",
"import_from": "autoai_libs.transformers.exportable",
"type": "object",
"tags": {"pre": [], "op": ["transformer"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_transform": _input_transform_schema,
"output_transform": _output_transform_schema,
},
}
NumpyColumnSelector = lale.operators.make_operator(
_NumpyColumnSelectorImpl, _combined_schemas
)
lale.docstrings.set_docstrings(NumpyColumnSelector)
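# Example sketch: minimal usage of NumpyColumnSelector, assuming autoai_libs is
# installed; the toy data is illustrative.
if __name__ == "__main__":
    import numpy as np
    _X = np.array([[1, 2, 3], [4, 5, 6]])
    _trainable = NumpyColumnSelector(columns=[0, 2])
    _trained = _trainable.fit(_X)
    print(_trained.transform(_X))  # keeps only columns 0 and 2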
| 3,867 | 29.944 | 152 |
py
|
lale
|
lale-master/lale/lib/autoai_libs/text_transformer.py
|
# Copyright 2020 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import autoai_libs.transformers.text_transformers
import lale.docstrings
import lale.operators
from ._common_schemas import _hparam_activate_flag_features, _hparam_column_headers_list
# This is currently needed just to hide get_params so that lale does not call clone
# when doing a defensive copy
class _TextTransformerImpl:
def __init__(self, **hyperparams):
if hyperparams.get("column_headers_list", None) is None:
hyperparams["column_headers_list"] = []
if hyperparams.get("columns_to_be_deleted", None) is None:
hyperparams["columns_to_be_deleted"] = []
self._wrapped_model = (
autoai_libs.transformers.text_transformers.TextTransformer(**hyperparams)
)
def fit(self, X, y=None):
self._wrapped_model.fit(X, y)
return self
def transform(self, X):
return self._wrapped_model.transform(X)
_hyperparams_schema = {
"allOf": [
{
"description": """TextTransformer is a transformer that converts text columns to numeric features.""",
"type": "object",
"additionalProperties": False,
"required": [
"text_processing_options",
"column_headers_list",
"drop_columns",
"min_num_words",
"columns_to_be_deleted",
"text_columns",
"activate_flag",
],
"relevantToOptimizer": [],
"properties": {
"text_processing_options": {
"description": """A map of the transformers to be applied and the hyper parameters of the transformers.
{TextTransformersList.word2vec:{'output_dim':vocab_size}}""",
"type": "object",
"default": {},
},
"column_headers_list": _hparam_column_headers_list(
description="""The list of columns generated by autoai's processing.
The column headers of the generated features will be appended to this and returned."""
),
"drop_columns": {
"description": "If the original text columns need to be dropped.",
"type": "boolean",
"default": False,
},
"min_num_words": {
"description": "The minimum numbers of words a column must have in order to be considered as a text column.",
"type": "integer",
"default": 3,
},
"columns_to_be_deleted": _hparam_column_headers_list(
description="List of columns to be deleted."
),
"text_columns": _hparam_column_headers_list(
description="If text columns are sent, then text detection is not done again."
),
"activate_flag": _hparam_activate_flag_features,
},
}
]
}
_input_fit_schema = {
"type": "object",
"required": ["X"],
"additionalProperties": False,
"properties": {
"X": { # Handles 1-D arrays as well
"anyOf": [
{"type": "array", "items": {"laleType": "Any"}},
{
"type": "array",
"items": {"type": "array", "items": {"laleType": "Any"}},
},
]
},
"y": {"laleType": "Any"},
},
}
_input_transform_schema = {
"type": "object",
"required": ["X"],
"additionalProperties": False,
"properties": {
"X": { # Handles 1-D arrays as well
"anyOf": [
{"type": "array", "items": {"laleType": "Any"}},
{
"type": "array",
"items": {"type": "array", "items": {"laleType": "Any"}},
},
]
}
},
}
_output_transform_schema = {
"description": "Features; the outer array is over samples.",
"anyOf": [
{"type": "array", "items": {"laleType": "Any"}},
{"type": "array", "items": {"type": "array", "items": {"laleType": "Any"}}},
],
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """Operator from `autoai_libs`_. Converts text columns to numeric features using a combination of word2vec and SVD.
.. _`autoai_libs`: https://pypi.org/project/autoai-libs""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.autoai_libs.word2vec_transformer.html",
"import_from": "autoai_libs.transformers.text_transformers",
"type": "object",
"tags": {"pre": [], "op": ["transformer"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_transform": _input_transform_schema,
"output_transform": _output_transform_schema,
},
}
TextTransformer = lale.operators.make_operator(_TextTransformerImpl, _combined_schemas)
lale.docstrings.set_docstrings(TextTransformer)
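# Example sketch: minimal usage of TextTransformer, assuming autoai_libs and its
# text-processing dependencies are installed. The toy data, column index, and
# reliance on the default text_processing_options are illustrative assumptions.
if __name__ == "__main__":
    import numpy as np
    _X = np.array(
        [
            ["the cat sat on the mat", 1],
            ["a dog chased the cat", 2],
            ["the mat was red", 3],
        ],
        dtype=object,
    )
    _trainable = TextTransformer(text_columns=[0], drop_columns=True)
    _trained = _trainable.fit(_X)
    print(_trained.transform(_X).shape)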
| 5,654 | 35.483871 | 134 |
py
|
lale
|
lale-master/lale/lib/autoai_libs/boolean2float.py
|
# Copyright 2020 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import autoai_libs.transformers.exportable
import numpy as np
import pandas as pd
import lale.datasets.data_schemas
import lale.docstrings
import lale.operators
from ._common_schemas import _hparam_activate_flag_unmodified
class _boolean2floatImpl:
def __init__(self, **hyperparams):
self._hyperparams = hyperparams
self._wrapped_model = autoai_libs.transformers.exportable.boolean2float(
**hyperparams
)
def fit(self, X, y=None):
self._wrapped_model.fit(X, y)
return self
def transform(self, X):
raw = self._wrapped_model.transform(X)
if isinstance(raw, (np.ndarray, pd.DataFrame)):
s_X = lale.datasets.data_schemas.to_schema(X)
s_result = self.transform_schema(s_X)
result = lale.datasets.data_schemas.add_schema(raw, s_result, recalc=True)
else:
result = raw
return result
def transform_schema(self, s_X):
"""Used internally by Lale for type-checking downstream operators."""
if self._hyperparams["activate_flag"]:
result = {
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
}
else:
result = s_X
return result
_hyperparams_schema = {
"allOf": [
{
"description": "This first object lists all constructor arguments with their types, but omits constraints for conditional hyperparameters.",
"type": "object",
"additionalProperties": False,
"required": ["activate_flag"],
"relevantToOptimizer": [],
"properties": {"activate_flag": _hparam_activate_flag_unmodified},
}
]
}
_input_fit_schema = {
"type": "object",
"required": ["X"],
"additionalProperties": False,
"properties": {
"X": { # Handles 1-D arrays as well
"anyOf": [
{"type": "array", "items": {"laleType": "Any"}},
{
"type": "array",
"items": {"type": "array", "items": {"laleType": "Any"}},
},
]
},
"y": {"laleType": "Any"},
},
}
_input_transform_schema = {
"type": "object",
"required": ["X"],
"additionalProperties": False,
"properties": {
"X": { # Handles 1-D arrays as well
"anyOf": [
{"type": "array", "items": {"laleType": "Any"}},
{
"type": "array",
"items": {"type": "array", "items": {"laleType": "Any"}},
},
]
}
},
}
_output_transform_schema = {
"description": "Features; the outer array is over samples.",
"type": "array",
"items": {"type": "array", "items": {"laleType": "Any"}},
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """Operator from `autoai_libs`_. Converts strings that represent booleans to floats and replaces missing values with np.nan.
.. _`autoai_libs`: https://pypi.org/project/autoai-libs""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.autoai_libs.boolean2float.html",
"import_from": "autoai_libs.transformers.exportable",
"type": "object",
"tags": {"pre": [], "op": ["transformer"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_transform": _input_transform_schema,
"output_transform": _output_transform_schema,
},
}
boolean2float = lale.operators.make_operator(_boolean2floatImpl, _combined_schemas)
lale.docstrings.set_docstrings(boolean2float)
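# Example sketch: minimal usage of boolean2float, assuming autoai_libs is
# installed; the toy data is illustrative.
if __name__ == "__main__":
    _X = np.array([["True"], ["False"], ["True"]], dtype=object)
    _trainable = boolean2float(activate_flag=True)
    _trained = _trainable.fit(_X)
    print(_trained.transform(_X))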
| 4,321 | 31.014815 | 152 |
py
|
lale
|
lale-master/lale/lib/autoai_libs/float_str2_float.py
|
# Copyright 2020-2023 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import autoai_libs.transformers.exportable
import lale.docstrings
import lale.operators
from ._common_schemas import _hparam_activate_flag_unmodified, _hparam_dtypes_list
class _FloatStr2FloatImpl:
def __init__(self, **hyperparams):
self._wrapped_model = autoai_libs.transformers.exportable.FloatStr2Float(
**hyperparams
)
def fit(self, X, y=None):
self._wrapped_model.fit(X, y)
return self
def transform(self, X):
return self._wrapped_model.transform(X)
_hyperparams_schema = {
"allOf": [
{
"description": "This first object lists all constructor arguments with their types, but omits constraints for conditional hyperparameters.",
"type": "object",
"additionalProperties": False,
"required": [
"dtypes_list",
"missing_values_reference_list",
"activate_flag",
],
"relevantToOptimizer": [],
"properties": {
"dtypes_list": _hparam_dtypes_list,
"missing_values_reference_list": {
"anyOf": [
{
"description": "Reference list of missing values in the input numpy array X.",
"type": "array",
"items": {"laleType": "Any"},
},
{
"description": "If None, default to ``['?', '', '-', np.nan]``.",
"enum": [None],
},
],
"default": None,
},
"activate_flag": _hparam_activate_flag_unmodified,
},
}
]
}
_input_fit_schema = {
"type": "object",
"required": ["X"],
"additionalProperties": False,
"properties": {
"X": { # Handles 1-D arrays as well
"anyOf": [
{"type": "array", "items": {"laleType": "Any"}},
{
"type": "array",
"items": {"type": "array", "items": {"laleType": "Any"}},
},
]
},
"y": {"laleType": "Any"},
},
}
_input_transform_schema = {
"type": "object",
"required": ["X"],
"additionalProperties": False,
"properties": {
"X": { # Handles 1-D arrays as well
"anyOf": [
{"type": "array", "items": {"laleType": "Any"}},
{
"type": "array",
"items": {"type": "array", "items": {"laleType": "Any"}},
},
]
}
},
}
_output_transform_schema = {
"description": "Features; the outer array is over samples.",
"anyOf": [
{"type": "array", "items": {"laleType": "Any"}},
{"type": "array", "items": {"type": "array", "items": {"laleType": "Any"}}},
],
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """Operator from `autoai_libs`_. Replaces columns of strings that represent floats (type ``float_str`` in dtypes_list) to columns of floats and replaces their missing values with np.nan.
.. _`autoai_libs`: https://pypi.org/project/autoai-libs""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.autoai_libs.float_str2_float.html",
"import_from": "autoai_libs.transformers.exportable",
"type": "object",
"tags": {"pre": [], "op": ["transformer"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_transform": _input_transform_schema,
"output_transform": _output_transform_schema,
},
}
FloatStr2Float = lale.operators.make_operator(_FloatStr2FloatImpl, _combined_schemas)
lale.docstrings.set_docstrings(FloatStr2Float)
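# Example sketch: minimal usage of FloatStr2Float, assuming autoai_libs is
# installed. The dtypes_list entry and the toy data are illustrative assumptions.
if __name__ == "__main__":
    import numpy as np
    _X = np.array([["1.5"], ["2.25"], ["?"]], dtype=object)
    _trainable = FloatStr2Float(
        dtypes_list=["float_str"], missing_values_reference_list=["?"]
    )
    _trained = _trainable.fit(_X)
    print(_trained.transform(_X))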
| 4,515 | 32.451852 | 205 |
py
|
lale
|
lale-master/lale/lib/autoai_libs/_common_schemas.py
|
# Copyright 2023 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This file contains common schema fragments used in the autoai_libs schemas
"""
from typing import Any, Dict
JSON_TYPE = Dict[str, Any]
# activate_flag
_hparam_activate_flag_unmodified: JSON_TYPE = {
"description": "If False, transform(X) outputs the input numpy array X unmodified.",
"type": "boolean",
"default": True,
}
_hparam_activate_flag_features: JSON_TYPE = {
"description": "If False, the features are not generated.",
"type": "boolean",
"default": True,
}
_hparam_activate_flag_active: JSON_TYPE = {
"description": "Determines whether transformer is active or not.",
"type": "boolean",
"default": True,
}
_hparam_col_dtypes: JSON_TYPE = {
"description": "List of the datatypes of the feature columns.",
"anyOf": [
{"type": "array", "items": {"laleType": "Any"}},
{"enum": [None]},
],
"default": None,
}
_hparam_dtypes_list: JSON_TYPE = {
"anyOf": [
{
"description": "Strings that denote the type of each column of the input numpy array X.",
"type": "array",
"items": {
"enum": [
"char_str",
"int_str",
"float_str",
"float_num",
"float_int_num",
"int_num",
"boolean",
"Unknown",
"missing",
]
},
},
{
"description": "If None, the column types are discovered.",
"enum": [None],
},
],
"transient": "alwaysPrint", # since positional argument
"default": None,
}
_hparam_sklearn_version_family: JSON_TYPE = {
"description": "The sklearn version for backward compatibility with versions 019 and 020dev. Currently unused.",
"enum": ["20", "21", "22", "23", "24", None, "1"],
"default": None,
}
def _hparam_column_headers_list(description: str) -> JSON_TYPE:
return {
"description": description,
"anyOf": [
{"type": "array", "items": {"type": "string"}},
{"type": "array", "items": {"type": "integer"}},
{"enum": [None]},
],
"default": None,
}
_hparam_fs_cols_ids_must_keep: JSON_TYPE = {
"description": "Serial numbers of the columns that must be kept irrespective of their feature importance.",
"laleType": "Any", # Found a value `range(0, 20)`
"transient": "alwaysPrint", # since positional argument
"default": [],
}
_hparams_fs_additional_col_count_to_keep: JSON_TYPE = {
"description": "How many columns need to be retained.",
"type": "integer",
"transient": "alwaysPrint", # since positional argument
"minimum": 0,
}
_hparams_fs_ptype: JSON_TYPE = {
"description": "Problem type.",
"enum": ["classification", "regression"],
"transient": "alwaysPrint", # since positional argument
"default": "classification",
}
def _hparams_column_index_list(description: str) -> JSON_TYPE:
return {
"description": description,
"anyOf": [
{"type": "array", "items": {"type": "integer", "minimum": 0}},
{"enum": [None]},
],
"transient": "alwaysPrint", # since positional argument
"default": None,
}
_hparams_transformer_name: JSON_TYPE = {
"description": "A string name that uniquely identifies this transformer from others.",
"anyOf": [{"type": "string"}, {"enum": [None]}],
"transient": "alwaysPrint", # since positional argument
"default": None,
}
def _hparams_fun_pointer(description: str) -> JSON_TYPE:
return {
"description": description,
"laleType": "Any",
"transient": "alwaysPrint", # since positional argument
"default": None,
}
_hparams_datatype_spec: JSON_TYPE = {"type": "array", "items": {"type": "string"}}
def _hparams_datatypes(description: str) -> JSON_TYPE:
return {
"description": description,
"anyOf": [
_hparams_datatype_spec,
{"enum": [None]},
],
"transient": "alwaysPrint", # since positional argument
"default": None,
}
def _hparams_feat_constraints(description: str) -> JSON_TYPE:
return {
"description": description,
"laleType": "Any",
"transient": "alwaysPrint", # since positional argument
"default": None,
}
_hparams_tgraph: JSON_TYPE = {
"description": "Should be the invoking TGraph() object.",
"anyOf": [
{"laleType": "Any"},
{
"enum": [None],
"description": "Passing None will result in some failure to detect some inefficiencies due to lack of caching.",
},
],
"default": None,
}
_hparams_apply_all: JSON_TYPE = {
"description": "Only use applyAll = True. It means that the transformer will enumerate all features (or feature sets) that match the specified criteria and apply the provided function to each.",
"type": "boolean",
"default": True,
}
_hparams_col_names: JSON_TYPE = {
"description": "Names of the feature columns in a list.",
"anyOf": [
{"type": "array", "items": {"type": "string"}},
{"enum": [None]},
],
"default": None,
}
_hparams_col_as_json_objects: JSON_TYPE = {
"description": "Names of the feature columns in a json dict.",
"anyOf": [{"type": "object"}, {"enum": [None]}],
"default": None,
}
_hparams_tans_class: JSON_TYPE = {
"description": "A class that implements fit() and transform() in accordance with the transformation function definition.",
"laleType": "Any",
"transient": "alwaysPrint", # since positional argument
"default": None,
}
| 6,322 | 28.966825 | 198 |
py
|
lale
|
lale-master/lale/lib/autoai_libs/tam.py
|
# Copyright 2020 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import autoai_libs.cognito.transforms.transform_utils
import lale.docstrings
import lale.operators
from ._common_schemas import (
_hparam_col_dtypes,
_hparams_apply_all,
_hparams_col_as_json_objects,
_hparams_col_names,
_hparams_tans_class,
_hparams_tgraph,
_hparams_transformer_name,
)
class _TAMImpl:
def __init__(self, **hyperparams):
self._wrapped_model = autoai_libs.cognito.transforms.transform_utils.TAM(
**hyperparams
)
def fit(self, X, y=None, **fit_params):
self._wrapped_model.fit(X, y, **fit_params)
return self
def transform(self, X):
result = self._wrapped_model.transform(X)
return result
_hyperparams_schema = {
"allOf": [
{
"description": "This first object lists all constructor arguments with their types, but omits constraints for conditional hyperparameters.",
"type": "object",
"additionalProperties": False,
"required": [
"tans_class",
"name",
"tgraph",
"apply_all",
"col_names",
"col_dtypes",
"col_as_json_objects",
],
"relevantToOptimizer": [],
"properties": {
"tans_class": _hparams_tans_class,
"name": _hparams_transformer_name,
"tgraph": _hparams_tgraph,
"apply_all": _hparams_apply_all,
"col_names": _hparams_col_names,
"col_dtypes": _hparam_col_dtypes,
"col_as_json_objects": _hparams_col_as_json_objects,
},
}
]
}
_input_fit_schema = {
"type": "object",
"required": ["X"],
"additionalProperties": False,
"properties": {
"X": {
"type": "array",
"items": {"type": "array", "items": {"laleType": "Any"}},
},
"y": {"laleType": "Any"},
},
}
_input_transform_schema = {
"type": "object",
"required": ["X"],
"additionalProperties": False,
"properties": {
"X": {"type": "array", "items": {"type": "array", "items": {"laleType": "Any"}}}
},
}
_output_transform_schema = {
"description": "Features; the outer array is over samples.",
"type": "array",
"items": {"type": "array", "items": {"laleType": "Any"}},
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """Operator from `autoai_libs`_. Feature transformation that applies at the data level, such as PCA.
.. _`autoai_libs`: https://pypi.org/project/autoai-libs""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.autoai_libs.tam.html",
"import_from": "autoai_libs.cognito.transforms.transform_utils",
"type": "object",
"tags": {"pre": [], "op": ["transformer"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_transform": _input_transform_schema,
"output_transform": _output_transform_schema,
},
}
TAM = lale.operators.make_operator(_TAMImpl, _combined_schemas)
lale.docstrings.set_docstrings(TAM)
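# Example sketch: minimal usage of TAM, assuming autoai_libs and scikit-learn are
# installed. Passing a PCA instance as tans_class and the column metadata below
# are illustrative assumptions.
if __name__ == "__main__":
    import numpy as np
    from sklearn.decomposition import PCA
    _X = np.random.rand(10, 3)
    _trainable = TAM(
        tans_class=PCA(n_components=2),
        name="pca",
        col_names=["a", "b", "c"],
        col_dtypes=[np.dtype("float32")] * 3,
    )
    _trained = _trainable.fit(_X)
    print(_trained.transform(_X).shape)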
| 3,810 | 29.733871 | 152 |
py
|
lale
|
lale-master/lale/lib/autoai_libs/numpy_replace_missing_values.py
|
# Copyright 2020-2023 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import autoai_libs.transformers.exportable
import lale.docstrings
import lale.operators
class _NumpyReplaceMissingValuesImpl:
def __init__(self, **hyperparams):
self._wrapped_model = (
autoai_libs.transformers.exportable.NumpyReplaceMissingValues(**hyperparams)
)
def fit(self, X, y=None):
self._wrapped_model.fit(X, y)
return self
def transform(self, X):
return self._wrapped_model.transform(X)
_hyperparams_schema = {
"allOf": [
{
"description": "This first object lists all constructor arguments with their types, but omits constraints for conditional hyperparameters.",
"type": "object",
"additionalProperties": False,
"required": ["missing_values", "filling_values"],
"relevantToOptimizer": [],
"properties": {
"missing_values": {
"description": 'List of values considered as "missing" for the array.',
"anyOf": [
{"type": "array", "items": {"laleType": "Any"}},
{"enum": [None]},
],
"transient": "alwaysPrint", # since positional argument
"default": None,
},
"filling_values": {
"description": "Value to replace the missing values.",
"laleType": "Any",
"default": None,
},
},
}
]
}
_input_fit_schema = {
"type": "object",
"required": ["X"],
"additionalProperties": False,
"properties": {
"X": { # Handles 1-D arrays as well
"anyOf": [
{"type": "array", "items": {"laleType": "Any"}},
{
"type": "array",
"items": {"type": "array", "items": {"laleType": "Any"}},
},
]
},
"y": {"laleType": "Any"},
},
}
_input_transform_schema = {
"type": "object",
"required": ["X"],
"additionalProperties": False,
"properties": {
"X": { # Handles 1-D arrays as well
"anyOf": [
{"type": "array", "items": {"laleType": "Any"}},
{
"type": "array",
"items": {"type": "array", "items": {"laleType": "Any"}},
},
]
}
},
}
_output_transform_schema = {
"description": "Features; the outer array is over samples.",
"anyOf": [
{"type": "array", "items": {"laleType": "Any"}},
{"type": "array", "items": {"type": "array", "items": {"laleType": "Any"}}},
],
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """Operator from `autoai_libs`_. Given a numpy array and a reference list of missing values for it, replaces missing values with a special value (typically a special missing value such as np.nan).
.. _`autoai_libs`: https://pypi.org/project/autoai-libs""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.autoai_libs.numpy_replace_missing_values.html",
"import_from": "autoai_libs.transformers.exportable",
"type": "object",
"tags": {"pre": [], "op": ["transformer"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_transform": _input_transform_schema,
"output_transform": _output_transform_schema,
},
}
NumpyReplaceMissingValues = lale.operators.make_operator(
_NumpyReplaceMissingValuesImpl, _combined_schemas
)
lale.docstrings.set_docstrings(NumpyReplaceMissingValues)
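# Example sketch: minimal usage of NumpyReplaceMissingValues, assuming
# autoai_libs is installed; the toy data is illustrative.
if __name__ == "__main__":
    import numpy as np
    _X = np.array([["a", "?"], ["b", "c"]], dtype=object)
    _trainable = NumpyReplaceMissingValues(missing_values=["?"], filling_values=np.nan)
    _trained = _trainable.fit(_X)
    print(_trained.transform(_X))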
| 4,326 | 32.542636 | 215 |
py
|
lale
|
lale-master/lale/lib/autoai_libs/opt_standard_scaler.py
|
# Copyright 2020 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import autoai_libs.transformers.exportable
import lale.docstrings
import lale.operators
class _OptStandardScalerImpl:
def __init__(self, **hyperparams):
self._wrapped_model = autoai_libs.transformers.exportable.OptStandardScaler(
**hyperparams
)
def fit(self, X, y=None):
self._wrapped_model.fit(X, y)
return self
def transform(self, X):
return self._wrapped_model.transform(X)
_hyperparams_schema = {
"allOf": [
{
"description": "This first object lists all constructor arguments with their types, but omits constraints for conditional hyperparameters.",
"type": "object",
"additionalProperties": False,
"required": [
"use_scaler_flag",
"num_scaler_copy",
"num_scaler_with_mean",
"num_scaler_with_std",
],
"relevantToOptimizer": [
"use_scaler_flag",
"num_scaler_with_mean",
"num_scaler_with_std",
],
"properties": {
"use_scaler_flag": {
"anyOf": [{"type": "boolean"}, {"enum": [None]}],
"default": True,
"description": "If False, return the input array unchanged.",
},
"num_scaler_copy": {
"anyOf": [{"type": "boolean"}, {"enum": [None]}],
"default": True,
"description": "If False, try to avoid a copy and do inplace scaling instead.",
},
"num_scaler_with_mean": {
"anyOf": [{"type": "boolean"}, {"enum": [None]}],
"default": True,
"description": "If True, center the data before scaling.",
},
"num_scaler_with_std": {
"anyOf": [{"type": "boolean"}, {"enum": [None]}],
"default": True,
"description": "If True, scale the data to unit variance (or equivalently, unit standard deviation).",
},
},
}
]
}
_input_fit_schema = {
"type": "object",
"required": ["X"],
"additionalProperties": False,
"properties": {
"X": { # Handles 1-D arrays as well
"anyOf": [
{"type": "array", "items": {"type": "number"}},
{
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
},
]
},
"y": {},
},
}
_input_transform_schema = {
"type": "object",
"required": ["X"],
"additionalProperties": False,
"properties": {
"X": { # Handles 1-D arrays as well
"anyOf": [
{"type": "array", "items": {"type": "number"}},
{
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
},
]
}
},
}
_output_transform_schema = {
"description": "Features; the outer array is over samples.",
"anyOf": [
{"type": "array", "items": {"type": "number"}},
{"type": "array", "items": {"type": "array", "items": {"type": "number"}}},
],
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """Operator from `autoai_libs`_. Acts like an optional StandardScaler_.
.. _`autoai_libs`: https://pypi.org/project/autoai-libs
.. _StandardScaler: https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.StandardScaler.html""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.autoai_libs.opt_standard_scaler.html",
"import_from": "autoai_libs.transformers.exportable",
"type": "object",
"tags": {"pre": [], "op": ["transformer"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_transform": _input_transform_schema,
"output_transform": _output_transform_schema,
},
}
OptStandardScaler = lale.operators.make_operator(
_OptStandardScalerImpl, _combined_schemas
)
lale.docstrings.set_docstrings(OptStandardScaler)
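# Example sketch: minimal usage of OptStandardScaler, assuming autoai_libs is
# installed; the toy data is illustrative.
if __name__ == "__main__":
    import numpy as np
    _X = np.array([[1.0, 10.0], [2.0, 20.0], [3.0, 30.0]])
    _trainable = OptStandardScaler(use_scaler_flag=True)
    _trained = _trainable.fit(_X)
    print(_trained.transform(_X))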
| 4,898 | 32.786207 | 152 |
py
|
lale
|
lale-master/lale/lib/autoai_libs/date_transformer.py
|
# Copyright 2022 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as pd
from autoai_libs.transformers.date_time.date_time_transformer import (
DateTransformer as model_to_be_wrapped,
)
import lale.docstrings
import lale.operators
from ._common_schemas import _hparam_activate_flag_active, _hparam_column_headers_list
class _DateTransformerImpl:
def __init__(self, **hyperparams):
self._wrapped_model = model_to_be_wrapped(**hyperparams)
def fit(self, X, y=None):
if isinstance(X, pd.DataFrame):
X = X.to_numpy()
self._wrapped_model.fit(X, y)
return self
def transform(self, X):
if isinstance(X, pd.DataFrame):
X = X.to_numpy()
return self._wrapped_model.transform(X)
_hyperparams_schema = {
"allOf": [
{
"description": "This first object lists all constructor arguments with their types, but omits constraints for conditional hyperparameters.",
"type": "object",
"additionalProperties": False,
"required": [
"options",
"delete_source_columns",
"column_headers_list",
"missing_values_reference_list",
"activate_flag",
"float32_processing_flag",
],
"relevantToOptimizer": [],
"properties": {
"options": {
"description": """List containing the types of new feature columns to add for each detected datetime column.
Default is None, in this case all the above options are applied""",
"anyOf": [
{
"type": "array",
"items": {
"enum": [
"all",
"Datetime",
"DateToFloatTimestamp",
"DateToTimestamp",
"Timestamp",
"FloatTimestamp",
"DayOfWeek",
"DayOfMonth",
"Hour",
"DayOfYear",
"Week",
"Month",
"Year",
"Second",
"Minute",
]
},
},
{"enum": [None]},
],
"default": None,
},
"delete_source_columns": {
"description": "Flag determining whether the original date columns will be deleted or not.",
"type": "boolean",
"default": True,
},
"column_headers_list": _hparam_column_headers_list(
description="List containing the column names of the input array"
),
"missing_values_reference_list": {
"description": "List containing missing values of the input array",
"anyOf": [
{"type": "array", "items": {"laleType": "Any"}},
{"enum": [None]},
],
"default": None,
},
"activate_flag": _hparam_activate_flag_active,
"float32_processing_flag": {
"description": "Flag that determines whether timestamps will be float32-compatible.",
"type": "boolean",
"default": True,
},
},
}
]
}
_input_fit_schema = {
"type": "object",
"required": ["X"],
"additionalProperties": False,
"properties": {
"X": { # Handles 1-D arrays as well
"anyOf": [
{"type": "array", "items": {"laleType": "Any"}},
{
"type": "array",
"items": {"type": "array", "items": {"laleType": "Any"}},
},
]
},
"y": {"laleType": "Any"},
},
}
_input_transform_schema = {
"type": "object",
"required": ["X"],
"additionalProperties": False,
"properties": {
"X": { # Handles 1-D arrays as well
"anyOf": [
{"type": "array", "items": {"laleType": "Any"}},
{
"type": "array",
"items": {"type": "array", "items": {"laleType": "Any"}},
},
]
}
},
}
_output_transform_schema = {
"description": "Features; the outer array is over samples.",
"anyOf": [
{"type": "array", "items": {"laleType": "Any"}},
{"type": "array", "items": {"type": "array", "items": {"laleType": "Any"}}},
],
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """Operator from `autoai_libs`_. Detects date columns on an input array and adds new feature columns for each detected date column.
.. _`autoai_libs`: https://pypi.org/project/autoai-libs""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.autoai_libs.date_transformer.html",
"import_from": "autoai_libs.transformers.date_time.date_time_transformer",
"type": "object",
"tags": {"pre": [], "op": ["transformer"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_transform": _input_transform_schema,
"output_transform": _output_transform_schema,
},
}
DateTransformer = lale.operators.make_operator(_DateTransformerImpl, _combined_schemas)
lale.docstrings.set_docstrings(DateTransformer)
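# Example sketch: minimal usage of DateTransformer, assuming autoai_libs is
# installed. The date strings and the chosen options are illustrative assumptions.
if __name__ == "__main__":
    import numpy as np
    _X = np.array([["2021-01-01", 1.0], ["2021-06-15", 2.0]], dtype=object)
    _trainable = DateTransformer(options=["Year", "Month"], delete_source_columns=True)
    _trained = _trainable.fit(_X)
    print(_trained.transform(_X))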
| 6,493 | 35.27933 | 152 |
py
|
lale
|
lale-master/lale/lib/autoai_libs/num_imputer.py
|
# Copyright 2020-2023 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import autoai_libs.transformers.exportable
import numpy as np
import lale.docstrings
import lale.operators
from ._common_schemas import (
_hparam_activate_flag_unmodified,
_hparam_sklearn_version_family,
)
class _NumImputerImpl:
def __init__(self, *args, **hyperparams):
self._wrapped_model = autoai_libs.transformers.exportable.NumImputer(
*args, **hyperparams
)
def fit(self, X, y=None, **fit_params):
self._wrapped_model.fit(X, y, **fit_params)
return self
def transform(self, X):
return self._wrapped_model.transform(X)
_hyperparams_schema = {
"allOf": [
{
"description": "This first object lists all constructor arguments with their types, but omits constraints for conditional hyperparameters.",
"type": "object",
"additionalProperties": False,
"required": ["strategy", "missing_values", "activate_flag"],
"relevantToOptimizer": ["strategy"],
"properties": {
"strategy": {
"description": "The imputation strategy.",
"enum": ["mean", "median", "most_frequent"],
"transient": "alwaysPrint", # since positional argument
"default": "mean",
},
"missing_values": {
"description": "The placeholder for the missing values. All occurrences of missing_values will be imputed.",
"anyOf": [
{"laleType": "Any"},
{
"description": "For missing values encoded as np.nan.",
"enum": [np.nan],
},
],
"transient": "alwaysPrint", # since positional argument
"default": np.nan,
},
"activate_flag": _hparam_activate_flag_unmodified,
},
}
]
}
_input_fit_schema = {
"type": "object",
"required": ["X"],
"additionalProperties": False,
"properties": {
"X": { # Handles 1-D arrays as well
"anyOf": [
{"type": "array", "items": {"laleType": "Any"}},
{
"type": "array",
"items": {"type": "array", "items": {"laleType": "Any"}},
},
]
},
"y": {"laleType": "Any"},
},
}
_input_transform_schema = {
"type": "object",
"required": ["X"],
"additionalProperties": False,
"properties": {
"X": { # Handles 1-D arrays as well
"anyOf": [
{"type": "array", "items": {"laleType": "Any"}},
{
"type": "array",
"items": {"type": "array", "items": {"laleType": "Any"}},
},
]
}
},
}
_output_transform_schema = {
"description": "Features; the outer array is over samples.",
"anyOf": [
{"type": "array", "items": {"laleType": "Any"}},
{"type": "array", "items": {"type": "array", "items": {"laleType": "Any"}}},
],
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """Operator from `autoai_libs`_. Missing value imputation for numerical features, currently internally uses the sklearn SimpleImputer_.
.. _`autoai_libs`: https://pypi.org/project/autoai-libs
.. _SimpleImputer: https://scikit-learn.org/0.20/modules/generated/sklearn.impute.SimpleImputer.html#sklearn-impute-simpleimputer""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.autoai_libs.num_imputer.html",
"import_from": "autoai_libs.transformers.exportable",
"type": "object",
"tags": {"pre": [], "op": ["transformer"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_transform": _input_transform_schema,
"output_transform": _output_transform_schema,
},
}
NumImputer = lale.operators.make_operator(_NumImputerImpl, _combined_schemas)
autoai_libs_version_str = getattr(autoai_libs, "__version__", None)
if isinstance(autoai_libs_version_str, str): # beware sphinx _MockModule
import typing
from packaging import version
from lale.schemas import AnyOf, Array, Enum, Float, Not, Null, Object, String
autoai_libs_version = version.parse(autoai_libs_version_str)
if autoai_libs_version >= version.Version("1.12.18"):
NumImputer = typing.cast(
lale.operators.PlannedIndividualOp,
NumImputer.customize_schema(
set_as_available=True,
constraint=[
AnyOf(
desc="fill_value and fill_values cannot both be specified",
forOptimizer=False,
types=[Object(fill_value=Null()), Object(fill_values=Null())],
),
AnyOf(
desc="if strategy=constants, the fill_values cannot be None",
forOptimizer=False,
types=[
Object(strategy=Not(Enum(["constants"]))),
Not(Object(fill_values=Null())),
],
),
],
fill_value=AnyOf(
types=[Float(), String(), Enum(values=[np.nan]), Null()],
desc="The placeholder for fill value used in constant strategy",
default=None,
),
fill_values=AnyOf(
types=[
Array(
items=AnyOf(
types=[Float(), String(), Enum(values=[np.nan]), Null()]
)
),
Null(),
],
desc="The placeholder for fill values used in constants strategy",
default=None,
),
missing_values=AnyOf(
types=[Float(), String(), Enum(values=[np.nan]), Null()],
desc="The placeholder for the missing values. All occurrences of missing_values will be imputed.",
default=np.nan,
),
sklearn_version_family=_hparam_sklearn_version_family,
strategy=AnyOf(
types=[
Enum(
values=["mean"],
desc="Replace using the mean along each column. Can only be used with numeric data.",
),
Enum(
values=["median"],
desc="Replace using the median along each column. Can only be used with numeric data.",
),
Enum(
values=["most_frequent"],
desc="Replace using most frequent value each column. Used with strings or numeric data.",
),
Enum(
values=["constant"],
desc="Replace with fill_value. Can be used with strings or numeric data.",
),
Enum(
values=["constants"],
desc="Replace missing values in columns with values in fill_values list. Can be used with list of strings or numeric data.",
),
],
desc="The imputation strategy.",
default="mean",
),
),
)
lale.docstrings.set_docstrings(NumImputer)
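# Example sketch: minimal usage of NumImputer, assuming autoai_libs is installed;
# the toy data is illustrative.
if __name__ == "__main__":
    _X = np.array([[1.0, 2.0], [np.nan, 4.0], [5.0, np.nan]])
    _trainable = NumImputer(strategy="median", missing_values=np.nan)
    _trained = _trainable.fit(_X)
    print(_trained.transform(_X))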
| 8,477 | 37.536364 | 154 |
py
|
lale
|
lale-master/lale/lib/autoai_libs/compress_strings.py
|
# Copyright 2020 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import autoai_libs.transformers.exportable
import lale.docstrings
import lale.operators
from ._common_schemas import _hparam_activate_flag_unmodified, _hparam_dtypes_list
class _CompressStringsImpl:
def __init__(self, **hyperparams):
self._wrapped_model = autoai_libs.transformers.exportable.CompressStrings(
**hyperparams
)
def fit(self, X, y=None):
self._wrapped_model.fit(X, y)
return self
def transform(self, X):
return self._wrapped_model.transform(X)
_hyperparams_schema = {
"allOf": [
{
"description": "This first object lists all constructor arguments with their types, but omits constraints for conditional hyperparameters.",
"type": "object",
"additionalProperties": False,
"required": [
"compress_type",
"dtypes_list",
"misslist_list",
"missing_values_reference_list",
"activate_flag",
],
"relevantToOptimizer": ["compress_type", "activate_flag"],
"properties": {
"compress_type": {
"description": "Type of string compression: `string` for removing spaces from a string and `hash` for creating an int hash, used when there are columns with strings and cat_imp_strategy=`most_frequent`.",
"enum": ["string", "hash"],
"default": "string",
},
"dtypes_list": _hparam_dtypes_list,
"misslist_list": {
"anyOf": [
{
"description": "List containing lists of missing values of each column of the input numpy array X.",
"type": "array",
"items": {"type": "array", "items": {"laleType": "Any"}},
},
{
"description": "If None, the missing values of each column are discovered.",
"enum": [None],
},
],
"default": None,
},
"missing_values_reference_list": {
"anyOf": [
{
"description": "Reference list of missing values in the input numpy array X.",
"type": "array",
"items": {"laleType": "Any"},
},
{
"description": "If None, the missing values of each column are discovered.",
"enum": [None],
},
],
"default": None,
},
"activate_flag": _hparam_activate_flag_unmodified,
},
}
]
}
_input_fit_schema = {
"type": "object",
"required": ["X"],
"additionalProperties": False,
"properties": {
"X": { # Handles 1-D arrays as well
"anyOf": [
{"type": "array", "items": {"laleType": "Any"}},
{
"type": "array",
"items": {"type": "array", "items": {"laleType": "Any"}},
},
]
},
"y": {"laleType": "Any"},
},
}
_input_transform_schema = {
"type": "object",
"required": ["X"],
"additionalProperties": False,
"properties": {
"X": { # Handles 1-D arrays as well
"anyOf": [
{"type": "array", "items": {"laleType": "Any"}},
{
"type": "array",
"items": {"type": "array", "items": {"laleType": "Any"}},
},
]
}
},
}
_output_transform_schema = {
"description": "Features; the outer array is over samples.",
"anyOf": [
{"type": "array", "items": {"laleType": "Any"}},
{"type": "array", "items": {"type": "array", "items": {"laleType": "Any"}}},
],
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """Operator from `autoai_libs`_. Removes spaces and special characters from string columns of a numpy array.
.. _`autoai_libs`: https://pypi.org/project/autoai-libs""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.autoai_libs.compress_strings.html",
"import_from": "autoai_libs.transformers.exportable",
"type": "object",
"tags": {"pre": [], "op": ["transformer"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_transform": _input_transform_schema,
"output_transform": _output_transform_schema,
},
}
CompressStrings = lale.operators.make_operator(_CompressStringsImpl, _combined_schemas)
lale.docstrings.set_docstrings(CompressStrings)
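# Example sketch: minimal usage of CompressStrings, assuming autoai_libs is
# installed. The toy data and the hash compression choice are illustrative
# assumptions; with the default None settings, column dtypes and missing-value
# lists are discovered from the data.
if __name__ == "__main__":
    import numpy as np
    _X = np.array([["New York"], ["San Francisco"], ["New York"]], dtype=object)
    _trainable = CompressStrings(compress_type="hash")
    _trained = _trainable.fit(_X)
    print(_trained.transform(_X))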
| 5,574 | 34.737179 | 224 |
py
|
lale
|
lale-master/lale/lib/autoai_libs/fs1.py
|
# Copyright 2020 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import autoai_libs.cognito.transforms.transform_utils
import lale.docstrings
import lale.operators
from ._common_schemas import (
_hparam_fs_cols_ids_must_keep,
_hparams_fs_additional_col_count_to_keep,
_hparams_fs_ptype,
)
class _FS1Impl:
def __init__(self, **hyperparams):
self._wrapped_model = autoai_libs.cognito.transforms.transform_utils.FS1(
**hyperparams
)
def fit(self, X, y=None, **fit_params):
self._wrapped_model.fit(X, y)
return self
def transform(self, X):
result = self._wrapped_model.transform(X)
try:
if hasattr(self, "column_names") and len(self.column_names) == len(
self._wrapped_model.cols_to_keep_final_
):
self.column_names = [
self.column_names[i]
for i in self._wrapped_model.cols_to_keep_final_
]
if hasattr(self, "column_dtypes") and len(self.column_dtypes) == len(
self._wrapped_model.cols_to_keep_final_
):
self.column_dtypes = [
self.column_dtypes[i]
for i in self._wrapped_model.cols_to_keep_final_
]
except Exception: # nosec
pass
return result
def set_meta_data(self, meta_data_dict):
if "column_names" in meta_data_dict.keys():
self.column_names = meta_data_dict["column_names"]
if "column_dtypes" in meta_data_dict.keys():
self.column_dtypes = meta_data_dict["column_dtypes"]
def get_transform_meta_output(self):
return_dict = {}
if hasattr(self, "column_names"):
return_dict["column_names"] = self.column_names
if hasattr(self, "column_dtypes"):
return_dict["column_dtypes"] = self.column_dtypes
return return_dict
_hyperparams_schema = {
"allOf": [
{
"description": "This first object lists all constructor arguments with their types, but omits constraints for conditional hyperparameters.",
"type": "object",
"additionalProperties": False,
"required": ["cols_ids_must_keep", "additional_col_count_to_keep", "ptype"],
"relevantToOptimizer": [],
"properties": {
"cols_ids_must_keep": _hparam_fs_cols_ids_must_keep,
"additional_col_count_to_keep": _hparams_fs_additional_col_count_to_keep,
"ptype": _hparams_fs_ptype,
},
}
]
}
_input_fit_schema = {
"type": "object",
"required": ["X", "y"],
"additionalProperties": False,
"properties": {
"X": {
"type": "array",
"items": {"type": "array", "items": {"laleType": "Any"}},
},
"y": {
"type": "array",
"items": {"laleType": "Any"},
"description": "Target values.",
},
},
}
_input_transform_schema = {
"type": "object",
"required": ["X"],
"additionalProperties": False,
"properties": {
"X": {"type": "array", "items": {"type": "array", "items": {"laleType": "Any"}}}
},
}
_output_transform_schema = {
"description": "Features; the outer array is over samples.",
"type": "array",
"items": {"type": "array", "items": {"laleType": "Any"}},
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """Operator from `autoai_libs`_. Feature selection, type 1 (using pairwise correlation between each feature and target.)
.. _`autoai_libs`: https://pypi.org/project/autoai-libs""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.autoai_libs.fs1.html",
"import_from": "autoai_libs.cognito.transforms.transform_utils",
"type": "object",
"tags": {"pre": [], "op": ["transformer"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_transform": _input_transform_schema,
"output_transform": _output_transform_schema,
},
}
FS1 = lale.operators.make_operator(_FS1Impl, _combined_schemas)
lale.docstrings.set_docstrings(FS1)
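# Example sketch: minimal usage of FS1, assuming autoai_libs is installed.
# The toy data and hyperparameter values are illustrative assumptions.
if __name__ == "__main__":
    import numpy as np
    _X = np.random.rand(20, 5)
    _y = np.random.randint(0, 2, 20)
    _trainable = FS1(
        cols_ids_must_keep=[0], additional_col_count_to_keep=2, ptype="classification"
    )
    _trained = _trainable.fit(_X, _y)
    print(_trained.transform(_X).shape)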
| 4,815 | 32.678322 | 152 |
py
|
lale
|
lale-master/lale/lib/autoai_libs/ta1.py
|
# Copyright 2020 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import autoai_libs.cognito.transforms.transform_utils
import numpy as np
import lale.docstrings
import lale.operators
from ._common_schemas import (
_hparam_col_dtypes,
_hparams_apply_all,
_hparams_col_as_json_objects,
_hparams_col_names,
_hparams_datatypes,
_hparams_feat_constraints,
_hparams_fun_pointer,
_hparams_tgraph,
_hparams_transformer_name,
)
class _TA1Impl:
def __init__(self, **hyperparams):
self._hyperparams = hyperparams
self._wrapped_model = autoai_libs.cognito.transforms.transform_utils.TA1(
**hyperparams
)
def fit(self, X, y=None, **fit_params):
num_columns = X.shape[1]
col_dtypes = self._hyperparams["col_dtypes"]
if len(col_dtypes) < num_columns:
if hasattr(self, "column_names") and len(self.column_names) == num_columns:
col_names = self.column_names
else:
col_names = self._hyperparams["col_names"]
for i in range(num_columns - len(col_dtypes)):
col_names.append("col" + str(i))
if (
hasattr(self, "column_dtypes")
and len(self.column_dtypes) == num_columns
):
col_dtypes = self.column_dtypes
else:
for i in range(num_columns - len(col_dtypes)):
col_dtypes.append(np.float32)
fit_params["col_names"] = col_names
fit_params["col_dtypes"] = col_dtypes
self._wrapped_model.fit(X, y, **fit_params)
return self
def transform(self, X):
result = self._wrapped_model.transform(X)
return result
def get_transform_meta_output(self):
return_meta_data_dict = {}
if self._wrapped_model.new_column_names_ is not None:
final_column_names = []
final_column_names.extend(self._wrapped_model.col_names_)
final_column_names.extend(self._wrapped_model.new_column_names_)
return_meta_data_dict["column_names"] = final_column_names
if self._wrapped_model.new_column_dtypes_ is not None:
final_column_dtypes = []
final_column_dtypes.extend(self._wrapped_model.col_dtypes)
final_column_dtypes.extend(self._wrapped_model.new_column_dtypes_)
return_meta_data_dict["column_dtypes"] = final_column_dtypes
return return_meta_data_dict
def set_meta_data(self, meta_data_dict):
if "column_names" in meta_data_dict.keys():
self.column_names = meta_data_dict["column_names"]
if "column_dtypes" in meta_data_dict.keys():
self.column_dtypes = meta_data_dict["column_dtypes"]
_hyperparams_schema = {
"allOf": [
{
"description": "This first object lists all constructor arguments with their types, but omits constraints for conditional hyperparameters.",
"type": "object",
"additionalProperties": False,
"required": [
"fun",
"name",
"datatypes",
"feat_constraints",
"tgraph",
"apply_all",
"col_names",
"col_dtypes",
"col_as_json_objects",
],
"relevantToOptimizer": [],
"properties": {
"fun": _hparams_fun_pointer(description="The function pointer."),
"name": _hparams_transformer_name,
"datatypes": _hparams_datatypes(
description="List of datatypes that are valid input to the transformer function (`numeric`, `float`, `int`, `integer`)."
),
"feat_constraints": _hparams_feat_constraints(
description="All constraints that must be satisfied by a column to be considered a valid input to this transform."
),
"tgraph": _hparams_tgraph,
"apply_all": _hparams_apply_all,
"col_names": _hparams_col_names,
"col_dtypes": _hparam_col_dtypes,
"col_as_json_objects": _hparams_col_as_json_objects,
},
}
]
}
_input_fit_schema = {
"type": "object",
"required": ["X"],
"additionalProperties": False,
"properties": {
"X": {
"type": "array",
"items": {"type": "array", "items": {"laleType": "Any"}},
},
"y": {"laleType": "Any"},
},
}
_input_transform_schema = {
"type": "object",
"required": ["X"],
"additionalProperties": False,
"properties": {
"X": {"type": "array", "items": {"type": "array", "items": {"laleType": "Any"}}}
},
}
_output_transform_schema = {
"description": "Features; the outer array is over samples.",
"type": "array",
"items": {"type": "array", "items": {"laleType": "Any"}},
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """Operator from `autoai_libs`_. Feature transformation for unary stateless functions, such as square, log, etc.
.. _`autoai_libs`: https://pypi.org/project/autoai-libs""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.autoai_libs.ta1.html",
"import_from": "autoai_libs.cognito.transforms.transform_utils",
"type": "object",
"tags": {"pre": [], "op": ["transformer"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_transform": _input_transform_schema,
"output_transform": _output_transform_schema,
},
}
TA1 = lale.operators.make_operator(_TA1Impl, _combined_schemas)
lale.docstrings.set_docstrings(TA1)
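# Illustrative usage sketch, not part of ta1.py: wrap a unary stateless function
# (here np.square) as a feature transform. The column names, dtypes, and empty
# feat_constraints list are assumptions for demonstration only.
if __name__ == "__main__":
    square = TA1(
        fun=np.square,
        name="square",
        datatypes=["numeric"],
        feat_constraints=[],
        col_names=["f0", "f1"],
        col_dtypes=[np.dtype("float32"), np.dtype("float32")],
    )
    # square.fit(train_X).transform(train_X) would append squared versions of the
    # eligible numeric columns to the original feature matrix.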
| 6,356 | 34.915254 | 152 |
py
|
lale
|
lale-master/lale/lib/autoai_libs/tb1.py
|
# Copyright 2020 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import autoai_libs.cognito.transforms.transform_utils
import lale.docstrings
import lale.operators
from ._common_schemas import (
_hparam_col_dtypes,
_hparams_apply_all,
_hparams_col_as_json_objects,
_hparams_col_names,
_hparams_datatypes,
_hparams_feat_constraints,
_hparams_tans_class,
_hparams_tgraph,
_hparams_transformer_name,
)
class _TB1Impl:
def __init__(self, **hyperparams):
self._wrapped_model = autoai_libs.cognito.transforms.transform_utils.TB1(
**hyperparams
)
def fit(self, X, y=None, **fit_params):
self._wrapped_model.fit(X, y, **fit_params)
return self
def transform(self, X):
result = self._wrapped_model.transform(X)
return result
_hyperparams_schema = {
"allOf": [
{
"description": "This first object lists all constructor arguments with their types, but omits constraints for conditional hyperparameters.",
"type": "object",
"additionalProperties": False,
"required": [
"tans_class",
"name",
"datatypes",
"feat_constraints",
"tgraph",
"apply_all",
"col_names",
"col_dtypes",
"col_as_json_objects",
],
"relevantToOptimizer": [],
"properties": {
"tans_class": _hparams_tans_class,
"name": _hparams_transformer_name,
"datatypes": _hparams_datatypes(
description="List of datatypes that are valid input to the transformer function (numeric, float, int, etc.)."
),
"feat_constraints": _hparams_feat_constraints(
description="All constraints that must be satisfied by a column to be considered a valid input to this transform."
),
"tgraph": _hparams_tgraph,
"apply_all": _hparams_apply_all,
"col_names": _hparams_col_names,
"col_dtypes": _hparam_col_dtypes,
"col_as_json_objects": _hparams_col_as_json_objects,
},
}
]
}
_input_fit_schema = {
"type": "object",
"required": ["X"],
"additionalProperties": False,
"properties": {
"X": {
"type": "array",
"items": {"type": "array", "items": {"laleType": "Any"}},
},
"y": {"laleType": "Any"},
},
}
_input_transform_schema = {
"type": "object",
"required": ["X"],
"additionalProperties": False,
"properties": {
"X": {"type": "array", "items": {"type": "array", "items": {"laleType": "Any"}}}
},
}
_output_transform_schema = {
"description": "Features; the outer array is over samples.",
"type": "array",
"items": {"type": "array", "items": {"laleType": "Any"}},
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """Operator from `autoai_libs`_. Feature transformation for unary state-based transformations (with fit/transform), such as frequent count.
.. _`autoai_libs`: https://pypi.org/project/autoai-libs""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.autoai_libs.tb1.html",
"import_from": "autoai_libs.cognito.transforms.transform_utils",
"type": "object",
"tags": {"pre": [], "op": ["transformer"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_transform": _input_transform_schema,
"output_transform": _output_transform_schema,
},
}
TB1 = lale.operators.make_operator(_TB1Impl, _combined_schemas)
lale.docstrings.set_docstrings(TB1)
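# Illustrative usage sketch, not part of tb1.py: TB1 wraps a class that exposes
# fit/transform and applies it to eligible columns. StandardScaler is used here
# purely as an illustrative stand-in; real AutoAI pipelines pass cognito classes.
if __name__ == "__main__":
    from sklearn.preprocessing import StandardScaler
    per_column_scaler = TB1(
        tans_class=StandardScaler,
        name="stdscaler",
        datatypes=["numeric"],
        feat_constraints=[],
    )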
| 4,384 | 31.723881 | 158 |
py
|
lale
|
lale-master/lale/lib/autoai_libs/numpy_replace_unknown_values.py
|
# Copyright 2020 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import autoai_libs.transformers.exportable
import lale.docstrings
import lale.operators
class _NumpyReplaceUnknownValuesImpl:
def __init__(self, **hyperparams):
self._wrapped_model = (
autoai_libs.transformers.exportable.NumpyReplaceUnknownValues(**hyperparams)
)
def fit(self, X, y=None):
self._wrapped_model.fit(X, y)
return self
def transform(self, X):
return self._wrapped_model.transform(X)
_hyperparams_schema = {
"allOf": [
{
"description": "This first object lists all constructor arguments with their types, but omits constraints for conditional hyperparameters.",
"type": "object",
"additionalProperties": False,
"required": [
"known_values_list",
"filling_values",
"missing_values_reference_list",
"filling_values_list",
],
"relevantToOptimizer": [],
"properties": {
"known_values_list": {
"description": "Reference list of lists of known values for each column.",
"anyOf": [
{"type": "array", "items": {"laleType": "Any"}},
{"enum": [None]},
],
"transient": True,
"default": None,
},
"filling_values": {
"description": "Special value assigned to unknown values.",
"laleType": "Any",
"default": None,
},
"missing_values_reference_list": {
"description": "Reference list of missing values.",
"anyOf": [
{"type": "array", "items": {"laleType": "Any"}},
{"enum": [None]},
],
"default": None,
},
"filling_values_list": {
"description": "list of special value assigned to unknown values.",
"anyOf": [
{"type": "array", "items": {"laleType": "Any"}},
{"enum": [None]},
],
"default": None,
},
},
}
]
}
_input_fit_schema = {
"type": "object",
"required": ["X"],
"additionalProperties": False,
"properties": {
"X": { # Handles 1-D arrays as well
"anyOf": [
{"type": "array", "items": {"laleType": "Any"}},
{
"type": "array",
"items": {"type": "array", "items": {"laleType": "Any"}},
},
]
},
"y": {"laleType": "Any"},
},
}
_input_transform_schema = {
"type": "object",
"required": ["X"],
"additionalProperties": False,
"properties": {
"X": { # Handles 1-D arrays as well
"anyOf": [
{"type": "array", "items": {"laleType": "Any"}},
{
"type": "array",
"items": {"type": "array", "items": {"laleType": "Any"}},
},
]
}
},
}
_output_transform_schema = {
"description": "Features; the outer array is over samples.",
"anyOf": [
{"type": "array", "items": {"laleType": "Any"}},
{"type": "array", "items": {"type": "array", "items": {"laleType": "Any"}}},
],
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """Operator from `autoai_libs`_. Given a numpy array and a reference list of known values for each column, replaces values that are not part of a reference list with a special value (typically np.nan). This is typically used to remove labels for columns in a test dataset that have not been seen in the corresponding columns of the training dataset.
.. _`autoai_libs`: https://pypi.org/project/autoai-libs""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.autoai_libs.numpy_replace_unknown_values.html",
"import_from": "autoai_libs.transformers.exportable",
"type": "object",
"tags": {"pre": [], "op": ["transformer"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_transform": _input_transform_schema,
"output_transform": _output_transform_schema,
},
}
NumpyReplaceUnknownValues = lale.operators.make_operator(
_NumpyReplaceUnknownValuesImpl, _combined_schemas
)
lale.docstrings.set_docstrings(NumpyReplaceUnknownValues)
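# Illustrative usage sketch, not part of this module: the reference and filling
# values below are assumptions for demonstration. At transform time, any value
# that is not in the per-column reference list is replaced by filling_values
# (np.nan here).
if __name__ == "__main__":
    import numpy as np
    replace_unknown = NumpyReplaceUnknownValues(
        filling_values=np.nan,
        missing_values_reference_list=["?", "", np.nan],
    )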
| 5,287 | 34.253333 | 368 |
py
|
lale
|
lale-master/lale/lib/autoai_libs/util.py
|
# Copyright 2020 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import lale.lib.sklearn
def wrap_pipeline_segments(orig_pipeline):
"""Wrap segments of the pipeline to mark them for pretty_print() and visualize().
If the pipeline does not look like it came from AutoAI, just return it
unchanged. Otherwise, find the NumpyPermuteArray operator. Everything
before that operator is preprocessing. Everything after
NumpyPermuteArray but before the final estimator is feature
engineering."""
from lale.lib.autoai_libs.numpy_permute_array import NumpyPermuteArray
if len(orig_pipeline.steps_list()) <= 2:
return orig_pipeline
estimator = orig_pipeline.get_last()
prep = orig_pipeline.remove_last()
cognito = None
PREP_END = NumpyPermuteArray.class_name()
while True:
last = prep.get_last()
if last is None or not last.class_name().startswith("lale.lib.autoai_libs."):
return orig_pipeline
if last.class_name() == PREP_END:
break
prep = prep.remove_last()
if cognito is None:
cognito = last
else:
cognito = last >> cognito
prep_wrapped = lale.lib.sklearn.Pipeline(steps=[("preprocessing_pipeline", prep)])
if cognito is None:
result = prep_wrapped >> estimator
else:
cognito_wrapped = lale.lib.sklearn.Pipeline(
steps=[("feature_engineering_pipeline", cognito)]
)
result = prep_wrapped >> cognito_wrapped >> estimator
return result
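# Illustrative usage sketch, not part of util.py. `autoai_pipeline` below is a
# hypothetical name for a pipeline imported from an AutoAI experiment:
#
#     wrapped = wrap_pipeline_segments(autoai_pipeline)
#     wrapped.visualize()  # shows "preprocessing_pipeline" and
#                          # "feature_engineering_pipeline" as grouped segments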
| 2,056 | 37.092593 | 86 |
py
|
lale
|
lale-master/lale/lib/autoai_libs/column_selector.py
|
# Copyright 2021 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import autoai_libs.transformers.exportable
import pandas as pd
import lale.docstrings
import lale.operators
from ._common_schemas import _hparam_activate_flag_active, _hparams_column_index_list
class _ColumnSelectorImpl:
def __init__(self, **hyperparams):
self._wrapped_model = autoai_libs.transformers.exportable.ColumnSelector(
**hyperparams
)
def fit(self, X, y=None):
if isinstance(X, pd.DataFrame):
X = X.to_numpy()
self._wrapped_model.fit(X, y)
return self
def transform(self, X):
if isinstance(X, pd.DataFrame):
X = X.to_numpy()
return self._wrapped_model.transform(X)
_hyperparams_schema = {
"allOf": [
{
"description": "This first object lists all constructor arguments with their types, but omits constraints for conditional hyperparameters.",
"type": "object",
"additionalProperties": False,
"required": ["columns_indices_list", "activate_flag"],
"relevantToOptimizer": [],
"properties": {
"columns_indices_list": _hparams_column_index_list(
description="List of indices to select numpy columns or list elements."
),
"activate_flag": _hparam_activate_flag_active,
},
}
]
}
_input_fit_schema = {
"type": "object",
"required": ["X"],
"additionalProperties": False,
"properties": {
"X": { # Handles 1-D arrays as well
"anyOf": [
{"type": "array", "items": {"laleType": "Any"}},
{
"type": "array",
"items": {"type": "array", "items": {"laleType": "Any"}},
},
]
},
"y": {"laleType": "Any"},
},
}
_input_transform_schema = {
"type": "object",
"required": ["X"],
"additionalProperties": False,
"properties": {
"X": { # Handles 1-D arrays as well
"anyOf": [
{"type": "array", "items": {"laleType": "Any"}},
{
"type": "array",
"items": {"type": "array", "items": {"laleType": "Any"}},
},
]
}
},
}
_output_transform_schema = {
"description": "Features; the outer array is over samples.",
"anyOf": [
{"type": "array", "items": {"laleType": "Any"}},
{"type": "array", "items": {"type": "array", "items": {"laleType": "Any"}}},
],
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """Operator from `autoai_libs`_. Selects a subset of columns for a given numpy array or subset of elements of a list.
.. _`autoai_libs`: https://pypi.org/project/autoai-libs""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.autoai_libs.column_selector.html",
"import_from": "autoai_libs.transformers.exportable",
"type": "object",
"tags": {"pre": [], "op": ["transformer"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_transform": _input_transform_schema,
"output_transform": _output_transform_schema,
},
}
ColumnSelector = lale.operators.make_operator(_ColumnSelectorImpl, _combined_schemas)
lale.docstrings.set_docstrings(ColumnSelector)
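# Illustrative usage sketch, not part of column_selector.py; the index list and
# toy data are assumptions for demonstration.
if __name__ == "__main__":
    import numpy as np
    keep_three = ColumnSelector(columns_indices_list=[0, 2, 3])
    X = np.arange(20).reshape(4, 5)
    # With the default activate_flag=True, transform should keep only columns 0, 2, and 3.
    print(keep_three.fit(X).transform(X))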
| 4,022 | 31.443548 | 152 |
py
|
lale
|
lale-master/lale/lib/autoai_libs/float32_transform.py
|
# Copyright 2020 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import autoai_libs.transformers.exportable
import numpy as np
import pandas as pd
import lale.datasets.data_schemas
import lale.docstrings
import lale.operators
from ._common_schemas import _hparam_activate_flag_unmodified
class _float32_transformImpl:
def __init__(self, **hyperparams):
self._hyperparams = hyperparams
self._wrapped_model = autoai_libs.transformers.exportable.float32_transform(
**hyperparams
)
def fit(self, X, y=None):
self._wrapped_model.fit(X, y)
return self
def transform(self, X):
raw = self._wrapped_model.transform(X)
if isinstance(raw, (np.ndarray, pd.DataFrame)):
s_X = lale.datasets.data_schemas.to_schema(X)
s_result = self.transform_schema(s_X)
result = lale.datasets.data_schemas.add_schema(raw, s_result, recalc=True)
else:
result = raw
return result
def transform_schema(self, s_X):
"""Used internally by Lale for type-checking downstream operators."""
if self._hyperparams["activate_flag"]:
result = {
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
}
else:
result = s_X
return result
_hyperparams_schema = {
"allOf": [
{
"description": "This first object lists all constructor arguments with their types, but omits constraints for conditional hyperparameters.",
"type": "object",
"additionalProperties": False,
"required": ["activate_flag"],
"relevantToOptimizer": [],
"properties": {"activate_flag": _hparam_activate_flag_unmodified},
}
]
}
_input_fit_schema = {
"type": "object",
"required": ["X"],
"additionalProperties": False,
"properties": {
"X": { # Handles 1-D arrays as well
"anyOf": [
{"type": "array", "items": {"laleType": "Any"}},
{
"type": "array",
"items": {"type": "array", "items": {"laleType": "Any"}},
},
]
},
"y": {"laleType": "Any"},
},
}
_input_transform_schema = {
"type": "object",
"required": ["X"],
"additionalProperties": False,
"properties": {
"X": { # Handles 1-D arrays as well
"anyOf": [
{"type": "array", "items": {"laleType": "Any"}},
{
"type": "array",
"items": {"type": "array", "items": {"laleType": "Any"}},
},
]
}
},
}
_output_transform_schema = {
"description": "Features; the outer array is over samples.",
"type": "array",
"items": {"type": "array", "items": {"laleType": "Any"}},
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """Operator from `autoai_libs`_. Transforms a numpy array to float32.
.. _`autoai_libs`: https://pypi.org/project/autoai-libs""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.autoai_libs.float32_transform.html",
"import_from": "autoai_libs.transformers.exportable",
"type": "object",
"tags": {"pre": [], "op": ["transformer"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_transform": _input_transform_schema,
"output_transform": _output_transform_schema,
},
}
float32_transform = lale.operators.make_operator(
_float32_transformImpl, _combined_schemas
)
lale.docstrings.set_docstrings(float32_transform)
| 4,295 | 30.588235 | 152 |
py
|
lale
|
lale-master/lale/lib/autoai_libs/__init__.py
|
# Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Schema-enhanced versions of the operators from `autoai_libs`_ to enable hyperparameter tuning.
.. _`autoai_libs`: https://pypi.org/project/autoai-libs
Operators
=========
Preprocessing any columns:
* lale.lib.autoai_libs. `ColumnSelector`_
* lale.lib.autoai_libs. `NumpyColumnSelector`_
* lale.lib.autoai_libs. `NumpyReplaceMissingValues`_
* lale.lib.autoai_libs. `float32_transform`_
* lale.lib.autoai_libs. `NumpyPermuteArray`_
Preprocessing categorical columns:
* lale.lib.autoai_libs. `CompressStrings`_
* lale.lib.autoai_libs. `NumpyReplaceUnknownValues`_
* lale.lib.autoai_libs. `boolean2float`_
* lale.lib.autoai_libs. `CatImputer`_
* lale.lib.autoai_libs. `CatEncoder`_
Preprocessing numeric columns:
* lale.lib.autoai_libs. `FloatStr2Float`_
* lale.lib.autoai_libs. `NumImputer`_
* lale.lib.autoai_libs. `OptStandardScaler`_
Preprocessing text columns:
* lale.lib.autoai_libs. `TextTransformer`_
* lale.lib.autoai_libs. `Word2VecTransformer`_
Preprocessing date columns:
* lale.lib.autoai_libs. `DateTransformer`_
Feature transformation:
* lale.lib.autoai_libs. `TNoOp`_
* lale.lib.autoai_libs. `TA1`_
* lale.lib.autoai_libs. `TA2`_
* lale.lib.autoai_libs. `TB1`_
* lale.lib.autoai_libs. `TB2`_
* lale.lib.autoai_libs. `TAM`_
* lale.lib.autoai_libs. `TGen`_
* lale.lib.autoai_libs. `FS1`_
* lale.lib.autoai_libs. `FS2`_
.. _`ColumnSelector`: lale.lib.autoai_libs.column_selector.html
.. _`NumpyColumnSelector`: lale.lib.autoai_libs.numpy_column_selector.html
.. _`CompressStrings`: lale.lib.autoai_libs.compress_strings.html
.. _`NumpyReplaceMissingValues`: lale.lib.autoai_libs.numpy_replace_missing_values.html
.. _`NumpyReplaceUnknownValues`: lale.lib.autoai_libs.numpy_replace_unknown_values.html
.. _`boolean2float`: lale.lib.autoai_libs.boolean2float.html
.. _`CatImputer`: lale.lib.autoai_libs.cat_imputer.html
.. _`CatEncoder`: lale.lib.autoai_libs.cat_encoder.html
.. _`float32_transform`: lale.lib.autoai_libs.float32_transform.html
.. _`FloatStr2Float`: lale.lib.autoai_libs.float_str2_float.html
.. _`NumImputer`: lale.lib.autoai_libs.num_imputer.html
.. _`OptStandardScaler`: lale.lib.autoai_libs.opt_standard_scaler.html
.. _`TextTransformer`: lale.lib.autoai_libs.text_transformer.html
.. _`Word2VecTransformer`: lale.lib.autoai_libs.word2vec_transformer.html
.. _`DateTransformer`: lale.lib.autoai_libs.date_transformer.html
.. _`NumpyPermuteArray`: lale.lib.autoai_libs.numpy_permute_array.html
.. _`TNoOp`: lale.lib.autoai_libs.t_no_op.html
.. _`TA1`: lale.lib.autoai_libs.ta1.html
.. _`TA2`: lale.lib.autoai_libs.ta2.html
.. _`TB1`: lale.lib.autoai_libs.tb1.html
.. _`TB2`: lale.lib.autoai_libs.tb2.html
.. _`TAM`: lale.lib.autoai_libs.tam.html
.. _`TGen`: lale.lib.autoai_libs.tgen.html
.. _`FS1`: lale.lib.autoai_libs.fs1.html
.. _`FS2`: lale.lib.autoai_libs.fs2.html
"""
from lale import register_lale_wrapper_modules
from .boolean2float import boolean2float as boolean2float
from .cat_encoder import CatEncoder as CatEncoder
from .cat_imputer import CatImputer as CatImputer
from .column_selector import ColumnSelector as ColumnSelector
from .compress_strings import CompressStrings as CompressStrings
from .date_transformer import DateTransformer as DateTransformer
from .float32_transform import float32_transform as float32_transform
from .float_str2_float import FloatStr2Float as FloatStr2Float
from .fs1 import FS1 as FS1
from .fs2 import FS2 as FS2
from .num_imputer import NumImputer as NumImputer
from .numpy_column_selector import NumpyColumnSelector as NumpyColumnSelector
from .numpy_permute_array import NumpyPermuteArray as NumpyPermuteArray
from .numpy_replace_missing_values import (
NumpyReplaceMissingValues as NumpyReplaceMissingValues,
)
from .numpy_replace_unknown_values import (
NumpyReplaceUnknownValues as NumpyReplaceUnknownValues,
)
from .opt_standard_scaler import OptStandardScaler as OptStandardScaler
from .t_no_op import TNoOp as TNoOp
from .ta1 import TA1 as TA1
from .ta2 import TA2 as TA2
from .tam import TAM as TAM
from .tb1 import TB1 as TB1
from .tb2 import TB2 as TB2
from .text_transformer import TextTransformer as TextTransformer
from .tgen import TGen as TGen
from .util import wrap_pipeline_segments as wrap_pipeline_segments
from .word2vec_transformer import Word2VecTransformer as Word2VecTransformer
# Note: all imports should be done as
# from .xxx import XXX as XXX
# this ensures that pyright considers them to be publicly available
# and not private imports (this affects lale users that use pyright)
register_lale_wrapper_modules(__name__)
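# Illustrative composition sketch, not part of this package's code: the wrapped
# operators combine with lale's >> combinator like any other lale operator, e.g.
#
#     from lale.lib.autoai_libs import NumpyColumnSelector, CatEncoder, float32_transform
#     prep = NumpyColumnSelector() >> CatEncoder() >> float32_transform()
#
# Constructing operators with defaults, as above, is an assumption for brevity;
# AutoAI-generated pipelines set the hyperparameters explicitly.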
| 5,133 | 37.601504 | 94 |
py
|
lale
|
lale-master/lale/lib/autoai_libs/cat_encoder.py
|
# Copyright 2020-2023 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import autoai_libs.transformers.exportable
import numpy as np
import lale.docstrings
import lale.operators
from ._common_schemas import (
_hparam_activate_flag_unmodified,
_hparam_sklearn_version_family,
)
class _CatEncoderImpl:
def __init__(self, encode_unknown_with="auto", **hyperparams):
self.encode_unknown_with = encode_unknown_with
self._wrapped_model = autoai_libs.transformers.exportable.CatEncoder(
**hyperparams
)
def fit(self, X, y=None):
self._wrapped_model.fit(X, y)
return self
def transform(self, X):
try:
return self._wrapped_model.transform(X)
except ValueError as e:
if self._wrapped_model.encoding == "ordinal":
if X.ndim == 1:
X = X.reshape(-1, 1)
(transformed_X, X_mask) = self._wrapped_model.encoder._transform(
X, handle_unknown="ignore"
)
                # transformed_X is the output with the encoding of any unknown category in
                # column i set to the same value as the encoding of the first element of
                # categories_[i]; X_mask is a boolean mask that indicates which values were unknown.
n_features = transformed_X.shape[1]
for i in range(n_features):
if self.encode_unknown_with == "auto":
transformed_X[:, i][~X_mask[:, i]] = len(
self._wrapped_model.encoder.categories_[i]
)
else:
transformed_X[:, i][~X_mask[:, i]] = self.encode_unknown_with
transformed_X[:, i] = transformed_X[:, i].astype(
self._wrapped_model.encoder.categories_[i].dtype
)
# Following lines are borrowed from CatEncoder as is:
if (
isinstance(transformed_X[0], np.ndarray)
and transformed_X[0].shape[0] == 1
):
# this is a numpy array whose elements are numpy arrays (arises from string targets)
transformed_X = np.concatenate(transformed_X).ravel()
if transformed_X.ndim > 1 and transformed_X.shape[1] == 1:
transformed_X = transformed_X.reshape(-1, 1)
transformed_X = transformed_X.reshape(transformed_X.shape[0], -1)
return transformed_X
else:
raise e
_hyperparams_schema = {
"allOf": [
{
"description": "This first object lists all constructor arguments with their types, but omits constraints for conditional hyperparameters.",
"type": "object",
"additionalProperties": False,
"required": [
"encoding",
"categories",
"dtype",
"handle_unknown",
"sklearn_version_family",
"activate_flag",
"encode_unknown_with",
],
"relevantToOptimizer": ["encoding"],
"properties": {
"encoding": {
"description": "The type of encoding to use.",
"enum": ["onehot", "ordinal"],
"transient": "alwaysPrint", # since positional argument
"default": "ordinal",
},
"categories": {
"description": "Categories (unique values) per feature.",
"anyOf": [
{
"description": "Determine categories automatically from training data.",
"enum": ["auto", None],
},
{
"description": "The ith list element holds the categories expected in the ith column.",
"type": "array",
"items": {
"anyOf": [
{
"type": "array",
"items": {"type": "string"},
},
{
"type": "array",
"items": {"type": "number"},
"description": "Should be sorted.",
},
]
},
},
],
"transient": "alwaysPrint", # since positional argument
"default": "auto",
},
"dtype": {
"description": "Desired dtype of output, must be number. See https://docs.scipy.org/doc/numpy-1.14.0/reference/arrays.scalars.html#arrays-scalars-built-in",
"laleType": "Any",
"transient": "alwaysPrint", # since positional argument
"default": "float64",
},
"handle_unknown": {
"description": """Whether to raise an error or ignore if an unknown categorical feature is present during transform.
When this parameter is set to `ignore` and an unknown category is encountered during transform,
the resulting one-hot encoded columns for this feature will be all zeros for encoding 'onehot' and
the resulting encoding will be set to the value indicated by `encode_unknown_with` for encoding 'ordinal'.
In the inverse transform, an unknown category will be denoted as None.""",
"enum": ["error", "ignore"],
"transient": "alwaysPrint", # since positional argument
"default": "ignore",
},
"sklearn_version_family": _hparam_sklearn_version_family,
"activate_flag": _hparam_activate_flag_unmodified,
"encode_unknown_with": {
"description": """When an unknown categorical feature value is found during transform, and 'handle_unknown' is
set to 'ignore', and encoding is 'ordinal', that value is encoded with this value. Default of 'auto' sets it to an integer equal to n+1, where
n is the maximum encoding value based on known categories.""",
"anyOf": [{"type": "integer"}, {"enum": ["auto"]}],
"default": "auto",
},
},
}
]
}
_input_fit_schema = {
"type": "object",
"required": ["X"],
"additionalProperties": False,
"properties": {
"X": { # Handles 1-D arrays as well
"anyOf": [
{"type": "array", "items": {"laleType": "Any"}},
{
"type": "array",
"items": {"type": "array", "items": {"laleType": "Any"}},
},
]
},
"y": {"laleType": "Any"},
},
}
_input_transform_schema = {
"type": "object",
"required": ["X"],
"additionalProperties": False,
"properties": {
"X": { # Handles 1-D arrays as well
"anyOf": [
{"type": "array", "items": {"laleType": "Any"}},
{
"type": "array",
"items": {"type": "array", "items": {"laleType": "Any"}},
},
]
}
},
}
_output_transform_schema = {
"description": "Features; the outer array is over samples.",
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """Operator from `autoai_libs`_. Encoding of categorical features as numbers, currently internally uses the sklearn OneHotEncoder_ and OrdinalEncoder_.
.. _`autoai_libs`: https://pypi.org/project/autoai-libs
.. _OneHotEncoder: https://scikit-learn.org/0.20/modules/generated/sklearn.preprocessing.OneHotEncoder.html#sklearn.preprocessing.OneHotEncoder
.. _OrdinalEncoder: https://scikit-learn.org/0.20/modules/generated/sklearn.preprocessing.OrdinalEncoder.html#sklearn.preprocessing.OrdinalEncoder""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.autoai_libs.cat_encoder.html",
"import_from": "autoai_libs.transformers.exportable",
"type": "object",
"tags": {"pre": [], "op": ["transformer"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_transform": _input_transform_schema,
"output_transform": _output_transform_schema,
},
}
CatEncoder = lale.operators.make_operator(_CatEncoderImpl, _combined_schemas)
lale.docstrings.set_docstrings(CatEncoder)
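# Illustrative usage sketch, not part of cat_encoder.py; the toy data and the
# expected behavior noted below are assumptions for demonstration.
if __name__ == "__main__":
    enc = CatEncoder(
        encoding="ordinal",
        categories="auto",
        dtype=np.float64,
        handle_unknown="ignore",
        encode_unknown_with="auto",
    )
    trained = enc.fit(np.array([["a"], ["b"], ["b"]]))
    # "c" was not seen during fit; with handle_unknown="ignore" it should be encoded
    # via encode_unknown_with (here len(categories_) for that column) instead of raising.
    print(trained.transform(np.array([["a"], ["c"]])))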
| 9,489 | 41.747748 | 176 |
py
|
lale
|
lale-master/lale/lib/autoai_libs/t_no_op.py
|
# Copyright 2020-2023 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import autoai_libs.cognito.transforms.transform_utils
import lale.docstrings
import lale.operators
from ._common_schemas import (
_hparams_feat_constraints,
_hparams_fun_pointer,
_hparams_transformer_name,
)
class _TNoOpImpl:
def __init__(self, **hyperparams):
self._wrapped_model = autoai_libs.cognito.transforms.transform_utils.TNoOp(
**hyperparams
)
def fit(self, X, y=None, **fit_params):
self._wrapped_model.fit(X, y, **fit_params)
return self
def transform(self, X):
result = self._wrapped_model.transform(X)
return result
_hyperparams_schema = {
"allOf": [
{
"description": "This first object lists all constructor arguments with their types, but omits constraints for conditional hyperparameters.",
"type": "object",
"additionalProperties": False,
"required": ["fun", "name", "datatypes", "feat_constraints", "tgraph"],
"relevantToOptimizer": [],
"properties": {
"fun": _hparams_fun_pointer(description="Function pointer (ignored)."),
"name": _hparams_transformer_name,
"datatypes": {
"description": "List of datatypes that are valid input (ignored).",
"laleType": "Any",
"transient": "alwaysPrint", # since positional argument
"default": None,
},
"feat_constraints": _hparams_feat_constraints(
description="Constraints that must be satisfied by a column to be considered a valid input to this transform (ignored)."
),
"tgraph": {
"description": "Should be the invoking TGraph() object.",
"anyOf": [{"laleType": "Any"}, {"enum": [None]}],
"default": None,
},
},
}
]
}
_input_fit_schema = {
"type": "object",
"required": ["X"],
"additionalProperties": False,
"properties": {
"X": {"description": "Features; no restrictions on data type."},
"y": {"laleType": "Any"},
},
}
_input_transform_schema = {
"type": "object",
"required": ["X"],
"additionalProperties": False,
"properties": {"X": {"description": "Features; no restrictions on data type."}},
}
_output_transform_schema = {
"description": "Features; no restrictions on data type.",
"laleType": "Any",
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """Operator from `autoai_libs`_. Passes the data through unchanged.
.. _`autoai_libs`: https://pypi.org/project/autoai-libs""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.autoai_libs.t_no_op.html",
"import_from": "autoai_libs.cognito.transforms.transform_utils",
"type": "object",
"tags": {"pre": [], "op": ["transformer"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_transform": _input_transform_schema,
"output_transform": _output_transform_schema,
},
}
TNoOp = lale.operators.make_operator(_TNoOpImpl, _combined_schemas)
lale.docstrings.set_docstrings(TNoOp)
| 3,914 | 33.043478 | 152 |
py
|
lale
|
lale-master/lale/lib/autoai_libs/cat_imputer.py
|
# Copyright 2020-2023 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import autoai_libs.transformers.exportable
import numpy as np
import lale.docstrings
import lale.operators
from ._common_schemas import (
_hparam_activate_flag_unmodified,
_hparam_sklearn_version_family,
)
class _CatImputerImpl:
def __init__(self, *args, **hyperparams):
self._wrapped_model = autoai_libs.transformers.exportable.CatImputer(
*args, **hyperparams
)
def fit(self, X, y=None, **fit_params):
self._wrapped_model.fit(X, y, **fit_params)
return self
def transform(self, X):
return self._wrapped_model.transform(X)
_hyperparams_schema = {
"allOf": [
{
"description": "This first object lists all constructor arguments with their types, but omits constraints for conditional hyperparameters.",
"type": "object",
"additionalProperties": False,
"required": [
"strategy",
"missing_values",
"sklearn_version_family",
"activate_flag",
],
"relevantToOptimizer": ["strategy"],
"properties": {
"strategy": {
"description": "The imputation strategy.",
"anyOf": [
{
"enum": ["mean"],
"description": "Replace using the mean along each column. Can only be used with numeric data.",
},
{
"enum": ["median"],
"description": "Replace using the median along each column. Can only be used with numeric data.",
},
{
"enum": ["most_frequent"],
"description": "Replace using most frequent value each column. Used with strings or numeric data.",
},
{
"enum": ["constant"],
"description": "Replace with fill_value. Can be used with strings or numeric data.",
},
],
"transient": "alwaysPrint", # since positional argument
"default": "mean",
},
"missing_values": {
"description": "The placeholder for the missing values. All occurrences of missing_values will be imputed.",
"anyOf": [
{"type": "number"},
{"type": "string"},
{"enum": [np.nan]},
{"enum": [None]},
],
"transient": "alwaysPrint", # since positional argument
"default": np.nan,
},
"sklearn_version_family": _hparam_sklearn_version_family,
"activate_flag": _hparam_activate_flag_unmodified,
},
}
]
}
_input_fit_schema = {
"type": "object",
"required": ["X"],
"additionalProperties": False,
"properties": {
"X": { # Handles 1-D arrays as well
"anyOf": [
{"type": "array", "items": {"laleType": "Any"}},
{
"type": "array",
"items": {"type": "array", "items": {"laleType": "Any"}},
},
]
},
"y": {"laleType": "Any"},
},
}
_input_transform_schema = {
"type": "object",
"required": ["X"],
"additionalProperties": False,
"properties": {
"X": { # Handles 1-D arrays as well
"anyOf": [
{"type": "array", "items": {"laleType": "Any"}},
{
"type": "array",
"items": {"type": "array", "items": {"laleType": "Any"}},
},
]
}
},
}
_output_transform_schema = {
"description": "Features; the outer array is over samples.",
"anyOf": [
{"type": "array", "items": {"laleType": "Any"}},
{"type": "array", "items": {"type": "array", "items": {"laleType": "Any"}}},
],
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """Operator from `autoai_libs`_. Missing value imputation for categorical features, currently internally uses the sklearn SimpleImputer_.
.. _`autoai_libs`: https://pypi.org/project/autoai-libs
.. _SimpleImputer: https://scikit-learn.org/0.20/modules/generated/sklearn.impute.SimpleImputer.html#sklearn-impute-simpleimputer""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.autoai_libs.cat_imputer.html",
"import_from": "autoai_libs.transformers.exportable",
"type": "object",
"tags": {"pre": [], "op": ["transformer"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_transform": _input_transform_schema,
"output_transform": _output_transform_schema,
},
}
CatImputer = lale.operators.make_operator(_CatImputerImpl, _combined_schemas)
autoai_libs_version_str = getattr(autoai_libs, "__version__", None)
if isinstance(autoai_libs_version_str, str): # beware sphinx _MockModule
import typing
from packaging import version
from lale.schemas import AnyOf, Array, Enum, Float, Not, Null, Object, String
autoai_libs_version = version.parse(autoai_libs_version_str)
if autoai_libs_version >= version.Version("1.12.18"):
CatImputer = typing.cast(
lale.operators.PlannedIndividualOp,
CatImputer.customize_schema(
set_as_available=True,
constraint=[
AnyOf(
desc="fill_value and fill_values cannot both be specified",
forOptimizer=False,
types=[Object(fill_value=Null()), Object(fill_values=Null())],
),
AnyOf(
desc="if strategy=constants, the fill_values cannot be None",
forOptimizer=False,
types=[
Object(strategy=Not(Enum(["constants"]))),
Not(Object(fill_values=Null())),
],
),
],
fill_value=AnyOf(
types=[Float(), String(), Enum(values=[np.nan]), Null()],
desc="The placeholder for fill value used in constant strategy",
default=None,
),
fill_values=AnyOf(
types=[
Array(
items=AnyOf(
types=[Float(), String(), Enum(values=[np.nan]), Null()]
)
),
Null(),
],
desc="The placeholder for fill values used in constants strategy",
default=None,
),
sklearn_version_family=_hparam_sklearn_version_family,
strategy=AnyOf(
types=[
Enum(
values=["most_frequent"],
desc="Replace using most frequent value each column. Used with strings or numeric data.",
),
Enum(
values=["constant"],
desc="Replace with fill_value. Can be used with strings or numeric data.",
),
Enum(
values=["constants"],
desc="Replace missing values in columns with values in fill_values list. Can be used with list of strings or numeric data.",
),
],
desc="The imputation strategy.",
default="most_frequent",
),
),
)
lale.docstrings.set_docstrings(CatImputer)
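# Illustrative usage sketch, not part of cat_imputer.py; the strategy choice and
# toy data are assumptions for demonstration.
if __name__ == "__main__":
    imputer = CatImputer(strategy="most_frequent", missing_values=np.nan)
    X = np.array([["a"], [np.nan], ["a"], ["b"]], dtype=object)
    # The missing entry should be imputed with the most frequent value, "a".
    print(imputer.fit(X).transform(X))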
| 8,778 | 37.336245 | 156 |
py
|
lale
|
lale-master/lale/lib/autoai_libs/tgen.py
|
# Copyright 2020 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import autoai_libs.cognito.transforms.transform_utils
import lale.datasets.data_schemas
import lale.docstrings
import lale.helpers
import lale.operators
from ._common_schemas import (
_hparam_col_dtypes,
_hparams_apply_all,
_hparams_col_as_json_objects,
_hparams_col_names,
_hparams_datatype_spec,
_hparams_fun_pointer,
_hparams_tgraph,
_hparams_transformer_name,
)
class _TGenImpl:
def __init__(self, **hyperparams):
self._wrapped_model = autoai_libs.cognito.transforms.transform_utils.TGen(
**hyperparams
)
def fit(self, X, y=None):
stripped_X = lale.datasets.data_schemas.strip_schema(X)
self._wrapped_model.fit(stripped_X, y)
return self
def transform(self, X):
stripped_X = lale.datasets.data_schemas.strip_schema(X)
result = self._wrapped_model.transform(stripped_X)
return result
_hyperparams_schema = {
"allOf": [
{
"description": "This first object lists all constructor arguments with their types, but omits constraints for conditional hyperparameters.",
"type": "object",
"additionalProperties": False,
"required": [
"fun",
"name",
"arg_count",
"datatypes_list",
"feat_constraints_list",
"tgraph",
"apply_all",
"col_names",
"col_dtypes",
"col_as_json_objects",
],
"relevantToOptimizer": [],
"properties": {
"fun": _hparams_fun_pointer(description="The function pointer."),
"name": _hparams_transformer_name,
"arg_count": {
"description": "Number of arguments to the function, e.g., 1 for unary, 2 for binary, and so on.",
"type": "integer",
"minimum": 1,
"transient": "alwaysPrint", # since positional argument
"default": 1,
},
"datatypes_list": {
"description": "A list of arg_count lists that correspond to the acceptable input data types for each argument.",
"anyOf": [
{
"type": "array",
"items": {
"description": "List of datatypes that are valid input to the corresponding argument (numeric, float, int, etc.).",
**_hparams_datatype_spec,
},
},
{"enum": [None]},
],
"transient": "alwaysPrint", # since positional argument
"default": None,
},
"feat_constraints_list": {
"description": "A list of arg_count lists that correspond to some constraints that should be imposed on selection of the input features.",
"anyOf": [
{
"type": "array",
"items": {
"description": "List of feature constraints for the corresponding argument.",
"type": "array",
"items": {"laleType": "Any"},
},
},
{"enum": [None]},
],
"transient": "alwaysPrint", # since positional argument
"default": None,
},
"tgraph": _hparams_tgraph,
"apply_all": _hparams_apply_all,
"col_names": _hparams_col_names,
"col_dtypes": _hparam_col_dtypes,
"col_as_json_objects": _hparams_col_as_json_objects,
},
}
]
}
_input_fit_schema = {
"type": "object",
"required": ["X"],
"additionalProperties": False,
"properties": {
"X": {
"type": "array",
"items": {"type": "array", "items": {"laleType": "Any"}},
},
"y": {"laleType": "Any"},
},
}
_input_transform_schema = {
"type": "object",
"required": ["X"],
"additionalProperties": False,
"properties": {
"X": {"type": "array", "items": {"type": "array", "items": {"laleType": "Any"}}}
},
}
_output_transform_schema = {
"description": "Features; the outer array is over samples.",
"type": "array",
"items": {"type": "array", "items": {"laleType": "Any"}},
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """Operator from `autoai_libs`_. Feature transformation via a general wrapper that can be used for most functions (may not be most efficient though).
.. _`autoai_libs`: https://pypi.org/project/autoai-libs""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.autoai_libs.ta1.html",
"import_from": "autoai_libs.cognito.transforms.transform_utils",
"type": "object",
"tags": {"pre": [], "op": ["transformer"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_transform": _input_transform_schema,
"output_transform": _output_transform_schema,
},
}
TGen = lale.operators.make_operator(_TGenImpl, _combined_schemas)
lale.docstrings.set_docstrings(TGen)
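# Illustrative usage sketch, not part of tgen.py: wrap a binary function through
# the general-purpose wrapper. The function, column metadata, and empty constraint
# lists below are assumptions for demonstration.
if __name__ == "__main__":
    import numpy as np
    pairwise_sum = TGen(
        fun=np.add,
        name="sum",
        arg_count=2,
        datatypes_list=[["numeric"], ["numeric"]],
        feat_constraints_list=[[], []],
        col_names=["f0", "f1"],
        col_dtypes=[np.dtype("float32"), np.dtype("float32")],
    )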
| 6,139 | 35.117647 | 168 |
py
|
lale
|
lale-master/lale/lib/autoai_libs/tb2.py
|
# Copyright 2020 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import autoai_libs.cognito.transforms.transform_utils
import lale.docstrings
import lale.operators
from ._common_schemas import (
_hparams_apply_all,
_hparams_datatypes,
_hparams_feat_constraints,
_hparams_tans_class,
_hparams_tgraph,
_hparams_transformer_name,
)
class _TB2Impl:
def __init__(
self,
tans_class,
name,
datatypes1,
feat_constraints1,
datatypes2,
feat_constraints2,
tgraph=None,
apply_all=True,
):
self._hyperparams = {
"tans_class": tans_class,
"name": name,
"datatypes1": datatypes1,
"feat_constraints1": feat_constraints1,
"datatypes2": datatypes2,
"feat_constraints2": feat_constraints2,
"tgraph": tgraph,
"apply_all": apply_all,
}
self._wrapped_model = autoai_libs.cognito.transforms.transform_utils.TB2(
**self._hyperparams
)
def fit(self, X, y=None):
self._wrapped_model.fit(X, y)
return self
def transform(self, X):
result = self._wrapped_model.transform(X)
return result
_hyperparams_schema = {
"allOf": [
{
"description": "This first object lists all constructor arguments with their types, but omits constraints for conditional hyperparameters.",
"type": "object",
"additionalProperties": False,
"required": [
"tans_class",
"name",
"datatypes1",
"feat_constraints1",
"datatypes2",
"feat_constraints2",
"tgraph",
"apply_all",
],
"relevantToOptimizer": [],
"properties": {
"tans_class": _hparams_tans_class,
"name": _hparams_transformer_name,
"datatypes1": _hparams_datatypes(
description="List of datatypes that are valid input (first argument) to the transformer function (numeric, float, int, etc.)."
),
"feat_constraints1": _hparams_feat_constraints(
description="All constraints that must be satisfied by a column to be considered a valid input (first argument) to this transform."
),
"datatypes2": _hparams_datatypes(
description="List of datatypes that are valid input (second argument) to the transformer function (numeric, float, int, etc.)."
),
"feat_constraints2": _hparams_feat_constraints(
description="All constraints that must be satisfied by a column to be considered a valid input (second argument) to this transform."
),
"tgraph": _hparams_tgraph,
"apply_all": _hparams_apply_all,
},
}
]
}
_input_fit_schema = {
"type": "object",
"required": ["X"],
"additionalProperties": False,
"properties": {
"X": {
"type": "array",
"items": {"type": "array", "items": {"laleType": "Any"}},
},
"y": {"laleType": "Any"},
},
}
_input_transform_schema = {
"type": "object",
"required": ["X"],
"additionalProperties": False,
"properties": {
"X": {"type": "array", "items": {"type": "array", "items": {"laleType": "Any"}}}
},
}
_output_transform_schema = {
"description": "Features; the outer array is over samples.",
"type": "array",
"items": {"type": "array", "items": {"laleType": "Any"}},
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """Operator from `autoai_libs`_. Feature transformation for binary state-based transformations (with fit/transform), such as group-by.
.. _`autoai_libs`: https://pypi.org/project/autoai-libs""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.autoai_libs.tb2.html",
"import_from": "autoai_libs.cognito.transforms.transform_utils",
"type": "object",
"tags": {"pre": [], "op": ["transformer"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_transform": _input_transform_schema,
"output_transform": _output_transform_schema,
},
}
TB2 = lale.operators.make_operator(_TB2Impl, _combined_schemas)
lale.docstrings.set_docstrings(TB2)
| 5,090 | 32.27451 | 153 |
py
|
lale
|
lale-master/lale/lib/autoai_libs/ta2.py
|
# Copyright 2020 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import autoai_libs.cognito.transforms.transform_utils
import lale.docstrings
import lale.operators
from ._common_schemas import (
_hparam_col_dtypes,
_hparams_apply_all,
_hparams_col_as_json_objects,
_hparams_col_names,
_hparams_datatypes,
_hparams_feat_constraints,
_hparams_fun_pointer,
_hparams_tgraph,
_hparams_transformer_name,
)
class _TA2Impl:
def __init__(self, **hyperparams):
self._wrapped_model = autoai_libs.cognito.transforms.transform_utils.TA2(
**hyperparams
)
def fit(self, X, y=None, **fit_params):
self._wrapped_model.fit(X, y, **fit_params)
return self
def transform(self, X):
result = self._wrapped_model.transform(X)
return result
_hyperparams_schema = {
"allOf": [
{
"description": "This first object lists all constructor arguments with their types, but omits constraints for conditional hyperparameters.",
"type": "object",
"additionalProperties": False,
"required": [
"fun",
"name",
"datatypes1",
"feat_constraints1",
"datatypes2",
"feat_constraints2",
"tgraph",
"apply_all",
"col_names",
"col_dtypes",
"col_as_json_objects",
],
"relevantToOptimizer": [],
"properties": {
"fun": _hparams_fun_pointer(description="The function pointer."),
"name": _hparams_transformer_name,
"datatypes1": _hparams_datatypes(
description="List of datatypes that are valid input to the first argument of the transformer function (`numeric`, `float`, `int`, `integer`)."
),
"feat_constraints1": _hparams_feat_constraints(
description="All constraints that must be satisfied by a column to be considered a valid first argument to this transform."
),
"datatypes2": _hparams_datatypes(
description="List of datatypes that are valid input to the second argument of the transformer function (numeric, float, int, etc.)."
),
"feat_constraints2": _hparams_feat_constraints(
description="All constraints that must be satisfied by a column to be considered a valid second argument to this transform."
),
"tgraph": _hparams_tgraph,
"apply_all": _hparams_apply_all,
"col_names": _hparams_col_names,
"col_dtypes": _hparam_col_dtypes,
"col_as_json_objects": _hparams_col_as_json_objects,
},
}
]
}
_input_fit_schema = {
"type": "object",
"required": ["X"],
"additionalProperties": False,
"properties": {
"X": {
"type": "array",
"items": {"type": "array", "items": {"laleType": "Any"}},
},
"y": {"laleType": "Any"},
},
}
_input_transform_schema = {
"type": "object",
"required": ["X"],
"additionalProperties": False,
"properties": {
"X": {"type": "array", "items": {"type": "array", "items": {"laleType": "Any"}}}
},
}
_output_transform_schema = {
"description": "Features; the outer array is over samples.",
"type": "array",
"items": {"type": "array", "items": {"laleType": "Any"}},
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """Operator from `autoai_libs`_. Feature transformation for binary stateless functions, such as sum or product.
.. _`autoai_libs`: https://pypi.org/project/autoai-libs""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.autoai_libs.ta2.html",
"import_from": "autoai_libs.cognito.transforms.transform_utils",
"type": "object",
"tags": {"pre": [], "op": ["transformer"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_transform": _input_transform_schema,
"output_transform": _output_transform_schema,
},
}
TA2 = lale.operators.make_operator(_TA2Impl, _combined_schemas)
lale.docstrings.set_docstrings(TA2)
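# Illustrative usage sketch, not part of ta2.py: wrap a binary stateless function
# (here np.add). Column names, dtypes, and the empty constraint lists are
# assumptions for demonstration.
if __name__ == "__main__":
    import numpy as np
    pairwise_sum = TA2(
        fun=np.add,
        name="sum",
        datatypes1=["numeric"],
        feat_constraints1=[],
        datatypes2=["numeric"],
        feat_constraints2=[],
        col_names=["f0", "f1"],
        col_dtypes=[np.dtype("float32"), np.dtype("float32")],
    )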
| 4,944 | 33.823944 | 162 |
py
|
lale
|
lale-master/lale/lib/imblearn/edited_nearest_neighbours.py
|
# Copyright 2019-2023 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import imblearn.under_sampling
import lale.docstrings
import lale.operators
from ._common_schemas import (
_hparam_kind_sel,
_hparam_n_jobs,
_hparam_n_neighbors,
_hparam_operator,
_hparam_sampling_strategy_anyof_elc,
_input_fit_schema,
_input_predict_schema,
_input_transform_schema,
_output_decision_function_schema,
_output_predict_proba_schema,
_output_predict_schema,
_output_transform_schema,
)
from .base_resampler import _BaseResamplerImpl
class _EditedNearestNeighboursImpl(_BaseResamplerImpl):
def __init__(self, operator=None, **hyperparams):
if operator is None:
raise ValueError("Operator is a required argument.")
resampler_instance = imblearn.under_sampling.EditedNearestNeighbours(
**hyperparams
)
super().__init__(operator=operator, resampler=resampler_instance)
_hyperparams_schema = {
"allOf": [
{
"type": "object",
"relevantToOptimizer": ["operator"],
"additionalProperties": False,
"properties": {
"operator": _hparam_operator,
"sampling_strategy": _hparam_sampling_strategy_anyof_elc,
"n_neighbors": {
**_hparam_n_neighbors,
"default": 3,
},
"kind_sel": _hparam_kind_sel,
"n_jobs": _hparam_n_jobs,
},
}
]
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """Class to perform under-sampling based on the edited nearest neighbour method.""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.imblearn.edited_nearest_neighbours.html",
"import_from": "imblearn.under_sampling",
"type": "object",
"tags": {
"pre": [],
"op": [
"transformer",
"estimator",
"resampler",
], # transformer and estimator both as a higher-order operator
"post": [],
},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_transform": _input_transform_schema,
"output_transform": _output_transform_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
"input_predict_proba": _input_predict_schema,
"output_predict_proba": _output_predict_proba_schema,
"input_decision_function": _input_predict_schema,
"output_decision_function": _output_decision_function_schema,
},
}
EditedNearestNeighbours = lale.operators.make_operator(
_EditedNearestNeighboursImpl, _combined_schemas
)
lale.docstrings.set_docstrings(EditedNearestNeighbours)
| 3,375 | 31.776699 | 122 |
py
|
lale
|
lale-master/lale/lib/imblearn/base_resampler.py
|
# Copyright 2019-2023 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
class _BaseResamplerImpl:
def __init__(self, operator=None, resampler=None):
self.operator = operator
self.resampler = resampler
def fit(self, X, y=None):
resampler = self.resampler
assert resampler is not None
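        # Resample the training data first (silencing imbalanced-learn's FutureWarnings),
        # then train the wrapped operator on the resampled data.  Resampling happens only
        # at fit time; predict, transform, predict_proba, and decision_function below are
        # simply forwarded to the trained operator.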
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=FutureWarning)
X, y = resampler.fit_resample(X, y)
op = self.operator
assert op is not None
self.trained_operator = op.fit(X, y)
if hasattr(self.trained_operator, "classes_"):
self.classes_ = self.trained_operator.classes_
return self
def transform(self, X, y=None):
return self.trained_operator.transform(X, y)
def predict(self, X, **predict_params):
return self.trained_operator.predict(X, **predict_params)
def predict_proba(self, X):
return self.trained_operator.predict_proba(X)
def decision_function(self, X):
return self.trained_operator.decision_function(X)
| 1,629 | 32.958333 | 74 |
py
|
lale
|
lale-master/lale/lib/imblearn/smoten.py
|
# Copyright 2023 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import imblearn.over_sampling
import lale.docstrings
import lale.operators
from ._common_schemas import (
_hparam_n_jobs,
_hparam_n_neighbors,
_hparam_operator,
_hparam_random_state,
_hparam_sampling_strategy_anyof_neoc_over,
_input_fit_schema_cats,
_input_predict_schema_cats,
_input_transform_schema_cats,
_output_decision_function_schema,
_output_predict_proba_schema,
_output_predict_schema,
_output_transform_schema,
)
from .base_resampler import _BaseResamplerImpl
class _SMOTENImpl(_BaseResamplerImpl):
def __init__(self, operator=None, **hyperparams):
if operator is None:
raise ValueError("Operator is a required argument.")
resampler_instance = imblearn.over_sampling.SMOTEN(**hyperparams)
super().__init__(operator=operator, resampler=resampler_instance)
_hyperparams_schema = {
"allOf": [
{
"type": "object",
"required": ["operator"],
"relevantToOptimizer": ["operator"],
"additionalProperties": False,
"properties": {
"operator": _hparam_operator,
"sampling_strategy": _hparam_sampling_strategy_anyof_neoc_over,
"random_state": _hparam_random_state,
"k_neighbors": {
**_hparam_n_neighbors,
"description": "Number of nearest neighbours to use to construct synthetic samples.",
"default": 5,
},
"n_jobs": _hparam_n_jobs,
},
}
]
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """Synthetic Minority Over-sampling Technique for Nominal (SMOTEN).
Expects that the data to resample are only made of categorical features.""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.imblearn.smoten.html",
"import_from": "imblearn.over_sampling",
"type": "object",
"tags": {
"pre": [],
"op": [
"transformer",
"estimator",
"resampler",
], # transformer and estimator both as a higher-order operator
"post": [],
},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema_cats,
"input_transform": _input_transform_schema_cats,
"output_transform": _output_transform_schema,
"input_predict": _input_predict_schema_cats,
"output_predict": _output_predict_schema,
"input_predict_proba": _input_predict_schema_cats,
"output_predict_proba": _output_predict_proba_schema,
"input_decision_function": _input_predict_schema_cats,
"output_decision_function": _output_decision_function_schema,
},
}
SMOTEN = lale.operators.make_operator(_SMOTENImpl, _combined_schemas)
lale.docstrings.set_docstrings(SMOTEN)
| 3,503 | 33.352941 | 105 |
py
|
lale
|
lale-master/lale/lib/imblearn/condensed_nearest_neighbour.py
|
# Copyright 2019-2023 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import imblearn.under_sampling
import lale.docstrings
import lale.operators
from ._common_schemas import (
_hparam_n_jobs,
_hparam_n_neighbors,
_hparam_operator,
_hparam_random_state,
_hparam_sampling_strategy_anyof_elc,
_input_fit_schema,
_input_predict_schema,
_input_transform_schema,
_output_decision_function_schema,
_output_predict_proba_schema,
_output_predict_schema,
_output_transform_schema,
)
from .base_resampler import _BaseResamplerImpl
class _CondensedNearestNeighbourImpl(_BaseResamplerImpl):
def __init__(self, operator=None, **hyperparams):
if operator is None:
raise ValueError("Operator is a required argument.")
resampler_instance = imblearn.under_sampling.CondensedNearestNeighbour(
**hyperparams
)
super().__init__(operator=operator, resampler=resampler_instance)
_hyperparams_schema = {
"allOf": [
{
"type": "object",
"relevantToOptimizer": ["operator"],
"additionalProperties": False,
"properties": {
"operator": _hparam_operator,
"sampling_strategy": _hparam_sampling_strategy_anyof_elc,
"random_state": _hparam_random_state,
"n_neighbors": {
**_hparam_n_neighbors,
"anyOf": [
*_hparam_n_neighbors["anyOf"],
{
"enum": [None],
"description": "KNeighborsClassifier(n_neighbors=1)",
},
],
"default": None,
},
"n_seeds_S": {
"description": """Number of samples to extract in order to build the set S.""",
"type": "integer",
"default": 1,
},
"n_jobs": _hparam_n_jobs,
},
}
]
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """Class to perform under-sampling based on the condensed nearest neighbour method.""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.imblearn.condensed_nearest_neighbour.html",
"import_from": "imblearn.under_sampling",
"type": "object",
"tags": {
"pre": [],
"op": [
"transformer",
"estimator",
"resampler",
], # transformer and estimator both as a higher-order operator
"post": [],
},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_transform": _input_transform_schema,
"output_transform": _output_transform_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
"input_predict_proba": _input_predict_schema,
"output_predict_proba": _output_predict_proba_schema,
"input_decision_function": _input_predict_schema,
"output_decision_function": _output_decision_function_schema,
},
}
CondensedNearestNeighbour = lale.operators.make_operator(
_CondensedNearestNeighbourImpl, _combined_schemas
)
lale.docstrings.set_docstrings(CondensedNearestNeighbour)
| 3,916 | 33.06087 | 124 |
py
|
lale
|
lale-master/lale/lib/imblearn/adasyn.py
|
# Copyright 2019-2023 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import imblearn.over_sampling
import numpy as np
import lale.docstrings
import lale.operators
from ._common_schemas import (
_hparam_n_jobs,
_hparam_n_neighbors,
_hparam_operator,
_hparam_random_state,
_hparam_sampling_strategy_anyof_neoc_over,
_input_fit_schema,
_input_predict_schema,
_input_transform_schema,
_output_decision_function_schema,
_output_predict_proba_schema,
_output_predict_schema,
_output_transform_schema,
)
from .base_resampler import _BaseResamplerImpl
class _ADASYNImpl(_BaseResamplerImpl):
def __init__(self, operator=None, **hyperparams):
if operator is None:
raise ValueError("Operator is a required argument.")
resampler_instance = imblearn.over_sampling.ADASYN(**hyperparams)
super().__init__(operator=operator, resampler=resampler_instance)
def fit(self, X, y=None):
resampler = self.resampler
assert resampler is not None
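        # Unlike the base class, convert y to a NumPy array before resampling so the
        # resampler can index the labels positionally (e.g. when y is a plain Python list).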
X, y = resampler.fit_resample(X, np.array(y))
op = self.operator
assert op is not None
self.trained_operator = op.fit(X, y)
return self
_hyperparams_schema = {
"allOf": [
{
"type": "object",
"relevantToOptimizer": ["operator"],
"additionalProperties": False,
"properties": {
"operator": _hparam_operator,
"sampling_strategy": _hparam_sampling_strategy_anyof_neoc_over,
"random_state": _hparam_random_state,
"n_neighbors": {**_hparam_n_neighbors, "default": 5},
"n_jobs": _hparam_n_jobs,
},
}
]
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """Perform over-sampling using Adaptive Synthetic (ADASYN) sampling approach for imbalanced datasets.""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.imblearn.adasyn.html",
"import_from": "imblearn.over_sampling",
"type": "object",
"tags": {
"pre": [],
"op": [
"transformer",
"estimator",
"resampler",
], # transformer and estimator both as a higher-order operator
"post": [],
},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_transform": _input_transform_schema,
"output_transform": _output_transform_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
"input_predict_proba": _input_predict_schema,
"output_predict_proba": _output_predict_proba_schema,
"input_decision_function": _input_predict_schema,
"output_decision_function": _output_decision_function_schema,
},
}
ADASYN = lale.operators.make_operator(_ADASYNImpl, _combined_schemas)
lale.docstrings.set_docstrings(ADASYN)
| 3,524 | 32.254717 | 124 |
py
|
lale
|
lale-master/lale/lib/imblearn/smotenc.py
|
# Copyright 2023 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import imblearn.over_sampling
import numpy as np
import lale.docstrings
import lale.operators
from ._common_schemas import (
_hparam_n_jobs,
_hparam_n_neighbors,
_hparam_operator,
_hparam_random_state,
_hparam_sampling_strategy_anyof_neoc_over,
_input_fit_schema_cats,
_input_predict_schema_cats,
_input_transform_schema_cats,
_output_decision_function_schema,
_output_predict_proba_schema,
_output_predict_schema,
_output_transform_schema,
)
from .base_resampler import _BaseResamplerImpl
class _SMOTENCImpl(_BaseResamplerImpl):
def __init__(self, operator=None, **hyperparams):
if operator is None:
raise ValueError("Operator is a required argument.")
self._hyperparams = hyperparams
super().__init__(operator=operator, resampler=None)
def fit(self, X, y=None):
if self.resampler is None:
if self._hyperparams["categorical_features"] is None:
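                # No categorical_features were given: treat every column whose dtype is
                # not numeric (per the DataFrame's dtypes) as categorical.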
self._hyperparams["categorical_features"] = [
not np.issubdtype(typ, np.number) for typ in X.dtypes
]
self.resampler = imblearn.over_sampling.SMOTENC(**self._hyperparams)
return super().fit(X, y)
_hyperparams_schema = {
"allOf": [
{
"type": "object",
"required": ["operator"],
"relevantToOptimizer": ["operator"],
"additionalProperties": False,
"properties": {
"operator": _hparam_operator,
"categorical_features": {
"description": "Specifies which features are categorical.",
"anyOf": [
{
"description": "Treat all features with non-numeric dtype as categorical.",
"enum": [None],
},
{
"description": "Indices specifying the categorical features.",
"type": "array",
"items": {"type": "integer"},
},
{
"description": "Mask array of shape `(n_features,)` where True indicates the categorical features.",
"type": "array",
"items": {"type": "boolean"},
},
],
"default": None,
},
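                # Illustrative only: categorical_features=[0, 3] marks the first and fourth
                # columns as categorical; the equivalent boolean mask would be
                # [True, False, False, True, ...] with one entry per feature column.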
"sampling_strategy": _hparam_sampling_strategy_anyof_neoc_over,
"random_state": _hparam_random_state,
"k_neighbors": {
**_hparam_n_neighbors,
"description": "Number of nearest neighbours to use to construct synthetic samples.",
"default": 5,
},
"n_jobs": _hparam_n_jobs,
},
}
]
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """Synthetic Minority Over-sampling Technique for Nominal and Continuous (SMOTENC).
Can handle some nominal features, but not designed to work with only nominal features.""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.imblearn.smotenc.html",
"import_from": "imblearn.over_sampling",
"type": "object",
"tags": {
"pre": [],
"op": [
"transformer",
"estimator",
"resampler",
], # transformer and estimator both as a higher-order operator
"post": [],
},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema_cats,
"input_transform": _input_transform_schema_cats,
"output_transform": _output_transform_schema,
"input_predict": _input_predict_schema_cats,
"output_predict": _output_predict_schema,
"input_predict_proba": _input_predict_schema_cats,
"output_predict_proba": _output_predict_proba_schema,
"input_decision_function": _input_predict_schema_cats,
"output_decision_function": _output_decision_function_schema,
},
}
SMOTENC = lale.operators.make_operator(_SMOTENCImpl, _combined_schemas)
lale.docstrings.set_docstrings(SMOTENC)
| 4,874 | 35.931818 | 128 |
py
|
lale
|
lale-master/lale/lib/imblearn/svm_smote.py
|
# Copyright 2019-2023 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import imblearn.over_sampling
import lale.docstrings
import lale.operators
from ._common_schemas import (
_hparam_n_jobs,
_hparam_n_neighbors,
_hparam_operator,
_hparam_random_state,
_hparam_sampling_strategy_anyof_neoc_over,
_input_fit_schema,
_input_predict_schema,
_input_transform_schema,
_output_decision_function_schema,
_output_predict_proba_schema,
_output_predict_schema,
_output_transform_schema,
)
from .base_resampler import _BaseResamplerImpl
class _SVMSMOTEImpl(_BaseResamplerImpl):
def __init__(self, operator=None, **hyperparams):
if operator is None:
raise ValueError("Operator is a required argument.")
resampler_instance = imblearn.over_sampling.SVMSMOTE(**hyperparams)
super().__init__(operator=operator, resampler=resampler_instance)
_hyperparams_schema = {
"allOf": [
{
"type": "object",
"required": ["operator"],
"relevantToOptimizer": ["operator"],
"additionalProperties": False,
"properties": {
"operator": _hparam_operator,
"sampling_strategy": _hparam_sampling_strategy_anyof_neoc_over,
"random_state": _hparam_random_state,
"k_neighbors": {
**_hparam_n_neighbors,
"description": "Number of nearest neighbours to use to construct synthetic samples.",
"default": 5,
},
"n_jobs": _hparam_n_jobs,
"m_neighbors": {
**_hparam_n_neighbors,
"description": "Number of nearest neighbours to use to determine if a minority sample is in danger.",
"default": 10,
},
"svm_estimator": {
"description": "A parametrized sklearn.svm.SVC classifier can be passed.",
"anyOf": [{"laleType": "Any"}, {"enum": [None]}],
"default": None,
},
"out_step": {
"description": "Step size when extrapolating.",
"type": "number",
"default": 0.5,
},
},
}
]
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """Over-sampling using SVM-SMOTE,
Variant of SMOTE algorithm which use an SVM algorithm to detect sample to use for generating new synthetic samples.""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.imblearn.svm_smote.html",
"import_from": "imblearn.over_sampling",
"type": "object",
"tags": {
"pre": [],
"op": [
"transformer",
"estimator",
"resampler",
], # transformer and estimator both as a higher-order operator
"post": [],
},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_transform": _input_transform_schema,
"output_transform": _output_transform_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
"input_predict_proba": _input_predict_schema,
"output_predict_proba": _output_predict_proba_schema,
"input_decision_function": _input_predict_schema,
"output_decision_function": _output_decision_function_schema,
},
}
SVMSMOTE = lale.operators.make_operator(_SVMSMOTEImpl, _combined_schemas)
lale.docstrings.set_docstrings(SVMSMOTE)
| 4,189 | 34.811966 | 121 |
py
|
lale
|
lale-master/lale/lib/imblearn/_common_schemas.py
|
# Copyright 2023 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Dict
JSON_TYPE = Dict[str, Any]
_hparam_kind_sel = {
"description": """Strategy to use in order to exclude samples.
If ``all``, all neighbours will have to agree with the samples of interest to not be excluded.
If ``mode``, the majority vote of the neighbours will be used in order to exclude a sample.""",
"enum": ["all", "mode"],
"default": "all",
}
_hparam_n_jobs = {
"description": "The number of threads to open if possible.",
"type": "integer",
"default": 1,
}
_hparam_n_neighbors: JSON_TYPE = {
"description": "Number of neighbors.",
"anyOf": [
{
"type": "integer",
"description": "Number of nearest neighbours to use to construct synthetic samples.",
},
{
"laleType": "Any",
"description": "An estimator that inherits from :class:`sklearn.neighbors.base.KNeighborsMixin` that will be used to find the `n_neighbors`.",
},
],
}
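# Illustrative only: n_neighbors may be a plain count such as 3, or a pre-configured
# estimator such as sklearn.neighbors.NearestNeighbors(n_neighbors=3).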
_hparam_operator = {
"description": """Trainable Lale pipeline that is trained using the data obtained from the current imbalance corrector.
Predict, transform, predict_proba or decision_function would just be
forwarded to the trained pipeline. If operator is a Planned pipeline,
the current imbalance corrector can't be trained without using an
optimizer to choose a trainable operator first. Please refer to
lale/examples for more examples.""",
"laleType": "operator",
}
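# Minimal usage sketch for the ``operator`` hyperparameter described above (illustrative
# only; assumes the wrappers from lale.lib.imblearn and lale.lib.sklearn are available):
#
#     from lale.lib.imblearn import SMOTE
#     from lale.lib.sklearn import LogisticRegression
#
#     pipeline = SMOTE(operator=LogisticRegression())
#     trained = pipeline.fit(train_X, train_y)  # resample, then train the inner operator
#     predictions = trained.predict(test_X)     # forwarded to the trained inner operator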
_hparam_random_state = {
"description": "Control the randomization of the algorithm.",
"anyOf": [
{
"description": "RandomState used by np.random",
"enum": [None],
},
{
"description": "The seed used by the random number generator",
"type": "integer",
},
{
"description": "Random number generator instance.",
"laleType": "numpy.random.RandomState",
},
],
"default": None,
}
_hparam_sampling_strategy_number = {
"type": "number",
"forOptimizer": False,
"description": """Desired ratio of the number of samples in the
minority class over the number of samples in the majority class after
resampling. Therefore, the ratio is expressed as :math:`\\alpha_{os} =
N_{rm} / N_{M}` where :math:`N_{rm}` is the number of samples in the
minority class after resampling and :math:`N_{M}` is the number of
samples in the majority class.
.. warning::
Only available for **binary** classification.
An error is raised for multi-class classification.""",
}
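# Worked example: on a binary data set with 1000 majority and 100 minority samples,
# sampling_strategy=0.5 asks an over-sampler to generate synthetic minority samples
# until the minority class reaches 0.5 * 1000 = 500 samples.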
_hparam_sampling_strategy_enum_over = {
"enum": ["minority", "not minority", "not majority", "all", "auto"],
"description": """The class targeted by the resampling.
The number of samples in the different classes will be equalized.
Possible choices are:
- ``'minority'``: resample only the minority class;
- ``'not minority'``: resample all classes but the minority class;
- ``'not majority'``: resample all classes but the majority class;
- ``'all'``: resample all classes;
- ``'auto'``: equivalent to ``'not majority'``.""",
}
_hparam_sampling_strategy_enum_under = {
"enum": ["majority", "not minority", "not majority", "all", "auto"],
"description": """The class targeted by the resampling.
The number of samples in the different classes will be equalized.
Possible choices are:
- ``'majority'``: resample only the majority class;
- ``'not minority'``: resample all classes but the minority class;
- ``'not majority'``: resample all classes but the majority class;
- ``'all'``: resample all classes;
- ``'auto'``: equivalent to ``'not minority'``.""",
}
_hparam_sampling_strategy_object = {
"type": "object",
"forOptimizer": False,
"description": "Keys correspond to the targeted classes and values correspond to the desired number of samples for each targeted class.",
}
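# For example, sampling_strategy={0: 800, 1: 800} requests 800 samples of class 0 and
# 800 samples of class 1 after resampling.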
_hparam_sampling_strategy_callable = {
"laleType": "callable",
"forOptimizer": False,
"description": """Function taking ``y`` and returns a ``dict``.
The keys correspond to the targeted classes and the values correspond to the desired number of samples for each class.""",
}
_hparam_sampling_strategy_list = {
"description": "Classes targeted by the resampling.",
"forOptimizer": False,
"anyOf": [
{"type": "array", "items": {"type": "number"}},
{"type": "array", "items": {"type": "string"}},
],
}
_hparam_sampling_strategy_anyof_elc = {
"description": "Sampling information to resample the data set.",
"anyOf": [
_hparam_sampling_strategy_enum_over,
_hparam_sampling_strategy_list,
_hparam_sampling_strategy_callable,
],
"default": "auto",
}
_hparam_sampling_strategy_anyof_neoc_over = {
"description": "Sampling information to resample the data set.",
"anyOf": [
_hparam_sampling_strategy_number,
_hparam_sampling_strategy_enum_over,
_hparam_sampling_strategy_object,
_hparam_sampling_strategy_callable,
],
"default": "auto",
}
_hparam_sampling_strategy_anyof_neoc_under = {
"description": "Sampling information to resample the data set.",
"anyOf": [
_hparam_sampling_strategy_number,
_hparam_sampling_strategy_enum_under,
_hparam_sampling_strategy_object,
_hparam_sampling_strategy_callable,
],
"default": "auto",
}
_input_fit_schema = {
"type": "object",
"required": ["X", "y"],
"additionalProperties": False,
"properties": {
"X": {
"description": "Features; the outer array is over samples.",
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
},
"y": {
"description": "Target class labels; the array is over samples.",
"anyOf": [
{"type": "array", "items": {"type": "number"}},
{"type": "array", "items": {"type": "string"}},
],
},
},
}
_input_fit_schema_cats = {
"type": "object",
"required": ["X", "y"],
"additionalProperties": False,
"properties": {
"X": {
"description": "Features; the outer array is over samples.",
"type": "array",
"items": {
"type": "array",
"items": {"anyOf": [{"type": "number"}, {"type": "string"}]},
},
},
"y": {
"description": "Target class labels; the array is over samples.",
"anyOf": [
{"type": "array", "items": {"type": "number"}},
{"type": "array", "items": {"type": "string"}},
],
},
},
}
_input_transform_schema = {
"type": "object",
"required": ["X", "y"],
"additionalProperties": False,
"properties": {
"X": {
"description": "Features; the outer array is over samples.",
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
},
"y": {
"description": "Target class labels; the array is over samples.",
"anyOf": [
{"type": "array", "items": {"type": "number"}},
{"type": "array", "items": {"type": "string"}},
{"enum": [None]},
],
},
},
}
_input_transform_schema_cats = {
"type": "object",
"required": ["X", "y"],
"additionalProperties": False,
"properties": {
"X": {
"description": "Features; the outer array is over samples.",
"type": "array",
"items": {
"type": "array",
"items": {"anyOf": [{"type": "number"}, {"type": "string"}]},
},
},
"y": {
"description": "Target class labels; the array is over samples.",
"anyOf": [
{"type": "array", "items": {"type": "number"}},
{"type": "array", "items": {"type": "string"}},
{"enum": [None]},
],
},
},
}
_output_transform_schema = {
"description": "Output data schema for transformed data.",
"laleType": "Any",
}
_input_predict_schema = {
"type": "object",
"required": ["X"],
"additionalProperties": False,
"properties": {
"X": {
"description": "Features; the outer array is over samples.",
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
}
},
}
_input_predict_schema_cats = {
"type": "object",
"required": ["X"],
"additionalProperties": False,
"properties": {
"X": {
"description": "Features; the outer array is over samples.",
"type": "array",
"items": {
"type": "array",
"items": {"anyOf": [{"type": "number"}, {"type": "string"}]},
},
}
},
}
_output_predict_schema = {
"description": "Output data schema for predictions.",
"laleType": "Any",
}
_output_predict_proba_schema = {
"description": "Probability of the sample for each class in the model.",
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
}
_output_decision_function_schema = {
"description": "Output data schema for predictions.",
"laleType": "Any",
}
| 9,910 | 30.970968 | 154 |
py
|
lale
|
lale-master/lale/lib/imblearn/repeated_edited_nearest_neighbours.py
|
# Copyright 2019-2023 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import imblearn.under_sampling
import lale.docstrings
import lale.operators
from ._common_schemas import (
_hparam_kind_sel,
_hparam_n_jobs,
_hparam_n_neighbors,
_hparam_operator,
_hparam_sampling_strategy_anyof_elc,
_input_fit_schema,
_input_predict_schema,
_input_transform_schema,
_output_decision_function_schema,
_output_predict_proba_schema,
_output_predict_schema,
_output_transform_schema,
)
from .base_resampler import _BaseResamplerImpl
class _RepeatedEditedNearestNeighboursImpl(_BaseResamplerImpl):
def __init__(self, operator=None, **hyperparams):
if operator is None:
raise ValueError("Operator is a required argument.")
resampler_instance = imblearn.under_sampling.RepeatedEditedNearestNeighbours(
**hyperparams
)
super().__init__(operator=operator, resampler=resampler_instance)
_hyperparams_schema = {
"allOf": [
{
"type": "object",
"relevantToOptimizer": ["operator"],
"additionalProperties": False,
"properties": {
"operator": _hparam_operator,
"sampling_strategy": _hparam_sampling_strategy_anyof_elc,
"n_neighbors": {
**_hparam_n_neighbors,
"default": 3,
},
"max_iter": {
"description": "Maximum number of iterations of the edited nearest neighbours algorithm for a single run.",
"type": "integer",
"default": 100,
},
"kind_sel": _hparam_kind_sel,
"n_jobs": _hparam_n_jobs,
},
}
]
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """Class to perform under-sampling based on the repeated edited nearest neighbour method.""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.imblearn.repeated_edited_nearest_neighbours.html",
"import_from": "imblearn.under_sampling",
"type": "object",
"tags": {
"pre": [],
"op": [
"transformer",
"estimator",
"resampler",
], # transformer and estimator both as a higher-order operator
"post": [],
},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_transform": _input_transform_schema,
"output_transform": _output_transform_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
"input_predict_proba": _input_predict_schema,
"output_predict_proba": _output_predict_proba_schema,
"input_decision_function": _input_predict_schema,
"output_decision_function": _output_decision_function_schema,
},
}
RepeatedEditedNearestNeighbours = lale.operators.make_operator(
_RepeatedEditedNearestNeighboursImpl, _combined_schemas
)
lale.docstrings.set_docstrings(RepeatedEditedNearestNeighbours)
| 3,685 | 33.12963 | 131 |
py
|
lale
|
lale-master/lale/lib/imblearn/random_under_sampler.py
|
# Copyright 2023 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import imblearn.under_sampling
import lale.docstrings
import lale.operators
from ._common_schemas import (
_hparam_operator,
_hparam_random_state,
_hparam_sampling_strategy_anyof_neoc_under,
_input_fit_schema_cats,
_input_predict_schema_cats,
_input_transform_schema_cats,
_output_decision_function_schema,
_output_predict_proba_schema,
_output_predict_schema,
_output_transform_schema,
)
from .base_resampler import _BaseResamplerImpl
class _RandomUnderSamplerImpl(_BaseResamplerImpl):
def __init__(self, *, operator=None, **hyperparams):
if operator is None:
raise ValueError("Operator is a required argument.")
resampler_instance = imblearn.under_sampling.RandomUnderSampler(**hyperparams)
super().__init__(operator=operator, resampler=resampler_instance)
_hyperparams_schema = {
"allOf": [
{
"type": "object",
"required": ["operator"],
"relevantToOptimizer": ["operator"],
"additionalProperties": False,
"properties": {
"operator": _hparam_operator,
"sampling_strategy": _hparam_sampling_strategy_anyof_neoc_under,
"random_state": _hparam_random_state,
"replacement": {
"description": "Whether the sample is with or without replacement.",
"type": "boolean",
"default": False,
},
},
}
]
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """Class to perform random under-sampling, i.e. under-sample the minority class(es) by picking samples at random with replacement.""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.imblearn.random_under_sampler.html",
"import_from": "imblearn.under_sampling",
"type": "object",
"tags": {
"pre": [],
"op": [
"transformer",
"estimator",
"resampler",
], # transformer and estimator both as a higher-order operator
"post": [],
},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema_cats,
"input_transform": _input_transform_schema_cats,
"output_transform": _output_transform_schema,
"input_predict": _input_predict_schema_cats,
"output_predict": _output_predict_schema,
"input_predict_proba": _input_predict_schema_cats,
"output_predict_proba": _output_predict_proba_schema,
"input_decision_function": _input_predict_schema_cats,
"output_decision_function": _output_decision_function_schema,
},
}
RandomUnderSampler = lale.operators.make_operator(
_RandomUnderSamplerImpl, _combined_schemas
)
lale.docstrings.set_docstrings(RandomUnderSampler)
| 3,477 | 33.78 | 153 |
py
|
lale
|
lale-master/lale/lib/imblearn/borderline_smote.py
|
# Copyright 2019-2023 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import imblearn.over_sampling
import lale.docstrings
import lale.operators
from ._common_schemas import (
_hparam_n_jobs,
_hparam_n_neighbors,
_hparam_operator,
_hparam_random_state,
_hparam_sampling_strategy_anyof_neoc_over,
_input_fit_schema,
_input_predict_schema,
_input_transform_schema,
_output_decision_function_schema,
_output_predict_proba_schema,
_output_predict_schema,
_output_transform_schema,
)
from .base_resampler import _BaseResamplerImpl
class _BorderlineSMOTEImpl(_BaseResamplerImpl):
def __init__(self, operator=None, **hyperparams):
if operator is None:
raise ValueError("Operator is a required argument.")
resampler_instance = imblearn.over_sampling.BorderlineSMOTE(**hyperparams)
super().__init__(operator=operator, resampler=resampler_instance)
_hyperparams_schema = {
"allOf": [
{
"type": "object",
"relevantToOptimizer": ["operator"],
"additionalProperties": False,
"properties": {
"operator": _hparam_operator,
"sampling_strategy": _hparam_sampling_strategy_anyof_neoc_over,
"random_state": _hparam_random_state,
"k_neighbors": {
**_hparam_n_neighbors,
"description": "Number of nearest neighbours to use to construct synthetic samples.",
"default": 5,
},
"n_jobs": _hparam_n_jobs,
"m_neighbors": {
**_hparam_n_neighbors,
"description": "Number of nearest neighbours to use to determine if a minority sample is in danger.",
"default": 10,
},
"kind": {
"description": "The type of SMOTE algorithm to use.",
"enum": ["borderline-1", "borderline-2"],
"default": "borderline-1",
},
},
}
]
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """Over-sampling using Borderline SMOTE, which is a variant of the original SMOTE algorithm.
Borderline samples will be detected and used to generate new synthetic samples.""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.imblearn.borderline_smote.html",
"import_from": "imblearn.over_sampling",
"type": "object",
"tags": {
"pre": [],
"op": [
"transformer",
"estimator",
"resampler",
], # transformer and estimator both as a higher-order operator
"post": [],
},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_transform": _input_transform_schema,
"output_transform": _output_transform_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
"input_predict_proba": _input_predict_schema,
"output_predict_proba": _output_predict_proba_schema,
"input_decision_function": _input_predict_schema,
"output_decision_function": _output_decision_function_schema,
},
}
BorderlineSMOTE = lale.operators.make_operator(_BorderlineSMOTEImpl, _combined_schemas)
lale.docstrings.set_docstrings(BorderlineSMOTE)
| 3,997 | 35.018018 | 121 |
py
|
lale
|
lale-master/lale/lib/imblearn/smote.py
|
# Copyright 2019-2023 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import imblearn.over_sampling
import lale.docstrings
import lale.operators
from ._common_schemas import (
_hparam_n_jobs,
_hparam_n_neighbors,
_hparam_operator,
_hparam_random_state,
_hparam_sampling_strategy_anyof_neoc_over,
_input_fit_schema,
_input_predict_schema,
_input_transform_schema,
_output_decision_function_schema,
_output_predict_proba_schema,
_output_predict_schema,
_output_transform_schema,
)
from .base_resampler import _BaseResamplerImpl
class _SMOTEImpl(_BaseResamplerImpl):
def __init__(self, operator=None, **hyperparams):
if operator is None:
raise ValueError("Operator is a required argument.")
resampler_instance = imblearn.over_sampling.SMOTE(**hyperparams)
super().__init__(operator=operator, resampler=resampler_instance)
_hyperparams_schema = {
"allOf": [
{
"type": "object",
"required": ["operator"],
"relevantToOptimizer": ["operator", "sampling_strategy"],
"additionalProperties": False,
"properties": {
"operator": _hparam_operator,
"sampling_strategy": _hparam_sampling_strategy_anyof_neoc_over,
"random_state": _hparam_random_state,
"k_neighbors": {
**_hparam_n_neighbors,
"description": "Number of nearest neighbours to use to construct synthetic samples.",
"default": 5,
},
"n_jobs": _hparam_n_jobs,
},
}
]
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """Class to perform over-sampling using Synthetic Minority Over-sampling Technique (SMOTE).""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.imblearn.smote.html",
"import_from": "imblearn.over_sampling",
"type": "object",
"tags": {
"pre": [],
"op": [
"transformer",
"estimator",
"resampler",
], # transformer and estimator both as a higher-order operator
"post": [],
},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_transform": _input_transform_schema,
"output_transform": _output_transform_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
"input_predict_proba": _input_predict_schema,
"output_predict_proba": _output_predict_proba_schema,
"input_decision_function": _input_predict_schema,
"output_decision_function": _output_decision_function_schema,
},
}
SMOTE = lale.operators.make_operator(_SMOTEImpl, _combined_schemas)
lale.docstrings.set_docstrings(SMOTE)
| 3,434 | 33.009901 | 114 |
py
|
lale
|
lale-master/lale/lib/imblearn/__init__.py
|
# Copyright 2019-2023 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Scikit-learn compatible wrappers for a subset of the operators from imbalanced-learn_ along with schemas to enable hyperparameter tuning.
.. _imbalanced-learn: https://imbalanced-learn.readthedocs.io/en/stable/index.html
Operators:
==========
* `CondensedNearestNeighbour`_
* `EditedNearestNeighbours`_
* `RepeatedEditedNearestNeighbours`_
* `AllKNN`_
* `InstanceHardnessThreshold`_
* `ADASYN`_
* `BorderlineSMOTE`_
* `RandomOverSampler`_
* `RandomUnderSampler`_
* `SMOTE`_
* `SMOTEN`_
* `SMOTENC`_
* `SVMSMOTE`_
* `SMOTEENN`_
.. _`CondensedNearestNeighbour`: lale.lib.imblearn.condensed_nearest_neighbour.html
.. _`EditedNearestNeighbours`: lale.lib.imblearn.edited_nearest_neighbours.html
.. _`RepeatedEditedNearestNeighbours`: lale.lib.imblearn.repeated_edited_nearest_neighbours.html
.. _`AllKNN`: lale.lib.imblearn.all_knn.html
.. _`InstanceHardnessThreshold`: lale.lib.imblearn.instance_hardness_threshold.html
.. _`ADASYN`: lale.lib.imblearn.adasyn.html
.. _`BorderlineSMOTE`: lale.lib.imblearn.borderline_smote.html
.. _`RandomOverSampler`: lale.lib.imblearn.random_over_sampler.html
.. _`RandomUnderSampler`: lale.lib.imblearn.random_under_sampler.html
.. _`SMOTE`: lale.lib.imblearn.smote.html
.. _`SMOTEN`: lale.lib.imblearn.smoten.html
.. _`SMOTENC`: lale.lib.imblearn.smotenc.html
.. _`SVMSMOTE`: lale.lib.imblearn.svm_smote.html
.. _`SMOTEENN`: lale.lib.imblearn.smoteenn.html
"""
# Note: all imports should be done as
# from .xxx import XXX as XXX
# this ensures that pyright considers them to be publicly available
# and not private imports (this affects lale users that use pyright)
from .adasyn import ADASYN as ADASYN
from .all_knn import AllKNN as AllKNN
from .borderline_smote import BorderlineSMOTE as BorderlineSMOTE
from .condensed_nearest_neighbour import (
CondensedNearestNeighbour as CondensedNearestNeighbour,
)
from .edited_nearest_neighbours import (
EditedNearestNeighbours as EditedNearestNeighbours,
)
from .instance_hardness_threshold import (
InstanceHardnessThreshold as InstanceHardnessThreshold,
)
from .random_over_sampler import RandomOverSampler as RandomOverSampler
from .random_under_sampler import RandomUnderSampler as RandomUnderSampler
from .repeated_edited_nearest_neighbours import (
RepeatedEditedNearestNeighbours as RepeatedEditedNearestNeighbours,
)
from .smote import SMOTE as SMOTE
from .smoteenn import SMOTEENN as SMOTEENN
from .smoten import SMOTEN as SMOTEN
from .smotenc import SMOTENC as SMOTENC
from .svm_smote import SVMSMOTE as SVMSMOTE
| 3,109 | 37.395062 | 137 |
py
|
lale
|
lale-master/lale/lib/imblearn/instance_hardness_threshold.py
|
# Copyright 2019-2023 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import imblearn.under_sampling
import lale.docstrings
import lale.operators
from ._common_schemas import (
_hparam_n_jobs,
_hparam_operator,
_hparam_random_state,
_hparam_sampling_strategy_anyof_neoc_under,
_input_fit_schema,
_input_predict_schema,
_input_transform_schema,
_output_decision_function_schema,
_output_predict_proba_schema,
_output_predict_schema,
_output_transform_schema,
)
from .base_resampler import _BaseResamplerImpl
class _InstanceHardnessThresholdImpl(_BaseResamplerImpl):
def __init__(self, operator=None, **hyperparams):
if operator is None:
raise ValueError("Operator is a required argument.")
resampler_instance = imblearn.under_sampling.InstanceHardnessThreshold(
**hyperparams
)
super().__init__(operator=operator, resampler=resampler_instance)
_hyperparams_schema = {
"allOf": [
{
"type": "object",
"relevantToOptimizer": ["operator"],
"additionalProperties": False,
"properties": {
"operator": _hparam_operator,
"estimator": {
"description": """Classifier to be used to estimate instance hardness of the samples.
By default a :class:`sklearn.ensemble.RandomForestClassifier` will be used.
If ``str``, the choices using a string are the following: ``'knn'``,
``'decision-tree'``, ``'random-forest'``, ``'adaboost'``,
``'gradient-boosting'`` and ``'linear-svm'``. If object, an estimator
inherited from :class:`sklearn.base.ClassifierMixin` and having an
attribute :func:`predict_proba`.""",
"anyOf": [
{"laleType": "Any"},
{
"enum": [
"knn",
"decision-tree",
"random-forest",
"adaboost",
"gradient-boosting",
"linear-svm",
]
},
{"enum": [None]},
],
"default": None,
},
"sampling_strategy": _hparam_sampling_strategy_anyof_neoc_under,
"random_state": _hparam_random_state,
"cv": {
"description": "Number of folds to be used when estimating samples’ instance hardness.",
"type": "integer",
"minimum": 1,
"default": 5,
},
"n_jobs": _hparam_n_jobs,
},
}
]
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """Class to perform under-sampling based on the instance hardness threshold.""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.imblearn.instance_hardness_threshold.html",
"import_from": "imblearn.under_sampling",
"type": "object",
"tags": {
"pre": [],
"op": [
"transformer",
"estimator",
"resampler",
], # transformer and estimator both as a higher-order operator
"post": [],
},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_transform": _input_transform_schema,
"output_transform": _output_transform_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
"input_predict_proba": _input_predict_schema,
"output_predict_proba": _output_predict_proba_schema,
"input_decision_function": _input_predict_schema,
"output_decision_function": _output_decision_function_schema,
},
}
InstanceHardnessThreshold = lale.operators.make_operator(
_InstanceHardnessThresholdImpl, _combined_schemas
)
lale.docstrings.set_docstrings(InstanceHardnessThreshold)
| 4,626 | 35.148438 | 124 |
py
|
lale
|
lale-master/lale/lib/imblearn/smoteenn.py
|
# Copyright 2019-2023 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import imblearn.combine
import lale.docstrings
import lale.operators
from ._common_schemas import (
_hparam_operator,
_hparam_random_state,
_hparam_sampling_strategy_anyof_neoc_over,
_input_fit_schema,
_input_predict_schema,
_input_transform_schema,
_output_decision_function_schema,
_output_predict_proba_schema,
_output_predict_schema,
_output_transform_schema,
)
from .base_resampler import _BaseResamplerImpl
class _SMOTEENNImpl(_BaseResamplerImpl):
def __init__(self, operator=None, **hyperparams):
if operator is None:
raise ValueError("Operator is a required argument.")
resampler_instance = imblearn.combine.SMOTEENN(**hyperparams)
super().__init__(operator=operator, resampler=resampler_instance)
_hyperparams_schema = {
"allOf": [
{
"type": "object",
"relevantToOptimizer": ["operator"],
"additionalProperties": False,
"properties": {
"operator": _hparam_operator,
"sampling_strategy": _hparam_sampling_strategy_anyof_neoc_over,
"random_state": _hparam_random_state,
"smote": {
"description": """The imblearn.over_sampling.SMOTE object to use.
If not given, an imblearn.over_sampling.SMOTE object with default parameters will be given.""",
"anyOf": [{"laleType": "Any"}, {"enum": [None]}],
"default": None,
},
"enn": {
"description": """The imblearn.under_sampling.EditedNearestNeighbours object to use.
If not given, an imblearn.under_sampling.EditedNearestNeighbours object with sampling_strategy='all' will be given.""",
"anyOf": [{"laleType": "Any"}, {"enum": [None]}],
"default": None,
},
},
}
]
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """Class to perform over-sampling using SMOTE and cleaning using ENN.
Combine over- and under-sampling using SMOTE and Edited Nearest Neighbours.""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.imblearn.smoteenn.html",
"import_from": "imblearn.over_sampling",
"type": "object",
"tags": {
"pre": [],
"op": [
"transformer",
"estimator",
"resampler",
], # transformer and estimator both as a higher-order operator
"post": [],
},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_transform": _input_transform_schema,
"output_transform": _output_transform_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
"input_predict_proba": _input_predict_schema,
"output_predict_proba": _output_predict_proba_schema,
"input_decision_function": _input_predict_schema,
"output_decision_function": _output_decision_function_schema,
},
}
SMOTEENN = lale.operators.make_operator(_SMOTEENNImpl, _combined_schemas)
lale.docstrings.set_docstrings(SMOTEENN)
| 3,822 | 35.409524 | 118 |
py
|
lale
|
lale-master/lale/lib/imblearn/random_over_sampler.py
|
# Copyright 2019-2023 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import imblearn.over_sampling
import lale.docstrings
import lale.operators
from ._common_schemas import (
_hparam_operator,
_hparam_random_state,
_hparam_sampling_strategy_anyof_neoc_over,
_input_fit_schema,
_input_predict_schema,
_input_transform_schema,
_output_decision_function_schema,
_output_predict_proba_schema,
_output_predict_schema,
_output_transform_schema,
)
from .base_resampler import _BaseResamplerImpl
class _RandomOverSamplerImpl(_BaseResamplerImpl):
def __init__(self, operator=None, **hyperparams):
if operator is None:
raise ValueError("Operator is a required argument.")
resampler_instance = imblearn.over_sampling.RandomOverSampler(**hyperparams)
super().__init__(operator=operator, resampler=resampler_instance)
_hyperparams_schema = {
"allOf": [
{
"type": "object",
"required": ["operator"],
"relevantToOptimizer": ["operator"],
"additionalProperties": False,
"properties": {
"operator": _hparam_operator,
"sampling_strategy": _hparam_sampling_strategy_anyof_neoc_over,
"random_state": _hparam_random_state,
},
}
]
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """Class to perform random over-sampling, i.e. over-sample the minority class(es) by picking samples at random with replacement.""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.imblearn.random_over_sampler.html",
"import_from": "imblearn.over_sampling",
"type": "object",
"tags": {
"pre": [],
"op": [
"transformer",
"estimator",
"resampler",
], # transformer and estimator both as a higher-order operator
"post": [],
},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_transform": _input_transform_schema,
"output_transform": _output_transform_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
"input_predict_proba": _input_predict_schema,
"output_predict_proba": _output_predict_proba_schema,
"input_decision_function": _input_predict_schema,
"output_decision_function": _output_decision_function_schema,
},
}
RandomOverSampler = lale.operators.make_operator(
_RandomOverSamplerImpl, _combined_schemas
)
lale.docstrings.set_docstrings(RandomOverSampler)
| 3,208 | 32.778947 | 151 |
py
|
lale
|
lale-master/lale/lib/imblearn/all_knn.py
|
# Copyright 2019-2023 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import imblearn.under_sampling
import lale.docstrings
import lale.operators
from ._common_schemas import (
_hparam_kind_sel,
_hparam_n_jobs,
_hparam_n_neighbors,
_hparam_operator,
_hparam_sampling_strategy_anyof_elc,
_input_fit_schema,
_input_predict_schema,
_input_transform_schema,
_output_decision_function_schema,
_output_predict_proba_schema,
_output_predict_schema,
_output_transform_schema,
)
from .base_resampler import _BaseResamplerImpl
class _AllKNNImpl(_BaseResamplerImpl):
def __init__(self, operator=None, **hyperparams):
if operator is None:
raise ValueError("Operator is a required argument.")
resampler_instance = imblearn.under_sampling.AllKNN(**hyperparams)
super().__init__(operator=operator, resampler=resampler_instance)
_hyperparams_schema = {
"allOf": [
{
"type": "object",
"relevantToOptimizer": ["operator"],
"additionalProperties": False,
"properties": {
"operator": _hparam_operator,
"sampling_strategy": _hparam_sampling_strategy_anyof_elc,
"n_neighbors": {**_hparam_n_neighbors, "default": 3},
"kind_sel": _hparam_kind_sel,
"allow_minority": {
"description": """If True, it allows the majority classes to become the minority class without early stopping.""",
"type": "boolean",
"default": False,
},
"n_jobs": _hparam_n_jobs,
},
}
]
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """Class to perform under-sampling based on the AllKNN method.""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.imblearn.all_knn.html",
"import_from": "imblearn.under_sampling",
"type": "object",
"tags": {
"pre": [],
"op": [
"transformer",
"estimator",
"resampler",
], # transformer and estimator both as a higher-order operator
"post": [],
},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_transform": _input_transform_schema,
"output_transform": _output_transform_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
"input_predict_proba": _input_predict_schema,
"output_predict_proba": _output_predict_proba_schema,
"input_decision_function": _input_predict_schema,
"output_decision_function": _output_decision_function_schema,
},
}
AllKNN = lale.operators.make_operator(_AllKNNImpl, _combined_schemas)
lale.docstrings.set_docstrings(AllKNN)
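# Illustrative usage sketch (editorial addition, not part of the upstream module):
# a minimal, hedged example of the AllKNN higher-order operator defined above,
# which under-samples the majority class before training the wrapped estimator.
# The synthetic dataset and the LogisticRegression choice are assumptions for
# illustration only; running it also requires the optional imbalanced-learn package.
if __name__ == "__main__":
    from sklearn.datasets import make_classification
    from lale.lib.sklearn import LogisticRegression
    X, y = make_classification(
        n_samples=300, n_features=8, weights=[0.8, 0.2], random_state=0
    )
    pipeline = AllKNN(operator=LogisticRegression(), n_neighbors=3)
    trained = pipeline.fit(X, y)
    print(trained.predict(X[:5]))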
| 3,434 | 33.009901 | 134 |
py
|
lale
|
lale-master/lale/lib/sklearn/k_neighbors_classifier.py
|
# Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sklearn.neighbors
import lale.docstrings
import lale.operators
_hyperparams_schema = {
"description": "Hyperparameter schema for the KNeighborsClassifier model from scikit-learn.",
"allOf": [
{
"description": "This first sub-object lists all constructor arguments with their "
"types, one at a time, omitting cross-argument constraints.",
"type": "object",
"additionalProperties": False,
"required": [
"n_neighbors",
"weights",
"algorithm",
"leaf_size",
"p",
"metric",
"metric_params",
"n_jobs",
],
"relevantToOptimizer": [
"n_neighbors",
"weights",
"algorithm",
"p",
"metric",
],
"properties": {
"n_neighbors": {
"description": "Number of neighbors to use by default for kneighbors queries.",
"type": "integer",
"distribution": "uniform",
"minimum": 1,
"laleMaximum": "X/maxItems", # number of rows
"default": 5,
"maximumForOptimizer": 100,
},
"weights": {
"description": "Weight function used in prediction.",
"enum": ["uniform", "distance"],
"default": "uniform",
},
"algorithm": {
"description": "Algorithm used to compute the nearest neighbors.",
"enum": ["ball_tree", "kd_tree", "brute", "auto"],
"default": "auto",
},
"leaf_size": {
"description": "Leaf size passed to BallTree or KDTree.",
"type": "integer",
"distribution": "uniform",
"minimum": 1,
"default": 30,
"maximumForOptimizer": 100,
},
"p": {
"description": "Power parameter for the Minkowski metric.",
"type": "integer",
"distribution": "uniform",
"minimum": 1,
"default": 2,
"maximumForOptimizer": 3,
},
"metric": {
"description": "The distance metric to use for the tree.",
"enum": ["euclidean", "manhattan", "minkowski"],
"default": "minkowski",
},
"metric_params": {
"description": "Additional keyword arguments for the metric function.",
"anyOf": [
{"enum": [None]},
{
"type": "object",
"propertyNames": {"pattern": "[_a-zA-Z][_a-zA-Z0-9]*"},
},
],
"default": None,
},
"n_jobs": {
"description": "Number of parallel jobs to run for the neighbor search.",
"anyOf": [
{
"description": "1 unless in joblib.parallel_backend context.",
"enum": [None],
},
{"description": "Use all processors.", "enum": [(-1)]},
{
"description": "Number of CPU cores.",
"type": "integer",
"minimum": 1,
},
],
"default": None,
},
},
},
],
}
_input_fit_schema = {
"description": "Input data schema for training the KNeighborsClassifier model from scikit-learn.",
"type": "object",
"required": ["X", "y"],
"additionalProperties": False,
"properties": {
"X": {
"description": "Features; the outer array is over samples.",
"type": "array",
"items": {
"type": "array",
"items": {"type": "number"},
},
},
"y": {
"description": "Target class labels; the array is over samples.",
"anyOf": [
{"type": "array", "items": {"type": "number"}},
{
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
},
{"type": "array", "items": {"type": "string"}},
{"type": "array", "items": {"type": "boolean"}},
],
},
},
}
_input_predict_schema = {
"description": "Input data schema for predictions using the KNeighborsClassifier model from scikit-learn.",
"type": "object",
"required": ["X"],
"additionalProperties": False,
"properties": {
"X": {
"description": "Features; the outer array is over samples.",
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
}
},
}
_output_predict_schema = {
"description": "Predicted class label per sample.",
"anyOf": [
{"type": "array", "items": {"type": "number"}},
{"type": "array", "items": {"type": "array", "items": {"type": "number"}}},
{"type": "array", "items": {"type": "string"}},
{"type": "array", "items": {"type": "boolean"}},
],
}
_input_predict_proba_schema = {
"description": "Input data schema for predictions using the KNeighborsClassifier model from scikit-learn.",
"type": "object",
"required": ["X"],
"additionalProperties": False,
"properties": {
"X": {
"description": "Features; the outer array is over samples.",
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
}
},
}
_output_predict_proba_schema = {
"description": "Probability of the sample for each class in the model.",
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """`K nearest neighbors classifier`_ from scikit-learn.
.. _`K nearest neighbors classifier`: https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.KNeighborsClassifier.html
""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.sklearn.k_neighbors_classifier.html",
"import_from": "sklearn.neighbors",
"type": "object",
"tags": {
"pre": ["~categoricals"],
"op": ["estimator", "classifier", "interpretable"],
"post": ["probabilities"],
},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
"input_predict_proba": _input_predict_proba_schema,
"output_predict_proba": _output_predict_proba_schema,
},
}
KNeighborsClassifier = lale.operators.make_operator(
sklearn.neighbors.KNeighborsClassifier,
_combined_schemas,
)
lale.docstrings.set_docstrings(KNeighborsClassifier)
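# Illustrative usage sketch (editorial addition, not part of the upstream module):
# a minimal, hedged example of training the KNeighborsClassifier operator defined
# above through Lale's fit/predict API. The iris dataset and the n_neighbors value
# are arbitrary choices for illustration only.
if __name__ == "__main__":
    from sklearn.datasets import load_iris
    X, y = load_iris(return_X_y=True)
    trained = KNeighborsClassifier(n_neighbors=3).fit(X, y)
    print(trained.predict(X[:5]))
    print(trained.predict_proba(X[:5]))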
| 8,052 | 35.438914 | 131 |
py
|
lale
|
lale-master/lale/lib/sklearn/multinomial_nb.py
|
# Copyright 2019-2022 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sklearn.naive_bayes
from packaging import version
import lale.docstrings
import lale.operators
from ...schemas import Bool
from ._common_schemas import (
schema_1D_cats,
schema_2D_numbers,
schema_sample_weight,
schema_X_numbers,
)
_hyperparams_schema = {
"description": "Naive Bayes classifier for multinomial models",
"allOf": [
{
"type": "object",
"required": ["alpha", "fit_prior"],
"relevantToOptimizer": ["alpha", "fit_prior"],
"properties": {
"alpha": {
"type": "number",
"distribution": "loguniform",
"minimumForOptimizer": 1e-10,
"maximumForOptimizer": 1.0,
"default": 1.0,
"description": "Additive (Laplace/Lidstone) smoothing parameter (0 for no smoothing).",
},
"fit_prior": {
"type": "boolean",
"default": True,
"description": "Whether to learn class prior probabilities or not.",
},
"class_prior": {
"anyOf": [
{"type": "array", "items": {"type": "number"}},
{"enum": [None]},
],
"default": None,
"description": "Prior probabilities of the classes. If specified the priors are not adjusted according to the data.",
},
},
}
],
}
_input_fit_schema = {
"description": "Fit Naive Bayes classifier according to X, y",
"type": "object",
"required": ["X", "y"],
"properties": {
"X": schema_2D_numbers,
"y": schema_1D_cats,
"sample_weight": schema_sample_weight,
},
}
_input_partial_fit_schema = {
"type": "object",
"required": ["X", "y"],
"properties": {
"X": schema_2D_numbers,
"y": schema_1D_cats,
"classes": schema_1D_cats,
"sample_weight": schema_sample_weight,
},
}
_output_predict_proba_schema = {
"description": "Returns the probability of the samples for each class in the model. The columns correspond to the classes in sorted order, as they appear in the attribute `classes_`.",
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """`Multinomial Naive Bayes`_ classifier from scikit-learn.
.. _`Multinomial Naive Bayes`: https://scikit-learn.org/stable/modules/generated/sklearn.naive_bayes.MultinomialNB.html
""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.sklearn.multinomial_naive_bayes.html",
"import_from": "sklearn.naive_bayes",
"type": "object",
"tags": {"pre": [], "op": ["estimator", "classifier"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_partial_fit": _input_partial_fit_schema,
"input_predict": schema_X_numbers,
"output_predict": schema_1D_cats,
"input_predict_proba": schema_X_numbers,
"output_predict_proba": _output_predict_proba_schema,
},
}
MultinomialNB = lale.operators.make_operator(
sklearn.naive_bayes.MultinomialNB, _combined_schemas
)
if lale.operators.sklearn_version >= version.Version("1.2"):
MultinomialNB = MultinomialNB.customize_schema(
force_alpha=Bool(
default=False,
desc="""If False and alpha is less than 1e-10,
it will set alpha to 1e-10. If True, alpha will remain unchanged.
This may cause numerical errors if alpha is too close to 0.
""",
)
)
if lale.operators.sklearn_version >= version.Version("1.4"):
MultinomialNB = MultinomialNB.customize_schema(
force_alpha=Bool(
default=True,
desc="""If False and alpha is less than 1e-10,
it will set alpha to 1e-10. If True, alpha will remain unchanged.
This may cause numerical errors if alpha is too close to 0.
""",
)
)
lale.docstrings.set_docstrings(MultinomialNB)
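# Illustrative usage sketch (editorial addition, not part of the upstream module):
# a minimal, hedged example of the MultinomialNB operator defined above on
# non-negative count-like features, which is the kind of input this model expects.
# The random counts and the alpha value are arbitrary choices for illustration only.
if __name__ == "__main__":
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.randint(5, size=(6, 20))  # non-negative integer counts
    y = np.array([1, 2, 3, 4, 5, 6])
    trained = MultinomialNB(alpha=0.5).fit(X, y)
    print(trained.predict(X[:2]))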
| 4,783 | 33.171429 | 188 |
py
|
lale
|
lale-master/lale/lib/sklearn/sgd_classifier.py
|
# Copyright 2019-2022 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
from sklearn.linear_model import SGDClassifier as SKLModel
import lale.docstrings
import lale.operators
from ._common_schemas import (
schema_1D_cats,
schema_2D_numbers,
schema_sample_weight,
schema_X_numbers,
)
_hyperparams_schema = {
"description": "inherited docstring for SGDClassifier Linear classifiers (SVM, logistic regression, a.o.) with SGD training.",
"allOf": [
{
"type": "object",
"required": [
"loss",
"penalty",
"alpha",
"l1_ratio",
"fit_intercept",
"max_iter",
"tol",
"shuffle",
"verbose",
"epsilon",
"n_jobs",
"random_state",
"learning_rate",
"eta0",
"power_t",
"early_stopping",
"validation_fraction",
"n_iter_no_change",
"class_weight",
"warm_start",
"average",
],
"relevantToOptimizer": [
"loss",
"penalty",
"alpha",
"l1_ratio",
"fit_intercept",
"max_iter",
"tol",
"shuffle",
"epsilon",
"learning_rate",
"eta0",
"power_t",
],
"additionalProperties": False,
"properties": {
"loss": {
"enum": [
"hinge",
"log",
"modified_huber",
"squared_hinge",
"perceptron",
"squared_loss",
"huber",
"epsilon_insensitive",
"squared_epsilon_insensitive",
],
"default": "hinge",
"description": "The loss function to be used. Defaults to 'hinge', which gives a linear SVM.",
},
"penalty": {
"description": "The penalty (aka regularization term) to be used. Defaults to 'l2'",
"enum": ["elasticnet", "l1", "l2"],
"default": "l2",
},
"alpha": {
"type": "number",
"minimumForOptimizer": 1e-10,
"maximumForOptimizer": 1.0,
"distribution": "loguniform",
"default": 0.0001,
"description": "Constant that multiplies the regularization term. Defaults to 0.0001",
},
"l1_ratio": {
"type": "number",
"minimumForOptimizer": 1e-9,
"maximumForOptimizer": 1.0,
"distribution": "loguniform",
"default": 0.15,
"description": "The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.",
},
"fit_intercept": {
"type": "boolean",
"default": True,
"description": "Whether the intercept should be estimated or not. If False, the",
},
"max_iter": {
"type": "integer",
"minimumForOptimizer": 10,
"maximumForOptimizer": 1000,
"distribution": "uniform",
"default": 1000,
"description": "The maximum number of passes over the training data (aka epochs).",
},
"tol": {
"anyOf": [
{
"type": "number",
"minimumForOptimizer": 1e-08,
"maximumForOptimizer": 0.01,
},
{"enum": [None]},
],
"default": 0.001,
"description": "The stopping criterion.",
},
"shuffle": {
"type": "boolean",
"default": True,
"description": "Whether or not the training data should be shuffled after each epoch.",
},
"verbose": {
"type": "integer",
"default": 0,
"description": "The verbosity level",
},
"epsilon": {
"type": "number",
"minimumForOptimizer": 1e-08,
"maximumForOptimizer": 1.35,
"distribution": "loguniform",
"default": 0.1,
"description": "Epsilon in the epsilon-insensitive loss functions; only if `loss` is",
},
"n_jobs": {
"anyOf": [{"type": "integer"}, {"enum": [None]}],
"default": None,
"description": "The number of CPUs to use to do the OVA (One Versus All, for",
},
"random_state": {
"anyOf": [
{"type": "integer"},
{"laleType": "numpy.random.RandomState"},
{"enum": [None]},
],
"default": None,
"description": "The seed of the pseudo random number generator to use when shuffling",
},
"learning_rate": {
"enum": ["optimal", "constant", "invscaling", "adaptive"],
"default": "optimal",
"description": "The learning rate schedule:",
},
"eta0": {
"type": "number",
"minimumForOptimizer": 0.01,
"maximumForOptimizer": 1.0,
"distribution": "loguniform",
"default": 0.0,
"description": "The initial learning rate for the 'constant', 'invscaling' or",
},
"power_t": {
"type": "number",
"minimumForOptimizer": 0.00001,
"maximumForOptimizer": 1.0,
"distribution": "uniform",
"default": 0.5,
"description": "The exponent for inverse scaling learning rate [default 0.5].",
},
"early_stopping": {
"type": "boolean",
"default": False,
"description": "Whether to use early stopping to terminate training when validation",
},
"validation_fraction": {
"type": "number",
"minimum": 0.0,
"maximum": 1.0,
"default": 0.1,
"description": "The proportion of training data to set aside as validation set for",
},
"n_iter_no_change": {
"type": "integer",
"minimumForOptimizer": 5,
"maximumForOptimizer": 10,
"default": 5,
"description": "Number of iterations with no improvement to wait before early stopping.",
},
"class_weight": {
"anyOf": [{"type": "object"}, {"enum": ["balanced", None]}],
"default": None,
"description": "Preset for the class_weight fit parameter.",
},
"warm_start": {
"type": "boolean",
"default": False,
"description": "When set to True, reuse the solution of the previous call to fit as",
},
"average": {
"anyOf": [
{"type": "boolean"},
{"type": "integer", "forOptimizer": False},
],
"default": False,
"description": "When set to True, computes the averaged SGD weights and stores the result in the ``coef_`` attribute.",
},
},
},
{
"description": "eta0 must be greater than 0 if the learning_rate is not ‘optimal’.",
"anyOf": [
{
"type": "object",
"properties": {
"learning_rate": {"enum": ["optimal"]},
},
},
{
"type": "object",
"properties": {
"eta0": {
"type": "number",
"minimum": 0.0,
"exclusiveMinimum": True,
},
},
},
],
},
],
}
_input_fit_schema = {
"description": "Fit linear model with Stochastic Gradient Descent.",
"required": ["X", "y"],
"type": "object",
"properties": {
"X": schema_2D_numbers,
"y": schema_1D_cats,
"coef_init": {
"type": "array",
"items": {
"type": "array",
"items": {"type": "number"},
},
"description": "The initial coefficients to warm-start the optimization.",
},
"intercept_init": {
"type": "array",
"items": {"type": "number"},
"description": "The initial intercept to warm-start the optimization.",
},
"sample_weight": schema_sample_weight,
},
}
_input_partial_fit_schema = {
"type": "object",
"required": ["X", "y"],
"properties": {
"X": schema_2D_numbers,
"y": schema_1D_cats,
"classes": schema_1D_cats,
"sample_weight": schema_sample_weight,
},
}
_output_predict_proba_schema = {
"description": "Returns the probability of the sample for each class in the model,",
"type": "array",
"items": {
"type": "array",
"items": {"type": "number"},
},
}
_output_decision_function_schema = {
"description": "Confidence scores for samples for each class in the model.",
"anyOf": [
{
"description": "In the multi-way case, score per (sample, class) combination.",
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
},
{
"description": "In the binary case, score for `self._classes[1]`.",
"type": "array",
"items": {"type": "number"},
},
],
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """`SGD classifier`_ from scikit-learn uses linear classifiers (SVM, logistic regression, a.o.) with stochastic gradient descent training.
.. _`SGD classifier`: https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.SGDClassifier.html
""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.sklearn.sgd_classifier.html",
"import_from": "sklearn.linear_model",
"type": "object",
"tags": {"pre": [], "op": ["estimator", "classifier"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_partial_fit": _input_partial_fit_schema,
"input_predict": schema_X_numbers,
"output_predict": schema_1D_cats,
"input_predict_proba": schema_X_numbers,
"output_predict_proba": _output_predict_proba_schema,
"input_decision_function": schema_X_numbers,
"output_decision_function": _output_decision_function_schema,
},
}
SGDClassifier = lale.operators.make_operator(SKLModel, _combined_schemas)
if lale.operators.sklearn_version >= version.Version("1.0"):
# old: https://scikit-learn.org/0.24/modules/generated/sklearn.linear_model.SGDClassifer.html
# new: https://scikit-learn.org/1.0/modules/generated/sklearn.linear_model.SGDClassifier.html
SGDClassifier = SGDClassifier.customize_schema(
loss={
"description": """The loss function to be used. Defaults to ‘hinge’, which gives a linear SVM.
The possible options are ‘hinge’, ‘log’, ‘modified_huber’, ‘squared_hinge’, ‘perceptron’,
or a regression loss: ‘squared_error’, ‘huber’, ‘epsilon_insensitive’, or ‘squared_epsilon_insensitive’.
The ‘log’ loss gives logistic regression, a probabilistic classifier.
‘modified_huber’ is another smooth loss that brings tolerance to outliers as well as probability estimates.
‘squared_hinge’ is like hinge but is quadratically penalized.
‘perceptron’ is the linear loss used by the perceptron algorithm.
The other losses are designed for regression but can be useful in classification as well; see SGDRegressor for a description.
More details about the losses formulas can be found in the scikit-learn User Guide.""",
"anyOf": [
{
"enum": [
"hinge",
"log",
"modified_huber",
"squared_hinge",
"perceptron",
"squared_error",
"huber",
"epsilon_insensitive",
"squared_epsilon_insensitive",
],
},
{"enum": ["squared_loss"], "forOptimizer": False},
],
"default": "hinge",
},
set_as_available=True,
)
lale.docstrings.set_docstrings(SGDClassifier)
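# Illustrative usage sketch (editorial addition, not part of the upstream module):
# a minimal, hedged example of the SGDClassifier operator defined above with its
# default hinge loss. The synthetic dataset and the alpha value are arbitrary
# choices for illustration only.
if __name__ == "__main__":
    from sklearn.datasets import make_classification
    X, y = make_classification(n_samples=200, n_features=10, random_state=0)
    trained = SGDClassifier(alpha=1e-4, random_state=0).fit(X, y)
    print(trained.predict(X[:5]))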
| 14,474 | 38.016173 | 157 |
py
|
lale
|
lale-master/lale/lib/sklearn/extra_trees_classifier.py
|
# Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sklearn
import sklearn.ensemble
from packaging import version
import lale.docstrings
import lale.operators
_hyperparams_schema = {
"description": "An extra-trees classifier.",
"allOf": [
{
"type": "object",
"required": ["class_weight"],
"relevantToOptimizer": [
"n_estimators",
"criterion",
"max_depth",
"min_samples_split",
"min_samples_leaf",
"max_features",
"bootstrap",
],
"additionalProperties": False,
"properties": {
"n_estimators": {
"type": "integer",
"minimum": 1,
"minimumForOptimizer": 10,
"maximumForOptimizer": 100,
"default": 10,
"description": "The number of trees in the forest.",
},
"criterion": {
"enum": ["gini", "entropy"],
"default": "gini",
"description": "The function to measure the quality of a split.",
},
"max_depth": {
"anyOf": [
{
"type": "integer",
"minimum": 1,
"minimumForOptimizer": 3,
"maximumForOptimizer": 5,
},
{"enum": [None]},
],
"default": None,
"description": "The maximum depth of the tree. If None, then nodes are expanded until",
},
"min_samples_split": {
"anyOf": [
{
"type": "integer",
"minimum": 2,
"laleMaximum": "X/maxItems", # number of rows
"forOptimizer": False,
"description": "Consider min_samples_split as the minimum number.",
},
{
"type": "number",
"minimum": 0.0,
"exclusiveMinimum": True,
"maximum": 1.0,
"minimumForOptimizer": 0.01,
"maximumForOptimizer": 0.5,
"default": 0.05,
"description": "min_samples_split is a fraction and ceil(min_samples_split * n_samples) are the minimum number of samples for each split.",
},
],
"default": 2,
"description": "The minimum number of samples required to split an internal node.",
},
"min_samples_leaf": {
"anyOf": [
{
"type": "integer",
"minimum": 1,
"laleMaximum": "X/maxItems", # number of rows
"forOptimizer": False,
"description": "Consider min_samples_leaf as the minimum number.",
},
{
"type": "number",
"minimum": 0.0,
"exclusiveMinimum": True,
"maximum": 0.5,
"description": "min_samples_leaf is a fraction and ceil(min_samples_leaf * n_samples) are the minimum number of samples for each node.",
"default": 0.05,
},
],
"default": 1,
"description": "The minimum number of samples required to be at a leaf node.",
},
"min_weight_fraction_leaf": {
"type": "number",
"minimum": 0.0,
"maximum": 0.5,
"default": 0.0,
"description": "The minimum weighted fraction of the sum total of weights (of all the input samples) required to be at a leaf node. Samples have equal weight when sample_weight is not provided.",
},
"max_features": {
"anyOf": [
{
"type": "integer",
"minimum": 2,
"laleMaximum": "X/items/maxItems", # number of columns
"forOptimizer": False,
"description": "Consider max_features features at each split.",
},
{
"type": "number",
"minimum": 0.0,
"exclusiveMinimum": True,
"maximum": 1.0,
"distribution": "uniform",
"minimumForOptimizer": 0.01,
"default": 0.5,
"description": "max_features is a fraction and int(max_features * n_features) features are considered at each split.",
},
{"enum": ["auto", "sqrt", "log2", None]},
],
"default": "auto",
"description": "The number of features to consider when looking for the best split:",
},
"max_leaf_nodes": {
"anyOf": [
{
"type": "integer",
"minimum": 1,
"minimumForOptimizer": 3,
"maximumForOptimizer": 1000,
},
{
"enum": [None],
"description": "Unlimited number of leaf nodes.",
},
],
"default": None,
"description": "Grow trees with ``max_leaf_nodes`` in best-first fashion.",
},
"min_impurity_decrease": {
"type": "number",
"minimum": 0.0,
"maximumForOptimizer": 10.0,
"default": 0.0,
"description": "A node will be split if this split induces a decrease of the impurity greater than or equal to this value.",
},
"min_impurity_split": {
"anyOf": [{"type": "number", "minimum": 0.0}, {"enum": [None]}],
"default": None,
"description": "Threshold for early stopping in tree growth. A node will split",
},
"bootstrap": {
"type": "boolean",
"default": False,
"description": "Whether bootstrap samples are used when building trees. If False, the whole dataset is used to build each tree.",
},
"oob_score": {
"type": "boolean",
"default": False,
"description": "Whether to use out-of-bag samples to estimate the generalization accuracy.",
},
"n_jobs": {
"anyOf": [{"type": "integer"}, {"enum": [None]}],
"default": None,
"description": "The number of jobs to run in parallel for both `fit` and `predict`.",
},
"random_state": {
"anyOf": [
{"type": "integer"},
{"laleType": "numpy.random.RandomState"},
{"enum": [None]},
],
"default": None,
"description": "If int, random_state is the seed used by the random number generator;",
},
"verbose": {
"type": "integer",
"default": 0,
"description": "Controls the verbosity when fitting and predicting.",
},
"warm_start": {
"type": "boolean",
"default": False,
"description": "When set to True, reuse the solution of the previous call to fit and add more estimators to the ensemble, otherwise, just erase the previous solution.",
},
"class_weight": {
"anyOf": [
{"type": "object"}, # dict, list of dicts,
{"enum": ["balanced", "balanced_subsample", None]},
],
"description": "Weights associated with classes in the form ``{class_label: weight}``.",
"default": None,
},
},
},
{
"description": "This classifier does not support sparse labels.",
"type": "object",
"laleNot": "y/isSparse",
},
{
"description": "Out of bag estimation only available if bootstrap=True",
"anyOf": [
{"type": "object", "properties": {"bootstrap": {"enum": [True]}}},
{"type": "object", "properties": {"oob_score": {"enum": [False]}}},
],
},
],
}
_input_fit_schema = {
"description": "Build a forest of trees from the training set (X, y).",
"type": "object",
"required": ["X", "y"],
"properties": {
"X": {
"anyOf": [
{
"type": "array",
"items": {
"type": "array",
"items": {"type": "number"},
},
}
],
"description": "The training input samples. Internally, its dtype will be converted",
},
"y": {
"anyOf": [
{"type": "array", "items": {"type": "number"}},
{"type": "array", "items": {"type": "string"}},
{"type": "array", "items": {"type": "boolean"}},
],
"description": "The target values (class labels in classification, real numbers in",
},
"sample_weight": {
"anyOf": [
{
"type": "array",
"items": {"type": "number"},
},
{"enum": [None]},
],
"description": "Sample weights. If None, then samples are equally weighted. Splits",
},
},
}
_input_predict_schema = {
"description": "Predict class for X.",
"type": "object",
"properties": {
"X": {
"type": "array",
"items": {
"type": "array",
"items": {"type": "number"},
},
"description": "The input samples. Internally, its dtype will be converted to",
},
},
}
_output_predict_schema = {
"description": "The predicted classes.",
"anyOf": [
{"type": "array", "items": {"type": "number"}},
{"type": "array", "items": {"type": "string"}},
{"type": "array", "items": {"type": "boolean"}},
],
}
_input_predict_proba_schema = {
"description": "Predict class probabilities for X.",
"type": "object",
"properties": {
"X": {
"type": "array",
"items": {
"type": "array",
"items": {"type": "number"},
},
"description": "The input samples. Internally, its dtype will be converted to",
},
},
}
_output_predict_proba_schema = {
"description": "such arrays if n_outputs > 1.",
"type": "array",
"items": {
"type": "array",
"items": {"type": "number"},
},
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """`Extra trees classifier`_ random forest from scikit-learn.
.. _`Extra trees classifier`: https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.ExtraTreesClassifier.html
""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.sklearn.extra_trees_classifier.html",
"import_from": "sklearn.ensemble",
"type": "object",
"tags": {"pre": [], "op": ["estimator", "classifier"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
"input_predict_proba": _input_predict_proba_schema,
"output_predict_proba": _output_predict_proba_schema,
},
}
ExtraTreesClassifier: lale.operators.PlannedIndividualOp
ExtraTreesClassifier = lale.operators.make_operator(
sklearn.ensemble.ExtraTreesClassifier, _combined_schemas
)
if lale.operators.sklearn_version >= version.Version("0.22"):
# old: https://scikit-learn.org/0.20/modules/generated/sklearn.ensemble.ExtraTreesClassifier.html
# new: https://scikit-learn.org/0.23/modules/generated/sklearn.ensemble.ExtraTreesClassifier.html
from lale.schemas import AnyOf, Float, Int, Null
ExtraTreesClassifier = ExtraTreesClassifier.customize_schema(
n_estimators=Int(
desc="The number of trees in the forest.",
minimum=1,
default=100,
forOptimizer=True,
minimumForOptimizer=10,
maximumForOptimizer=100,
),
ccp_alpha=Float(
desc="Complexity parameter used for Minimal Cost-Complexity Pruning. The subtree with the largest cost complexity that is smaller than ccp_alpha will be chosen. By default, no pruning is performed.",
default=0.0,
forOptimizer=False,
minimum=0.0,
maximumForOptimizer=0.1,
),
max_samples=AnyOf(
types=[
Null(desc="Draw X.shape[0] samples."),
Int(desc="Draw max_samples samples.", minimum=1),
Float(
desc="Draw max_samples * X.shape[0] samples.",
minimum=0.0,
exclusiveMinimum=True,
maximum=1.0,
exclusiveMaximum=True,
),
],
desc="If bootstrap is True, the number of samples to draw from X to train each base estimator.",
default=None,
),
set_as_available=True,
)
if lale.operators.sklearn_version >= version.Version("1.0"):
# old: https://scikit-learn.org/0.24/modules/generated/sklearn.ensemble.ExtraTreesClassifier.html
# new: https://scikit-learn.org/1.0/modules/generated/sklearn.ensemble.ExtraTreesClassifier.html
ExtraTreesClassifier = ExtraTreesClassifier.customize_schema(
min_impurity_split=None, set_as_available=True
)
if lale.operators.sklearn_version >= version.Version("1.1"):
# old: https://scikit-learn.org/1.0/modules/generated/sklearn.ensemble.ExtraTreesClassifier.html
# new: https://scikit-learn.org/1.1/modules/generated/sklearn.ensemble.ExtraTreesClassifier.html
ExtraTreesClassifier = ExtraTreesClassifier.customize_schema(
max_features={
"anyOf": [
{
"type": "integer",
"minimum": 2,
"laleMaximum": "X/items/maxItems", # number of columns
"forOptimizer": False,
"description": "Consider max_features features at each split.",
},
{
"type": "number",
"minimum": 0.0,
"exclusiveMinimum": True,
"maximum": 1.0,
"distribution": "uniform",
"minimumForOptimizer": 0.01,
"default": 0.5,
"description": "max_features is a fraction and int(max_features * n_features) features are considered at each split.",
},
{"enum": ["auto", "sqrt", "log2", None]},
],
"default": "sqrt",
"description": "The number of features to consider when looking for the best split:",
},
set_as_available=True,
)
lale.docstrings.set_docstrings(ExtraTreesClassifier)
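# Illustrative usage sketch (editorial addition, not part of the upstream module):
# a minimal, hedged example of the ExtraTreesClassifier operator defined above.
# The iris dataset and the n_estimators/max_depth values are arbitrary choices
# for illustration only.
if __name__ == "__main__":
    from sklearn.datasets import load_iris
    X, y = load_iris(return_X_y=True)
    trained = ExtraTreesClassifier(n_estimators=25, max_depth=4).fit(X, y)
    print(trained.predict(X[:5]))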
| 17,149 | 40.626214 | 215 |
py
|
lale
|
lale-master/lale/lib/sklearn/decision_tree_classifier.py
|
# Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sklearn
import sklearn.tree
from packaging import version
import lale.docstrings
import lale.operators
_hyperparams_schema = {
"description": "A decision tree classifier.",
"allOf": [
{
"type": "object",
"required": ["class_weight"],
"relevantToOptimizer": [
"criterion",
"splitter",
"max_depth",
"min_samples_split",
"min_samples_leaf",
"max_features",
],
"additionalProperties": False,
"properties": {
"criterion": {
"enum": ["gini", "entropy"],
"default": "gini",
"description": "The function to measure the quality of a split.",
},
"splitter": {
"enum": ["best", "random"],
"default": "best",
"description": "The strategy used to choose the split at each node.",
},
"max_depth": {
"anyOf": [
{
"type": "integer",
"minimum": 1,
"minimumForOptimizer": 3,
"maximumForOptimizer": 5,
},
{
"enum": [None],
"description": "Nodes are expanded until all leaves are pure or until all leaves contain less than min_samples_split samples.",
},
],
"default": None,
"description": "The maximum depth of the tree.",
},
"min_samples_split": {
"anyOf": [
{
"type": "integer",
"minimum": 2,
"laleMaximum": "X/maxItems", # number of rows
"forOptimizer": False,
"description": "Consider min_samples_split as the minimum number.",
},
{
"type": "number",
"minimum": 0.0,
"exclusiveMinimum": True,
"maximum": 1.0,
"minimumForOptimizer": 0.01,
"maximumForOptimizer": 0.5,
"description": "min_samples_split is a fraction and ceil(min_samples_split * n_samples) are the minimum number of samples for each split.",
},
],
"default": 2,
"description": "The minimum number of samples required to split an internal node.",
},
"min_samples_leaf": {
"anyOf": [
{
"type": "integer",
"minimum": 1,
"laleMaximum": "X/maxItems", # number of rows
"forOptimizer": False,
"description": "Consider min_samples_leaf as the minimum number.",
},
{
"type": "number",
"minimum": 0.0,
"exclusiveMinimum": True,
"maximum": 0.5,
"default": 0.05,
"description": "min_samples_leaf is a fraction and ceil(min_samples_leaf * n_samples) are the minimum number of samples for each node.",
},
],
"default": 1,
"description": "The minimum number of samples required to be at a leaf node.",
},
"min_weight_fraction_leaf": {
"type": "number",
"minimum": 0.0,
"maximum": 0.5,
"default": 0.0,
"description": "The minimum weighted fraction of the sum total of weights (of all the input samples) required to be at a leaf node. Samples have equal weight when sample_weight is not provided.",
},
"max_features": {
"anyOf": [
{
"type": "integer",
"minimum": 2,
"laleMaximum": "X/items/maxItems", # number of columns
"forOptimizer": False,
"description": "Consider max_features features at each split.",
},
{
"type": "number",
"minimum": 0.0,
"exclusiveMinimum": True,
"maximum": 1.0,
"distribution": "uniform",
"minimumForOptimizer": 0.01,
"default": 0.5,
"description": "max_features is a fraction and int(max_features * n_features) features are considered at each split.",
},
{"enum": ["auto", "sqrt", "log2", None]},
],
"default": "auto",
"description": "The number of features to consider when looking for the best split.",
},
"random_state": {
"description": "Seed of pseudo-random number generator.",
"anyOf": [
{"laleType": "numpy.random.RandomState"},
{
"description": "RandomState used by np.random",
"enum": [None],
},
{"description": "Explicit seed.", "type": "integer"},
],
"default": None,
},
"max_leaf_nodes": {
"anyOf": [
{
"type": "integer",
"minimum": 1,
"minimumForOptimizer": 3,
"maximumForOptimizer": 1000,
},
{
"enum": [None],
"description": "Unlimited number of leaf nodes.",
},
],
"default": None,
"description": "Grow a tree with ``max_leaf_nodes`` in best-first fashion.",
},
"min_impurity_decrease": {
"type": "number",
"minimum": 0.0,
"maximumForOptimizer": 10.0,
"default": 0.0,
"description": "A node will be split if this split induces a decrease of the impurity greater than or equal to this value.",
},
"min_impurity_split": {
"anyOf": [{"type": "number", "minimum": 0.0}, {"enum": [None]}],
"default": None,
"description": "Threshold for early stopping in tree growth.",
},
"class_weight": {
"anyOf": [
{"type": "object", "additionalProperties": {"type": "number"}},
{
"type": "array",
"items": {
"type": "object",
"additionalProperties": {"type": "number"},
},
},
{"enum": ["balanced", None]},
],
"description": "Weights associated with classes in the form ``{class_label: weight}``.",
},
"presort": {
"type": "boolean",
"default": False,
"description": "Whether to presort the data to speed up the finding of best splits in splitting.",
},
},
}
],
}
_input_fit_schema = {
"required": ["X", "y"],
"type": "object",
"properties": {
"X": {
"type": "array",
"description": "The outer array is over samples aka rows.",
"items": {
"type": "array",
"description": "The inner array is over features aka columns.",
"items": {"type": "number"},
},
},
"y": {
"description": "The predicted classes.",
"anyOf": [
{"type": "array", "items": {"type": "number"}},
{"type": "array", "items": {"type": "string"}},
{"type": "array", "items": {"type": "boolean"}},
],
},
"sample_weight": {
"anyOf": [
{"type": "array", "items": {"type": "number"}},
{"enum": [None], "description": "Samples are equally weighted."},
],
"description": "Sample weights.",
},
"check_input": {
"type": "boolean",
"default": True,
"description": "Allow to bypass several input checking.",
},
"X_idx_sorted": {
"anyOf": [
{
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
},
{"enum": [None]},
],
"default": None,
"description": "The indexes of the sorted training input samples. If many tree",
},
},
}
_input_predict_schema = {
"type": "object",
"properties": {
"X": {
"type": "array",
"description": "The outer array is over samples aka rows.",
"items": {
"type": "array",
"description": "The inner array is over features aka columns.",
"items": {"type": "number"},
},
},
"check_input": {
"type": "boolean",
"default": True,
"description": "Allow to bypass several input checking.",
},
},
}
_output_predict_schema = {
"description": "The predicted classes.",
"anyOf": [
{"type": "array", "items": {"type": "number"}},
{"type": "array", "items": {"type": "string"}},
{"type": "array", "items": {"type": "boolean"}},
],
}
_input_predict_proba_schema = {
"type": "object",
"properties": {
"X": {
"type": "array",
"description": "The outer array is over samples aka rows.",
"items": {
"type": "array",
"description": "The inner array is over features aka columns.",
"items": {"type": "number"},
},
},
"check_input": {"type": "boolean", "description": "Run check_array on X."},
},
}
_output_predict_proba_schema = {
"type": "array",
"description": "The outer array is over samples aka rows.",
"items": {
"type": "array",
"description": "The inner array has items corresponding to each class.",
"items": {"type": "number"},
},
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """`Decision tree classifier`_ from scikit-learn.
.. _`Decision tree classifier`: https://scikit-learn.org/stable/modules/generated/sklearn.tree.DecisionTreeClassifier.html
""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.sklearn.decision_tree_classifier.html",
"import_from": "sklearn.tree",
"type": "object",
"tags": {"pre": [], "op": ["estimator", "classifier"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
"input_predict_proba": _input_predict_proba_schema,
"output_predict_proba": _output_predict_proba_schema,
},
}
DecisionTreeClassifier: lale.operators.PlannedIndividualOp
DecisionTreeClassifier = lale.operators.make_operator(
sklearn.tree.DecisionTreeClassifier, _combined_schemas
)
if lale.operators.sklearn_version >= version.Version("0.22"):
# old: https://scikit-learn.org/0.20/modules/generated/sklearn.tree.DecisionTreeClassifier.html
# new: https://scikit-learn.org/0.22/modules/generated/sklearn.tree.DecisionTreeClassifier.html
from lale.schemas import AnyOf, Bool, Enum, Float
DecisionTreeClassifier = DecisionTreeClassifier.customize_schema(
presort=AnyOf(
types=[Bool(), Enum(["deprecated"])],
desc="This parameter is deprecated and will be removed in v0.24.",
default="deprecated",
),
ccp_alpha=Float(
desc="Complexity parameter used for Minimal Cost-Complexity Pruning. The subtree with the largest cost complexity that is smaller than ccp_alpha will be chosen. By default, no pruning is performed.",
default=0.0,
forOptimizer=False,
minimum=0.0,
maximumForOptimizer=0.1,
),
set_as_available=True,
)
if lale.operators.sklearn_version >= version.Version("0.24"):
# old: https://scikit-learn.org/0.22/modules/generated/sklearn.tree.DecisionTreeClassifier.html
# new: https://scikit-learn.org/0.24/modules/generated/sklearn.tree.DecisionTreeClassifier.html
DecisionTreeClassifier = DecisionTreeClassifier.customize_schema(
presort=None, set_as_available=True
)
if lale.operators.sklearn_version >= version.Version("1.0"):
# old: https://scikit-learn.org/0.24/modules/generated/sklearn.tree.DecisionTreeClassifier.html
# new: https://scikit-learn.org/1.0/modules/generated/sklearn.tree.DecisionTreeClassifier.html
DecisionTreeClassifier = DecisionTreeClassifier.customize_schema(
min_impurity_split=None, set_as_available=True
)
lale.docstrings.set_docstrings(DecisionTreeClassifier)
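# Illustrative usage sketch (editorial addition, not part of the upstream module):
# a minimal, hedged example of the DecisionTreeClassifier operator defined above.
# The iris dataset and the criterion/max_depth values are arbitrary choices for
# illustration only.
if __name__ == "__main__":
    from sklearn.datasets import load_iris
    X, y = load_iris(return_X_y=True)
    trained = DecisionTreeClassifier(criterion="entropy", max_depth=3).fit(X, y)
    print(trained.predict(X[:5]))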
| 14,977 | 40.035616 | 215 |
py
|
lale
|
lale-master/lale/lib/sklearn/tfidf_vectorizer.py
|
# Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pandas as pd
import sklearn.feature_extraction.text
import lale.docstrings
import lale.operators
class _TfidfVectorizerImpl:
def __init__(self, **hyperparams):
if "dtype" in hyperparams and hyperparams["dtype"] == "float64":
hyperparams = {**hyperparams, "dtype": np.float64}
self._wrapped_model = sklearn.feature_extraction.text.TfidfVectorizer(
**hyperparams
)
def fit(self, X, y=None):
if isinstance(X, (np.ndarray, pd.DataFrame)):
X = X.squeeze().astype("U")
self._wrapped_model.fit(X, y)
return self
def transform(self, X):
if isinstance(X, (np.ndarray, pd.DataFrame)):
X = X.squeeze().astype("U")
return self._wrapped_model.transform(X)
_hyperparams_schema = {
"description": "Convert a collection of raw documents to a matrix of TF-IDF features.",
"allOf": [
{
"type": "object",
"required": [
"input",
"encoding",
"decode_error",
"strip_accents",
"lowercase",
"preprocessor",
"tokenizer",
"analyzer",
"stop_words",
"ngram_range",
"max_df",
"min_df",
"max_features",
"vocabulary",
"binary",
"dtype",
"norm",
"use_idf",
"smooth_idf",
"sublinear_tf",
],
"relevantToOptimizer": [
"analyzer",
"ngram_range",
"max_df",
"min_df",
"binary",
"norm",
"use_idf",
"smooth_idf",
"sublinear_tf",
],
"additionalProperties": False,
"properties": {
"input": {
"enum": ["filename", "file", "content"],
"default": "content",
},
"encoding": {"type": "string", "default": "utf-8"},
"decode_error": {
"enum": ["strict", "ignore", "replace"],
"default": "strict",
},
"strip_accents": {"enum": ["ascii", "unicode", None], "default": None},
"lowercase": {"type": "boolean", "default": True},
"preprocessor": {
"anyOf": [
{"laleType": "callable", "forOptimizer": False},
{"enum": [None]},
],
"default": None,
},
"tokenizer": {
"anyOf": [
{"laleType": "callable", "forOptimizer": False},
{"enum": [None]},
],
"default": None,
},
"analyzer": {
"anyOf": [
{"enum": ["word", "char", "char_wb"]},
{"laleType": "callable", "forOptimizer": False},
],
"default": "word",
},
"stop_words": {
"anyOf": [
{"enum": [None, "english"]},
{"type": "array", "items": {"type": "string"}},
],
"default": None,
},
"token_pattern": {"type": "string", "default": "(?u)\\b\\w\\w+\\b"},
"ngram_range": {
"default": (1, 1),
"anyOf": [
{
"type": "array",
"laleType": "tuple",
"minItemsForOptimizer": 2,
"maxItemsForOptimizer": 2,
"items": {
"type": "integer",
"minimumForOptimizer": 1,
"maximumForOptimizer": 3,
},
"forOptimizer": False,
},
{"enum": [(1, 1), (1, 2), (1, 3), (2, 2), (2, 3), (3, 3)]},
],
},
"max_df": {
"anyOf": [
{
"description": "float in range [0.0, 1.0]",
"type": "number",
"minimum": 0.0,
"maximum": 1.0,
"minimumForOptimizer": 0.8,
"maximumForOptimizer": 0.9,
"distribution": "uniform",
},
{"type": "integer", "forOptimizer": False},
],
"default": 1.0,
},
"min_df": {
"anyOf": [
{
"description": "float in range [0.0, 1.0]",
"type": "number",
"minimum": 0.0,
"maximum": 1.0,
"minimumForOptimizer": 0.0,
"maximumForOptimizer": 0.1,
"distribution": "uniform",
},
{"type": "integer", "forOptimizer": False},
],
"default": 1,
},
"max_features": {
"anyOf": [
{"type": "integer", "minimum": 1, "maximumForOptimizer": 10000},
{"enum": [None]},
],
"default": None,
},
"vocabulary": {
"description": "XXX TODO XXX, Mapping or iterable, optional",
"anyOf": [{"type": "object"}, {"enum": [None]}],
"default": None,
},
"binary": {"type": "boolean", "default": False},
"dtype": {
"description": "XXX TODO XXX, type, optional",
"type": "string",
"default": "float64",
},
"norm": {"enum": ["l1", "l2", None], "default": "l2"},
"use_idf": {"type": "boolean", "default": True},
"smooth_idf": {"type": "boolean", "default": True},
"sublinear_tf": {"type": "boolean", "default": False},
},
},
{
"description": "tokenizer, only applies if analyzer == 'word'",
"anyOf": [
{"type": "object", "properties": {"analyzer": {"enum": ["word"]}}},
{"type": "object", "properties": {"tokenizer": {"enum": [None]}}},
],
},
{
"description": "stop_words can be a list only if analyzer == 'word'",
"anyOf": [
{
"type": "object",
"properties": {
"stop_words": {
"not": {"type": "array", "items": {"type": "string"}}
}
},
},
{"type": "object", "properties": {"analyzer": {"enum": ["word"]}}},
],
},
],
}
_input_fit_schema = {
"description": "Input data schema for training the TfidfVectorizer from scikit-learn.",
"type": "object",
"required": ["X"],
"additionalProperties": False,
"properties": {
"X": {
"description": "Features; the outer array is over samples.",
"anyOf": [
{"type": "array", "items": {"type": "string"}},
{
"type": "array",
"items": {
"type": "array",
"minItems": 1,
"maxItems": 1,
"items": {"type": "string"},
},
},
],
},
"y": {"description": "Target class labels; the array is over samples."},
},
}
_input_transform_schema = {
"description": "Input data schema for predictions using the TfidfVectorizer model from scikit-learn.",
"type": "object",
"required": ["X"],
"additionalProperties": False,
"properties": {
"X": {
"description": "Features; the outer array is over samples.",
"anyOf": [
{"type": "array", "items": {"type": "string"}},
{
"type": "array",
"items": {
"type": "array",
"minItems": 1,
"maxItems": 1,
"items": {"type": "string"},
},
},
],
}
},
}
_output_transform_schema = {
"description": "Output data schema for predictions (projected data) using the TfidfVectorizer model from scikit-learn.",
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """`TF-IDF vectorizer`_ transformer from scikit-learn for turning text into term frequency - inverse document frequency numeric features.
.. _`TF-IDF vectorizer`: https://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.TfidfVectorizer.html
""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.sklearn.tfidf_vectorizer.html",
"import_from": "sklearn.feature_extraction.text",
"type": "object",
"tags": {"pre": ["text"], "op": ["transformer"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_transform": _input_transform_schema,
"output_transform": _output_transform_schema,
},
}
TfidfVectorizer = lale.operators.make_operator(_TfidfVectorizerImpl, _combined_schemas)
lale.docstrings.set_docstrings(TfidfVectorizer)
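# Illustrative usage sketch (editorial addition, not part of the upstream module):
# a minimal, hedged example of the TfidfVectorizer operator defined above, fitting
# on a tiny list of documents and inspecting the shape of the resulting sparse
# TF-IDF matrix. The documents and the ngram_range value are arbitrary choices.
if __name__ == "__main__":
    docs = [
        "the quick brown fox",
        "jumped over the lazy dog",
        "the dog barked at the fox",
    ]
    trained = TfidfVectorizer(ngram_range=(1, 2)).fit(docs)
    print(trained.transform(docs).shape)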
| 10,908 | 35.85473 | 156 |
py
|
lale
|
lale-master/lale/lib/sklearn/k_means.py
|
# Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
from sklearn.cluster import KMeans as SKLModel
from lale.docstrings import set_docstrings
from lale.operators import make_operator, sklearn_version
_hyperparams_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """The k-means problem is solved using either Lloyd's or Elkan's algorithm.
The average complexity is given by O(k n T), where n is the number of
samples and T is the number of iteration.
The worst case complexity is given by O(n^(k+2/p)) with
n = n_samples, p = n_features. (D. Arthur and S. Vassilvitskii,
'How slow is the k-means method?' SoCG2006)
In practice, the k-means algorithm is very fast (one of the fastest
clustering algorithms available), but it falls in local minima. That's why
it can be useful to restart it several times.
If the algorithm stops before fully converging (because of ``tol`` or
``max_iter``), ``labels_`` and ``cluster_centers_`` will not be consistent,
i.e. the ``cluster_centers_`` will not be the means of the points in each
cluster. Also, the estimator will reassign ``labels_`` after the last
iteration to make ``labels_`` consistent with ``predict`` on the training
set.""",
"allOf": [
{
"type": "object",
"required": [
"n_clusters",
"init",
"n_init",
"max_iter",
"tol",
"precompute_distances",
"verbose",
"random_state",
"copy_x",
"n_jobs",
"algorithm",
],
"relevantToOptimizer": [
"n_clusters",
"init",
"n_init",
"max_iter",
"tol",
"precompute_distances",
"copy_x",
"algorithm",
],
"additionalProperties": False,
"properties": {
"n_clusters": {
"type": "integer",
"minimumForOptimizer": 2,
"maximumForOptimizer": 8,
"distribution": "uniform",
"default": 8,
"description": "The number of clusters to form as well as the number of centroids to generate.",
},
"init": {
"anyOf": [
{"enum": ["k-means++", "random"]},
{"laleType": "callable", "forOptimizer": False},
{
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
"forOptimizer": False,
},
],
"default": "k-means++",
"description": """Method for initialization, defaults to `k-means++`.
`k-means++` : selects initial cluster centers for k-mean clustering in a smart way to speed up convergence.
See section Notes in k_init for more details.
`random`: choose n_clusters observations (rows) at random from data for the initial centroids.
If an array is passed, it should be of shape (n_clusters, n_features) and gives the initial centers.
If a callable is passed, it should take arguments X, n_clusters and a random state and return an initialization.""",
},
"n_init": {
"type": "integer",
"minimumForOptimizer": 3,
"maximumForOptimizer": 10,
"distribution": "uniform",
"default": 10,
"description": """Number of time the k-means algorithm will be run with different centroid seeds.
The final results will be the best output of n_init consecutive runs in terms of inertia.""",
},
"max_iter": {
"type": "integer",
"minimumForOptimizer": 10,
"maximumForOptimizer": 1000,
"distribution": "uniform",
"default": 300,
"description": "Maximum number of iterations of the k-means algorithm for a single run.",
},
"tol": {
"type": "number",
"minimumForOptimizer": 1e-08,
"maximumForOptimizer": 0.01,
"default": 0.0001,
"description": "Relative tolerance with regards to Frobenius norm of the difference in the cluster centers of two consecutive iterations to declare convergence.",
},
"precompute_distances": {
"enum": ["auto", True, False],
"default": "auto",
"description": "Precompute distances (faster but takes more memory). Deprecated.",
},
"verbose": {
"type": "integer",
"default": 0,
"description": "Verbosity mode.",
},
"random_state": {
"anyOf": [
{"type": "integer"},
{"laleType": "numpy.random.RandomState"},
{"enum": [None]},
],
"default": None,
"description": "Determines random number generation for centroid initialization",
},
"copy_x": {
"type": "boolean",
"default": True,
"description": """When pre-computing distances it is more numerically accurate to center the data first.
If copy_x is True (default), then the original data is not modified.
If False, the original data is modified, and put back before the function returns, but small numerical differences may be introduced by subtracting and then adding the data mean.
Note that if the original data is not C-contiguous, a copy will be made even if copy_x is False.
If the original data is sparse, but not in CSR format, a copy will be made even if copy_x is False.""",
},
"n_jobs": {
"anyOf": [{"type": "integer"}, {"enum": [None]}],
"default": 1,
"description": "The number of jobs to use for the computation. Deprecated.",
},
"algorithm": {
"description": """K-means algorithm to use.
The classical EM-style algorithm is “full”. The “elkan” variation is more efficient on data with well-defined clusters, by using the triangle inequality.
However it’s more memory intensive due to the allocation of an extra array of shape (n_samples, n_clusters).
For now “auto” (kept for backward compatibiliy) chooses “elkan” but it might change in the future for a better heuristic.""".replace("compatibiliy", "compatibility"),
"enum": ["auto", "full", "elkan"],
"default": "auto",
},
},
}
],
}
_input_fit_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Compute k-means clustering.",
"type": "object",
"required": ["X"],
"properties": {
"X": {
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
"description": "Training instances to cluster. Array-like or sparse matrix, shape=(n_samples, n_features)",
},
"y": {
"description": "not used, present here for API consistency by convention."
},
"sample_weight": {
"anyOf": [{"type": "array", "items": {"type": "number"}}, {"enum": [None]}],
"default": None,
"description": "The weights for each observation in X",
},
},
}
_input_transform_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Transform X to a cluster-distance space.",
"type": "object",
"required": ["X"],
"properties": {
"X": {
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
"description": "New data to transform.",
}
},
}
_output_transform_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "X transformed in the new space.",
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
}
_input_predict_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Predict the closest cluster each sample in X belongs to.",
"type": "object",
"required": ["X"],
"properties": {
"X": {
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
"description": "New data to predict.",
},
"sample_weight": {
"anyOf": [{"type": "array", "items": {"type": "number"}}, {"enum": [None]}],
"default": None,
"description": "The weights for each observation in X",
},
},
}
_output_predict_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Index of the cluster each sample belongs to.",
"type": "array",
"items": {"type": "number"},
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """`KMeans`_ from scikit-learn.
.. _`KMeans`: https://scikit-learn.org/stable/modules/generated/sklearn.cluster.KMeans.html
""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.sklearn.k_means.html",
"import_from": "sklearn.cluster",
"type": "object",
"tags": {"pre": [], "op": ["transformer", "clustering", "estimator"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_transform": _input_transform_schema,
"output_transform": _output_transform_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
},
}
KMeans = make_operator(SKLModel, _combined_schemas)
if sklearn_version >= version.Version("1.0"):
# old: https://scikit-learn.org/0.24/modules/generated/sklearn.cluster.KMeans.html
# new: https://scikit-learn.org/1.0/modules/generated/sklearn.cluster.KMeans.html
KMeans = KMeans.customize_schema(
precompute_distances=None,
n_jobs=None,
set_as_available=True,
)
if sklearn_version >= version.Version("1.1"):
# old: https://scikit-learn.org/1.0/modules/generated/sklearn.cluster.KMeans.html
# new: https://scikit-learn.org/1.1/modules/generated/sklearn.cluster.KMeans.html
KMeans = KMeans.customize_schema(
algorithm={
"description": """K-means algorithm to use.
The classical EM-style algorithm is “full”. The “elkan” variation is more efficient on data with well-defined clusters, by using the triangle inequality.
However it’s more memory intensive due to the allocation of an extra array of shape (n_samples, n_clusters).
For now “auto” (kept for backward compatibiliy) chooses “elkan” but it might change in the future for a better heuristic.""",
"enum": ["lloyd", "elkan", "auto", "full"],
"default": "lloyd",
},
set_as_available=True,
)
if sklearn_version >= version.Version("1.2"):
# old: https://scikit-learn.org/1.1/modules/generated/sklearn.cluster.KMeans.html
# new: https://scikit-learn.org/1.2/modules/generated/sklearn.cluster.KMeans.html
KMeans = KMeans.customize_schema(
n_init={
"anyOf": [
{
"type": "integer",
"minimumForOptimizer": 3,
"maximumForOptimizer": 10,
"distribution": "uniform",
},
{
"enum": ["auto"],
},
],
"default": 10,
"description": """Number of time the k-means algorithm will be run with different centroid seeds.
The final results will be the best output of n_init consecutive runs in terms of inertia.
When n_init='auto', the number of runs will be 10 if using init='random', and 1 if using init='kmeans++'.""",
}
)
set_docstrings(KMeans)
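
# Minimal usage sketch (illustrative, not part of the wrapped scikit-learn API):
# it assumes scikit-learn's load_iris loader is available and is guarded so it
# only runs when this module is executed directly.
if __name__ == "__main__":
    from sklearn.datasets import load_iris

    # Train a KMeans operator with concrete hyperparameters and predict the
    # cluster index of the first few samples.
    X, _ = load_iris(return_X_y=True)
    trained = KMeans(n_clusters=3, random_state=0).fit(X)
    print(trained.predict(X[:5]))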
| 13,001 | 43.22449 | 182 |
py
|
lale
|
lale-master/lale/lib/sklearn/ridge.py
|
# Copyright 2019-2022 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sklearn
import sklearn.linear_model
from packaging import version
import lale.docstrings
import lale.operators
_hyperparams_schema = {
"description": "Linear least squares with l2 regularization.",
"allOf": [
{
"type": "object",
"required": ["alpha", "fit_intercept", "solver"],
"relevantToOptimizer": [
"alpha",
"fit_intercept",
"normalize",
"copy_X",
"max_iter",
"tol",
"solver",
],
"additionalProperties": False,
"properties": {
"alpha": {
"description": "Regularization strength; larger values specify stronger regularization.",
"anyOf": [
{
"type": "number",
"minimum": 0.0,
"exclusiveMinimum": True,
"minimumForOptimizer": 1e-10,
"maximumForOptimizer": 1.0,
"default": 1.0,
"distribution": "loguniform",
},
{
"type": "array",
"description": "Penalties specific to the targets.",
"items": {
"type": "number",
"minimum": 0.0,
"exclusiveMinimum": True,
},
"forOptimizer": False,
},
],
"default": 1.0,
},
"fit_intercept": {
"type": "boolean",
"default": True,
"description": "Whether to calculate the intercept for this model.",
},
"normalize": {
"type": "boolean",
"default": False,
"description": "This parameter is ignored when ``fit_intercept`` is set to False.",
},
"copy_X": {
"type": "boolean",
"default": True,
"description": "If True, X will be copied; else, it may be overwritten.",
},
"max_iter": {
"anyOf": [
{
"type": "integer",
"minimum": 1,
"minimumForOptimizer": 10,
"maximumForOptimizer": 1000,
},
{"enum": [None]},
],
"default": None,
"description": "Maximum number of iterations for conjugate gradient solver.",
},
"tol": {
"type": "number",
"minimumForOptimizer": 1e-08,
"maximumForOptimizer": 0.01,
"default": 0.001,
"description": "Precision of the solution.",
},
"solver": {
"enum": [
"auto",
"svd",
"cholesky",
"lsqr",
"sparse_cg",
"sag",
"saga",
],
"default": "auto",
"description": "Solver to use in the computational routines.",
},
"random_state": {
"anyOf": [
{"type": "integer"},
{"laleType": "numpy.random.RandomState"},
{"enum": [None]},
],
"default": None,
"description": "The seed of the pseudo random number generator to use when shuffling",
},
},
},
{
"description": "solver {svd, lsqr, cholesky, saga} does not support fitting the intercept on sparse data. Please set the solver to 'auto' or 'sparse_cg', 'sag', or set `fit_intercept=False`.",
"anyOf": [
{"type": "object", "laleNot": "X/isSparse"},
{"type": "object", "properties": {"fit_intercept": {"enum": [False]}}},
{
"type": "object",
"properties": {"solver": {"enum": ["auto", "sparse_cg", "sag"]}},
},
],
},
{
"description": "SVD solver does not support sparse inputs currently.",
"anyOf": [
{"type": "object", "laleNot": "X/isSparse"},
{
"type": "object",
"properties": {"solver": {"not": {"enum": ["svd"]}}},
},
],
},
],
}
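
# Each cross-argument constraint in _hyperparams_schema["allOf"] above encodes an
# implication as an "anyOf" disjunction: for example, the sparse-data rule reads
# "X is not sparse, OR fit_intercept is False, OR the solver is one of 'auto',
# 'sparse_cg', and 'sag'".
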
_input_fit_schema = {
"description": "Fit Ridge regression model",
"type": "object",
"required": ["X", "y"],
"properties": {
"X": {
"type": "array",
"items": {
"type": "array",
"items": {"type": "number"},
},
"description": "Training data",
},
"y": {
"anyOf": [
{
"type": "array",
"items": {
"type": "array",
"items": {"type": "number"},
},
},
{
"type": "array",
"items": {"type": "number"},
},
],
"description": "Target values",
},
"sample_weight": {
"anyOf": [
{"type": "number"},
{
"type": "array",
"items": {"type": "number"},
},
{"enum": [None]},
],
"description": "Individual weights for each sample",
},
},
}
_input_predict_schema = {
"description": "Predict using the linear model",
"type": "object",
"properties": {
"X": {
"anyOf": [
{"type": "array", "items": {"type": "number"}},
{
"type": "array",
"items": {
"type": "array",
"items": {"type": "number"},
},
},
],
"description": "Samples.",
},
},
}
_output_predict_schema = {
"description": "Returns predicted values.",
"anyOf": [
{"type": "array", "items": {"type": "number"}},
{ # There was a case where Ridge returned 2-d predictions for a single target.
"type": "array",
"items": {
"type": "array",
"items": {"type": "number"},
},
},
],
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """`Ridge`_ regression estimator from scikit-learn.
.. _`Ridge`: https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.Ridge.html
""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.sklearn.ridge.html",
"import_from": "sklearn.linear_model",
"type": "object",
"tags": {"pre": [], "op": ["estimator", "regressor"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
},
}
Ridge = lale.operators.make_operator(sklearn.linear_model.Ridge, _combined_schemas)
if lale.operators.sklearn_version >= version.Version("1.0"):
# old: https://scikit-learn.org/0.24/modules/generated/sklearn.linear_model.Ridge.html
# new: https://scikit-learn.org/1.0/modules/generated/sklearn.linear_model.Ridge.html
Ridge = Ridge.customize_schema(
relevantToOptimizer=[
"alpha",
"fit_intercept",
"copy_X",
"max_iter",
"tol",
"solver",
],
normalize={
"type": "boolean",
"description": """This parameter is ignored when fit_intercept is set to False.
If True, the regressors X will be normalized before regression by subtracting the mean and dividing by the l2-norm.
If you wish to standardize, please use StandardScaler before calling fit on an estimator with normalize=False.""",
"default": False,
"forOptimizer": False,
},
positive={
"type": "boolean",
"description": """When set to True, forces the coefficients to be positive. Only ‘lbfgs’ solver is supported in this case.""",
"default": False,
"forOptimizer": False,
},
solver={
"enum": [
"auto",
"svd",
"cholesky",
"lsqr",
"sparse_cg",
"sag",
"saga",
"lbfgs",
],
"default": "auto",
"description": """Solver to use in the computational routines:
- 'auto' chooses the solver automatically based on the type of data.
- 'svd' uses a Singular Value Decomposition of X to compute the Ridge
coefficients. More stable for singular matrices than 'cholesky'.
- 'cholesky' uses the standard scipy.linalg.solve function to obtain a
closed-form solution.
- 'sparse_cg' uses the conjugate gradient solver as found in
scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
more appropriate than 'cholesky' for large-scale data (possibility
to set `tol` and `max_iter`).
- 'lsqr' uses the dedicated regularized least-squares routine
scipy.sparse.linalg.lsqr. It is the fastest and uses an iterative
procedure.
- 'sag' uses a Stochastic Average Gradient descent, and 'saga' uses
its improved, unbiased version named SAGA. Both methods also use an
iterative procedure, and are often faster than other solvers when
both n_samples and n_features are large. Note that 'sag' and 'saga'
fast convergence is only guaranteed on features with approximately
the same scale. You can preprocess the data with a scaler from
sklearn.preprocessing.
- 'lbfgs' uses L-BFGS-B algorithm implemented in
`scipy.optimize.minimize`. It can be used only when `positive` is
True.
All last six solvers support both dense and sparse data. However, only
'sag', 'sparse_cg', and 'lbfgs' support sparse input when `fit_intercept`
is True.""",
"forOptimizer": True,
},
set_as_available=True,
)
Ridge = Ridge.customize_schema(
constraint={
"description": "Only ‘lbfgs’ solver is supported when positive is True. `auto` works too when tested.",
"anyOf": [
{"type": "object", "properties": {"positive": {"enum": [False]}}},
{
"type": "object",
"properties": {
"solver": {"enum": ["lbfgs", "auto"]},
},
},
],
},
set_as_available=True,
)
Ridge = Ridge.customize_schema(
constraint={
"description": "`lbfgs` solver can be used only when positive=True.",
"anyOf": [
{"type": "object", "properties": {"positive": {"enum": [True]}}},
{
"type": "object",
"properties": {
"solver": {"not": {"enum": ["lbfgs"]}},
},
},
],
},
set_as_available=True,
)
if lale.operators.sklearn_version >= version.Version("1.2"):
# old: https://scikit-learn.org/1.1/modules/generated/sklearn.linear_model.Ridge.html
# new: https://scikit-learn.org/1.2/modules/generated/sklearn.linear_model.Ridge.html
Ridge = Ridge.customize_schema(
tol={
"type": "number",
"minimumForOptimizer": 1e-08,
"maximumForOptimizer": 0.01,
"default": 0.0001,
"description": "Precision of the solution.",
},
normalize=None,
)
lale.docstrings.set_docstrings(Ridge)
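
# Minimal usage sketch (illustrative, not part of the wrapped scikit-learn API):
# it assumes scikit-learn's load_diabetes loader is available and is guarded so
# it only runs when this module is executed directly.
if __name__ == "__main__":
    from sklearn.datasets import load_diabetes

    # Train with an explicit regularization strength and solver; the input is
    # dense, so the sparse-data constraints above do not apply.
    X, y = load_diabetes(return_X_y=True)
    trained = Ridge(alpha=0.5, solver="cholesky").fit(X, y)
    print(trained.predict(X[:5]))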
| 13,133 | 34.690217 | 204 |
py
|
lale
|
lale-master/lale/lib/sklearn/column_transformer.py
|
# Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sklearn.compose
from packaging import version
import lale.docstrings
import lale.operators
_hyperparams_schema = {
"allOf": [
{
"description": "This first sub-object lists all constructor arguments with their "
"types, one at a time, omitting cross-argument constraints, if any.",
"type": "object",
"additionalProperties": False,
"required": ["transformers"],
"relevantToOptimizer": [],
"properties": {
"transformers": {
"description": "Operators or pipelines to be applied to subsets of the data.",
"type": "array",
"items": {
"description": "Tuple of (name, transformer, column(s)).",
"type": "array",
"laleType": "tuple",
"minItems": 3,
"maxItems": 3,
"items": [
{"description": "Name.", "type": "string"},
{
"description": "Transformer.",
"anyOf": [
{
"description": "Transformer supporting fit and transform.",
"laleType": "operator",
},
{"enum": ["passthrough", "drop"]},
],
},
{
"description": "Column(s).",
"anyOf": [
{
"type": "integer",
"description": "One column by index.",
},
{
"type": "array",
"items": {"type": "integer"},
"description": "Multiple columns by index.",
},
{
"type": "string",
"description": "One Dataframe column by name.",
},
{
"type": "array",
"items": {"type": "string"},
"description": "Multiple Dataframe columns by names.",
},
{
"type": "array",
"items": {"type": "boolean"},
"description": "Boolean mask.",
},
{
"laleType": "callable",
"not": {"type": ["integer", "array", "string"]},
"description": "Callable that is passed the input data X and can return any of the above.",
},
],
},
],
},
},
"remainder": {
"description": "Transformation for columns that were not specified in transformers.",
"anyOf": [
{
"description": "Transformer supporting fit and transform.",
"laleType": "operator",
},
{"enum": ["passthrough", "drop"]},
],
"default": "drop",
},
"sparse_threshold": {
"description": """If the output of the different transfromers contains sparse matrices,
these will be stacked as a sparse matrix if the overall density is
lower than this value. Use sparse_threshold=0 to always return dense.""",
"type": "number",
"minimum": 0.0,
"maximum": 1.0,
"default": 0.3,
},
"n_jobs": {
"description": "Number of jobs to run in parallel",
"anyOf": [
{
"description": "1 unless in joblib.parallel_backend context.",
"enum": [None],
},
{"description": "Use all processors.", "enum": [-1]},
{
"description": "Number of CPU cores.",
"type": "integer",
"minimum": 1,
},
],
"default": None,
},
"transformer_weights": {
"description": """Multiplicative weights for features per transformer.
The output of the transformer is multiplied by these weights.""",
"anyOf": [
{
"description": "Keys are transformer names, values the weights.",
"type": "object",
},
{"enum": [None]},
],
"default": None,
},
},
},
{
"description": "A sparse matrix was passed, but dense data is required. Use X.toarray() to convert to a dense numpy array.",
"type": "object",
"laleNot": "X/isSparse",
},
]
}
_input_fit_schema = {
"type": "object",
"required": ["X"],
"additionalProperties": False,
"properties": {
"X": {
"description": "Features; the outer array is over samples.",
"type": "array",
"items": {
"type": "array",
"items": {"anyOf": [{"type": "number"}, {"type": "string"}]},
},
},
"y": {"description": "Target for supervised learning (ignored)."},
},
}
_input_transform_schema = {
"type": "object",
"required": ["X"],
"additionalProperties": False,
"properties": {
"X": {
"description": "Features; the outer array is over samples.",
"type": "array",
"items": {
"type": "array",
"items": {"anyOf": [{"type": "number"}, {"type": "string"}]},
},
}
},
}
_output_transform_schema = {
"description": "Features; the outer array is over samples.",
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """ColumnTransformer_ from scikit-learn applies transformers to columns of an array or pandas DataFrame.
.. _ColumnTransformer: https://scikit-learn.org/stable/modules/generated/sklearn.compose.ColumnTransformer.html
""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.sklearn.column_transformer.html",
"import_from": "sklearn.compose",
"type": "object",
"tags": {"pre": [], "op": ["transformer"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_transform": _input_transform_schema,
"output_transform": _output_transform_schema,
},
}
ColumnTransformer: lale.operators.PlannedIndividualOp
ColumnTransformer = lale.operators.make_operator(
sklearn.compose.ColumnTransformer, _combined_schemas
)
if lale.operators.sklearn_version >= version.Version("0.21"):
# old: https://scikit-learn.org/0.20/modules/generated/sklearn.compose.ColumnTransformer.html
# new: https://scikit-learn.org/0.21/modules/generated/sklearn.compose.ColumnTransformer.html
ColumnTransformer = ColumnTransformer.customize_schema(
verbose={
"description": "If True, the time elapsed while fitting each transformer will be printed as it is completed.",
"type": "boolean",
"default": False,
},
set_as_available=True,
)
if lale.operators.sklearn_version >= version.Version("1.0"):
# old: https://scikit-learn.org/0.24/modules/generated/sklearn.compose.ColumnTransformer.html
# new: https://scikit-learn.org/1.0/modules/generated/sklearn.compose.ColumnTransformer.html
ColumnTransformer = ColumnTransformer.customize_schema(
verbose_feature_names_out={
"description": """If True, get_feature_names_out will prefix all feature names with the name of the transformer that generated that feature.
If False, get_feature_names_out will not prefix any feature names and will error if feature names are not unique.""",
"type": "boolean",
"default": True,
},
set_as_available=True,
)
lale.docstrings.set_docstrings(ColumnTransformer)
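
# Minimal usage sketch (illustrative, not part of the wrapped scikit-learn API):
# it assumes pandas is available and that a nested Lale operator such as
# lale.lib.sklearn.StandardScaler is accepted as a transformer, as in Lale's
# pipeline examples; guarded so it only runs when this module is executed directly.
if __name__ == "__main__":
    import pandas as pd

    from lale.lib.sklearn import StandardScaler

    df = pd.DataFrame({"age": [25.0, 32.0, 47.0], "height": [1.70, 1.80, 1.60]})
    # Scale the "age" column and pass the remaining columns through unchanged.
    ct = ColumnTransformer(
        transformers=[("scale_age", StandardScaler(), ["age"])],
        remainder="passthrough",
    )
    print(ct.fit(df).transform(df))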
| 9,897 | 40.940678 | 152 |
py
|