repo | file | code | file_length | avg_line_length | max_line_length | extension_type
stringlengths 2–99 | stringlengths 13–225 | stringlengths 0–18.3M | int64 0–18.3M | float64 0–1.36M | int64 0–4.26M | stringclasses 1 value
---|---|---|---|---|---|---
| lale | lale-master/test/test_autogen_lib.py |
import inspect
import logging
import pytest
from sklearn import datasets
from lale.lib import autogen
from lale.lib.lale import Hyperopt
from lale.lib.lale.hyperopt import logger
from lale.lib.sklearn import LogisticRegression
from lale.operators import Operator, make_choice
logger.setLevel(logging.ERROR)
def load_iris():
iris = datasets.load_iris()
return iris.data, iris.target
def load_regression():
return datasets.make_regression(
n_features=4, n_informative=2, random_state=0, shuffle=False
)
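# Helper shared by the parametrized tests below: wrap the pipeline in Hyperopt,
# starting with a budget of a single evaluation and tripling it on failure,
# until either a fit/predict round-trip succeeds or max_evals is exceeded.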
def base_test(name, pipeline, data_loader, max_evals=250, scoring="accuracy"):
def test(i):
if i > max_evals:
assert False
try:
X, y = data_loader()
clf = Hyperopt(estimator=pipeline, max_evals=i, scoring=scoring)
trained_pipeline = clf.fit(X, y)
trained_pipeline.predict(X)
return
except Exception:
test(3 * i)
test(1)
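# All operators exported by lale.lib.autogen; the parametrized tests below
# select from this list by operator name.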
kls = inspect.getmembers(autogen, lambda m: isinstance(m, Operator))
LR = LogisticRegression.customize_schema(relevantToOptimizer=[])
classifiers = [
"BernoulliNB",
"CalibratedClassifierCV",
"ComplementNB",
"GaussianProcessClassifier",
"LGBMClassifier",
"LabelPropagation",
"LabelSpreading",
"LogisticRegressionCV",
"NearestCentroid",
"NuSVC",
"Perceptron",
"RadiusNeighborsClassifier",
"RidgeClassifierCV",
]
@pytest.mark.parametrize("name, Op", [(n, Op) for (n, Op) in kls if n in classifiers])
def test_classifier(name, Op):
base_test(name, Op, load_iris)
multi = [
"MultiTaskElasticNet",
"MultiTaskElasticNetCV",
"MultiTaskLasso",
"MultiTaskLassoCV",
]
@pytest.mark.parametrize("name, Op", [(n, Op) for (n, Op) in kls if n in multi])
def test_multi(name, Op):
def load_multi():
X_multi = [[i, i] for i in range(100)]
return X_multi, X_multi
pytest.xfail(reason="Documentation error predict output type is 2D")
base_test(name, Op, load_multi)
regressors = [
"ARDRegression",
"BayesianRidge",
"ElasticNet",
"ElasticNetCV",
"GaussianProcessRegressor",
"HuberRegressor",
"Lars",
"LarsCV",
"Lasso",
"LassoCV",
"LassoLars",
"LassoLarsCV",
"LassoLarsIC",
"LGBMRegressor",
"NuSVR",
"OrthogonalMatchingPursuit",
"OrthogonalMatchingPursuitCV",
"PassiveAggressiveRegressor",
"RANSACRegressor",
"KernelRidge",
"RidgeCV",
"TheilSenRegressor",
"TransformedTargetRegressor",
]
failed_regressors = [
("MLPRegressor", "Input predict type (matrix with one column)"),
("RadiusNeighborsRegressor", "Radius argument is data dependent"),
]
@pytest.mark.parametrize("name, Op", [(n, Op) for (n, Op) in kls if n in regressors])
def test_regressors(name, Op):
base_test(name, Op, load_regression, scoring="r2")
@pytest.mark.parametrize("name, reason", failed_regressors)
def test_failed_regressor(name, reason):
pytest.xfail(reason)
transformers = [
"AdditiveChi2Sampler",
"BernoulliRBM",
"Binarizer",
"Birch",
"DictionaryLearning",
# "FactorAnalysis",
"FastICA",
"GaussianRandomProjection",
"IncrementalPCA",
"KBinsDiscretizer",
"KernelPCA",
"LinearDiscriminantAnalysis",
"LocallyLinearEmbedding",
"MaxAbsScaler",
"MiniBatchDictionaryLearning",
"MiniBatchKMeans",
"MiniBatchSparsePCA",
"PowerTransformer",
# "RandomTreesEmbedding",
"RBFSampler",
"SkewedChi2Sampler",
"SparsePCA",
"SparseRandomProjection",
"TruncatedSVD",
]
failed_transformers = [
("CCA", "Fit required Y (not y)"),
("LabelBinarizer", "operates on labels (not supported by lale yet)"),
("LabelEncoder", "operates on labels (not supported by lale yet)"),
("LatentDirichletAllocation", "Failed 2D array output"),
("MultiLabelBinarizer", "operates on labels (not supported by lale yet)"),
("PLSCanonical", "Fit required Y (not y)"),
("PLSRegression", "Fit required Y (not y)"),
("PLSSVD", "Fit required Y (not y)"),
]
@pytest.mark.parametrize("name, Op", [(n, Op) for (n, Op) in kls if n in transformers])
def test_transformer(name, Op):
base_test(name, Op >> LR, load_iris)
@pytest.mark.parametrize("name, reason", failed_transformers)
def test_failed_transformer(name, reason):
pytest.xfail(reason)
def test_2_steps_classifier():
T = make_choice(*[Op for (n, Op) in kls if n in transformers])
C = make_choice(*[Op for (n, Op) in kls if n in classifiers])
base_test("transformer_classifier", T >> C, load_iris)
def test_2_steps_regressor():
T = make_choice(*[Op for (n, Op) in kls if n in transformers])
R = make_choice(*[Op for (n, Op) in kls if n in regressors])
base_test("transformer_regressor", T >> R, load_regression, scoring="r2")
| 4,852 | 24.81383 | 87 | py |
| lale | lale-master/test/test_aif360.py |
# Copyright 2020-2023 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import traceback
import unittest
import urllib.request
import zipfile
import aif360
import jsonschema
import numpy as np
import pandas as pd
import sklearn.model_selection
try:
import cvxpy # noqa because the import is only done as a check and flake fails
cvxpy_installed = True
except ImportError:
cvxpy_installed = False
try:
import numba # noqa because the import is only done as a check and flake fails
numba_installed = True
except ImportError:
numba_installed = False
try:
import tensorflow as tf
tensorflow_installed = True
except ImportError:
tensorflow_installed = False
import lale.helpers
import lale.lib.aif360
import lale.lib.aif360.util
from lale.datasets.data_schemas import NDArrayWithSchema
from lale.lib.aif360 import (
LFR,
AdversarialDebiasing,
BaggingOrbisClassifier,
CalibratedEqOddsPostprocessing,
DisparateImpactRemover,
EqOddsPostprocessing,
GerryFairClassifier,
MetaFairClassifier,
OptimPreproc,
Orbis,
PrejudiceRemover,
Redacting,
RejectOptionClassification,
Reweighing,
count_fairness_groups,
fair_stratified_train_test_split,
)
from lale.lib.aif360.orbis import _orbis_pick_sizes
from lale.lib.lale import ConcatFeatures, Project
from lale.lib.rasl import mockup_data_loader
from lale.lib.sklearn import (
FunctionTransformer,
LinearRegression,
LogisticRegression,
OneHotEncoder,
)
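# Tests for lale.lib.aif360: dataset fetchers, fairness metrics and scorers,
# and bias-mitigation operators.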
class TestAIF360Datasets(unittest.TestCase):
downloaded_h181 = False
downloaded_h192 = False
@classmethod
def setUpClass(cls) -> None:
cls.downloaded_h181 = cls._try_download_csv("h181.csv")
cls.downloaded_h192 = cls._try_download_csv("h192.csv")
@classmethod
def tearDownClass(cls) -> None:
if cls.downloaded_h181:
cls._cleanup_meps("h181.csv")
if cls.downloaded_h192:
cls._cleanup_meps("h192.csv")
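# Shared checks for a fetched dataset: expected shape, expected label set, and
# disparate impact close to the published value, with batched scoring agreeing
# with scoring the full dataset at once.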
def _attempt_dataset(
self, X, y, fairness_info, n_rows, n_columns, set_y, di_expected
):
self.assertEqual(X.shape, (n_rows, n_columns))
self.assertEqual(y.shape, (n_rows,))
self.assertEqual(set(y), set_y)
di_scorer = lale.lib.aif360.disparate_impact(**fairness_info)
di_measured = di_scorer.score_data(X=X, y_pred=y)
self.assertAlmostEqual(di_measured, di_expected, places=3)
bat3 = ((None, by, bX) for bX, by in mockup_data_loader(X, y, 3, "pandas"))
di_bat3 = di_scorer.score_data_batched(bat3)
self.assertEqual(di_measured, di_bat3)
def test_dataset_adult_pd_cat(self):
X, y, fairness_info = lale.lib.aif360.fetch_adult_df(preprocess=False)
self._attempt_dataset(X, y, fairness_info, 48_842, 14, {"<=50K", ">50K"}, 0.227)
def test_dataset_adult_pd_num(self):
X, y, fairness_info = lale.lib.aif360.fetch_adult_df(preprocess=True)
self._attempt_dataset(X, y, fairness_info, 48_842, 100, {0, 1}, 0.227)
def test_dataset_bank_pd_cat(self):
X, y, fairness_info = lale.lib.aif360.fetch_bank_df(preprocess=False)
self._attempt_dataset(X, y, fairness_info, 45_211, 16, {1, 2}, 0.840)
def test_dataset_bank_pd_num(self):
X, y, fairness_info = lale.lib.aif360.fetch_bank_df(preprocess=True)
self._attempt_dataset(X, y, fairness_info, 45_211, 51, {0, 1}, 0.840)
def test_dataset_compas_pd_cat(self):
X, y, fairness_info = lale.lib.aif360.fetch_compas_df(preprocess=False)
self._attempt_dataset(X, y, fairness_info, 6_172, 51, {0, 1}, 0.747)
def test_dataset_compas_pd_num(self):
X, y, fairness_info = lale.lib.aif360.fetch_compas_df(preprocess=True)
self._attempt_dataset(X, y, fairness_info, 5_278, 10, {0, 1}, 0.687)
def test_dataset_compas_violent_pd_cat(self):
X, y, fairness_info = lale.lib.aif360.fetch_compas_violent_df(preprocess=False)
self._attempt_dataset(X, y, fairness_info, 4_020, 51, {0, 1}, 0.852)
def test_dataset_compas_violent_pd_num(self):
X, y, fairness_info = lale.lib.aif360.fetch_compas_violent_df(preprocess=True)
self._attempt_dataset(X, y, fairness_info, 3_377, 10, {0, 1}, 0.822)
def test_dataset_creditg_pd_cat(self):
X, y, fairness_info = lale.lib.aif360.fetch_creditg_df(preprocess=False)
self._attempt_dataset(X, y, fairness_info, 1_000, 20, {"bad", "good"}, 0.748)
def test_dataset_creditg_pd_num(self):
X, y, fairness_info = lale.lib.aif360.fetch_creditg_df(preprocess=True)
self._attempt_dataset(X, y, fairness_info, 1_000, 58, {0, 1}, 0.748)
def test_dataset_default_credit_pd_cat(self):
X, y, fairness_info = lale.lib.aif360.fetch_default_credit_df()
self._attempt_dataset(X, y, fairness_info, 30_000, 24, {0, 1}, 0.957)
def test_dataset_heart_disease_pd_cat(self):
X, y, fairness_info = lale.lib.aif360.fetch_heart_disease_df()
self._attempt_dataset(X, y, fairness_info, 303, 13, {0, 1}, 0.589)
def test_dataset_law_school_pd_cat(self):
X, y, fairness_info = lale.lib.aif360.fetch_law_school_df()
self._attempt_dataset(X, y, fairness_info, 20_800, 11, {"FALSE", "TRUE"}, 0.704)
def test_dataset_nlsy_pd_cat(self):
X, y, fairness_info = lale.lib.aif360.fetch_nlsy_df()
self._attempt_dataset(X, y, fairness_info, 4908, 15, {"0", "1"}, 0.668)
def test_dataset_nursery_pd_cat(self):
X, y, fairness_info = lale.lib.aif360.fetch_nursery_df(preprocess=False)
self._attempt_dataset(
X,
y,
fairness_info,
12_960,
8,
{"not_recom", "recommend", "very_recom", "priority", "spec_prior"},
0.461,
)
def test_dataset_nursery_pd_num(self):
X, y, fairness_info = lale.lib.aif360.fetch_nursery_df(preprocess=True)
self._attempt_dataset(X, y, fairness_info, 12_960, 25, {0, 1}, 0.461)
def test_dataset_ricci_pd_cat(self):
X, y, fairness_info = lale.lib.aif360.fetch_ricci_df(preprocess=False)
self._attempt_dataset(
X, y, fairness_info, 118, 5, {"No promotion", "Promotion"}, 0.498
)
def test_dataset_ricci_pd_cat_bool(self):
X, y, fairness_info = lale.lib.aif360.fetch_ricci_df(preprocess=False)
y = y == "Promotion"
self.assertIs(y.dtype, np.dtype("bool"))
fairness_info = {**fairness_info, "favorable_labels": [True]}
self._attempt_dataset(X, y, fairness_info, 118, 5, {False, True}, 0.498)
def test_dataset_ricci_pd_num(self):
X, y, fairness_info = lale.lib.aif360.fetch_ricci_df(preprocess=True)
self._attempt_dataset(X, y, fairness_info, 118, 6, {0, 1}, 0.498)
def test_dataset_speeddating_pd_cat(self):
X, y, fairness_info = lale.lib.aif360.fetch_speeddating_df(preprocess=False)
self._attempt_dataset(X, y, fairness_info, 8_378, 122, {"0", "1"}, 0.853)
def test_dataset_speeddating_pd_num(self):
X, y, fairness_info = lale.lib.aif360.fetch_speeddating_df(preprocess=True)
self._attempt_dataset(X, y, fairness_info, 8_378, 70, {0, 1}, 0.853)
def test_dataset_student_math_pd_cat(self):
X, y, fairness_info = lale.lib.aif360.fetch_student_math_df()
self._attempt_dataset(X, y, fairness_info, 395, 32, {0, 1}, 0.894)
def test_dataset_student_por_pd_cat(self):
X, y, fairness_info = lale.lib.aif360.fetch_student_por_df()
self._attempt_dataset(X, y, fairness_info, 649, 32, {0, 1}, 0.858)
def test_dataset_boston_housing_pd_cat(self):
X, y, fairness_info = lale.lib.aif360._fetch_boston_housing_df(preprocess=False)
# TODO: consider better way of handling "set_y" parameter for regression problems
self._attempt_dataset(X, y, fairness_info, 506, 13, set(y), 0.814)
def test_dataset_boston_housing_pd_num(self):
X, y, fairness_info = lale.lib.aif360._fetch_boston_housing_df(preprocess=True)
# TODO: consider better way of handling "set_y" parameter for regression problems
self._attempt_dataset(X, y, fairness_info, 506, 13, set(y), 0.814)
def test_dataset_titanic_pd_cat(self):
X, y, fairness_info = lale.lib.aif360.fetch_titanic_df(preprocess=False)
self._attempt_dataset(X, y, fairness_info, 1_309, 13, {"0", "1"}, 0.263)
def test_dataset_titanic_pd_num(self):
X, y, fairness_info = lale.lib.aif360.fetch_titanic_df(preprocess=True)
self._attempt_dataset(X, y, fairness_info, 1_309, 37, {0, 1}, 0.263)
def test_dataset_tae_pd_cat(self):
X, y, fairness_info = lale.lib.aif360.fetch_tae_df(preprocess=False)
self._attempt_dataset(X, y, fairness_info, 151, 5, {1, 2, 3}, 0.449)
def test_dataset_tae_pd_num(self):
X, y, fairness_info = lale.lib.aif360.fetch_tae_df(preprocess=True)
self._attempt_dataset(X, y, fairness_info, 151, 6, {0, 1}, 0.449)
def test_dataset_us_crime_pd_cat(self):
X, y, fairness_info = lale.lib.aif360.fetch_us_crime_df()
self._attempt_dataset(X, y, fairness_info, 1_994, 102, {0, 1}, 0.888)
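# The MEPS CSV files are not bundled with aif360: if missing, download the
# zipped SAS transport file, convert it to CSV, and remove the files again in
# tearDownClass.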
@classmethod
def _try_download_csv(cls, filename):
directory = os.path.join(
os.path.dirname(os.path.abspath(aif360.__file__)), "data", "raw", "meps"
)
csv_exists = os.path.exists(
os.path.join(
directory,
filename,
)
)
if csv_exists:
return False
else:
filename_without_extension = os.path.splitext(filename)[0]
zip_filename = f"{filename_without_extension}ssp.zip"
urllib.request.urlretrieve(
f"https://meps.ahrq.gov/mepsweb/data_files/pufs/{zip_filename}",
os.path.join(directory, zip_filename),
)
with zipfile.ZipFile(os.path.join(directory, zip_filename), "r") as zip_ref:
zip_ref.extractall(directory)
ssp_filename = f"{filename_without_extension}.ssp"
df = pd.read_sas(os.path.join(directory, ssp_filename), format="xport")
df.to_csv(os.path.join(directory, filename), index=False)
return True
@classmethod
def _cleanup_meps(cls, filename):
directory = os.path.join(
os.path.dirname(os.path.abspath(aif360.__file__)), "data", "raw", "meps"
)
filename_without_extension = os.path.splitext(filename)[0]
zip_filename = f"{filename_without_extension}ssp.zip"
ssp_filename = f"{filename_without_extension}.ssp"
os.remove(os.path.join(directory, filename))
os.remove(os.path.join(directory, zip_filename))
os.remove(os.path.join(directory, ssp_filename))
def test_dataset_meps_panel19_fy2015_pd_cat(self):
X, y, fairness_info = lale.lib.aif360.fetch_meps_panel19_fy2015_df(
preprocess=False
)
self._attempt_dataset(X, y, fairness_info, 16578, 1825, {0, 1}, 0.496)
def test_dataset_meps_panel19_fy2015_pd_num(self):
X, y, fairness_info = lale.lib.aif360.fetch_meps_panel19_fy2015_df(
preprocess=True
)
self._attempt_dataset(X, y, fairness_info, 15830, 138, {0, 1}, 0.490)
def test_dataset_meps_panel20_fy2015_pd_cat(self):
X, y, fairness_info = lale.lib.aif360.fetch_meps_panel20_fy2015_df(
preprocess=False
)
self._attempt_dataset(X, y, fairness_info, 18849, 1825, {0, 1}, 0.493)
def test_dataset_meps_panel20_fy2015_pd_num(self):
X, y, fairness_info = lale.lib.aif360.fetch_meps_panel20_fy2015_df(
preprocess=True
)
self._attempt_dataset(X, y, fairness_info, 17570, 138, {0, 1}, 0.488)
def test_dataset_meps_panel21_fy2016_pd_cat(self):
X, y, fairness_info = lale.lib.aif360.fetch_meps_panel21_fy2016_df(
preprocess=False
)
self._attempt_dataset(X, y, fairness_info, 17052, 1936, {0, 1}, 0.462)
def test_dataset_meps_panel21_fy2016_pd_num(self):
X, y, fairness_info = lale.lib.aif360.fetch_meps_panel21_fy2016_df(
preprocess=True
)
self._attempt_dataset(X, y, fairness_info, 15675, 138, {0, 1}, 0.451)
class TestAIF360Num(unittest.TestCase):
@classmethod
def _creditg_pd_num(cls):
X, y, fairness_info = lale.lib.aif360.fetch_creditg_df(preprocess=True)
cv = lale.lib.aif360.FairStratifiedKFold(**fairness_info, n_splits=3)
splits = []
lr = LogisticRegression()
for train, test in cv.split(X, y):
train_X, train_y = lale.helpers.split_with_schemas(lr, X, y, train)
assert isinstance(train_X, pd.DataFrame), type(train_X)
assert isinstance(train_y, pd.Series), type(train_y)
test_X, test_y = lale.helpers.split_with_schemas(lr, X, y, test, train)
assert isinstance(test_X, pd.DataFrame), type(test_X)
assert isinstance(test_y, pd.Series), type(test_y)
splits.append(
{
"train_X": train_X,
"train_y": train_y,
"test_X": test_X,
"test_y": test_y,
}
)
result = {"splits": splits, "fairness_info": fairness_info}
return result
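# NumPy variant of the credit-g fixture: same first split as the pandas one,
# but protected attributes are referenced by column index instead of name.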
@classmethod
def _creditg_np_num(cls):
train_X = cls.creditg_pd_num["splits"][0]["train_X"].to_numpy()
train_y = cls.creditg_pd_num["splits"][0]["train_y"].to_numpy()
test_X = cls.creditg_pd_num["splits"][0]["test_X"].to_numpy()
test_y = cls.creditg_pd_num["splits"][0]["test_y"].to_numpy()
assert isinstance(train_X, np.ndarray), type(train_X)
assert not isinstance(train_X, NDArrayWithSchema), type(train_X)
assert isinstance(train_y, np.ndarray), type(train_y)
assert not isinstance(train_y, NDArrayWithSchema), type(train_y)
assert isinstance(test_X, np.ndarray), type(test_X)
assert not isinstance(test_X, NDArrayWithSchema), type(test_X)
assert isinstance(test_y, np.ndarray), type(test_y)
assert not isinstance(test_y, NDArrayWithSchema), type(test_y)
pd_columns = cls.creditg_pd_num["splits"][0]["train_X"].columns
fairness_info = {
"favorable_labels": [1],
"protected_attributes": [
{
"feature": pd_columns.get_loc("sex"),
"reference_group": [1],
},
{
"feature": pd_columns.get_loc("age"),
"reference_group": [1],
},
],
}
result = {
"train_X": train_X,
"train_y": train_y,
"test_X": test_X,
"test_y": test_y,
"fairness_info": fairness_info,
}
return result
@classmethod
def _boston_pd_num(cls):
# TODO: Consider investigating test failure when preprocess is set to True
# (eo_diff is not less than 0 in this case; perhaps regression model learns differently?)
orig_X, orig_y, fairness_info = lale.lib.aif360._fetch_boston_housing_df(
preprocess=False
)
train_X, test_X, train_y, test_y = sklearn.model_selection.train_test_split(
orig_X, orig_y, test_size=0.33, random_state=42
)
assert isinstance(train_X, pd.DataFrame), type(train_X)
assert isinstance(train_y, pd.Series), type(train_y)
assert isinstance(test_X, pd.DataFrame), type(test_X)
assert isinstance(test_y, pd.Series), type(test_y)
result = {
"train_X": train_X,
"train_y": train_y,
"test_X": test_X,
"test_y": test_y,
"fairness_info": fairness_info,
}
return result
@classmethod
def _boston_np_num(cls):
train_X = cls.boston_pd_num["train_X"].to_numpy()
train_y = cls.boston_pd_num["train_y"].to_numpy()
test_X = cls.boston_pd_num["test_X"].to_numpy()
test_y = cls.boston_pd_num["test_y"].to_numpy()
assert isinstance(train_X, np.ndarray), type(train_X)
assert not isinstance(train_X, NDArrayWithSchema), type(train_X)
assert isinstance(train_y, np.ndarray), type(train_y)
assert not isinstance(train_y, NDArrayWithSchema), type(train_y)
assert isinstance(test_X, np.ndarray), type(test_X)
assert not isinstance(test_X, NDArrayWithSchema), type(test_X)
assert isinstance(test_y, np.ndarray), type(test_y)
assert not isinstance(test_y, NDArrayWithSchema), type(test_y)
pd_columns = cls.boston_pd_num["train_X"].columns
# pulling attributes off of stored fairness_info to avoid recomputing medians
fairness_info = {
"favorable_labels": cls.boston_pd_num["fairness_info"]["favorable_labels"],
"protected_attributes": [
{
"feature": pd_columns.get_loc("B"),
"reference_group": cls.boston_pd_num["fairness_info"][
"protected_attributes"
][0]["reference_group"],
},
],
}
result = {
"train_X": train_X,
"train_y": train_y,
"test_X": test_X,
"test_y": test_y,
"fairness_info": fairness_info,
}
return result
@classmethod
def setUpClass(cls):
cls.creditg_pd_num = cls._creditg_pd_num()
cls.creditg_np_num = cls._creditg_np_num()
cls.boston_pd_num = cls._boston_pd_num()
cls.boston_np_num = cls._boston_np_num()
def test_fair_stratified_train_test_split(self):
X = self.creditg_np_num["train_X"]
y = self.creditg_np_num["train_y"]
fairness_info = self.creditg_np_num["fairness_info"]
z = range(X.shape[0])
(
train_X,
test_X,
train_y,
test_y,
train_z,
test_z,
) = fair_stratified_train_test_split(X, y, z, **fairness_info)
self.assertEqual(train_X.shape[0], train_y.shape[0])
self.assertEqual(train_X.shape[0], len(train_z))
self.assertEqual(test_X.shape[0], test_y.shape[0])
self.assertEqual(test_X.shape[0], len(test_z))
self.assertEqual(train_X.shape[0] + test_X.shape[0], X.shape[0])
def _attempt_scorers(self, fairness_info, estimator, test_X, test_y):
fi = fairness_info
disparate_impact_scorer = lale.lib.aif360.disparate_impact(**fi)
impact = disparate_impact_scorer(estimator, test_X, test_y)
self.assertLess(impact, 0.9)
if estimator.is_classifier():
blended_scorer = lale.lib.aif360.accuracy_and_disparate_impact(**fi)
else:
blended_scorer = lale.lib.aif360.r2_and_disparate_impact(**fi)
blended = blended_scorer(estimator, test_X, test_y)
self.assertLess(0.0, blended)
self.assertLess(blended, 1.0)
parity_scorer = lale.lib.aif360.statistical_parity_difference(**fi)
parity = parity_scorer(estimator, test_X, test_y)
self.assertLess(parity, 0.0)
eo_diff_scorer = lale.lib.aif360.equal_opportunity_difference(**fi)
eo_diff = eo_diff_scorer(estimator, test_X, test_y)
self.assertLess(eo_diff, 0.0)
ao_diff_scorer = lale.lib.aif360.average_odds_difference(**fi)
ao_diff = ao_diff_scorer(estimator, test_X, test_y)
self.assertLess(ao_diff, 0.1)
theil_index_scorer = lale.lib.aif360.theil_index(**fi)
theil_index = theil_index_scorer(estimator, test_X, test_y)
self.assertGreater(theil_index, 0.1)
symm_di_scorer = lale.lib.aif360.symmetric_disparate_impact(**fi)
symm_di = symm_di_scorer(estimator, test_X, test_y)
self.assertLess(symm_di, 0.9)
def test_scorers_pd_num(self):
fairness_info = self.creditg_pd_num["fairness_info"]
trainable = LogisticRegression(max_iter=1000)
train_X = self.creditg_pd_num["splits"][0]["train_X"]
train_y = self.creditg_pd_num["splits"][0]["train_y"]
trained = trainable.fit(train_X, train_y)
test_X = self.creditg_pd_num["splits"][0]["test_X"]
test_y = self.creditg_pd_num["splits"][0]["test_y"]
self._attempt_scorers(fairness_info, trained, test_X, test_y)
def test_scorers_np_num(self):
fairness_info = self.creditg_np_num["fairness_info"]
trainable = LogisticRegression(max_iter=1000)
train_X = self.creditg_np_num["train_X"]
train_y = self.creditg_np_num["train_y"]
trained = trainable.fit(train_X, train_y)
test_X = self.creditg_np_num["test_X"]
test_y = self.creditg_np_num["test_y"]
self._attempt_scorers(fairness_info, trained, test_X, test_y)
def test_scorers_regression_pd_num(self):
fairness_info = self.boston_pd_num["fairness_info"]
trainable = LinearRegression()
train_X = self.boston_pd_num["train_X"]
train_y = self.boston_pd_num["train_y"]
trained = trainable.fit(train_X, train_y)
test_X = self.boston_pd_num["test_X"]
test_y = self.boston_pd_num["test_y"]
self._attempt_scorers(fairness_info, trained, test_X, test_y)
def test_scorers_regression_np_num(self):
fairness_info = self.boston_np_num["fairness_info"]
trainable = LinearRegression()
train_X = self.boston_np_num["train_X"]
train_y = self.boston_np_num["train_y"]
trained = trainable.fit(train_X, train_y)
test_X = self.boston_np_num["test_X"]
test_y = self.boston_np_num["test_y"]
self._attempt_scorers(fairness_info, trained, test_X, test_y)
def test_scorers_blend_acc(self):
dummy_fairness_info = {
"favorable_labels": ["fav"],
"protected_attributes": [{"feature": "prot", "reference_group": ["ref"]}],
}
scorer = lale.lib.aif360.accuracy_and_disparate_impact(**dummy_fairness_info)
for acc in [0.2, 0.8, 1]:
for di in [0.7, 0.9, 1.0]:
score = scorer._blend_metrics(acc, di)
self.assertLess(0.0, score)
self.assertLessEqual(score, 1.0)
for di in [0.0, float("inf"), float("-inf"), float("nan")]:
score = scorer._blend_metrics(acc, di)
self.assertEqual(score, 0.5 * acc)
def test_scorers_blend_r2(self):
dummy_fairness_info = {
"favorable_labels": ["fav"],
"protected_attributes": [{"feature": "prot", "reference_group": ["ref"]}],
}
scorer = lale.lib.aif360.r2_and_disparate_impact(**dummy_fairness_info)
for r2 in [-2, 0, 0.5, 1]:
for di in [0.7, 0.9, 1.0]:
score = scorer._blend_metrics(r2, di)
self.assertLess(0.0, score)
self.assertLessEqual(score, 1.0)
for di in [0.0, float("inf"), float("-inf"), float("nan")]:
score = scorer._blend_metrics(r2, di)
self.assertEqual(score, 0.5 / (2.0 - r2))
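# Remediation harness: fit the mitigator on each stratified CV split, measure
# disparate impact on the held-out fold, and assert that the mean across
# splits lands in [min_di, max_di].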
def _attempt_remi_creditg_pd_num(
self, fairness_info, trainable_remi, min_di, max_di
):
splits = self.creditg_pd_num["splits"]
disparate_impact_scorer = lale.lib.aif360.disparate_impact(**fairness_info)
di_list = []
for split in splits:
if tensorflow_installed: # for AdversarialDebiasing
tf.compat.v1.reset_default_graph()
tf.compat.v1.disable_eager_execution()
train_X = split["train_X"]
train_y = split["train_y"]
trained_remi = trainable_remi.fit(train_X, train_y)
test_X = split["test_X"]
test_y = split["test_y"]
di_list.append(disparate_impact_scorer(trained_remi, test_X, test_y))
di = pd.Series(di_list)
_, _, function_name, _ = traceback.extract_stack()[-2]
print(f"disparate impact {di.mean():.3f} +- {di.std():.3f} {function_name}")
if min_di > 0:
self.assertLessEqual(min_di, di.mean())
self.assertLessEqual(di.mean(), max_di)
def test_disparate_impact_remover_np_num(self):
fairness_info = self.creditg_np_num["fairness_info"]
trainable_orig = LogisticRegression(max_iter=1000)
trainable_remi = DisparateImpactRemover(**fairness_info) >> trainable_orig
train_X = self.creditg_np_num["train_X"]
train_y = self.creditg_np_num["train_y"]
trained_orig = trainable_orig.fit(train_X, train_y)
trained_remi = trainable_remi.fit(train_X, train_y)
test_X = self.creditg_np_num["test_X"]
test_y = self.creditg_np_num["test_y"]
disparate_impact_scorer = lale.lib.aif360.disparate_impact(**fairness_info)
impact_orig = disparate_impact_scorer(trained_orig, test_X, test_y)
self.assertTrue(0.6 < impact_orig < 1.0, f"impact_orig {impact_orig}")
impact_remi = disparate_impact_scorer(trained_remi, test_X, test_y)
self.assertTrue(0.8 < impact_remi < 1.0, f"impact_remi {impact_remi}")
def test_adversarial_debiasing_pd_num(self):
if tensorflow_installed:
fairness_info = self.creditg_pd_num["fairness_info"]
tf.compat.v1.reset_default_graph()
trainable_remi = AdversarialDebiasing(**fairness_info)
self._attempt_remi_creditg_pd_num(fairness_info, trainable_remi, 0.0, 1.5)
def test_calibrated_eq_odds_postprocessing_pd_num(self):
fairness_info = self.creditg_pd_num["fairness_info"]
estim = LogisticRegression(max_iter=1000)
trainable_remi = CalibratedEqOddsPostprocessing(
**fairness_info, estimator=estim
)
self._attempt_remi_creditg_pd_num(fairness_info, trainable_remi, 0.65, 0.85)
def test_disparate_impact_remover_pd_num(self):
fairness_info = self.creditg_pd_num["fairness_info"]
trainable_remi = DisparateImpactRemover(**fairness_info) >> LogisticRegression(
max_iter=1000
)
self._attempt_remi_creditg_pd_num(fairness_info, trainable_remi, 0.78, 0.88)
def test_eq_odds_postprocessing_pd_num(self):
fairness_info = self.creditg_pd_num["fairness_info"]
estim = LogisticRegression(max_iter=1000)
trainable_remi = EqOddsPostprocessing(**fairness_info, estimator=estim)
self._attempt_remi_creditg_pd_num(fairness_info, trainable_remi, 0.82, 1.02)
def test_gerry_fair_classifier_pd_num(self):
fairness_info = self.creditg_pd_num["fairness_info"]
trainable_remi = GerryFairClassifier(**fairness_info)
self._attempt_remi_creditg_pd_num(fairness_info, trainable_remi, 0.677, 0.678)
def test_lfr_pd_num(self):
if numba_installed:
fairness_info = self.creditg_pd_num["fairness_info"]
trainable_remi = LFR(**fairness_info) >> LogisticRegression(max_iter=1000)
self._attempt_remi_creditg_pd_num(fairness_info, trainable_remi, 0.95, 1.05)
def test_meta_fair_classifier_pd_num(self):
if aif360.__version__ >= "4.0.0":
fairness_info = self.creditg_pd_num["fairness_info"]
trainable_remi = MetaFairClassifier(**fairness_info)
self._attempt_remi_creditg_pd_num(fairness_info, trainable_remi, 0.62, 0.87)
def test_orbis_mixed_pd_num(self):
fairness_info = self.creditg_pd_num["fairness_info"]
estim = LogisticRegression(max_iter=1000)
trainable_remi = Orbis(
estimator=estim, **fairness_info, sampling_strategy="mixed"
)
self._attempt_remi_creditg_pd_num(fairness_info, trainable_remi, 0.70, 0.92)
def test_orbis_over_pd_num(self):
fairness_info = self.creditg_pd_num["fairness_info"]
estim = LogisticRegression(max_iter=1000)
trainable_remi = Orbis(
estimator=estim, **fairness_info, sampling_strategy="over"
)
self._attempt_remi_creditg_pd_num(fairness_info, trainable_remi, 0.70, 0.92)
def test_orbis_under_pd_num(self):
fairness_info = self.creditg_pd_num["fairness_info"]
estim = LogisticRegression(max_iter=1000)
trainable_remi = Orbis(
estimator=estim, **fairness_info, sampling_strategy="under"
)
self._attempt_remi_creditg_pd_num(fairness_info, trainable_remi, 0.70, 1.05)
def test_prejudice_remover_pd_num(self):
fairness_info = self.creditg_pd_num["fairness_info"]
trainable_remi = PrejudiceRemover(**fairness_info)
self._attempt_remi_creditg_pd_num(fairness_info, trainable_remi, 0.73, 0.83)
def test_redacting_pd_num(self):
fairness_info = self.creditg_pd_num["fairness_info"]
redacting = Redacting(**fairness_info)
logistic_regression = LogisticRegression(max_iter=1000)
trainable_remi = redacting >> logistic_regression
self._attempt_remi_creditg_pd_num(fairness_info, trainable_remi, 0.78, 0.94)
def test_reject_option_classification_pd_num(self):
fairness_info = self.creditg_pd_num["fairness_info"]
estim = LogisticRegression(max_iter=1000)
trainable_remi = RejectOptionClassification(**fairness_info, estimator=estim)
self._attempt_remi_creditg_pd_num(fairness_info, trainable_remi, 0.88, 0.98)
def test_reweighing_pd_num(self):
fairness_info = self.creditg_pd_num["fairness_info"]
estim = LogisticRegression(max_iter=1000)
trainable_remi = Reweighing(estimator=estim, **fairness_info)
self._attempt_remi_creditg_pd_num(fairness_info, trainable_remi, 0.82, 0.92)
def test_sans_mitigation_pd_num(self):
fairness_info = self.creditg_pd_num["fairness_info"]
trainable_remi = LogisticRegression(max_iter=1000)
self._attempt_remi_creditg_pd_num(fairness_info, trainable_remi, 0.5, 1.0)
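# _orbis_pick_sizes chooses per-group resampling targets from the observed
# group sizes; the osizes keys encode protected-attribute membership and class
# label (the exact encoding is internal to lale.lib.aif360.orbis).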
class TestAIF360OrbisPickSizes(unittest.TestCase):
def setUp(self):
from mystic.tools import random_seed
random_seed(42)
self.maxDiff = None
def test_pick_sizes_mixed_single_pa_single_class_normal(self):
nsizes = _orbis_pick_sizes(
osizes={"00": 100, "01": 200, "10": 300, "11": 400},
imbalance_repair_level=1,
bias_repair_level=1,
favorable_labels={1},
sampling_strategy="mixed",
)
self.assertDictEqual(nsizes, {"00": 122, "01": 122, "10": 398, "11": 398})
def test_pick_sizes_mixed_single_pa_single_class_reversed(self):
nsizes = _orbis_pick_sizes(
osizes={"00": 400, "01": 300, "10": 200, "11": 100},
imbalance_repair_level=1,
bias_repair_level=1,
favorable_labels={1},
sampling_strategy="mixed",
)
self.assertDictEqual(nsizes, {"00": 398, "01": 398, "10": 122, "11": 122})
def test_pick_sizes_mixed_multi_pa_multi_class_normal(self):
osizes = {
"000": 570,
"001": 670,
"002": 770,
"010": 870,
"011": 970,
"012": 1070,
"100": 7070,
"101": 7170,
"102": 7270,
"110": 7370,
"111": 7471,
"112": 7571,
}
nsizes = _orbis_pick_sizes(
osizes,
imbalance_repair_level=1,
bias_repair_level=1,
favorable_labels={1},
sampling_strategy="mixed",
)
self.assertDictEqual(
nsizes,
{
"000": 573,
"001": 670,
"002": 725,
"010": 923,
"011": 970,
"012": 1059,
"100": 7246,
"101": 7170,
"102": 7137,
"110": 7406,
"111": 7471,
"112": 7495,
},
)
def test_pick_sizes_mixed_multi_pa_multi_class_reversed(self):
osizes = {
"112": 100,
"111": 200,
"110": 300,
"102": 400,
"101": 500,
"100": 600,
"012": 700,
"011": 800,
"010": 900,
"002": 3000,
"001": 1100,
"000": 1200,
}
nsizes = _orbis_pick_sizes(
osizes,
imbalance_repair_level=1,
bias_repair_level=1,
favorable_labels={2},
sampling_strategy="mixed",
)
self.assertDictEqual(
nsizes,
{
"002": 1406,
"000": 1200,
"001": 1308,
"012": 700,
"010": 900,
"011": 800,
"102": 400,
"100": 600,
"101": 500,
"112": 306,
"110": 111,
"111": 200,
},
)
def test_pick_sizes_over_single_pa_single_class_normal(self):
nsizes = _orbis_pick_sizes(
osizes={"00": 100, "01": 200, "10": 300, "11": 400},
imbalance_repair_level=1,
bias_repair_level=1,
favorable_labels={1},
sampling_strategy="over",
)
self.assertDictEqual(nsizes, {"00": 200, "01": 200, "10": 400, "11": 400})
def test_pick_sizes_over_single_pa_single_class_reversed(self):
nsizes = _orbis_pick_sizes(
osizes={"00": 400, "01": 300, "10": 200, "11": 100},
imbalance_repair_level=1,
bias_repair_level=1,
favorable_labels={1},
sampling_strategy="over",
)
self.assertDictEqual(nsizes, {"00": 400, "01": 400, "10": 200, "11": 200})
def test_pick_sizes_over_multi_pa_multi_class_normal(self):
osizes = {
"000": 570,
"001": 670,
"002": 770,
"010": 870,
"011": 970,
"012": 1070,
"100": 7070,
"101": 7170,
"102": 7270,
"110": 7370,
"111": 7471,
"112": 7571,
}
nsizes = _orbis_pick_sizes(
osizes,
imbalance_repair_level=1,
bias_repair_level=1,
favorable_labels={1},
sampling_strategy="over",
)
self.assertDictEqual(
nsizes,
{
"000": 570,
"001": 670,
"002": 770,
"010": 884,
"011": 970,
"012": 1070,
"100": 7244,
"101": 7223,
"102": 7270,
"110": 7571,
"111": 7541,
"112": 7571,
},
)
def test_pick_sizes_over_multi_pa_multi_class_reversed(self):
osizes = {
"112": 100,
"111": 200,
"110": 300,
"102": 400,
"101": 500,
"100": 600,
"012": 700,
"011": 800,
"010": 900,
"002": 3000,
"001": 1100,
"000": 1200,
}
nsizes = _orbis_pick_sizes(
osizes,
imbalance_repair_level=1,
bias_repair_level=1,
favorable_labels={2},
sampling_strategy="over",
)
self.assertDictEqual(
nsizes,
{
"112": 403,
"111": 200,
"110": 300,
"102": 400,
"101": 500,
"100": 600,
"012": 700,
"011": 800,
"010": 900,
"002": 3000,
"001": 2981,
"000": 2688,
},
)
def test_pick_sizes_under_single_pa_single_class_normal(self):
nsizes = _orbis_pick_sizes(
osizes={"00": 100, "01": 200, "10": 300, "11": 400},
imbalance_repair_level=1,
bias_repair_level=1,
favorable_labels={1},
sampling_strategy="under",
)
self.assertDictEqual(nsizes, {"00": 100, "01": 100, "10": 300, "11": 300})
def test_pick_sizes_under_single_pa_single_class_reversed(self):
nsizes = _orbis_pick_sizes(
osizes={"00": 400, "01": 300, "10": 200, "11": 100},
imbalance_repair_level=1,
bias_repair_level=1,
favorable_labels={1},
sampling_strategy="under",
)
self.assertDictEqual(nsizes, {"00": 300, "01": 300, "10": 100, "11": 100})
def test_pick_sizes_under_multi_pa_multi_class_normal(self):
osizes = {
"000": 570,
"001": 670,
"002": 770,
"010": 870,
"011": 970,
"012": 1070,
"100": 7070,
"101": 7170,
"102": 7270,
"110": 7370,
"111": 7471,
"112": 7571,
}
nsizes = _orbis_pick_sizes(
osizes,
imbalance_repair_level=1,
bias_repair_level=1,
favorable_labels={1},
sampling_strategy="under",
)
self.assertDictEqual(
nsizes,
{
"000": 570,
"001": 627,
"002": 761,
"010": 870,
"011": 968,
"012": 976,
"100": 7070,
"101": 7170,
"102": 7129,
"110": 7370,
"111": 7381,
"112": 7414,
},
)
def test_pick_sizes_under_multi_pa_multi_class_reversed(self):
osizes = {
"112": 100,
"111": 200,
"110": 300,
"102": 400,
"101": 500,
"100": 600,
"012": 700,
"011": 800,
"010": 900,
"002": 3000,
"001": 1100,
"000": 1200,
}
nsizes = _orbis_pick_sizes(
osizes,
imbalance_repair_level=1,
bias_repair_level=1,
favorable_labels={2},
sampling_strategy="under",
)
self.assertDictEqual(
nsizes,
{
"112": 100,
"111": 100,
"110": 100,
"102": 400,
"101": 498,
"100": 304,
"012": 700,
"011": 655,
"010": 748,
"002": 1150,
"001": 1100,
"000": 1200,
},
)
class TestAIF360Cat(unittest.TestCase):
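# Preparation pipeline for categorical credit-g data: one-hot encode the
# string columns, pass the numeric columns through, and concatenate.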
@classmethod
def _prep_pd_cat(cls):
result = (
(
Project(columns={"type": "string"})
>> OneHotEncoder(handle_unknown="ignore")
)
& Project(columns={"type": "number"})
) >> ConcatFeatures
return result
@classmethod
def _creditg_pd_cat(cls):
X, y, fairness_info = lale.lib.aif360.fetch_creditg_df(preprocess=False)
cv = lale.lib.aif360.FairStratifiedKFold(**fairness_info, n_splits=3)
splits = []
lr = LogisticRegression()
for train, test in cv.split(X, y):
train_X, train_y = lale.helpers.split_with_schemas(lr, X, y, train)
assert isinstance(train_X, pd.DataFrame), type(train_X)
assert isinstance(train_y, pd.Series), type(train_y)
test_X, test_y = lale.helpers.split_with_schemas(lr, X, y, test, train)
assert isinstance(test_X, pd.DataFrame), type(test_X)
assert isinstance(test_y, pd.Series), type(test_y)
splits.append(
{
"train_X": train_X,
"train_y": train_y,
"test_X": test_X,
"test_y": test_y,
}
)
result = {"splits": splits, "fairness_info": fairness_info}
return result
@classmethod
def _creditg_np_cat(cls):
train_X = cls.creditg_pd_cat["splits"][0]["train_X"].to_numpy()
train_y = cls.creditg_pd_cat["splits"][0]["train_y"].to_numpy()
test_X = cls.creditg_pd_cat["splits"][0]["test_X"].to_numpy()
test_y = cls.creditg_pd_cat["splits"][0]["test_y"].to_numpy()
assert isinstance(train_X, np.ndarray), type(train_X)
assert not isinstance(train_X, NDArrayWithSchema), type(train_X)
assert isinstance(train_y, np.ndarray), type(train_y)
assert not isinstance(train_y, NDArrayWithSchema), type(train_y)
pd_columns = cls.creditg_pd_cat["splits"][0]["train_X"].columns
pd_fav_labels = cls.creditg_pd_cat["fairness_info"]["favorable_labels"]
pd_prot_attrs = cls.creditg_pd_cat["fairness_info"]["protected_attributes"]
fairness_info = {
"favorable_labels": pd_fav_labels,
"protected_attributes": [
{
"feature": pd_columns.get_loc("personal_status"),
"reference_group": pd_prot_attrs[0]["reference_group"],
},
{
"feature": pd_columns.get_loc("age"),
"reference_group": pd_prot_attrs[1]["reference_group"],
},
],
}
result = {
"train_X": train_X,
"train_y": train_y,
"test_X": test_X,
"test_y": test_y,
"fairness_info": fairness_info,
}
return result
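# Ternary fairness setup: each protected attribute declares both a reference
# group and a monitored group; values in neither group are encoded as a
# neutral 0.5 by the ProtectedAttributesEncoder (see test_encoder_pd_ternary).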
@classmethod
def _creditg_pd_ternary(cls):
X, y, _ = lale.lib.aif360.fetch_creditg_df(preprocess=False)
fairness_info = {
"favorable_labels": ["good"],
"protected_attributes": [
{
"feature": "personal_status",
"reference_group": ["male div/sep", "male mar/wid", "male single"],
"monitored_group": ["female div/dep/mar"],
},
{
"feature": "age",
"reference_group": [[26, 1000]],
"monitored_group": [[1, 23]],
},
],
"unfavorable_labels": ["bad"],
}
cv = lale.lib.aif360.FairStratifiedKFold(**fairness_info, n_splits=3)
splits = []
lr = LogisticRegression()
for train, test in cv.split(X, y):
train_X, train_y = lale.helpers.split_with_schemas(lr, X, y, train)
assert isinstance(train_X, pd.DataFrame), type(train_X)
assert isinstance(train_y, pd.Series), type(train_y)
test_X, test_y = lale.helpers.split_with_schemas(lr, X, y, test, train)
assert isinstance(test_X, pd.DataFrame), type(test_X)
assert isinstance(test_y, pd.Series), type(test_y)
splits.append(
{
"train_X": train_X,
"train_y": train_y,
"test_X": test_X,
"test_y": test_y,
}
)
result = {"splits": splits, "fairness_info": fairness_info}
return result
@classmethod
def _creditg_pd_repeated(cls):
X, y, fairness_info = lale.lib.aif360.fetch_creditg_df(preprocess=False)
cv = lale.lib.aif360.FairStratifiedKFold(
**fairness_info, n_splits=3, n_repeats=3
)
splits = []
lr = LogisticRegression()
for train, test in cv.split(X, y):
train_X, train_y = lale.helpers.split_with_schemas(lr, X, y, train)
assert isinstance(train_X, pd.DataFrame), type(train_X)
assert isinstance(train_y, pd.Series), type(train_y)
test_X, test_y = lale.helpers.split_with_schemas(lr, X, y, test, train)
assert isinstance(test_X, pd.DataFrame), type(test_X)
assert isinstance(test_y, pd.Series), type(test_y)
splits.append(
{
"train_X": train_X,
"train_y": train_y,
"test_X": test_X,
"test_y": test_y,
}
)
result = {"splits": splits, "fairness_info": fairness_info}
return result
@classmethod
def setUpClass(cls):
cls.prep_pd_cat = cls._prep_pd_cat()
cls.creditg_pd_cat = cls._creditg_pd_cat()
cls.creditg_np_cat = cls._creditg_np_cat()
cls.creditg_pd_ternary = cls._creditg_pd_ternary()
cls.creditg_pd_repeated = cls._creditg_pd_repeated()
def test_encoder_pd_cat(self):
info = self.creditg_pd_cat["fairness_info"]
orig_X = self.creditg_pd_cat["splits"][0]["train_X"]
encoder_separate = lale.lib.aif360.ProtectedAttributesEncoder(
protected_attributes=info["protected_attributes"]
)
csep_X = encoder_separate.transform(orig_X)
encoder_and = lale.lib.aif360.ProtectedAttributesEncoder(
protected_attributes=info["protected_attributes"], combine="and"
)
cand_X = encoder_and.transform(orig_X)
for i in orig_X.index:
orig_row = orig_X.loc[i]
csep_row = csep_X.loc[i]
cand_row = cand_X.loc[i]
cand_name = list(cand_X.columns)[0]
self.assertEqual(
1 if orig_row["personal_status"].startswith("male") else 0,
csep_row["personal_status"],
)
self.assertEqual(1 if orig_row["age"] >= 26 else 0, csep_row["age"])
self.assertEqual(
cand_row[cand_name],
1 if csep_row["personal_status"] == 1 == csep_row["age"] else 0,
)
def test_encoder_np_cat(self):
info = self.creditg_np_cat["fairness_info"]
orig_X = self.creditg_np_cat["train_X"]
encoder = lale.lib.aif360.ProtectedAttributesEncoder(
protected_attributes=info["protected_attributes"]
)
conv_X = encoder.transform(orig_X)
for i in range(orig_X.shape[0]):
self.assertEqual(
1 if orig_X[i, 8].startswith("male") else 0,
conv_X.at[i, "f8"],
)
self.assertEqual(1 if orig_X[i, 12] >= 26 else 0, conv_X.at[i, "f12"])
def test_encoder_pd_ternary(self):
info = self.creditg_pd_ternary["fairness_info"]
orig_X = self.creditg_pd_ternary["splits"][0]["train_X"]
encoder_separate = lale.lib.aif360.ProtectedAttributesEncoder(
protected_attributes=info["protected_attributes"]
)
csep_X = encoder_separate.transform(orig_X)
encoder_and = lale.lib.aif360.ProtectedAttributesEncoder(
protected_attributes=info["protected_attributes"], combine="and"
)
cand_X = encoder_and.transform(orig_X)
for i in orig_X.index:
orig_row = orig_X.loc[i]
csep_row = csep_X.loc[i]
cand_row = cand_X.loc[i]
cand_name = list(cand_X.columns)[0]
self.assertEqual(
1
if orig_row["personal_status"].startswith("male")
else 0
if orig_row["personal_status"].startswith("female")
else 0.5,
csep_row["personal_status"],
f"personal_status {orig_row['personal_status']}",
)
self.assertEqual(
1
if 26 <= orig_row["age"] <= 1000
else 0
if 1 <= orig_row["age"] <= 23
else 0.5,
csep_row["age"],
f"age {orig_row['age']}",
)
self.assertEqual(
cand_row[cand_name],
min(csep_row["personal_status"], csep_row["age"]),
f"age {orig_row['age']}, personal_status {orig_row['personal_status']}",
)
def test_column_for_stratification(self):
fairness_info = self.creditg_pd_cat["fairness_info"]
train_X = self.creditg_pd_cat["splits"][0]["train_X"]
train_y = self.creditg_pd_cat["splits"][0]["train_y"]
stratify = lale.lib.aif360.util._column_for_stratification(
train_X, train_y, **fairness_info, unfavorable_labels=None
)
for i in train_X.index:
male = train_X.loc[i]["personal_status"].startswith("male")
old = train_X.loc[i]["age"] >= 26
favorable = train_y.loc[i] == "good"
strat = stratify.loc[i]
self.assertEqual(male, strat[0] == "T")
self.assertEqual(old, strat[1] == "T")
self.assertEqual(favorable, strat[2] == "T")
def test_fair_stratified_train_test_split(self):
X, y, fairness_info = lale.lib.aif360.fetch_creditg_df(preprocess=False)
z = range(X.shape[0])
(
train_X,
test_X,
train_y,
test_y,
train_z,
test_z,
) = fair_stratified_train_test_split(X, y, z, **fairness_info)
self.assertEqual(train_X.shape[0], train_y.shape[0])
self.assertEqual(train_X.shape[0], len(train_z))
self.assertEqual(test_X.shape[0], test_y.shape[0])
self.assertEqual(test_X.shape[0], len(test_z))
def _attempt_scorers(self, fairness_info, estimator, test_X, test_y):
fi = fairness_info
disparate_impact_scorer = lale.lib.aif360.disparate_impact(**fi)
impact = disparate_impact_scorer(estimator, test_X, test_y)
self.assertLess(impact, 0.9)
if estimator.is_classifier():
blended_scorer = lale.lib.aif360.accuracy_and_disparate_impact(**fi)
blended = blended_scorer(estimator, test_X, test_y)
self.assertLess(0.0, blended)
self.assertLess(blended, 1.0)
else:
blended_scorer = lale.lib.aif360.r2_and_disparate_impact(**fi)
blended = blended_scorer(estimator, test_X, test_y)
self.assertLess(0.0, blended)
self.assertLess(blended, 1.0)
parity_scorer = lale.lib.aif360.statistical_parity_difference(**fi)
parity = parity_scorer(estimator, test_X, test_y)
self.assertLess(parity, 0.0)
eo_diff_scorer = lale.lib.aif360.equal_opportunity_difference(**fi)
eo_diff = eo_diff_scorer(estimator, test_X, test_y)
self.assertLess(eo_diff, 0.0)
ao_diff_scorer = lale.lib.aif360.average_odds_difference(**fi)
ao_diff = ao_diff_scorer(estimator, test_X, test_y)
self.assertLess(ao_diff, 0.1)
theil_index_scorer = lale.lib.aif360.theil_index(**fi)
theil_index = theil_index_scorer(estimator, test_X, test_y)
self.assertGreater(theil_index, 0.1)
symm_di_scorer = lale.lib.aif360.symmetric_disparate_impact(**fi)
symm_di = symm_di_scorer(estimator, test_X, test_y)
self.assertLess(symm_di, 0.9)
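# Batched scoring must agree exactly with scoring the whole test set at once,
# regardless of how many batches the data is split into.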
def _attempt_scorers_batched(self, fairness_info, estimator, test_X, test_y):
batched_scorer_factories = [
lale.lib.aif360.statistical_parity_difference,
lale.lib.aif360.disparate_impact,
lale.lib.aif360.equal_opportunity_difference,
lale.lib.aif360.average_odds_difference,
lale.lib.aif360.symmetric_disparate_impact,
lale.lib.aif360.accuracy_and_disparate_impact,
lale.lib.aif360.balanced_accuracy_and_disparate_impact,
lale.lib.aif360.f1_and_disparate_impact,
] # not including r2_and_disparate_impact, because it's for regression
for factory in batched_scorer_factories:
scorer = factory(**fairness_info)
score_orig = scorer(estimator, test_X, test_y)
for n_batches in [1, 3]:
batches = mockup_data_loader(test_X, test_y, n_batches, "pandas")
score_batched = scorer.score_estimator_batched(estimator, batches)
self.assertEqual(score_orig, score_batched, (type(scorer), n_batches))
def test_scorers_pd_cat(self):
fairness_info = self.creditg_pd_cat["fairness_info"]
trainable = self.prep_pd_cat >> LogisticRegression(max_iter=1000)
train_X = self.creditg_pd_cat["splits"][0]["train_X"]
train_y = self.creditg_pd_cat["splits"][0]["train_y"]
trained = trainable.fit(train_X, train_y)
test_X = self.creditg_pd_cat["splits"][0]["test_X"]
test_y = self.creditg_pd_cat["splits"][0]["test_y"]
self._attempt_scorers(fairness_info, trained, test_X, test_y)
test_y_frame = pd.DataFrame({test_y.name: test_y})
self._attempt_scorers(fairness_info, trained, test_X, test_y_frame)
self._attempt_scorers_batched(fairness_info, trained, test_X, test_y)
def test_scorers_np_cat(self):
fairness_info = self.creditg_np_cat["fairness_info"]
train_X = self.creditg_np_cat["train_X"]
train_y = self.creditg_np_cat["train_y"]
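# Split columns heuristically: those castable to float64 count as numeric,
# the rest as categorical.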
cat_columns, num_columns = [], []
for i in range(train_X.shape[1]):
try:
_ = train_X[:, i].astype(np.float64)
num_columns.append(i)
except ValueError:
cat_columns.append(i)
trainable = (
(
(Project(columns=cat_columns) >> OneHotEncoder(handle_unknown="ignore"))
& (
Project(columns=num_columns)
>> FunctionTransformer(func=lambda x: x.astype(np.float64))
)
)
>> ConcatFeatures
>> LogisticRegression(max_iter=1000)
)
trained = trainable.fit(train_X, train_y)
test_X = self.creditg_np_cat["test_X"]
test_y = self.creditg_np_cat["test_y"]
self._attempt_scorers(fairness_info, trained, test_X, test_y)
def test_scorers_pd_ternary(self):
fairness_info = self.creditg_pd_ternary["fairness_info"]
trainable = self.prep_pd_cat >> LogisticRegression(max_iter=1000)
train_X = self.creditg_pd_ternary["splits"][0]["train_X"]
train_y = self.creditg_pd_ternary["splits"][0]["train_y"]
trained = trainable.fit(train_X, train_y)
test_X = self.creditg_pd_ternary["splits"][0]["test_X"]
test_y = self.creditg_pd_ternary["splits"][0]["test_y"]
self._attempt_scorers(fairness_info, trained, test_X, test_y)
self._attempt_scorers_batched(fairness_info, trained, test_X, test_y)
def test_scorers_warn(self):
fairness_info = {
"favorable_labels": ["good"],
"protected_attributes": [{"feature": "age", "reference_group": [1]}],
}
trainable = self.prep_pd_cat >> LogisticRegression(max_iter=1000)
train_X = self.creditg_pd_cat["splits"][0]["train_X"]
train_y = self.creditg_pd_cat["splits"][0]["train_y"]
trained = trainable.fit(train_X, train_y)
test_X = self.creditg_pd_cat["splits"][0]["test_X"]
test_y = self.creditg_pd_cat["splits"][0]["test_y"]
disparate_impact_scorer = lale.lib.aif360.disparate_impact(**fairness_info)
with self.assertLogs(lale.lib.aif360.util.logger) as log_context_manager:
impact = disparate_impact_scorer(trained, test_X, test_y)
self.assertRegex(log_context_manager.output[-1], "is ill-defined")
self.assertTrue(np.isnan(impact))
def test_scorers_warn2(self):
fairness_info = {
"favorable_labels": ["good"],
"unfavorable_labels": ["good"],
"protected_attributes": [
{"feature": "age", "reference_group": [[26, 1000]]}
],
}
with self.assertLogs(lale.lib.aif360.util.logger) as log_context_manager:
_ = lale.lib.aif360.disparate_impact(**fairness_info)
self.assertRegex(
log_context_manager.output[-1],
"overlap between favorable labels and unfavorable labels on 'good' and 'good'",
)
def test_scorers_warn3(self):
fairness_info = {
"favorable_labels": ["good"],
"protected_attributes": [
{"feature": "age", "reference_group": [[1, 2, 3]]}
],
}
with self.assertRaises(jsonschema.ValidationError):
_ = lale.lib.aif360.disparate_impact(**fairness_info)
def test_scorers_warn4(self):
fairness_info = {
"favorable_labels": ["good"],
"protected_attributes": [
{
"feature": "age",
"reference_group": [[20, 40]],
"monitored_group": [30],
}
],
}
with self.assertLogs(lale.lib.aif360.util.logger) as log_context_manager:
_ = lale.lib.aif360.disparate_impact(**fairness_info)
self.assertRegex(
log_context_manager.output[-1],
"overlap between reference group and monitored group of feature 'age'",
)
def test_scorers_ternary_nonexhaustive(self):
X, y, fairness_info = lale.lib.aif360.fetch_nursery_df(preprocess=False)
self.assertEqual(
set(y), {"not_recom", "recommend", "very_recom", "priority", "spec_prior"}
)
fairness_info = {**fairness_info, "unfavorable_labels": ["not_recom"]}
self.assertEqual(
set(
fairness_info["favorable_labels"] + fairness_info["unfavorable_labels"]
),
{"spec_prior", "not_recom"},
)
di_scorer = lale.lib.aif360.disparate_impact(**fairness_info)
di_measured = di_scorer.score_data(X=X, y_pred=y)
self.assertAlmostEqual(di_measured, 0.461, places=3)
sp_scorer = lale.lib.aif360.statistical_parity_difference(**fairness_info)
sp_measured = sp_scorer.score_data(X=X, y_pred=y)
self.assertAlmostEqual(sp_measured, -0.205, places=3)
sdi_scorer = lale.lib.aif360.symmetric_disparate_impact(**fairness_info)
sdi_measured = sdi_scorer.score_data(X=X, y_pred=y)
self.assertAlmostEqual(sdi_measured, 0.461, places=3)
adi_scorer = lale.lib.aif360.accuracy_and_disparate_impact(**fairness_info)
adi_measured = adi_scorer.score_data(X=X, y_pred=y, y_true=y)
self.assertAlmostEqual(adi_measured, 0.731, places=3)
ao_scorer = lale.lib.aif360.average_odds_difference(**fairness_info)
with self.assertRaisesRegex(ValueError, "unexpected labels"):
_ = ao_scorer.score_data(X=X, y_pred=y)
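# Same cross-validated disparate-impact harness as in TestAIF360Num, but over
# the categorical credit-g splits and operators configured with a preparation
# sub-pipeline.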
def _attempt_remi_creditg_pd_cat(
self, fairness_info, trainable_remi, min_di, max_di
):
splits = self.creditg_pd_cat["splits"]
disparate_impact_scorer = lale.lib.aif360.disparate_impact(**fairness_info)
di_list = []
for split in splits:
if tensorflow_installed: # for AdversarialDebiasing
tf.compat.v1.reset_default_graph()
train_X = split["train_X"]
train_y = split["train_y"]
trained_remi = trainable_remi.fit(train_X, train_y)
test_X = split["test_X"]
test_y = split["test_y"]
di_list.append(disparate_impact_scorer(trained_remi, test_X, test_y))
di = pd.Series(di_list)
_, _, function_name, _ = traceback.extract_stack()[-2]
print(f"disparate impact {di.mean():.3f} +- {di.std():.3f} {function_name}")
self.assertTrue(
min_di <= di.mean() <= max_di,
f"{min_di} <= {di.mean()} <= {max_di}",
)
def test_adversarial_debiasing_pd_cat(self):
if tensorflow_installed:
fairness_info = self.creditg_pd_cat["fairness_info"]
trainable_remi = AdversarialDebiasing(
**fairness_info, preparation=self.prep_pd_cat
)
self._attempt_remi_creditg_pd_cat(fairness_info, trainable_remi, 0.0, 1.5)
def test_bagging_orbis_classifier_pd_cat(self):
fairness_info = self.creditg_pd_cat["fairness_info"]
trainable_remi = BaggingOrbisClassifier(
**fairness_info, preparation=self.prep_pd_cat
)
self._attempt_remi_creditg_pd_cat(fairness_info, trainable_remi, 0.7, 0.92)
def test_calibrated_eq_odds_postprocessing_pd_cat(self):
fairness_info = self.creditg_pd_cat["fairness_info"]
estim = self.prep_pd_cat >> LogisticRegression(max_iter=1000)
trainable_remi = CalibratedEqOddsPostprocessing(
**fairness_info, estimator=estim
)
self._attempt_remi_creditg_pd_cat(fairness_info, trainable_remi, 0.65, 0.85)
def test_disparate_impact_remover_pd_cat(self):
fairness_info = self.creditg_pd_cat["fairness_info"]
trainable_remi = DisparateImpactRemover(
**fairness_info, preparation=self.prep_pd_cat
) >> LogisticRegression(max_iter=1000)
self._attempt_remi_creditg_pd_cat(fairness_info, trainable_remi, 0.72, 0.92)
def test_disparate_impact_remover_pd_cat_no_redact(self):
fairness_info = self.creditg_pd_cat["fairness_info"]
trainable_remi = DisparateImpactRemover(
**fairness_info, redact=False, preparation=self.prep_pd_cat
) >> LogisticRegression(max_iter=1000)
self._attempt_remi_creditg_pd_cat(fairness_info, trainable_remi, 0.65, 0.75)
def test_disparate_impact_remover_pd_cat_bool(self):
X, y, fairness_info = lale.lib.aif360.fetch_ricci_df(preprocess=False)
y = y == "Promotion"
self.assertIs(y.dtype, np.dtype("bool"))
fairness_info = {**fairness_info, "favorable_labels": [True]}
trainable_remi = DisparateImpactRemover(
**fairness_info, preparation=self.prep_pd_cat
) >> LogisticRegression(max_iter=1000)
trained_remi = trainable_remi.fit(X, y)
_ = trained_remi.predict(X)
def test_eq_odds_postprocessing_pd_cat(self):
fairness_info = self.creditg_pd_cat["fairness_info"]
estim = self.prep_pd_cat >> LogisticRegression(max_iter=1000)
trainable_remi = EqOddsPostprocessing(**fairness_info, estimator=estim)
self._attempt_remi_creditg_pd_cat(fairness_info, trainable_remi, 0.82, 1.02)
def test_gerry_fair_classifier_pd_cat(self):
fairness_info = self.creditg_pd_cat["fairness_info"]
trainable_remi = GerryFairClassifier(
**fairness_info, preparation=self.prep_pd_cat
)
self._attempt_remi_creditg_pd_cat(fairness_info, trainable_remi, 0.677, 0.678)
def test_lfr_pd_cat(self):
if numba_installed:
fairness_info = self.creditg_pd_cat["fairness_info"]
trainable_remi = LFR(
**fairness_info, preparation=self.prep_pd_cat
) >> LogisticRegression(max_iter=1000)
self._attempt_remi_creditg_pd_cat(fairness_info, trainable_remi, 0.95, 1.05)
def test_meta_fair_classifier_pd_cat(self):
if aif360.__version__ >= "4.0.0":
fairness_info = self.creditg_pd_cat["fairness_info"]
trainable_remi = MetaFairClassifier(
**fairness_info, preparation=self.prep_pd_cat
)
self._attempt_remi_creditg_pd_cat(fairness_info, trainable_remi, 0.62, 0.87)
def test_optim_preproc_pd_cat(self):
# TODO: set the optimizer options as shown in the example https://github.com/Trusted-AI/AIF360/blob/master/examples/demo_optim_data_preproc.ipynb
if cvxpy_installed:
fairness_info = self.creditg_pd_cat["fairness_info"]
_ = OptimPreproc(**fairness_info, optim_options={}) >> LogisticRegression(
max_iter=1000
)
# TODO: this test does not yet call fit or predict
def test_orbis_mixed_pd_cat(self):
fairness_info = self.creditg_pd_cat["fairness_info"]
estim = self.prep_pd_cat >> LogisticRegression(max_iter=1000)
trainable_remi = Orbis(
estimator=estim, **fairness_info, sampling_strategy="mixed"
)
self._attempt_remi_creditg_pd_cat(fairness_info, trainable_remi, 0.7, 0.92)
def test_orbis_over_pd_cat(self):
fairness_info = self.creditg_pd_cat["fairness_info"]
estim = self.prep_pd_cat >> LogisticRegression(max_iter=1000)
trainable_remi = Orbis(
estimator=estim, **fairness_info, sampling_strategy="over"
)
self._attempt_remi_creditg_pd_cat(fairness_info, trainable_remi, 0.7, 0.92)
def test_orbis_under_pd_cat(self):
fairness_info = self.creditg_pd_cat["fairness_info"]
estim = self.prep_pd_cat >> LogisticRegression(max_iter=1000)
trainable_remi = Orbis(
estimator=estim, **fairness_info, sampling_strategy="under"
)
self._attempt_remi_creditg_pd_cat(fairness_info, trainable_remi, 0.7, 1.05)
def test_prejudice_remover_pd_cat(self):
fairness_info = self.creditg_pd_cat["fairness_info"]
trainable_remi = PrejudiceRemover(**fairness_info, preparation=self.prep_pd_cat)
self._attempt_remi_creditg_pd_cat(fairness_info, trainable_remi, 0.70, 0.80)
def test_redacting_pd_cat(self):
fairness_info = self.creditg_pd_cat["fairness_info"]
estim = self.prep_pd_cat >> LogisticRegression(max_iter=1000)
trainable_remi = Redacting(**fairness_info) >> estim
self._attempt_remi_creditg_pd_cat(fairness_info, trainable_remi, 0.78, 0.94)
def test_reject_option_classification_pd_cat(self):
fairness_info = self.creditg_pd_cat["fairness_info"]
estim = self.prep_pd_cat >> LogisticRegression(max_iter=1000)
trainable_remi = RejectOptionClassification(**fairness_info, estimator=estim)
self._attempt_remi_creditg_pd_cat(fairness_info, trainable_remi, 0.88, 0.98)
def test_sans_mitigation_pd_cat(self):
fairness_info = self.creditg_pd_cat["fairness_info"]
trainable_remi = self.prep_pd_cat >> LogisticRegression(max_iter=1000)
self._attempt_remi_creditg_pd_cat(fairness_info, trainable_remi, 0.66, 0.76)
def test_reweighing_pd_cat(self):
fairness_info = self.creditg_pd_cat["fairness_info"]
estim = self.prep_pd_cat >> LogisticRegression(max_iter=1000)
trainable_remi = Reweighing(estimator=estim, **fairness_info)
self._attempt_remi_creditg_pd_cat(fairness_info, trainable_remi, 0.85, 1.00)
def test_pd_cat_y_not_series(self):
fairness_info = self.creditg_pd_cat["fairness_info"]
trainable_remi = DisparateImpactRemover(
**fairness_info, preparation=self.prep_pd_cat
) >> LogisticRegression(max_iter=1000)
train_X = self.creditg_pd_cat["splits"][0]["train_X"]
train_y = self.creditg_pd_cat["splits"][0]["train_y"].to_frame()
trained_remi = trainable_remi.fit(train_X, train_y)
test_X = self.creditg_pd_cat["splits"][0]["test_X"]
test_y = self.creditg_pd_cat["splits"][0]["test_y"].to_frame()
di_scorer = lale.lib.aif360.disparate_impact(**fairness_info)
di = di_scorer(trained_remi, test_X, test_y)
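        # a disparate impact of 1.0 means statistical parity between groups;
        # 0.8 is the conventional "four-fifths rule" fairness threshold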
self.assertLessEqual(0.8, di)
self.assertLessEqual(di, 1.0)
def test_count_fairness_groups(self):
fairness_info = self.creditg_pd_cat["fairness_info"]
train_X = self.creditg_pd_cat["splits"][0]["train_X"]
train_y = self.creditg_pd_cat["splits"][0]["train_y"]
cfg = count_fairness_groups(train_X, train_y, **fairness_info)
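        # cfg counts the examples in each intersection of class label and
        # protected-attribute group, keyed by a multi-index over those values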
self.assertEqual(cfg.at[(0, 0, 0), "count"], 31)
self.assertEqual(cfg.at[(1, 1, 1), "count"], 298)
class TestAIF360Imports(unittest.TestCase):
def test_import_split(self):
# pylint:disable=reimported
# for backward compatibility, need to support import from `...util`
from lale.lib.aif360 import fair_stratified_train_test_split as s_init
from lale.lib.aif360.util import fair_stratified_train_test_split as s_util
self.assertIs(s_init, s_util)
def test_import_disparate_impact(self):
# for backward compatibility, need to support import from `...util`
from lale.lib.aif360 import disparate_impact as di_init
from lale.lib.aif360.util import disparate_impact as di_util
self.assertIs(di_init, di_util)
| 69,089 | 40.64557 | 153 |
py
|
lale
|
lale-master/test/test_pipeline.py
|
# Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
import unittest
import warnings
from sklearn.datasets import load_iris
from sklearn.metrics import accuracy_score, make_scorer
from sklearn.model_selection import train_test_split
from sklearn.neural_network import MLPClassifier as SkMLPClassifier
from sklearn.pipeline import Pipeline as SkPipeline
from sklearn.preprocessing import MinMaxScaler as SkMinMaxScaler
from lale.lib.lale import Batching, Hyperopt, NoOp
from lale.lib.sklearn import PCA, LogisticRegression, Nystroem
from lale.search.lale_grid_search_cv import get_grid_search_parameter_grids
class TestBatching(unittest.TestCase):
def setUp(self):
data = load_iris()
X, y = data.data, data.target
self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(X, y)
def test_fit(self):
import lale.lib.sklearn as lale_sklearn
warnings.filterwarnings(action="ignore")
pipeline = NoOp() >> Batching(
operator=lale_sklearn.MinMaxScaler()
>> lale_sklearn.MLPClassifier(random_state=42),
batch_size=56,
)
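        # Batching feeds the wrapped pipeline the training data in batches of
        # batch_size via partial_fit; the sklearn code below replays the same two
        # 56-row batches by hand, so the accuracies should match exactly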
trained = pipeline.fit(self.X_train, self.y_train)
predictions = trained.predict(self.X_test)
lale_accuracy = accuracy_score(self.y_test, predictions)
prep = SkMinMaxScaler()
trained_prep = prep.partial_fit(self.X_train[0:56, :], self.y_train[0:56])
trained_prep.partial_fit(self.X_train[56:, :], self.y_train[56:])
X_transformed = trained_prep.transform(self.X_train)
clf = SkMLPClassifier(random_state=42)
import numpy as np
trained_clf = clf.partial_fit(
X_transformed[0:56, :], self.y_train[0:56], classes=np.unique(self.y_train)
)
trained_clf.partial_fit(
X_transformed[56:, :], self.y_train[56:], classes=np.unique(self.y_train)
)
predictions = trained_clf.predict(trained_prep.transform(self.X_test))
sklearn_accuracy = accuracy_score(self.y_test, predictions)
self.assertEqual(lale_accuracy, sklearn_accuracy)
def test_fit1(self):
warnings.filterwarnings(action="ignore")
from lale.lib.sklearn import MinMaxScaler, MLPClassifier
pipeline = Batching(
operator=MinMaxScaler() >> MLPClassifier(random_state=42), batch_size=56
)
trained = pipeline.fit(self.X_train, self.y_train)
predictions = trained.predict(self.X_test)
lale_accuracy = accuracy_score(self.y_test, predictions)
prep = MinMaxScaler()
trained_prep = prep.partial_fit(self.X_train[0:56, :], self.y_train[0:56])
trained_prep.partial_fit(self.X_train[56:, :], self.y_train[56:])
X_transformed = trained_prep.transform(self.X_train)
clf = SkMLPClassifier(random_state=42)
import numpy as np
trained_clf = clf.partial_fit(
X_transformed[0:56, :], self.y_train[0:56], classes=np.unique(self.y_train)
)
trained_clf.partial_fit(
X_transformed[56:, :], self.y_train[56:], classes=np.unique(self.y_train)
)
predictions = trained_clf.predict(trained_prep.transform(self.X_test))
sklearn_accuracy = accuracy_score(self.y_test, predictions)
self.assertEqual(lale_accuracy, sklearn_accuracy)
def test_fit2(self):
warnings.filterwarnings(action="ignore")
from lale.lib.sklearn import MinMaxScaler
pipeline = Batching(
operator=MinMaxScaler() >> MinMaxScaler(), batch_size=112, shuffle=False
)
trained = pipeline.fit(self.X_train, self.y_train)
lale_transforms = trained.transform(self.X_test)
prep = SkMinMaxScaler()
trained_prep = prep.partial_fit(self.X_train, self.y_train)
X_transformed = trained_prep.transform(self.X_train)
clf = MinMaxScaler()
trained_clf = clf.partial_fit(X_transformed, self.y_train)
sklearn_transforms = trained_clf.transform(trained_prep.transform(self.X_test))
for i in range(5):
for j in range(2):
self.assertAlmostEqual(lale_transforms[i, j], sklearn_transforms[i, j])
def test_fit3(self):
from lale.lib.sklearn import MinMaxScaler, MLPClassifier
pipeline = PCA() >> Batching(
operator=MinMaxScaler() >> MLPClassifier(random_state=42), batch_size=10
)
trained = pipeline.fit(self.X_train, self.y_train)
_ = trained.predict(self.X_test)
def test_no_partial_fit(self):
pipeline = Batching(operator=NoOp() >> LogisticRegression())
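        # LogisticRegression has no partial_fit, so this checks that Batching can
        # still train such a pipeline (presumably by falling back to a plain fit)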
_ = pipeline.fit(self.X_train, self.y_train)
def test_fit4(self):
warnings.filterwarnings(action="ignore")
from lale.lib.sklearn import MinMaxScaler, MLPClassifier
pipeline = Batching(
operator=MinMaxScaler() >> MLPClassifier(random_state=42),
batch_size=56,
inmemory=True,
)
trained = pipeline.fit(self.X_train, self.y_train)
predictions = trained.predict(self.X_test)
lale_accuracy = accuracy_score(self.y_test, predictions)
prep = SkMinMaxScaler()
trained_prep = prep.partial_fit(self.X_train[0:56, :], self.y_train[0:56])
trained_prep.partial_fit(self.X_train[56:, :], self.y_train[56:])
X_transformed = trained_prep.transform(self.X_train)
clf = SkMLPClassifier(random_state=42)
import numpy as np
trained_clf = clf.partial_fit(
X_transformed[0:56, :], self.y_train[0:56], classes=np.unique(self.y_train)
)
trained_clf.partial_fit(
X_transformed[56:, :], self.y_train[56:], classes=np.unique(self.y_train)
)
predictions = trained_clf.predict(trained_prep.transform(self.X_test))
sklearn_accuracy = accuracy_score(self.y_test, predictions)
self.assertEqual(lale_accuracy, sklearn_accuracy)
# TODO: Nesting doesn't work yet
# def test_nested_pipeline(self):
# from lale.lib.sklearn import MinMaxScaler, MLPClassifier
# pipeline = Batching(operator = MinMaxScaler() >> Batching(operator = NoOp() >> MLPClassifier(random_state=42)), batch_size = 112)
# trained = pipeline.fit(self.X_train, self.y_train)
# predictions = trained.predict(self.X_test)
# lale_accuracy = accuracy_score(self.y_test, predictions)
class TestPipeline(unittest.TestCase):
def dont_test_with_gridsearchcv2_auto(self):
from sklearn.model_selection import GridSearchCV
lr = LogisticRegression(random_state=42)
pca = PCA(random_state=42, svd_solver="arpack")
trainable = pca >> lr
scikit_pipeline = SkPipeline(
[
(pca.name(), PCA(random_state=42, svd_solver="arpack")),
(lr.name(), LogisticRegression(random_state=42)),
]
)
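        # get_grid_search_parameter_grids turns the lale pipeline's hyperparameter
        # schemas into sklearn-style parameter grids keyed by the step names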
all_parameters = get_grid_search_parameter_grids(trainable, num_samples=1)
        # sample only two of the grids, otherwise the test takes too long
parameters = random.sample(all_parameters, 2)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
clf = GridSearchCV(
scikit_pipeline, parameters, cv=2, scoring=make_scorer(accuracy_score)
)
iris = load_iris()
clf.fit(iris.data, iris.target)
predicted = clf.predict(iris.data)
accuracy_with_lale_operators = accuracy_score(iris.target, predicted)
from sklearn.decomposition import PCA as SklearnPCA
from sklearn.linear_model import LogisticRegression as SklearnLR
scikit_pipeline = SkPipeline(
[
(pca.name(), SklearnPCA(random_state=42, svd_solver="arpack")),
(lr.name(), SklearnLR(random_state=42)),
]
)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
clf = GridSearchCV(
scikit_pipeline, parameters, cv=2, scoring=make_scorer(accuracy_score)
)
iris = load_iris()
clf.fit(iris.data, iris.target)
predicted = clf.predict(iris.data)
accuracy_with_scikit_operators = accuracy_score(iris.target, predicted)
self.assertEqual(accuracy_with_lale_operators, accuracy_with_scikit_operators)
def test_with_gridsearchcv3(self):
from sklearn.model_selection import GridSearchCV
_ = LogisticRegression()
scikit_pipeline = SkPipeline(
[("nystroem", Nystroem()), ("lr", LogisticRegression())]
)
parameters = {"lr__solver": ("liblinear", "lbfgs"), "lr__penalty": ["l2"]}
clf = GridSearchCV(
scikit_pipeline, parameters, cv=2, scoring=make_scorer(accuracy_score)
)
iris = load_iris()
with warnings.catch_warnings():
warnings.simplefilter("ignore")
clf.fit(iris.data, iris.target)
_ = clf.predict(iris.data)
def test_with_gridsearchcv3_auto(self):
from sklearn.model_selection import GridSearchCV
lr = LogisticRegression()
scikit_pipeline = SkPipeline(
[(Nystroem().name(), Nystroem()), (lr.name(), LogisticRegression())]
)
all_parameters = get_grid_search_parameter_grids(
Nystroem() >> lr, num_samples=1
)
        # sample only two of the grids, otherwise the test takes too long
parameters = random.sample(all_parameters, 2)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
clf = GridSearchCV(
scikit_pipeline, parameters, cv=2, scoring=make_scorer(accuracy_score)
)
iris = load_iris()
clf.fit(iris.data, iris.target)
_ = clf.predict(iris.data)
def test_with_gridsearchcv3_auto_wrapped(self):
pipeline = Nystroem() >> LogisticRegression()
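        # lale's GridSearchCV wrapper derives its parameter grids from the planned
        # pipeline itself; lale_num_samples and lale_num_grids cap how many it generates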
with warnings.catch_warnings():
warnings.simplefilter("ignore")
from lale.lib.lale import GridSearchCV
clf = GridSearchCV(
estimator=pipeline,
lale_num_samples=1,
lale_num_grids=1,
cv=2,
scoring=make_scorer(accuracy_score),
)
iris = load_iris()
clf.fit(iris.data, iris.target)
_ = clf.predict(iris.data)
class TestBatching2(unittest.TestCase):
def setUp(self):
data = load_iris()
X, y = data.data, data.target
self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(X, y)
def test_batching_with_hyperopt(self):
from lale.lib.sklearn import MinMaxScaler, SGDClassifier
pipeline = Batching(operator=MinMaxScaler() >> SGDClassifier())
trained = pipeline.auto_configure(
self.X_train, self.y_train, optimizer=Hyperopt, max_evals=1
)
_ = trained.predict(self.X_test)
class TestImportFromSklearnWithCognito(unittest.TestCase):
def test_import_from_sklearn(self):
pipeline_str = """from lale.lib.autoai_libs import NumpyColumnSelector
from lale.lib.autoai_libs import CompressStrings
from lale.lib.autoai_libs import NumpyReplaceMissingValues
from lale.lib.autoai_libs import NumpyReplaceUnknownValues
from lale.lib.autoai_libs import boolean2float
from lale.lib.autoai_libs import CatImputer
from lale.lib.autoai_libs import CatEncoder
import numpy as np
from lale.lib.autoai_libs import float32_transform
from lale.operators import make_pipeline
from lale.lib.autoai_libs import FloatStr2Float
from lale.lib.autoai_libs import NumImputer
from lale.lib.autoai_libs import OptStandardScaler
from lale.operators import make_union
from lale.lib.autoai_libs import NumpyPermuteArray
from lale.lib.autoai_libs import TA1
import autoai_libs.utils.fc_methods
from lale.lib.autoai_libs import FS1
from xgboost import XGBRegressor
numpy_column_selector_0 = NumpyColumnSelector(columns=[1])
compress_strings = CompressStrings(compress_type='hash', dtypes_list=['int_num'], missing_values_reference_list=['', '-', '?', float('nan')], misslist_list=[[]])
numpy_replace_missing_values_0 = NumpyReplaceMissingValues(filling_values=float('nan'), missing_values=[])
numpy_replace_unknown_values = NumpyReplaceUnknownValues(filling_values=float('nan'), filling_values_list=[float('nan')], known_values_list=[[36, 45, 56, 67, 68, 75, 78, 89]], missing_values_reference_list=['', '-', '?', float('nan')])
cat_imputer = CatImputer(missing_values=float('nan'), sklearn_version_family='20', strategy='most_frequent')
cat_encoder = CatEncoder(dtype=np.float64, handle_unknown='error', sklearn_version_family='20')
pipeline_0 = make_pipeline(numpy_column_selector_0, compress_strings, numpy_replace_missing_values_0, numpy_replace_unknown_values, boolean2float(), cat_imputer, cat_encoder, float32_transform())
numpy_column_selector_1 = NumpyColumnSelector(columns=[0])
float_str2_float = FloatStr2Float(dtypes_list=['int_num'], missing_values_reference_list=[])
numpy_replace_missing_values_1 = NumpyReplaceMissingValues(filling_values=float('nan'), missing_values=[])
num_imputer = NumImputer(missing_values=float('nan'), strategy='median')
opt_standard_scaler = OptStandardScaler(num_scaler_copy=None, num_scaler_with_mean=None, num_scaler_with_std=None, use_scaler_flag=False)
pipeline_1 = make_pipeline(numpy_column_selector_1, float_str2_float, numpy_replace_missing_values_1, num_imputer, opt_standard_scaler, float32_transform())
union = make_union(pipeline_0, pipeline_1)
numpy_permute_array = NumpyPermuteArray(axis=0, permutation_indices=[1, 0])
ta1_0 = TA1(fun=np.tan, name='tan', datatypes=['float'], feat_constraints=[autoai_libs.utils.fc_methods.is_not_categorical], col_names=['age', 'weight'], col_dtypes=[np.dtype('float32'), np.dtype('float32')])
fs1_0 = FS1(cols_ids_must_keep=range(0, 2), additional_col_count_to_keep=4, ptype='regression')
ta1_1 = TA1(fun=np.square, name='square', datatypes=['numeric'], feat_constraints=[autoai_libs.utils.fc_methods.is_not_categorical], col_names=['age', 'weight', 'tan(age)'], col_dtypes=[np.dtype('float32'), np.dtype('float32'), np.dtype('float32')])
fs1_1 = FS1(cols_ids_must_keep=range(0, 2), additional_col_count_to_keep=4, ptype='regression')
ta1_2 = TA1(fun=np.sin, name='sin', datatypes=['float'], feat_constraints=[autoai_libs.utils.fc_methods.is_not_categorical], col_names=['age', 'weight', 'tan(age)', 'square(age)', 'square(tan(age))'], col_dtypes=[np.dtype('float32'), np.dtype('float32'), np.dtype('float32'), np.dtype('float32'), np.dtype('float32')])
fs1_2 = FS1(cols_ids_must_keep=range(0, 2), additional_col_count_to_keep=4, ptype='regression')
xgb_regressor = XGBRegressor(missing=float('nan'), n_jobs=4, random_state=33, silent=True, verbosity=0)
pipeline = make_pipeline(union, numpy_permute_array, ta1_0, fs1_0, ta1_1, fs1_1, ta1_2, fs1_2, xgb_regressor)
"""
globals2 = {}
# This call to exec should be safe since we are using a fixed (completely specified) string
exec(pipeline_str, globals2) # nosec
pipeline2 = globals2["pipeline"]
sklearn_pipeline = pipeline2.export_to_sklearn_pipeline()
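        # round-trip check: the exported object is a native sklearn Pipeline (the lale
        # union becoming a FeatureUnion), and import_from_sklearn_pipeline maps it back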
from lale import helpers
_ = helpers.import_from_sklearn_pipeline(sklearn_pipeline)
class TestExportToSklearnForEstimator(unittest.TestCase):
def setUp(self):
data = load_iris()
X, y = data.data, data.target
self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(X, y)
def create_pipeline(self):
from sklearn.decomposition import PCA as SkPCA
from sklearn.pipeline import make_pipeline
pipeline = make_pipeline(SkPCA(), LogisticRegression())
return pipeline
def test_import_export_trained(self):
import numpy as np
from lale.helpers import import_from_sklearn_pipeline
pipeline = self.create_pipeline()
self.assertEqual(isinstance(pipeline, SkPipeline), True)
pipeline.fit(self.X_train, self.y_train)
predictions_before = pipeline.predict(self.X_test)
lale_pipeline = import_from_sklearn_pipeline(pipeline)
predictions_after = lale_pipeline.predict(self.X_test)
sklearn_pipeline = lale_pipeline.export_to_sklearn_pipeline()
predictions_after_1 = sklearn_pipeline.predict(self.X_test)
self.assertEqual(np.all(predictions_before == predictions_after), True)
self.assertEqual(np.all(predictions_before == predictions_after_1), True)
def test_import_export_trainable(self):
from sklearn.exceptions import NotFittedError
from lale.helpers import import_from_sklearn_pipeline
pipeline = self.create_pipeline()
self.assertEqual(isinstance(pipeline, SkPipeline), True)
pipeline.fit(self.X_train, self.y_train)
lale_pipeline = import_from_sklearn_pipeline(pipeline, fitted=False)
with self.assertRaises(ValueError):
lale_pipeline.predict(self.X_test)
sklearn_pipeline = lale_pipeline.export_to_sklearn_pipeline()
with self.assertRaises(NotFittedError):
sklearn_pipeline.predict(self.X_test)
| 17,752 | 43.717884 | 318 |
py
|
lale
|
lale-master/test/test_core_regressors.py
|
# Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from jsonschema.exceptions import ValidationError
import lale.lib.lale
import lale.type_checking
from lale.lib.lale import NoOp
from lale.lib.sklearn import (
ExtraTreesRegressor,
GradientBoostingRegressor,
RandomForestRegressor,
Ridge,
SGDRegressor,
)
class TestRegression(unittest.TestCase):
def setUp(self):
from sklearn.datasets import make_regression
from sklearn.model_selection import train_test_split
X, y = make_regression(
n_samples=200, n_features=4, n_informative=2, random_state=0, shuffle=False
)
self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(X, y)
def create_function_test_regressor(clf_name):
def test_regressor(self):
X_train, y_train = self.X_train, self.y_train
import importlib
module_name = ".".join(clf_name.split(".")[0:-1])
class_name = clf_name.split(".")[-1]
module = importlib.import_module(module_name)
class_ = getattr(module, class_name)
regr = None
if class_name in ["StackingRegressor", "VotingRegressor"]:
regr = class_(estimators=[("base", SGDRegressor())])
else:
regr = class_()
# test_schemas_are_schemas
lale.type_checking.validate_is_schema(regr.input_schema_fit())
lale.type_checking.validate_is_schema(regr.input_schema_predict())
lale.type_checking.validate_is_schema(regr.output_schema_predict())
lale.type_checking.validate_is_schema(regr.hyperparam_schema())
# test_init_fit_predict
trained = regr.fit(self.X_train, self.y_train)
_ = trained.predict(self.X_test)
# test score
_ = trained.score(self.X_test, self.y_test)
# test_predict_on_trainable
trained = regr.fit(X_train, y_train)
regr.predict(X_train)
# test_to_json
regr.to_json()
# test_in_a_pipeline
pipeline = NoOp() >> regr
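        # `>>` is lale's pipeline combinator, i.e. this is make_pipeline(NoOp(), regr)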
trained = pipeline.fit(self.X_train, self.y_train)
_ = trained.predict(self.X_test)
# test_with_hyperopt
if isinstance(regr, Ridge): # type: ignore
from lale.lib.lale import Hyperopt
hyperopt = Hyperopt(estimator=pipeline, max_evals=1)
trained = hyperopt.fit(self.X_train, self.y_train)
_ = trained.predict(self.X_test)
test_regressor.__name__ = f"test_{clf_name.split('.')[-1]}"
return test_regressor
regressors = [
"lale.lib.sklearn.BaggingRegressor",
"lale.lib.sklearn.DummyRegressor",
"lale.lib.sklearn.RandomForestRegressor",
"lale.lib.sklearn.DecisionTreeRegressor",
"lale.lib.sklearn.ExtraTreesRegressor",
"lale.lib.sklearn.GradientBoostingRegressor",
"lale.lib.sklearn.LinearRegression",
"lale.lib.sklearn.Ridge",
"lale.lib.lightgbm.LGBMRegressor",
"lale.lib.xgboost.XGBRegressor",
"lale.lib.sklearn.AdaBoostRegressor",
"lale.lib.sklearn.SGDRegressor",
"lale.lib.sklearn.SVR",
"lale.lib.sklearn.KNeighborsRegressor",
"lale.lib.sklearn.LinearSVR",
"lale.lib.sklearn.StackingRegressor",
"lale.lib.sklearn.VotingRegressor",
]
for clf in regressors:
setattr(
TestRegression,
f"test_{clf.rsplit('.', maxsplit=1)[-1]}",
create_function_test_regressor(clf),
)
class TestSpuriousSideConstraintsRegression(unittest.TestCase):
# This was prompted by a bug, keeping it as it may help with support for other sklearn versions
def setUp(self):
from sklearn.datasets import make_regression
from sklearn.model_selection import train_test_split
X, y = make_regression(
n_features=4, n_informative=2, random_state=0, shuffle=False
)
self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(X, y)
def test_gradient_boost_regressor(self):
reg = GradientBoostingRegressor(
alpha=0.9789984970831765,
criterion="friedman_mse",
init=None,
learning_rate=0.1,
loss="squared_error",
)
reg.fit(self.X_train, self.y_train)
def test_sgd_regressor(self):
reg = SGDRegressor(loss="squared_error", epsilon=0.2)
reg.fit(self.X_train, self.y_train)
def test_sgd_regressor_1(self):
reg = SGDRegressor(learning_rate="optimal", eta0=0.2)
reg.fit(self.X_train, self.y_train)
def test_sgd_regressor_2(self):
reg = SGDRegressor(early_stopping=False, validation_fraction=0.2)
reg.fit(self.X_train, self.y_train)
def test_sgd_regressor_3(self):
reg = SGDRegressor(l1_ratio=0.2, penalty="l1")
reg.fit(self.X_train, self.y_train)
class TestFriedmanMSE(unittest.TestCase):
    # This was prompted by a bug, keeping it as it may help with support for other sklearn versions
def setUp(self):
from sklearn.datasets import make_regression
from sklearn.model_selection import train_test_split
X, y = make_regression(
n_features=4, n_informative=2, random_state=0, shuffle=False
)
self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(X, y)
def test_rfr(self):
import sklearn
if sklearn.__version__ < "1.0":
reg = RandomForestRegressor(
bootstrap=True,
criterion="friedman_mse",
max_depth=4,
max_features=0.9832410473940374,
max_leaf_nodes=None,
min_impurity_decrease=0.0,
min_impurity_split=None,
min_samples_leaf=3,
min_samples_split=2,
min_weight_fraction_leaf=0.0,
n_estimators=29,
n_jobs=4,
oob_score=False,
random_state=33,
verbose=0,
warm_start=False,
)
reg.fit(self.X_train, self.y_train)
def test_etr(self):
import sklearn
if sklearn.__version__ < "1.0":
reg = ExtraTreesRegressor(
bootstrap=True,
criterion="friedman_mse",
max_depth=4,
max_features=0.9832410473940374,
max_leaf_nodes=None,
min_impurity_decrease=0.0,
min_impurity_split=None,
min_samples_leaf=3,
min_samples_split=2,
min_weight_fraction_leaf=0.0,
n_estimators=29,
n_jobs=4,
oob_score=False,
random_state=33,
verbose=0,
warm_start=False,
)
reg.fit(self.X_train, self.y_train)
class TestRidge(unittest.TestCase):
# This was prompted by a bug, keeping it as it may help with support for other sklearn versions
def setUp(self):
from sklearn.datasets import make_regression
from sklearn.model_selection import train_test_split
X, y = make_regression(
n_features=4, n_informative=2, random_state=0, shuffle=False
)
self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(X, y)
def test_positive(self):
import sklearn
from lale.settings import set_disable_data_schema_validation
set_disable_data_schema_validation(False)
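        # in sklearn >= 1.0, positive=True is only supported by the lbfgs solver (or
        # "auto", which then picks lbfgs), and lbfgs in turn requires positive=True;
        # lale encodes this as a hyperparameter constraint, hence the ValidationErrors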
if sklearn.__version__ > "1.0":
reg = Ridge(solver="lbfgs", positive=True)
reg.fit(self.X_train, self.y_train)
with self.assertRaises(ValidationError):
reg = Ridge(solver="saga", positive=True)
reg = Ridge(solver="auto", positive=True)
reg.fit(self.X_train, self.y_train)
with self.assertRaises(ValidationError):
reg = Ridge(solver="lbfgs", positive=False)
reg = Ridge(solver="auto", positive=False)
reg.fit(self.X_train, self.y_train)
| 8,557 | 32.826087 | 100 |
py
|
lale
|
lale-master/test/test_core_pipeline.py
|
# Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pickle
import traceback
import typing
import unittest
import numpy as np
import sklearn.datasets
import sklearn.pipeline
from sklearn.feature_selection import SelectKBest as SkSelectKBest
from sklearn.metrics import accuracy_score, r2_score
from sklearn.model_selection import train_test_split
import lale.datasets.openml
import lale.helpers
from lale.helpers import import_from_sklearn_pipeline
from lale.lib.lale import ConcatFeatures, NoOp
from lale.lib.sklearn import (
PCA,
AdaBoostClassifier,
GaussianNB,
IsolationForest,
KNeighborsClassifier,
LinearRegression,
LinearSVC,
LogisticRegression,
Nystroem,
OneHotEncoder,
PassiveAggressiveClassifier,
SelectKBest,
SGDClassifier,
StandardScaler,
)
from lale.lib.xgboost import XGBClassifier
from lale.operators import (
TrainableIndividualOp,
TrainablePipeline,
TrainedIndividualOp,
TrainedPipeline,
make_choice,
make_pipeline,
make_union,
)
class TestCreation(unittest.TestCase):
def setUp(self):
data = sklearn.datasets.load_iris()
X, y = data.data, data.target
self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(X, y)
def test_pipeline_create(self):
from lale.operators import Pipeline
        pipeline = Pipeline([("pca1", PCA()), ("lr1", LogisticRegression())])
trained = pipeline.fit(self.X_train, self.y_train)
predictions = trained.predict(self.X_test)
accuracy_score(self.y_test, predictions)
def test_pipeline_create_trainable(self):
from lale.lib.sklearn import Pipeline as SkPipeline
pipeline = SkPipeline(steps=[("pca1", PCA()), ("lr1", LogisticRegression())])
self.assertIsInstance(pipeline, TrainableIndividualOp)
trained = pipeline.fit(self.X_train, self.y_train)
pca_trained, lr_trained = [op for _, op in trained.hyperparams()["steps"]]
self.assertIsInstance(pca_trained, TrainedIndividualOp)
self.assertIsInstance(lr_trained, TrainedIndividualOp)
predictions = trained.predict(self.X_test)
accuracy_score(self.y_test, predictions)
def test_pipeline_create_trained(self):
from lale.lib.sklearn import Pipeline as SkPipeline
orig_trainable = PCA() >> LogisticRegression()
orig_trained = orig_trainable.fit(self.X_train, self.y_train)
self.assertIsInstance(orig_trained, TrainedPipeline)
pca_trained, lr_trained = orig_trained.steps_list()
pre_trained = SkPipeline(steps=[("pca1", pca_trained), ("lr1", lr_trained)])
self.assertIsInstance(pre_trained, TrainedIndividualOp)
predictions = pre_trained.predict(self.X_test)
accuracy_score(self.y_test, predictions)
def test_pipeline_clone(self):
from sklearn.base import clone
from lale.operators import Pipeline
        pipeline = Pipeline([("pca1", PCA()), ("lr1", LogisticRegression())])
trained = pipeline.fit(self.X_train, self.y_train)
predictions = trained.predict(self.X_test)
orig_acc = accuracy_score(self.y_test, predictions)
cloned_pipeline = clone(pipeline)
trained = cloned_pipeline.fit(self.X_train, self.y_train)
predictions = trained.predict(self.X_test)
cloned_acc = accuracy_score(self.y_test, predictions)
self.assertEqual(orig_acc, cloned_acc)
def test_make_pipeline(self):
tfm = PCA(n_components=10)
clf = LogisticRegression(random_state=42)
trainable = make_pipeline(tfm, clf)
digits = sklearn.datasets.load_digits()
trained = trainable.fit(digits.data, digits.target)
_ = trained.predict(digits.data)
def test_compose2(self):
tfm = PCA(n_components=10)
clf = LogisticRegression(random_state=42)
trainable = tfm >> clf
digits = sklearn.datasets.load_digits()
trained = trainable.fit(digits.data, digits.target)
_ = trained.predict(digits.data)
def test_compose3(self):
nys = Nystroem(n_components=15)
pca = PCA(n_components=10)
lr = LogisticRegression(random_state=42)
trainable = nys >> pca >> lr
digits = sklearn.datasets.load_digits()
trained = trainable.fit(digits.data, digits.target)
_ = trained.predict(digits.data)
def test_pca_nys_lr(self):
nys = Nystroem(n_components=15)
pca = PCA(n_components=10)
lr = LogisticRegression(random_state=42)
trainable = make_union(nys, pca) >> lr
digits = sklearn.datasets.load_digits()
trained = trainable.fit(digits.data, digits.target)
_ = trained.predict(digits.data)
def test_compose4(self):
digits = sklearn.datasets.load_digits()
_ = digits
ohe = OneHotEncoder(handle_unknown=OneHotEncoder.enum.handle_unknown.ignore)
ohe.get_params()
no_op = NoOp()
pca = PCA()
nys = Nystroem()
lr = LogisticRegression()
knn = KNeighborsClassifier()
step1 = ohe | no_op
step2 = pca | nys
step3 = lr | knn
model_plan = step1 >> step2 >> step3
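        # `|` builds an OperatorChoice, so model_plan is a planned pipeline whose
        # steps must be resolved by an optimizer before it can be trained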
_ = model_plan
# TODO: optimize on this plan and then fit and predict
def test_compose5(self):
ohe = OneHotEncoder(handle_unknown=OneHotEncoder.enum.handle_unknown.ignore)
digits = sklearn.datasets.load_digits()
lr = LogisticRegression()
lr_trained = lr.fit(digits.data, digits.target)
lr_trained.predict(digits.data)
pipeline1 = ohe >> lr
pipeline1_trained = pipeline1.fit(digits.data, digits.target)
pipeline1_trained.predict(digits.data)
def test_compare_with_sklearn(self):
tfm = PCA()
clf = LogisticRegression(
LogisticRegression.enum.solver.saga,
LogisticRegression.enum.multi_class.auto,
)
trainable = make_pipeline(tfm, clf)
digits = sklearn.datasets.load_digits()
trained = trainable.fit(digits.data, digits.target)
predicted = trained.predict(digits.data)
from sklearn.decomposition import PCA as SklearnPCA
from sklearn.linear_model import LogisticRegression as SklearnLR
sklearn_pipeline = sklearn.pipeline.make_pipeline(
SklearnPCA(), SklearnLR(solver="saga", multi_class="auto")
)
sklearn_pipeline.fit(digits.data, digits.target)
predicted_sklearn = sklearn_pipeline.predict(digits.data)
lale_score = accuracy_score(digits.target, predicted)
scikit_score = accuracy_score(digits.target, predicted_sklearn)
self.assertEqual(lale_score, scikit_score)
class TestImportExport(unittest.TestCase):
def setUp(self):
data = sklearn.datasets.load_iris()
X, y = data.data, data.target
self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(X, y)
@classmethod
def get_sklearn_params(cls, op):
lale_sklearn_impl = op._impl_instance()
wrapped_model = getattr(lale_sklearn_impl, "_wrapped_model", None)
if wrapped_model is not None:
lale_sklearn_impl = wrapped_model
return lale_sklearn_impl.get_params()
def assert_equal_predictions(self, pipeline1, pipeline2):
trained = pipeline1.fit(self.X_train, self.y_train)
predictions1 = trained.predict(self.X_test)
trained = pipeline2.fit(self.X_train, self.y_train)
predictions2 = trained.predict(self.X_test)
for i, p1 in enumerate(predictions1):
self.assertEqual(p1, predictions2[i])
def test_import_from_sklearn_pipeline(self):
from sklearn.feature_selection import f_regression
from sklearn.pipeline import Pipeline
from sklearn.svm import SVC as SklearnSVC
anova_filter = SkSelectKBest(f_regression, k=3)
clf = SklearnSVC(kernel="linear")
sklearn_pipeline = Pipeline([("anova", anova_filter), ("svc", clf)])
lale_pipeline = typing.cast(
TrainablePipeline,
import_from_sklearn_pipeline(sklearn_pipeline),
)
for i, pipeline_step in enumerate(sklearn_pipeline.named_steps):
sklearn_step_params = sklearn_pipeline.named_steps[
pipeline_step
].get_params()
lale_sklearn_params = self.get_sklearn_params(lale_pipeline.steps_list()[i])
self.assertEqual(sklearn_step_params, lale_sklearn_params)
self.assert_equal_predictions(sklearn_pipeline, lale_pipeline)
def test_import_from_sklearn_pipeline1(self):
from sklearn.decomposition import PCA as SklearnPCA
from sklearn.neighbors import KNeighborsClassifier as SklearnKNN
sklearn_pipeline = sklearn.pipeline.make_pipeline(
SklearnPCA(n_components=3), SklearnKNN()
)
lale_pipeline = typing.cast(
TrainablePipeline,
import_from_sklearn_pipeline(sklearn_pipeline),
)
for i, pipeline_step in enumerate(sklearn_pipeline.named_steps):
sklearn_step_params = sklearn_pipeline.named_steps[
pipeline_step
].get_params()
lale_sklearn_params = self.get_sklearn_params(lale_pipeline.steps_list()[i])
self.assertEqual(sklearn_step_params, lale_sklearn_params)
self.assert_equal_predictions(sklearn_pipeline, lale_pipeline)
def test_import_from_sklearn_pipeline_feature_union(self):
from sklearn.decomposition import PCA as SklearnPCA
from sklearn.kernel_approximation import Nystroem as SklearnNystroem
from sklearn.neighbors import KNeighborsClassifier as SklearnKNN
from sklearn.pipeline import FeatureUnion
union = FeatureUnion(
[
("pca", SklearnPCA(n_components=1)),
("nys", SklearnNystroem(n_components=2, random_state=42)),
]
)
sklearn_pipeline = sklearn.pipeline.make_pipeline(union, SklearnKNN())
lale_pipeline = typing.cast(
TrainablePipeline,
import_from_sklearn_pipeline(sklearn_pipeline),
)
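        # a FeatureUnion becomes parallel lale branches whose outputs flow into a
        # single ConcatFeatures node, giving the three edges asserted below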
self.assertEqual(len(lale_pipeline.edges()), 3)
self.assertIsInstance(lale_pipeline.edges()[0][0], PCA) # type: ignore
self.assertIsInstance(lale_pipeline.edges()[0][1], ConcatFeatures) # type: ignore
self.assertIsInstance(lale_pipeline.edges()[1][0], Nystroem) # type: ignore
self.assertIsInstance(lale_pipeline.edges()[1][1], ConcatFeatures) # type: ignore
self.assertIsInstance(lale_pipeline.edges()[2][0], ConcatFeatures) # type: ignore
self.assertIsInstance(lale_pipeline.edges()[2][1], KNeighborsClassifier) # type: ignore
self.assert_equal_predictions(sklearn_pipeline, lale_pipeline)
def test_import_from_sklearn_pipeline_nested_pipeline(self):
from sklearn.decomposition import PCA as SklearnPCA
from sklearn.kernel_approximation import Nystroem as SklearnNystroem
from sklearn.neighbors import KNeighborsClassifier as SklearnKNN
from sklearn.pipeline import FeatureUnion
union = FeatureUnion(
[
(
"selectkbest_pca",
sklearn.pipeline.make_pipeline(
SkSelectKBest(k=3), SklearnPCA(n_components=1)
),
),
("nys", SklearnNystroem(n_components=2, random_state=42)),
]
)
sklearn_pipeline = sklearn.pipeline.make_pipeline(union, SklearnKNN())
lale_pipeline = typing.cast(
TrainablePipeline,
import_from_sklearn_pipeline(sklearn_pipeline),
)
self.assertEqual(len(lale_pipeline.edges()), 4)
# These assertions assume topological sort
self.assertIsInstance(lale_pipeline.edges()[0][0], SelectKBest) # type: ignore
self.assertIsInstance(lale_pipeline.edges()[0][1], PCA) # type: ignore
self.assertIsInstance(lale_pipeline.edges()[1][0], PCA) # type: ignore
self.assertIsInstance(lale_pipeline.edges()[1][1], ConcatFeatures) # type: ignore
self.assertIsInstance(lale_pipeline.edges()[2][0], Nystroem) # type: ignore
self.assertIsInstance(lale_pipeline.edges()[2][1], ConcatFeatures) # type: ignore
self.assertIsInstance(lale_pipeline.edges()[3][0], ConcatFeatures) # type: ignore
self.assertIsInstance(lale_pipeline.edges()[3][1], KNeighborsClassifier) # type: ignore
self.assert_equal_predictions(sklearn_pipeline, lale_pipeline)
def test_import_from_sklearn_pipeline_nested_pipeline1(self):
from sklearn.decomposition import PCA as SklearnPCA
from sklearn.kernel_approximation import Nystroem as SklearnNystroem
from sklearn.neighbors import KNeighborsClassifier as SklearnKNN
from sklearn.pipeline import FeatureUnion
union = FeatureUnion(
[
(
"selectkbest_pca",
sklearn.pipeline.make_pipeline(
SkSelectKBest(k=3),
FeatureUnion(
[
("pca", SklearnPCA(n_components=1)),
(
"nested_pipeline",
sklearn.pipeline.make_pipeline(
SkSelectKBest(k=2), SklearnNystroem()
),
),
]
),
),
),
("nys", SklearnNystroem(n_components=2, random_state=42)),
]
)
sklearn_pipeline = sklearn.pipeline.make_pipeline(union, SklearnKNN())
lale_pipeline = typing.cast(
TrainablePipeline,
import_from_sklearn_pipeline(sklearn_pipeline),
)
self.assertEqual(len(lale_pipeline.edges()), 8)
# These assertions assume topological sort, which may not be unique. So the assertions are brittle.
self.assertIsInstance(lale_pipeline.edges()[0][0], SelectKBest) # type: ignore
self.assertIsInstance(lale_pipeline.edges()[0][1], PCA) # type: ignore
self.assertIsInstance(lale_pipeline.edges()[1][0], SelectKBest) # type: ignore
self.assertIsInstance(lale_pipeline.edges()[1][1], SelectKBest) # type: ignore
self.assertIsInstance(lale_pipeline.edges()[2][0], SelectKBest) # type: ignore
self.assertIsInstance(lale_pipeline.edges()[2][1], Nystroem) # type: ignore
self.assertIsInstance(lale_pipeline.edges()[3][0], PCA) # type: ignore
self.assertIsInstance(lale_pipeline.edges()[3][1], ConcatFeatures) # type: ignore
self.assertIsInstance(lale_pipeline.edges()[4][0], Nystroem) # type: ignore
self.assertIsInstance(lale_pipeline.edges()[4][1], ConcatFeatures) # type: ignore
self.assertIsInstance(lale_pipeline.edges()[5][0], ConcatFeatures) # type: ignore
self.assertIsInstance(lale_pipeline.edges()[5][1], ConcatFeatures) # type: ignore
self.assertIsInstance(lale_pipeline.edges()[6][0], Nystroem) # type: ignore
self.assertIsInstance(lale_pipeline.edges()[6][1], ConcatFeatures) # type: ignore
self.assertIsInstance(lale_pipeline.edges()[7][0], ConcatFeatures) # type: ignore
self.assertIsInstance(lale_pipeline.edges()[7][1], KNeighborsClassifier) # type: ignore
self.assert_equal_predictions(sklearn_pipeline, lale_pipeline)
def test_import_from_sklearn_pipeline_nested_pipeline2(self):
from sklearn.decomposition import PCA as SklearnPCA
from sklearn.kernel_approximation import Nystroem as SklearnNystroem
from sklearn.neighbors import KNeighborsClassifier as SklearnKNN
from sklearn.pipeline import FeatureUnion
union = FeatureUnion(
[
(
"selectkbest_pca",
sklearn.pipeline.make_pipeline(
SkSelectKBest(k=3),
sklearn.pipeline.make_pipeline(
SkSelectKBest(k=2), SklearnPCA()
),
),
),
("nys", SklearnNystroem(n_components=2, random_state=42)),
]
)
sklearn_pipeline = sklearn.pipeline.make_pipeline(union, SklearnKNN())
lale_pipeline = typing.cast(
TrainablePipeline,
import_from_sklearn_pipeline(sklearn_pipeline),
)
self.assertEqual(len(lale_pipeline.edges()), 5)
self.assertIsInstance(lale_pipeline.edges()[0][0], SelectKBest) # type: ignore
self.assertIsInstance(lale_pipeline.edges()[0][1], SelectKBest) # type: ignore
self.assertIsInstance(lale_pipeline.edges()[1][0], SelectKBest) # type: ignore
self.assertIsInstance(lale_pipeline.edges()[1][1], PCA) # type: ignore
self.assertIsInstance(lale_pipeline.edges()[2][0], PCA) # type: ignore
self.assertIsInstance(lale_pipeline.edges()[2][1], ConcatFeatures) # type: ignore
self.assertIsInstance(lale_pipeline.edges()[3][0], Nystroem) # type: ignore
self.assertIsInstance(lale_pipeline.edges()[3][1], ConcatFeatures) # type: ignore
self.assertIsInstance(lale_pipeline.edges()[4][0], ConcatFeatures) # type: ignore
self.assertIsInstance(lale_pipeline.edges()[4][1], KNeighborsClassifier) # type: ignore
self.assert_equal_predictions(sklearn_pipeline, lale_pipeline)
def test_import_from_sklearn_pipeline_noop(self):
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.pipeline import Pipeline
pipe = Pipeline([("noop", None), ("gbc", GradientBoostingClassifier())])
_ = import_from_sklearn_pipeline(pipe)
def test_import_from_sklearn_pipeline_noop1(self):
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.pipeline import Pipeline
pipe = Pipeline([("noop", NoOp()), ("gbc", GradientBoostingClassifier())])
_ = import_from_sklearn_pipeline(pipe)
def test_import_from_sklearn_pipeline_no_wrapper(self):
from sklearn.neighbors import LocalOutlierFactor
from sklearn.pipeline import make_pipeline as sk_make_pipeline
sklearn_pipeline = sk_make_pipeline(PCA(), LocalOutlierFactor())
_ = import_from_sklearn_pipeline(sklearn_pipeline, fitted=False)
def test_import_from_sklearn_pipeline_higherorder(self):
from sklearn.ensemble import VotingClassifier as VC
from sklearn.feature_selection import f_regression
from sklearn.pipeline import Pipeline
from sklearn.svm import SVC as SklearnSVC
anova_filter = SkSelectKBest(f_regression, k=3)
clf = SklearnSVC(kernel="linear")
sklearn_pipeline = Pipeline(
[("anova", anova_filter), ("vc_svc", VC(estimators=[("clf", clf)]))]
)
lale_pipeline = typing.cast(
TrainablePipeline,
import_from_sklearn_pipeline(sklearn_pipeline),
)
# for i, pipeline_step in enumerate(sklearn_pipeline.named_steps):
# sklearn_step_params = sklearn_pipeline.named_steps[
# pipeline_step
# ].get_params()
# lale_sklearn_params = self.get_sklearn_params(lale_pipeline.steps_list()[i])
# self.assertEqual(sklearn_step_params, lale_sklearn_params)
self.assert_equal_predictions(sklearn_pipeline, lale_pipeline)
def test_export_to_sklearn_pipeline(self):
lale_pipeline = PCA(n_components=3) >> KNeighborsClassifier()
trained_lale_pipeline = lale_pipeline.fit(self.X_train, self.y_train)
sklearn_pipeline = trained_lale_pipeline.export_to_sklearn_pipeline()
for i, pipeline_step in enumerate(sklearn_pipeline.named_steps):
sklearn_step_params = sklearn_pipeline.named_steps[
pipeline_step
].get_params()
lale_sklearn_params = self.get_sklearn_params(
trained_lale_pipeline.steps_list()[i]
)
self.assertEqual(sklearn_step_params, lale_sklearn_params)
self.assert_equal_predictions(sklearn_pipeline, trained_lale_pipeline)
def test_export_to_sklearn_pipeline1(self):
lale_pipeline = SkSelectKBest(k=3) >> KNeighborsClassifier()
trained_lale_pipeline = lale_pipeline.fit(self.X_train, self.y_train)
sklearn_pipeline = trained_lale_pipeline.export_to_sklearn_pipeline()
for i, pipeline_step in enumerate(sklearn_pipeline.named_steps):
sklearn_step_params = type(sklearn_pipeline.named_steps[pipeline_step])
lale_sklearn_params = (
type(trained_lale_pipeline.steps_list()[i]._impl._wrapped_model)
if hasattr(
trained_lale_pipeline.steps_list()[i]._impl, "_wrapped_model"
)
else type(trained_lale_pipeline.steps_list()[i]._impl)
)
self.assertEqual(sklearn_step_params, lale_sklearn_params)
self.assert_equal_predictions(sklearn_pipeline, trained_lale_pipeline)
def test_export_to_sklearn_pipeline2(self):
from sklearn.pipeline import FeatureUnion
lale_pipeline = (
(
(
(PCA(svd_solver="randomized", random_state=42) & SkSelectKBest(k=3))
>> ConcatFeatures()
)
& Nystroem(random_state=42)
)
>> ConcatFeatures()
>> KNeighborsClassifier()
)
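        # `&` composes operators in parallel; together with ConcatFeatures this
        # exports to a (here nested) sklearn FeatureUnion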
trained_lale_pipeline = lale_pipeline.fit(self.X_train, self.y_train)
sklearn_pipeline = trained_lale_pipeline.export_to_sklearn_pipeline()
self.assertIsInstance(
sklearn_pipeline.named_steps["featureunion"], FeatureUnion
)
from sklearn.neighbors import KNeighborsClassifier as SklearnKNN
self.assertIsInstance(
sklearn_pipeline.named_steps["kneighborsclassifier"], SklearnKNN
)
self.assert_equal_predictions(sklearn_pipeline, trained_lale_pipeline)
def test_export_to_sklearn_pipeline3(self):
from sklearn.pipeline import FeatureUnion
lale_pipeline = (
(
(PCA() >> SkSelectKBest(k=2))
& (Nystroem(random_state=42) >> SkSelectKBest(k=3))
& (SkSelectKBest(k=3))
)
>> ConcatFeatures()
>> SkSelectKBest(k=2)
>> LogisticRegression()
)
trained_lale_pipeline = lale_pipeline.fit(self.X_train, self.y_train)
sklearn_pipeline = trained_lale_pipeline.export_to_sklearn_pipeline()
self.assertIsInstance(
sklearn_pipeline.named_steps["featureunion"], FeatureUnion
)
self.assertIsInstance(
sklearn_pipeline.named_steps["selectkbest"], SkSelectKBest
)
from sklearn.linear_model import LogisticRegression as SklearnLR
self.assertIsInstance(
sklearn_pipeline.named_steps["logisticregression"], SklearnLR
)
self.assert_equal_predictions(sklearn_pipeline, trained_lale_pipeline)
def test_export_to_sklearn_pipeline4(self):
lale_pipeline = make_pipeline(LogisticRegression())
trained_lale_pipeline = lale_pipeline.fit(self.X_train, self.y_train)
sklearn_pipeline = trained_lale_pipeline.export_to_sklearn_pipeline()
from sklearn.linear_model import LogisticRegression as SklearnLR
self.assertIsInstance(
sklearn_pipeline.named_steps["logisticregression"], SklearnLR
)
self.assert_equal_predictions(sklearn_pipeline, trained_lale_pipeline)
def test_export_to_sklearn_pipeline5(self):
lale_pipeline = PCA() >> (XGBClassifier() | SGDClassifier())
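        # the pipeline still contains an unresolved OperatorChoice, so it cannot be
        # exported to a plain sklearn pipeline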
with self.assertRaises(ValueError):
_ = lale_pipeline.export_to_sklearn_pipeline()
def test_export_to_pickle(self):
lale_pipeline = make_pipeline(LogisticRegression())
trained_lale_pipeline = lale_pipeline.fit(self.X_train, self.y_train)
pickle.dumps(lale_pipeline)
pickle.dumps(trained_lale_pipeline)
def test_import_from_sklearn_pipeline2(self):
from sklearn.feature_selection import f_regression
from sklearn.pipeline import Pipeline
from sklearn.svm import SVC as SklearnSVC
anova_filter = SkSelectKBest(f_regression, k=3)
clf = SklearnSVC(kernel="linear")
sklearn_pipeline = Pipeline([("anova", anova_filter), ("svc", clf)])
sklearn_pipeline.fit(self.X_train, self.y_train)
lale_pipeline = typing.cast(
TrainedPipeline,
import_from_sklearn_pipeline(sklearn_pipeline),
)
lale_pipeline.predict(self.X_test)
def test_import_from_sklearn_pipeline3(self):
from sklearn.feature_selection import f_regression
from sklearn.pipeline import Pipeline
from sklearn.svm import SVC as SklearnSVC
anova_filter = SkSelectKBest(f_regression, k=3)
clf = SklearnSVC(kernel="linear")
sklearn_pipeline = Pipeline([("anova", anova_filter), ("svc", clf)])
lale_pipeline = typing.cast(
TrainablePipeline,
import_from_sklearn_pipeline(sklearn_pipeline, fitted=False),
)
with self.assertRaises(
ValueError
): # fitted=False returns a Trainable, so calling predict is invalid.
lale_pipeline.predict(self.X_test)
def test_export_to_sklearn_pipeline_with_noop_1(self):
lale_pipeline = NoOp() >> PCA(n_components=3) >> KNeighborsClassifier()
trained_lale_pipeline = lale_pipeline.fit(self.X_train, self.y_train)
sklearn_pipeline = trained_lale_pipeline.export_to_sklearn_pipeline()
self.assert_equal_predictions(sklearn_pipeline, trained_lale_pipeline)
def test_export_to_sklearn_pipeline_with_noop_2(self):
lale_pipeline = PCA(n_components=3) >> NoOp() >> KNeighborsClassifier()
trained_lale_pipeline = lale_pipeline.fit(self.X_train, self.y_train)
sklearn_pipeline = trained_lale_pipeline.export_to_sklearn_pipeline()
self.assert_equal_predictions(sklearn_pipeline, trained_lale_pipeline)
def test_export_to_sklearn_pipeline_with_noop_3(self):
        # This test is probably unnecessary, but it does no harm at this point
lale_pipeline = PCA(n_components=3) >> KNeighborsClassifier() >> NoOp()
trained_lale_pipeline = lale_pipeline.fit(self.X_train, self.y_train)
_ = trained_lale_pipeline.export_to_sklearn_pipeline()
def test_export_to_sklearn_pipeline_with_noop_4(self):
lale_pipeline = NoOp() >> KNeighborsClassifier()
trained_lale_pipeline = lale_pipeline.fit(self.X_train, self.y_train)
sklearn_pipeline = trained_lale_pipeline.export_to_sklearn_pipeline()
self.assert_equal_predictions(sklearn_pipeline, trained_lale_pipeline)
class TestComposition(unittest.TestCase):
def setUp(self):
data = sklearn.datasets.load_iris()
X, y = data.data, data.target
self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(X, y)
def test_two_estimators_predict(self):
pipeline = (
StandardScaler()
>> (PCA() & Nystroem() & LogisticRegression())
>> ConcatFeatures()
>> NoOp()
>> LogisticRegression()
)
trained = pipeline.fit(self.X_train, self.y_train)
trained.predict(self.X_test)
def test_two_estimators_predict1(self):
pipeline = (
StandardScaler()
>> (PCA() & Nystroem() & PassiveAggressiveClassifier())
>> ConcatFeatures()
>> NoOp()
>> PassiveAggressiveClassifier()
)
trained = pipeline.fit(self.X_train, self.y_train)
trained.predict(self.X_test)
def test_two_estimators_predict_proba(self):
pipeline = (
StandardScaler()
>> (PCA() & Nystroem() & LogisticRegression())
>> ConcatFeatures()
>> NoOp()
>> LogisticRegression()
)
trained = pipeline.fit(self.X_train, self.y_train)
trained.predict_proba(self.X_test)
def test_two_estimators_predict_proba1(self):
pipeline = (
StandardScaler()
>> (PCA() & Nystroem() & GaussianNB())
>> ConcatFeatures()
>> NoOp()
>> GaussianNB()
)
pipeline.fit(self.X_train, self.y_train)
pipeline.predict_proba(self.X_test)
def test_multiple_estimators_predict_predict_proba(self):
pipeline = (
StandardScaler()
>> (LogisticRegression() & PCA())
>> ConcatFeatures()
>> (NoOp() & LinearSVC())
>> ConcatFeatures()
>> KNeighborsClassifier()
)
pipeline.fit(self.X_train, self.y_train)
_ = pipeline.predict_proba(self.X_test)
_ = pipeline.predict(self.X_test)
def test_two_transformers(self):
tfm1 = PCA()
tfm2 = Nystroem()
trainable = tfm1 >> tfm2
digits = sklearn.datasets.load_digits()
trained = trainable.fit(digits.data, digits.target)
_ = trained.transform(digits.data)
def test_duplicate_instances(self):
tfm = PCA()
clf = LogisticRegression(
LogisticRegression.enum.solver.lbfgs,
LogisticRegression.enum.multi_class.auto,
)
with self.assertRaises(ValueError):
_ = make_pipeline(tfm, tfm, clf)
def test_increase_num_rows_predict(self):
from test.mock_custom_operators import IncreaseRows
increase_rows = IncreaseRows()
trainable = increase_rows >> LogisticRegression()
iris = sklearn.datasets.load_iris()
X, y = iris.data, iris.target
trained = trainable.fit(X, y)
y_pred = trained.predict(X)
self.assertEqual(len(y_pred), len(y) + increase_rows.impl.n_rows)
def test_increase_num_rows_transform_X_y(self):
from test.mock_custom_operators import IncreaseRows
increase_rows_4 = IncreaseRows(n_rows=4)
increase_rows_2 = IncreaseRows(n_rows=2)
trainable = increase_rows_4 >> increase_rows_2
iris = sklearn.datasets.load_iris()
X, y = iris.data, iris.target
trained = trainable.fit(X, y)
output_X, output_y = trained.transform_X_y(X, y)
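        # transform_X_y lets an operator change X and y together; each IncreaseRows
        # step appends its n_rows synthetic rows, hence the 4 + 2 below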
self.assertEqual(output_X.shape[0], X.shape[0] + 4 + 2)
self.assertEqual(output_X.shape[1], X.shape[1])
self.assertEqual(output_y.shape[0], y.shape[0] + 4 + 2)
def test_remove_last1(self):
pipeline = (
StandardScaler()
>> (PCA() & Nystroem() & PassiveAggressiveClassifier())
>> ConcatFeatures()
>> NoOp()
>> PassiveAggressiveClassifier()
)
new_pipeline = pipeline.remove_last()
self.assertEqual(len(new_pipeline._steps), 6)
self.assertEqual(len(pipeline._steps), 7)
def test_remove_last2(self):
pipeline = (
StandardScaler()
>> (PCA() & Nystroem() & PassiveAggressiveClassifier())
>> ConcatFeatures()
>> NoOp()
>> (PassiveAggressiveClassifier() & LogisticRegression())
)
with self.assertRaises(ValueError):
pipeline.remove_last()
def test_remove_last3(self):
pipeline = (
StandardScaler()
>> (PCA() & Nystroem() & PassiveAggressiveClassifier())
>> ConcatFeatures()
>> NoOp()
>> PassiveAggressiveClassifier()
)
pipeline.remove_last().freeze_trainable()
def test_remove_last4(self):
pipeline = (
StandardScaler()
>> (PCA() & Nystroem() & PassiveAggressiveClassifier())
>> ConcatFeatures()
>> NoOp()
>> PassiveAggressiveClassifier()
)
new_pipeline = pipeline.remove_last(inplace=True)
self.assertEqual(len(new_pipeline._steps), 6)
self.assertEqual(len(pipeline._steps), 6)
def test_remove_last5(self):
pipeline = (
StandardScaler()
>> (PCA() & Nystroem() & PassiveAggressiveClassifier())
>> ConcatFeatures()
>> NoOp()
>> PassiveAggressiveClassifier()
)
pipeline.remove_last(inplace=True).freeze_trainable()
class TestAutoPipeline(unittest.TestCase):
def _fit_predict(self, prediction_type, all_X, all_y, verbose=True):
if verbose:
_file_name, _line, fn_name, _text = traceback.extract_stack()[-2]
print(f"--- TestAutoPipeline.{fn_name}() ---")
from lale.lib.lale import AutoPipeline
train_X, test_X, train_y, test_y = train_test_split(all_X, all_y)
trainable = AutoPipeline(
prediction_type=prediction_type, max_evals=10, verbose=verbose
)
trained = trainable.fit(train_X, train_y)
predicted = trained.predict(test_X)
if prediction_type == "regression":
score = f"r2 score {r2_score(test_y, predicted):.2f}"
else:
score = f"accuracy {accuracy_score(test_y, predicted):.1%}"
if verbose:
print(score)
pipe = trained.get_pipeline()
assert pipe is not None
print(pipe.pretty_print(show_imports=False))
def test_sklearn_iris(self):
# classification, only numbers, no missing values
all_X, all_y = sklearn.datasets.load_iris(return_X_y=True)
self._fit_predict("classification", all_X, all_y)
def test_sklearn_digits(self):
# classification, numbers but some appear categorical, no missing values
all_X, all_y = sklearn.datasets.load_digits(return_X_y=True)
self._fit_predict("classification", all_X, all_y)
def test_sklearn_boston(self):
# regression, categoricals+numbers, no missing values
from lale.datasets.util import load_boston
all_X, all_y = load_boston(return_X_y=True)
self._fit_predict("regression", all_X, all_y)
def test_sklearn_diabetes(self):
# regression, categoricals+numbers, no missing values
all_X, all_y = sklearn.datasets.load_diabetes(return_X_y=True)
self._fit_predict("regression", all_X, all_y)
def test_openml_creditg(self):
# classification, categoricals+numbers incl. string, no missing values
(orig_train_X, orig_train_y), _ = lale.datasets.openml.fetch(
"credit-g", "classification", preprocess=False
)
subsample_X, _, subsample_y, _ = train_test_split(
orig_train_X, orig_train_y, train_size=0.05
)
self._fit_predict("classification", subsample_X, subsample_y)
def test_missing_iris(self):
# classification, only numbers, synthetically added missing values
all_X, all_y = sklearn.datasets.load_iris(return_X_y=True)
with_missing_X = lale.helpers.add_missing_values(all_X)
with self.assertRaisesRegex(ValueError, "Input.*contains NaN"):
lr_trainable = LogisticRegression()
_ = lr_trainable.fit(with_missing_X, all_y)
self._fit_predict("classification", with_missing_X, all_y)
def test_missing_boston(self):
# regression, categoricals+numbers, synthetically added missing values
from lale.datasets.util import load_boston
all_X, all_y = load_boston(return_X_y=True)
with_missing_X = lale.helpers.add_missing_values(all_X)
with self.assertRaisesRegex(ValueError, "Input.*contains NaN"):
lr_trainable = LinearRegression()
_ = lr_trainable.fit(with_missing_X, all_y)
self._fit_predict("regression", with_missing_X, all_y)
def test_missing_creditg(self):
# classification, categoricals+numbers incl. string, synth. missing
(orig_train_X, orig_train_y), _ = lale.datasets.openml.fetch(
"credit-g", "classification", preprocess=False
)
subsample_X, _, subsample_y, _ = train_test_split(
orig_train_X, orig_train_y, train_size=0.05
)
with_missing_X = lale.helpers.add_missing_values(subsample_X)
self._fit_predict("classification", with_missing_X, subsample_y)
class TestOperatorChoice(unittest.TestCase):
def test_make_choice_with_instance(self):
from sklearn.datasets import load_iris
iris = load_iris()
X, y = iris.data, iris.target
tfm = PCA() | Nystroem() | NoOp()
with self.assertRaises(AttributeError):
# we are trying to trigger a runtime error here, so we ignore the static warning
_ = tfm.fit(X, y) # type: ignore
_ = (OneHotEncoder | NoOp) >> tfm >> (LogisticRegression | KNeighborsClassifier)
_ = (
(OneHotEncoder | NoOp)
>> (PCA | Nystroem)
>> (LogisticRegression | KNeighborsClassifier)
)
_ = (
make_choice(OneHotEncoder, NoOp)
>> make_choice(PCA, Nystroem)
>> make_choice(LogisticRegression, KNeighborsClassifier)
)
class TestScore(unittest.TestCase):
def setUp(self):
data = sklearn.datasets.load_iris()
X, y = data.data, data.target
self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(X, y)
def test_trained_pipeline(self):
trainable_pipeline = StandardScaler() >> LogisticRegression()
trained_pipeline = trainable_pipeline.fit(self.X_train, self.y_train)
score = trained_pipeline.score(self.X_test, self.y_test)
predictions = trained_pipeline.predict(self.X_test)
accuracy = accuracy_score(self.y_test, predictions)
self.assertEqual(accuracy, score)
def test_trainable_pipeline(self):
trainable_pipeline = StandardScaler() >> LogisticRegression()
trainable_pipeline.fit(self.X_train, self.y_train)
score = trainable_pipeline.score(self.X_test, self.y_test)
predictions = trainable_pipeline.predict(self.X_test)
accuracy = accuracy_score(self.y_test, predictions)
self.assertEqual(accuracy, score)
def test_planned_pipeline(self):
planned_pipeline = StandardScaler >> LogisticRegression
with self.assertRaises(AttributeError):
planned_pipeline.score(self.X_test, self.y_test) # type: ignore
class TestScoreSamples(unittest.TestCase):
def setUp(self):
data = sklearn.datasets.load_iris()
X, y = data.data, data.target
self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(X, y)
import warnings
warnings.filterwarnings("ignore")
def test_trained_pipeline(self):
trainable_pipeline = StandardScaler() >> IsolationForest()
trained_pipeline = trainable_pipeline.fit(self.X_train, self.y_train)
_ = trained_pipeline.score_samples(self.X_test)
def test_trainable_pipeline(self):
trainable_pipeline = StandardScaler() >> IsolationForest()
trainable_pipeline.fit(self.X_train, self.y_train)
with self.assertWarns(DeprecationWarning):
_ = trainable_pipeline.score_samples(self.X_test)
def test_planned_pipeline(self):
planned_pipeline = StandardScaler >> IsolationForest
with self.assertRaises(AttributeError):
planned_pipeline.score_samples(self.X_test) # type: ignore
def test_with_incompatible_estimator(self):
trainable_pipeline = StandardScaler() >> LogisticRegression()
trained_pipeline = trainable_pipeline.fit(self.X_train, self.y_train)
with self.assertRaises(AttributeError):
_ = trained_pipeline.score_samples(self.X_test)
class TestPredictLogProba(unittest.TestCase):
def setUp(self):
data = sklearn.datasets.load_iris()
X, y = data.data, data.target
self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(X, y)
import warnings
warnings.filterwarnings("ignore")
def test_trained_pipeline(self):
trainable_pipeline = StandardScaler() >> AdaBoostClassifier()
trained_pipeline = trainable_pipeline.fit(self.X_train, self.y_train)
_ = trained_pipeline.predict_log_proba(self.X_test)
def test_trainable_pipeline(self):
trainable_pipeline = StandardScaler() >> AdaBoostClassifier()
trainable_pipeline.fit(self.X_train, self.y_train)
with self.assertWarns(DeprecationWarning):
_ = trainable_pipeline.predict_log_proba(self.X_test)
def test_planned_pipeline(self):
planned_pipeline = StandardScaler >> AdaBoostClassifier
with self.assertRaises(AttributeError):
planned_pipeline.predict_log_proba(self.X_test) # type: ignore
def test_with_incompatible_estimator(self):
trainable_pipeline = StandardScaler() >> IsolationForest()
trained_pipeline = trainable_pipeline.fit(self.X_train, self.y_train)
with self.assertRaises(AttributeError):
_ = trained_pipeline.predict_log_proba(self.X_test)
def test_with_incompatible_estimator_1(self):
trainable_pipeline = IsolationForest()
trained_pipeline = trainable_pipeline.fit(self.X_train, self.y_train)
with self.assertRaises(AttributeError):
_ = trained_pipeline.predict_log_proba(self.X_test)
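# Incremental-learning pattern exercised below: train a preprocessing prefix
# once, freeze it with freeze_trained(), and append an estimator that supports
# partial_fit (SGDClassifier). Each partial_fit call then only updates the
# estimator; the first call must pass `classes` so SGDClassifier knows the
# full label set. Sketch (names as used in the tests):
#   prefix = StandardScaler().fit(X1, y1).freeze_trained()
#   model = (prefix >> SGDClassifier()).partial_fit(X1, y1, classes=[0, 1, 2])
#   model = model.partial_fit(X2, y2)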
class TestPartialFit(unittest.TestCase):
def setUp(self):
data = sklearn.datasets.load_iris()
X, y = data.data, data.target
self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(X, y)
import warnings
warnings.filterwarnings("ignore")
def test_first_call(self):
trainable_pipeline = StandardScaler()
trained_pipeline = trainable_pipeline.fit(self.X_train, self.y_train)
new_pipeline = trained_pipeline.freeze_trained() >> SGDClassifier()
new_trained_pipeline = new_pipeline.partial_fit(
self.X_train, self.y_train, classes=[0, 1, 2]
)
_ = new_trained_pipeline.predict(self.X_test)
def test_multiple_calls_with_classes(self):
trainable_pipeline = StandardScaler()
trained_pipeline = trainable_pipeline.fit(self.X_train, self.y_train)
new_pipeline = trained_pipeline.freeze_trained() >> SGDClassifier()
new_trained_pipeline = new_pipeline.partial_fit(
self.X_train, self.y_train, classes=[0, 1, 2]
)
new_trained_pipeline = new_trained_pipeline.partial_fit(
self.X_test, self.y_test, classes=[0, 1, 2]
)
_ = new_trained_pipeline.predict(self.X_test)
def _last_impl_has(self, op, attr):
last = op.get_last()
assert last is not None
return hasattr(last._impl, attr)
def test_second_call_without_classes(self):
trainable_pipeline = StandardScaler()
trained_pipeline = trainable_pipeline.fit(self.X_train, self.y_train)
new_pipeline = trained_pipeline.freeze_trained() >> SGDClassifier()
new_trained_pipeline = new_pipeline.partial_fit(
self.X_train, self.y_train, classes=[0, 1, 2]
)
# Once SGDClassifier is trained, it has a classes_ attribute.
self.assertTrue(self._last_impl_has(new_trained_pipeline, "classes_"))
new_trained_pipeline = new_trained_pipeline.partial_fit(
self.X_test, self.y_test
)
_ = new_trained_pipeline.predict(self.X_test)
def test_second_call_with_different_classes(self):
trainable_pipeline = StandardScaler()
trained_pipeline = trainable_pipeline.fit(self.X_train, self.y_train)
new_pipeline = trained_pipeline.freeze_trained() >> SGDClassifier()
new_trained_pipeline = new_pipeline.partial_fit(
self.X_train, self.y_train, classes=[0, 1, 2]
)
# Once SGDClassifier is trained, it has a classes_ attribute.
self.assertTrue(self._last_impl_has(new_trained_pipeline, "classes_"))
subset_labels = self.y_test[np.where(self.y_test != 0)]
subset_X = self.X_test[0 : len(subset_labels)]
new_trained_pipeline = new_trained_pipeline.partial_fit(subset_X, subset_labels)
_ = new_trained_pipeline.predict(self.X_test)
def test_second_call_with_different_classes_trainable(self):
trainable_pipeline = StandardScaler()
trained_pipeline = trainable_pipeline.fit(self.X_train, self.y_train)
new_pipeline = trained_pipeline.freeze_trained() >> SGDClassifier()
new_pipeline.partial_fit(self.X_train, self.y_train, classes=[0, 1, 2])
# Once SGDClassifier is trained, it has a classes_ attribute.
self.assertTrue(self._last_impl_has(new_pipeline._trained, "classes_"))
subset_labels = self.y_test[np.where(self.y_test != 0)]
subset_X = self.X_test[0 : len(subset_labels)]
new_trained_pipeline = new_pipeline.partial_fit(subset_X, subset_labels)
_ = new_trained_pipeline.predict(self.X_test)
def test_call_on_trainable(self):
trainable_pipeline = StandardScaler()
trained_pipeline = trainable_pipeline.fit(self.X_train, self.y_train)
new_pipeline = trained_pipeline.freeze_trained() >> SGDClassifier()
new_pipeline.partial_fit(self.X_train, self.y_train, classes=[0, 1, 2])
new_pipeline.pretty_print()
new_trained_pipeline = new_pipeline.partial_fit(
self.X_test, self.y_test, classes=[0, 1, 2]
)
self.assertEqual(new_trained_pipeline, new_pipeline._trained)
_ = new_trained_pipeline.predict(self.X_test)
new_pipeline.partial_fit(self.X_train, self.y_train, classes=[0, 1, 2])
def test_call_on_trainable_with_freeze_trained_prefix(self):
trainable_pipeline = StandardScaler()
trained_pipeline = trainable_pipeline.fit(self.X_train, self.y_train)
new_pipeline = trained_pipeline >> SGDClassifier()
new_pipeline.partial_fit(self.X_train, self.y_train, classes=[0, 1, 2])
new_pipeline.pretty_print()
new_trained_pipeline = new_pipeline.partial_fit(
self.X_test, self.y_test, classes=[0, 1, 2]
)
self.assertEqual(new_trained_pipeline, new_pipeline._trained)
_ = new_trained_pipeline.predict(self.X_test)
new_pipeline.partial_fit(self.X_train, self.y_train, classes=[0, 1, 2])
def test_call_on_trainable_with_freeze_trained_prefix_false(self):
trainable_pipeline = StandardScaler()
trained_pipeline = trainable_pipeline.fit(self.X_train, self.y_train)
new_pipeline = trained_pipeline >> SGDClassifier()
with self.assertRaises(ValueError):
new_pipeline.partial_fit(
self.X_train,
self.y_train,
freeze_trained_prefix=False,
classes=[0, 1, 2],
)
def test_call_on_trained_with_freeze_trained_prefix(self):
trainable_pipeline = StandardScaler() >> SGDClassifier()
trained_pipeline = trainable_pipeline.fit(self.X_train, self.y_train)
new_pipeline = trained_pipeline
new_pipeline.partial_fit(self.X_train, self.y_train, classes=[0, 1, 2])
new_pipeline.pretty_print()
new_trained_pipeline = new_pipeline.partial_fit(
self.X_test, self.y_test, classes=[0, 1, 2]
)
_ = new_trained_pipeline.predict(self.X_test)
new_pipeline.partial_fit(self.X_train, self.y_train, classes=[0, 1, 2])
def test_call_on_trained_with_freeze_trained_prefix_false(self):
trainable_pipeline = StandardScaler() >> SGDClassifier()
trained_pipeline = trainable_pipeline.fit(self.X_train, self.y_train)
new_pipeline = trained_pipeline
with self.assertRaises(ValueError):
new_pipeline.partial_fit(
self.X_train,
self.y_train,
freeze_trained_prefix=False,
classes=[0, 1, 2],
)
| 48,983 | 42.348673 | 107 |
py
|
lale
|
lale-master/test/test_aif360_ensembles.py
|
# Copyright 2020 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from typing import Any, Dict
try:
import tensorflow as tf
tensorflow_installed = True
except ImportError:
tensorflow_installed = False
from lale.lib.aif360 import (
CalibratedEqOddsPostprocessing,
DisparateImpactRemover,
PrejudiceRemover,
fair_stratified_train_test_split,
)
from lale.lib.aif360.adversarial_debiasing import AdversarialDebiasing
from lale.lib.aif360.datasets import fetch_creditg_df
from lale.lib.sklearn import (
AdaBoostClassifier,
BaggingClassifier,
DecisionTreeClassifier,
LogisticRegression,
StackingClassifier,
VotingClassifier,
)
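# The tests below combine sklearn ensembles with AIF360 mitigation at three
# points: pre-estimator (e.g. DisparateImpactRemover as a preprocessing step),
# in-estimator (e.g. PrejudiceRemover as the base estimator itself), and
# post-estimator (e.g. CalibratedEqOddsPostprocessing wrapping an estimator).
# The mitigator can sit either around the whole ensemble or inside its base
# or final estimators.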
class TestEnsemblesWithAIF360(unittest.TestCase):
train_X = None
train_y = None
test_X = None
fairness_info: Dict[str, Any] = {"temp": 0}
@classmethod
def setUpClass(cls) -> None:
X, y, fi = fetch_creditg_df(preprocess=True)
train_X, test_X, train_y, _ = fair_stratified_train_test_split(X, y, **fi)
cls.train_X = train_X
cls.train_y = train_y
cls.test_X = test_X
cls.fairness_info = fi
@classmethod
def _attempt_fit_predict(cls, model):
trained = model.fit(cls.train_X, cls.train_y)
trained.predict(cls.test_X)
def test_bagging_pre_estimator_mitigation_ensemble(self):
model = DisparateImpactRemover(**self.fairness_info) >> BaggingClassifier(
base_estimator=DecisionTreeClassifier()
)
self._attempt_fit_predict(model)
def test_bagging_post_estimator_mitigation_ensemble(self):
model = CalibratedEqOddsPostprocessing(
**self.fairness_info,
estimator=BaggingClassifier(base_estimator=DecisionTreeClassifier())
)
self._attempt_fit_predict(model)
def test_bagging_pre_estimator_mitigation_base(self):
model = BaggingClassifier(
base_estimator=DisparateImpactRemover(**self.fairness_info)
>> DecisionTreeClassifier()
)
self._attempt_fit_predict(model)
def test_bagging_in_estimator_mitigation_base(self):
model = BaggingClassifier(base_estimator=PrejudiceRemover(**self.fairness_info))
self._attempt_fit_predict(model)
def test_bagging_in_estimator_mitigation_base_1(self):
if tensorflow_installed:
tf.compat.v1.disable_eager_execution()
model = BaggingClassifier(
base_estimator=AdversarialDebiasing(**self.fairness_info),
n_estimators=2,
)
self._attempt_fit_predict(model)
def test_bagging_post_estimator_mitigation_base(self):
model = BaggingClassifier(
base_estimator=CalibratedEqOddsPostprocessing(
**self.fairness_info, estimator=DecisionTreeClassifier()
)
)
self._attempt_fit_predict(model)
def test_adaboost_pre_estimator_mitigation_ensemble(self):
model = DisparateImpactRemover(**self.fairness_info) >> AdaBoostClassifier(
base_estimator=DecisionTreeClassifier()
)
self._attempt_fit_predict(model)
def test_adaboost_post_estimator_mitigation_ensemble(self):
model = CalibratedEqOddsPostprocessing(
**self.fairness_info,
estimator=AdaBoostClassifier(base_estimator=DecisionTreeClassifier())
)
self._attempt_fit_predict(model)
def test_adaboost_pre_estimator_mitigation_base(self):
model = AdaBoostClassifier(
base_estimator=DisparateImpactRemover(**self.fairness_info)
>> DecisionTreeClassifier()
)
self._attempt_fit_predict(model)
def test_adaboost_in_estimator_mitigation_base(self):
model = AdaBoostClassifier(
base_estimator=PrejudiceRemover(**self.fairness_info)
)
self._attempt_fit_predict(model)
def test_adaboost_post_estimator_mitigation_base(self):
model = AdaBoostClassifier(
base_estimator=CalibratedEqOddsPostprocessing(
**self.fairness_info, estimator=DecisionTreeClassifier()
)
)
self._attempt_fit_predict(model)
def test_voting_pre_estimator_mitigation_ensemble(self):
model = DisparateImpactRemover(**self.fairness_info) >> VotingClassifier(
estimators=[("dtc", DecisionTreeClassifier()), ("lr", LogisticRegression())]
)
self._attempt_fit_predict(model)
@unittest.skip("TODO: find out why it does not find predict_proba")
def test_voting_post_estimator_mitigation_ensemble(self):
model = CalibratedEqOddsPostprocessing(
**self.fairness_info,
estimator=VotingClassifier(
estimators=[
("dtc", DecisionTreeClassifier()),
("lr", LogisticRegression()),
]
)
)
self._attempt_fit_predict(model)
def test_voting_pre_estimator_mitigation_base(self):
model = VotingClassifier(
estimators=[
(
"dir+dtc",
DisparateImpactRemover(**self.fairness_info)
>> DecisionTreeClassifier(),
),
("lr", LogisticRegression()),
]
)
self._attempt_fit_predict(model)
def test_voting_in_estimator_mitigation_base(self):
model = VotingClassifier(
estimators=[
("pr", PrejudiceRemover(**self.fairness_info)),
("lr", LogisticRegression()),
]
)
self._attempt_fit_predict(model)
def test_voting_post_estimator_mitigation_base(self):
model = VotingClassifier(
estimators=[
(
"dtc+ceop",
CalibratedEqOddsPostprocessing(
**self.fairness_info, estimator=DecisionTreeClassifier()
),
),
("lr", LogisticRegression()),
]
)
self._attempt_fit_predict(model)
def test_stacking_pre_estimator_mitigation_ensemble(self):
model = DisparateImpactRemover(**self.fairness_info) >> StackingClassifier(
estimators=[("dtc", DecisionTreeClassifier()), ("lr", LogisticRegression())]
)
self._attempt_fit_predict(model)
def test_stacking_post_estimator_mitigation_ensemble(self):
model = CalibratedEqOddsPostprocessing(
**self.fairness_info,
estimator=StackingClassifier(
estimators=[
("dtc", DecisionTreeClassifier()),
("lr", LogisticRegression()),
]
)
)
self._attempt_fit_predict(model)
def test_stacking_pre_estimator_mitigation_base_only(self):
model = StackingClassifier(
estimators=[
(
"dir+dtc",
DisparateImpactRemover(**self.fairness_info)
>> DecisionTreeClassifier(),
),
("lr", LogisticRegression()),
]
)
self._attempt_fit_predict(model)
def test_stacking_pre_estimator_mitigation_base_and_final(self):
model = StackingClassifier(
estimators=[
(
"dir+dtc",
DisparateImpactRemover(**self.fairness_info)
>> DecisionTreeClassifier(),
),
("lr", LogisticRegression()),
],
final_estimator=DisparateImpactRemover(**self.fairness_info)
>> DecisionTreeClassifier(),
passthrough=True,
)
self._attempt_fit_predict(model)
def test_stacking_pre_estimator_mitigation_final_only(self):
model = StackingClassifier(
estimators=[
("dtc", DecisionTreeClassifier()),
("lr", LogisticRegression()),
],
final_estimator=DisparateImpactRemover(**self.fairness_info)
>> DecisionTreeClassifier(),
passthrough=True,
)
self._attempt_fit_predict(model)
def test_stacking_in_estimator_mitigation_base_only(self):
model = StackingClassifier(
estimators=[
("pr", PrejudiceRemover(**self.fairness_info)),
("lr", LogisticRegression()),
]
)
self._attempt_fit_predict(model)
def test_stacking_in_estimator_mitigation_base_and_final(self):
model = StackingClassifier(
estimators=[
("pr", PrejudiceRemover(**self.fairness_info)),
("lr", LogisticRegression()),
],
final_estimator=PrejudiceRemover(**self.fairness_info),
passthrough=True,
)
self._attempt_fit_predict(model)
def test_stacking_in_estimator_mitigation_final_only(self):
model = StackingClassifier(
estimators=[
("dtc", DecisionTreeClassifier()),
("lr", LogisticRegression()),
],
final_estimator=PrejudiceRemover(**self.fairness_info),
passthrough=True,
)
self._attempt_fit_predict(model)
def test_stacking_post_estimator_mitigation_base_only(self):
model = StackingClassifier(
estimators=[
(
"dtc+ceop",
CalibratedEqOddsPostprocessing(
**self.fairness_info, estimator=DecisionTreeClassifier()
),
),
("lr", LogisticRegression()),
]
)
self._attempt_fit_predict(model)
def test_stacking_post_estimator_mitigation_base_and_final(self):
model = StackingClassifier(
estimators=[
(
"dtc+ceop",
CalibratedEqOddsPostprocessing(
**self.fairness_info, estimator=DecisionTreeClassifier()
),
),
("lr", LogisticRegression()),
],
final_estimator=CalibratedEqOddsPostprocessing(
**self.fairness_info, estimator=DecisionTreeClassifier()
),
passthrough=True,
)
self._attempt_fit_predict(model)
def test_stacking_post_estimator_mitigation_final_only(self):
model = StackingClassifier(
estimators=[
("dtc", DecisionTreeClassifier()),
("lr", LogisticRegression()),
],
final_estimator=CalibratedEqOddsPostprocessing(
**self.fairness_info, estimator=DecisionTreeClassifier()
),
passthrough=True,
)
self._attempt_fit_predict(model)
| 11,421 | 33.929664 | 88 |
py
|
lale
|
lale-master/test/test_autoai_libs.py
|
# Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import pandas as pd
import sklearn.datasets
import sklearn.model_selection
import lale.lib.autoai_libs
# from lale.datasets.uci import fetch_household_power_consumption
from lale.lib.autoai_libs import float32_transform
from lale.lib.lale import Hyperopt
from lale.lib.sklearn import LogisticRegression as LR
from lale.lib.xgboost.xgb_classifier import XGBClassifier
class TestAutoaiLibs(unittest.TestCase):
@classmethod
def setUpClass(cls):
iris = sklearn.datasets.load_iris()
iris_X, iris_y = iris.data, iris.target
(
iris_train_X,
iris_test_X,
iris_train_y,
iris_test_y,
) = sklearn.model_selection.train_test_split(iris_X, iris_y)
cls._iris = {
"train_X": iris_train_X,
"train_y": iris_train_y,
"test_X": iris_test_X,
"test_y": iris_test_y,
}
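    # Shared harness: each autoai_libs transformer is checked standalone
    # (fit then transform), as the head of a pipeline ending in
    # LogisticRegression, and inside a one-evaluation Hyperopt search.
    # Calling transform on the trainable (rather than the trained) operator
    # is expected to emit a DeprecationWarning.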
def doTest(self, trainable, train_X, train_y, test_X, test_y):
trained = trainable.fit(train_X, train_y)
_ = trained.transform(test_X)
with self.assertWarns(DeprecationWarning):
trainable.transform(train_X)
trainable.to_json()
trainable_pipeline = trainable >> float32_transform() >> LR()
trained_pipeline = trainable_pipeline.fit(train_X, train_y)
trained_pipeline.predict(test_X)
hyperopt = Hyperopt(estimator=trainable_pipeline, max_evals=1, verbose=True)
trained_hyperopt = hyperopt.fit(train_X, train_y)
trained_hyperopt.predict(test_X)
def test_NumpyColumnSelector(self):
trainable = lale.lib.autoai_libs.NumpyColumnSelector()
self.doTest(trainable, **self._iris)
def test_NumpyColumnSelector_pandas(self):
iris_X, iris_y = sklearn.datasets.load_iris(return_X_y=True, as_frame=True)
keys = ["train_X", "test_X", "train_y", "test_y"]
splits = sklearn.model_selection.train_test_split(iris_X, iris_y)
iris = dict(zip(keys, splits))
self.assertIsInstance(iris["train_X"], pd.DataFrame)
trainable = lale.lib.autoai_libs.NumpyColumnSelector(columns=[0, 2, 3])
self.doTest(trainable, **iris)
def test_CompressStrings(self):
n_columns = self._iris["train_X"].shape[1]
trainable = lale.lib.autoai_libs.CompressStrings(
dtypes_list=["int_num" for i in range(n_columns)],
misslist_list=[[] for i in range(n_columns)],
)
self.doTest(trainable, **self._iris)
def test_NumpyReplaceMissingValues(self):
trainable = lale.lib.autoai_libs.NumpyReplaceMissingValues()
self.doTest(trainable, **self._iris)
def test_NumpyReplaceUnknownValues(self):
trainable = lale.lib.autoai_libs.NumpyReplaceUnknownValues(filling_values=42.0)
self.doTest(trainable, **self._iris)
def test_boolean2float(self):
trainable = lale.lib.autoai_libs.boolean2float()
self.doTest(trainable, **self._iris)
def test_CatImputer(self):
trainable = lale.lib.autoai_libs.CatImputer()
self.doTest(trainable, **self._iris)
def test_CatEncoder(self):
trainable = lale.lib.autoai_libs.CatEncoder(
encoding="ordinal",
categories="auto",
dtype="float64",
handle_unknown="ignore",
)
self.doTest(trainable, **self._iris)
def test_float32_transform(self):
trainable = lale.lib.autoai_libs.float32_transform()
self.doTest(trainable, **self._iris)
def test_FloatStr2Float(self):
n_columns = self._iris["train_X"].shape[1]
trainable = lale.lib.autoai_libs.FloatStr2Float(
dtypes_list=["int_num" for i in range(n_columns)]
)
self.doTest(trainable, **self._iris)
def test_OptStandardScaler(self):
trainable = lale.lib.autoai_libs.OptStandardScaler()
self.doTest(trainable, **self._iris)
def test_NumImputer(self):
trainable = lale.lib.autoai_libs.NumImputer()
self.doTest(trainable, **self._iris)
def test_NumpyPermuteArray(self):
trainable = lale.lib.autoai_libs.NumpyPermuteArray(
axis=0, permutation_indices=[2, 0, 1, 3]
)
self.doTest(trainable, **self._iris)
def test_TNoOp(self):
from autoai_libs.utils.fc_methods import is_not_categorical
trainable = lale.lib.autoai_libs.TNoOp(
fun=np.rint,
name="do nothing",
datatypes=["numeric"],
feat_constraints=[is_not_categorical],
)
self.doTest(trainable, **self._iris)
def test_TA1(self):
from autoai_libs.utils.fc_methods import is_not_categorical
float32 = np.dtype("float32")
trainable = lale.lib.autoai_libs.TA1(
fun=np.rint,
name="round",
datatypes=["numeric"],
feat_constraints=[is_not_categorical],
col_names=["a", "b", "c", "d"],
col_dtypes=[float32, float32, float32, float32],
)
self.doTest(trainable, **self._iris)
def test_TA2(self):
from autoai_libs.utils.fc_methods import is_not_categorical
float32 = np.dtype("float32")
trainable = lale.lib.autoai_libs.TA2(
fun=np.add,
name="sum",
datatypes1=["numeric"],
feat_constraints1=[is_not_categorical],
datatypes2=["numeric"],
feat_constraints2=[is_not_categorical],
col_names=["a", "b", "c", "d"],
col_dtypes=[float32, float32, float32, float32],
)
self.doTest(trainable, **self._iris)
def test_TB1(self):
from autoai_libs.utils.fc_methods import is_not_categorical
from sklearn.preprocessing import StandardScaler
float32 = np.dtype("float32")
trainable = lale.lib.autoai_libs.TB1(
tans_class=StandardScaler,
name="stdscaler",
datatypes=["numeric"],
feat_constraints=[is_not_categorical],
col_names=["a", "b", "c", "d"],
col_dtypes=[float32, float32, float32, float32],
)
self.doTest(trainable, **self._iris)
def test_TB2(self):
pass # TODO: not sure how to instantiate, what to pass for tans_class
def test_TAM(self):
from autoai_libs.cognito.transforms.transform_extras import (
IsolationForestAnomaly,
)
float32 = np.dtype("float32")
trainable = lale.lib.autoai_libs.TAM(
tans_class=IsolationForestAnomaly,
name="isoforestanomaly",
col_names=["a", "b", "c", "d"],
col_dtypes=[float32, float32, float32, float32],
)
self.doTest(trainable, **self._iris)
def test_TGen(self):
from autoai_libs.cognito.transforms.transform_extras import NXOR
from autoai_libs.utils.fc_methods import is_not_categorical
float32 = np.dtype("float32")
trainable = lale.lib.autoai_libs.TGen(
fun=NXOR,
name="nxor",
arg_count=2,
datatypes_list=[["numeric"], ["numeric"]],
feat_constraints_list=[[is_not_categorical], [is_not_categorical]],
col_names=["a", "b", "c", "d"],
col_dtypes=[float32, float32, float32, float32],
)
self.doTest(trainable, **self._iris)
def test_FS1(self):
trainable = lale.lib.autoai_libs.FS1(
cols_ids_must_keep=[1],
additional_col_count_to_keep=3,
ptype="classification",
)
self.doTest(trainable, **self._iris)
def test_FS2(self):
from sklearn.ensemble import ExtraTreesClassifier
trainable = lale.lib.autoai_libs.FS2(
cols_ids_must_keep=[1],
additional_col_count_to_keep=3,
ptype="classification",
eval_algo=ExtraTreesClassifier,
)
self.doTest(trainable, **self._iris)
def test_ColumnSelector(self):
trainable = lale.lib.autoai_libs.ColumnSelector()
self.doTest(trainable, **self._iris)
def test_ColumnSelector_pandas(self):
iris_X, iris_y = sklearn.datasets.load_iris(return_X_y=True, as_frame=True)
keys = ["train_X", "test_X", "train_y", "test_y"]
splits = sklearn.model_selection.train_test_split(iris_X, iris_y)
iris = dict(zip(keys, splits))
self.assertIsInstance(iris["train_X"], pd.DataFrame)
trainable = lale.lib.autoai_libs.ColumnSelector(columns_indices_list=[0, 2, 3])
self.doTest(trainable, **iris)
class TestAutoaiLibsText(unittest.TestCase):
def setUp(self):
from sklearn.datasets import fetch_20newsgroups
cats = ["alt.atheism", "sci.space"]
newsgroups_train = fetch_20newsgroups(subset="train", categories=cats)
self.train_X, self.train_y = (
np.array(newsgroups_train.data),
newsgroups_train.target,
)
self.train_X = np.reshape(self.train_X, (self.train_X.shape[0], 1))
newsgroups_test = fetch_20newsgroups(subset="test", categories=cats)
self.test_X, self.test_y = (
np.array(newsgroups_test.data),
newsgroups_test.target,
)
self.test_X = np.reshape(self.test_X, (self.test_X.shape[0], 1))
def doTest(self, trainable, train_X, train_y, test_X, test_y):
trained = trainable.fit(train_X, train_y)
_ = trained.transform(test_X)
with self.assertWarns(DeprecationWarning):
trainable.transform(train_X)
trainable.to_json()
trainable_pipeline = trainable >> float32_transform() >> XGBClassifier()
trained_pipeline = trainable_pipeline.fit(train_X, train_y)
trained_pipeline.predict(test_X)
hyperopt = Hyperopt(estimator=trainable_pipeline, max_evals=1, verbose=True)
trained_hyperopt = hyperopt.fit(train_X, train_y)
trained_hyperopt.predict(test_X)
@unittest.skip(
"skipping for now because this does not work with the latest xgboost."
)
def test_TextTransformer(self):
trainable = lale.lib.autoai_libs.TextTransformer(
drop_columns=True,
columns_to_be_deleted=[0, 1],
text_processing_options={"word2vec": {"output_dim": 5}},
)
self.doTest(trainable, self.train_X, self.train_y, self.test_X, self.test_y)
@unittest.skip(
"skipping for now because this does not work with the latest xgboost."
)
def test_Word2VecTransformer(self):
trainable = lale.lib.autoai_libs.Word2VecTransformer(
drop_columns=True, output_dim=5
)
self.doTest(trainable, self.train_X, self.train_y, self.test_X, self.test_y)
# class TestDateTransformer(unittest.TestCase):
# @classmethod
# def setUpClass(cls):
# data = fetch_household_power_consumption()
# data = data.iloc[:5000, [0, 2, 3, 4, 5]]
# cls.X_train = data.iloc[-1000:]
# cls.X_test = data.iloc[:-1000]
# def test_01_all_mini_options_with_headers(self):
# transformer = lale.lib.autoai_libs.DateTransformer(
# options=["all"], column_headers_list=self.X_train.columns.values.tolist()
# )
# fitted_transformer = transformer.fit(self.X_train.values)
# X_test_transformed = fitted_transformer.transform(self.X_test.values)
# X_train_transformed = fitted_transformer.transform(self.X_train.values)
# header_list = fitted_transformer.impl.new_column_headers_list
# print(f"New columns: {header_list}, new shape: {X_train_transformed.shape}")
# self.assertEqual(
# X_train_transformed.shape[1],
# X_test_transformed.shape[1],
# f"Number of columns after transform is different.:{X_train_transformed.shape[1]}, {X_test_transformed.shape[1]}",
# )
# def test_02_all_options_without_headers(self):
# transformer = lale.lib.autoai_libs.DateTransformer(options=["all"])
# fitted_transformer = transformer.fit(self.X_train.values)
# X_train = fitted_transformer.transform(self.X_train.values)
# X_test = transformer.transform(self.X_test.values)
# header_list = fitted_transformer.impl.new_column_headers_list
# print(f"New columns: {header_list}")
# self.assertEqual(
# X_train.shape[1], X_test.shape[1], msg="Shape after transform is different."
# )
# def test_03_specific_options_and_delete_source_columns(self):
# transformer = lale.lib.autoai_libs.DateTransformer(
# options=["FloatTimestamp", "DayOfWeek", "Hour", "Minute"],
# delete_source_columns=True,
# column_headers_list=self.X_train.columns.values.tolist(),
# )
# fitted_transformer = transformer.fit(self.X_train.values)
# X_train = fitted_transformer.transform(self.X_train.values)
# X_test = transformer.transform(self.X_test.values)
# header_list = fitted_transformer.impl.new_column_headers_list
# print(f"New columns: {header_list}")
# self.assertEqual(
# X_train.shape[1], X_test.shape[1], msg="Shape after transform is different."
# )
# def test_04_option_Datetime_and_delete_source_columns(self):
# transformer = lale.lib.autoai_libs.DateTransformer(
# options=["Datetime"],
# delete_source_columns=True,
# column_headers_list=self.X_train.columns.values.tolist(),
# )
# fitted_transformer = transformer.fit(self.X_train.values)
# X_train = fitted_transformer.transform(self.X_train.values)
# X_test = transformer.transform(self.X_test.values)
# header_list = fitted_transformer.impl.new_column_headers_list
# print(f"New columns: {header_list}")
# self.assertEqual(
# X_train.shape[1], X_test.shape[1], msg="Shape after transform is different."
# )
| 14,626 | 38.005333 | 127 |
py
|
lale
|
lale-master/test/test_relational.py
|
# Copyright 2019-2022 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from typing import List
import jsonschema
import numpy as np
import pandas as pd
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
import lale.operators
from lale.lib.rasl.convert import Convert
from lale.operator_wrapper import wrap_imported_operators
try:
from pyspark import SparkConf, SparkContext
from pyspark.sql import Row, SparkSession, SQLContext
from lale.datasets.data_schemas import ( # pylint:disable=ungrouped-imports
SparkDataFrameWithIndex,
)
spark_installed = True
except ImportError:
spark_installed = False
from test import EnableSchemaValidation # pylint:disable=wrong-import-order
from lale.datasets import pandas2spark
from lale.datasets.data_schemas import (
add_table_name,
get_index_name,
get_table_name,
make_optional_schema,
)
from lale.datasets.multitable import multitable_train_test_split
from lale.datasets.multitable.fetch_datasets import fetch_go_sales_dataset
from lale.expressions import ( # pylint:disable=redefined-builtin
asc,
astype,
collect_set,
count,
day_of_month,
day_of_week,
day_of_year,
desc,
first,
hour,
identity,
isnan,
isnotnan,
isnotnull,
isnull,
it,
ite,
max,
mean,
median,
min,
minute,
mode,
month,
replace,
string_indexer,
sum,
variance,
)
from lale.helpers import (
_ensure_pandas,
_is_pandas_df,
_is_spark_df,
datatype_param_type,
)
from lale.lib.dataframe import get_columns
from lale.lib.lale import ConcatFeatures, Hyperopt, SplitXy
from lale.lib.rasl import (
Aggregate,
Alias,
Filter,
GroupBy,
Join,
Map,
OrderBy,
Relational,
Scan,
SortIndex,
)
from lale.lib.sklearn import PCA, KNeighborsClassifier, LogisticRegression
def _set_index_name(df, name):
return add_table_name(df.rename_axis(index=name), get_table_name(df))
def _set_index(df, name):
return add_table_name(df.set_index(name), get_table_name(df))
# Testing '==' and '!=' operators with different types of expressions
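# In lale.expressions, `it` is a symbolic placeholder: `it.col` and `it["col"]`
# denote columns, and comparisons build Expr objects that are evaluated later
# by relational operators such as Filter and Join instead of returning plain
# booleans. The tests below pin down how such expressions behave under Python
# truth-testing and `==`/`!=`.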
class TestExpressions(unittest.TestCase):
def test_expr_1(self):
with self.assertRaises(TypeError):
if it.col < 3:
_ = "If it throws an exception, then the test is successful."
def test_expr_2(self):
self.assertFalse(it.col == 5)
def test_expr_3(self):
try:
if it.col == it.col:
_ = "If it does not throw an exception, then the test is successful."
except Exception:
self.fail("Expression 'it.col == it.col' raised an exception unexpectedly!")
def test_expr_4(self):
self.assertFalse(it.col == it.col2)
def test_expr_5(self):
X = it.col
self.assertTrue(X == X) # pylint:disable=comparison-with-itself
def test_expr_6(self):
self.assertFalse(it.col != 5)
def test_expr_7(self):
try:
if it.col != it.col:
_ = "If it does not throw an exception, then the test is successful."
except Exception:
self.fail("Expression 'it.col != it.col' raised an exception unexpectedly!")
def test_expr_8(self):
self.assertFalse(it.col != it.col2)
def test_expr_9(self):
X = it.col
self.assertTrue(X != X) # pylint:disable=comparison-with-itself
# Testing filter operator
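# Filter keeps only the rows that satisfy all of the given predicates, on both
# pandas and Spark dataframes, e.g. Filter(pred=[it.col3 == "TX", it["col2"] > 4]).
# setUpClass first joins three small tables so the same filtered result can be
# compared across backends.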
class TestFilter(unittest.TestCase):
@classmethod
def setUpClass(cls):
info = [
(1, "NY", 100),
(2, "NY", 150),
(3, "TX", 200),
(4, "TX", 100),
(5, "CA", 200),
]
t1 = [(2, "Warm"), (3, "Cold"), (4, "Warm"), (5, "Cold")]
main = [
(1, "NY", 1, float(1)),
(2, "TX", 6, np.nan),
(3, "CA", 2, float(2)),
(4, "NY", 5, None),
(5, "CA", 0, float(3)),
]
if spark_installed:
conf = (
SparkConf()
.setMaster("local[2]")
.set("spark.driver.bindAddress", "127.0.0.1")
)
sc = SparkContext.getOrCreate(conf=conf)
sqlContext = SQLContext(sc)
rdd = sc.parallelize(main)
table_main = rdd.map(
lambda x: Row(TrainId=int(x[0]), col1=x[1], col2=int(x[2]), col6=x[3])
)
spark_main = add_table_name(sqlContext.createDataFrame(table_main), "main")
spark_main = SparkDataFrameWithIndex(spark_main)
rdd = sc.parallelize(info)
table_info = rdd.map(
lambda x: Row(train_id=int(x[0]), col3=x[1], col4=int(x[2]))
)
spark_info = add_table_name(sqlContext.createDataFrame(table_info), "info")
spark_info = SparkDataFrameWithIndex(spark_info)
rdd = sc.parallelize(t1)
table_t1 = rdd.map(lambda x: Row(tid=int(x[0]), col5=x[1]))
spark_t1 = add_table_name(sqlContext.createDataFrame(table_t1), "t1")
spark_t1 = SparkDataFrameWithIndex(spark_t1)
trainable = Join(
pred=[
it.main.TrainId == it.info.train_id,
it.info.train_id == it.t1.tid,
],
join_type="left",
)
spark_transformed_df = trainable.transform(
[spark_main, spark_info, spark_t1]
)
spark_transformed_df = SparkDataFrameWithIndex(
spark_transformed_df.drop_indexes().sort("TrainId")
)
cls.tgt2datasets = {
"pandas": spark_transformed_df.toPandas(),
"spark": spark_transformed_df,
}
else:
            # build the pandas tables with named columns and table names so
            # that the Join predicates below can resolve them
            pandas_main = add_table_name(
                pd.DataFrame(main, columns=["TrainId", "col1", "col2", "col6"]), "main"
            )
            pandas_info = add_table_name(
                pd.DataFrame(info, columns=["train_id", "col3", "col4"]), "info"
            )
            pandas_t1 = add_table_name(pd.DataFrame(t1, columns=["tid", "col5"]), "t1")
trainable = Join(
pred=[
it.main.TrainId == it.info.train_id,
it.info.train_id == it.t1.tid,
],
join_type="left",
)
            pandas_transformed_df = trainable.transform(
                [pandas_main, pandas_info, pandas_t1]
            ).sort_values(by="TrainId")
cls.tgt2datasets = {"pandas": pandas_transformed_df}
def test_filter_isnan(self):
pandas_transformed_df = self.tgt2datasets["pandas"]
self.assertEqual(pandas_transformed_df.shape, (5, 9))
self.assertEqual(pandas_transformed_df["col1"][2], "CA")
for tgt, transformed_df in self.tgt2datasets.items():
trainable = Filter(pred=[isnan(it.col6)])
filtered_df = trainable.transform(transformed_df)
if tgt == "pandas":
                # `None` is treated as `nan` in pandas
self.assertEqual(filtered_df.shape, (2, 9), tgt)
self.assertTrue(all(np.isnan(filtered_df["col6"])), tgt)
elif tgt == "spark":
self.assertEqual(_ensure_pandas(filtered_df).shape, (1, 9), tgt)
test_list = [row[0] for row in filtered_df.select("col6").collect()]
self.assertTrue(all((np.isnan(i) for i in test_list if i is not None)))
self.assertEqual(
get_index_name(transformed_df), get_index_name(filtered_df)
)
else:
assert False
def test_filter_isnotnan(self):
for tgt, transformed_df in self.tgt2datasets.items():
trainable = Filter(pred=[isnotnan(it.col6)])
filtered_df = trainable.transform(transformed_df)
if tgt == "pandas":
self.assertTrue(all(np.logical_not(np.isnan(filtered_df["col6"]))), tgt)
self.assertEqual(filtered_df.shape, (3, 9), tgt)
elif tgt == "spark":
self.assertEqual(_ensure_pandas(filtered_df).shape, (4, 9), tgt)
test_list = [row[0] for row in filtered_df.select("col6").collect()]
self.assertTrue(
all((not np.isnan(i) for i in test_list if i is not None))
)
self.assertEqual(
get_index_name(transformed_df), get_index_name(filtered_df)
)
else:
assert False
def test_filter_isnull(self):
for tgt, transformed_df in self.tgt2datasets.items():
trainable = Filter(pred=[isnull(it.col6)])
filtered_df = trainable.transform(transformed_df)
if tgt == "pandas":
                # `None` is treated as `nan` in pandas
self.assertEqual(filtered_df.shape, (2, 9), tgt)
self.assertTrue(all(np.isnan(filtered_df["col6"])), tgt)
elif tgt == "spark":
self.assertEqual(_ensure_pandas(filtered_df).shape, (1, 9), tgt)
test_list = [row[0] for row in filtered_df.select("col6").collect()]
self.assertTrue(all((i is None for i in test_list)))
self.assertEqual(
get_index_name(transformed_df), get_index_name(filtered_df)
)
else:
assert False
def test_filter_isnotnull(self):
for tgt, transformed_df in self.tgt2datasets.items():
trainable = Filter(pred=[isnotnull(it.col6)])
filtered_df = trainable.transform(transformed_df)
if tgt == "pandas":
                # `None` is treated as `nan` in pandas
self.assertEqual(filtered_df.shape, (3, 9), tgt)
self.assertTrue(all(np.logical_not(np.isnan(filtered_df["col6"]))))
elif tgt == "spark":
self.assertEqual(_ensure_pandas(filtered_df).shape, (4, 9), tgt)
test_list = [row[0] for row in filtered_df.select("col6").collect()]
self.assertTrue(all((i is not None for i in test_list)))
self.assertEqual(
get_index_name(transformed_df), get_index_name(filtered_df)
)
else:
assert False
def test_filter_eq(self):
for tgt, transformed_df in self.tgt2datasets.items():
trainable = Filter(pred=[it.col3 == "TX"])
filtered_df = trainable.transform(transformed_df)
filtered_df = _ensure_pandas(filtered_df)
self.assertEqual(filtered_df.shape, (2, 9), tgt)
self.assertTrue(all(filtered_df["col3"] == "TX"), tgt)
def test_filter_neq(self):
for tgt, transformed_df in self.tgt2datasets.items():
trainable = Filter(pred=[it.col1 != it["col3"]])
filtered_df = trainable.transform(transformed_df)
filtered_df = _ensure_pandas(filtered_df)
self.assertEqual(filtered_df.shape, (3, 9), tgt)
self.assertTrue(all(filtered_df["col1"] != filtered_df["col3"]), tgt)
def test_filter_ge(self):
for tgt, transformed_df in self.tgt2datasets.items():
trainable = Filter(pred=[it["col4"] >= 150])
filtered_df = trainable.transform(transformed_df)
filtered_df = _ensure_pandas(filtered_df)
self.assertEqual(filtered_df.shape, (3, 9), tgt)
self.assertTrue(all(filtered_df["col4"] >= 150), tgt)
def test_filter_gt(self):
for tgt, transformed_df in self.tgt2datasets.items():
trainable = Filter(pred=[it["col4"] > 150])
filtered_df = trainable.transform(transformed_df)
filtered_df = _ensure_pandas(filtered_df)
self.assertEqual(filtered_df.shape, (2, 9), tgt)
self.assertTrue(all(filtered_df["col4"] > 150), tgt)
def test_filter_le(self):
for tgt, transformed_df in self.tgt2datasets.items():
trainable = Filter(pred=[it["col3"] <= "NY"])
filtered_df = trainable.transform(transformed_df)
filtered_df = _ensure_pandas(filtered_df)
self.assertEqual(filtered_df.shape, (3, 9), tgt)
self.assertTrue(all(filtered_df["col3"] <= "NY"), tgt)
def test_filter_lt(self):
for tgt, transformed_df in self.tgt2datasets.items():
trainable = Filter(pred=[it["col2"] < it["TrainId"]])
filtered_df = trainable.transform(transformed_df)
filtered_df = _ensure_pandas(filtered_df)
self.assertEqual(filtered_df.shape, (2, 9), tgt)
self.assertTrue(all(filtered_df["col2"] < filtered_df["TrainId"]), tgt)
def test_filter_multiple1(self):
for tgt, transformed_df in self.tgt2datasets.items():
trainable = Filter(pred=[it.col3 == "TX", it["col2"] > 4])
filtered_df = trainable.transform(transformed_df)
filtered_df = _ensure_pandas(filtered_df)
self.assertEqual(filtered_df.shape, (1, 9))
self.assertTrue(all(filtered_df["col3"] == "TX"), tgt)
self.assertTrue(all(filtered_df["col2"] > 4), tgt)
def test_filter_multiple2(self):
for tgt, transformed_df in self.tgt2datasets.items():
trainable = Filter(pred=[it.col5 != "Cold", it.train_id < 4])
filtered_df = trainable.transform(transformed_df)
if tgt == "pandas":
self.assertEqual(filtered_df.shape, (2, 9))
elif tgt == "spark":
# `None != "Cold"` is not true in Spark
filtered_df = _ensure_pandas(filtered_df)
self.assertEqual(filtered_df.shape, (1, 9))
else:
assert False
self.assertTrue(all(filtered_df["col5"] != "Cold"), tgt)
self.assertTrue(all(filtered_df["train_id"] < 4), tgt)
def test_multiple3(self):
for tgt, transformed_df in self.tgt2datasets.items():
trainable = Filter(
pred=[
it["tid"] == it["TrainId"],
it["col2"] >= it.train_id,
it.col3 == "NY",
]
)
filtered_df = trainable.transform(transformed_df)
filtered_df = _ensure_pandas(filtered_df)
self.assertEqual(filtered_df.shape, (1, 9), tgt)
self.assertTrue(all(filtered_df["tid"] == filtered_df["TrainId"]), tgt)
self.assertTrue(all(filtered_df["col2"] >= filtered_df["train_id"]), tgt)
self.assertTrue(all(filtered_df["col3"] == "NY"), tgt)
def test_filter_no_col_error(self):
for _tgt, transformed_df in self.tgt2datasets.items():
with self.assertRaises(ValueError):
trainable = Filter(pred=[it["TrainId"] < it.col_na])
_ = trainable.transform(transformed_df)
class TestScan(unittest.TestCase):
def setUp(self):
self.go_sales = fetch_go_sales_dataset()
def test_attribute(self):
with EnableSchemaValidation():
trained = Scan(table=it.go_products)
transformed = trained.transform(self.go_sales)
self.assertEqual(get_table_name(transformed), "go_products")
self.assertIs(self.go_sales[3], transformed)
def test_subscript(self):
with EnableSchemaValidation():
trained = Scan(table=it["go_products"])
transformed = trained.transform(self.go_sales)
self.assertEqual(get_table_name(transformed), "go_products")
self.assertIs(self.go_sales[3], transformed)
def test_error1(self):
with EnableSchemaValidation():
trained = Scan(table=it.go_products)
with self.assertRaisesRegex(ValueError, "invalid X"):
_ = trained.transform(self.go_sales[3])
def test_error2(self):
trained = Scan(table=it.unknown_table)
with self.assertRaisesRegex(ValueError, "could not find 'unknown_table'"):
_ = trained.transform(self.go_sales)
def test_error3(self):
with self.assertRaisesRegex(ValueError, "expected `it.table_name` or"):
_ = Scan(table=(it.go_products == 42))
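# Scan selects one named table out of a list of tables (here the go_sales
# dataset), while Alias, tested next, just renames a table so that later
# expressions can refer to it under the new name.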
# Testing alias operator for pandas and spark dataframes
class TestAlias(unittest.TestCase):
@classmethod
def setUpClass(cls):
targets: List[datatype_param_type] = ["pandas", "spark"]
cls.tgt2datasets = {tgt: fetch_go_sales_dataset(tgt) for tgt in targets}
def test_alias(self):
for tgt, datasets in self.tgt2datasets.items():
trainable = Alias(name="test_alias")
go_products = datasets[3]
self.assertEqual(get_table_name(go_products), "go_products")
transformed_df = trainable.transform(go_products)
self.assertEqual(get_table_name(transformed_df), "test_alias")
if tgt == "pandas":
self.assertTrue(_is_pandas_df(transformed_df))
elif tgt == "spark":
self.assertTrue(_is_spark_df(transformed_df))
transformed_df = transformed_df.toPandas()
else:
assert False
self.assertEqual(transformed_df.shape, (274, 8))
def test_alias_name_error(self):
with self.assertRaises(jsonschema.ValidationError):
_ = Alias()
with self.assertRaises(jsonschema.ValidationError):
_ = Alias(name="")
with self.assertRaises(jsonschema.ValidationError):
_ = Alias(name=" ")
def test_filter_name(self):
for tgt, datasets in self.tgt2datasets.items():
go_products = datasets[3]
trained = Filter(pred=[it["Unit cost"] >= 10])
transformed = trained.transform(go_products)
self.assertEqual(get_table_name(transformed), "go_products", tgt)
if tgt == "spark":
self.assertEqual(get_index_name(transformed), "index", tgt)
def test_map_name(self):
for tgt, datasets in self.tgt2datasets.items():
go_products = datasets[3]
trained = Map(columns={"unit_cost": it["Unit cost"]})
transformed = trained.transform(go_products)
self.assertEqual(get_table_name(transformed), "go_products", tgt)
if tgt == "spark":
self.assertEqual(get_index_name(transformed), "index", tgt)
def test_join_name(self):
for tgt, datasets in self.tgt2datasets.items():
trained = Join(
pred=[it.go_1k["Retailer code"] == it.go_retailers["Retailer code"]],
name="joined_tables",
)
transformed = trained.transform(datasets)
self.assertEqual(get_table_name(transformed), "joined_tables", tgt)
def test_groupby_name(self):
for tgt, datasets in self.tgt2datasets.items():
go_products = datasets[3]
trained = GroupBy(by=[it["Product line"]])
transformed = trained.transform(go_products)
self.assertEqual(get_table_name(transformed), "go_products", tgt)
def test_aggregate_name(self):
for tgt, datasets in self.tgt2datasets.items():
go_daily_sales = datasets[1]
group_by = GroupBy(by=[it["Retailer code"]])
aggregate = Aggregate(columns={"min_quantity": min(it.Quantity)})
trained = group_by >> aggregate
transformed = trained.transform(go_daily_sales)
self.assertEqual(get_table_name(transformed), "go_daily_sales", tgt)
# Testing group_by operator for pandas and spark dataframes
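# GroupBy only records the grouping keys; it is typically composed with
# Aggregate, which maps output column names to aggregation expressions, e.g.
# GroupBy(by=[it["Retailer code"]]) >> Aggregate(columns={"n": count(it.Quantity)}).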
class TestGroupBy(unittest.TestCase):
@classmethod
def setUpClass(cls):
targets: List[datatype_param_type] = ["pandas", "spark"]
cls.tgt2datasets = {tgt: fetch_go_sales_dataset(tgt) for tgt in targets}
def test_groupby(self):
trainable = GroupBy(by=[it["Product line"]])
for tgt, datasets in self.tgt2datasets.items():
go_products = datasets[3]
assert get_table_name(go_products) == "go_products"
grouped_df = trainable.transform(go_products)
if tgt == "pandas":
self.assertEqual(grouped_df.ngroups, 5, tgt)
aggregate = Aggregate(columns={"count": count(it["Product line"])})
df = _ensure_pandas(aggregate.transform(grouped_df))
self.assertEqual(df.shape, (5, 1), tgt)
def test_groupby1(self):
trainable = GroupBy(by=[it["Product line"], it.Product])
for tgt, datasets in self.tgt2datasets.items():
go_products = datasets[3]
assert get_table_name(go_products) == "go_products"
grouped_df = trainable.transform(go_products)
if tgt == "pandas":
self.assertEqual(grouped_df.ngroups, 144, tgt)
aggregate = Aggregate(columns={"count": count(it["Product line"])})
df = _ensure_pandas(aggregate.transform(grouped_df))
self.assertEqual(df.shape, (144, 1), tgt)
# Testing Aggregate operator for both pandas and Spark
class TestAggregate(unittest.TestCase):
@classmethod
def setUpClass(cls):
targets: List[datatype_param_type] = ["pandas", "spark"]
cls.tgt2datasets = {tgt: fetch_go_sales_dataset(tgt) for tgt in targets}
def test_sales_not_grouped_single_col(self):
pipeline = Scan(table=it.go_daily_sales) >> Aggregate(
columns={
"min_method_code": min(it["Order method code"]),
"max_method_code": max(it["Order method code"]),
"collect_set('Order method code')": collect_set(
it["Order method code"]
),
"mode_method_code": mode(it["Order method code"]),
"median_method_code": median(it["Order method code"]),
}
)
for tgt, datasets in self.tgt2datasets.items():
result = pipeline.transform(datasets)
result = _ensure_pandas(result)
self.assertEqual(result.shape, (1, 5), tgt)
self.assertEqual(result.loc[0, "min_method_code"], 1, tgt)
self.assertEqual(result.loc[0, "max_method_code"], 7, tgt)
self.assertEqual(
sorted(result.loc[0, "collect_set('Order method code')"]),
[1, 2, 3, 4, 5, 6, 7],
tgt,
)
self.assertEqual(result.loc[0, "mode_method_code"], 5, tgt)
self.assertEqual(result.loc[0, "median_method_code"], 5, tgt)
def test_sales_not_grouped_single_func(self):
pipeline = Aggregate(
columns={
"max_method_code": max(it["Order method code"]),
"max_method_type": max(it["Order method type"]),
}
)
for tgt, datasets in self.tgt2datasets.items():
result = pipeline.transform(datasets[2])
result = _ensure_pandas(result)
self.assertEqual(result.shape, (1, 2), tgt)
self.assertEqual(result.loc[0, "max_method_code"], 12, tgt)
self.assertEqual(result.loc[0, "max_method_type"], "Web", tgt)
def test_sales_multi_col_not_grouped(self):
pipeline = Aggregate(
columns={
"min_method_code": min(it["Order method code"]),
"max_method_code": max(it["Order method code"]),
"max_method_type": max(it["Order method type"]),
}
)
for tgt, datasets in self.tgt2datasets.items():
result = pipeline.transform(datasets[2])
result = _ensure_pandas(result)
self.assertEqual(result.shape, (1, 3), tgt)
self.assertEqual(result.loc[0, "min_method_code"], 1, tgt)
self.assertEqual(result.loc[0, "max_method_code"], 12, tgt)
self.assertEqual(result.loc[0, "max_method_type"], "Web", tgt)
def test_sales_onekey_grouped(self):
pipeline = (
Scan(table=it.go_daily_sales)
>> GroupBy(by=[it["Retailer code"]])
>> Aggregate(
columns={
"retailer_code": it["Retailer code"],
"min_method_code": min(it["Order method code"]),
"max_method_code": max(it["Order method code"]),
"min_quantity": min(it["Quantity"]),
"method_codes": collect_set(it["Order method code"]),
# Mode is not supported on GroupedData as of now.
# "mode_method_code": mode(it["Order method code"]),
"median_method_code": median(it["Order method code"]),
}
)
)
for tgt, datasets in self.tgt2datasets.items():
result = pipeline.transform(datasets)
result = _ensure_pandas(result)
self.assertEqual(result.shape, (289, 6))
row = result[result.retailer_code == 1201]
self.assertEqual(row.loc[row.index[0], "retailer_code"], 1201, tgt)
self.assertEqual(row.loc[row.index[0], "min_method_code"], 2, tgt)
self.assertEqual(row.loc[row.index[0], "max_method_code"], 6, tgt)
self.assertEqual(row.loc[row.index[0], "min_quantity"], 1, tgt)
self.assertEqual(
sorted(row.loc[row.index[0], "method_codes"]), [2, 3, 4, 5, 6], tgt
)
# self.assertEqual(result.loc[row.index[0], "mode_method_code"], 5, tgt)
self.assertEqual(result.loc[row.index[0], "median_method_code"], 5, tgt)
self.assertEqual(result.index.name, "Retailer code", tgt)
def test_sales_onekey_grouped_single_col(self):
pipeline = (
Scan(table=it.go_daily_sales)
>> GroupBy(by=[it["Retailer code"]])
>> Aggregate(
columns={
"min_method_code": min(it["Order method code"]),
"max_method_code": max(it["Order method code"]),
"method_codes": collect_set(it["Order method code"]),
"median_method_code": median(it["Order method code"]),
}
)
)
for tgt, datasets in self.tgt2datasets.items():
result = pipeline.transform(datasets)
result = _ensure_pandas(result)
self.assertEqual(result.shape, (289, 4))
self.assertEqual(result.loc[1201, "min_method_code"], 2, tgt)
self.assertEqual(result.loc[1201, "max_method_code"], 6, tgt)
self.assertEqual(
sorted(result.loc[1201, "method_codes"]), [2, 3, 4, 5, 6], tgt
)
self.assertEqual(result.loc[1201, "median_method_code"], 5, tgt)
self.assertEqual(result.index.name, "Retailer code", tgt)
def test_sales_onekey_grouped_single_func(self):
pipeline = (
Scan(table=it.go_daily_sales)
>> GroupBy(by=[it["Retailer code"]])
>> Aggregate(
columns={
"min_method_code": min(it["Order method code"]),
"min_quantity": min(it["Quantity"]),
}
)
)
for tgt, datasets in self.tgt2datasets.items():
result = pipeline.transform(datasets)
result = _ensure_pandas(result)
self.assertEqual(result.shape, (289, 2))
self.assertEqual(result.loc[1201, "min_method_code"], 2, tgt)
self.assertEqual(result.loc[1201, "min_quantity"], 1, tgt)
self.assertEqual(result.index.name, "Retailer code", tgt)
def test_products_onekey_grouped(self):
pipeline = (
Scan(table=it.go_products)
>> GroupBy(by=[it["Product line"]])
>> Aggregate(
columns={
"line": first(it["Product line"]),
"mean_uc": mean(it["Unit cost"]),
"min_up": min(it["Unit price"]),
"count_pc": count(it["Product color"]),
}
)
)
for tgt, datasets in self.tgt2datasets.items():
result = pipeline.transform(datasets)
result = _ensure_pandas(result)
self.assertEqual(result.shape, (5, 4))
row = result[result.line == "Camping Equipment"]
self.assertEqual(row.loc[row.index[0], "line"], "Camping Equipment", tgt)
self.assertAlmostEqual(row.loc[row.index[0], "mean_uc"], 89.0, 1, tgt)
self.assertEqual(row.loc[row.index[0], "min_up"], 2.06, tgt)
self.assertEqual(row.loc[row.index[0], "count_pc"], 41, tgt)
def test_sales_twokeys_grouped(self):
pipeline = (
Scan(table=it.go_daily_sales)
>> GroupBy(by=[it["Product number"], it["Retailer code"]])
>> Aggregate(
columns={
"product": it["Product number"],
"retailer": it["Retailer code"],
"mean_quantity": mean(it.Quantity),
"max_usp": max(it["Unit sale price"]),
"count_quantity": count(it.Quantity),
}
)
)
for tgt, datasets in self.tgt2datasets.items():
result = pipeline.transform(datasets)
result = _ensure_pandas(result)
self.assertEqual(result.shape, (5000, 5))
row = result[(result["product"] == 70240) & (result["retailer"] == 1205)]
self.assertEqual(row.loc[row.index[0], "product"], 70240, tgt)
self.assertEqual(row.loc[row.index[0], "retailer"], 1205, tgt)
self.assertAlmostEqual(
row.loc[row.index[0], "mean_quantity"], 48.39, 2, tgt
)
self.assertEqual(row.loc[row.index[0], "max_usp"], 122.70, tgt)
self.assertEqual(row.loc[row.index[0], "count_quantity"], 41, tgt)
self.assertEqual(
result.index.names, ["Product number", "Retailer code"], tgt
)
def test_products_twokeys_grouped(self):
pipeline = (
Scan(table=it.go_products)
>> GroupBy(by=[it["Product line"], it["Product brand"]])
>> Aggregate(
columns={
"sum_uc": sum(it["Unit cost"]),
"max_uc": max(it["Unit cost"]),
"line": first(it["Product line"]),
"brand": first(it["Product brand"]),
}
)
)
for tgt, datasets in self.tgt2datasets.items():
result = pipeline.transform(datasets)
result = _ensure_pandas(result)
self.assertEqual(result.shape, (30, 4))
row = result[
(result.line == "Camping Equipment") & (result.brand == "Star")
]
self.assertEqual(row.loc[row.index[0], "sum_uc"], 1968.19, tgt)
self.assertEqual(row.loc[row.index[0], "max_uc"], 490.00, tgt)
self.assertEqual(row.loc[row.index[0], "line"], "Camping Equipment", tgt)
self.assertEqual(row.loc[row.index[0], "brand"], "Star", tgt)
def test_error_unknown_column(self):
pipeline = (
Scan(table=it.go_daily_sales)
>> GroupBy(by=[it["Product number"]])
>> Aggregate(columns={"mean_quantity": mean(it["Quantity_1"])})
)
with self.assertRaises(KeyError):
_ = pipeline.transform(self.tgt2datasets["pandas"])
def test_error_columns_not_dict(self):
pipeline = (
Scan(table=it.go_daily_sales)
>> GroupBy(by=[it["Product number"]])
>> Aggregate(columns=[mean(it["Quantity_1"])])
)
with self.assertRaises(ValueError):
_ = pipeline.transform(self.tgt2datasets["pandas"])
def test_error_X_not_pandas_or_Spark(self):
trainable = Aggregate(columns={"mean_quantity": mean(it["Quantity_1"])})
with self.assertRaises(ValueError):
_ = trainable.transform(pd.Series([1, 2, 3]))
# Testing join operator for pandas and Spark dataframes
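# Join takes a list of equality predicates between named tables (names come
# from add_table_name or Scan); a nested list expresses a composite key, e.g.
#   Join(pred=[[it.main.train_id == it.info.TrainId,
#               it.main.col1 == it.info.col3]], join_type="left")
# The predicates must chain the tables together, otherwise Join raises a
# ValueError, as the error tests below check.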
class TestJoin(unittest.TestCase):
def test_init(self):
_ = Join(pred=[it.main.train_id == it.info.TrainId], join_type="inner")
    # Define dataframes with different structures, materialized for both pandas and Spark
@classmethod
def setUpClass(cls):
targets: List[datatype_param_type] = ["pandas", "spark"]
cls.tgt2datasets = {
tgt: {"go_sales": fetch_go_sales_dataset(tgt)} for tgt in targets
}
def add_df(name, df):
cls.tgt2datasets["pandas"][name] = df
cls.tgt2datasets["spark"][name] = pandas2spark(df)
table1 = {
"train_id": [1, 2, 3, 4, 5],
"col1": ["NY", "TX", "CA", "NY", "CA"],
"col2": [0, 1, 1, 0, 1],
}
df1 = add_table_name(pd.DataFrame(data=table1), "main")
add_df("df1", df1)
table2 = {
"TrainId": [1, 2, 3],
"col3": ["USA", "USA", "UK"],
"col4": [100, 100, 200],
}
df2 = add_table_name(pd.DataFrame(data=table2), "info")
add_df("df2", df2)
table3 = {
"tid": [1, 2, 3],
"col5": ["Warm", "Cold", "Warm"],
}
df3 = add_table_name(pd.DataFrame(data=table3), "t1")
add_df("df3", df3)
table4 = {
"TrainId": [1, 2, 3, 4, 5],
"col1": ["NY", "TX", "CA", "NY", "CA"],
"col2": [0, 1, 1, 0, 1],
}
df4 = add_table_name(pd.DataFrame(data=table4), "main")
add_df("df4", df4)
table5 = {
"TrainId": [1, 2, 3],
"col3": ["NY", "NY", "CA"],
"col4": [100, 100, 200],
}
df5 = add_table_name(pd.DataFrame(data=table5), "info")
add_df("df5", df5)
table6 = {
"t_id": [2, 3],
"col6": ["USA", "UK"],
}
df6 = add_table_name(pd.DataFrame(data=table6), "t2")
add_df("df6", df6)
# Multiple elements in predicate with different key column names
def test_join_multiple_inner(self):
trainable = Join(
pred=[it.main.train_id == it.info.TrainId, it.info.TrainId == it.t1.tid],
join_type="inner",
)
for tgt, datasets in self.tgt2datasets.items():
df1, df2, df3 = datasets["df1"], datasets["df2"], datasets["df3"]
transformed_df = trainable.transform([df1, df2, df3])
transformed_df = _ensure_pandas(transformed_df)
transformed_df = transformed_df.sort_values(by="train_id").reset_index(
drop=True
)
self.assertEqual(transformed_df.shape, (3, 8), tgt)
self.assertEqual(transformed_df["col5"][1], "Cold", tgt)
    # Multiple elements in predicate with identical key column names
def test_join_multiple_left(self):
trainable = Join(
pred=[it.main.TrainId == it.info.TrainId, it.info.TrainId == it.t1.tid],
join_type="left",
)
for tgt, datasets in self.tgt2datasets.items():
df4, df2, df3 = datasets["df4"], datasets["df2"], datasets["df3"]
transformed_df = trainable.transform([df4, df2, df3])
transformed_df = _ensure_pandas(transformed_df)
transformed_df = transformed_df.sort_values(by="TrainId").reset_index(
drop=True
)
self.assertEqual(transformed_df.shape, (5, 7), tgt)
self.assertEqual(transformed_df["col3"][2], "UK", tgt)
    # Invert one of the join conditions as compared to the test case: test_join_multiple_left
def test_join_multiple_right(self):
trainable = Join(
pred=[it.main.train_id == it.info.TrainId, it.t1.tid == it.info.TrainId],
join_type="right",
)
for tgt, datasets in self.tgt2datasets.items():
df1, df2, df3 = datasets["df1"], datasets["df2"], datasets["df3"]
transformed_df = trainable.transform([df1, df2, df3])
transformed_df = _ensure_pandas(transformed_df)
transformed_df = transformed_df.sort_values(by="TrainId").reset_index(
drop=True
)
self.assertEqual(transformed_df.shape, (3, 8), tgt)
self.assertEqual(transformed_df["col3"][2], "UK", tgt)
# Composite key join
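    # A nested list inside `pred` expresses a composite key: every equality in the
    # list must hold for that single join step, e.g.
    #     [it.main.train_id == it.info.TrainId, it.main.col1 == it.info.col3]
    # joins main and info on both columns at once.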
def test_join_composite(self):
trainable = Join(
pred=[
it.t1.tid == it.info.TrainId,
[it.main.train_id == it.info.TrainId, it.main.col1 == it.info.col3],
],
join_type="left",
)
for tgt, datasets in self.tgt2datasets.items():
df1, df5, df3, df6 = (
datasets["df1"],
datasets["df5"],
datasets["df3"],
datasets["df6"],
)
transformed_df = trainable.transform([df1, df5, df3, df6])
transformed_df = _ensure_pandas(transformed_df)
transformed_df = transformed_df.sort_values(by="train_id").reset_index(
drop=True
)
self.assertEqual(transformed_df.shape, (5, 8), tgt)
self.assertEqual(transformed_df["col3"][2], "CA", tgt)
    # Invert one of the join conditions as compared to the test case: test_join_composite
def test_join_composite1(self):
trainable = Join(
pred=[
[it.main.train_id == it.info.TrainId, it.main.col1 == it.info.col3],
it.t1.tid == it.info.TrainId,
it.t1.tid == it.t2.t_id,
],
join_type="inner",
)
for tgt, datasets in self.tgt2datasets.items():
df1, df5, df3, df6 = (
datasets["df1"],
datasets["df5"],
datasets["df3"],
datasets["df6"],
)
transformed_df = trainable.transform([df1, df5, df3, df6])
transformed_df = _ensure_pandas(transformed_df)
transformed_df = transformed_df.sort_values(by="train_id").reset_index(
drop=True
)
self.assertEqual(transformed_df.shape, (1, 10), tgt)
self.assertEqual(transformed_df["col4"][0], 200, tgt)
    # Composite key join having conditions that involve more than two tables.
    # Constructing the Join should raise a ValueError, which the test asserts.
def test_join_composite_error(self):
with self.assertRaisesRegex(
ValueError, "info.*main.*inFo.* more than two tables"
):
_ = Join(
pred=[
it.t1.tid == it.info.TrainId,
[it.main.train_id == it.info.TrainId, it.main.col1 == it.inFo.col3],
it.t1.tid == it.t2.t_id,
],
join_type="inner",
)
    # Single join conditions that are not chained to the rest of the predicate.
    # Constructing the Join should raise a ValueError, which the test asserts.
def test_join_single_error1(self):
with self.assertRaisesRegex(ValueError, "t3.*t2.* were used"):
_ = Join(
pred=[
it.t1.tid == it.info.TrainId,
[it.main.train_id == it.info.TrainId, it.main.col1 == it.info.col3],
it.t3.tid == it.t2.t_id,
],
join_type="inner",
)
def test_join_composite_nochain_error(self):
with self.assertRaisesRegex(ValueError, "t3.*t2.* were used"):
_ = Join(
pred=[
it.t1.tid == it.info.TrainId,
[it.main.train_id == it.info.TrainId, it.main.col1 == it.info.col3],
[it.t3.tid == it.t2.t_id, it.t3.TrainId == it.t2.TrainId],
],
join_type="inner",
)
    # Composite key join having conditions that involve more than two tables.
    # Constructing the Join should raise a ValueError, which the test asserts.
def test_join_composite_error2(self):
with self.assertRaisesRegex(
ValueError, "main.*info.*Main.*inFo.*more than two"
):
_ = Join(
pred=[
it.t1.tid == it.info.TrainId,
[it.main.train_id == it.info.TrainId, it.Main.col1 == it.inFo.col3],
it.t1.tid == it.t2.t_id,
],
join_type="inner",
)
    # A table referenced by the join predicate is not present in the input X.
    # The transform call should raise a ValueError, which the test asserts.
def test_join_composite_error3(self):
for _tgt, datasets in self.tgt2datasets.items():
df5, df3 = datasets["df5"], datasets["df3"]
with self.assertRaises(ValueError):
trainable = Join(
pred=[
it.t1.tid == it.info.TrainId,
[
it.main.train_id == it.info.TrainId,
it.main.col1 == it.info.col3,
],
],
join_type="inner",
)
_ = trainable.transform([df5, df3])
# TestCase 1: Go_Sales dataset with different forms of predicate (join conditions)
def test_join_go_sales1(self):
for tgt, datasets in self.tgt2datasets.items():
go_sales = datasets["go_sales"]
trainable = Join(
pred=[
it.go_daily_sales["Retailer code"]
== it["go_retailers"]["Retailer code"]
],
join_type="inner",
)
transformed_df = trainable.transform(go_sales)
order = ["Retailer code", "Product number", "Date"]
if tgt == "pandas":
transformed_df = transformed_df.sort_values(by=order).reset_index(
drop=True
)
self.assertEqual(transformed_df.shape, (149257, 10), tgt)
self.assertEqual(transformed_df["Country"][4], "France", tgt)
elif tgt == "spark":
self.assertEqual(len(get_columns(transformed_df)), 10, tgt)
self.assertEqual(transformed_df.count(), 149257, tgt)
# transformed_df = transformed_df.orderBy(order).collect()
# self.assertEqual(transformed_df[4]["Country"], "France", tgt)
else:
assert False
# TestCase 2: Go_Sales dataset throws error because of duplicate non-key columns
def test_join_go_sales2(self):
for _tgt, datasets in self.tgt2datasets.items():
go_sales = datasets["go_sales"]
trainable = Join(
pred=[
[
it["go_1k"]["Retailer code"]
== it.go_daily_sales["Retailer code"],
it.go_1k["Product number"]
== it["go_daily_sales"]["Product number"],
]
],
join_type="left",
)
with self.assertRaises(ValueError):
_ = trainable.transform(go_sales)
def test_join_index(self):
tgt = "spark"
trainable = Join(
pred=[it.info.idx == it.main.idx, it.info.idx == it.t1.idx],
join_type="inner",
)
df1 = _set_index_name(self.tgt2datasets["pandas"]["df1"], "idx")
df2 = _set_index_name(self.tgt2datasets["pandas"]["df2"], "idx")
df3 = _set_index_name(self.tgt2datasets["pandas"]["df3"], "idx")
df1 = pandas2spark(df1)
df2 = pandas2spark(df2)
df3 = pandas2spark(df3)
transformed_df = trainable.transform([df1, df2, df3])
transformed_df = _ensure_pandas(transformed_df)
self.assertEqual(transformed_df.index.name, "idx", tgt)
transformed_df = transformed_df.sort_values(by="TrainId").reset_index(drop=True)
self.assertEqual(transformed_df.shape, (3, 8), tgt)
self.assertEqual(transformed_df["col5"][1], "Cold", tgt)
def test_join_index_multiple_names(self):
trainable = Join(
pred=[it.info.TrainId == it.main.train_id, it.info.TrainId == it.t1.tid],
join_type="inner",
)
df1 = _set_index(self.tgt2datasets["pandas"]["df1"], "train_id")
df2 = _set_index(self.tgt2datasets["pandas"]["df2"], "TrainId")
df3 = _set_index(self.tgt2datasets["pandas"]["df3"], "tid")
df1 = pandas2spark(df1)
df2 = pandas2spark(df2)
df3 = pandas2spark(df3)
transformed_df = trainable.transform([df1, df2, df3])
transformed_df = _ensure_pandas(transformed_df)
transformed_df = transformed_df.sort_values(by="TrainId").reset_index(drop=True)
self.assertEqual(transformed_df.shape, (3, 6))
self.assertEqual(transformed_df["col5"][1], "Cold")
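# Tests for the Map operator, which applies per-column expressions built from the
# `it` accessor to pandas and Spark dataframes, e.g.
#     Map(columns={"new_gender": it.gender, "dom": day_of_month(it.date_column)})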
class TestMap(unittest.TestCase):
@classmethod
def setUpClass(cls):
targets: List[datatype_param_type] = ["pandas", "spark"]
cls.tgt2datasets = {
tgt: {"go_sales": fetch_go_sales_dataset(tgt)} for tgt in targets
}
def add_df(name, df):
cls.tgt2datasets["pandas"][name] = df
cls.tgt2datasets["spark"][name] = pandas2spark(df)
df = pd.DataFrame(
{
"gender": ["m", "f", "m", "m", "f"],
"state": ["NY", "NY", "CA", "NY", "CA"],
"status": [0, 1, 1, 0, 1],
}
)
add_df("df", df)
df_date = pd.DataFrame(
{"date_column": ["2016-05-28", "2016-06-27", "2016-07-26"]}
)
add_df("df_date", df_date)
df_date_alt = pd.DataFrame(
{"date_column": ["28/05/2016", "27/06/2016", "26/07/2016"]}
)
add_df("df_date_alt", df_date_alt)
df_date_time = pd.DataFrame(
{
"date_column": [
"2016-01-01 15:16:45",
"2016-06-28 12:18:51",
"2016-07-28 01:01:01",
]
}
)
add_df("df_date_time", df_date_time)
df_num = pd.DataFrame(
{
"height": [3, 4, 6, 3, 5],
"weight": [30, 50, 170, 40, 130],
"status": [0, 1, 1, 0, 1],
}
)
add_df("df_num", df_num)
df_month = pd.DataFrame(
{
"month": ["jan", "feb", "mar", "may", "aug"],
}
)
add_df("df_month", df_month)
def test_init(self):
gender_map = {"m": "Male", "f": "Female"}
state_map = {"NY": "New York", "CA": "California"}
_ = Map(columns=[replace(it.gender, gender_map), replace(it.state, state_map)])
    # Column renaming is implemented as part of the Map operator's identity handling:
    # mapping a new name to a plain it.<column> reference renames it, while explicit
    # identity(...) calls do not support renaming for now.
def test_transform_identity_map(self):
trainable = Map(
columns={
"new_gender": it.gender,
"new_status": it["status"],
}
)
for tgt, datasets in self.tgt2datasets.items():
df = datasets["df"]
trained = trainable.fit(df)
transformed_df = trained.transform(df)
df, transformed_df = _ensure_pandas(df), _ensure_pandas(transformed_df)
self.assertEqual(df["gender"][0], transformed_df["new_gender"][0], tgt)
self.assertEqual(df["status"][3], transformed_df["new_status"][3], tgt)
self.assertEqual(len(transformed_df.columns), 2, tgt)
def test_transform_identity_map_implicit_name(self):
trainable = Map(columns=[identity(it.gender), identity(it["status"])])
for tgt, datasets in self.tgt2datasets.items():
df = datasets["df"]
trained = trainable.fit(df)
transformed_df = trained.transform(df)
df, transformed_df = _ensure_pandas(df), _ensure_pandas(transformed_df)
self.assertEqual(df["gender"][0], transformed_df["gender"][0], tgt)
self.assertEqual(df["status"][3], transformed_df["status"][3], tgt)
self.assertEqual(len(transformed_df.columns), 2, tgt)
def test_transform_identity_map_passthrough(self):
trainable = Map(
columns={
"new_gender": it.gender,
"new_status": it["status"],
},
remainder="passthrough",
)
for _tgt, datasets in self.tgt2datasets.items():
df = datasets["df"]
trained = trainable.fit(df)
transformed_df = trained.transform(df)
df, transformed_df = _ensure_pandas(df), _ensure_pandas(transformed_df)
self.assertEqual(df["gender"][0], transformed_df["new_gender"][0])
self.assertEqual(df["status"][3], transformed_df["new_status"][3])
self.assertEqual(df["state"][3], transformed_df["state"][3])
self.assertEqual(len(transformed_df.columns), 3)
def test_transform_identity_map_error(self):
for _tgt, datasets in self.tgt2datasets.items():
df = datasets["df"]
with self.assertRaises(ValueError):
trainable = Map(columns={" ": it.gender})
trained = trainable.fit(df)
_ = trained.transform(df)
with self.assertRaises(ValueError):
trainable = Map(columns={"new_name": it[" "]})
trained = trainable.fit(df)
_ = trained.transform(df)
with self.assertRaises(ValueError):
trainable = Map(columns=[it.gender])
trained = trainable.fit(df)
_ = trained.transform(df)
with self.assertRaises(ValueError):
trainable = Map(columns=[it.dummy])
trained = trainable.fit(df)
_ = trained.transform(df)
def test_transform_replace_list_and_remainder(self):
gender_map = {"m": "Male", "f": "Female"}
state_map = {"NY": "New York", "CA": "California"}
trainable = Map(
columns=[replace(it.gender, gender_map), replace(it.state, state_map)],
remainder="passthrough",
)
for _tgt, datasets in self.tgt2datasets.items():
df = datasets["df"]
trained = trainable.fit(df)
transformed_df = trained.transform(df)
df, transformed_df = _ensure_pandas(df), _ensure_pandas(transformed_df)
self.assertEqual(transformed_df.shape, (5, 3))
self.assertEqual(transformed_df["gender"][0], "Male")
self.assertEqual(transformed_df["state"][0], "New York")
self.assertEqual(transformed_df["status"][0], 0)
def test_transform_replace_list(self):
gender_map = {"m": "Male", "f": "Female"}
state_map = {"NY": "New York", "CA": "California"}
trainable = Map(
columns=[replace(it.gender, gender_map), replace(it.state, state_map)]
)
for _tgt, datasets in self.tgt2datasets.items():
df = datasets["df"]
trained = trainable.fit(df)
transformed_df = trained.transform(df)
df, transformed_df = _ensure_pandas(df), _ensure_pandas(transformed_df)
self.assertEqual(transformed_df.shape, (5, 2))
self.assertEqual(transformed_df["gender"][0], "Male")
self.assertEqual(transformed_df["state"][0], "New York")
def test_transform_replace_map(self):
gender_map = {"m": "Male", "f": "Female"}
state_map = {"NY": "New York", "CA": "California"}
trainable = Map(
columns={
"new_gender": replace(it.gender, gender_map),
"new_state": replace(it.state, state_map),
}
)
for _tgt, datasets in self.tgt2datasets.items():
df = datasets["df"]
trained = trainable.fit(df)
transformed_df = trained.transform(df)
df, transformed_df = _ensure_pandas(df), _ensure_pandas(transformed_df)
self.assertEqual(transformed_df.shape, (5, 2))
self.assertEqual(transformed_df["new_gender"][0], "Male")
self.assertEqual(transformed_df["new_state"][0], "New York")
def test_transform_dom_list(self):
trainable = Map(columns=[day_of_month(it.date_column)])
for _tgt, datasets in self.tgt2datasets.items():
df = datasets["df_date"]
trained = trainable.fit(df)
transformed_df = trained.transform(df)
df, transformed_df = _ensure_pandas(df), _ensure_pandas(transformed_df)
self.assertEqual(transformed_df["date_column"][0], 28)
self.assertEqual(transformed_df["date_column"][1], 27)
self.assertEqual(transformed_df["date_column"][2], 26)
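    # The *_fmt_* tests below pass an explicit date format string. Pandas expects
    # strftime-style codes (e.g. "%Y-%m-%d") whereas Spark expects Java-style
    # datetime patterns (e.g. "y-M-d"), so the tests branch on the target backend.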
def test_transform_dom_fmt_list(self):
for tgt, datasets in self.tgt2datasets.items():
if tgt == "pandas":
trainable = Map(columns=[day_of_month(it.date_column, "%Y-%m-%d")])
elif tgt == "spark":
trainable = Map(columns=[day_of_month(it.date_column, "y-M-d")])
else:
assert False
df = datasets["df_date"]
trained = trainable.fit(df)
transformed_df = trained.transform(df)
df, transformed_df = _ensure_pandas(df), _ensure_pandas(transformed_df)
self.assertEqual(transformed_df["date_column"][0], 28)
self.assertEqual(transformed_df["date_column"][1], 27)
self.assertEqual(transformed_df["date_column"][2], 26)
def test_transform_dom_fmt_map(self):
for tgt, datasets in self.tgt2datasets.items():
if tgt == "pandas":
trainable = Map(
columns={"dom": day_of_month(it.date_column, "%Y-%m-%d")}
)
elif tgt == "spark":
trainable = Map(columns={"dom": day_of_month(it.date_column, "y-M-d")})
else:
assert False
df = datasets["df_date"]
trained = trainable.fit(df)
transformed_df = trained.transform(df)
df, transformed_df = _ensure_pandas(df), _ensure_pandas(transformed_df)
self.assertEqual(transformed_df.shape, (3, 1))
self.assertEqual(transformed_df["dom"][0], 28)
self.assertEqual(transformed_df["dom"][1], 27)
self.assertEqual(transformed_df["dom"][2], 26)
def test_transform_dow_list(self):
trainable = Map(columns=[day_of_week(it.date_column)])
for tgt, datasets in self.tgt2datasets.items():
            # Spark and Pandas have different semantics for `day_of_week`
df = datasets["df_date"]
trained = trainable.fit(df)
transformed_df = trained.transform(df)
if tgt == "pandas":
self.assertEqual(transformed_df["date_column"][0], 5)
self.assertEqual(transformed_df["date_column"][1], 0)
self.assertEqual(transformed_df["date_column"][2], 1)
elif tgt == "spark":
df, transformed_df = _ensure_pandas(df), _ensure_pandas(transformed_df)
self.assertEqual(transformed_df["date_column"][0], 7)
self.assertEqual(transformed_df["date_column"][1], 2)
self.assertEqual(transformed_df["date_column"][2], 3)
else:
assert False
def test_transform_dow_fmt_list(self):
for tgt, datasets in self.tgt2datasets.items():
if tgt == "pandas":
trainable = Map(columns=[day_of_week(it.date_column, "%Y-%m-%d")])
df = datasets["df_date"]
trained = trainable.fit(df)
transformed_df = trained.transform(df)
self.assertEqual(transformed_df.shape, (3, 1))
self.assertEqual(transformed_df["date_column"][0], 5)
self.assertEqual(transformed_df["date_column"][1], 0)
self.assertEqual(transformed_df["date_column"][2], 1)
elif tgt == "spark":
trainable = Map(columns=[day_of_week(it.date_column, "y-M-d")])
df = datasets["df_date"]
trained = trainable.fit(df)
transformed_df = trained.transform(df)
df, transformed_df = _ensure_pandas(df), _ensure_pandas(transformed_df)
self.assertEqual(transformed_df.shape, (3, 1))
self.assertEqual(transformed_df["date_column"][0], 7)
self.assertEqual(transformed_df["date_column"][1], 2)
self.assertEqual(transformed_df["date_column"][2], 3)
else:
assert False
def test_transform_dow_fmt_map(self):
for tgt, datasets in self.tgt2datasets.items():
if tgt == "pandas":
trainable = Map(
columns={"dow": day_of_week(it.date_column, "%Y-%m-%d")}
)
df = datasets["df_date"]
trained = trainable.fit(df)
transformed_df = trained.transform(df)
df, transformed_df = _ensure_pandas(df), _ensure_pandas(transformed_df)
self.assertEqual(transformed_df.shape, (3, 1))
self.assertEqual(transformed_df["dow"][0], 5)
self.assertEqual(transformed_df["dow"][1], 0)
self.assertEqual(transformed_df["dow"][2], 1)
elif tgt == "spark":
trainable = Map(columns={"dow": day_of_week(it.date_column, "y-M-d")})
df = datasets["df_date"]
trained = trainable.fit(df)
transformed_df = trained.transform(df)
df, transformed_df = _ensure_pandas(df), _ensure_pandas(transformed_df)
self.assertEqual(transformed_df.shape, (3, 1))
self.assertEqual(transformed_df["dow"][0], 7)
self.assertEqual(transformed_df["dow"][1], 2)
self.assertEqual(transformed_df["dow"][2], 3)
else:
assert False
def test_transform_doy_list(self):
trainable = Map(columns=[day_of_year(it.date_column)])
for _tgt, datasets in self.tgt2datasets.items():
df = datasets["df_date"]
trained = trainable.fit(df)
transformed_df = trained.transform(df)
df, transformed_df = _ensure_pandas(df), _ensure_pandas(transformed_df)
self.assertEqual(transformed_df["date_column"][0], 149)
self.assertEqual(transformed_df["date_column"][1], 179)
self.assertEqual(transformed_df["date_column"][2], 208)
def test_transform_doy_fmt_list(self):
for tgt, datasets in self.tgt2datasets.items():
if tgt == "pandas":
trainable = Map(columns=[day_of_year(it.date_column, "%Y-%m-%d")])
elif tgt == "spark":
trainable = Map(columns=[day_of_year(it.date_column, "y-M-d")])
else:
assert False
df = datasets["df_date"]
trained = trainable.fit(df)
transformed_df = trained.transform(df)
df, transformed_df = _ensure_pandas(df), _ensure_pandas(transformed_df)
self.assertEqual(transformed_df["date_column"][0], 149)
self.assertEqual(transformed_df["date_column"][1], 179)
self.assertEqual(transformed_df["date_column"][2], 208)
def test_transform_doy_fmt_map(self):
for tgt, datasets in self.tgt2datasets.items():
if tgt == "pandas":
trainable = Map(
columns={"doy": day_of_year(it.date_column, "%Y-%m-%d")}
)
elif tgt == "spark":
trainable = Map(columns={"doy": day_of_year(it.date_column, "y-M-d")})
else:
assert False
df = datasets["df_date"]
trained = trainable.fit(df)
transformed_df = trained.transform(df)
df, transformed_df = _ensure_pandas(df), _ensure_pandas(transformed_df)
self.assertEqual(transformed_df.shape, (3, 1))
self.assertEqual(transformed_df["doy"][0], 149)
self.assertEqual(transformed_df["doy"][1], 179)
self.assertEqual(transformed_df["doy"][2], 208)
def test_transform_hour_list(self):
trainable = Map(columns=[hour(it.date_column)])
for _tgt, datasets in self.tgt2datasets.items():
df = datasets["df_date_time"]
trained = trainable.fit(df)
transformed_df = trained.transform(df)
df, transformed_df = _ensure_pandas(df), _ensure_pandas(transformed_df)
self.assertEqual(transformed_df["date_column"][0], 15)
self.assertEqual(transformed_df["date_column"][1], 12)
self.assertEqual(transformed_df["date_column"][2], 1)
def test_transform_hour_fmt_list(self):
for tgt, datasets in self.tgt2datasets.items():
if tgt == "pandas":
trainable = Map(columns=[hour(it.date_column, "%Y-%m-%d %H:%M:%S")])
elif tgt == "spark":
trainable = Map(columns=[hour(it.date_column, "y-M-d HH:mm:ss")])
else:
assert False
df = datasets["df_date_time"]
trained = trainable.fit(df)
transformed_df = trained.transform(df)
df, transformed_df = _ensure_pandas(df), _ensure_pandas(transformed_df)
self.assertEqual(transformed_df["date_column"][0], 15)
self.assertEqual(transformed_df["date_column"][1], 12)
self.assertEqual(transformed_df["date_column"][2], 1)
def test_transform_hour_fmt_map(self):
for tgt, datasets in self.tgt2datasets.items():
if tgt == "pandas":
trainable = Map(
columns={"hour": hour(it.date_column, "%Y-%m-%d %H:%M:%S")}
)
elif tgt == "spark":
trainable = Map(
columns={"hour": hour(it.date_column, "y-M-d HH:mm:ss")}
)
else:
assert False
df = datasets["df_date_time"]
trained = trainable.fit(df)
transformed_df = trained.transform(df)
df, transformed_df = _ensure_pandas(df), _ensure_pandas(transformed_df)
self.assertEqual(transformed_df.shape, (3, 1))
self.assertEqual(transformed_df["hour"][0], 15)
self.assertEqual(transformed_df["hour"][1], 12)
self.assertEqual(transformed_df["hour"][2], 1)
def test_transform_minute_list(self):
trainable = Map(columns=[minute(it.date_column)])
for _tgt, datasets in self.tgt2datasets.items():
df = datasets["df_date_time"]
trained = trainable.fit(df)
transformed_df = trained.transform(df)
df, transformed_df = _ensure_pandas(df), _ensure_pandas(transformed_df)
self.assertEqual(transformed_df["date_column"][0], 16)
self.assertEqual(transformed_df["date_column"][1], 18)
self.assertEqual(transformed_df["date_column"][2], 1)
def test_transform_minute_fmt_list(self):
for tgt, datasets in self.tgt2datasets.items():
if tgt == "pandas":
trainable = Map(columns=[minute(it.date_column, "%Y-%m-%d %H:%M:%S")])
elif tgt == "spark":
trainable = Map(columns=[minute(it.date_column, "y-M-d HH:mm:ss")])
else:
assert False
df = datasets["df_date_time"]
trained = trainable.fit(df)
transformed_df = trained.transform(df)
df, transformed_df = _ensure_pandas(df), _ensure_pandas(transformed_df)
self.assertEqual(transformed_df["date_column"][0], 16)
self.assertEqual(transformed_df["date_column"][1], 18)
self.assertEqual(transformed_df["date_column"][2], 1)
def test_transform_minute_fmt_map(self):
for tgt, datasets in self.tgt2datasets.items():
if tgt == "pandas":
trainable = Map(
columns={"minute": minute(it.date_column, "%Y-%m-%d %H:%M:%S")}
)
elif tgt == "spark":
trainable = Map(
columns={"minute": minute(it.date_column, "y-M-d HH:mm:ss")}
)
else:
assert False
df = datasets["df_date_time"]
trained = trainable.fit(df)
transformed_df = trained.transform(df)
df, transformed_df = _ensure_pandas(df), _ensure_pandas(transformed_df)
self.assertEqual(transformed_df.shape, (3, 1))
self.assertEqual(transformed_df["minute"][0], 16)
self.assertEqual(transformed_df["minute"][1], 18)
self.assertEqual(transformed_df["minute"][2], 1)
def test_transform_month_list(self):
trainable = Map(columns=[month(it.date_column)])
for _tgt, datasets in self.tgt2datasets.items():
df = datasets["df_date_time"]
trained = trainable.fit(df)
transformed_df = trained.transform(df)
df, transformed_df = _ensure_pandas(df), _ensure_pandas(transformed_df)
self.assertEqual(transformed_df["date_column"][0], 1)
self.assertEqual(transformed_df["date_column"][1], 6)
self.assertEqual(transformed_df["date_column"][2], 7)
def test_transform_month_fmt_list(self):
for tgt, datasets in self.tgt2datasets.items():
if tgt == "pandas":
trainable = Map(columns=[month(it.date_column, "%Y-%m-%d %H:%M:%S")])
elif tgt == "spark":
trainable = Map(columns=[month(it.date_column, "y-M-d HH:mm:ss")])
else:
assert False
df = datasets["df_date_time"]
trained = trainable.fit(df)
transformed_df = trained.transform(df)
df, transformed_df = _ensure_pandas(df), _ensure_pandas(transformed_df)
self.assertEqual(transformed_df["date_column"][0], 1)
self.assertEqual(transformed_df["date_column"][1], 6)
self.assertEqual(transformed_df["date_column"][2], 7)
def test_transform_month_fmt_map(self):
for tgt, datasets in self.tgt2datasets.items():
if tgt == "pandas":
trainable = Map(
columns={"month": month(it.date_column, "%Y-%m-%d %H:%M:%S")}
)
elif tgt == "spark":
trainable = Map(
columns={"month": month(it.date_column, "y-M-d HH:mm:ss")}
)
else:
assert False
df = datasets["df_date_time"]
trained = trainable.fit(df)
transformed_df = trained.transform(df)
df, transformed_df = _ensure_pandas(df), _ensure_pandas(transformed_df)
self.assertEqual(transformed_df.shape, (3, 1))
self.assertEqual(transformed_df["month"][0], 1)
self.assertEqual(transformed_df["month"][1], 6)
self.assertEqual(transformed_df["month"][2], 7)
def test_not_expression(self):
with EnableSchemaValidation():
with self.assertRaises(jsonschema.ValidationError):
_ = Map(columns=[123, "hello"])
def test_pandas_with_hyperopt(self):
X, y = load_iris(return_X_y=True)
gender_map = {"m": "Male", "f": "Female"}
state_map = {"NY": "New York", "CA": "California"}
map_replace = Map(
columns=[replace(it.gender, gender_map), replace(it.state, state_map)],
remainder="drop",
)
pipeline = (
Relational(
operator=(Scan(table=it.main) & Scan(table=it.delay)) >> map_replace
)
>> LogisticRegression()
)
opt = Hyperopt(estimator=pipeline, cv=3, max_evals=5)
trained = opt.fit(X, y)
_ = trained
def test_string_indexer_map(self):
trainable = Map(columns={"c": string_indexer(it.date_column)})
for _tgt, datasets in self.tgt2datasets.items():
df = datasets["df_date_time"]
trained = trainable.fit(df)
with self.assertRaises(ValueError):
_ = trained.transform(df)
    def test_pandas_with_hyperopt2(self):
wrap_imported_operators()
scan = Scan(table=it["main"])
scan_0 = Scan(table=it["customers"])
join = Join(
pred=[
(
it["main"]["group_customer_id"]
== it["customers"]["group_customer_id"]
)
]
)
map_op = Map(
columns={
"[main](group_customer_id)[customers]|number_children|identity": it[
"number_children"
],
"[main](group_customer_id)[customers]|name|identity": it["name"],
"[main](group_customer_id)[customers]|income|identity": it["income"],
"[main](group_customer_id)[customers]|address|identity": it["address"],
"[main](group_customer_id)[customers]|age|identity": it["age"],
},
remainder="drop",
)
pipeline_4 = join >> map_op
scan_1 = Scan(table=it["purchase"])
join_0 = Join(
pred=[(it["main"]["group_id"] == it["purchase"]["group_id"])],
join_limit=50.0,
)
aggregate = Aggregate(
columns={
"[main](group_id)[purchase]|price|variance": variance(it["price"]),
"[main](group_id)[purchase]|time|sum": sum(it["time"]),
"[main](group_id)[purchase]|time|mean": mean(it["time"]),
"[main](group_id)[purchase]|time|min": min(it["time"]),
"[main](group_id)[purchase]|price|sum": sum(it["price"]),
"[main](group_id)[purchase]|price|count": count(it["price"]),
"[main](group_id)[purchase]|price|mean": mean(it["price"]),
"[main](group_id)[purchase]|price|min": min(it["price"]),
"[main](group_id)[purchase]|price|max": max(it["price"]),
"[main](group_id)[purchase]|time|max": max(it["time"]),
"[main](group_id)[purchase]|time|variance": variance(it["time"]),
},
group_by=it["row_id"],
)
pipeline_5 = join_0 >> aggregate
map_0 = Map(
columns={
"[main]|group_customer_id|identity": it["group_customer_id"],
"[main]|transaction_id|identity": it["transaction_id"],
"[main]|group_id|identity": it["group_id"],
"[main]|comments|identity": it["comments"],
"[main]|id|identity": it["id"],
"prefix_0_id": it["prefix_0_id"],
"next_purchase": it["next_purchase"],
"[main]|time|identity": it["time"],
},
remainder="drop",
)
scan_2 = Scan(table=it["transactions"])
scan_3 = Scan(table=it["products"])
join_1 = Join(
pred=[
(it["main"]["transaction_id"] == it["transactions"]["transaction_id"]),
(it["transactions"]["product_id"] == it["products"]["product_id"]),
]
)
map_1 = Map(
columns={
"[main](transaction_id)[transactions](product_id)[products]|price|identity": it[
"price"
],
"[main](transaction_id)[transactions](product_id)[products]|type|identity": it[
"type"
],
},
remainder="drop",
)
pipeline_6 = join_1 >> map_1
join_2 = Join(
pred=[
(it["main"]["transaction_id"] == it["transactions"]["transaction_id"])
]
)
map_2 = Map(
columns={
"[main](transaction_id)[transactions]|description|identity": it[
"description"
],
"[main](transaction_id)[transactions]|product_id|identity": it[
"product_id"
],
},
remainder="drop",
)
pipeline_7 = join_2 >> map_2
map_3 = Map(
columns=[
string_indexer(it["[main]|comments|identity"]),
string_indexer(
it["[main](transaction_id)[transactions]|description|identity"]
),
string_indexer(
it[
"[main](transaction_id)[transactions](product_id)[products]|type|identity"
]
),
string_indexer(
it["[main](group_customer_id)[customers]|name|identity"]
),
string_indexer(
it["[main](group_customer_id)[customers]|address|identity"]
),
]
)
pipeline_8 = ConcatFeatures() >> map_3
relational = Relational(
operator=lale.operators.make_pipeline_graph(
steps=[
scan,
scan_0,
pipeline_4,
scan_1,
pipeline_5,
map_0,
scan_2,
scan_3,
pipeline_6,
pipeline_7,
pipeline_8,
],
edges=[
(scan, pipeline_4),
(scan, pipeline_5),
(scan, map_0),
(scan, pipeline_6),
(scan, pipeline_7),
(scan_0, pipeline_4),
(pipeline_4, pipeline_8),
(scan_1, pipeline_5),
(pipeline_5, pipeline_8),
(map_0, pipeline_8),
(scan_2, pipeline_6),
(scan_2, pipeline_7),
(scan_3, pipeline_6),
(pipeline_6, pipeline_8),
(pipeline_7, pipeline_8),
],
)
)
pipeline = relational >> (KNeighborsClassifier | LogisticRegression)
X, y = load_iris(return_X_y=True)
opt = Hyperopt(estimator=pipeline, max_evals=2)
opt.fit(X, y)
def test_transform_ratio_map(self):
trainable = Map(columns={"ratio_h_w": it.height / it.weight})
for _tgt, datasets in self.tgt2datasets.items():
df = datasets["df_num"]
trained = trainable.fit(df)
transformed_df = trained.transform(df)
df, transformed_df = _ensure_pandas(df), _ensure_pandas(transformed_df)
self.assertEqual(transformed_df.shape, (5, 1))
self.assertEqual(transformed_df["ratio_h_w"][0], 0.1)
def test_transform_ratio_map_subscript(self):
trainable = Map(columns={"ratio_h_w": it["height"] / it.weight})
for _tgt, datasets in self.tgt2datasets.items():
df = datasets["df_num"]
trained = trainable.fit(df)
transformed_df = trained.transform(df)
df, transformed_df = _ensure_pandas(df), _ensure_pandas(transformed_df)
self.assertEqual(transformed_df.shape, (5, 1))
self.assertEqual(transformed_df["ratio_h_w"][0], 0.1)
def test_transform_ratio_map_list(self):
trainable = Map(columns=[it.height / it.weight])
for _tgt, datasets in self.tgt2datasets.items():
df = datasets["df_num"]
trained = trainable.fit(df)
with self.assertRaises(ValueError):
_ = trained.transform(df)
def test_transform_subtract_map(self):
trainable = Map(columns={"subtract_h_w": it.height - it.weight})
for _tgt, datasets in self.tgt2datasets.items():
df = datasets["df_num"]
trained = trainable.fit(df)
transformed_df = trained.transform(df)
df, transformed_df = _ensure_pandas(df), _ensure_pandas(transformed_df)
self.assertEqual(transformed_df.shape, (5, 1))
self.assertEqual(transformed_df["subtract_h_w"][0], -27)
def test_transform_subtract_map_subscript(self):
trainable = Map(columns={"subtract_h_w": it["height"] - it.weight})
for _tgt, datasets in self.tgt2datasets.items():
df = datasets["df_num"]
trained = trainable.fit(df)
transformed_df = trained.transform(df)
df, transformed_df = _ensure_pandas(df), _ensure_pandas(transformed_df)
self.assertEqual(transformed_df.shape, (5, 1))
self.assertEqual(transformed_df["subtract_h_w"][0], -27)
def test_transform_subtract_map_list(self):
trainable = Map(columns=[it.height - it.weight])
for _tgt, datasets in self.tgt2datasets.items():
df = datasets["df_num"]
trained = trainable.fit(df)
with self.assertRaises(ValueError):
_ = trained.transform(df)
def test_transform_binops(self):
trainable = Map(
columns={
"add_h_w": it["height"] + it.weight,
"add_h_2": it["height"] + 2,
"sub_h_w": it["height"] - it.weight,
"sub_h_2": it["height"] - 2,
"mul_h_w": it["height"] * it.weight,
"mul_h_2": it["height"] * 2,
"div_h_w": it["height"] / it.weight,
"div_h_2": it["height"] / 2,
"floor_div_h_w": it["height"] // it.weight,
"floor_div_h_2": it["height"] // 2,
"mod_h_w": it["height"] % it.weight,
"mod_h_2": it["height"] % 2,
"pow_h_w": it["height"] ** it.weight,
"pow_h_2": it["height"] ** 2,
}
)
for tgt, datasets in self.tgt2datasets.items():
df = datasets["df_num"]
trained = trainable.fit(df)
transformed_df = trained.transform(df)
df, transformed_df = _ensure_pandas(df), _ensure_pandas(transformed_df)
self.assertEqual(transformed_df.shape, (5, 14))
self.assertEqual(
transformed_df["add_h_w"][1], df["height"][1] + df["weight"][1]
)
self.assertEqual(transformed_df["add_h_2"][1], df["height"][1] + 2)
self.assertEqual(
transformed_df["sub_h_w"][1], df["height"][1] - df["weight"][1]
)
self.assertEqual(transformed_df["sub_h_2"][1], df["height"][1] - 2)
self.assertEqual(
transformed_df["mul_h_w"][1], df["height"][1] * df["weight"][1]
)
self.assertEqual(transformed_df["mul_h_2"][1], df["height"][1] * 2)
self.assertEqual(
transformed_df["div_h_w"][1], df["height"][1] / df["weight"][1]
)
self.assertEqual(transformed_df["div_h_2"][1], df["height"][1] / 2)
self.assertEqual(
transformed_df["floor_div_h_w"][1], df["height"][1] // df["weight"][1]
)
self.assertEqual(transformed_df["floor_div_h_2"][1], df["height"][1] // 2)
self.assertEqual(
transformed_df["mod_h_w"][1], df["height"][1] % df["weight"][1]
)
self.assertEqual(transformed_df["mod_h_2"][1], df["height"][1] % 2)
if tgt == "pandas":
self.assertEqual(
transformed_df["pow_h_w"][1], df["height"][1] ** df["weight"][1]
)
elif tgt == "spark":
                # Spark and Pandas have different semantics for large numbers
self.assertEqual(transformed_df["pow_h_w"][1], 4**50)
else:
assert False
self.assertEqual(transformed_df["pow_h_2"][1], df["height"][1] ** 2)
def test_transform_arithmetic_expression(self):
trainable = Map(columns={"expr": (it["height"] + it.weight * 10) / 2})
for _tgt, datasets in self.tgt2datasets.items():
df = datasets["df_num"]
trained = trainable.fit(df)
transformed_df = trained.transform(df)
df, transformed_df = _ensure_pandas(df), _ensure_pandas(transformed_df)
self.assertEqual(transformed_df.shape, (5, 1))
self.assertEqual(
transformed_df["expr"][2], (df["height"][2] + df["weight"][2] * 10) / 2
)
def test_transform_nested_expressions(self):
month_map = {
"jan": "2021-01-01",
"feb": "2021-02-01",
"mar": "2021-03-01",
"arp": "2021-04-01",
"may": "2021-05-01",
"jun": "2021-06-01",
"jul": "2021-07-01",
"aug": "2021-08-01",
"sep": "2021-09-01",
"oct": "2021-10-01",
"nov": "2021-11-01",
"dec": "2021-12-01",
}
for tgt, datasets in self.tgt2datasets.items():
if tgt == "pandas":
trainable = Map(
columns={
"date": replace(it.month, month_map),
"month_id": month(replace(it.month, month_map), "%Y-%m-%d"),
"next_month_id": identity(
month(replace(it.month, month_map), "%Y-%m-%d") % 12 + 1
),
}
)
elif tgt == "spark":
trainable = Map(
columns={
"date": replace(it.month, month_map),
"month_id": month(replace(it.month, month_map), "y-M-d"),
"next_month_id": identity(
month(replace(it.month, month_map), "y-M-d") % 12 + 1
),
}
)
else:
assert False
df = datasets["df_month"]
trained = trainable.fit(df)
transformed_df = trained.transform(df)
df, transformed_df = _ensure_pandas(df), _ensure_pandas(transformed_df)
self.assertEqual(transformed_df["date"][0], "2021-01-01")
self.assertEqual(transformed_df["date"][1], "2021-02-01")
self.assertEqual(transformed_df["month_id"][2], 3)
self.assertEqual(transformed_df["month_id"][3], 5)
self.assertEqual(transformed_df["next_month_id"][0], 2)
self.assertEqual(transformed_df["next_month_id"][3], 6)
self.assertEqual(transformed_df["next_month_id"][4], 9)
def test_replace_unknown_identity(self):
pipeline = Scan(table=it.go_products) >> Map(
columns={
"prod": it["Product number"],
"line": replace(
it["Product line"],
{"Camping Equipment": "C", "Personal Accessories": "P"},
),
}
)
for tgt, datasets in self.tgt2datasets.items():
result = pipeline.transform(datasets["go_sales"])
result = _ensure_pandas(result)
self.assertEqual(result.shape, (274, 2))
self.assertEqual(result.loc[0, "prod"], 1110, tgt)
self.assertEqual(result.loc[0, "line"], "C", tgt)
self.assertEqual(result.loc[117, "prod"], 101110, tgt)
self.assertEqual(result.loc[117, "line"], "Golf Equipment", tgt)
self.assertEqual(result.loc[273, "prod"], 154150, tgt)
self.assertEqual(result.loc[273, "line"], "P", tgt)
def test_replace_unknown_encoded(self):
pipeline = Scan(table=it.go_products) >> Map(
columns={
"prod": it["Product number"],
"line": replace(
it["Product line"],
{"Camping Equipment": "C", "Personal Accessories": "P"},
handle_unknown="use_encoded_value",
unknown_value="U",
),
}
)
for tgt, datasets in self.tgt2datasets.items():
result = pipeline.transform(datasets["go_sales"])
result = _ensure_pandas(result)
self.assertEqual(result.shape, (274, 2))
self.assertEqual(result.loc[0, "prod"], 1110, tgt)
self.assertEqual(result.loc[0, "line"], "C", tgt)
self.assertEqual(result.loc[117, "prod"], 101110, tgt)
self.assertEqual(result.loc[117, "line"], "U", tgt)
self.assertEqual(result.loc[273, "prod"], 154150, tgt)
self.assertEqual(result.loc[273, "line"], "P", tgt)
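    # Map also accepts a callable for `columns`: it receives the input data and
    # returns the column expressions, and is resolved when the pipeline is fit,
    # as the dynamic tests below exercise.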
def test_dynamic_rename(self):
def expr(X):
return {("new_" + c): it[c] for c in X.columns}
pipeline = Scan(table=it.go_products) >> Map(columns=expr)
for _tgt, datasets in self.tgt2datasets.items():
datasets = datasets["go_sales"]
result = pipeline.fit(datasets).transform(datasets)
result = _ensure_pandas(result)
for c in result.columns:
self.assertRegex(c, "new_.*")
def test_dynamic_rename_lambda(self):
pipeline = Scan(table=it.go_products) >> Map(
columns=lambda X: {("new_" + c): it[c] for c in X.columns}
)
for _tgt, datasets in self.tgt2datasets.items():
datasets = datasets["go_sales"]
result = pipeline.fit(datasets).transform(datasets)
result = _ensure_pandas(result)
for c in result.columns:
self.assertRegex(c, "new_.*")
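    # Helper that extracts per-column schemas from the dataset's schema annotation
    # (via lale.datasets.data_schemas.to_schema); used by the dynamic-column tests.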
def _get_col_schemas(self, cols, X):
from lale.datasets import data_schemas
props = {}
s = data_schemas.to_schema(X)
if s is not None:
inner = s.get("items", {})
if inner is not None and isinstance(inner, dict):
col_pairs = inner.get("items", [])
if col_pairs is not None and isinstance(col_pairs, list):
for cp in col_pairs:
d = cp.get("description", None)
if d is not None and isinstance(d, str):
props[d] = cp
for k in cols:
if k not in props:
props[k] = None
return props
def test_dynamic_schema_num(self):
from lale import type_checking
def expr(X):
ret = {}
schemas = self._get_col_schemas(X.columns, X)
for c, s in schemas.items():
if s is None:
ret["unknown_" + c] = it[c]
elif type_checking.is_subschema(
s, make_optional_schema({"type": "number"})
):
ret["num_" + c] = it[c]
ret["shifted_" + c] = it[c] + 5
else:
ret["other_" + c] = it[c]
return ret
pipeline = Scan(table=it.go_products) >> Map(columns=expr)
for _tgt, datasets in self.tgt2datasets.items():
datasets = datasets["go_sales"]
result = pipeline.fit(datasets).transform(datasets)
result = _ensure_pandas(result)
self.assertIn("num_Product number", result.columns)
self.assertIn("shifted_Product number", result.columns)
self.assertIn("other_Product line", result.columns)
self.assertEqual(
result["num_Product number"][0] + 5, result["shifted_Product number"][0]
)
def test_dynamic_categorical(self):
from lale.lib.rasl import categorical
def expr(X):
ret = {}
cats = categorical()(X)
for c in X.columns:
if c in cats:
ret["cat_" + c] = it[c]
else:
ret["other_" + c] = it[c]
return ret
pipeline = Scan(table=it.go_products) >> Map(columns=expr)
for _tgt, datasets in self.tgt2datasets.items():
datasets = datasets["go_sales"]
result = pipeline.fit(datasets).transform(datasets)
result = _ensure_pandas(result)
self.assertIn("cat_Product line", result.columns)
def test_dynamic_lambda_categorical_drop(self):
from lale.lib.rasl import categorical
pipeline = Scan(table=it.go_products) >> Map(
columns=lambda X: {c: it[c] for c in categorical()(_ensure_pandas(X))}
)
for _tgt, datasets in self.tgt2datasets.items():
datasets = datasets["go_sales"]
result = pipeline.fit(datasets).transform(datasets)
result = _ensure_pandas(result)
self.assertEqual(len(result.columns), 1)
self.assertIn("Product line", result.columns)
def test_static_trained(self):
op = Map(columns=[it.col])
self.assertIsInstance(op, lale.operators.TrainedOperator)
def test_dynamic_trainable(self):
op = Map(columns=lambda X: [it.col])
self.assertIsInstance(op, lale.operators.TrainableOperator)
self.assertNotIsInstance(op, lale.operators.TrainedOperator)
pipeline = Scan(table=it.go_products) >> op
pd_data = self.tgt2datasets["pandas"]["go_sales"]
trained = pipeline.fit(pd_data)
trained_map = trained.steps_list()[1]
self.assertIsInstance(trained_map, Map) # type: ignore
self.assertIsInstance(trained_map, lale.operators.TrainedOperator)
def test_project(self):
from lale.lib.lale import Project
pipeline = Scan(table=it.go_products) >> Project(
columns=make_optional_schema({"type": "number"})
)
for _tgt, datasets in self.tgt2datasets.items():
datasets = datasets["go_sales"]
result = pipeline.fit(datasets).transform(datasets)
result = _ensure_pandas(result)
self.assertIn("Product number", result.columns)
self.assertNotIn("Product line", result.columns)
def assertSeriesEqual(self, first_series, second_series, msg=None):
self.assertIsInstance(first_series, pd.Series, msg)
self.assertIsInstance(second_series, pd.Series, msg)
self.assertEqual(first_series.shape, second_series.shape, msg)
self.assertEqual(list(first_series), list(second_series), msg)
def test_transform_compare_ops(self):
trained = Map(
columns={
"height<=5": it.height <= 5,
"int(height<=5)": astype("int", it.height <= 5),
"4==height": 4 == it.height,
"height*10==weight": it.height * 10 == it.weight,
"height>3&<=5": (it.height > 3) & (it.height <= 5),
"height<=3|>5": (it.height <= 3) | (it.height > 5),
}
)
for tgt, datasets in self.tgt2datasets.items():
df = datasets["df_num"]
transformed_df = trained.transform(df)
df, transformed_df = _ensure_pandas(df), _ensure_pandas(transformed_df)
self.assertSeriesEqual(transformed_df["height<=5"], df["height"] <= 5, tgt)
self.assertSeriesEqual(
transformed_df["int(height<=5)"], (df["height"] <= 5).astype(int), tgt
)
self.assertSeriesEqual(transformed_df["4==height"], 4 == df["height"], tgt)
self.assertSeriesEqual(
transformed_df["height*10==weight"], df["height"] * 10 == df.weight, tgt
)
self.assertSeriesEqual(
transformed_df["height>3&<=5"], (df["height"] > 3) & (df["height"] <= 5)
)
self.assertSeriesEqual(
transformed_df["height<=3|>5"], (df["height"] <= 3) | (df["height"] > 5)
)
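    # ite(condition, value_if_true, value_if_false) builds an if-then-else column
    # expression; nested ite calls express clipping, as in "clip_50_150" below.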
def test_if_then_else_function(self):
pretrained = Map(
columns={
"weight": it["weight"],
"w<50": ite(it["weight"] < 50, "<50", ">=50"),
"clip_50_inf": ite(it["weight"] < 50, 50, it["weight"]),
"clip_50_150": ite(
it["weight"] < 50, 50, ite(it["weight"] > 150, 150, it["weight"])
),
}
)
for tgt, datasets in self.tgt2datasets.items():
df = datasets["df_num"]
transformed_df = pretrained.transform(df)
df, transformed_df = _ensure_pandas(df), _ensure_pandas(transformed_df)
self.assertEqual(transformed_df.shape, (5, 4), tgt)
self.assertSeriesEqual(df["weight"], transformed_df["weight"], tgt)
self.assertEqual(transformed_df["w<50"][0], "<50", tgt)
self.assertEqual(transformed_df["w<50"][2], ">=50", tgt)
self.assertEqual(transformed_df["clip_50_inf"][0], 50, tgt)
self.assertEqual(transformed_df["clip_50_inf"][2], 170, tgt)
self.assertEqual(transformed_df["clip_50_150"][0], 50, tgt)
self.assertEqual(transformed_df["clip_50_150"][2], 150, tgt)
def test_spark_null(self):
spark_session = (
SparkSession.builder.master("local[2]") # type: ignore
.config("spark.driver.memory", "64g")
.getOrCreate()
)
df = spark_session.createDataFrame([(1, None), (2, "li")], ["num", "name"])
transformer = Map(columns=[replace(it.name, {None: "ABC"})])
transformed_df = transformer.transform(SparkDataFrameWithIndex(df))
self.assertEqual(transformed_df.collect()[0][0], "ABC")
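# Tests for the Relational operator, which wraps a relational sub-pipeline
# (Scan/Join/Aggregate over `it` expressions) so it can be fit and composed
# inside a larger pipeline.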
class TestRelationalOperator(unittest.TestCase):
@classmethod
def setUpClass(cls):
targets = ["pandas", "spark"]
cls.tgt2datasets = {tgt: {} for tgt in targets}
def add_df(name, df):
cls.tgt2datasets["pandas"][name] = df
cls.tgt2datasets["spark"][name] = pandas2spark(df)
X, y = load_iris(as_frame=True, return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y)
add_df("X_train", X_train)
add_df("X_test", X_test)
add_df("y_train", y_train)
add_df("y_test", y_test)
def test_fit_transform(self):
relational = Relational(
operator=(Scan(table=it.main) & Scan(table=it.delay))
>> Join(
pred=[
it.main.TrainId == it.delay.TrainId,
it.main["Arrival time"] >= it.delay.TimeStamp,
]
)
>> Aggregate(columns=[count(it.Delay)], group_by=it.MessageId)
)
for _tgt, datasets in self.tgt2datasets.items():
X_train, X_test, y_train = (
datasets["X_train"],
datasets["X_test"],
datasets["y_train"],
)
trained_relational = relational.fit(X_train, y_train)
_ = trained_relational.transform(X_test)
def test_fit_error(self):
relational = Relational(
operator=(Scan(table=it.main) & Scan(table=it.delay))
>> Join(
pred=[
it.main.TrainId == it.delay.TrainId,
it.main["Arrival time"] >= it.delay.TimeStamp,
]
)
>> Aggregate(columns=[count(it.Delay)], group_by=it.MessageId)
)
for _tgt, datasets in self.tgt2datasets.items():
X_train, y_train = datasets["X_train"], datasets["y_train"]
with self.assertRaises(ValueError):
_ = relational.fit([X_train], y_train)
def test_transform_error(self):
relational = Relational(
operator=(Scan(table=it.main) & Scan(table=it.delay))
>> Join(
pred=[
it.main.TrainId == it.delay.TrainId,
it.main["Arrival time"] >= it.delay.TimeStamp,
]
)
>> Aggregate(columns=[count(it.Delay)], group_by=it.MessageId)
)
for _tgt, datasets in self.tgt2datasets.items():
X_train, X_test, y_train = (
datasets["X_train"],
datasets["X_test"],
datasets["y_train"],
)
trained_relational = relational.fit(X_train, y_train)
with self.assertRaises(ValueError):
_ = trained_relational.transform([X_test])
def test_fit_transform_in_pipeline(self):
relational = Relational(
operator=(Scan(table=it.main) & Scan(table=it.delay))
>> Join(
pred=[
it.main.TrainId == it.delay.TrainId,
it.main["Arrival time"] >= it.delay.TimeStamp,
]
)
>> Aggregate(columns=[count(it.Delay)], group_by=it.MessageId)
)
pipeline = relational >> LogisticRegression()
for tgt, datasets in self.tgt2datasets.items():
X_train, X_test, y_train = (
datasets["X_train"],
datasets["X_test"],
datasets["y_train"],
)
if tgt == "pandas":
trained_pipeline = pipeline.fit(X_train, y_train)
_ = trained_pipeline.predict(X_test)
elif tgt == "spark":
# LogisticRegression is not implemented on Spark
pass
else:
assert False
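# Tests for the OrderBy operator, which sorts rows by one or more columns,
# optionally wrapped in asc()/desc().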
class TestOrderBy(unittest.TestCase):
@classmethod
def setUpClass(cls):
targets = ["pandas", "spark"]
cls.tgt2datasets = {tgt: {} for tgt in targets}
def add_df(name, df):
cls.tgt2datasets["pandas"][name] = df
cls.tgt2datasets["spark"][name] = pandas2spark(df)
df = pd.DataFrame(
{
"gender": ["m", "f", "m", "m", "f"],
"state": ["NY", "NY", "CA", "NY", "CA"],
"status": [0, 1, 1, 0, 1],
}
)
add_df("df", df)
def test_order_attr1(self):
trainable = OrderBy(by=it.status)
for tgt, datasets in self.tgt2datasets.items():
df = datasets["df"]
trained = trainable.fit(df)
transformed_df = trained.transform(df)
if tgt == "spark":
self.assertEqual(get_index_name(transformed_df), get_index_name(df))
transformed_df = _ensure_pandas(transformed_df)
self.assertTrue((transformed_df["status"]).is_monotonic_increasing)
def test_order_attr1_asc(self):
trainable = OrderBy(by=asc(it.status))
for tgt, datasets in self.tgt2datasets.items():
df = datasets["df"]
trained = trainable.fit(df)
transformed_df = trained.transform(df)
if tgt == "spark":
self.assertEqual(get_index_name(transformed_df), get_index_name(df))
transformed_df = _ensure_pandas(transformed_df)
self.assertTrue((transformed_df["status"]).is_monotonic_increasing)
def test_order_attr1_desc(self):
trainable = OrderBy(by=desc(it.status))
for _tgt, datasets in self.tgt2datasets.items():
df = datasets["df"]
trained = trainable.fit(df)
transformed_df = trained.transform(df)
transformed_df = _ensure_pandas(transformed_df)
self.assertTrue((transformed_df["status"]).is_monotonic_decreasing)
def test_order_str1_desc(self):
trainable = OrderBy(by=desc("gender"))
for _tgt, datasets in self.tgt2datasets.items():
df = datasets["df"]
trained = trainable.fit(df)
transformed_df = trained.transform(df)
transformed_df = _ensure_pandas(transformed_df)
self.assertTrue((transformed_df["gender"]).is_monotonic_decreasing)
def test_order_multiple(self):
trainable = OrderBy(by=[it.gender, desc(it.status)])
for _tgt, datasets in self.tgt2datasets.items():
df = datasets["df"]
trained = trainable.fit(df)
transformed_df = trained.transform(df)
transformed_df = _ensure_pandas(transformed_df)
expected_result = pd.DataFrame(
data={
"gender": ["f", "f", "m", "m", "m"],
"state": ["NY", "CA", "CA", "NY", "NY"],
"status": [1, 1, 1, 0, 0],
}
)
self.assertEqual(list(transformed_df.index), [1, 4, 2, 0, 3])
self.assertTrue((transformed_df["gender"]).is_monotonic_increasing)
self.assertTrue(
transformed_df.reset_index(drop=True).equals(expected_result)
)
def test_str1(self):
trainable = OrderBy(by="gender")
for _tgt, datasets in self.tgt2datasets.items():
df = datasets["df"]
trained = trainable.fit(df)
transformed_df = trained.transform(df)
transformed_df = _ensure_pandas(transformed_df)
self.assertTrue((transformed_df["gender"]).is_monotonic_increasing)
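# Tests for the SplitXy operator, which separates the label column from the
# feature columns so downstream estimators can be used in the same pipeline.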
class TestSplitXy(unittest.TestCase):
@classmethod
def setUp(cls): # pylint:disable=arguments-differ
data = load_iris()
X, y = data.data, data.target
X_train, _X_test, y_train, _y_test = train_test_split(
pd.DataFrame(X), pd.DataFrame(y)
)
combined_df = pd.concat([X_train, y_train], axis=1)
combined_df.columns = [
"sepal_length",
"sepal_width",
"petal_length",
"petal_width",
"class",
]
spark_df = pandas2spark(combined_df)
cls.tgt2datasets = {
"pandas": combined_df,
"spark": spark_df,
}
def test_split_transform(self):
for _, df in self.tgt2datasets.items():
trainable = SplitXy(label_name="class") >> Convert(astype="pandas") >> PCA()
trained = trainable.fit(df)
_ = trained.transform(df)
def test_split_predict(self):
for _, df in self.tgt2datasets.items():
trainable = (
SplitXy(label_name="class")
>> Convert(astype="pandas")
>> PCA()
>> LogisticRegression(random_state=42)
)
trained = trainable.fit(df)
_ = trained.predict(df)
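# Tests for multitable_train_test_split, which splits a collection of related
# tables according to the main table and its label column.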
class TestTrainTestSplit(unittest.TestCase):
# Get go_sales dataset in pandas and spark dataframes
def setUp(self):
self.go_sales = fetch_go_sales_dataset()
self.go_sales_spark = fetch_go_sales_dataset("spark")
def test_split_pandas(self):
train, test, train_y, test_y = multitable_train_test_split(
self.go_sales,
main_table_name="go_products",
label_column_name="Product number",
test_size=0.2,
)
        main_table_df: pd.DataFrame = None
for df in train:
if get_table_name(df) == "go_products":
main_table_df = df
self.assertEqual(len(main_table_df), 220)
self.assertEqual(len(train_y), 220)
for df in test:
if get_table_name(df) == "go_products":
main_table_df = df
self.assertEqual(len(main_table_df), 54)
self.assertEqual(len(test_y), 54)
def test_split_pandas_1(self):
_train, test, _train_y, test_y = multitable_train_test_split(
self.go_sales,
main_table_name="go_products",
label_column_name="Product number",
test_size=200,
)
        main_table_df: pd.DataFrame = None
for df in test:
if get_table_name(df) == "go_products":
main_table_df = df
self.assertEqual(len(main_table_df), 200)
self.assertEqual(len(test_y), 200)
def test_split_spark(self):
train, test, train_y, test_y = multitable_train_test_split(
self.go_sales_spark,
main_table_name="go_products",
label_column_name="Product number",
test_size=0.2,
)
        main_table_df: pd.DataFrame = None
for df in train:
if get_table_name(df) == "go_products":
main_table_df = df
self.assertEqual(main_table_df.count(), 220)
self.assertEqual(train_y.count(), 220)
for df in test:
if get_table_name(df) == "go_products":
main_table_df = df
self.assertEqual(main_table_df.count(), 54)
self.assertEqual(test_y.count(), 54)
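# Tests for the Convert operator, which converts a dataset between the supported
# backends (astype="pandas" or astype="spark").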
class TestConvert(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.targets: List[datatype_param_type] = ["pandas", "spark"]
cls.tgt2datasets = {tgt: fetch_go_sales_dataset(tgt) for tgt in cls.targets}
def _check(self, src, dst, tgt):
self.assertEqual(get_table_name(src), get_table_name(dst), tgt)
self.assertEqual(list(get_columns(src)), list(get_columns(dst)), tgt)
pd_src = _ensure_pandas(src)
pd_dst = _ensure_pandas(dst)
self.assertEqual(pd_src.shape, pd_dst.shape, tgt)
def test_to_pandas(self):
for tgt, datasets in self.tgt2datasets.items():
transformer = Convert(astype="pandas")
go_products = datasets[3]
self.assertEqual(get_table_name(go_products), "go_products", tgt)
transformed_df = transformer.transform(go_products)
self.assertTrue(_is_pandas_df(transformed_df), tgt)
self._check(go_products, transformed_df, tgt)
def test_to_spark(self):
for tgt, datasets in self.tgt2datasets.items():
transformer = Convert(astype="spark")
go_products = datasets[3]
self.assertEqual(get_table_name(go_products), "go_products", tgt)
transformed_df = transformer.transform(go_products)
self.assertTrue(_is_spark_df(transformed_df), tgt)
self._check(go_products, transformed_df, tgt)
def test_to_spark_with_index(self):
for tgt, datasets in self.tgt2datasets.items():
transformer = Convert(astype="spark")
go_products = datasets[3]
self.assertEqual(get_table_name(go_products), "go_products", tgt)
transformed_df = transformer.transform(go_products)
self.assertTrue(_is_spark_df(transformed_df), tgt)
self._check(go_products, transformed_df, tgt)
def test_from_list(self):
df = [[0.1, 0.2, 0.3], [4, 5, 6]]
pd_src = pd.DataFrame(df)
for tgt in self.targets:
transformer = Convert(astype=tgt)
            transformed_df = transformer.transform(df)
            pd_dst = _ensure_pandas(transformed_df)
self.assertEqual(pd_src.shape, pd_dst.shape, tgt)
for row_idx in range(pd_src.shape[0]):
for col_idx in range(pd_src.shape[1]):
self.assertAlmostEqual(
pd_src.iloc[row_idx, col_idx],
pd_dst.iloc[row_idx, col_idx],
msg=(row_idx, col_idx, tgt),
)
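# Tests for the SortIndex operator, which sorts rows by the dataframe index
# (ascending by default) and can reorder X and y together via transform_X_y.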
class TestSortIndex(unittest.TestCase):
@classmethod
def setUpClass(cls):
targets = ["pandas", "spark"]
cls.tgt2datasets = {tgt: {} for tgt in targets}
def add_df(name, df):
cls.tgt2datasets["pandas"][name] = df
cls.tgt2datasets["spark"][name] = pandas2spark(df)
df = pd.DataFrame(
{
"gender": ["m", "f", "m", "m", "f"],
"state": ["NY", "NY", "CA", "NY", "CA"],
"status": [0, 1, 1, 0, 1],
"id": [9, 20, 35, 7, 100],
}
)
df = df.set_index("id")
add_df("df", df)
cls.y = pd.Series([1, 1, 1, 0, 0])
def test_sort_asc(self):
trainable = SortIndex()
for tgt, datasets in self.tgt2datasets.items():
df = datasets["df"]
trained = trainable.fit(df)
transformed_df = trained.transform(df)
if tgt == "spark":
self.assertEqual(get_index_name(transformed_df), get_index_name(df))
transformed_df = _ensure_pandas(transformed_df)
self.assertTrue((transformed_df["status"]).is_monotonic_increasing)
transformed_X, transformed_y = trained.transform_X_y(df, self.y)
if tgt == "spark":
self.assertEqual(get_index_name(transformed_X), get_index_name(df))
transformed_X = _ensure_pandas(transformed_X)
self.assertTrue((transformed_X["status"]).is_monotonic_increasing)
self.assertTrue((transformed_y).is_monotonic_decreasing)
def test_sort_desc(self):
trainable = SortIndex(ascending=False)
for tgt, datasets in self.tgt2datasets.items():
df = datasets["df"]
trained = trainable.fit(df)
transformed_df = trained.transform(df)
if tgt == "spark":
self.assertEqual(get_index_name(transformed_df), get_index_name(df))
transformed_df = _ensure_pandas(transformed_df)
self.assertTrue((transformed_df["status"]).is_monotonic_decreasing)
transformed_X, transformed_y = trained.transform_X_y(df, self.y)
if tgt == "spark":
self.assertEqual(get_index_name(transformed_X), get_index_name(df))
transformed_X = _ensure_pandas(transformed_X)
self.assertTrue((transformed_X["status"]).is_monotonic_decreasing)
self.assertTrue((transformed_y).is_monotonic_increasing)
| 110,834 | 41.547025 | 100 |
py
|
lale
|
lale-master/test/test_pgo.py
|
# Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import warnings
from typing import Sequence
import lale.lib.lale
from lale.lib.sklearn import PCA, LogisticRegression
from lale.search import PGO
from lale.search.lale_grid_search_cv import get_grid_search_parameter_grids
from lale.search.op2hp import hyperopt_search_space
example_pgo_fp = "test/lale-pgo-example.json"
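# This example PGO file maps operator names to observed hyperparameter value
# frequencies (e.g. LogisticRegression's C); both the grid-search and Hyperopt
# backends accept it via their `pgo` argument.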
class TestPGOLoad(unittest.TestCase):
def test_pgo_load(self):
pgo = PGO.load_pgo_file(example_pgo_fp)
_ = pgo["LogisticRegression"]["C"]
def test_pgo_sample(self):
pgo = PGO.load_pgo_file(example_pgo_fp)
lr_c = pgo["LogisticRegression"]["C"]
dist = PGO.FrequencyDistribution.asIntegerValues(lr_c.items())
samples: Sequence[int] = dist.samples(10)
_ = samples
# print(f"LR[C] samples: {samples}")
class TestPGOGridSearchCV(unittest.TestCase):
def test_lr_parameters(self):
pgo = PGO.load_pgo_file(example_pgo_fp)
lr = LogisticRegression()
parameters = get_grid_search_parameter_grids(lr, num_samples=2, pgo=pgo)
_ = parameters
# print(parameters)
def test_lr_run(self):
pgo = PGO.load_pgo_file(example_pgo_fp)
from sklearn.datasets import load_iris
from sklearn.metrics import accuracy_score, make_scorer
lr = LogisticRegression()
with warnings.catch_warnings():
warnings.simplefilter("ignore")
clf = lale.lib.lale.GridSearchCV(
estimator=lr,
lale_num_samples=2,
lale_num_grids=5,
cv=5,
pgo=pgo,
scoring=make_scorer(accuracy_score),
)
iris = load_iris()
clf.fit(iris.data, iris.target)
def test_pipeline_parameters(self):
pgo = PGO.load_pgo_file(example_pgo_fp)
trainable = PCA() >> LogisticRegression()
parameters = get_grid_search_parameter_grids(trainable, num_samples=2, pgo=pgo)
_ = parameters
# print(parameters)
class TestPGOHyperopt(unittest.TestCase):
def test_lr_parameters(self):
pgo = PGO.load_pgo_file(example_pgo_fp)
lr = LogisticRegression()
hp_search_space = hyperopt_search_space(lr, pgo=pgo)
_ = hp_search_space
def test_lr_run(self):
pgo = PGO.load_pgo_file(example_pgo_fp)
from sklearn.datasets import load_iris
from lale.lib.lale import Hyperopt
lr = LogisticRegression()
clf = Hyperopt(estimator=lr, max_evals=5, pgo=pgo)
iris = load_iris()
clf.fit(iris.data, iris.target)
def test_pipeline_parameters(self):
pgo = PGO.load_pgo_file(example_pgo_fp)
trainable = PCA() >> LogisticRegression()
parameter_grids = get_grid_search_parameter_grids(
trainable, num_samples=2, pgo=pgo
)
_ = parameter_grids
        # print(parameter_grids)
| 3,501 | 29.719298 | 87 |
py
|
lale
|
lale-master/test/test_core_classifiers.py
|
# Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import warnings
from test import EnableSchemaValidation
import jsonschema
from sklearn.datasets import load_iris
import lale.lib.lale
import lale.type_checking
from lale.lib.lale import NoOp
from lale.lib.sklearn import (
PCA,
SVC,
IsolationForest,
KMeans,
KNeighborsClassifier,
LogisticRegression,
MLPClassifier,
Nystroem,
PassiveAggressiveClassifier,
RidgeClassifier,
SGDClassifier,
SimpleImputer,
VotingClassifier,
)
from lale.search.lale_grid_search_cv import get_grid_search_parameter_grids
from lale.settings import set_disable_data_schema_validation
set_disable_data_schema_validation(False)
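# Data schema validation is explicitly (re)enabled for this module so that the
# tests below run with lale's data schema checks active.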
class TestClassification(unittest.TestCase):
def setUp(self):
from sklearn.model_selection import train_test_split
data = load_iris()
X, y = data.data, data.target
self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(X, y)
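# Rather than writing one test method per classifier by hand, the factory below
# builds a generic test (schema checks, fit/predict, score, Hyperopt,
# cross-validation, GridSearchCV, predict on trainable, to_json, and use inside a
# pipeline) and the loop further down attaches one instance per entry in the
# `classifiers` list to TestClassification.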
def create_function_test_classifier(clf_name):
def test_classifier(self):
X_train, y_train = self.X_train, self.y_train
import importlib
module_name = ".".join(clf_name.split(".")[0:-1])
class_name = clf_name.split(".")[-1]
module = importlib.import_module(module_name)
class_ = getattr(module, class_name)
clf = class_()
# test_schemas_are_schemas
lale.type_checking.validate_is_schema(clf.input_schema_fit())
lale.type_checking.validate_is_schema(clf.input_schema_predict())
lale.type_checking.validate_is_schema(clf.output_schema_predict())
lale.type_checking.validate_is_schema(clf.hyperparam_schema())
# test_init_fit_predict
trained = clf.fit(self.X_train, self.y_train)
_ = trained.predict(self.X_test)
# test score
if not isinstance(
clf, IsolationForest # type: ignore
): # IsolationForest does not define score
_ = trained.score(self.X_test, self.y_test)
from lale.lib.sklearn.gradient_boosting_classifier import (
GradientBoostingClassifier,
)
if isinstance(clf, GradientBoostingClassifier): # type: ignore
            # exponential loss does not work with the iris dataset because it is not a binary classification problem
from lale import schemas
clf = clf.customize_schema(
loss=schemas.Enum(default="deviance", values=["deviance"])
)
# test_with_hyperopt
from lale.lib.lale import Hyperopt
hyperopt = Hyperopt(estimator=clf, max_evals=1, verbose=True)
trained = hyperopt.fit(self.X_train, self.y_train)
_ = trained.predict(self.X_test)
# test_cross_validation
from lale.helpers import cross_val_score
cv_results = cross_val_score(clf, X_train, y_train, cv=2)
self.assertEqual(len(cv_results), 2)
# test_with_gridsearchcv_auto_wrapped
from sklearn.metrics import accuracy_score, make_scorer
with warnings.catch_warnings():
warnings.simplefilter("ignore")
grid_search = lale.lib.lale.GridSearchCV(
estimator=clf,
lale_num_samples=1,
lale_num_grids=1,
cv=2,
scoring=make_scorer(accuracy_score),
)
grid_search.fit(X_train, y_train)
# test_predict_on_trainable
trained = clf.fit(X_train, y_train)
clf.predict(X_train)
# test_to_json
clf.to_json()
# test_in_a_pipeline
pipeline = NoOp() >> clf
trained = pipeline.fit(self.X_train, self.y_train)
_ = trained.predict(self.X_test)
    test_classifier.__name__ = f"test_{clf_name.rsplit('.', maxsplit=1)[-1]}"
return test_classifier
classifiers = [
"lale.lib.sklearn.DummyClassifier",
"lale.lib.sklearn.RandomForestClassifier",
"lale.lib.sklearn.DecisionTreeClassifier",
"lale.lib.sklearn.ExtraTreesClassifier",
"lale.lib.sklearn.GradientBoostingClassifier",
"lale.lib.sklearn.GaussianNB",
"lale.lib.sklearn.QuadraticDiscriminantAnalysis",
"lale.lib.lightgbm.LGBMClassifier",
"lale.lib.xgboost.XGBClassifier",
"lale.lib.sklearn.KNeighborsClassifier",
"lale.lib.sklearn.LinearSVC",
"lale.lib.sklearn.LogisticRegression",
"lale.lib.sklearn.MLPClassifier",
"lale.lib.sklearn.SVC",
"lale.lib.sklearn.Perceptron",
"lale.lib.sklearn.PassiveAggressiveClassifier",
"lale.lib.sklearn.MultinomialNB",
"lale.lib.sklearn.AdaBoostClassifier",
"lale.lib.sklearn.SGDClassifier",
"lale.lib.sklearn.RidgeClassifier",
"lale.lib.sklearn.IsolationForest",
"lale.lib.sklearn.KMeans",
]
for clf_to_test in classifiers:
setattr(
TestClassification,
f"test_{clf_to_test.rsplit('.', maxsplit=1)[-1]}",
create_function_test_classifier(clf_to_test),
)
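# Illustrative only (module path assumed from the file location): the loop above
# effectively defines methods such as TestClassification.test_LogisticRegression,
# so a single generated test can be selected in the usual way, e.g.
#   python -m unittest test.test_core_classifiers.TestClassification.test_LogisticRegression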
class TestMLPClassifier(unittest.TestCase):
def test_with_multioutput_targets(self):
import numpy as np
from sklearn.datasets import make_classification
from sklearn.utils import shuffle
X, y1 = make_classification(
n_samples=10, n_features=100, n_informative=30, n_classes=3, random_state=1
)
y2 = shuffle(y1, random_state=1)
y3 = shuffle(y1, random_state=2)
Y = np.vstack((y1, y2, y3)).T
trainable = KNeighborsClassifier()
trained = trainable.fit(X, Y)
_ = trained.predict(X)
def test_predict_proba(self):
trainable = MLPClassifier()
iris = load_iris()
trained = trainable.fit(iris.data, iris.target)
# with self.assertWarns(DeprecationWarning):
_ = trainable.predict_proba(iris.data)
_ = trained.predict_proba(iris.data)
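# The ensemble tests below (Voting/Bagging/Stacking) check that lale operators,
# lale pipelines, and operator choices can be nested as estimators, and that the
# resulting ensembles still work with Hyperopt and GridSearchCV via auto_configure.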
class TestVotingClassifier(unittest.TestCase):
def setUp(self):
from sklearn.model_selection import train_test_split
data = load_iris()
X, y = data.data, data.target
self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(X, y)
warnings.filterwarnings("ignore")
def test_with_lale_classifiers(self):
clf = VotingClassifier(
estimators=[("knn", KNeighborsClassifier()), ("lr", LogisticRegression())]
)
trained = clf.fit(self.X_train, self.y_train)
trained.predict(self.X_test)
def test_with_lale_pipeline(self):
clf = VotingClassifier(
estimators=[
("knn", KNeighborsClassifier()),
("pca_lr", PCA() >> LogisticRegression()),
]
)
trained = clf.fit(self.X_train, self.y_train)
trained.predict(self.X_test)
def test_with_hyperopt(self):
from lale.lib.lale import Hyperopt
clf = VotingClassifier(
estimators=[("knn", KNeighborsClassifier()), ("lr", LogisticRegression())]
)
_ = clf.auto_configure(self.X_train, self.y_train, Hyperopt, max_evals=1)
def test_with_gridsearch(self):
from sklearn.metrics import accuracy_score, make_scorer
from lale.lib.lale import GridSearchCV
clf = VotingClassifier(
estimators=[("knn", KNeighborsClassifier()), ("rc", RidgeClassifier())],
voting="hard",
)
_ = clf.auto_configure(
self.X_train,
self.y_train,
GridSearchCV,
lale_num_samples=1,
lale_num_grids=1,
cv=2,
scoring=make_scorer(accuracy_score),
)
@unittest.skip("TODO: get this working with sklearn 0.23")
def test_with_observed_gridsearch(self):
from sklearn.metrics import accuracy_score, make_scorer
from lale.lib.lale import GridSearchCV
from lale.lib.lale.observing import LoggingObserver
clf = VotingClassifier(
estimators=[("knn", KNeighborsClassifier()), ("rc", RidgeClassifier())],
voting="hard",
)
_ = clf.auto_configure(
self.X_train,
self.y_train,
GridSearchCV,
lale_num_samples=1,
lale_num_grids=1,
cv=2,
scoring=make_scorer(accuracy_score),
observer=LoggingObserver,
)
class TestBaggingClassifier(unittest.TestCase):
def setUp(self):
from sklearn.model_selection import train_test_split
data = load_iris()
X, y = data.data, data.target
self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(X, y)
warnings.filterwarnings("ignore")
def test_with_lale_classifiers(self):
from lale.lib.sklearn import BaggingClassifier
clf = BaggingClassifier(base_estimator=LogisticRegression())
trained = clf.fit(self.X_train, self.y_train)
trained.predict(self.X_test)
def test_with_lale_pipeline(self):
from lale.lib.sklearn import BaggingClassifier
clf = BaggingClassifier(base_estimator=PCA() >> LogisticRegression())
trained = clf.fit(self.X_train, self.y_train)
trained.predict(self.X_test)
def test_with_hyperopt(self):
from lale.lib.lale import Hyperopt
from lale.lib.sklearn import BaggingClassifier
clf = BaggingClassifier(base_estimator=LogisticRegression())
trained = clf.auto_configure(self.X_train, self.y_train, Hyperopt, max_evals=1)
print(trained.to_json())
def test_pipeline_with_hyperopt(self):
from lale.lib.lale import Hyperopt
from lale.lib.sklearn import BaggingClassifier
clf = BaggingClassifier(base_estimator=PCA() >> LogisticRegression())
_ = clf.auto_configure(self.X_train, self.y_train, Hyperopt, max_evals=1)
def test_pipeline_choice_with_hyperopt(self):
from lale.lib.lale import Hyperopt
from lale.lib.sklearn import BaggingClassifier
clf = BaggingClassifier(
base_estimator=PCA() >> (LogisticRegression() | KNeighborsClassifier())
)
_ = clf.auto_configure(self.X_train, self.y_train, Hyperopt, max_evals=1)
def test_predict_log_proba(self):
from lale.lib.sklearn import BaggingClassifier
clf = BaggingClassifier(base_estimator=PCA() >> LogisticRegression())
trained = clf.fit(self.X_train, self.y_train)
trained.predict_log_proba(self.X_test)
def test_predict_log_proba_trained_trainable(self):
from lale.lib.sklearn import BaggingClassifier
clf = BaggingClassifier()
clf.fit(self.X_train, self.y_train)
with self.assertWarns(DeprecationWarning):
clf.predict_log_proba(self.X_test)
def test_predict_log_proba_trainable(self):
from lale.lib.sklearn import BaggingClassifier
clf = BaggingClassifier(base_estimator=PCA() >> LogisticRegression())
with self.assertRaises(ValueError):
clf.predict_log_proba(self.X_test)
class TestStackingClassifier(unittest.TestCase):
def setUp(self):
from sklearn.model_selection import train_test_split
data = load_iris()
X, y = data.data, data.target
self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(X, y)
def test_with_lale_classifiers(self):
from lale.lib.sklearn import StackingClassifier
clf = StackingClassifier(estimators=[("base", LogisticRegression())])
trained = clf.fit(self.X_train, self.y_train)
trained.predict(self.X_test)
def test_with_lale_pipeline(self):
from lale.lib.sklearn import StackingClassifier
clf = StackingClassifier(estimators=[("base", PCA() >> LogisticRegression())])
trained = clf.fit(self.X_train, self.y_train)
trained.predict(self.X_test)
def test_with_hyperopt(self):
from lale.lib.lale import Hyperopt
from lale.lib.sklearn import StackingClassifier
clf = StackingClassifier(
estimators=[("base", LogisticRegression())],
final_estimator=LogisticRegression(),
)
trained = clf.auto_configure(self.X_train, self.y_train, Hyperopt, max_evals=1)
print(trained.to_json())
def test_pipeline_with_hyperopt(self):
from lale.lib.lale import Hyperopt
from lale.lib.sklearn import StackingClassifier
clf = StackingClassifier(
estimators=[("base", PCA() >> LogisticRegression())],
final_estimator=LogisticRegression(),
)
_ = clf.auto_configure(self.X_train, self.y_train, Hyperopt, max_evals=1)
def test_pipeline_choice_with_hyperopt(self):
from lale.lib.lale import Hyperopt
from lale.lib.sklearn import StackingClassifier
clf = StackingClassifier(
estimators=[
("base", PCA() >> (LogisticRegression() | KNeighborsClassifier()))
]
)
_ = clf.auto_configure(self.X_train, self.y_train, Hyperopt, max_evals=1)
class TestSpuriousSideConstraintsClassification(unittest.TestCase):
    # This was prompted by a bug; keeping it as it may help with support for other sklearn versions.
def setUp(self):
from sklearn.model_selection import train_test_split
data = load_iris()
X, y = data.data, data.target
self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(X, y)
def test_sgd_classifier(self):
reg = SGDClassifier(loss="squared_error", epsilon=0.2)
reg.fit(self.X_train, self.y_train)
def test_sgd_classifier_1(self):
reg = SGDClassifier(learning_rate="optimal", eta0=0.2)
reg.fit(self.X_train, self.y_train)
def test_sgd_classifier_2(self):
reg = SGDClassifier(early_stopping=False, validation_fraction=0.2)
reg.fit(self.X_train, self.y_train)
def test_sgd_classifier_3(self):
reg = SGDClassifier(l1_ratio=0.2, penalty="l1")
reg.fit(self.X_train, self.y_train)
def test_mlp_classifier(self):
reg = MLPClassifier(early_stopping=False, validation_fraction=0.2)
reg.fit(self.X_train, self.y_train)
def test_mlp_classifier_1(self):
reg = MLPClassifier(beta_1=0.8, solver="sgd")
reg.fit(self.X_train, self.y_train)
def test_mlp_classifier_2b(self):
reg = MLPClassifier(beta_2=0.8, solver="sgd")
reg.fit(self.X_train, self.y_train)
def test_mlp_classifier_2e(self):
reg = MLPClassifier(epsilon=0.8, solver="sgd")
reg.fit(self.X_train, self.y_train)
def test_mlp_classifier_3(self):
reg = MLPClassifier(n_iter_no_change=100, solver="lbfgs")
reg.fit(self.X_train, self.y_train)
def test_mlp_classifier_4(self):
reg = MLPClassifier(early_stopping=True, solver="lbfgs")
reg.fit(self.X_train, self.y_train)
def test_mlp_classifier_5(self):
reg = MLPClassifier(nesterovs_momentum=False, solver="lbfgs")
reg.fit(self.X_train, self.y_train)
def test_mlp_classifier_6(self):
reg = MLPClassifier(momentum=0.8, solver="lbfgs")
reg.fit(self.X_train, self.y_train)
def test_mlp_classifier_7(self):
reg = MLPClassifier(shuffle=False, solver="lbfgs")
reg.fit(self.X_train, self.y_train)
def test_mlp_classifier_8(self):
reg = MLPClassifier(learning_rate="invscaling", solver="lbfgs")
reg.fit(self.X_train, self.y_train)
def test_mlp_classifier_9(self):
reg = MLPClassifier(learning_rate_init=0.002, solver="lbfgs")
reg.fit(self.X_train, self.y_train)
def test_mlp_classifier_10(self):
reg = MLPClassifier(learning_rate="invscaling", power_t=0.4, solver="lbfgs")
reg.fit(self.X_train, self.y_train)
def test_passive_aggressive_classifier(self):
reg = PassiveAggressiveClassifier(validation_fraction=0.4, early_stopping=False)
reg.fit(self.X_train, self.y_train)
def test_svc(self):
reg = SVC(kernel="linear", gamma=1)
reg.fit(self.X_train, self.y_train)
def test_simple_imputer(self):
reg = SimpleImputer(strategy="mean", fill_value=10)
reg.fit(self.X_train, self.y_train)
def test_nystroem(self):
reg = Nystroem(kernel="cosine", gamma=0.1)
reg.fit(self.X_train, self.y_train)
def test_nystroem_1(self):
reg = Nystroem(kernel="cosine", coef0=0.1)
reg.fit(self.X_train, self.y_train)
def test_nystroem_2(self):
reg = Nystroem(kernel="cosine", degree=2)
reg.fit(self.X_train, self.y_train)
def test_ridge_classifier(self):
reg = RidgeClassifier(fit_intercept=False)
reg.fit(self.X_train, self.y_train)
def test_ridge_classifier_1(self):
reg = RidgeClassifier(solver="svd", max_iter=10)
reg.fit(self.X_train, self.y_train)
class TestKNeighborsClassifier(unittest.TestCase):
def test_with_multioutput_targets(self):
import numpy as np
from sklearn.datasets import make_classification
from sklearn.utils import shuffle
X, y1 = make_classification(
n_samples=10, n_features=100, n_informative=30, n_classes=3, random_state=1
)
y2 = shuffle(y1, random_state=1)
y3 = shuffle(y1, random_state=2)
Y = np.vstack((y1, y2, y3)).T
trainable = KNeighborsClassifier()
trained = trainable.fit(X, Y)
_ = trained.predict(X)
def test_predict_proba(self):
trainable = KNeighborsClassifier()
iris = load_iris()
trained = trainable.fit(iris.data, iris.target)
# with self.assertWarns(DeprecationWarning):
_ = trainable.predict_proba(iris.data)
_ = trained.predict_proba(iris.data)
class TestLogisticRegression(unittest.TestCase):
def test_hyperparam_keyword_enum(self):
_ = LogisticRegression(
LogisticRegression.enum.penalty.l1,
C=0.1,
solver=LogisticRegression.enum.solver.saga,
)
def test_hyperparam_exclusive_min(self):
with EnableSchemaValidation():
with self.assertRaises(jsonschema.ValidationError):
_ = LogisticRegression(LogisticRegression.enum.penalty.l1, C=0.0)
def test_hyperparam_penalty_solver_dependence(self):
with EnableSchemaValidation():
with self.assertRaises(jsonschema.ValidationError):
_ = LogisticRegression(
LogisticRegression.enum.penalty.l1,
LogisticRegression.enum.solver.newton_cg,
)
def test_hyperparam_dual_penalty_solver_dependence(self):
with EnableSchemaValidation():
with self.assertRaises(jsonschema.ValidationError):
_ = LogisticRegression(
LogisticRegression.enum.penalty.l2,
LogisticRegression.enum.solver.sag,
dual=True,
)
def test_sample_weight(self):
import numpy as np
trainable_lr = LogisticRegression(n_jobs=1)
iris = load_iris()
trained_lr = trainable_lr.fit(
iris.data, iris.target, sample_weight=np.arange(len(iris.target))
)
_ = trained_lr.predict(iris.data)
def test_predict_proba(self):
import numpy as np
trainable_lr = LogisticRegression(n_jobs=1)
iris = load_iris()
trained_lr = trainable_lr.fit(
iris.data, iris.target, sample_weight=np.arange(len(iris.target))
)
# with self.assertWarns(DeprecationWarning):
_ = trainable_lr.predict_proba(iris.data)
_ = trained_lr.predict_proba(iris.data)
def test_decision_function(self):
import numpy as np
trainable_lr = LogisticRegression(n_jobs=1)
iris = load_iris()
trained_lr = trainable_lr.fit(
iris.data, iris.target, sample_weight=np.arange(len(iris.target))
)
_ = trained_lr.decision_function(iris.data)
def test_with_sklearn_gridsearchcv(self):
from sklearn.metrics import accuracy_score, make_scorer
from sklearn.model_selection import GridSearchCV
lr = LogisticRegression()
parameters = {"solver": ("liblinear", "lbfgs"), "penalty": ["l2"]}
with warnings.catch_warnings():
warnings.simplefilter("ignore")
clf = GridSearchCV(
lr, parameters, cv=5, scoring=make_scorer(accuracy_score)
)
iris = load_iris()
clf.fit(iris.data, iris.target)
def test_with_randomizedsearchcv(self):
import numpy as np
from scipy.stats.distributions import uniform
from sklearn.metrics import accuracy_score, make_scorer
from sklearn.model_selection import RandomizedSearchCV
lr = LogisticRegression()
ranges, _cat_idx = lr.get_param_ranges()
# specify parameters and distributions to sample from
# the loguniform distribution needs to be taken care of properly
param_dist = {"solver": ranges["solver"], "C": uniform(0.03125, np.log(32768))}
# run randomized search
n_iter_search = 5
with warnings.catch_warnings():
warnings.simplefilter("ignore")
random_search = RandomizedSearchCV(
lr,
param_distributions=param_dist,
n_iter=n_iter_search,
cv=5,
scoring=make_scorer(accuracy_score),
)
iris = load_iris()
random_search.fit(iris.data, iris.target)
def test_grid_search_on_trained(self):
from sklearn.metrics import accuracy_score, make_scorer
from sklearn.model_selection import GridSearchCV
iris = load_iris()
X, y = iris.data, iris.target
lr = LogisticRegression()
trained = lr.fit(X, y)
parameters = {"solver": ("liblinear", "lbfgs"), "penalty": ["l2"]}
_ = GridSearchCV(trained, parameters, cv=5, scoring=make_scorer(accuracy_score))
def test_grid_search_on_trained_auto(self):
from sklearn.metrics import accuracy_score, make_scorer
from sklearn.model_selection import GridSearchCV
iris = load_iris()
X, y = iris.data, iris.target
lr = LogisticRegression()
trained = lr.fit(X, y)
parameters = get_grid_search_parameter_grids(lr, num_samples=2)
_ = GridSearchCV(trained, parameters, cv=5, scoring=make_scorer(accuracy_score))
def test_doc(self):
from test.mock_custom_operators import MyLR
import sklearn.datasets
import sklearn.utils
iris = load_iris()
X_all, y_all = sklearn.utils.shuffle(iris.data, iris.target, random_state=42)
X_train, y_train = X_all[10:], y_all[10:]
X_test, y_test = X_all[:10], y_all[:10]
print(f"expected {y_test}")
warnings.filterwarnings("ignore", category=FutureWarning)
trainable = MyLR(solver="lbfgs", C=0.1)
trained = trainable.fit(X_train, y_train)
predictions = trained.predict(X_test)
print(f"actual {predictions}")
class TestIsolationForest(unittest.TestCase):
def setUp(self):
from sklearn.model_selection import train_test_split
from lale.datasets.util import load_boston
data = load_boston()
X, y = data.data, data.target
self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(X, y)
warnings.filterwarnings("ignore")
def test_with_no_y(self):
clf = IsolationForest()
trained = clf.fit(self.X_train)
trained.predict(self.X_test)
def test_with_hyperopt(self):
def my_scorer(estimator, X, y=None):
return 1
from lale.lib.lale import Hyperopt
hyperopt = Hyperopt(
estimator=IsolationForest(max_features=1.0, max_samples=1.0),
max_evals=5,
verbose=True,
scoring=my_scorer,
)
trained = hyperopt.fit(self.X_train)
_ = trained.predict(self.X_test)
def test_decision_function_1(self):
def my_scorer(estimator, X, y=None):
return 1
from lale.lib.lale import Hyperopt
hyperopt = Hyperopt(
estimator=IsolationForest(max_features=1.0, max_samples=1.0),
max_evals=5,
verbose=True,
scoring=my_scorer,
)
trained = hyperopt.fit(self.X_train)
pipeline = trained.get_pipeline()
assert pipeline is not None
_ = pipeline.decision_function(self.X_test)
def test_decision_function_2(self):
def my_scorer(estimator, X, y=None):
return 1
from lale.lib.lale import Hyperopt
from lale.lib.sklearn import MinMaxScaler
hyperopt = Hyperopt(
estimator=MinMaxScaler()
>> IsolationForest(max_features=1.0, max_samples=1.0),
max_evals=5,
verbose=True,
scoring=my_scorer,
)
trained = hyperopt.fit(self.X_train)
pipeline = trained.get_pipeline()
assert pipeline is not None
_ = pipeline.decision_function(self.X_test)
def test_score_samples(self):
clf = IsolationForest()
trained = clf.fit(self.X_train)
trained.score_samples(self.X_test)
def test_score_samples_trainable(self):
clf = IsolationForest()
with self.assertRaises(ValueError):
clf.score_samples(self.X_test)
def test_score_samples_trained_trainable(self):
clf = IsolationForest()
clf.fit(self.X_train)
with self.assertWarns(DeprecationWarning):
clf.score_samples(self.X_test)
class TestKMeans(unittest.TestCase):
def setUp(self):
from sklearn.model_selection import train_test_split
from lale.datasets.util import load_boston
data = load_boston()
X, y = data.data, data.target
self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(X, y)
warnings.filterwarnings("ignore")
def test_with_no_y(self):
clf = KMeans()
trained = clf.fit(self.X_train)
trained.predict(self.X_test)
def test_with_hyperopt(self):
from lale.lib.lale import Hyperopt
def my_scorer(estimator, X, y=None):
return 1
hyperopt = Hyperopt(
estimator=KMeans(n_clusters=3), max_evals=5, verbose=True, scoring=my_scorer
)
trained = hyperopt.fit(self.X_train)
_ = trained.predict(self.X_test)
| 27,160 | 33.6 | 105 |
py
|
lale
|
lale-master/test/test_category_encoders.py
|
# Copyright 2022 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import lale.datasets
import lale.datasets.openml
from lale.lib.category_encoders import HashingEncoder, TargetEncoder
from lale.lib.rasl import ConcatFeatures, Project
from lale.lib.sklearn import LogisticRegression
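# Both tests follow the same pattern on credit-g: route string columns through a
# category encoder and numeric columns straight through, concatenate the two
# projections, and train a LogisticRegression on the result.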
class TestCategoryEncoders(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.creditg = lale.datasets.openml.fetch(
"credit-g",
"classification",
preprocess=False,
astype="pandas",
)
def test_hashing_encoder(self):
(train_X, train_y), (test_X, _test_y) = self.creditg
cat_prep = Project(columns={"type": "string"}) >> HashingEncoder()
num_prep = Project(columns={"type": "number"})
trainable = (
(cat_prep & num_prep) >> ConcatFeatures >> LogisticRegression(max_iter=1000)
)
trained = trainable.fit(train_X, train_y)
_ = trained.predict(test_X)
def test_target_encoder(self):
(train_X, train_y), (test_X, _test_y) = self.creditg
cat_prep = Project(columns={"type": "string"}) >> TargetEncoder()
num_prep = Project(columns={"type": "number"})
trainable = (
(cat_prep & num_prep) >> ConcatFeatures >> LogisticRegression(max_iter=1000)
)
trained = trainable.fit(train_X, train_y)
_ = trained.predict(test_X)
| 1,941 | 35.641509 | 88 |
py
|
lale
|
lale-master/test/test_notebooks.py
|
# Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import subprocess
import tempfile
import warnings
from typing import List, Optional
import pytest
notebook_dir: str = os.environ.get("NOTEBOOK_DIR", ".")
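# Which notebooks run is controlled entirely through environment variables:
# NOTEBOOK_DIR points at the directory to scan, NOTEBOOK_EXCLUDES lists files to
# skip, and NOTEBOOK_CATEGORY / ALL_NOTEBOOK_CATEGORIES select or carve out
# filename prefixes (see should_test below).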
def should_test(f: str) -> bool:
notebooks_to_skip_str: Optional[str] = os.environ.get("NOTEBOOK_EXCLUDES", None)
notebooks_to_skip: Optional[List[str]] = (
notebooks_to_skip_str.split() if notebooks_to_skip_str is not None else None
)
if notebooks_to_skip and f in notebooks_to_skip:
return False
notebooks_categories_str: Optional[str] = os.environ.get("NOTEBOOK_CATEGORY", None)
notebooks_categories: Optional[List[str]] = (
notebooks_categories_str.split()
if notebooks_categories_str is not None
else None
)
all_notebooks_categories_str: Optional[str] = os.environ.get(
"ALL_NOTEBOOK_CATEGORIES", None
)
all_notebooks_categories: Optional[List[str]] = (
all_notebooks_categories_str.split()
if all_notebooks_categories_str is not None
else None
)
if not notebooks_categories:
if all_notebooks_categories is None:
# run everything (with a warning)
warnings.warn(
"Running all notebook tests. To run a subset, specify appropriate filters using the NOTEBOOK_CATEGORY and ALL_NOTEBOOK_CATEGORIES environment variables"
)
return True
else:
# we want to run all tests that are *not* in the all list
# this is for running the stuff left over (in another job) after we carve out prefixes with NOTEBOOK_CATEGORY
for c in all_notebooks_categories:
if f.startswith(c):
return False
return True
else:
if all_notebooks_categories is not None:
# check that the category is included in the master list, if set (useful for travis)
# if the list of categories is not set, continue (useful for running on the command line)
for c in notebooks_categories:
assert c in all_notebooks_categories
for c in notebooks_categories:
if f.startswith(c):
return True
# if it is not a requested category, don't run it
return False
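# Illustrative invocation (directory and category prefix are made up for the
# example, not taken from this repo):
#   NOTEBOOK_DIR=examples NOTEBOOK_CATEGORY=demo_ python -m pytest test/test_notebooks.py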
@pytest.mark.parametrize(
"filename",
sorted(
[f for f in os.listdir(notebook_dir) if f.endswith(".ipynb") and should_test(f)]
),
)
def test_notebook(filename):
path = os.path.join(notebook_dir, filename)
print(path)
with tempfile.NamedTemporaryFile(suffix=".ipynb") as fout:
args = [
"jupyter",
"nbconvert",
"--to",
"notebook",
"--execute",
"--ExecutePreprocessor.timeout=1000",
"--output",
fout.name,
path,
]
subprocess.check_call(args)
| 3,457 | 33.929293 | 169 |
py
|
lale
|
lale-master/test/test_type_checking.py
|
# Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from test import EnableSchemaValidation
import jsonschema
import lale.lib.lale
from lale.lib.lale import ConcatFeatures, IdentityWrapper, NoOp
from lale.lib.sklearn import NMF, PCA, LogisticRegression, TfidfVectorizer
from lale.settings import (
disable_data_schema_validation,
disable_hyperparams_schema_validation,
set_disable_data_schema_validation,
set_disable_hyperparams_schema_validation,
)
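# TestDatasetSchemas builds JSON schemas for several datasets (numpy arrays,
# pandas dataframes, ARFF-derived data, text, ...) via to_schema and checks both
# that each dataset validates against its own schema and that operators such as
# LogisticRegression, Project, NMF, and TfidfVectorizer accept or reject each
# dataset as expected.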
class TestDatasetSchemas(unittest.TestCase):
@classmethod
def setUpClass(cls):
from sklearn.datasets import load_iris
with EnableSchemaValidation():
irisArr = load_iris()
cls._irisArr = {"X": irisArr.data, "y": irisArr.target}
from lale.datasets import sklearn_to_pandas
(train_X, train_y), (_test_X, _test_y) = sklearn_to_pandas.load_iris_df()
cls._irisDf = {"X": train_X, "y": train_y}
(train_X, train_y), (_test_X, _test_y) = sklearn_to_pandas.digits_df()
cls._digits = {"X": train_X, "y": train_y}
(
(train_X, train_y),
(_test_X, _test_y),
) = sklearn_to_pandas.california_housing_df()
cls._housing = {"X": train_X, "y": train_y}
from lale.datasets import openml
(train_X, train_y), (_test_X, _test_y) = openml.fetch(
"credit-g", "classification", preprocess=False
)
cls._creditG = {"X": train_X, "y": train_y}
from lale.datasets import load_movie_review
train_X, train_y = load_movie_review()
cls._movies = {"X": train_X, "y": train_y}
from lale.datasets.uci.uci_datasets import fetch_drugscom
train_X, train_y, _test_X, _test_y = fetch_drugscom()
cls._drugRev = {"X": train_X, "y": train_y}
@classmethod
def tearDownClass(cls):
cls._irisArr = None
cls._irisDf = None
cls._digits = None
cls._housing = None
cls._creditG = None
cls._movies = None
cls._drugRev = None
def test_datasets_with_own_schemas(self):
from lale.datasets.data_schemas import to_schema
from lale.type_checking import validate_schema_directly
for name in [
"irisArr",
"irisDf",
"digits",
"housing",
"creditG",
"movies",
"drugRev",
]:
dataset = getattr(self, f"_{name}")
data_X, data_y = dataset["X"], dataset["y"]
schema_X, schema_y = to_schema(data_X), to_schema(data_y)
validate_schema_directly(data_X, schema_X, subsample_array=False)
validate_schema_directly(data_y, schema_y, subsample_array=False)
def test_ndarray_to_schema(self):
from lale.datasets.data_schemas import to_schema
from lale.type_checking import validate_schema_directly
irisArr = self._irisArr
assert irisArr is not None
all_X, all_y = irisArr["X"], irisArr["y"]
assert not hasattr(all_X, "json_schema")
all_X_schema = to_schema(all_X)
validate_schema_directly(all_X, all_X_schema, subsample_array=False)
assert not hasattr(all_y, "json_schema")
all_y_schema = to_schema(all_y)
validate_schema_directly(all_y, all_y_schema, subsample_array=False)
all_X_expected = {
"type": "array",
"minItems": 150,
"maxItems": 150,
"items": {
"type": "array",
"minItems": 4,
"maxItems": 4,
"items": {"type": "number"},
},
}
all_y_expected = {
"type": "array",
"minItems": 150,
"maxItems": 150,
"items": {"type": "integer"},
}
self.maxDiff = None
self.assertEqual(all_X_schema, all_X_expected)
self.assertEqual(all_y_schema, all_y_expected)
def test_pandas_to_schema(self):
import pandas as pd
from lale.datasets.data_schemas import to_schema
from lale.type_checking import validate_schema_directly
irisDf = self._irisDf
assert irisDf is not None
train_X, train_y = irisDf["X"], irisDf["y"]
assert isinstance(train_X, pd.DataFrame)
assert not hasattr(train_X, "json_schema")
train_X_schema = to_schema(train_X)
validate_schema_directly(train_X, train_X_schema, subsample_array=False)
assert isinstance(train_y, pd.Series)
assert not hasattr(train_y, "json_schema")
train_y_schema = to_schema(train_y)
validate_schema_directly(train_y, train_y_schema, subsample_array=False)
train_X_expected = {
"type": "array",
"minItems": 120,
"maxItems": 120,
"items": {
"type": "array",
"minItems": 4,
"maxItems": 4,
"items": [
{"description": "sepal length (cm)", "type": "number"},
{"description": "sepal width (cm)", "type": "number"},
{"description": "petal length (cm)", "type": "number"},
{"description": "petal width (cm)", "type": "number"},
],
},
}
train_y_expected = {
"type": "array",
"minItems": 120,
"maxItems": 120,
"items": {"description": "target", "type": "integer"},
}
self.maxDiff = None
self.assertEqual(train_X_schema, train_X_expected)
self.assertEqual(train_y_schema, train_y_expected)
def test_arff_to_schema(self):
from lale.datasets.data_schemas import to_schema
from lale.type_checking import validate_schema_directly
creditG = self._creditG
assert creditG is not None
train_X, train_y = creditG["X"], creditG["y"]
assert hasattr(train_X, "json_schema")
train_X_schema = to_schema(train_X)
validate_schema_directly(train_X, train_X_schema, subsample_array=False)
assert hasattr(train_y, "json_schema")
train_y_schema = to_schema(train_y)
validate_schema_directly(train_y, train_y_schema, subsample_array=False)
train_X_expected = {
"type": "array",
"minItems": 670,
"maxItems": 670,
"items": {
"type": "array",
"minItems": 20,
"maxItems": 20,
"items": [
{
"description": "checking_status",
"enum": ["<0", "0<=X<200", ">=200", "no checking"],
},
{"description": "duration", "type": "number"},
{
"description": "credit_history",
"enum": [
"no credits/all paid",
"all paid",
"existing paid",
"delayed previously",
"critical/other existing credit",
],
},
{
"description": "purpose",
"enum": [
"new car",
"used car",
"furniture/equipment",
"radio/tv",
"domestic appliance",
"repairs",
"education",
"vacation",
"retraining",
"business",
"other",
],
},
{"description": "credit_amount", "type": "number"},
{
"description": "savings_status",
"enum": [
"<100",
"100<=X<500",
"500<=X<1000",
">=1000",
"no known savings",
],
},
{
"description": "employment",
"enum": ["unemployed", "<1", "1<=X<4", "4<=X<7", ">=7"],
},
{"description": "installment_commitment", "type": "number"},
{
"description": "personal_status",
"enum": [
"male div/sep",
"female div/dep/mar",
"male single",
"male mar/wid",
"female single",
],
},
{
"description": "other_parties",
"enum": ["none", "co applicant", "guarantor"],
},
{"description": "residence_since", "type": "number"},
{
"description": "property_magnitude",
"enum": [
"real estate",
"life insurance",
"car",
"no known property",
],
},
{"description": "age", "type": "number"},
{
"description": "other_payment_plans",
"enum": ["bank", "stores", "none"],
},
{"description": "housing", "enum": ["rent", "own", "for free"]},
{"description": "existing_credits", "type": "number"},
{
"description": "job",
"enum": [
"unemp/unskilled non res",
"unskilled resident",
"skilled",
"high qualif/self emp/mgmt",
],
},
{"description": "num_dependents", "type": "number"},
{"description": "own_telephone", "enum": ["none", "yes"]},
{"description": "foreign_worker", "enum": ["yes", "no"]},
],
},
}
train_y_expected = {
"type": "array",
"minItems": 670,
"maxItems": 670,
"items": {"description": "class", "enum": ["good", "bad"]},
}
self.maxDiff = None
self.assertEqual(train_X_schema, train_X_expected)
self.assertEqual(train_y_schema, train_y_expected)
def test_keep_numbers(self):
from lale.datasets.data_schemas import to_schema
from lale.lib.lale import Project
creditG = self._creditG
assert creditG is not None
train_X = creditG["X"]
trainable = Project(columns={"type": "number"})
trained = trainable.fit(train_X)
transformed = trained.transform(train_X)
transformed_schema = to_schema(transformed)
transformed_expected = {
"type": "array",
"minItems": 670,
"maxItems": 670,
"items": {
"type": "array",
"minItems": 7,
"maxItems": 7,
"items": [
{"description": "duration", "type": "number"},
{"description": "credit_amount", "type": "number"},
{"description": "installment_commitment", "type": "number"},
{"description": "residence_since", "type": "number"},
{"description": "age", "type": "number"},
{"description": "existing_credits", "type": "number"},
{"description": "num_dependents", "type": "number"},
],
},
}
self.maxDiff = None
self.assertEqual(transformed_schema, transformed_expected)
def test_keep_non_numbers(self):
with EnableSchemaValidation():
from lale.datasets.data_schemas import to_schema
from lale.lib.lale import Project
creditG = self._creditG
assert creditG is not None
train_X = creditG["X"]
trainable = Project(columns={"not": {"type": "number"}})
trained = trainable.fit(train_X)
transformed = trained.transform(train_X)
transformed_schema = to_schema(transformed)
transformed_expected = {
"type": "array",
"minItems": 670,
"maxItems": 670,
"items": {
"type": "array",
"minItems": 13,
"maxItems": 13,
"items": [
{
"description": "checking_status",
"enum": ["<0", "0<=X<200", ">=200", "no checking"],
},
{
"description": "credit_history",
"enum": [
"no credits/all paid",
"all paid",
"existing paid",
"delayed previously",
"critical/other existing credit",
],
},
{
"description": "purpose",
"enum": [
"new car",
"used car",
"furniture/equipment",
"radio/tv",
"domestic appliance",
"repairs",
"education",
"vacation",
"retraining",
"business",
"other",
],
},
{
"description": "savings_status",
"enum": [
"<100",
"100<=X<500",
"500<=X<1000",
">=1000",
"no known savings",
],
},
{
"description": "employment",
"enum": ["unemployed", "<1", "1<=X<4", "4<=X<7", ">=7"],
},
{
"description": "personal_status",
"enum": [
"male div/sep",
"female div/dep/mar",
"male single",
"male mar/wid",
"female single",
],
},
{
"description": "other_parties",
"enum": ["none", "co applicant", "guarantor"],
},
{
"description": "property_magnitude",
"enum": [
"real estate",
"life insurance",
"car",
"no known property",
],
},
{
"description": "other_payment_plans",
"enum": ["bank", "stores", "none"],
},
{"description": "housing", "enum": ["rent", "own", "for free"]},
{
"description": "job",
"enum": [
"unemp/unskilled non res",
"unskilled resident",
"skilled",
"high qualif/self emp/mgmt",
],
},
{"description": "own_telephone", "enum": ["none", "yes"]},
{"description": "foreign_worker", "enum": ["yes", "no"]},
],
},
}
self.maxDiff = None
self.assertEqual(transformed_expected, transformed_schema)
def test_input_schema_fit(self):
self.maxDiff = None
self.assertEqual(
LogisticRegression.input_schema_fit(),
LogisticRegression.get_schema("input_fit"),
)
self.assertEqual(
(NMF >> LogisticRegression).input_schema_fit(), NMF.get_schema("input_fit")
)
self.assertEqual(
IdentityWrapper(op=LogisticRegression).input_schema_fit(),
LogisticRegression.get_schema("input_fit"),
)
actual = (TfidfVectorizer | NMF).input_schema_fit()
expected = {
"anyOf": [
{
"type": "object",
"required": ["X"],
"additionalProperties": False,
"properties": {
"X": {
"anyOf": [
{"type": "array", "items": {"type": "string"}},
{
"type": "array",
"items": {
"type": "array",
"minItems": 1,
"maxItems": 1,
"items": {"type": "string"},
},
},
]
},
"y": {},
},
},
{
"type": "object",
"required": ["X"],
"additionalProperties": False,
"properties": {
"X": {
"type": "array",
"items": {
"type": "array",
"items": {"type": "number", "minimum": 0.0},
},
},
"y": {},
},
},
]
}
self.assertEqual(actual, expected)
def test_transform_schema_NoOp(self):
with EnableSchemaValidation():
from lale.datasets.data_schemas import to_schema
for ds in [
self._irisArr,
self._irisDf,
self._digits,
self._housing,
self._creditG,
self._movies,
self._drugRev,
]:
assert ds is not None
s_input = to_schema(ds["X"])
s_output = NoOp.transform_schema(s_input)
self.assertIs(s_input, s_output)
def test_transform_schema_pipeline(self):
with EnableSchemaValidation():
from lale.datasets.data_schemas import to_schema
pipeline = NMF >> LogisticRegression
digits = self._digits
assert digits is not None
input_schema = to_schema(digits["X"])
transformed_schema = pipeline.transform_schema(input_schema)
transformed_expected = {
"description": "Probability of the sample for each class in the model.",
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
}
self.maxDiff = None
self.assertEqual(transformed_schema, transformed_expected)
def test_transform_schema_choice(self):
with EnableSchemaValidation():
from lale.datasets.data_schemas import to_schema
choice = NMF | LogisticRegression
digits = self._digits
assert digits is not None
input_schema = to_schema(digits["X"])
transformed_schema = choice.transform_schema(input_schema)
transformed_expected = {
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
}
self.maxDiff = None
self.assertEqual(transformed_schema, transformed_expected)
def test_transform_schema_higher_order(self):
with EnableSchemaValidation():
from lale.datasets.data_schemas import to_schema
inner = LogisticRegression
outer = IdentityWrapper(op=LogisticRegression)
digits = self._digits
assert digits is not None
input_schema = to_schema(digits["X"])
transformed_inner = inner.transform_schema(input_schema)
transformed_outer = outer.transform_schema(input_schema)
self.maxDiff = None
self.assertEqual(transformed_inner, transformed_outer)
def test_transform_schema_Concat_irisArr(self):
with EnableSchemaValidation():
from lale.datasets.data_schemas import to_schema
irisArr = self._irisArr
assert irisArr is not None
data_X, data_y = irisArr["X"], irisArr["y"]
s_in_X, s_in_y = to_schema(data_X), to_schema(data_y)
def check(s_actual, n_expected, s_expected):
assert s_actual["items"]["minItems"] == n_expected, str(s_actual)
assert s_actual["items"]["maxItems"] == n_expected, str(s_actual)
assert s_actual["items"]["items"] == s_expected, str(s_actual)
s_out_X = ConcatFeatures.transform_schema({"items": [s_in_X]})
check(s_out_X, 4, {"type": "number"})
s_out_y = ConcatFeatures.transform_schema({"items": [s_in_y]})
check(s_out_y, 1, {"type": "integer"})
s_out_XX = ConcatFeatures.transform_schema({"items": [s_in_X, s_in_X]})
check(s_out_XX, 8, {"type": "number"})
s_out_yy = ConcatFeatures.transform_schema({"items": [s_in_y, s_in_y]})
check(s_out_yy, 2, {"type": "integer"})
s_out_Xy = ConcatFeatures.transform_schema({"items": [s_in_X, s_in_y]})
check(s_out_Xy, 5, {"type": "number"})
s_out_XXX = ConcatFeatures.transform_schema(
{"items": [s_in_X, s_in_X, s_in_X]}
)
check(s_out_XXX, 12, {"type": "number"})
def test_transform_schema_Concat_irisDf(self):
with EnableSchemaValidation():
from lale.datasets.data_schemas import to_schema
irisDf = self._irisDf
assert irisDf is not None
data_X, data_y = irisDf["X"], irisDf["y"]
s_in_X, s_in_y = to_schema(data_X), to_schema(data_y)
def check(s_actual, n_expected, s_expected):
assert s_actual["items"]["minItems"] == n_expected, str(s_actual)
assert s_actual["items"]["maxItems"] == n_expected, str(s_actual)
assert s_actual["items"]["items"] == s_expected, str(s_actual)
s_out_X = ConcatFeatures.transform_schema({"items": [s_in_X]})
check(s_out_X, 4, {"type": "number"})
s_out_y = ConcatFeatures.transform_schema({"items": [s_in_y]})
check(s_out_y, 1, {"description": "target", "type": "integer"})
s_out_XX = ConcatFeatures.transform_schema({"items": [s_in_X, s_in_X]})
check(s_out_XX, 8, {"type": "number"})
s_out_yy = ConcatFeatures.transform_schema({"items": [s_in_y, s_in_y]})
check(s_out_yy, 2, {"type": "integer"})
s_out_Xy = ConcatFeatures.transform_schema({"items": [s_in_X, s_in_y]})
check(s_out_Xy, 5, {"type": "number"})
s_out_XXX = ConcatFeatures.transform_schema(
{"items": [s_in_X, s_in_X, s_in_X]}
)
check(s_out_XXX, 12, {"type": "number"})
def test_lr_with_all_datasets(self):
with EnableSchemaValidation():
should_succeed = ["irisArr", "irisDf", "digits", "housing"]
should_fail = ["creditG", "movies", "drugRev"]
for name in should_succeed:
dataset = getattr(self, f"_{name}")
LogisticRegression.validate_schema(**dataset)
for name in should_fail:
dataset = getattr(self, f"_{name}")
with self.assertRaises(ValueError):
LogisticRegression.validate_schema(**dataset)
def test_project_with_all_datasets(self):
with EnableSchemaValidation():
should_succeed = [
"irisArr",
"irisDf",
"digits",
"housing",
"creditG",
"drugRev",
]
should_fail = ["movies"]
for name in should_succeed:
dataset = getattr(self, f"_{name}")
lale.lib.lale.Project.validate_schema(**dataset)
for name in should_fail:
dataset = getattr(self, f"_{name}")
with self.assertRaises(ValueError):
lale.lib.lale.Project.validate_schema(**dataset)
def test_nmf_with_all_datasets(self):
with EnableSchemaValidation():
should_succeed = ["digits"]
should_fail = [
"irisArr",
"irisDf",
"housing",
"creditG",
"movies",
"drugRev",
]
for name in should_succeed:
dataset = getattr(self, f"_{name}")
NMF.validate_schema(**dataset)
for name in should_fail:
dataset = getattr(self, f"_{name}")
with self.assertRaises(ValueError):
NMF.validate_schema(**dataset)
def test_tfidf_with_all_datasets(self):
with EnableSchemaValidation():
should_succeed = ["movies"]
should_fail = [
"irisArr",
"irisDf",
"digits",
"housing",
"creditG",
"drugRev",
]
for name in should_succeed:
dataset = getattr(self, f"_{name}")
TfidfVectorizer.validate_schema(**dataset)
for name in should_fail:
dataset = getattr(self, f"_{name}")
with self.assertRaises(ValueError):
TfidfVectorizer.validate_schema(**dataset)
def test_decision_function_binary(self):
from lale.lib.lale import Project
creditG = self._creditG
assert creditG is not None
train_X, train_y = creditG["X"], creditG["y"]
trainable = Project(columns={"type": "number"}) >> LogisticRegression()
trained = trainable.fit(train_X, train_y)
_ = trained.decision_function(train_X)
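# TestErrorMessages checks the wording of the jsonschema errors raised for bad
# hyperparameter values, unknown arguments, and violated constraints, including
# the suggested fixes embedded in the error message.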
class TestErrorMessages(unittest.TestCase):
def test_wrong_cont(self):
with EnableSchemaValidation():
with self.assertRaises(jsonschema.ValidationError) as cm:
LogisticRegression(C=-1)
summary = cm.exception.message.split("\n")[0]
self.assertEqual(
summary,
"Invalid configuration for LogisticRegression(C=-1) due to invalid value C=-1.",
)
fix1 = cm.exception.message.split("\n")[2]
self.assertRegex(fix1, "C=1.0")
def test_fixes2(self):
with EnableSchemaValidation():
with self.assertRaises(jsonschema.ValidationError) as cm:
LogisticRegression(
penalty="l1",
solver="liblinear",
multi_class="multinomial",
dual=True,
)
summary = cm.exception.message.split("\n")[0]
self.assertRegex(
summary,
"Invalid configuration for LogisticRegression(.*) due to constraint",
)
fix1 = cm.exception.message.split("\n")[2]
fix2 = cm.exception.message.split("\n")[3]
# we don't care what order they are in
self.assertRegex(fix1 + fix2, "penalty='l2', multi_class='auto'")
self.assertRegex(fix1 + fix2, "multi_class='auto', dual=False")
def test_wrong_cat(self):
with EnableSchemaValidation():
with self.assertRaises(jsonschema.ValidationError) as cm:
LogisticRegression(solver="adam")
summary = cm.exception.message.split("\n")[0]
self.assertEqual(
summary,
"Invalid configuration for LogisticRegression(solver='adam') due to invalid value solver=adam.",
)
def test_unknown_arg(self):
with EnableSchemaValidation():
with self.assertRaises(jsonschema.ValidationError) as cm:
LogisticRegression(activation="relu")
summary = cm.exception.message.split("\n")[0]
self.assertEqual(
summary,
"Invalid configuration for LogisticRegression(activation='relu') due to argument 'activation' was unexpected.",
)
fix1 = cm.exception.message.split("\n")[1]
self.assertRegex(fix1, "remove unknown key 'activation'")
def test_constraint(self):
with EnableSchemaValidation():
with self.assertRaises(jsonschema.ValidationError) as cm:
LogisticRegression(solver="sag", penalty="l1")
summary = cm.exception.message.split("\n")[0]
self.assertEqual(
summary,
"Invalid configuration for LogisticRegression(solver='sag', penalty='l1') due to constraint the newton-cg, sag, and lbfgs solvers support only l2 or no penalties.",
)
fix1 = cm.exception.message.split("\n")[2]
self.assertRegex(fix1, "set penalty='l2'")
def test_unknown_arg_and_constraint2(self):
with EnableSchemaValidation():
with EnableSchemaValidation():
with self.assertRaises(jsonschema.ValidationError) as cm:
LogisticRegression(
activation="relu",
penalty="l1",
solver="liblinear",
multi_class="multinomial",
dual=True,
)
summary = cm.exception.message.split("\n")[0]
self.assertRegex(
summary,
"Invalid configuration for LogisticRegression.*due to argument 'activation' was unexpected.",
)
fix1 = cm.exception.message.split("\n")[2]
fix2 = cm.exception.message.split("\n")[3]
# we don't care what order they are in
self.assertRegex(
fix1 + fix2, "remove unknown key 'activation'.*set.*penalty='l2'"
)
self.assertRegex(
fix1 + fix2, "remove unknown key 'activation'.*set.*dual=False"
)
self.assertRegex(
fix1 + fix2,
"remove unknown key 'activation'.*set.*multi_class='auto'",
)
def test_unknown_arg_and_constraint(self):
with EnableSchemaValidation():
with self.assertRaises(jsonschema.ValidationError) as cm:
LogisticRegression(activation="relu", solver="sag", penalty="l1")
summary = cm.exception.message.split("\n")[0]
self.assertRegex(
summary,
"Invalid configuration for LogisticRegression.*due to argument 'activation' was unexpected.",
)
fix1 = cm.exception.message.split("\n")[2]
self.assertRegex(fix1, "remove unknown key 'activation'.*set penalty='l2'")
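# TestHyperparamConstraints pairs each invalid hyperparameter combination with
# the raw scikit-learn estimator, which only fails at fit time, and with the lale
# wrapper, which is expected to reject the same combination via jsonschema
# validation up front (or at fit time for data-dependent checks).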
class TestHyperparamConstraints(unittest.TestCase):
def setUp(self):
import scipy.sparse
import sklearn.datasets
data = sklearn.datasets.load_iris()
X, y = data.data, data.target
sparse_X = scipy.sparse.csr_matrix(X)
self.sparse_X = sparse_X
self.X = X
self.y = y
self.regression_X, self.regression_y = sklearn.datasets.make_regression(
n_features=4, n_informative=2, random_state=0, shuffle=False
)
def test_bagging_classifier(self):
import sklearn
from lale.lib.sklearn import BaggingClassifier
bad_hyperparams = {"bootstrap": False, "oob_score": True}
trainable = sklearn.ensemble.BaggingClassifier(**bad_hyperparams)
with self.assertRaisesRegex(
ValueError, "Out of bag estimation only available if bootstrap=True"
):
trainable.fit(self.X, self.y)
with EnableSchemaValidation():
with self.assertRaises(jsonschema.ValidationError):
BaggingClassifier(**bad_hyperparams)
def test_bagging_classifier_2(self):
import sklearn
from lale.lib.sklearn import BaggingClassifier
bad_hyperparams = {"warm_start": True, "oob_score": True}
trainable = sklearn.ensemble.BaggingClassifier(**bad_hyperparams)
with self.assertRaisesRegex(
ValueError, "Out of bag estimate only available if warm_start=False"
):
trainable.fit(self.X, self.y)
with EnableSchemaValidation():
with self.assertRaises(jsonschema.ValidationError):
BaggingClassifier(**bad_hyperparams)
def test_bagging_regressor(self):
import sklearn
from lale.lib.sklearn import BaggingRegressor
bad_hyperparams = {"bootstrap": False, "oob_score": True}
trainable = sklearn.ensemble.BaggingRegressor(**bad_hyperparams)
with self.assertRaisesRegex(
ValueError, "Out of bag estimation only available if bootstrap=True"
):
trainable.fit(self.regression_X, self.regression_y)
with EnableSchemaValidation():
with self.assertRaises(jsonschema.ValidationError):
BaggingRegressor(**bad_hyperparams)
def test_bagging_regressor_2(self):
import sklearn
from lale.lib.sklearn import BaggingRegressor
bad_hyperparams = {"warm_start": True, "oob_score": True}
trainable = sklearn.ensemble.BaggingRegressor(**bad_hyperparams)
with self.assertRaisesRegex(
ValueError, "Out of bag estimate only available if warm_start=False"
):
trainable.fit(self.regression_X, self.regression_y)
with EnableSchemaValidation():
with self.assertRaises(jsonschema.ValidationError):
BaggingRegressor(**bad_hyperparams)
def test_extra_trees_classifier(self):
import sklearn
from lale.lib.sklearn import ExtraTreesClassifier
bad_hyperparams = {"bootstrap": False, "oob_score": True}
trainable = sklearn.ensemble.ExtraTreesClassifier(**bad_hyperparams)
with self.assertRaisesRegex(
ValueError, "Out of bag estimation only available if bootstrap=True"
):
trainable.fit(self.X, self.y)
with EnableSchemaValidation():
with self.assertRaises(jsonschema.ValidationError):
ExtraTreesClassifier(**bad_hyperparams)
def test_extra_trees_regressor(self):
import sklearn
from lale.lib.sklearn import ExtraTreesRegressor
bad_hyperparams = {"bootstrap": False, "oob_score": True}
trainable = sklearn.ensemble.ExtraTreesRegressor(**bad_hyperparams)
with self.assertRaisesRegex(
ValueError, "Out of bag estimation only available if bootstrap=True"
):
trainable.fit(self.regression_X, self.regression_y)
with EnableSchemaValidation():
with self.assertRaises(jsonschema.ValidationError):
ExtraTreesRegressor(**bad_hyperparams)
def test_function_transformer(self):
import sklearn
from lale.lib.sklearn import FunctionTransformer
bad_hyperparams = {"validate": True, "accept_sparse": False}
bad_X = self.sparse_X
y = self.y
trainable = sklearn.preprocessing.FunctionTransformer(**bad_hyperparams)
with self.assertRaisesRegex(
TypeError, "A sparse matrix was passed, but dense data is required."
):
trainable.fit(bad_X, self.y)
trainable = FunctionTransformer(**bad_hyperparams)
with EnableSchemaValidation():
with self.assertRaises(jsonschema.ValidationError):
trainable.fit(bad_X, y)
def test_linear_svc_1(self):
import sklearn
from lale.lib.sklearn import LinearSVC
bad_hyperparams = {"penalty": "l1", "loss": "hinge", "multi_class": "ovr"}
trainable = sklearn.svm.LinearSVC(**bad_hyperparams)
with self.assertRaisesRegex(
ValueError,
"The combination of penalty='l1' and loss='hinge' is not supported",
):
trainable.fit(self.X, self.y)
with EnableSchemaValidation():
with self.assertRaises(jsonschema.ValidationError):
LinearSVC(**bad_hyperparams)
def test_linear_svc_2(self):
import sklearn
from lale.lib.sklearn import LinearSVC
bad_hyperparams = {
"penalty": "l2",
"loss": "hinge",
"dual": False,
"multi_class": "ovr",
}
trainable = sklearn.svm.LinearSVC(**bad_hyperparams)
with self.assertRaisesRegex(
ValueError,
"The combination of penalty='l2' and loss='hinge' are not supported when dual=False",
):
trainable.fit(self.X, self.y)
with EnableSchemaValidation():
with self.assertRaises(jsonschema.ValidationError):
LinearSVC(**bad_hyperparams)
def test_linear_svc_3(self):
import sklearn
from lale.lib.sklearn import LinearSVC
bad_hyperparams = {
"penalty": "l1",
"loss": "squared_hinge",
"dual": True,
"multi_class": "ovr",
}
trainable = sklearn.svm.LinearSVC(**bad_hyperparams)
with self.assertRaisesRegex(
ValueError,
"The combination of penalty='l1' and loss='squared_hinge' are not supported when dual=True",
):
trainable.fit(self.X, self.y)
with EnableSchemaValidation():
with self.assertRaises(jsonschema.ValidationError):
LinearSVC(**bad_hyperparams)
def test_linear_svr(self):
import sklearn
from lale.lib.sklearn import LinearSVR
bad_hyperparams = {"loss": "epsilon_insensitive", "dual": False}
trainable = sklearn.svm.LinearSVR(**bad_hyperparams)
with self.assertRaisesRegex(
ValueError,
"The combination of penalty='l2' and loss='epsilon_insensitive' are not supported when dual=False",
):
trainable.fit(self.regression_X, self.regression_y)
with EnableSchemaValidation():
with self.assertRaises(jsonschema.ValidationError):
LinearSVR(**bad_hyperparams)
def test_logistic_regression_1(self):
import sklearn
bad_hyperparams = {"solver": "liblinear", "penalty": "none"}
trainable = sklearn.linear_model.LogisticRegression(**bad_hyperparams)
with self.assertRaisesRegex(
ValueError, "penalty='none' is not supported for the liblinear solver"
):
trainable.fit(self.X, self.y)
with EnableSchemaValidation():
with self.assertRaises(jsonschema.ValidationError):
LogisticRegression(**bad_hyperparams)
def test_logistic_regression_2(self):
import sklearn
bad_hyperparams = {
"penalty": "elasticnet",
"l1_ratio": None,
"solver": "saga",
}
trainable = sklearn.linear_model.LogisticRegression(**bad_hyperparams)
with self.assertRaises(BaseException):
trainable.fit(self.X, self.y)
with EnableSchemaValidation():
with self.assertRaises(jsonschema.ValidationError):
LogisticRegression(**bad_hyperparams)
def test_logistic_regression_3(self):
import sklearn
bad_hyperparams = {
"penalty": "elasticnet",
"solver": "liblinear",
"l1_ratio": 0.5,
}
trainable = sklearn.linear_model.LogisticRegression(**bad_hyperparams)
with self.assertRaisesRegex(
ValueError, "Only 'saga' solver supports elasticnet penalty"
):
trainable.fit(self.X, self.y)
with EnableSchemaValidation():
with self.assertRaises(jsonschema.ValidationError):
LogisticRegression(**bad_hyperparams)
def test_missing_indicator(self):
import sklearn
from lale.lib.sklearn import MissingIndicator
bad_X = self.sparse_X
y = self.y
bad_hyperparams = {"missing_values": 0}
trainable = sklearn.impute.MissingIndicator(**bad_hyperparams)
with self.assertRaisesRegex(
ValueError, "Sparse input with missing_values=0 is not supported."
):
trainable.fit(bad_X, self.y)
trainable = MissingIndicator(**bad_hyperparams)
with EnableSchemaValidation():
with self.assertRaises(jsonschema.ValidationError):
trainable.fit(bad_X, y)
def test_one_hot_encoder(self):
import sklearn
from lale.lib.sklearn import OneHotEncoder
bad_hyperparams = {"drop": "first", "handle_unknown": "ignore"}
trainable = sklearn.preprocessing.OneHotEncoder(**bad_hyperparams)
if sklearn.__version__ < "1.0":
with self.assertRaisesRegex(
ValueError,
"`handle_unknown` must be 'error' when the drop parameter is specified",
):
trainable.fit(self.X, self.y)
with EnableSchemaValidation():
if sklearn.__version__ < "1.0":
with self.assertRaises(jsonschema.ValidationError):
OneHotEncoder(**bad_hyperparams)
else:
OneHotEncoder(**bad_hyperparams)
def test_ordinal_encoder_1(self):
import sklearn
from lale.lib.sklearn import OrdinalEncoder
if sklearn.__version__ >= "0.24.1":
bad_hyperparams = {
"handle_unknown": "use_encoded_value",
"unknown_value": None,
}
trainable = sklearn.preprocessing.OrdinalEncoder(**bad_hyperparams)
with self.assertRaisesRegex(
TypeError,
"unknown_value should be an integer or np.nan when handle_unknown is 'use_encoded_value'",
):
trainable.fit(self.X, self.y)
with EnableSchemaValidation():
with self.assertRaises(jsonschema.ValidationError):
OrdinalEncoder(**bad_hyperparams)
def test_ordinal_encoder_2(self):
import sklearn
from lale.lib.sklearn import OrdinalEncoder
if sklearn.__version__ >= "0.24.1":
bad_hyperparams = {"handle_unknown": "error", "unknown_value": 1}
trainable = sklearn.preprocessing.OrdinalEncoder(**bad_hyperparams)
with self.assertRaisesRegex(
TypeError,
"unknown_value should only be set when handle_unknown is 'use_encoded_value'",
):
trainable.fit(self.X, self.y)
with EnableSchemaValidation():
with self.assertRaises(jsonschema.ValidationError):
OrdinalEncoder(**bad_hyperparams)
def test_random_forest_classifier(self):
import sklearn
from lale.lib.sklearn import RandomForestClassifier
bad_hyperparams = {"bootstrap": False, "oob_score": True}
trainable = sklearn.ensemble.RandomForestClassifier(**bad_hyperparams)
with self.assertRaisesRegex(
ValueError, "Out of bag estimation only available if bootstrap=True"
):
trainable.fit(self.X, self.y)
with EnableSchemaValidation():
with self.assertRaises(jsonschema.ValidationError):
RandomForestClassifier(**bad_hyperparams)
def test_random_forest_regressor(self):
import sklearn
from lale.lib.sklearn import RandomForestRegressor
bad_hyperparams = {"bootstrap": False, "oob_score": True}
trainable = sklearn.ensemble.RandomForestRegressor(**bad_hyperparams)
with self.assertRaisesRegex(
ValueError, "Out of bag estimation only available if bootstrap=True"
):
trainable.fit(self.regression_X, self.regression_y)
with EnableSchemaValidation():
with self.assertRaises(jsonschema.ValidationError):
RandomForestRegressor(**bad_hyperparams)
@unittest.skip("newer versions of sklearn do support this")
def test_ridge_1(self):
import sklearn
from lale.lib.sklearn import Ridge
bad_X = self.sparse_X
y = self.y
bad_hyperparams = {"fit_intercept": True, "solver": "lsqr"}
trainable = sklearn.linear_model.Ridge(**bad_hyperparams)
with self.assertRaisesRegex(
ValueError, "does not support fitting the intercept on sparse data."
):
trainable.fit(bad_X, self.y)
trainable = Ridge(**bad_hyperparams)
with EnableSchemaValidation():
with self.assertRaises(jsonschema.ValidationError):
trainable.fit(bad_X, y)
def test_ridge_2(self):
import sklearn
from lale.lib.sklearn import Ridge
bad_X = self.sparse_X
y = self.y
bad_hyperparams = {"solver": "svd", "fit_intercept": False}
trainable = sklearn.linear_model.Ridge(**bad_hyperparams)
with self.assertRaisesRegex(
TypeError, "SVD solver does not support sparse inputs currently"
):
trainable.fit(bad_X, self.y)
trainable = Ridge(**bad_hyperparams)
with EnableSchemaValidation():
with self.assertRaises(jsonschema.ValidationError):
trainable.fit(bad_X, y)
def test_robust_scaler(self):
import sklearn
from lale.lib.sklearn import RobustScaler
bad_X = self.sparse_X
y = self.y
bad_hyperparams = {"with_centering": True}
trainable = sklearn.preprocessing.RobustScaler(**bad_hyperparams)
with self.assertRaisesRegex(
ValueError,
"Cannot center sparse matrices: use `with_centering=False` instead.",
):
trainable.fit(bad_X, self.y)
trainable = RobustScaler(**bad_hyperparams)
with EnableSchemaValidation():
with self.assertRaises(jsonschema.ValidationError):
trainable.fit(bad_X, y)
def test_simple_imputer(self):
import sklearn
from lale.lib.sklearn import SimpleImputer
bad_X = self.sparse_X
y = self.y
bad_hyperparams = {"missing_values": 0}
trainable = sklearn.impute.SimpleImputer(**bad_hyperparams)
with self.assertRaisesRegex(
ValueError,
"Imputation not possible when missing_values == 0 and input is sparse.",
):
trainable.fit(bad_X, self.y)
trainable = SimpleImputer(**bad_hyperparams)
with EnableSchemaValidation():
with self.assertRaises(jsonschema.ValidationError):
trainable.fit(bad_X, y)
def test_svc(self):
import sklearn
from lale.lib.sklearn import SVC
bad_X = self.sparse_X
y = self.y
bad_hyperparams = {"kernel": "precomputed"}
trainable = sklearn.svm.SVC(**bad_hyperparams)
with self.assertRaisesRegex(
TypeError, "Sparse precomputed kernels are not supported."
):
trainable.fit(bad_X, self.y)
trainable = SVC(**bad_hyperparams)
with EnableSchemaValidation():
with self.assertRaises(jsonschema.ValidationError):
trainable.fit(bad_X, y)
def test_svr(self):
import sklearn
from lale.lib.sklearn import SVR
bad_X = self.sparse_X
y = self.y
bad_hyperparams = {"kernel": "precomputed"}
trainable = sklearn.svm.SVR(**bad_hyperparams)
with self.assertRaisesRegex(
TypeError, "Sparse precomputed kernels are not supported."
):
trainable.fit(bad_X, self.y)
trainable = SVR(**bad_hyperparams)
with EnableSchemaValidation():
with self.assertRaises(jsonschema.ValidationError):
trainable.fit(bad_X, y)
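# Illustrative sketch (not part of the original test suite): the tests above
# contrast scikit-learn, which only reports an invalid hyperparameter
# combination once fit() sees data, with lale, which rejects the same
# combination at construction time through its JSON-schema constraints.
# A minimal sketch of that pattern, assuming the EnableSchemaValidation
# context manager used throughout this module (imported from the test
# package, as in the sibling test modules) and the lale.lib.sklearn.LinearSVC
# wrapper exercised above.
def _sketch_constraint_checked_at_construction():
    import jsonschema
    from test import EnableSchemaValidation
    from lale.lib.sklearn import LinearSVC
    bad_hyperparams = {"penalty": "l1", "loss": "hinge", "multi_class": "ovr"}
    with EnableSchemaValidation():
        try:
            LinearSVC(**bad_hyperparams)  # rejected before any data is seen
        except jsonschema.ValidationError as e:
            print("caught at construction time:", e.message)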
class TestSchemaValidation(unittest.TestCase):
def test_any(self):
from lale.type_checking import is_subschema
num_schema = {"type": "number"}
any_schema = {"laleType": "Any"}
jsonschema.validate(42, num_schema)
jsonschema.validate(42, any_schema)
self.assertTrue(is_subschema(num_schema, any_schema))
self.assertTrue(is_subschema(any_schema, num_schema))
def test_bool_label(self):
import pandas as pd
data_records = [
{
"IS_TENT": False,
"GENDER": "M",
"AGE": 20,
"MARITAL_STATUS": "Single",
"PROFESSION": "Sales",
},
{
"IS_TENT": False,
"GENDER": "M",
"AGE": 20,
"MARITAL_STATUS": "Single",
"PROFESSION": "Sales",
},
{
"IS_TENT": False,
"GENDER": "F",
"AGE": 37,
"MARITAL_STATUS": "Single",
"PROFESSION": "Other",
},
{
"IS_TENT": False,
"GENDER": "M",
"AGE": 42,
"MARITAL_STATUS": "Married",
"PROFESSION": "Other",
},
{
"IS_TENT": True,
"GENDER": "F",
"AGE": 24,
"MARITAL_STATUS": "Married",
"PROFESSION": "Retail",
},
{
"IS_TENT": False,
"GENDER": "F",
"AGE": 24,
"MARITAL_STATUS": "Married",
"PROFESSION": "Retail",
},
{
"IS_TENT": False,
"GENDER": "M",
"AGE": 29,
"MARITAL_STATUS": "Single",
"PROFESSION": "Retail",
},
{
"IS_TENT": False,
"GENDER": "M",
"AGE": 29,
"MARITAL_STATUS": "Single",
"PROFESSION": "Retail",
},
{
"IS_TENT": True,
"GENDER": "M",
"AGE": 43,
"MARITAL_STATUS": "Married",
"PROFESSION": "Trades",
},
{
"IS_TENT": False,
"GENDER": "M",
"AGE": 43,
"MARITAL_STATUS": "Married",
"PROFESSION": "Trades",
},
]
df = pd.DataFrame.from_records(data_records)
X = df.drop(["IS_TENT"], axis=1).values
y = df["IS_TENT"].values
from lale.lib.sklearn import GradientBoostingClassifier as Clf
from lale.lib.sklearn import OneHotEncoder as Enc
trainable = Enc() >> Clf()
_ = trainable.fit(X, y)
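# Illustrative sketch (not part of the original test suite): the "Any" schema
# exercised by test_any above is lale's escape hatch that accepts every value,
# so subschema checks against it succeed in both directions. A minimal sketch,
# assuming lale.type_checking.is_subschema treats other types the same way it
# treats the number schema in the test above.
def _sketch_any_schema():
    import jsonschema
    from lale.type_checking import is_subschema
    str_schema = {"type": "string"}
    any_schema = {"laleType": "Any"}
    jsonschema.validate("hello", str_schema)
    jsonschema.validate("hello", any_schema)  # no constraints, so anything passes
    assert is_subschema(str_schema, any_schema)
    assert is_subschema(any_schema, str_schema)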
class TestWithScorer(unittest.TestCase):
def test_bare_array(self):
import sklearn.datasets
import sklearn.metrics
from numpy import ndarray
from lale.datasets.data_schemas import NDArrayWithSchema
X, y = sklearn.datasets.load_iris(return_X_y=True)
self.assertIsInstance(X, ndarray)
self.assertIsInstance(y, ndarray)
self.assertNotIsInstance(X, NDArrayWithSchema)
self.assertNotIsInstance(y, NDArrayWithSchema)
trainable = LogisticRegression()
trained = trainable.fit(X, y)
scorer = sklearn.metrics.make_scorer(sklearn.metrics.accuracy_score)
out = scorer(trained, X, y)
self.assertIsInstance(out, float)
self.assertNotIsInstance(out, NDArrayWithSchema)
class TestDisablingSchemaValidation(unittest.TestCase):
def setUp(self):
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
data = load_iris()
X, y = data.data, data.target
self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(X, y)
def test_disable_schema_validation_individual_op(self):
existing_flag = disable_data_schema_validation
set_disable_data_schema_validation(True)
from lale import schemas
pca_input = schemas.Object(
X=schemas.AnyOf(
[
schemas.Array(schemas.Array(schemas.String())),
schemas.Array(schemas.String()),
]
)
)
custom_PCA = PCA.customize_schema(input_fit=pca_input)
pca_output = schemas.Object(
X=schemas.AnyOf(
[
schemas.Array(schemas.Array(schemas.String())),
schemas.Array(schemas.String()),
]
)
)
custom_PCA = custom_PCA.customize_schema(output_transform=pca_output)
abc = custom_PCA()
trained_pca = abc.fit(self.X_train)
trained_pca.transform(self.X_test)
set_disable_data_schema_validation(existing_flag)
def test_enable_schema_validation_individual_op(self):
with EnableSchemaValidation():
from lale import schemas
pca_input = schemas.Object(
X=schemas.AnyOf(
[
schemas.Array(schemas.Array(schemas.String())),
schemas.Array(schemas.String()),
]
)
)
custom_PCA = PCA.customize_schema(input_fit=pca_input)
pca_output = schemas.Object(
X=schemas.AnyOf(
[
schemas.Array(schemas.Array(schemas.String())),
schemas.Array(schemas.String()),
]
)
)
custom_PCA = custom_PCA.customize_schema(output_transform=pca_output)
abc = custom_PCA()
with self.assertRaises(ValueError):
trained_pca = abc.fit(self.X_train)
trained_pca.transform(self.X_test)
def test_disable_schema_validation_pipeline(self):
existing_flag = disable_data_schema_validation
set_disable_data_schema_validation(True)
from lale import schemas
lr_input = schemas.Object(
required=["X", "y"],
X=schemas.AnyOf(
[
schemas.Array(schemas.Array(schemas.String())),
schemas.Array(schemas.String()),
]
),
y=schemas.Array(schemas.String()),
)
custom_LR = LogisticRegression.customize_schema(input_fit=lr_input)
abc = custom_LR()
pipeline = PCA() >> abc
trained_pipeline = pipeline.fit(self.X_train, self.y_train)
trained_pipeline.predict(self.X_test)
set_disable_data_schema_validation(existing_flag)
def test_enable_schema_validation_pipeline(self):
with EnableSchemaValidation():
from lale import schemas
lr_input = schemas.Object(
required=["X", "y"],
X=schemas.AnyOf(
[
schemas.Array(schemas.Array(schemas.String())),
schemas.Array(schemas.String()),
]
),
y=schemas.Array(schemas.String()),
)
custom_LR = LogisticRegression.customize_schema(input_fit=lr_input)
abc = custom_LR()
pipeline = PCA() >> abc
with self.assertRaises(ValueError):
trained_pipeline = pipeline.fit(self.X_train, self.y_train)
trained_pipeline.predict(self.X_test)
def test_disable_enable_hyperparam_validation(self):
existing_flag = disable_hyperparams_schema_validation
set_disable_hyperparams_schema_validation(True)
PCA(n_components=True)
set_disable_hyperparams_schema_validation(False)
with self.assertRaises(jsonschema.ValidationError):
PCA(n_components=True)
set_disable_hyperparams_schema_validation(existing_flag)
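# Illustrative sketch (not part of the original test suite): the test above
# toggles hyperparameter-schema validation globally and then restores the
# previous setting. A minimal sketch of that toggle, assuming the
# set_disable_hyperparams_schema_validation helper used above lives in
# lale.settings (this module's imports are not shown in this excerpt).
def _sketch_toggling_hyperparam_validation():
    import jsonschema
    from lale import settings
    from lale.lib.sklearn import PCA
    previous = settings.disable_hyperparams_schema_validation
    # Disabled: an invalid hyperparameter value slips through construction.
    settings.set_disable_hyperparams_schema_validation(True)
    _ = PCA(n_components=True)
    # Re-enabled: the same value is rejected with a schema error.
    settings.set_disable_hyperparams_schema_validation(False)
    try:
        _ = PCA(n_components=True)
    except jsonschema.ValidationError:
        pass
    settings.set_disable_hyperparams_schema_validation(previous)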
# ---- File: lale-master/test/test_core_transformers.py ----
# Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from test import EnableSchemaValidation
from typing import Any
import jsonschema
import pandas as pd
import lale.lib.lale
import lale.lib.sklearn
import lale.type_checking
from lale.datasets import pandas2spark
from lale.datasets.data_schemas import add_table_name, get_table_name
from lale.datasets.util import spark_installed
from lale.lib.lale import ConcatFeatures
from lale.lib.sklearn import (
NMF,
PCA,
RFE,
FunctionTransformer,
LogisticRegression,
MissingIndicator,
Nystroem,
TfidfVectorizer,
)
class TestFeaturePreprocessing(unittest.TestCase):
def setUp(self):
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
data = load_iris()
X, y = data.data, data.target
self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(X, y)
def create_function_test_feature_preprocessor(fproc_name):
def test_feature_preprocessor(self):
X_train, y_train = self.X_train, self.y_train
import importlib
module_name = ".".join(fproc_name.split(".")[0:-1])
class_name = fproc_name.split(".")[-1]
module = importlib.import_module(module_name)
class_ = getattr(module, class_name)
fproc = class_()
from lale.lib.sklearn.one_hot_encoder import OneHotEncoder
if isinstance(fproc, OneHotEncoder): # type: ignore
# fproc = OneHotEncoder(handle_unknown = 'ignore')
# remove the hack when this is fixed
fproc = PCA()
# test_schemas_are_schemas
lale.type_checking.validate_is_schema(fproc.input_schema_fit())
lale.type_checking.validate_is_schema(fproc.input_schema_transform())
lale.type_checking.validate_is_schema(fproc.output_schema_transform())
lale.type_checking.validate_is_schema(fproc.hyperparam_schema())
# test_init_fit_transform
trained = fproc.fit(self.X_train, self.y_train)
_ = trained.transform(self.X_test)
# test_predict_on_trainable
trained = fproc.fit(X_train, y_train)
fproc.transform(X_train)
# test_to_json
fproc.to_json()
# test_in_a_pipeline
# This test assumes that the output of feature processing is compatible with LogisticRegression
pipeline = fproc >> LogisticRegression()
trained = pipeline.fit(self.X_train, self.y_train)
_ = trained.predict(self.X_test)
# Tune the pipeline with LR using Hyperopt
from lale.lib.lale import Hyperopt
hyperopt = Hyperopt(estimator=pipeline, max_evals=1, verbose=True, cv=3)
trained = hyperopt.fit(self.X_train, self.y_train)
_ = trained.predict(self.X_test)
test_feature_preprocessor.__name__ = f"test_{fproc_name.split('.')[-1]}"
return test_feature_preprocessor
feature_preprocessors = [
"lale.lib.sklearn.PolynomialFeatures",
"lale.lib.sklearn.PCA",
"lale.lib.sklearn.Nystroem",
"lale.lib.sklearn.Normalizer",
"lale.lib.sklearn.MinMaxScaler",
"lale.lib.sklearn.OneHotEncoder",
"lale.lib.sklearn.SimpleImputer",
"lale.lib.sklearn.StandardScaler",
"lale.lib.sklearn.FeatureAgglomeration",
"lale.lib.sklearn.RobustScaler",
"lale.lib.sklearn.QuantileTransformer",
"lale.lib.sklearn.VarianceThreshold",
"lale.lib.sklearn.Isomap",
]
for fproc_to_test in feature_preprocessors:
setattr(
TestFeaturePreprocessing,
f"test_{fproc_to_test.rsplit('.', maxsplit=1)[-1]}",
create_function_test_feature_preprocessor(fproc_to_test),
)
class TestNMF(unittest.TestCase):
def test_init_fit_predict(self):
from lale.datasets import digits_df
nmf = NMF()
lr = LogisticRegression()
trainable = nmf >> lr
(train_X, train_y), (test_X, _test_y) = digits_df()
trained = trainable.fit(train_X, train_y)
_ = trained.predict(test_X)
    def test_not_random_state(self):
with EnableSchemaValidation():
with self.assertRaises(jsonschema.ValidationError):
_ = NMF(random_state='"not RandomState"')
class TestFunctionTransformer(unittest.TestCase):
def test_init_fit_predict(self):
import numpy as np
from lale.datasets import digits_df
ft = FunctionTransformer(func=np.log1p)
lr = LogisticRegression()
trainable = ft >> lr
(train_X, train_y), (test_X, _test_y) = digits_df()
trained = trainable.fit(train_X, train_y)
_ = trained.predict(test_X)
def test_not_callable(self):
with EnableSchemaValidation():
with self.assertRaises(jsonschema.ValidationError):
_ = FunctionTransformer(func='"not callable"')
class TestMissingIndicator(unittest.TestCase):
def test_init_fit_transform(self):
import numpy as np
X1 = np.array([[np.nan, 1, 3], [4, 0, np.nan], [8, 1, 0]])
X2 = np.array([[5, 1, np.nan], [np.nan, 2, 3], [2, 4, 0]])
trainable = MissingIndicator()
trained = trainable.fit(X1)
transformed = trained.transform(X2)
expected = np.array([[False, True], [True, False], [False, False]])
self.assertTrue((transformed == expected).all())
class TestRFE(unittest.TestCase):
def test_init_fit_predict(self):
import sklearn.datasets
import sklearn.svm
svm = lale.lib.sklearn.SVR(kernel="linear")
rfe = RFE(estimator=svm, n_features_to_select=2)
lr = LogisticRegression()
trainable = rfe >> lr
data = sklearn.datasets.load_iris()
X, y = data.data, data.target
trained = trainable.fit(X, y)
_ = trained.predict(X)
def test_init_fit_predict_sklearn(self):
import sklearn.datasets
import sklearn.svm
svm = sklearn.svm.SVR(kernel="linear")
rfe = RFE(estimator=svm, n_features_to_select=2)
lr = LogisticRegression()
trainable = rfe >> lr
data = sklearn.datasets.load_iris()
X, y = data.data, data.target
trained = trainable.fit(X, y)
_ = trained.predict(X)
def test_not_operator(self):
with EnableSchemaValidation():
with self.assertRaises(jsonschema.ValidationError):
_ = RFE(estimator='"not an operator"', n_features_to_select=2)
def test_attrib_sklearn(self):
import sklearn.datasets
import sklearn.svm
svm = sklearn.svm.SVR(kernel="linear")
rfe = RFE(estimator=svm, n_features_to_select=2)
lr = LogisticRegression()
trainable = rfe >> lr
data = sklearn.datasets.load_iris()
X, y = data.data, data.target
trained = trainable.fit(X, y)
_ = trained.predict(X)
from lale.lib.lale import Hyperopt
opt = Hyperopt(estimator=trainable, max_evals=2, verbose=True)
opt.fit(X, y)
def test_attrib(self):
import sklearn.datasets
svm = lale.lib.sklearn.SVR(kernel="linear")
rfe = RFE(estimator=svm, n_features_to_select=2)
lr = LogisticRegression()
trainable = rfe >> lr
data = sklearn.datasets.load_iris()
X, y = data.data, data.target
trained = trainable.fit(X, y)
_ = trained.predict(X)
from lale.lib.lale import Hyperopt
opt = Hyperopt(estimator=trainable, max_evals=2, verbose=True)
opt.fit(X, y)
class TestOrdinalEncoder(unittest.TestCase):
def setUp(self):
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
data = load_iris()
X, y = data.data, data.target
self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(X, y)
def test_with_hyperopt(self):
from lale.lib.sklearn import OrdinalEncoder
fproc = OrdinalEncoder(handle_unknown="ignore")
pipeline = fproc >> LogisticRegression()
# Tune the pipeline with LR using Hyperopt
from lale.lib.lale import Hyperopt
hyperopt = Hyperopt(estimator=pipeline, max_evals=1)
trained = hyperopt.fit(self.X_train, self.y_train)
_ = trained.predict(self.X_test)
def test_inverse_transform(self):
from lale.lib.sklearn import OneHotEncoder, OrdinalEncoder
fproc_ohe = OneHotEncoder(handle_unknown="ignore")
# test_init_fit_transform
trained_ohe = fproc_ohe.fit(self.X_train, self.y_train)
transformed_X = trained_ohe.transform(self.X_test)
orig_X_ohe = trained_ohe._impl._wrapped_model.inverse_transform(transformed_X)
fproc_oe = OrdinalEncoder(handle_unknown="ignore")
# test_init_fit_transform
trained_oe = fproc_oe.fit(self.X_train, self.y_train)
transformed_X = trained_oe.transform(self.X_test)
orig_X_oe = trained_oe._impl.inverse_transform(transformed_X)
self.assertEqual(orig_X_ohe.all(), orig_X_oe.all())
def test_handle_unknown_error(self):
from lale.lib.sklearn import OrdinalEncoder
fproc_oe = OrdinalEncoder(handle_unknown="error")
# test_init_fit_transform
trained_oe = fproc_oe.fit(self.X_train, self.y_train)
with self.assertRaises(
ValueError
        ):  # This is relying on the train_test_split, so it may fail randomly
_ = trained_oe.transform(self.X_test)
def test_encode_unknown_with(self):
from lale.lib.sklearn import OrdinalEncoder
fproc_oe = OrdinalEncoder(handle_unknown="ignore", encode_unknown_with=1000)
# test_init_fit_transform
trained_oe = fproc_oe.fit(self.X_train, self.y_train)
transformed_X = trained_oe.transform(self.X_test)
        # This is relying on the train_test_split, so it may fail randomly
self.assertTrue(1000 in transformed_X)
# Testing that inverse_transform works even for encode_unknown_with=1000
_ = trained_oe._impl.inverse_transform(transformed_X)
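# Illustrative sketch (not part of the original test suite): lale's
# OrdinalEncoder adds handle_unknown="ignore" and encode_unknown_with on top
# of scikit-learn's encoder, so categories unseen during fit get a sentinel
# code instead of raising. A minimal sketch, assuming string categories are
# handled the same way as the numeric iris features used in the tests above.
def _sketch_ordinal_encoder_unknown_categories():
    from lale.lib.sklearn import OrdinalEncoder
    train_X = [["red"], ["green"], ["blue"]]
    test_X = [["red"], ["purple"]]  # "purple" was never seen during fit
    enc = OrdinalEncoder(handle_unknown="ignore", encode_unknown_with=1000)
    trained = enc.fit(train_X)
    transformed = trained.transform(test_X)
    assert 1000 in transformed  # the unknown category maps to the sentinel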
class TestConcatFeatures(unittest.TestCase):
def test_hyperparam_defaults(self):
_ = ConcatFeatures()
def test_init_fit_predict(self):
trainable_cf = ConcatFeatures()
A = [[11, 12, 13], [21, 22, 23], [31, 32, 33]]
B = [[14, 15], [24, 25], [34, 35]]
trained_cf = trainable_cf.fit(X=[A, B])
transformed: Any = trained_cf.transform([A, B])
expected = [[11, 12, 13, 14, 15], [21, 22, 23, 24, 25], [31, 32, 33, 34, 35]]
for transformed_sample, expected_sample in zip(transformed, expected):
for transformed_feature, expected_feature in zip(
transformed_sample, expected_sample
):
self.assertEqual(transformed_feature, expected_feature)
def test_init_fit_predict_pandas(self):
trainable_cf = ConcatFeatures()
A = [[11, 12, 13], [21, 22, 23], [31, 32, 33]]
B = [[14, 15], [24, 25], [34, 35]]
A = pd.DataFrame(A, columns=["a", "b", "c"]).rename_axis(index="idx")
B = pd.DataFrame(B, columns=["d", "e"]).rename_axis(index="idx")
A = add_table_name(A, "A")
B = add_table_name(B, "B")
trained_cf = trainable_cf.fit(X=[A, B])
transformed = trained_cf.transform([A, B])
self.assertEqual(transformed.index.name, "idx")
expected = [
[11, 12, 13, 14, 15],
[21, 22, 23, 24, 25],
[31, 32, 33, 34, 35],
]
expected = pd.DataFrame(expected, columns=["a", "b", "c", "d", "e"])
for c in expected.columns:
self.assertEqual(list(transformed[c]), list(expected[c]))
def test_init_fit_predict_pandas_series(self):
trainable_cf = ConcatFeatures()
A = [[11, 12, 13], [21, 22, 23], [31, 32, 33]]
B = [14, 24, 34]
A = pd.DataFrame(A, columns=["a", "b", "c"])
B = pd.Series(B, name="d")
A = add_table_name(A, "A")
B = add_table_name(B, "B")
trained_cf = trainable_cf.fit(X=[A, B])
transformed = trained_cf.transform([A, B])
expected = [
[11, 12, 13, 14],
[21, 22, 23, 24],
[31, 32, 33, 34],
]
expected = pd.DataFrame(expected, columns=["a", "b", "c", "d"])
for c in expected.columns:
self.assertEqual(list(transformed[c]), list(expected[c]))
def test_init_fit_predict_spark(self):
if spark_installed:
trainable_cf = ConcatFeatures()
A = [[11, 12, 13], [21, 22, 23], [31, 32, 33]]
B = [[14, 15], [24, 25], [34, 35]]
A = pd.DataFrame(A, columns=["a", "b", "c"])
B = pd.DataFrame(B, columns=["d", "e"])
A = pandas2spark(A.rename_axis(index="idx"))
B = pandas2spark(B.rename_axis(index="idx"))
A = add_table_name(A, "A")
B = add_table_name(B, "B")
trained_cf = trainable_cf.fit(X=[A, B])
transformed = trained_cf.transform([A, B]).toPandas()
self.assertEqual(transformed.index.name, "idx")
expected = [
[11, 12, 13, 14, 15],
[21, 22, 23, 24, 25],
[31, 32, 33, 34, 35],
]
expected = pd.DataFrame(expected, columns=["a", "b", "c", "d", "e"])
for c in expected.columns:
self.assertEqual(list(transformed[c]), list(expected[c]))
def test_init_fit_predict_spark_pandas(self):
if spark_installed:
trainable_cf = ConcatFeatures()
A = [[11, 12, 13], [21, 22, 23], [31, 32, 33]]
B = [[14, 15], [24, 25], [34, 35]]
A = pd.DataFrame(A, columns=["a", "b", "c"])
B = pd.DataFrame(B, columns=["d", "e"])
A = pandas2spark(A)
A = add_table_name(A, "A")
B = add_table_name(B, "B")
trained_cf = trainable_cf.fit(X=[A, B])
transformed = trained_cf.transform([A, B])
expected = [
[11, 12, 13, 14, 15],
[21, 22, 23, 24, 25],
[31, 32, 33, 34, 35],
]
expected = pd.DataFrame(expected, columns=["a", "b", "c", "d", "e"])
for c in expected.columns:
self.assertEqual(list(transformed[c]), list(expected[c]))
def test_init_fit_predict_spark_no_table_name(self):
if spark_installed:
trainable_cf = ConcatFeatures()
A = [[11, 12, 13], [21, 22, 23], [31, 32, 33]]
B = [[14, 15], [24, 25], [34, 35]]
A = pd.DataFrame(A, columns=["a", "b", "c"])
B = pd.DataFrame(B, columns=["d", "e"])
A = pandas2spark(A)
B = pandas2spark(B)
trained_cf = trainable_cf.fit(X=[A, B])
transformed = trained_cf.transform([A, B]).toPandas()
expected = [
[11, 12, 13, 14, 15],
[21, 22, 23, 24, 25],
[31, 32, 33, 34, 35],
]
expected = pd.DataFrame(expected, columns=["a", "b", "c", "d", "e"])
for c in expected.columns:
self.assertEqual(list(transformed[c]), list(expected[c]))
def test_comparison_with_scikit(self):
import warnings
warnings.filterwarnings("ignore")
import sklearn.datasets
import sklearn.utils
from lale.helpers import cross_val_score as lale_cross_val_score
pca = PCA(n_components=3, random_state=42, svd_solver="arpack")
nys = Nystroem(n_components=10, random_state=42)
concat = ConcatFeatures()
lr = LogisticRegression(random_state=42, C=0.1, solver="saga")
trainable = (pca & nys) >> concat >> lr
digits = sklearn.datasets.load_digits()
X, y = sklearn.utils.shuffle(digits.data, digits.target, random_state=42)
cv_results = lale_cross_val_score(trainable, X, y)
cv_results = [f"{score:.1%}" for score in cv_results]
from sklearn.decomposition import PCA as SklearnPCA
from sklearn.kernel_approximation import Nystroem as SklearnNystroem
from sklearn.linear_model import LogisticRegression as SklearnLR
from sklearn.model_selection import cross_val_score as sklearn_cross_val_score
from sklearn.pipeline import FeatureUnion, make_pipeline
union = FeatureUnion(
[
(
"pca",
SklearnPCA(n_components=3, random_state=42, svd_solver="arpack"),
),
("nys", SklearnNystroem(n_components=10, random_state=42)),
]
)
lr = SklearnLR(random_state=42, C=0.1, solver="saga")
pipeline = make_pipeline(union, lr)
scikit_cv_results = sklearn_cross_val_score(pipeline, X, y, cv=5)
scikit_cv_results = [f"{score:.1%}" for score in scikit_cv_results]
self.assertEqual(cv_results, scikit_cv_results)
warnings.resetwarnings()
def test_with_pandas(self):
import warnings
from lale.datasets import load_iris_df
warnings.filterwarnings("ignore")
pca = PCA(n_components=3)
nys = Nystroem(n_components=10)
concat = ConcatFeatures()
lr = LogisticRegression(random_state=42, C=0.1)
trainable = (pca & nys) >> concat >> lr
(X_train, y_train), (X_test, _y_test) = load_iris_df()
trained = trainable.fit(X_train, y_train)
_ = trained.predict(X_test)
def test_concat_with_hyperopt(self):
from lale.lib.lale import Hyperopt
pca = PCA(n_components=3)
nys = Nystroem(n_components=10)
concat = ConcatFeatures()
lr = LogisticRegression(random_state=42, C=0.1)
trainable = (pca & nys) >> concat >> lr
clf = Hyperopt(estimator=trainable, max_evals=2)
from sklearn.datasets import load_iris
iris_data = load_iris()
clf.fit(iris_data.data, iris_data.target)
clf.predict(iris_data.data)
def test_concat_with_hyperopt2(self):
from lale.lib.lale import Hyperopt
from lale.operators import make_pipeline, make_union
pca = PCA(n_components=3)
nys = Nystroem(n_components=10)
lr = LogisticRegression(random_state=42, C=0.1)
trainable = make_pipeline(make_union(pca, nys), lr)
clf = Hyperopt(estimator=trainable, max_evals=2)
from sklearn.datasets import load_iris
iris_data = load_iris()
clf.fit(iris_data.data, iris_data.target)
clf.predict(iris_data.data)
def test_name(self):
trainable_cf = ConcatFeatures()
A = [[11, 12, 13], [21, 22, 23], [31, 32, 33]]
B = [[14, 15], [24, 25], [34, 35]]
A = pd.DataFrame(A, columns=["a", "b", "c"])
B = pd.DataFrame(B, columns=["d", "e"])
A = add_table_name(A, "A")
B = add_table_name(B, "B")
trained_cf = trainable_cf.fit(X=[A, B])
transformed = trained_cf.transform([A, B])
self.assertEqual(get_table_name(transformed), None)
A = add_table_name(A, "AB")
B = add_table_name(B, "AB")
trained_cf = trainable_cf.fit(X=[A, B])
transformed = trained_cf.transform([A, B])
self.assertEqual(get_table_name(transformed), "AB")
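# Illustrative sketch (not part of the original test suite): ConcatFeatures
# concatenates datasets column-wise and, as test_name above shows, propagates
# a table name only when every input carries the same one. A minimal sketch,
# assuming the add_table_name/get_table_name helpers imported by this module.
def _sketch_concat_features_table_name():
    import pandas as pd
    from lale.datasets.data_schemas import add_table_name, get_table_name
    from lale.lib.lale import ConcatFeatures
    A = add_table_name(pd.DataFrame({"a": [1, 2]}), "tbl")
    B = add_table_name(pd.DataFrame({"b": [3, 4]}), "tbl")
    combined = ConcatFeatures().fit([A, B]).transform([A, B])
    assert list(combined.columns) == ["a", "b"]
    assert get_table_name(combined) == "tbl"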
class TestTfidfVectorizer(unittest.TestCase):
def test_more_hyperparam_values(self):
with EnableSchemaValidation():
with self.assertRaises(jsonschema.ValidationError):
_ = TfidfVectorizer(
max_df=2.5, min_df=2, max_features=1000, stop_words="english"
)
with self.assertRaises(jsonschema.ValidationError):
_ = TfidfVectorizer(
max_df=2,
min_df=2,
max_features=1000,
stop_words=["I", "we", "not", "this", "that"],
analyzer="char",
)
def test_non_null_tokenizer(self):
# tokenize the doc and lemmatize its tokens
def my_tokenizer():
return "abc"
with EnableSchemaValidation():
with self.assertRaises(jsonschema.ValidationError):
_ = TfidfVectorizer(
max_df=2,
min_df=2,
max_features=1000,
stop_words="english",
tokenizer=my_tokenizer,
analyzer="char",
)
# ---- File: lale-master/test/test_core_misc.py ----
# Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Test cases for miscellaneous functionality of Lale that is also part of the
# core behavior but does not fall into other test_core* modules.
# pylint:disable=reimported
import inspect
import io
import logging
import unittest
import warnings
from typing import Any, Dict
import numpy as np
from sklearn.datasets import load_iris
from sklearn.decomposition import PCA as SkPCA
import lale.datasets
import lale.operators as Ops
import lale.type_checking
# from lale.helpers import get_sklearn_estimator_name
from lale.helpers import nest_HPparams
from lale.lib.lale import ConcatFeatures, Hyperopt, NoOp
from lale.lib.rasl import categorical
from lale.lib.sklearn import (
NMF,
PCA,
KNeighborsClassifier,
LogisticRegression,
MLPClassifier,
Nystroem,
OneHotEncoder,
RandomForestClassifier,
)
class TestTags(unittest.TestCase):
def test_estimators(self):
ops = Ops.get_available_estimators()
ops_names = [op.name() for op in ops]
self.assertIn("LogisticRegression", ops_names)
self.assertIn("MLPClassifier", ops_names)
self.assertNotIn("PCA", ops_names)
def test_interpretable_estimators(self):
ops = Ops.get_available_estimators({"interpretable"})
ops_names = [op.name() for op in ops]
self.assertIn("KNeighborsClassifier", ops_names)
self.assertNotIn("MLPClassifier", ops_names)
self.assertNotIn("PCA", ops_names)
def test_transformers(self):
ops = Ops.get_available_transformers()
ops_names = [op.name() for op in ops]
self.assertIn("PCA", ops_names)
self.assertNotIn("LogisticRegression", ops_names)
self.assertNotIn("MLPClassifier", ops_names)
class TestUnparseExpr(unittest.TestCase):
def test_unparse_const38(self):
from lale.expressions import fixedUnparse, it
test_expr = it.hello["hi"]
# This fails on 3.8 with some versions of the library
# which is why we use the fixed version
# import astunparse
        # astunparse.unparse(test_expr._expr)
str(fixedUnparse(test_expr._expr))
class TestOperatorWithoutSchema(unittest.TestCase):
def test_trainable_pipe_left(self):
iris = load_iris()
pipeline = SkPCA() >> LogisticRegression(random_state=42)
pipeline.fit(iris.data, iris.target)
def test_trainable_pipe_right(self):
iris = load_iris()
pipeline = NoOp() >> SkPCA() >> LogisticRegression(random_state=42)
pipeline.fit(iris.data, iris.target)
def dont_test_planned_pipe_left(self):
iris = load_iris()
pipeline = NoOp() >> SkPCA >> LogisticRegression
clf = Hyperopt(estimator=pipeline, max_evals=1)
clf.fit(iris.data, iris.target)
def dont_test_planned_pipe_right(self):
iris = load_iris()
pipeline = SkPCA >> LogisticRegression
clf = Hyperopt(estimator=pipeline, max_evals=1)
clf.fit(iris.data, iris.target)
class _TestLazyImpl(unittest.TestCase):
def test_lazy_impl(self):
impl = Hyperopt._impl
self.assertTrue(inspect.isclass(impl))
class TestOperatorErrors(unittest.TestCase):
def test_trainable_get_pipeline_fail(self):
try:
_ = LogisticRegression().get_pipeline
self.fail("get_pipeline did not fail")
except AttributeError as e:
msg: str = str(e)
self.assertRegex(msg, "is not trained.")
self.assertRegex(msg, "the result of fit is a new trained")
def test_trained_get_pipeline_fail(self):
try:
_ = NoOp().get_pipeline
self.fail("get_pipeline did not fail")
except AttributeError as e:
msg: str = str(e)
self.assertRegex(msg, "underlying operator")
def test_trained_get_pipeline_success(self):
iris_data = load_iris()
op = Hyperopt(estimator=LogisticRegression(), max_evals=1)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
op2 = op.fit(iris_data.data[10:], iris_data.target[10:])
_ = op2.get_pipeline
def test_trainable_summary_fail(self):
try:
_ = LogisticRegression().summary
self.fail("summary did not fail")
except AttributeError as e:
msg: str = str(e)
self.assertRegex(msg, "is not trained.")
self.assertRegex(msg, "the result of fit is a new trained")
def test_trained_summary_fail(self):
try:
_ = NoOp().summary
self.fail("summary did not fail")
except AttributeError as e:
msg: str = str(e)
self.assertRegex(msg, "underlying operator")
def test_trained_summary_success(self):
iris_data = load_iris()
op = Hyperopt(
estimator=LogisticRegression(), max_evals=1, show_progressbar=False
)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
op2 = op.fit(iris_data.data[10:], iris_data.target[10:])
_ = op2.summary
class TestLaleVersion(unittest.TestCase):
def test_version_exists(self):
from lale import __version__ as lale_version
self.assertIsNot(lale_version, None)
class TestMethodParameters(unittest.TestCase):
def test_fit_predict_params_individual(self):
from test.mock_custom_operators import CustomParamsCheckerOp
trainable = CustomParamsCheckerOp()
trained = trainable.fit([[3, 4], [5, 6]], fit_version=5)
_ = trained.predict([3, 4], predict_version=6)
self.assertEqual(trained.impl._fit_params.get("fit_version", None), 5)
self.assertEqual(trained.impl._predict_params.get("predict_version", None), 6)
def test_predict_params_pipeline(self):
from test.mock_custom_operators import CustomParamsCheckerOp
trainable = CustomParamsCheckerOp() >> CustomParamsCheckerOp()
trained = trainable.fit([[3, 4], [5, 6]], y=[3], fit_version=5)
_ = trained.predict([3, 4], predict_version=6)
self.assertEqual(
trained.steps_list()[1].impl._predict_params.get("predict_version", None), 6
)
# self.assertEqual(trained.steps()[1].impl._fit_params.get("fit_version", None), 5)
class TestWrappedImpl(unittest.TestCase):
def test_impl(self):
import sklearn.preprocessing._encoders as skohe
ohe = OneHotEncoder()
self.assertIsInstance(ohe.impl, skohe.OneHotEncoder)
def test_shallow_impl(self):
import lale.lib.sklearn.one_hot_encoder as lohe
ohe = OneHotEncoder()
self.assertIsInstance(ohe.shallow_impl, lohe._OneHotEncoderImpl)
class TestOperatorLogging(unittest.TestCase):
def setUp(self):
self.old_level = Ops.logger.level
Ops.logger.setLevel(logging.INFO)
self.stream = io.StringIO()
self.handler = logging.StreamHandler(self.stream)
Ops.logger.addHandler(self.handler)
@unittest.skip("Turned off the logging for now")
def test_log_fit_predict(self):
trainable = LogisticRegression()
(X_train, y_train), (X_test, _y_test) = lale.datasets.load_iris_df()
trained = trainable.fit(X_train, y_train)
_ = trained.predict(X_test)
self.handler.flush()
s1, s2, s3, s4 = self.stream.getvalue().strip().split("\n")
self.assertTrue(s1.endswith("enter fit LogisticRegression"))
self.assertTrue(s2.endswith("exit fit LogisticRegression"))
self.assertTrue(s3.endswith("enter predict LogisticRegression"))
self.assertTrue(s4.endswith("exit predict LogisticRegression"))
def tearDown(self):
Ops.logger.removeHandler(self.handler)
Ops.logger.setLevel(self.old_level)
self.handler.close()
class TestBoth(unittest.TestCase):
def test_init_fit_transform(self):
from lale.lib.lale import Both
nmf = NMF()
pca = PCA()
trainable = Both(op1=nmf, op2=pca)
(train_X, train_y), (test_X, _test_y) = lale.datasets.digits_df()
trained = trainable.fit(train_X, train_y)
_ = trained.transform(test_X)
class TestTee(unittest.TestCase):
def test_tee_None(self):
from lale.lib.lale import Tee
pca = PCA()
trainable = Tee() >> pca
(train_X, train_y), (test_X, _test_y) = lale.datasets.digits_df()
trained = trainable.fit(train_X, train_y)
_ = trained.transform(test_X)
def test_tee_lambda(self):
from lale.lib.lale import Tee
def check_data(X, y):
self.assertEqual(X.dtypes["x1"], np.float64)
pca = PCA()
trainable = Tee(listener=check_data) >> pca
(train_X, train_y), (test_X, _test_y) = lale.datasets.digits_df()
trained = trainable.fit(train_X, train_y)
_ = trained.transform(test_X)
def test_tee_def(self):
from lale.lib.lale import Tee
def check_data(X, y):
self.assertEqual(X.dtypes["x1"], np.float64)
pca = PCA()
trainable = Tee(listener=check_data) >> pca
(train_X, train_y), (test_X, _test_y) = lale.datasets.digits_df()
trained = trainable.fit(train_X, train_y)
_ = trained.transform(test_X)
def test_tee_obj(self):
from lale.lib.lale import Tee
class check_data:
def __init__(self, outerSelf):
self._outerSelf = outerSelf
def __call__(self, X, y):
self._outerSelf.assertEqual(X.dtypes["x1"], np.float64)
pca = PCA()
trainable = Tee(listener=check_data(self)) >> pca
(train_X, train_y), (test_X, _test_y) = lale.datasets.digits_df()
trained = trainable.fit(train_X, train_y)
_ = trained.transform(test_X)
class TestClone(unittest.TestCase):
def test_clone_with_scikit1(self):
lr = LogisticRegression()
lr.get_params()
from sklearn.base import clone
lr_clone = clone(lr)
self.assertNotEqual(lr, lr_clone)
self.assertNotEqual(lr._impl, lr_clone._impl)
iris = load_iris()
trained_lr = lr.fit(iris.data, iris.target)
_ = trained_lr.predict(iris.data)
cloned_trained_lr = clone(trained_lr)
self.assertNotEqual(trained_lr._impl, cloned_trained_lr._impl)
# Testing clone with pipelines having OperatorChoice
def test_clone_operator_pipeline(self):
from sklearn.base import clone
from sklearn.metrics import accuracy_score, make_scorer
from sklearn.model_selection import cross_val_score
iris = load_iris()
X, y = iris.data, iris.target
lr = LogisticRegression()
trainable = PCA() >> lr
trainable_wrapper = trainable
trainable2 = clone(trainable_wrapper)
_ = clone(trainable)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
result = cross_val_score(
trainable_wrapper, X, y, scoring=make_scorer(accuracy_score), cv=2
)
result2 = cross_val_score(
trainable2, X, y, scoring=make_scorer(accuracy_score), cv=2
)
for res1, res2 in zip(result, result2):
self.assertEqual(res1, res2)
def test_clone_operator_choice(self):
from sklearn.base import clone
lr = LogisticRegression()
trainable = (PCA() | NoOp) >> lr
trainable_wrapper = trainable
_ = clone(trainable_wrapper)
_ = clone(trainable)
def test_clone_with_scikit2(self):
lr = LogisticRegression()
from sklearn.metrics import accuracy_score, make_scorer
from sklearn.model_selection import cross_val_score
pca = PCA()
trainable = pca >> lr
from sklearn.base import clone
iris = load_iris()
X, y = iris.data, iris.target
trainable2 = clone(trainable)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
result = cross_val_score(
trainable, X, y, scoring=make_scorer(accuracy_score), cv=2
)
result2 = cross_val_score(
trainable2, X, y, scoring=make_scorer(accuracy_score), cv=2
)
for res1, res2 in zip(result, result2):
self.assertEqual(res1, res2)
# Testing clone with nested linear pipelines
trainable = PCA() >> trainable
trainable2 = clone(trainable)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
result = cross_val_score(
trainable, X, y, scoring=make_scorer(accuracy_score), cv=2
)
result2 = cross_val_score(
trainable2, X, y, scoring=make_scorer(accuracy_score), cv=2
)
for res1, res2 in zip(result, result2):
self.assertEqual(res1, res2)
def test_clone_of_trained(self):
from sklearn.base import clone
lr = LogisticRegression()
iris = load_iris()
X, y = iris.data, iris.target
trained = lr.fit(X, y)
_ = clone(trained)
def test_with_voting_classifier1(self):
lr = LogisticRegression()
knn = KNeighborsClassifier()
from sklearn.ensemble import VotingClassifier
vclf = VotingClassifier(estimators=[("lr", lr), ("knn", knn)])
iris = load_iris()
X, y = iris.data, iris.target
vclf.fit(X, y)
def test_with_voting_classifier2(self):
lr = LogisticRegression()
pca = PCA()
trainable = pca >> lr
from sklearn.ensemble import VotingClassifier
vclf = VotingClassifier(estimators=[("lr", lr), ("pipe", trainable)])
iris = load_iris()
X, y = iris.data, iris.target
vclf.fit(X, y)
def test_fit_clones_impl(self):
lr_trainable = LogisticRegression()
iris = load_iris()
X, y = iris.data, iris.target
lr_trained = lr_trainable.fit(X, y)
self.assertIsNot(lr_trainable._impl, lr_trained._impl)
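# Illustrative sketch (not part of the original test suite): lale operators
# implement get_params/clone, so sklearn.base.clone yields an independent,
# unfitted copy with the same hyperparameters, which is what the tests above
# rely on. A minimal sketch of that behavior.
def _sketch_clone_preserves_hyperparams():
    from sklearn.base import clone
    from lale.lib.sklearn import LogisticRegression
    lr = LogisticRegression(C=0.1, solver="saga")
    lr_copy = clone(lr)
    assert lr_copy is not lr
    assert lr_copy.get_params()["C"] == 0.1  # hyperparameters survive cloning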
class TestGetParams(unittest.TestCase):
@classmethod
def remove_lale_params(cls, params: Dict[str, Any]) -> Dict[str, Any]:
return {k: v for (k, v) in params.items() if not k.startswith("_lale_")}
def test_shallow_planned_individual_operator(self):
op: Ops.PlannedIndividualOp = LogisticRegression
params = op.get_params(deep=False)
filtered_params = self.remove_lale_params(params)
self.assertIn("_lale_schemas", params)
expected = LogisticRegression.get_defaults()
self.assertEqual(filtered_params, expected)
def test_shallow0_planned_individual_operator(self):
op: Ops.PlannedIndividualOp = LogisticRegression
params = op.get_params(deep=0)
self.assertNotIn("_lale_schemas", params)
expected = LogisticRegression.get_defaults()
self.assertEqual(params, expected)
def test_deep_planned_individual_operator(self):
op: Ops.PlannedIndividualOp = LogisticRegression
params = op.get_params(deep=True)
filtered_params = self.remove_lale_params(params)
expected = LogisticRegression.get_defaults()
self.assertEqual(filtered_params, expected)
def test_shallow_trainable_individual_operator_defaults(self):
op: Ops.TrainableIndividualOp = LogisticRegression()
params = op.get_params(deep=False)
filtered_params = self.remove_lale_params(params)
expected = LogisticRegression.get_defaults()
self.assertEqual(filtered_params, expected)
def test_shallow_trainable_individual_operator_configured(self):
op: Ops.TrainableIndividualOp = LogisticRegression(
LogisticRegression.enum.solver.saga
)
params = op.get_params(deep=False)
filtered_params = self.remove_lale_params(params)
expected = dict(LogisticRegression.get_defaults())
expected["solver"] = "saga"
self.assertEqual(filtered_params, expected)
def test_shallow_trained_individual_operator_defaults(self):
op1: Ops.TrainableIndividualOp = LogisticRegression()
iris = load_iris()
op: Ops.TrainedIndividualOp = op1.fit(iris.data, iris.target)
params = op.get_params(deep=False)
filtered_params = self.remove_lale_params(params)
expected = LogisticRegression.get_defaults()
self.assertEqual(filtered_params, expected)
def test_shallow_trained_individual_operator_configured(self):
op1: Ops.TrainableIndividualOp = LogisticRegression(
LogisticRegression.enum.solver.saga
)
iris = load_iris()
op: Ops.TrainedIndividualOp = op1.fit(iris.data, iris.target)
params = op.get_params(deep=False)
filtered_params = self.remove_lale_params(params)
expected = dict(LogisticRegression.get_defaults())
expected["solver"] = "saga"
self.assertEqual(filtered_params, expected)
def test_shallow_planned_pipeline(self):
op: Ops.PlannedPipeline = PCA >> LogisticRegression
params = op.get_params(deep=False)
assert "steps" in params
assert "_lale_preds" in params
pca = params["steps"][0]
lr = params["steps"][1]
assert isinstance(pca, Ops.PlannedIndividualOp)
assert isinstance(lr, Ops.PlannedIndividualOp)
lr_params = lr.get_params()
lr_filtered_params = self.remove_lale_params(lr_params)
lr_expected = LogisticRegression.get_defaults()
self.assertEqual(lr_filtered_params, lr_expected)
def test_shallow_planned_pipeline_with_trainable_default(self):
op: Ops.PlannedPipeline = PCA >> LogisticRegression()
params = op.get_params(deep=False)
assert "steps" in params
assert "_lale_preds" in params
pca = params["steps"][0]
lr = params["steps"][1]
assert isinstance(pca, Ops.PlannedIndividualOp)
assert isinstance(lr, Ops.TrainableIndividualOp)
lr_params = lr.get_params()
lr_filtered_params = self.remove_lale_params(lr_params)
lr_expected = LogisticRegression.get_defaults()
self.assertEqual(lr_filtered_params, lr_expected)
def test_shallow_planned_pipeline_with_trainable_configured(self):
op: Ops.PlannedPipeline = PCA >> LogisticRegression(
LogisticRegression.enum.solver.saga
)
params = op.get_params(deep=False)
assert "steps" in params
assert "_lale_preds" in params
pca = params["steps"][0]
lr = params["steps"][1]
assert isinstance(pca, Ops.PlannedIndividualOp)
assert isinstance(lr, Ops.TrainableIndividualOp)
lr_params = lr.get_params()
lr_filtered_params = self.remove_lale_params(lr_params)
lr_expected = dict(LogisticRegression.get_defaults())
lr_expected["solver"] = "saga"
self.assertEqual(lr_filtered_params, lr_expected)
def test_shallow_trainable_pipeline_default(self):
op: Ops.TrainablePipeline = PCA() >> LogisticRegression()
params = op.get_params(deep=False)
assert "steps" in params
assert "_lale_preds" in params
pca = params["steps"][0]
lr = params["steps"][1]
assert isinstance(pca, Ops.TrainableIndividualOp)
assert isinstance(lr, Ops.TrainableIndividualOp)
lr_params = lr.get_params()
lr_filtered_params = self.remove_lale_params(lr_params)
lr_expected = LogisticRegression.get_defaults()
self.assertEqual(lr_filtered_params, lr_expected)
def test_shallow_trainable_pipeline_configured(self):
op: Ops.TrainablePipeline = PCA() >> LogisticRegression(
LogisticRegression.enum.solver.saga
)
params = op.get_params(deep=False)
assert "steps" in params
assert "_lale_preds" in params
pca = params["steps"][0]
lr = params["steps"][1]
assert isinstance(pca, Ops.TrainableIndividualOp)
assert isinstance(lr, Ops.TrainableIndividualOp)
lr_params = lr.get_params()
lr_filtered_params = self.remove_lale_params(lr_params)
lr_expected = dict(LogisticRegression.get_defaults())
lr_expected["solver"] = "saga"
self.assertEqual(lr_filtered_params, lr_expected)
def test_shallow0_trainable_pipeline_configured(self):
op: Ops.TrainablePipeline = PCA() >> LogisticRegression(
LogisticRegression.enum.solver.saga
)
params = op.get_params(deep=0)
assert "steps" in params
assert "_lale_preds" not in params
pca = params["steps"][0]
lr = params["steps"][1]
assert isinstance(pca, Ops.TrainableIndividualOp)
assert isinstance(lr, Ops.TrainableIndividualOp)
lr_params = lr.get_params()
lr_expected = dict(LogisticRegression.get_defaults())
lr_expected["solver"] = "saga"
self.assertEqual(lr_params, lr_expected)
def test_shallow_planned_nested_indiv_operator(self):
from lale.lib.sklearn import BaggingClassifier, DecisionTreeClassifier
clf = BaggingClassifier(base_estimator=DecisionTreeClassifier())
params = clf.get_params(deep=False)
filtered_params = self.remove_lale_params(params)
assert filtered_params["bootstrap"]
def test_shallow_planned_nested_list_indiv_operator(self):
from lale.lib.sklearn import DecisionTreeClassifier, VotingClassifier
clf = VotingClassifier(estimators=[("dtc", DecisionTreeClassifier())])
params = clf.get_params(deep=False)
filtered_params = self.remove_lale_params(params)
assert filtered_params["voting"] == "hard"
def test_deep_planned_pipeline(self):
op: Ops.PlannedPipeline = PCA >> LogisticRegression
params = op.get_params(deep=True)
assert "steps" in params
assert "_lale_preds" not in params
pca = params["steps"][0]
lr = params["steps"][1]
assert isinstance(pca, Ops.PlannedIndividualOp)
assert isinstance(lr, Ops.PlannedIndividualOp)
assert "LogisticRegression__fit_intercept" in params
lr_params = lr.get_params()
lr_filtered_params = self.remove_lale_params(lr_params)
lr_expected = LogisticRegression.get_defaults()
self.assertEqual(lr_filtered_params, lr_expected)
def test_deep_planned_choice(self):
op: Ops.PlannedPipeline = (PCA | NoOp) >> LogisticRegression
params = op.get_params(deep=True)
assert "steps" in params
choice = params["steps"][0]
assert isinstance(choice, Ops.OperatorChoice)
choice_name = choice.name()
self.assertTrue(params[choice_name + "__PCA__copy"])
def test_deep_planned_nested_indiv_operator(self):
from lale.lib.sklearn import BaggingClassifier, DecisionTreeClassifier
est_name = "base_estimator"
dtc = DecisionTreeClassifier()
clf = BaggingClassifier(base_estimator=dtc)
params = clf.get_params(deep=True)
filtered_params = self.remove_lale_params(params)
# expected = LogisticRegression.get_defaults()
base = filtered_params[est_name]
base_params = self.remove_lale_params(base.get_params(deep=True))
nested_base_params = nest_HPparams(est_name, base_params)
self.assertDictEqual(
{
k: v
for k, v in filtered_params.items()
if k.startswith(f"{est_name}__")
and not k.startswith(f"{est_name}___lale")
},
nested_base_params,
)
def test_deep_grammar(self):
from lale.grammar import Grammar
from lale.lib.sklearn import BaggingClassifier, DecisionTreeClassifier
from lale.lib.sklearn import KNeighborsClassifier as KNN
from lale.lib.sklearn import LogisticRegression as LR
from lale.lib.sklearn import StandardScaler as Scaler
dtc = DecisionTreeClassifier()
clf = BaggingClassifier(base_estimator=dtc)
params = clf.get_params(deep=True)
filtered_params = self.remove_lale_params(params)
g = Grammar()
g.start = g.estimator
g.estimator = (NoOp | g.transformer) >> g.prim_est
g.transformer = (NoOp | g.transformer) >> g.prim_tfm
g.prim_est = LR | KNN
g.prim_tfm = PCA | Scaler
params = g.get_params(deep=True)
filtered_params = self.remove_lale_params(params)
assert filtered_params["start__name"] == "estimator"
assert filtered_params["prim_est__LogisticRegression__penalty"] == "l2"
# TODO: design question.
# def test_deep_planned_nested_list_indiv_operator(self):
# from lale.lib.sklearn import VotingClassifier, DecisionTreeClassifier
#
# clf = VotingClassifier(estimators=[("dtc", DecisionTreeClassifier())])
# params = clf.get_params(deep=True)
# filtered_params = self.remove_lale_params(params)
#
# # expected = LogisticRegression.get_defaults()
# base = filtered_params['base_estimator']
# base_params = self.remove_lale_params(base.get_params(deep=True))
# nested_base_params = nest_HPparams('base_esimator', base_params)
#
# self.assertLess(nested_base_params, filtered_params)
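# Illustrative sketch (not part of the original test suite): get_params(deep=True)
# flattens nested operators into double-underscore keys, which is the structure
# that nest_HPparams rebuilds in the tests above. A minimal sketch, assuming
# step names follow the same pattern as in test_deep_planned_pipeline.
def _sketch_deep_get_params_nesting():
    from lale.lib.sklearn import LogisticRegression, PCA
    pipeline = PCA() >> LogisticRegression(solver="saga")
    params = pipeline.get_params(deep=True)
    assert "steps" in params
    # Nested hyperparameters appear under "<step name>__<param>" keys.
    assert params["LogisticRegression__solver"] == "saga"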
class TestWithParams(unittest.TestCase):
@classmethod
def remove_lale_params(cls, params: Dict[str, Any]) -> Dict[str, Any]:
return {k: v for (k, v) in params.items() if not k.startswith("_lale_")}
def test_shallow_copied_trainable_individual_operator(self):
from lale.lib.lightgbm import LGBMClassifier as LGBM
op: Ops.PlannedIndividualOp = LGBM()
op2 = op.clone()
new_param_dict = {"learning_rate": 0.8}
op3 = op2.with_params(**new_param_dict)
params = op3.get_params(deep=False)
self.assertEqual(params["learning_rate"], 0.8)
class UserValidatorImpl:
@classmethod
def validate_hyperparams(cls, **hyperparams):
assert "validate" in hyperparams
v = hyperparams["validate"]
if not v:
raise ValueError("validate set to False!")
def __init__(self, validate=True):
pass
def fit(self, X, y=None):
return self
_user_validate_hyperparam_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"allOf": [
{
"description": "This first sub-object lists all constructor arguments with their "
"types, one at a time, omitting cross-argument constraints.",
"type": "object",
"additionalProperties": False,
"relevantToOptimizer": [],
"required": ["validate"],
"properties": {
"validate": {
"type": "boolean",
"default": True,
"description": "Should we pass validation?",
}
},
}
],
}
_user_validate_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Combined schema for expected data and hyperparameters.",
"type": "object",
"tags": {"pre": [], "op": [""], "post": []},
"properties": {
"hyperparams": _user_validate_hyperparam_schema,
# "input_fit": None,
# "input_transform": ,
# "output_transform": _output_transform_schema,
},
}
UserValidatorOp = Ops.make_operator(UserValidatorImpl, _user_validate_combined_schemas)
class TestUserValidator(unittest.TestCase):
def test_validate_none(self):
import re
import jsonschema
self.assertRaisesRegex(
jsonschema.ValidationError,
re.compile(
r"invalid value validate=None.*boolean", re.MULTILINE | re.DOTALL
),
UserValidatorOp,
validate=None,
)
def test_validate_true(self):
UserValidatorOp(validate=True)
def test_validate_false(self):
self.assertRaisesRegex(
ValueError, "validate set to False!", UserValidatorOp, validate=False
)
class TestCategorical(unittest.TestCase):
def test_pickle_categorical(self):
from multiprocessing.reduction import ForkingPickler
from lale.lib.rasl import Project
c = categorical(5)
p = Project(columns=None, drop_columns=categorical(10))
_ = ForkingPickler.dumps(c)
_ = ForkingPickler.dumps(p)
class TestHyperparamRanges(unittest.TestCase):
def exactly_relevant_properties(self, keys1, operator):
keys2 = operator.hyperparam_schema()["allOf"][0]["relevantToOptimizer"]
self.assertCountEqual(keys1, keys2)
def validate_get_param_ranges(self, operator):
ranges, cat_idx = operator.get_param_ranges()
self.exactly_relevant_properties(ranges.keys(), operator)
# all defaults are in-range
for hp, r in ranges.items():
if isinstance(r, tuple):
minimum, maximum, default = r
if minimum is not None and maximum is not None and default is not None:
assert minimum <= default <= maximum
else:
minimum, maximum, default = cat_idx[hp]
assert minimum == 0 and len(r) - 1 == maximum
def validate_get_param_dist(self, operator):
size = 5
dist = operator.get_param_dist(size)
self.exactly_relevant_properties(dist.keys(), operator)
for hp, d in dist.items():
self.assertTrue(len(d) > 0)
if isinstance(d[0], int):
self.assertTrue(len(d) <= size)
elif isinstance(d[0], float):
self.assertTrue(len(d) == size)
schema = operator.hyperparam_schema(hp)
for v in d:
lale.type_checking.validate_schema_directly(v, schema)
def test_get_param_ranges_and_dist(self):
for op in [
ConcatFeatures,
KNeighborsClassifier,
LogisticRegression,
MLPClassifier,
Nystroem,
OneHotEncoder,
PCA,
RandomForestClassifier,
]:
self.validate_get_param_ranges(op)
self.validate_get_param_dist(op)
def test_sklearn_get_param_ranges_and_dist(self):
for op in [
ConcatFeatures,
KNeighborsClassifier,
LogisticRegression,
MLPClassifier,
Nystroem,
OneHotEncoder,
PCA,
RandomForestClassifier,
]:
skop = op
self.validate_get_param_ranges(skop)
self.validate_get_param_dist(skop)
def test_random_forest_classifier(self):
ranges, _dists = RandomForestClassifier.get_param_ranges()
expected_ranges = {
"n_estimators": (10, 100, 100),
"criterion": ["entropy", "gini"],
"max_depth": (3, 5, None),
"min_samples_split": (2, 5, 2),
"min_samples_leaf": (1, 5, 1),
"max_features": (0.01, 1.0, 0.5),
}
self.maxDiff = None
self.assertEqual(ranges, expected_ranges)
def test_lgbclassifier(self):
from lale.lib.lightgbm import LGBMClassifier
ranges, _dists = LGBMClassifier.get_param_ranges()
expected_ranges = {
"boosting_type": ["dart", "gbdt"],
"num_leaves": [4, 8, 32, 64, 128, 16, 2],
"learning_rate": (0.02, 1.0, 0.1),
"n_estimators": (50, 1000, 200),
"min_child_weight": (0.0001, 0.01, 0.001),
"min_child_samples": (5, 30, 20),
"subsample": (0.01, 1.0, 1.0),
"subsample_freq": (0, 5, 0),
"colsample_bytree": (0.01, 1.0, 1.0),
"reg_alpha": (0.0, 1.0, 0.0),
"reg_lambda": (0.0, 1.0, 0.0),
}
self.maxDiff = None
self.assertEqual(ranges, expected_ranges)
def test_logisticregression(self):
ranges, dists = LogisticRegression.get_param_ranges()
        # The allowed solvers change between sklearn versions, so we just remove them from the comparison for now.
del ranges["solver"]
del dists["solver"]
expected_ranges = {
# "solver": ["newton-cg", "liblinear", "sag", "saga", "lbfgs"],
"dual": (False, True, False),
"tol": (1e-08, 0.01, 0.0001),
"fit_intercept": (False, True, True),
"intercept_scaling": (0.0, 1.0, 1.0),
"max_iter": (10, 1000, 100),
"multi_class": ["ovr", "multinomial", "auto"],
}
expected_dists = {"multi_class": (0, 2, 2)}
self.maxDiff = None
self.assertEqual(ranges, expected_ranges)
self.assertEqual(dists, expected_dists)
def test_for_optimizer_false_any_two(self):
from lale.schemas import AnyOf, Enum
custom = NoOp.customize_schema(
prop=AnyOf(
types=[Enum(values=[3]), Enum(values=[4])],
),
relevantToOptimizer=["prop"],
)
ranges, dists = custom.get_param_ranges()
expected_ranges = {
"prop": [4, 3],
}
expected_dists = {"prop": (0, 1, 1)}
self.maxDiff = None
self.assertEqual(ranges, expected_ranges)
self.assertEqual(dists, expected_dists)
def test_for_optimizer_false_any_first_one(self):
from lale.schemas import AnyOf, Enum
custom = NoOp.customize_schema(
prop=AnyOf(
types=[Enum(values=[3]), Enum(values=[4], forOptimizer=False)],
),
relevantToOptimizer=["prop"],
)
ranges, dists = custom.get_param_ranges()
expected_ranges = {
"prop": [3],
}
expected_dists = {"prop": (0, 0, 0)}
self.maxDiff = None
self.assertEqual(ranges, expected_ranges)
self.assertEqual(dists, expected_dists)
def test_for_optimizer_false_any_second_one(self):
from lale.schemas import AnyOf, Enum
custom = NoOp.customize_schema(
prop=AnyOf(
types=[Enum(values=[3], forOptimizer=False), Enum(values=[4])],
),
relevantToOptimizer=["prop"],
)
ranges, dists = custom.get_param_ranges()
expected_ranges = {
"prop": [4],
}
expected_dists = {"prop": (0, 0, 0)}
self.maxDiff = None
self.assertEqual(ranges, expected_ranges)
self.assertEqual(dists, expected_dists)
def test_for_optimizer_false_any_zero(self):
from lale.schemas import AnyOf, Enum
custom = NoOp.customize_schema(
prop=AnyOf(
types=[
Enum(values=[3], forOptimizer=False),
Enum(values=[4], forOptimizer=False),
],
),
relevantToOptimizer=["prop"],
)
ranges, dists = custom.get_param_ranges()
expected_ranges = {}
expected_dists = {}
self.maxDiff = None
self.assertEqual(ranges, expected_ranges)
self.assertEqual(dists, expected_dists)
def test_for_optimizer_false_any(self):
from lale.schemas import AnyOf, Enum
custom = NoOp.customize_schema(
prop=AnyOf(types=[Enum(values=[3]), Enum(values=[4])], forOptimizer=False),
relevantToOptimizer=["prop"],
)
ranges, dists = custom.get_param_ranges()
expected_ranges = {}
expected_dists = {}
self.maxDiff = None
self.assertEqual(ranges, expected_ranges)
self.assertEqual(dists, expected_dists)
def test_for_optimizer_true(self):
from lale.schemas import Enum
custom = NoOp.customize_schema(
prop=Enum(values=[4], forOptimizer=True), relevantToOptimizer=["prop"]
)
ranges, dists = custom.get_param_ranges()
expected_ranges = {
"prop": [4],
}
expected_dists = {"prop": (0, 0, 0)}
self.maxDiff = None
self.assertEqual(ranges, expected_ranges)
self.assertEqual(dists, expected_dists)
def test_for_optimizer_false(self):
from lale.schemas import Enum
custom = NoOp.customize_schema(
prop=Enum(values=[3], forOptimizer=False), relevantToOptimizer=["prop"]
)
ranges, dists = custom.get_param_ranges()
expected_ranges = {}
expected_dists = {}
self.maxDiff = None
self.assertEqual(ranges, expected_ranges)
self.assertEqual(dists, expected_dists)
def test_bool_enum(self):
from lale.lib.sklearn import SVR
from lale.schemas import AnyOf, Bool, Null
SVR = SVR.customize_schema(
shrinking=AnyOf(
types=[Bool(), Null()],
default=None,
desc="Whether to use the shrinking heuristic.",
)
)
ranges, _dists = SVR.get_param_ranges()
expected_ranges = {
"kernel": ["poly", "rbf", "sigmoid", "linear"],
"degree": (2, 5, 3),
"gamma": (3.0517578125e-05, 8, None),
"tol": (0.0, 0.01, 0.001),
"C": (0.03125, 32768, 1.0),
"shrinking": [False, True, None],
}
self.maxDiff = None
self.assertEqual(ranges, expected_ranges)
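# score() behaves differently by lifecycle state: a planned operator raises
# AttributeError, while trainable and trained operators return the same value
# as sklearn's accuracy_score, with or without sample_weight.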
class TestScoreIndividualOp(unittest.TestCase):
def setUp(self):
from sklearn.model_selection import train_test_split
data = load_iris()
X, y = data.data, data.target
self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(X, y)
def test_score_planned_op(self):
with self.assertRaises(AttributeError):
LogisticRegression.score(self.X_test, self.y_test)
def test_score_trainable_op(self):
trainable = LogisticRegression()
_ = trainable.fit(self.X_train, self.y_train)
trainable.score(self.X_test, self.y_test)
def test_score_trained_op(self):
from sklearn.metrics import accuracy_score
trainable = LogisticRegression()
trained_lr = trainable.fit(self.X_train, self.y_train)
score = trained_lr.score(self.X_test, self.y_test)
predictions = trained_lr.predict(self.X_test)
accuracy = accuracy_score(self.y_test, predictions)
self.assertEqual(score, accuracy)
def test_score_trained_op_sample_wt(self):
from sklearn.metrics import accuracy_score
trainable = LogisticRegression()
trained_lr = trainable.fit(self.X_train, self.y_train)
rng = np.random.RandomState(0)
iris_weights = rng.randint(10, size=self.y_test.shape)
score = trained_lr.score(self.X_test, self.y_test, sample_weight=iris_weights)
predictions = trained_lr.predict(self.X_test)
accuracy = accuracy_score(self.y_test, predictions, sample_weight=iris_weights)
self.assertEqual(score, accuracy)
class TestEmptyY(unittest.TestCase):
def setUp(self):
data = load_iris()
self.X, self.y = data.data, data.target
def test_PCA(self):
op = PCA()
op.fit(self.X, [])
class TestFitPlannedOp(unittest.TestCase):
def setUp(self):
data = load_iris()
self.X, self.y = data.data, data.target
def test_planned_individual_op(self):
planned = LogisticRegression
try:
planned.fit(self.X, self.y)
except AttributeError as e:
self.assertEqual(
str(e),
"""Please use `LogisticRegression()` instead of `LogisticRegression` to make it trainable.
Alternatively, you could use `auto_configure(X, y, Hyperopt, max_evals=5)` on the operator to use Hyperopt for
`max_evals` iterations for hyperparameter tuning. `Hyperopt` can be imported as `from lale.lib.lale import Hyperopt`.""",
)
def test_planned_pipeline_with_choice(self):
planned = PCA() >> (LogisticRegression() | KNeighborsClassifier())
try:
planned.fit(self.X, self.y)
except AttributeError as e:
self.assertEqual(
str(e),
"""The pipeline is not trainable, which means you can not call fit on it.
Suggested fixes:
Fix [A]: You can make the following changes in the pipeline in order to make it trainable:
[A.1] Please remove the operator choice `|` from `LogisticRegression | KNeighborsClassifier` and keep only one of those operators.
Fix [B]: Alternatively, you could use `auto_configure(X, y, Hyperopt, max_evals=5)` on the pipeline
to use Hyperopt for `max_evals` iterations for hyperparameter tuning. `Hyperopt` can be imported as `from lale.lib.lale import Hyperopt`.""",
)
def test_planned_pipeline_with_choice_1(self):
planned = PCA >> (LogisticRegression() | KNeighborsClassifier())
try:
planned.fit(self.X, self.y)
except AttributeError as e:
self.maxDiff = None
self.assertEqual(
str(e),
"""The pipeline is not trainable, which means you can not call fit on it.
Suggested fixes:
Fix [A]: You can make the following changes in the pipeline in order to make it trainable:
[A.1] Please use `PCA()` instead of `PCA.`
[A.2] Please remove the operator choice `|` from `LogisticRegression | KNeighborsClassifier` and keep only one of those operators.
Fix [B]: Alternatively, you could use `auto_configure(X, y, Hyperopt, max_evals=5)` on the pipeline
to use Hyperopt for `max_evals` iterations for hyperparameter tuning. `Hyperopt` can be imported as `from lale.lib.lale import Hyperopt`.""",
)
def test_choice(self):
planned = LogisticRegression() | KNeighborsClassifier()
try:
planned.fit(self.X, self.y)
except AttributeError as e:
self.assertEqual(
str(e),
"""The pipeline is not trainable, which means you can not call fit on it.
Suggested fixes:
Fix [A]: You can make the following changes in the pipeline in order to make it trainable:
[A.1] Please remove the operator choice `|` from `LogisticRegression | KNeighborsClassifier` and keep only one of those operators.
Fix [B]: Alternatively, you could use `auto_configure(X, y, Hyperopt, max_evals=5)` on the pipeline
to use Hyperopt for `max_evals` iterations for hyperparameter tuning. `Hyperopt` can be imported as `from lale.lib.lale import Hyperopt`.""",
)
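# Fixture for the attribute-forwarding tests: the impl wraps a second object
# in _wrapped_model so the tests can distinguish methods defined only on the
# outer impl (f, fnotforward), only on the wrapped model (finner), on both
# with different results (fshadow), and trailing-underscore attributes
# (auto_, prop_).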
class _OperatorForwardingTestWrappedImpl:
def __init__(self):
pass
def fshadow(self):
return False
def finner(self):
return True
class _OperatorForwardingTestImpl:
def __init__(self):
self._wrapped_model = _OperatorForwardingTestWrappedImpl()
self.prop_ = True
def fit(self, X, y=None):
return self
def f(self):
return True
def fshadow(self):
return True
def fnotforward(self):
return True
@property
def p(self):
return True
def auto_(self):
return True
_operator_forwarding_test_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"allOf": [{}],
}
_operator_forwarding_test_combined_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Combined schema for expected data and hyperparameters.",
"type": "object",
"tags": {"pre": [], "op": [""], "post": []},
"forwards": ["f", "p", "fshadow", "finner"],
"properties": {
"hyperparams": _operator_forwarding_test_schema,
# "input_fit": None,
# "input_transform": ,
# "output_transform": _output_transform_schema,
},
}
_OperatorForwardingTest = Ops.make_operator(
_OperatorForwardingTestImpl, _operator_forwarding_test_combined_schema
)
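# The tests below exercise the "forwards" field of the combined schema: names
# listed there (f, p, fshadow, finner) are forwarded from the operator to its
# impl, with the outer impl taking priority over _wrapped_model; trailing-
# underscore attributes are forwarded even when not listed; unlisted names
# raise AttributeError unless forwards is True, which forwards everything;
# and customize_schema(forwards=...) replaces the forwarding list.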
class TestOperatorForwarding(unittest.TestCase):
    def test_forwards_method_list(self):
        self.assertEqual(
            _OperatorForwardingTest.get_forwards(),
            _operator_forwarding_test_combined_schema["forwards"],
        )
    def test_forwards_method_succeeds(self):
        self.assertTrue(_OperatorForwardingTest.f())
    def test_forwards_underscore_method_succeeds(self):
        self.assertTrue(_OperatorForwardingTest.auto_())
    def test_forwards_underscore_prop_succeeds(self):
        self.assertTrue(_OperatorForwardingTest.prop_)
    # test that the outer impl method is given priority over the inner impl method
    def test_forwards_method_shadow_succeeds(self):
        self.assertTrue(_OperatorForwardingTest.fshadow())
    def test_forwards_method_wrapped_succeeds(self):
        self.assertTrue(_OperatorForwardingTest.finner())
    def test_forwards_property_succeeds(self):
        self.assertTrue(_OperatorForwardingTest.p)
    def test_not_forwards_method(self):
        with self.assertRaises(AttributeError):
            self.assertTrue(_OperatorForwardingTest.fnotforward())
def test_bad_forwards_decl(self):
from test import EnableSchemaValidation
_operator_forwarding_test_combined_schema2 = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Combined schema for expected data and hyperparameters.",
"type": "object",
"tags": {"pre": [], "op": [""], "post": []},
"forwards": ["f", "p", "fshadow", "finner", "predict"],
"properties": {
"hyperparams": _operator_forwarding_test_schema,
# "input_fit": None,
# "input_transform": ,
# "output_transform": _output_transform_schema,
},
}
with self.assertRaises(AssertionError):
with EnableSchemaValidation():
Ops.make_operator(
_OperatorForwardingTestImpl,
_operator_forwarding_test_combined_schema2,
)
def test_bad_forwards_false_decl(self):
_operator_forwarding_test_combined_schema2 = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Combined schema for expected data and hyperparameters.",
"type": "object",
"tags": {"pre": [], "op": [""], "post": []},
"properties": {
"hyperparams": _operator_forwarding_test_schema,
# "input_fit": None,
# "input_transform": ,
# "output_transform": _output_transform_schema,
},
}
_OperatorForwardingTest2 = Ops.make_operator(
_OperatorForwardingTestImpl, _operator_forwarding_test_combined_schema2
)
with self.assertRaises(AttributeError):
_OperatorForwardingTest2.f()
def test_bad_forwards_true_decl(self):
_operator_forwarding_test_combined_schema2 = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Combined schema for expected data and hyperparameters.",
"type": "object",
"tags": {"pre": [], "op": [""], "post": []},
"forwards": True,
"properties": {
"hyperparams": _operator_forwarding_test_schema,
# "input_fit": None,
# "input_transform": ,
# "output_transform": _output_transform_schema,
},
}
_OperatorForwardingTest2 = Ops.make_operator(
_OperatorForwardingTestImpl, _operator_forwarding_test_combined_schema2
)
_OperatorForwardingTest2.f()
_OperatorForwardingTest2.fnotforward()
with self.assertRaises(AttributeError):
_OperatorForwardingTest2.unknown()
def test_customize_schema_forward_success(self):
Op = _OperatorForwardingTest.customize_schema(forwards=["fnotforward"])
self.assertTrue(Op.fnotforward())
def test_customize_schema_forward_failure(self):
Op = _OperatorForwardingTest.customize_schema(forwards=["fnotforward"])
with self.assertRaises(AttributeError):
            self.assertTrue(Op.f())
class TestSteps(unittest.TestCase):
def test_pipeline(self):
pca = PCA()
op: Ops.PlannedPipeline = pca >> LogisticRegression
self.assertEqual(len(op.steps), 2)
self.assertEqual(op.steps[0][0], "PCA")
self.assertEqual(op.steps[0][1], pca)
| 49,238 | 33.970881 | 141 |
py
|
lale
|
lale-master/test/test_replace.py
|
# Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from lale.lib.lale import NoOp
from lale.lib.sklearn import (
PCA,
RFE,
AdaBoostRegressor,
DecisionTreeClassifier,
LinearRegression,
LogisticRegression,
SelectKBest,
SimpleImputer,
StackingClassifier,
VotingClassifier,
)
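# replace(old, new) is tested on choices, pipelines, and nested combinators,
# as well as on estimators passed as hyperparameters (base_estimator,
# estimator, and (name, op) lists); `old` may be a configured instance or an
# operator class, in which case every occurrence of that class is replaced.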
class TestReplace(unittest.TestCase):
def test_choice(self):
two_choice = PCA | SelectKBest
replaced_choice = two_choice.replace(PCA, LogisticRegression)
expected_choice = LogisticRegression | SelectKBest
self.assertEqual(replaced_choice.to_json(), expected_choice.to_json())
three_choice = PCA | SelectKBest | NoOp
replaced_choice = three_choice.replace(PCA, LogisticRegression)
expected_choice = LogisticRegression | SelectKBest | NoOp
self.assertEqual(replaced_choice.to_json(), expected_choice.to_json())
def test_simple_pipeline(self):
pipeline_simple = PCA >> SelectKBest >> LogisticRegression
simple_imputer = SimpleImputer
replaced_pipeline = pipeline_simple.replace(PCA, simple_imputer)
expected_pipeline = SimpleImputer >> SelectKBest >> LogisticRegression
self.assertEqual(replaced_pipeline.to_json(), expected_pipeline.to_json())
def test_choice_pipeline(self):
pipeline_choice = (PCA | NoOp) >> SelectKBest >> LogisticRegression
simple_imputer = SimpleImputer
replaced_pipeline = pipeline_choice.replace(PCA, simple_imputer)
expected_pipeline = (SimpleImputer | NoOp) >> SelectKBest >> LogisticRegression
self.assertEqual(replaced_pipeline.to_json(), expected_pipeline.to_json())
def test_planned_trained_ops(self):
pca1 = PCA(n_components=10)
pca2 = PCA(n_components=5)
choice = pca1 | pca2
pipeline_choice = (pca1 | pca2) >> LogisticRegression
replaced_choice = choice.replace(pca1, SimpleImputer) # SimpleImputer | pca2
expected_choice = SimpleImputer | pca2
self.assertEqual(replaced_choice.to_json(), expected_choice.to_json())
replaced_pipeline = pipeline_choice.replace(
pca1, SimpleImputer
) # SimpleImputer | pca2
expected_pipeline = (SimpleImputer | pca2) >> LogisticRegression
self.assertEqual(replaced_pipeline.to_json(), expected_pipeline.to_json())
replaced_choice = choice.replace(pca2, SimpleImputer) # pca1 | SimpleImputer
expected_choice = pca1 | SimpleImputer
self.assertEqual(replaced_choice.to_json(), expected_choice.to_json())
replaced_pipeline = pipeline_choice.replace(
pca2, SimpleImputer
) # pca1 | SimpleImputer
expected_pipeline = (pca1 | SimpleImputer) >> LogisticRegression
self.assertEqual(replaced_pipeline.to_json(), expected_pipeline.to_json())
replaced_choice = choice.replace(
PCA, SimpleImputer
) # SimpleImputer | SimpleImputer
expected_choice = SimpleImputer | SimpleImputer
self.assertEqual(replaced_choice.to_json(), expected_choice.to_json())
replaced_pipeline = pipeline_choice.replace(
PCA, SimpleImputer
) # SimpleImputer | SimpleImputer
expected_pipeline = (SimpleImputer | SimpleImputer) >> LogisticRegression
self.assertEqual(replaced_pipeline.to_json(), expected_pipeline.to_json())
def test_nested_choice(self):
pca1 = PCA(n_components=10)
pca2 = PCA(n_components=5)
pipeline_nested_choice = pca1 >> (pca1 | pca2)
replaced_pipeline = pipeline_nested_choice.replace(pca1, SimpleImputer)
expected_pipeline = SimpleImputer >> (SimpleImputer | pca2)
self.assertEqual(replaced_pipeline.to_json(), expected_pipeline.to_json())
replaced_pipeline = pipeline_nested_choice.replace(PCA, SimpleImputer)
expected_pipeline = SimpleImputer >> (SimpleImputer | SimpleImputer)
self.assertEqual(replaced_pipeline.to_json(), expected_pipeline.to_json())
def test_nested_pipeline(self):
pca1 = PCA(n_components=10)
pca2 = PCA(n_components=5)
first_pipeline = pca1 >> LogisticRegression
nested_pipeline = pca1 >> (pca2 | first_pipeline)
replaced_pipeline = nested_pipeline.replace(pca1, SimpleImputer)
expected_pipeline = SimpleImputer >> (
pca2 | (SimpleImputer >> LogisticRegression)
)
self.assertEqual(replaced_pipeline.to_json(), expected_pipeline.to_json())
replaced_pipeline = nested_pipeline.replace(PCA, SimpleImputer)
expected_pipeline = SimpleImputer >> (
SimpleImputer | (SimpleImputer >> LogisticRegression)
)
self.assertEqual(replaced_pipeline.to_json(), expected_pipeline.to_json())
def test_hyperparam_estimator(self):
lr = LogisticRegression()
linear_reg = LinearRegression()
ada = AdaBoostRegressor(base_estimator=lr)
replaced_ada = ada.replace(lr, linear_reg)
expected_ada = AdaBoostRegressor(base_estimator=linear_reg)
self.assertEqual(replaced_ada.to_json(), expected_ada.to_json())
replaced_ada = ada.replace(LogisticRegression, linear_reg)
expected_ada = AdaBoostRegressor(base_estimator=linear_reg)
self.assertEqual(replaced_ada.to_json(), expected_ada.to_json())
ada_pipeline = PCA >> SimpleImputer >> ada
replaced_pipeline = ada_pipeline.replace(lr, linear_reg)
expected_pipeline = (
PCA >> SimpleImputer >> AdaBoostRegressor(base_estimator=linear_reg)
)
self.assertEqual(replaced_pipeline.to_json(), expected_pipeline.to_json())
ada_choice = PCA | ada
replaced_choice = ada_choice.replace(lr, linear_reg)
expected_choice = PCA | AdaBoostRegressor(base_estimator=linear_reg)
self.assertEqual(replaced_choice.to_json(), expected_choice.to_json())
rfe = RFE(estimator=lr)
replaced_rfe = rfe.replace(lr, linear_reg)
expected_rfe = RFE(estimator=linear_reg)
self.assertEqual(replaced_rfe.to_json(), expected_rfe.to_json())
def test_hyperparam_estimator_list(self):
lr = LogisticRegression()
linear_reg = LinearRegression()
dtc = DecisionTreeClassifier()
cls_list = [("lr", lr), ("linear_reg", linear_reg)]
vc = VotingClassifier(estimators=cls_list)
replaced_vc = vc.replace(linear_reg, dtc)
new_cls_list = [("lr", lr), ("linear_reg", dtc)]
expected_vc = VotingClassifier(estimators=new_cls_list)
self.assertEqual(replaced_vc.to_json(), expected_vc.to_json())
sc = StackingClassifier(estimators=cls_list, final_estimator=vc)
replaced_sc = sc.replace(linear_reg, dtc)
new_cls_list = [("lr", lr), ("linear_reg", dtc)]
expected_sc = StackingClassifier(
estimators=new_cls_list, final_estimator=expected_vc
)
self.assertEqual(replaced_sc.to_json(), expected_sc.to_json())
def test_replace_choice(self):
choice = PCA | SelectKBest
choice_pipeline = choice >> LogisticRegression
replaced_pipeline = choice_pipeline.replace(choice, SelectKBest)
expected_pipeline = SelectKBest >> LogisticRegression
self.assertEqual(replaced_pipeline.to_json(), expected_pipeline.to_json())
choice2 = NoOp | LinearRegression
replaced_pipeline = choice_pipeline.replace(LogisticRegression, choice2)
expected_pipeline = choice >> choice2
self.assertEqual(replaced_pipeline.to_json(), expected_pipeline.to_json())
| 8,162 | 41.963158 | 87 |
py
|
lale
|
lale-master/test/test_halving_gridsearchcv.py
|
# Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import warnings
from lale.lib.lale import HalvingGridSearchCV, NoOp
from lale.lib.sklearn import (
PCA,
KNeighborsClassifier,
KNeighborsRegressor,
LogisticRegression,
MinMaxScaler,
Normalizer,
RandomForestRegressor,
StandardScaler,
)
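# These tests run auto_configure and HalvingGridSearchCV over small planned
# pipelines and check that the max_opt_time option aborts the search close to
# the requested time budget.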
class TestAutoConfigureClassification(unittest.TestCase):
def setUp(self):
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
data = load_iris()
X, y = data.data, data.target
self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(X, y)
def test_with_halving_gridsearchcv(self):
warnings.simplefilter("ignore")
planned_pipeline = (PCA | NoOp) >> LogisticRegression
best_pipeline = planned_pipeline.auto_configure(
self.X_train,
self.y_train,
optimizer=HalvingGridSearchCV,
cv=3,
scoring="accuracy",
lale_num_samples=1,
lale_num_grids=1,
)
_ = best_pipeline.predict(self.X_test)
assert best_pipeline is not None
def test_runtime_limit_hoc(self):
import time
planned_pipeline = (MinMaxScaler | Normalizer) >> (
LogisticRegression | KNeighborsClassifier
)
from sklearn.datasets import load_iris
X, y = load_iris(return_X_y=True)
max_opt_time = 10.0
hoc = HalvingGridSearchCV(
estimator=planned_pipeline,
cv=3,
scoring="accuracy",
max_opt_time=max_opt_time,
)
start = time.time()
with self.assertRaises(BaseException):
_ = hoc.fit(X, y)
end = time.time()
opt_time = end - start
rel_diff = (opt_time - max_opt_time) / max_opt_time
assert (
rel_diff < 0.7
), f"Max time: {max_opt_time}, Actual time: {opt_time}, relative diff: {rel_diff}"
def test_runtime_limit_hor(self):
import time
planned_pipeline = (MinMaxScaler | Normalizer) >> RandomForestRegressor
from lale.datasets.util import load_boston
X, y = load_boston(return_X_y=True)
max_opt_time = 2
hor = HalvingGridSearchCV(
estimator=planned_pipeline,
cv=3,
max_opt_time=max_opt_time,
scoring="r2",
)
start = time.time()
with self.assertRaises(BaseException):
_ = hor.fit(X[:500, :], y[:500])
end = time.time()
opt_time = end - start
rel_diff = (opt_time - max_opt_time) / max_opt_time
assert (
rel_diff < 0.2
), f"Max time: {max_opt_time}, Actual time: {opt_time}, relative diff: {rel_diff}"
class TestGridSearchCV(unittest.TestCase):
def test_manual_grid(self):
from sklearn.datasets import load_iris
from lale.lib.sklearn import SVC
warnings.simplefilter("ignore")
from lale import wrap_imported_operators
wrap_imported_operators()
iris = load_iris()
parameters = {"kernel": ("linear", "rbf"), "C": [1, 10]}
svc = SVC()
clf = HalvingGridSearchCV(estimator=svc, param_grid=parameters)
clf.fit(iris.data, iris.target)
clf.predict(iris.data)
@unittest.skip("Currently flaky")
def test_with_halving_gridsearchcv_auto_wrapped_pipe1(self):
from sklearn.datasets import load_iris
from sklearn.metrics import accuracy_score, make_scorer
lr = LogisticRegression()
pca = PCA()
trainable = pca >> lr
with warnings.catch_warnings():
warnings.simplefilter("ignore")
clf = HalvingGridSearchCV(
estimator=trainable,
lale_num_samples=2,
lale_num_grids=2,
cv=2,
scoring=make_scorer(accuracy_score),
)
iris = load_iris()
clf.fit(iris.data, iris.target)
@unittest.skip("Currently flaky")
def test_with_halving_gridsearchcv_auto_wrapped_pipe2(self):
from sklearn.datasets import load_iris
from sklearn.metrics import accuracy_score, make_scorer
lr = LogisticRegression()
pca1 = PCA()
pca1._name = "PCA1"
pca2 = PCA()
pca2._name = "PCA2"
trainable = (pca1 | pca2) >> lr
with warnings.catch_warnings():
warnings.simplefilter("ignore")
clf = HalvingGridSearchCV(
estimator=trainable,
lale_num_samples=2,
lale_num_grids=3,
cv=2,
scoring=make_scorer(accuracy_score),
)
iris = load_iris()
clf.fit(iris.data, iris.target)
class TestKNeighborsRegressor(unittest.TestCase):
def setUp(self):
from sklearn.datasets import load_diabetes
from sklearn.model_selection import train_test_split
all_X, all_y = load_diabetes(return_X_y=True)
# 15 samples, small enough so folds are likely smaller than n_neighbors
self.train_X, self.test_X, self.train_y, self.test_y = train_test_split(
all_X, all_y, train_size=15, test_size=None, shuffle=True, random_state=42
)
def test_halving_gridsearch(self):
planned = KNeighborsRegressor
with warnings.catch_warnings():
warnings.simplefilter("ignore")
trained = planned.auto_configure(
self.train_X,
self.train_y,
optimizer=HalvingGridSearchCV,
cv=3,
scoring="r2",
)
_ = trained.predict(self.test_X)
class TestStandardScaler(unittest.TestCase):
def setUp(self):
import scipy.sparse
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
# from lale.datasets.data_schemas import add_schema
all_X, all_y = load_iris(return_X_y=True)
denseTrainX, self.test_X, self.train_y, self.test_y = train_test_split(
all_X, all_y, train_size=0.8, test_size=0.2, shuffle=True, random_state=42
)
# self.train_X = add_schema(scipy.sparse.csr_matrix(denseTrainX))
self.train_X = scipy.sparse.csr_matrix(denseTrainX)
def test_halving_gridsearch(self):
planned = StandardScaler >> LogisticRegression().freeze_trainable()
with warnings.catch_warnings():
warnings.simplefilter("ignore")
trained = planned.auto_configure(
self.train_X,
self.train_y,
optimizer=HalvingGridSearchCV,
cv=3,
scoring="r2",
)
_ = trained.predict(self.test_X)
| 7,380 | 31.804444 | 90 |
py
|
lale
|
lale-master/test/__init__.py
|
# Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
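# Context manager used by the tests to turn schema validation on: it saves the
# current data- and hyperparameter-validation flags, enables validation inside
# the with-block, and restores the saved flags on exit.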
class EnableSchemaValidation:
def __init__(self):
pass
def __enter__(self):
from lale.settings import (
disable_data_schema_validation,
disable_hyperparams_schema_validation,
set_disable_data_schema_validation,
set_disable_hyperparams_schema_validation,
)
self.existing_data_schema_validation_flag = disable_data_schema_validation
self.existing_hyperparams_schema_validation_flag = (
disable_hyperparams_schema_validation
)
set_disable_data_schema_validation(False)
set_disable_hyperparams_schema_validation(False)
    def __exit__(self, exc_type, exc_value, traceback):
from lale.settings import (
set_disable_data_schema_validation,
set_disable_hyperparams_schema_validation,
)
set_disable_data_schema_validation(self.existing_data_schema_validation_flag)
set_disable_hyperparams_schema_validation(
self.existing_hyperparams_schema_validation_flag
)
| 1,639 | 37.139535 | 85 |
py
|
lale
|
lale-master/test/test_relational_sklearn.py
|
# Copyright 2021-2023 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
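# The tests in this file compare the relational (lale.lib.rasl) operators with
# their scikit-learn / category_encoders counterparts: each test fits the same
# data on pandas and Spark backends and checks that learned attributes,
# transformed values, and downstream predictions match, and that partial_fit
# over batches converges to the same result as a single fit.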
import itertools
import math
import numbers
import os.path
import re
import tempfile
import unittest
import urllib.request
from typing import Any, Dict, List, Tuple, cast
import jsonschema
import numpy as np
import pandas as pd
import sklearn
import sklearn.datasets
from category_encoders import HashingEncoder as SkHashingEncoder
from sklearn.feature_selection import SelectKBest as SkSelectKBest
from sklearn.impute import SimpleImputer as SkSimpleImputer
from sklearn.metrics import accuracy_score as sk_accuracy_score
from sklearn.metrics import balanced_accuracy_score as sk_balanced_accuracy_score
from sklearn.metrics import f1_score as sk_f1_score
from sklearn.metrics import make_scorer
from sklearn.metrics import r2_score as sk_r2_score
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score as sk_cross_val_score
from sklearn.model_selection import cross_validate as sk_cross_validate
from sklearn.pipeline import make_pipeline as sk_make_pipeline
from sklearn.preprocessing import MinMaxScaler as SkMinMaxScaler
from sklearn.preprocessing import OneHotEncoder as SkOneHotEncoder
from sklearn.preprocessing import OrdinalEncoder as SkOrdinalEncoder
from sklearn.preprocessing import StandardScaler as SkStandardScaler
from sklearn.preprocessing import scale as sk_scale
import lale.datasets
import lale.datasets.openml
import lale.datasets.openml.openml_datasets
import lale.lib.aif360
import lale.type_checking
from lale.datasets import pandas2spark
from lale.datasets.data_schemas import (
SparkDataFrameWithIndex,
add_table_name,
forward_metadata,
get_index_name,
)
from lale.datasets.multitable.fetch_datasets import fetch_go_sales_dataset
from lale.expressions import it
from lale.helpers import _ensure_pandas, create_data_loader, datatype_param_type
from lale.lib.category_encoders import TargetEncoder as SkTargetEncoder
from lale.lib.lightgbm import LGBMClassifier, LGBMRegressor
from lale.lib.rasl import BatchedBaggingClassifier, ConcatFeatures, Convert
from lale.lib.rasl import HashingEncoder as RaslHashingEncoder
from lale.lib.rasl import Map
from lale.lib.rasl import MinMaxScaler as RaslMinMaxScaler
from lale.lib.rasl import OneHotEncoder as RaslOneHotEncoder
from lale.lib.rasl import OrdinalEncoder as RaslOrdinalEncoder
from lale.lib.rasl import PrioBatch, PrioStep, Project, Scan
from lale.lib.rasl import SelectKBest as RaslSelectKBest
from lale.lib.rasl import SimpleImputer as RaslSimpleImputer
from lale.lib.rasl import StandardScaler as RaslStandardScaler
from lale.lib.rasl import TargetEncoder as RaslTargetEncoder
from lale.lib.rasl import accuracy_score as rasl_accuracy_score
from lale.lib.rasl import balanced_accuracy_score as rasl_balanced_accuracy_score
from lale.lib.rasl import categorical
from lale.lib.rasl import cross_val_score as rasl_cross_val_score
from lale.lib.rasl import cross_validate as rasl_cross_validate
from lale.lib.rasl import csv_data_loader
from lale.lib.rasl import f1_score as rasl_f1_score
from lale.lib.rasl import fit_with_batches
from lale.lib.rasl import get_scorer as rasl_get_scorer
from lale.lib.rasl import mockup_data_loader, openml_data_loader
from lale.lib.rasl import r2_score as rasl_r2_score
from lale.lib.rasl.standard_scaler import scale as rasl_scale
from lale.lib.sklearn import (
DecisionTreeClassifier,
LinearRegression,
LogisticRegression,
RandomForestClassifier,
SGDClassifier,
)
from lale.lib.xgboost import XGBClassifier, XGBRegressor
from lale.operators import TrainedPipeline
assert sklearn.__version__ >= "1.0", sklearn.__version__
class TestDatasets(unittest.TestCase):
def test_openml_creditg_arff(self):
batches = openml_data_loader("credit-g", 340)
n_rows_found = 0
n_batches_found = 0
for bX, by in batches:
n_batches_found += 1
n_rows_batch, n_columns_batch = bX.shape
n_rows_found += n_rows_batch
self.assertEqual(n_rows_batch, len(by))
self.assertEqual(n_columns_batch, 20)
self.assertEqual(n_batches_found, 3)
self.assertEqual(n_rows_found, 1000)
def test_autoai_creditg_csv(self):
with tempfile.TemporaryDirectory() as tmpdir_name:
url = "https://raw.githubusercontent.com/pmservice/wml-sample-models/master/autoai/credit-risk-prediction/data/german_credit_data_biased_training.csv"
file_name = os.path.join(tmpdir_name, "credit-g.csv")
# this request is to a hardcoded https url, so does not risk leaking local data
urllib.request.urlretrieve(url, file_name) # nosec
assert os.path.exists(file_name)
n_rows = 5000
n_batches = 3
rows_per_batch = (n_rows + n_batches - 1) // n_batches
batches = csv_data_loader(file_name, "Risk", rows_per_batch)
n_rows_found = 0
n_batches_found = 0
for bX, by in batches:
n_batches_found += 1
n_rows_batch, n_columns_batch = bX.shape
n_rows_found += n_rows_batch
self.assertEqual(n_rows_batch, len(by))
self.assertEqual(n_columns_batch, 20)
self.assertEqual(n_batches_found, n_batches)
self.assertEqual(n_rows_found, n_rows)
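# Helper: assert that a RASL-trained MinMaxScaler learned the same data_min_,
# data_max_, data_range_, scale_, min_, n_features_in_, and n_samples_seen_
# as the sklearn scaler it is compared against.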
def _check_trained_min_max_scaler(test, op1, op2, msg):
test.assertEqual(list(op1.data_min_), list(op2.data_min_), msg)
test.assertEqual(list(op1.data_max_), list(op2.data_max_), msg)
test.assertEqual(list(op1.data_range_), list(op2.data_range_), msg)
test.assertEqual(list(op1.scale_), list(op2.scale_), msg)
test.assertEqual(list(op1.min_), list(op2.min_), msg)
test.assertEqual(op1.n_features_in_, op2.n_features_in_, msg)
test.assertEqual(op1.n_samples_seen_, op2.n_samples_seen_, msg)
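# MinMaxScaler is tested on the go_sales data for both pandas and Spark
# (wrapping Spark frames with an index), covering plain fit/transform,
# clip=True, a custom feature_range, a degenerate single-value column, and
# partial_fit on growing prefixes of the data.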
class TestMinMaxScaler(unittest.TestCase):
@classmethod
def setUpClass(cls):
targets: List[datatype_param_type] = ["pandas", "spark"]
cls.tgt2datasets = {tgt: fetch_go_sales_dataset(tgt) for tgt in targets}
def test_get_params(self):
sk_scaler = SkMinMaxScaler()
rasl_scaler = RaslMinMaxScaler()
sk_params = sk_scaler.get_params()
rasl_params = rasl_scaler.get_params()
self.assertDictContainsSubset(sk_params, rasl_params)
def test_error(self):
_ = RaslMinMaxScaler(clip=True) # should raise no error
with self.assertRaisesRegex(
jsonschema.ValidationError,
re.compile(r"MinMaxScaler\(copy=False\)", re.MULTILINE | re.DOTALL),
):
_ = RaslMinMaxScaler(copy=False)
def test_fit(self):
columns = ["Product number", "Quantity", "Retailer code"]
pandas_data = self.tgt2datasets["pandas"][0][columns]
sk_scaler = SkMinMaxScaler()
sk_trained = sk_scaler.fit(pandas_data)
rasl_scaler = RaslMinMaxScaler()
for tgt, go_sales in self.tgt2datasets.items():
data = go_sales[0][columns]
if tgt == "spark":
data = SparkDataFrameWithIndex(data)
rasl_trained = rasl_scaler.fit(data)
_check_trained_min_max_scaler(self, sk_trained, rasl_trained.impl, "pandas")
def test_transform(self):
columns = ["Product number", "Quantity", "Retailer code"]
pandas_data = self.tgt2datasets["pandas"][0][columns]
sk_scaler = SkMinMaxScaler()
sk_trained = sk_scaler.fit(pandas_data)
sk_transformed = sk_trained.transform(pandas_data)
rasl_scaler = RaslMinMaxScaler()
for tgt, go_sales in self.tgt2datasets.items():
data = go_sales[0][columns]
if tgt == "spark":
data = SparkDataFrameWithIndex(data)
rasl_trained = rasl_scaler.fit(data)
rasl_transformed = rasl_trained.transform(data)
if tgt == "spark":
self.assertEqual(get_index_name(rasl_transformed), "index")
rasl_transformed = _ensure_pandas(rasl_transformed)
self.assertAlmostEqual(sk_transformed[0, 0], rasl_transformed.iloc[0, 0])
self.assertAlmostEqual(sk_transformed[0, 1], rasl_transformed.iloc[0, 1])
self.assertAlmostEqual(sk_transformed[0, 2], rasl_transformed.iloc[0, 2])
self.assertAlmostEqual(sk_transformed[10, 0], rasl_transformed.iloc[10, 0])
self.assertAlmostEqual(sk_transformed[10, 1], rasl_transformed.iloc[10, 1])
self.assertAlmostEqual(sk_transformed[10, 2], rasl_transformed.iloc[10, 2])
self.assertAlmostEqual(sk_transformed[20, 0], rasl_transformed.iloc[20, 0])
self.assertAlmostEqual(sk_transformed[20, 1], rasl_transformed.iloc[20, 1])
self.assertAlmostEqual(sk_transformed[20, 2], rasl_transformed.iloc[20, 2])
def test_transform_clipped(self):
columns = ["Product number", "Quantity", "Retailer code"]
pandas_data = self.tgt2datasets["pandas"][0][columns]
sk_scaler = SkMinMaxScaler(clip=True)
sk_trained = sk_scaler.fit(pandas_data)
sk_transformed = sk_trained.transform(pandas_data)
rasl_scaler = RaslMinMaxScaler(clip=True)
for tgt, go_sales in self.tgt2datasets.items():
data = go_sales[0][columns]
if tgt == "spark":
data = SparkDataFrameWithIndex(data)
rasl_trained = rasl_scaler.fit(data)
rasl_transformed = rasl_trained.transform(data)
if tgt == "spark":
self.assertEqual(get_index_name(rasl_transformed), "index")
rasl_transformed = _ensure_pandas(rasl_transformed)
self.assertAlmostEqual(sk_transformed[0, 0], rasl_transformed.iloc[0, 0])
self.assertAlmostEqual(sk_transformed[0, 1], rasl_transformed.iloc[0, 1])
self.assertAlmostEqual(sk_transformed[0, 2], rasl_transformed.iloc[0, 2])
self.assertAlmostEqual(sk_transformed[10, 0], rasl_transformed.iloc[10, 0])
self.assertAlmostEqual(sk_transformed[10, 1], rasl_transformed.iloc[10, 1])
self.assertAlmostEqual(sk_transformed[10, 2], rasl_transformed.iloc[10, 2])
self.assertAlmostEqual(sk_transformed[20, 0], rasl_transformed.iloc[20, 0])
self.assertAlmostEqual(sk_transformed[20, 1], rasl_transformed.iloc[20, 1])
self.assertAlmostEqual(sk_transformed[20, 2], rasl_transformed.iloc[20, 2])
def test_zero_scale(self):
pandas_data = pd.DataFrame({"a": [0.5]})
sk_scaler = SkMinMaxScaler()
sk_trained = sk_scaler.fit(pandas_data)
sk_transformed = sk_trained.transform(pandas_data)
rasl_scaler = RaslMinMaxScaler()
for tgt, _ in self.tgt2datasets.items():
data = Convert(astype=tgt).transform(pandas_data)
rasl_trained = rasl_scaler.fit(data)
rasl_transformed = rasl_trained.transform(data)
rasl_transformed = _ensure_pandas(rasl_transformed)
self.assertAlmostEqual(sk_transformed[0, 0], rasl_transformed.iloc[0, 0])
def test_fit_range(self):
columns = ["Product number", "Quantity", "Retailer code"]
pandas_data = self.tgt2datasets["pandas"][0][columns]
sk_scaler = SkMinMaxScaler(feature_range=(-5, 5))
sk_trained = sk_scaler.fit(pandas_data)
for tgt, go_sales in self.tgt2datasets.items():
data = go_sales[0][columns]
if tgt == "spark":
data = SparkDataFrameWithIndex(data)
rasl_scaler = RaslMinMaxScaler(feature_range=(-5, 5))
rasl_trained = rasl_scaler.fit(data)
_check_trained_min_max_scaler(self, sk_trained, rasl_trained.impl, "pandas")
def test_transform_range(self):
columns = ["Product number", "Quantity", "Retailer code"]
pandas_data = self.tgt2datasets["pandas"][0][columns]
sk_scaler = SkMinMaxScaler(feature_range=(-5, 5))
sk_trained = sk_scaler.fit(pandas_data)
sk_transformed = sk_trained.transform(pandas_data)
rasl_scaler = RaslMinMaxScaler(feature_range=(-5, 5))
for tgt, go_sales in self.tgt2datasets.items():
data = go_sales[0][columns]
if tgt == "spark":
data = SparkDataFrameWithIndex(data)
rasl_trained = rasl_scaler.fit(data)
rasl_transformed = rasl_trained.transform(data)
rasl_transformed = _ensure_pandas(rasl_transformed)
self.assertAlmostEqual(sk_transformed[0, 0], rasl_transformed.iloc[0, 0])
self.assertAlmostEqual(sk_transformed[0, 1], rasl_transformed.iloc[0, 1])
self.assertAlmostEqual(sk_transformed[0, 2], rasl_transformed.iloc[0, 2])
self.assertAlmostEqual(sk_transformed[10, 0], rasl_transformed.iloc[10, 0])
self.assertAlmostEqual(sk_transformed[10, 1], rasl_transformed.iloc[10, 1])
self.assertAlmostEqual(sk_transformed[10, 2], rasl_transformed.iloc[10, 2])
self.assertAlmostEqual(sk_transformed[20, 0], rasl_transformed.iloc[20, 0])
self.assertAlmostEqual(sk_transformed[20, 1], rasl_transformed.iloc[20, 1])
self.assertAlmostEqual(sk_transformed[20, 2], rasl_transformed.iloc[20, 2])
def test_partial_fit(self):
columns = ["Product number", "Quantity", "Retailer code"]
data = self.tgt2datasets["pandas"][0][columns]
for tgt in self.tgt2datasets.keys():
sk_scaler = SkMinMaxScaler()
rasl_scaler = RaslMinMaxScaler()
for lower, upper in [[0, 10], [10, 100], [100, data.shape[0]]]:
data_so_far = data[0:upper]
data_delta = data[lower:upper]
if tgt == "pandas":
pass
elif tgt == "spark":
data_delta = pandas2spark(data_delta)
else:
assert False
sk_trained = sk_scaler.fit(data_so_far)
rasl_trained = rasl_scaler.partial_fit(data_delta)
_check_trained_min_max_scaler(self, sk_trained, rasl_trained.impl, tgt)
class TestPipeline(unittest.TestCase):
@classmethod
def setUpClass(cls):
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
targets = ["pandas", "spark"]
cls.tgt2datasets = {tgt: {} for tgt in targets}
def add_df(name, df):
cls.tgt2datasets["pandas"][name] = df
cls.tgt2datasets["spark"][name] = pandas2spark(df)
X, y = load_iris(as_frame=True, return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y)
add_df("X_train", X_train)
add_df("X_test", X_test)
add_df("y_train", y_train)
add_df("y_test", y_test)
def test_pipeline(self):
for _tgt, datasets in self.tgt2datasets.items():
X_train, X_test = (datasets["X_train"], datasets["X_test"])
y_train = self.tgt2datasets["pandas"]["y_train"]
pipeline = (
RaslMinMaxScaler() >> Convert(astype="pandas") >> LogisticRegression()
)
trained = pipeline.fit(X_train, y_train)
_ = trained.predict(X_test)
_ = trained.predict(X_test)
def _check_trained_select_k_best(self, sk_trained, rasl_trained, msg=""):
self.assertEqual(len(sk_trained.scores_), len(rasl_trained.impl.scores_))
self.assertEqual(len(sk_trained.scores_), len(sk_trained.pvalues_))
self.assertEqual(len(sk_trained.scores_), len(rasl_trained.impl.pvalues_))
for i, (sk_score, rasl_score, sk_pvalue, rasl_pvalue) in enumerate(
zip(
sk_trained.scores_,
rasl_trained.impl.scores_,
sk_trained.pvalues_,
rasl_trained.impl.pvalues_,
)
):
if not (np.isnan(sk_score) and np.isnan(rasl_score)):
self.assertAlmostEqual(
sk_score,
rasl_score,
msg=f"{msg}: {i}",
)
if not (np.isnan(sk_pvalue) and np.isnan(rasl_pvalue)):
self.assertAlmostEqual(
sk_pvalue,
rasl_pvalue,
msg=f"{msg}: {i}",
)
self.assertEqual(sk_trained.n_features_in_, rasl_trained.impl.n_features_in_, msg)
class TestSelectKBest(unittest.TestCase):
@classmethod
def setUpClass(cls):
from sklearn.datasets import load_digits
targets = ["pandas", "spark"]
cls.tgt2datasets = {tgt: {} for tgt in targets}
def add_df(name, df):
cls.tgt2datasets["pandas"][name] = df
cls.tgt2datasets["spark"][name] = pandas2spark(df)
X, y = load_digits(return_X_y=True, as_frame=True)
X = add_table_name(X, "X")
y = add_table_name(y, "y")
add_df("X", X)
add_df("y", y)
def test_fit(self):
sk_trainable = SkSelectKBest(k=20)
X, y = self.tgt2datasets["pandas"]["X"], self.tgt2datasets["pandas"]["y"]
sk_trained = sk_trainable.fit(X, y)
rasl_trainable = RaslSelectKBest(k=20)
for tgt, datasets in self.tgt2datasets.items():
X, y = datasets["X"], datasets["y"]
rasl_trained = rasl_trainable.fit(X, y)
_check_trained_select_k_best(self, sk_trained, rasl_trained, tgt)
def test_transform(self):
sk_trainable = SkSelectKBest(k=20)
X, y = self.tgt2datasets["pandas"]["X"], self.tgt2datasets["pandas"]["y"]
sk_trained = sk_trainable.fit(X, y)
sk_transformed = sk_trained.transform(X)
rasl_trainable = RaslSelectKBest(k=20)
for tgt, datasets in self.tgt2datasets.items():
X, y = datasets["X"], datasets["y"]
rasl_trained = rasl_trainable.fit(X, y)
rasl_transformed = rasl_trained.transform(X)
_check_trained_select_k_best(self, sk_trained, rasl_trained, tgt)
rasl_transformed = _ensure_pandas(rasl_transformed)
self.assertEqual(sk_transformed.shape, rasl_transformed.shape, tgt)
for row_idx in range(sk_transformed.shape[0]):
for col_idx in range(sk_transformed.shape[1]):
self.assertAlmostEqual(
sk_transformed[row_idx, col_idx],
rasl_transformed.iloc[row_idx, col_idx],
msg=(row_idx, col_idx),
)
def test_partial_fit(self):
rasl_trainable = RaslSelectKBest(k=20)
X, y = self.tgt2datasets["pandas"]["X"], self.tgt2datasets["pandas"]["y"]
for lower, upper in [[0, 100], [100, 200], [200, X.shape[0]]]:
X_so_far, y_so_far = X[0:upper], y[0:upper]
sk_trainable = SkSelectKBest(k=20)
sk_trained = sk_trainable.fit(X_so_far, y_so_far)
X_delta, y_delta = X[lower:upper], y[lower:upper]
rasl_trained = rasl_trainable.partial_fit(X_delta, y_delta)
_check_trained_select_k_best(
self, sk_trained, rasl_trained, f"lower: {lower}, upper: {upper}"
)
def _check_trained_ordinal_encoder(test, op1, op2, msg):
if hasattr(op1, "feature_names_in_"):
test.assertEqual(list(op1.feature_names_in_), list(op2.feature_names_in_), msg)
test.assertEqual(len(op1.categories_), len(op2.categories_), msg)
for cat1, cat2 in zip(op1.categories_, op2.categories_):
test.assertEqual(len(cat1), len(cat2), msg)
for num1, num2 in zip(cat1, cat2):
if isinstance(num1, numbers.Number) and math.isnan(num2):
test.assertTrue(math.isnan(num2), msg)
else:
test.assertEqual(num1, num2, msg)
class TestOrdinalEncoder(unittest.TestCase):
@classmethod
def setUpClass(cls):
targets: List[datatype_param_type] = ["pandas", "spark"]
cls.tgt2gosales = {tgt: fetch_go_sales_dataset(tgt) for tgt in targets}
cls.tgt2creditg = {
tgt: lale.datasets.openml.fetch(
"credit-g",
"classification",
preprocess=False,
astype=tgt,
)
for tgt in targets
}
def _check_last_trained(self, op1, op2, msg):
_check_trained_ordinal_encoder(
self, op1.get_last().impl, op2.get_last().impl, msg
)
def test_fit(self):
prefix = Scan(table=it.go_daily_sales) >> Map(
columns={"retailer": it["Retailer code"], "method": it["Order method code"]}
)
encoder_args = {"handle_unknown": "use_encoded_value", "unknown_value": np.nan}
rasl_trainable = prefix >> RaslOrdinalEncoder(**encoder_args)
sk_trainable = prefix >> SkOrdinalEncoder(**encoder_args)
sk_trained = sk_trainable.fit(self.tgt2gosales["pandas"])
for tgt, datasets in self.tgt2gosales.items():
rasl_trained = rasl_trainable.fit(datasets)
self._check_last_trained(sk_trained, rasl_trained, tgt)
def test_partial_fit(self):
prefix = Scan(table=it.go_daily_sales) >> Map(
columns={"retailer": it["Retailer code"], "method": it["Order method code"]}
)
pandas_data = prefix.transform(self.tgt2gosales["pandas"])
encoder_args = {"handle_unknown": "use_encoded_value", "unknown_value": np.nan}
for tgt in self.tgt2gosales.keys():
rasl_op = RaslOrdinalEncoder(**encoder_args)
for lower, upper in [[0, 10], [10, 100], [100, pandas_data.shape[0]]]:
data_so_far = pandas_data[0:upper]
sk_op = SkOrdinalEncoder(**encoder_args).fit(data_so_far)
data_delta = pandas_data[lower:upper]
if tgt == "pandas":
pass
elif tgt == "spark":
data_delta = pandas2spark(data_delta)
else:
assert False
rasl_op = rasl_op.partial_fit(data_delta)
_check_trained_ordinal_encoder(
self,
sk_op,
rasl_op.impl,
f"tgt {tgt}, lower {lower}, upper {upper}",
)
def test_transform(self):
prefix = Scan(table=it.go_daily_sales) >> Map(
columns={"retailer": it["Retailer code"], "method": it["Order method code"]}
)
encoder_args = {"handle_unknown": "use_encoded_value", "unknown_value": np.nan}
rasl_trainable = prefix >> RaslOrdinalEncoder(**encoder_args)
sk_trainable = prefix >> SkOrdinalEncoder(**encoder_args)
sk_trained = sk_trainable.fit(self.tgt2gosales["pandas"])
sk_transformed = sk_trained.transform(self.tgt2gosales["pandas"])
for tgt, datasets in self.tgt2gosales.items():
rasl_trained = rasl_trainable.fit(datasets)
self._check_last_trained(sk_trained, rasl_trained, tgt)
rasl_transformed = rasl_trained.transform(datasets)
if tgt == "spark":
self.assertEqual(get_index_name(rasl_transformed), "index")
rasl_transformed = _ensure_pandas(rasl_transformed)
self.assertEqual(sk_transformed.shape, rasl_transformed.shape, tgt)
for row_idx in range(sk_transformed.shape[0]):
for col_idx in range(sk_transformed.shape[1]):
self.assertEqual(
sk_transformed[row_idx, col_idx],
rasl_transformed.iloc[row_idx, col_idx],
(row_idx, col_idx, tgt),
)
def test_predict(self):
(train_X_pd, train_y_pd), (test_X_pd, _test_y_pd) = self.tgt2creditg["pandas"]
cat_columns = categorical()(train_X_pd)
prefix = Map(columns={c: it[c] for c in cat_columns})
to_pd = Convert(astype="pandas")
lr = LogisticRegression()
encoder_args = {"handle_unknown": "use_encoded_value", "unknown_value": -1}
sk_trainable = prefix >> SkOrdinalEncoder(**encoder_args) >> lr
sk_trained = sk_trainable.fit(train_X_pd, train_y_pd)
sk_predicted = sk_trained.predict(test_X_pd)
rasl_trainable = prefix >> RaslOrdinalEncoder(**encoder_args) >> to_pd >> lr
for tgt, dataset in self.tgt2creditg.items():
(train_X, train_y), (test_X, _test_y) = dataset
rasl_trained = rasl_trainable.fit(train_X, train_y)
rasl_predicted = rasl_trained.predict(test_X)
self.assertEqual(sk_predicted.shape, rasl_predicted.shape, tgt)
self.assertEqual(sk_predicted.tolist(), rasl_predicted.tolist(), tgt)
def _check_trained_one_hot_encoder(test, op1, op2, msg):
if hasattr(op1, "feature_names_in_"):
test.assertEqual(list(op1.feature_names_in_), list(op2.feature_names_in_), msg)
test.assertEqual(len(op1.categories_), len(op2.categories_), msg)
for cat1, cat2 in zip(op1.categories_, op2.categories_):
test.assertEqual(list(cat1), list(cat2), msg)
class TestOneHotEncoder(unittest.TestCase):
@classmethod
def setUpClass(cls):
targets = ["pandas", "spark"]
cls.tgt2creditg = cast(
Dict[str, Any],
{
tgt: lale.datasets.openml.fetch(
"credit-g",
"classification",
preprocess=False,
astype=tgt,
)
for tgt in targets
},
)
def _check_last_trained(self, op1, op2, msg):
_check_trained_one_hot_encoder(
self, op1.get_last().impl, op2.get_last().impl, msg
)
def test_fit(self):
(train_X_pd, _), (_, _) = self.tgt2creditg["pandas"]
cat_columns = categorical()(train_X_pd)
prefix = Map(columns={c: it[c] for c in cat_columns})
rasl_trainable = prefix >> RaslOneHotEncoder()
sk_trainable = prefix >> SkOneHotEncoder()
sk_trained = sk_trainable.fit(train_X_pd)
for tgt, dataset in self.tgt2creditg.items():
(train_X, _train_y), (_test_X, _test_y) = dataset
rasl_trained = rasl_trainable.fit(train_X)
self._check_last_trained(sk_trained, rasl_trained, tgt)
def test_partial_fit(self):
(train_X_pd, _), (_, _) = self.tgt2creditg["pandas"]
cat_columns = categorical()(train_X_pd)
prefix = Map(columns={c: it[c] for c in cat_columns})
for tgt in self.tgt2creditg.keys():
rasl_pipe = prefix >> RaslOneHotEncoder()
for lower, upper in [[0, 10], [10, 100], [100, train_X_pd.shape[0]]]:
data_so_far = train_X_pd[0:upper]
                sk_pipe = prefix >> SkOneHotEncoder()
sk_pipe = sk_pipe.fit(data_so_far)
data_delta = train_X_pd[lower:upper]
if tgt == "pandas":
pass
elif tgt == "spark":
data_delta = pandas2spark(data_delta)
else:
assert False
rasl_pipe = rasl_pipe.partial_fit(data_delta)
self._check_last_trained(
sk_pipe,
rasl_pipe,
(tgt, lower, upper),
)
def test_transform(self):
(train_X_pd, _train_y_pd), (test_X_pd, _test_y_pd) = self.tgt2creditg["pandas"]
cat_columns = categorical()(train_X_pd)
prefix = Map(columns={c: it[c] for c in cat_columns})
rasl_trainable = prefix >> RaslOneHotEncoder(sparse=False)
sk_trainable = prefix >> SkOneHotEncoder(sparse=False)
sk_trained = sk_trainable.fit(train_X_pd)
sk_transformed = sk_trained.transform(test_X_pd)
for tgt, dataset in self.tgt2creditg.items():
(train_X, _train_y), (test_X, _test_y) = dataset
rasl_trained = rasl_trainable.fit(train_X)
self._check_last_trained(sk_trained, rasl_trained, tgt)
rasl_transformed = rasl_trained.transform(test_X)
if tgt == "spark":
self.assertEqual(get_index_name(rasl_transformed), "index")
rasl_transformed = _ensure_pandas(rasl_transformed)
self.assertEqual(sk_transformed.shape, rasl_transformed.shape, tgt)
for row_idx in range(sk_transformed.shape[0]):
for col_idx in range(sk_transformed.shape[1]):
self.assertEqual(
sk_transformed[row_idx, col_idx],
rasl_transformed.iloc[row_idx, col_idx],
(row_idx, col_idx, tgt),
)
def test_predict(self):
(train_X_pd, train_y_pd), (test_X_pd, _test_y_pd) = self.tgt2creditg["pandas"]
cat_columns = categorical()(train_X_pd)
prefix = Map(columns={c: it[c] for c in cat_columns})
to_pd = Convert(astype="pandas")
lr = LogisticRegression()
sk_trainable = prefix >> SkOneHotEncoder(sparse=False) >> lr
sk_trained = sk_trainable.fit(train_X_pd, train_y_pd)
sk_predicted = sk_trained.predict(test_X_pd)
rasl_trainable = prefix >> RaslOneHotEncoder(sparse=False) >> to_pd >> lr
for tgt, dataset in self.tgt2creditg.items():
(train_X, train_y), (test_X, _test_y) = dataset
rasl_trained = rasl_trainable.fit(train_X, train_y)
rasl_predicted = rasl_trained.predict(test_X)
self.assertEqual(sk_predicted.shape, rasl_predicted.shape, tgt)
self.assertEqual(sk_predicted.tolist(), rasl_predicted.tolist(), tgt)
def _get_feature_names_out(op):
"""later version of category_encoder's HashingEncoder changed the attribute name"""
fnames = getattr(op, "feature_names", None)
if fnames is not None:
return fnames
fnames = getattr(op, "get_feature_names_out", None)
assert fnames is not None
return fnames()
def _check_trained_hashing_encoder(test, op1, op2, msg):
test.assertEqual(
list(_get_feature_names_out(op1)), list(_get_feature_names_out(op2)), msg
)
class TestHashingEncoder(unittest.TestCase):
@classmethod
def setUpClass(cls):
targets = ["pandas", "spark"]
cls.tgt2creditg = cast(
Dict[str, Any],
{
tgt: lale.datasets.openml.fetch(
"credit-g",
"classification",
preprocess=False,
astype=tgt,
)
for tgt in targets
},
)
def _check_last_trained(self, op1, op2, msg):
_check_trained_hashing_encoder(
self, op1.get_last().impl, op2.get_last().impl, msg
)
def test_fit(self):
(train_X_pd, _), (_, _) = self.tgt2creditg["pandas"]
cat_columns = categorical()(train_X_pd)
prefix = Map(columns={c: it[c] for c in cat_columns})
rasl_trainable = prefix >> RaslHashingEncoder()
sk_trainable = prefix >> SkHashingEncoder()
sk_trained = sk_trainable.fit(train_X_pd)
# TODO: test with multiple batches
for tgt, dataset in self.tgt2creditg.items():
(train_X, _train_y), (_test_X, _test_y) = dataset
rasl_trained = rasl_trainable.fit(train_X)
self._check_last_trained(sk_trained, rasl_trained, tgt)
def test_transform(self):
(train_X_pd, _train_y_pd), (test_X_pd, _test_y_pd) = self.tgt2creditg["pandas"]
cat_columns = categorical()(train_X_pd)
prefix = Map(columns={c: it[c] for c in cat_columns})
rasl_trainable = prefix >> RaslHashingEncoder()
sk_trainable = prefix >> SkHashingEncoder()
sk_trained = sk_trainable.fit(train_X_pd)
sk_transformed = sk_trained.transform(test_X_pd)
for tgt, dataset in self.tgt2creditg.items():
(train_X, _train_y), (test_X, _test_y) = dataset
rasl_trained = rasl_trainable.fit(train_X)
self._check_last_trained(sk_trained, rasl_trained, tgt)
rasl_transformed = rasl_trained.transform(test_X)
if tgt == "spark":
self.assertEqual(get_index_name(rasl_transformed), "index")
rasl_transformed = _ensure_pandas(rasl_transformed)
self.assertEqual(sk_transformed.shape, rasl_transformed.shape, tgt)
for row_idx in range(sk_transformed.shape[0]):
for col_idx in range(sk_transformed.shape[1]):
self.assertEqual(
sk_transformed.iloc[row_idx, col_idx],
rasl_transformed.iloc[row_idx, col_idx],
(row_idx, col_idx, tgt),
)
def test_predict(self):
(train_X_pd, train_y_pd), (test_X_pd, _test_y_pd) = self.tgt2creditg["pandas"]
cat_columns = categorical()(train_X_pd)
prefix = Map(columns={c: it[c] for c in cat_columns})
to_pd = Convert(astype="pandas")
lr = LogisticRegression()
sk_trainable = prefix >> SkHashingEncoder() >> lr
sk_trained = sk_trainable.fit(train_X_pd, train_y_pd)
sk_predicted = sk_trained.predict(test_X_pd)
rasl_trainable = prefix >> RaslHashingEncoder() >> to_pd >> lr
for tgt, dataset in self.tgt2creditg.items():
(train_X, train_y), (test_X, _test_y) = dataset
rasl_trained = rasl_trainable.fit(train_X, train_y)
rasl_predicted = rasl_trained.predict(test_X)
self.assertEqual(sk_predicted.shape, rasl_predicted.shape, tgt)
self.assertEqual(sk_predicted.tolist(), rasl_predicted.tolist(), tgt)
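# Helper: compare a category_encoders TargetEncoder with the RASL one. The
# category_encoders mapping is keyed by ordinal codes (with what appear to be
# reserved keys -1 and -2 at the end), so the check matches those codes
# against the RASL _col2cat2value dictionary and the -1 entry against the
# learned _prior.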
def _check_trained_target_encoder(test, op1, op2, msg):
names1, names2 = _get_feature_names_out(op1), _get_feature_names_out(op2)
test.assertListEqual(list(names1), list(names2), msg)
test.assertSequenceEqual(op1.mapping.keys(), op2._col2cat2value.keys(), msg)
for col in op1.mapping.keys():
op1_cat2val = op1.mapping[col].to_dict()
op2_cat2val = op2._col2cat2value[col]
expected_keys = list(range(1, 1 + len(op2_cat2val))) + [-1, -2]
test.assertListEqual(list(op1_cat2val.keys()), expected_keys, (col, msg))
test.assertAlmostEqual(op1_cat2val[-1], op2._prior, msg=(col, msg))
for i, cat in enumerate(op2_cat2val.keys()):
test.assertAlmostEqual(
op1_cat2val[i + 1], op2_cat2val[cat], msg=(col, i, cat, msg)
)
class TestTargetEncoder(unittest.TestCase):
@classmethod
def setUpClass(cls):
targets = ["pandas", "spark"]
dataset_names = [
"tae", # 3-class classification
"cloud", # regression
"credit-g", # binary classification
]
experiments_dict = lale.datasets.openml.openml_datasets.experiments_dict
cls.datasets = cast(
Dict[Tuple[str, str], Any],
{
(tgt, dataset_name): lale.datasets.openml.fetch(
dataset_name,
experiments_dict[dataset_name]["task_type"],
preprocess=False,
astype=tgt,
)
for tgt, dataset_name in itertools.product(targets, dataset_names)
},
)
def test_fit(self):
for (tgt, dataset_name), dataset in self.datasets.items():
(train_X, train_y), (_, _) = dataset
(train_X_pd, train_y_pd), (_, _) = self.datasets["pandas", dataset_name]
sk_trainable = SkTargetEncoder()
sk_trained = sk_trainable.fit(train_X_pd, train_y_pd)
experiments_dict = lale.datasets.openml.openml_datasets.experiments_dict
if experiments_dict[dataset_name]["task_type"] == "regression":
rasl_trainable = RaslTargetEncoder()
else:
classes = sorted(list(train_y_pd.unique()))
rasl_trainable = RaslTargetEncoder(classes=classes)
rasl_trained = rasl_trainable.fit(train_X, train_y)
_check_trained_target_encoder(
self, sk_trained.impl, rasl_trained.impl, (tgt, dataset_name)
)
def test_partial_fit(self):
for (tgt, dataset_name), dataset in self.datasets.items():
if tgt != "pandas":
continue
(train_X, train_y), (_, _) = dataset
experiments_dict = lale.datasets.openml.openml_datasets.experiments_dict
if experiments_dict[dataset_name]["task_type"] == "regression":
classes = None
else:
classes = sorted(list(_ensure_pandas(train_y).unique()))
rasl_trainable = RaslTargetEncoder(classes=classes)
rasl_trained = rasl_trainable.fit(train_X, train_y)
rasl_batched = RaslTargetEncoder(classes=classes)
for batch_X, batch_y in mockup_data_loader(train_X, train_y, 3, tgt):
rasl_batched = rasl_batched.partial_fit(batch_X, batch_y)
op1, op2 = rasl_trained.impl, rasl_batched.impl
self.assertAlmostEqual(op1._prior, op2._prior, msg=(tgt, dataset_name))
self.assertSequenceEqual(
op1._col2cat2value.keys(),
op2._col2cat2value.keys(),
msg=(tgt, dataset_name),
)
for col in op1._col2cat2value:
self.assertSequenceEqual(
op1._col2cat2value[col].keys(),
op2._col2cat2value[col].keys(),
msg=(tgt, dataset_name, col),
)
for cat in op1._col2cat2value[col]:
self.assertAlmostEqual(
op1._col2cat2value[col][cat],
op2._col2cat2value[col][cat],
msg=(tgt, dataset_name, col, cat),
)
def test_transform(self):
for (tgt, dataset_name), dataset in self.datasets.items():
(train_X, train_y), (test_X, _) = dataset
(train_X_pd, train_y_pd), (test_X_pd, _) = self.datasets[
"pandas", dataset_name
]
sk_trainable = SkTargetEncoder()
sk_trained = sk_trainable.fit(train_X_pd, train_y_pd)
sk_transformed = sk_trained.transform(test_X_pd)
experiments_dict = lale.datasets.openml.openml_datasets.experiments_dict
if experiments_dict[dataset_name]["task_type"] == "regression":
rasl_trainable = RaslTargetEncoder(classes=None)
else:
classes = sorted(list(train_y_pd.unique()))
rasl_trainable = RaslTargetEncoder(classes=classes)
rasl_trained = rasl_trainable.fit(train_X, train_y)
_check_trained_target_encoder(
self, sk_trained.impl, rasl_trained.impl, (tgt, dataset_name)
)
rasl_transformed = rasl_trained.transform(test_X)
if tgt == "spark":
self.assertEqual(
get_index_name(rasl_transformed), "index", (tgt, dataset_name)
)
rasl_transformed = _ensure_pandas(rasl_transformed)
self.assertEqual(
sk_transformed.shape, rasl_transformed.shape, (tgt, dataset_name)
)
for row_idx in range(sk_transformed.shape[0]):
for col_idx in range(sk_transformed.shape[1]):
self.assertEqual(
sk_transformed.iloc[row_idx, col_idx],
rasl_transformed.iloc[row_idx, col_idx],
(tgt, dataset_name, row_idx, col_idx),
)
def test_predict(self):
for (tgt, dataset_name), dataset in self.datasets.items():
(train_X, train_y), (test_X, _) = dataset
(train_X_pd, train_y_pd), (test_X_pd, _) = self.datasets[
"pandas", dataset_name
]
experiments_dict = lale.datasets.openml.openml_datasets.experiments_dict
if experiments_dict[dataset_name]["task_type"] == "regression":
classes = None
est = LinearRegression()
else:
classes = sorted(list(train_y_pd.unique()))
est = LogisticRegression()
sk_trainable = SkTargetEncoder() >> est
sk_trained = sk_trainable.fit(train_X_pd, train_y_pd)
sk_predicted = sk_trained.predict(test_X_pd)
rasl_trainable = (
RaslTargetEncoder(classes=classes) >> Convert(astype="pandas") >> est
)
rasl_trained = rasl_trainable.fit(train_X, train_y)
rasl_predicted = rasl_trained.predict(test_X)
self.assertEqual(
sk_predicted.shape, rasl_predicted.shape, (tgt, dataset_name)
)
self.assertListEqual(
sk_predicted.tolist(), rasl_predicted.tolist(), (tgt, dataset_name)
)
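# Helper asserting that two fitted SimpleImputer-like objects agree on
# feature_names_in_, statistics_, n_features_in_, and indicator_; each check is
# skipped when op1 does not expose the corresponding attribute.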
def _check_trained_simple_imputer(test, op1, op2, msg):
if hasattr(op1, "feature_names_in_"):
test.assertEqual(list(op1.feature_names_in_), list(op2.feature_names_in_), msg)
if hasattr(op1, "statistics_"):
test.assertEqual(len(op1.statistics_), len(op2.statistics_), msg)
for stats1, stats2 in zip(op1.statistics_, op2.statistics_):
test.assertEqual(stats1, stats2, msg)
if hasattr(op1, "n_features_in_"):
test.assertEqual(op1.n_features_in_, op2.n_features_in_, msg)
if hasattr(op1, "indicator_"):
test.assertEqual(op1.indicator_, op2.indicator_, msg)
class TestSimpleImputer(unittest.TestCase):
def setUp(self):
targets = ["pandas", "spark"]
self.tgt2adult = {
tgt: lale.datasets.openml.fetch(
"adult",
"classification",
preprocess=False,
astype=tgt,
)
for tgt in targets
}
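    # Inject missing values into the adult dataset by replacing every occurrence of
    # `value` in column `col_name` with `missing_value`, using .loc for pandas and a
    # when/otherwise column expression for Spark.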
def _fill_missing_value(self, col_name, value, missing_value):
for tgt, datasets in self.tgt2adult.items():
(train_X, train_y), (test_X, test_y) = datasets
if tgt == "pandas":
train_X.loc[
train_X[col_name] == value, col_name
] = missing_value # type:ignore
test_X.loc[
test_X[col_name] == value, col_name
] = missing_value # type:ignore
elif tgt == "spark":
from pyspark.sql.functions import col, when
train_X_new = train_X.withColumn(
col_name,
when(col(col_name) == value, missing_value).otherwise(
col(col_name)
),
)
test_X_new = test_X.withColumn(
col_name,
when(col(col_name) == value, missing_value).otherwise(
col(col_name)
),
)
train_X = forward_metadata(train_X, train_X_new)
test_X = forward_metadata(test_X, test_X_new)
else:
assert False
self.tgt2adult[tgt] = (train_X, train_y), (test_X, test_y)
def test_fit_transform_numeric_nan_missing(self):
self._fill_missing_value("age", 36.0, np.nan)
num_columns = ["age", "fnlwgt", "education-num"]
prefix = Map(columns={c: it[c] for c in num_columns})
hyperparams = [
{"strategy": "mean"},
{"strategy": "median"},
{"strategy": "most_frequent"},
{"strategy": "constant", "fill_value": 99},
]
for hyperparam in hyperparams:
rasl_imputer = RaslSimpleImputer(**hyperparam)
sk_imputer = SkSimpleImputer(**hyperparam)
rasl_trainable = prefix >> rasl_imputer
sk_trainable = prefix >> sk_imputer
sk_trained = sk_trainable.fit(self.tgt2adult["pandas"][0][0])
sk_transformed = sk_trained.transform(self.tgt2adult["pandas"][1][0])
sk_statistics_ = sk_trained.steps[-1][1].impl.statistics_
for tgt, dataset in self.tgt2adult.items():
(train_X, _), (test_X, _) = dataset
rasl_trained = rasl_trainable.fit(train_X)
# test the fit succeeded.
rasl_statistics_ = rasl_trained.steps[-1][1].impl.statistics_
self.assertEqual(
len(sk_statistics_), len(rasl_statistics_), (hyperparam, tgt)
)
for i in range(sk_statistics_.shape[0]):
self.assertAlmostEqual(
sk_statistics_[i], rasl_statistics_[i], msg=(i, hyperparam, tgt)
)
rasl_transformed = rasl_trained.transform(test_X)
if tgt == "spark":
self.assertEqual(get_index_name(rasl_transformed), "index")
rasl_transformed = _ensure_pandas(rasl_transformed)
self.assertEqual(sk_transformed.shape, rasl_transformed.shape, tgt)
for row_idx in range(sk_transformed.shape[0]):
for col_idx in range(sk_transformed.shape[1]):
self.assertAlmostEqual(
sk_transformed[row_idx, col_idx],
rasl_transformed.iloc[row_idx, col_idx],
msg=(row_idx, col_idx, tgt),
)
_check_trained_simple_imputer(self, sk_imputer, rasl_imputer, tgt)
def test_fit_transform_numeric_nonan_missing(self):
self._fill_missing_value("age", 36.0, -1)
num_columns = ["age", "fnlwgt", "education-num"]
prefix = Map(columns={c: it[c] for c in num_columns})
hyperparams = [
{"strategy": "mean"},
{"strategy": "median"},
{"strategy": "most_frequent"},
{"strategy": "constant", "fill_value": 99},
]
for hyperparam in hyperparams:
rasl_trainable = prefix >> RaslSimpleImputer(
missing_values=-1, **hyperparam
)
sk_trainable = prefix >> SkSimpleImputer(missing_values=-1, **hyperparam)
sk_trained = sk_trainable.fit(self.tgt2adult["pandas"][0][0])
sk_transformed = sk_trained.transform(self.tgt2adult["pandas"][1][0])
sk_statistics_ = sk_trained.get_last().impl.statistics_
for tgt, dataset in self.tgt2adult.items():
(train_X, _), (test_X, _) = dataset
rasl_trained = rasl_trainable.fit(train_X)
# test the fit succeeded.
rasl_statistics_ = rasl_trained.get_last().impl.statistics_ # type: ignore
self.assertEqual(len(sk_statistics_), len(rasl_statistics_), tgt)
self.assertEqual(list(sk_statistics_), list(rasl_statistics_), tgt)
rasl_transformed = rasl_trained.transform(test_X)
if tgt == "spark":
self.assertEqual(get_index_name(rasl_transformed), "index")
rasl_transformed = _ensure_pandas(rasl_transformed)
self.assertEqual(sk_transformed.shape, rasl_transformed.shape, tgt)
for row_idx in range(sk_transformed.shape[0]):
for col_idx in range(sk_transformed.shape[1]):
self.assertEqual(
sk_transformed[row_idx, col_idx],
rasl_transformed.iloc[row_idx, col_idx],
(row_idx, col_idx, tgt),
)
def test_predict(self):
self._fill_missing_value("age", 36.0, np.nan)
(train_X_pd, train_y_pd), (test_X_pd, _test_y_pd) = self.tgt2adult["pandas"]
num_columns = ["age", "fnlwgt", "education-num"]
prefix = Map(columns={c: it[c] for c in num_columns})
to_pd = Convert(astype="pandas")
lr = LogisticRegression()
imputer_args = {"strategy": "mean"}
sk_trainable = prefix >> SkSimpleImputer(**imputer_args) >> lr
sk_trained = sk_trainable.fit(train_X_pd, train_y_pd)
sk_predicted = sk_trained.predict(test_X_pd)
rasl_trainable = prefix >> RaslSimpleImputer(**imputer_args) >> to_pd >> lr
for tgt, dataset in self.tgt2adult.items():
(train_X, train_y), (test_X, _test_y) = dataset
rasl_trained = rasl_trainable.fit(train_X, train_y)
rasl_predicted = rasl_trained.predict(test_X)
self.assertEqual(sk_predicted.shape, rasl_predicted.shape, tgt)
self.assertEqual(sk_predicted.tolist(), rasl_predicted.tolist(), tgt)
def test_invalid_datatype_strategy(self):
sk_trainable = SkSimpleImputer()
with self.assertRaises(ValueError):
sk_trainable.fit(self.tgt2adult["pandas"][0][0])
rasl_trainable = RaslSimpleImputer()
for tgt, dataset in self.tgt2adult.items():
(train_X, _), (_, _) = dataset
if tgt == "spark":
# Skip test because of timeout!
continue
with self.assertRaises(ValueError):
_ = rasl_trainable.fit(train_X)
def test_default_numeric_fill_value(self):
self._fill_missing_value("age", 36.0, np.nan)
num_columns = ["age", "fnlwgt", "education-num"]
prefix = Map(columns={c: it[c] for c in num_columns})
hyperparams = [{"strategy": "constant"}]
for hyperparam in hyperparams:
rasl_trainable = prefix >> RaslSimpleImputer(**hyperparam)
sk_trainable = prefix >> SkSimpleImputer(**hyperparam)
sk_trained = sk_trainable.fit(self.tgt2adult["pandas"][0][0])
sk_transformed = sk_trained.transform(self.tgt2adult["pandas"][1][0])
sk_statistics_ = sk_trained.steps[-1][1].impl.statistics_
for tgt, dataset in self.tgt2adult.items():
(train_X, _), (test_X, _) = dataset
rasl_trained = rasl_trainable.fit(train_X)
# test the fit succeeded.
rasl_statistics_ = rasl_trained.steps[-1][1].impl.statistics_
self.assertEqual(len(sk_statistics_), len(rasl_statistics_), tgt)
self.assertEqual(list(sk_statistics_), list(rasl_statistics_), tgt)
rasl_transformed = rasl_trained.transform(test_X)
rasl_transformed = _ensure_pandas(rasl_transformed)
self.assertEqual(sk_transformed.shape, rasl_transformed.shape, tgt)
for row_idx in range(sk_transformed.shape[0]):
for col_idx in range(sk_transformed.shape[1]):
self.assertEqual(
sk_transformed[row_idx, col_idx],
rasl_transformed.iloc[row_idx, col_idx],
(row_idx, col_idx, tgt),
)
def test_default_string_fill_value(self):
self._fill_missing_value("education", "Prof-school", np.nan)
str_columns = ["workclass", "education", "capital-gain"]
prefix = Map(columns={c: it[c] for c in str_columns})
hyperparams = [{"strategy": "constant"}]
for hyperparam in hyperparams:
rasl_trainable = prefix >> RaslSimpleImputer(**hyperparam)
sk_trainable = prefix >> SkSimpleImputer(**hyperparam)
sk_trained = sk_trainable.fit(self.tgt2adult["pandas"][0][0])
sk_statistics_ = sk_trained.steps[-1][1].impl.statistics_
for tgt, dataset in self.tgt2adult.items():
(train_X, _), (test_X, _) = dataset
rasl_trained = rasl_trainable.fit(train_X)
# test the fit succeeded.
rasl_statistics_ = rasl_trained.steps[-1][1].impl.statistics_
self.assertEqual(len(sk_statistics_), len(rasl_statistics_), tgt)
self.assertEqual(list(sk_statistics_), list(rasl_statistics_), tgt)
rasl_transformed = rasl_trained.transform(test_X)
rasl_transformed = _ensure_pandas(rasl_transformed)
                # Note that for this test case the output of the sklearn transform does
                # not match the rasl transform: at least one row contains a None value,
                # which pandas replace treats as NaN and therefore fills, whereas
                # sklearn (which uses numpy) does not replace a None.
                # So we only test that the default fill value "missing_value" is assigned.
self.assertEqual(rasl_transformed.iloc[1, 1], "missing_value")
def test_multiple_modes_numeric(self):
# Sklearn SimpleImputer says: for strategy `most_frequent`,
# if there is more than one such value, only the smallest is returned.
data = [[1, 10], [2, 14], [3, 15], [4, 15], [5, 14], [6, np.nan]]
df = pd.DataFrame(data, columns=["Id", "Age"])
hyperparam = {"strategy": "most_frequent"}
sk_trainable = SkSimpleImputer(**hyperparam)
rasl_trainable = RaslSimpleImputer(**hyperparam)
sk_trained = sk_trainable.fit(df)
rasl_trained = rasl_trainable.fit(df)
self.assertEqual(
len(sk_trained.statistics_), len(rasl_trained.impl.statistics_), "pandas"
)
self.assertEqual([6, 15], list(rasl_trained.impl.statistics_), "pandas")
spark_df = pandas2spark(df)
rasl_trained = rasl_trainable.fit(spark_df)
self.assertEqual(
len(sk_trained.statistics_), len(rasl_trained.impl.statistics_), "spark"
)
self.assertIn(rasl_trained.impl.statistics_[1], [14, 15])
def test_multiple_modes_string(self):
# Sklearn SimpleImputer says: for strategy `most_frequent`,
# if there is more than one such value, only the smallest is returned.
data = [
["a", "t"],
["b", "f"],
["b", "m"],
["c", "f"],
["c", "m"],
["f", "missing"],
]
df = pd.DataFrame(data, columns=["Id", "Gender"])
hyperparam = {"strategy": "most_frequent", "missing_values": "missing"}
sk_trainable = SkSimpleImputer(**hyperparam)
rasl_trainable = RaslSimpleImputer(**hyperparam)
sk_trained = sk_trainable.fit(df)
rasl_trained = rasl_trainable.fit(df)
self.assertEqual(
len(sk_trained.statistics_), len(rasl_trained.impl.statistics_), "pandas"
)
self.assertEqual(
list(["c", "m"]), list(rasl_trained.impl.statistics_), "pandas"
)
spark_df = pandas2spark(df)
rasl_trained = rasl_trainable.fit(spark_df)
self.assertEqual(
len(sk_trained.statistics_), len(rasl_trained.impl.statistics_), "spark"
)
self.assertIn(rasl_trained.impl.statistics_[1], ["f", "m"])
def test_valid_partial_fit(self):
self._fill_missing_value("age", 36.0, -1)
num_columns = ["age", "fnlwgt", "education-num"]
prefix = Map(columns={c: it[c] for c in num_columns})
hyperparams = [
{"strategy": "mean"},
{"strategy": "constant", "fill_value": 99},
]
for hyperparam in hyperparams:
rasl_trainable = prefix >> RaslSimpleImputer(
missing_values=-1, **hyperparam
)
sk_trainable = prefix >> SkSimpleImputer(missing_values=-1, **hyperparam)
sk_trained = sk_trainable.fit(self.tgt2adult["pandas"][0][0])
sk_transformed = sk_trained.transform(self.tgt2adult["pandas"][1][0])
sk_statistics_ = sk_trained.get_last().impl.statistics_
(train_X, _), (test_X, _) = self.tgt2adult["pandas"]
data1_pandas = train_X.iloc[:10]
data2_pandas = train_X.iloc[10:100]
data3_pandas = train_X.iloc[100:]
test_X_pandas = test_X
for tgt in self.tgt2adult.keys():
if tgt == "pandas":
data1 = data1_pandas
data2 = data2_pandas
data3 = data3_pandas
test_X = test_X_pandas
elif tgt == "spark":
data1 = pandas2spark(data1_pandas) # type:ignore
data2 = pandas2spark(data2_pandas) # type:ignore
data3 = pandas2spark(data3_pandas) # type:ignore
test_X = pandas2spark(test_X_pandas) # type:ignore
else:
assert False
rasl_trainable = prefix >> RaslSimpleImputer(
missing_values=-1, **hyperparam
)
rasl_trained = rasl_trainable.partial_fit(data1)
rasl_trained = rasl_trained.partial_fit(data2)
rasl_trained = rasl_trained.partial_fit(data3)
# test the fit succeeded.
rasl_statistics_ = rasl_trained.get_last().impl.statistics_ # type: ignore
self.assertEqual(len(sk_statistics_), len(rasl_statistics_), tgt)
for sk_stats, rasl_stats in zip(sk_statistics_, rasl_statistics_):
self.assertEqual(sk_stats, rasl_stats)
rasl_transformed = rasl_trained.transform(test_X)
rasl_transformed = _ensure_pandas(rasl_transformed)
self.assertEqual(sk_transformed.shape, rasl_transformed.shape, tgt)
for row_idx in range(sk_transformed.shape[0]):
for col_idx in range(sk_transformed.shape[1]):
self.assertEqual(
sk_transformed[row_idx, col_idx],
rasl_transformed.iloc[row_idx, col_idx],
(row_idx, col_idx, tgt),
)
def test_invalid_partial_fit(self):
num_columns = ["age", "fnlwgt", "education-num"]
prefix = Map(columns={c: it[c] for c in num_columns})
hyperparams = [
{"strategy": "median"},
{"strategy": "most_frequent"},
]
for hyperparam in hyperparams:
rasl_trainable = prefix >> RaslSimpleImputer(
missing_values=-1, **hyperparam
)
(train_X, _), (_, _) = self.tgt2adult["pandas"]
with self.assertRaises(ValueError):
_ = rasl_trainable.partial_fit(train_X)
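# Helper asserting that a fitted sklearn StandardScaler and the RASL implementation
# agree on feature names, n_features_in_, and n_samples_seen_, and element-wise
# (up to floating-point tolerance) on mean_, var_, and scale_.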
def _check_trained_standard_scaler(test, op1, op2, msg):
if hasattr(op1, "feature_names_in_"):
test.assertEqual(list(op1.feature_names_in_), list(op2.feature_names_in_), msg)
test.assertEqual(op1.n_features_in_, op2.n_features_in_, msg)
test.assertEqual(op1.n_samples_seen_, op2.n_samples_seen_, msg)
if op1.mean_ is None:
test.assertIsNone(op2.mean_, msg)
else:
test.assertIsNotNone(op2.mean_, msg)
test.assertEqual(len(op1.mean_), len(op2.mean_), msg)
for mean1, mean2 in zip(op1.mean_, op2.mean_):
test.assertAlmostEqual(mean1, mean2, msg=msg)
if op1.var_ is None:
test.assertIsNone(op2.var_, msg)
else:
test.assertIsNotNone(op2.var_, msg)
test.assertEqual(len(op1.var_), len(op2.var_), msg)
for var1, var2 in zip(op1.var_, op2.var_):
test.assertAlmostEqual(var1, var2, msg=msg)
if op1.scale_ is None:
test.assertIsNone(op2.scale_, msg)
else:
test.assertIsNotNone(op2.scale_, msg)
test.assertEqual(len(op1.scale_), len(op2.scale_), msg)
for scale1, scale2 in zip(op1.scale_, op2.scale_):
test.assertAlmostEqual(scale1, scale2, msg=msg)
class TestStandardScaler(unittest.TestCase):
@classmethod
def setUpClass(cls):
targets = ["pandas", "spark"]
cls.tgt2creditg = cast(
Dict[str, Any],
{
tgt: lale.datasets.openml.fetch(
"credit-g",
"classification",
preprocess=True,
astype=tgt,
)
for tgt in targets
},
)
def test_fit(self):
(train_X_pd, _), (_, _) = self.tgt2creditg["pandas"]
sk_trainable = SkStandardScaler()
sk_trained = sk_trainable.fit(train_X_pd)
rasl_trainable = RaslStandardScaler()
for tgt, dataset in self.tgt2creditg.items():
(train_X, _), (_, _) = dataset
rasl_trained = rasl_trainable.fit(train_X)
_check_trained_standard_scaler(self, sk_trained, rasl_trained.impl, tgt)
def test_partial_fit(self):
(train_X_pd, _), (_, _) = self.tgt2creditg["pandas"]
for tgt in self.tgt2creditg.keys():
rasl_op = RaslStandardScaler()
for lower, upper in [[0, 10], [10, 100], [100, train_X_pd.shape[0]]]:
data_so_far = train_X_pd[0:upper]
sk_op = SkStandardScaler()
sk_op = sk_op.fit(data_so_far)
data_delta = train_X_pd[lower:upper]
if tgt == "pandas":
pass
elif tgt == "spark":
data_delta = pandas2spark(data_delta)
else:
assert False
rasl_op = rasl_op.partial_fit(data_delta)
_check_trained_standard_scaler(
self, sk_op, rasl_op.impl, (tgt, lower, upper)
)
def test_transform(self):
(train_X_pd, _), (test_X_pd, _) = self.tgt2creditg["pandas"]
sk_trainable = SkStandardScaler()
sk_trained = sk_trainable.fit(train_X_pd)
sk_transformed = sk_trained.transform(test_X_pd)
rasl_trainable = RaslStandardScaler()
for tgt, dataset in self.tgt2creditg.items():
(train_X, _), (test_X, _) = dataset
rasl_trained = rasl_trainable.fit(train_X)
_check_trained_standard_scaler(self, sk_trained, rasl_trained.impl, tgt)
rasl_transformed = rasl_trained.transform(test_X)
if tgt == "spark":
self.assertEqual(get_index_name(rasl_transformed), "index")
rasl_transformed = _ensure_pandas(rasl_transformed)
self.assertEqual(sk_transformed.shape, rasl_transformed.shape, tgt)
for row_idx in range(sk_transformed.shape[0]):
for col_idx in range(sk_transformed.shape[1]):
self.assertAlmostEqual(
sk_transformed[row_idx, col_idx],
rasl_transformed.iloc[row_idx, col_idx],
msg=(row_idx, col_idx, tgt),
)
def test_scale(self):
(X_pd, _), _ = self.tgt2creditg["pandas"]
sk_transformed = sk_scale(X_pd)
for tgt, dataset in self.tgt2creditg.items():
(X, _), _ = dataset
rasl_transformed = rasl_scale(X)
if tgt == "spark":
self.assertEqual(get_index_name(rasl_transformed), "index")
rasl_transformed = _ensure_pandas(rasl_transformed)
self.assertEqual(sk_transformed.shape, rasl_transformed.shape, tgt)
for row_idx in range(sk_transformed.shape[0]):
for col_idx in range(sk_transformed.shape[1]):
self.assertAlmostEqual(
sk_transformed[row_idx, col_idx],
rasl_transformed.iloc[row_idx, col_idx],
msg=(row_idx, col_idx, tgt),
)
def test_predict(self):
(train_X_pd, train_y_pd), (test_X_pd, _test_y_pd) = self.tgt2creditg["pandas"]
to_pd = Convert(astype="pandas")
lr = LogisticRegression()
sk_trainable = SkStandardScaler() >> lr
sk_trained = sk_trainable.fit(train_X_pd, train_y_pd)
sk_predicted = sk_trained.predict(test_X_pd)
rasl_trainable = RaslStandardScaler() >> to_pd >> lr
for tgt, dataset in self.tgt2creditg.items():
(train_X, train_y), (test_X, _test_y) = dataset
rasl_trained = rasl_trainable.fit(train_X, train_y)
rasl_predicted = rasl_trained.predict(test_X)
self.assertEqual(sk_predicted.shape, rasl_predicted.shape, tgt)
self.assertEqual(sk_predicted.tolist(), rasl_predicted.tolist(), tgt)
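# Cross-validation splitter used to make sklearn's cross_val_score comparable with
# the batched RASL cross_val_score: it applies KFold to each batch separately and
# merges the per-batch test indices into global per-fold train/test index lists
# (see split() below).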
class _BatchTestingKFold:
def __init__(self, n_batches, n_splits):
self.n_batches = n_batches
self.n_splits = n_splits
def get_n_splits(self, X=None, y=None, groups=None):
return self.n_splits
def split(self, X, y=None, groups=None):
# re-arrange batched data [[d0,e0,f0], [d1,e1,f1], [d2,e2,f2]]
# into non-batched data [d0+d1+d2, e0+e1+e2, f0+f1+f2]
result = [([], []) for _ in range(self.n_splits)]
cv = KFold(self.n_splits)
batches = mockup_data_loader(X, y, self.n_batches, "pandas")
for bX, by in batches:
for fold, (_, test) in enumerate(cv.split(bX, by)):
remapped = bX.index[test]
for f in range(self.n_splits):
if f != fold:
assert set(result[f][0]).isdisjoint(set(remapped))
result[f][0].extend(remapped)
assert set(result[fold][1]).isdisjoint(set(remapped))
result[fold][1].extend(remapped)
return result
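# Progress-callback stub: it checks that it is invoked exactly once per scanned
# batch and that end_of_scanned_batches is never set on those calls.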
class _BatchTestingCallback:
def __init__(self):
self.n_calls = 0
def __call__(
self, score_train, score_valid, n_batches_scanned, end_of_scanned_batches
):
self.n_calls += 1
assert self.n_calls == n_batches_scanned, (self.n_calls, n_batches_scanned)
assert not end_of_scanned_batches
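# Task-graph tests: fit_with_batches and the RASL cross-validation helpers should
# reproduce the trained preprocessing state (and the scores) of the corresponding
# plain sklearn pipelines, for 1 and 3 batches and for both PrioStep and PrioBatch
# scheduling priorities.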
class TestTaskGraphs(unittest.TestCase):
@classmethod
def setUpClass(cls):
X, y, fairness_info = lale.lib.aif360.fetch_creditg_df(preprocess=False)
X = Project(columns=categorical()).fit(X).transform(X)
fairness_info = { # remove numeric protected attribute age
"favorable_labels": fairness_info["favorable_labels"],
"protected_attributes": fairness_info["protected_attributes"][:1],
}
cls.creditg = X, y, fairness_info
@classmethod
def _make_sk_trainable(cls, final_est):
if final_est == "sgd":
est = SGDClassifier(random_state=97)
elif final_est == "rfc":
est = RandomForestClassifier(random_state=97)
else:
assert False, final_est
return sk_make_pipeline(
SkOrdinalEncoder(handle_unknown="use_encoded_value", unknown_value=-1),
SkMinMaxScaler(),
est,
)
@classmethod
def _make_rasl_trainable(cls, final_est):
if final_est == "sgd":
est = SGDClassifier(random_state=97)
elif final_est == "rfc":
est = RandomForestClassifier(random_state=97)
else:
assert False, final_est
return (
RaslOrdinalEncoder(handle_unknown="use_encoded_value", unknown_value=-1)
>> RaslMinMaxScaler()
>> est
)
def test_fit_no_batching(self):
train_X, train_y, _ = self.creditg
sk_trainable = self._make_sk_trainable("sgd")
sk_trained = sk_trainable.fit(train_X, train_y)
rasl_trainable = self._make_rasl_trainable("sgd")
rasl_trained = rasl_trainable.fit(train_X, train_y)
_check_trained_ordinal_encoder(
self, sk_trained.steps[0][1], rasl_trained.steps[0][1].impl, "pandas"
)
_check_trained_min_max_scaler(
self, sk_trained.steps[1][1], rasl_trained.steps[1][1].impl, "pandas"
)
def test_fit_batching(self):
train_X, train_y, _ = self.creditg
train_data_space = train_X.memory_usage().sum() + train_y.memory_usage()
sk_trainable = self._make_sk_trainable("sgd")
sk_trained = sk_trainable.fit(train_X, train_y)
unique_class_labels = list(train_y.unique())
for n_batches in [1, 3]:
for prio in [PrioStep(), PrioBatch()]:
batches = mockup_data_loader(train_X, train_y, n_batches, "pandas")
rasl_trainable = self._make_rasl_trainable("sgd")
rasl_trained = fit_with_batches(
pipeline=rasl_trainable,
batches_train=batches,
batches_valid=None,
scoring=None,
unique_class_labels=unique_class_labels,
max_resident=3 * math.ceil(train_data_space / n_batches),
prio=prio,
partial_transform=False,
verbose=0,
progress_callback=None,
)
_check_trained_ordinal_encoder(
self,
sk_trained.steps[0][1],
rasl_trained.steps[0][1].impl,
(n_batches, type(prio)),
)
_check_trained_min_max_scaler(
self,
sk_trained.steps[1][1],
rasl_trained.steps[1][1].impl,
(n_batches, type(prio)),
)
def test_partial_transform(self):
train_X, train_y, _ = self.creditg
unique_class_labels = list(train_y.unique())
for n_batches in [1, 3]:
batches = mockup_data_loader(train_X, train_y, n_batches, "pandas")
rasl_trainable = self._make_rasl_trainable("sgd")
progress_callback = _BatchTestingCallback()
_ = fit_with_batches(
pipeline=rasl_trainable,
batches_train=batches,
batches_valid=None,
scoring=rasl_get_scorer("accuracy"),
unique_class_labels=unique_class_labels,
max_resident=None,
prio=PrioBatch(),
partial_transform=True,
verbose=0,
progress_callback=progress_callback,
)
self.assertEqual(progress_callback.n_calls, n_batches)
def test_frozen_prefix(self):
train_X, train_y, _ = self.creditg
unique_class_labels = list(train_y.unique())
n_batches = 3
batches = mockup_data_loader(train_X, train_y, n_batches, "pandas")
first_batch = next(iter(batches))
trainable1 = self._make_rasl_trainable("sgd")
trained1 = fit_with_batches(
pipeline=trainable1,
batches_train=[first_batch],
batches_valid=None,
scoring=None,
unique_class_labels=unique_class_labels,
max_resident=None,
prio=PrioBatch(),
partial_transform=False,
verbose=0,
progress_callback=None,
)
prefix2 = trained1.remove_last().freeze_trained()
suffix2 = trained1.get_last()
trainable2 = prefix2 >> suffix2
assert isinstance(trainable2, TrainedPipeline)
progress_callback = _BatchTestingCallback()
_ = fit_with_batches(
pipeline=trainable2,
batches_train=batches,
batches_valid=None,
scoring=rasl_get_scorer("accuracy"),
unique_class_labels=unique_class_labels,
max_resident=None,
prio=PrioBatch(),
partial_transform="score",
verbose=0,
progress_callback=progress_callback,
)
self.assertEqual(progress_callback.n_calls, n_batches - 1)
def test_cross_val_score_accuracy(self):
X, y, _ = self.creditg
n_splits = 3
for n_batches in [1, 3]:
with self.assertWarnsRegex(DeprecationWarning, "trainable operator"):
sk_scores = sk_cross_val_score(
estimator=self._make_sk_trainable("rfc"),
X=X,
y=y,
scoring=make_scorer(sk_accuracy_score),
cv=_BatchTestingKFold(n_batches, n_splits),
)
rasl_scores = rasl_cross_val_score(
pipeline=self._make_rasl_trainable("rfc"),
batches=mockup_data_loader(X, y, n_batches, "pandas"),
scoring=rasl_get_scorer("accuracy"),
cv=KFold(n_splits),
unique_class_labels=list(y.unique()),
max_resident=None,
prio=PrioBatch(),
same_fold=True,
verbose=0,
)
for sk_s, rasl_s in zip(sk_scores, rasl_scores):
self.assertAlmostEqual(sk_s, rasl_s, msg=n_batches)
def test_cross_val_score_disparate_impact(self):
X, y, fairness_info = self.creditg
disparate_impact_scorer = lale.lib.aif360.disparate_impact(**fairness_info)
n_splits = 3
for n_batches in [1, 3]:
with self.assertWarnsRegex(DeprecationWarning, "trainable operator"):
sk_scores = sk_cross_val_score(
estimator=self._make_sk_trainable("rfc"),
X=X,
y=y,
scoring=disparate_impact_scorer,
cv=_BatchTestingKFold(n_batches, n_splits),
)
rasl_scores = rasl_cross_val_score(
pipeline=self._make_rasl_trainable("rfc"),
batches=mockup_data_loader(X, y, n_batches, "pandas"),
scoring=disparate_impact_scorer,
cv=KFold(n_splits),
unique_class_labels=list(y.unique()),
max_resident=None,
prio=PrioBatch(),
same_fold=True,
verbose=0,
)
for sk_s, rasl_s in zip(sk_scores, rasl_scores):
self.assertAlmostEqual(sk_s, rasl_s, msg=n_batches)
def test_cross_validate(self):
X, y, _ = self.creditg
n_splits = 3
for n_batches in [1, 3]:
with self.assertWarnsRegex(DeprecationWarning, "trainable operator"):
sk_scr = sk_cross_validate(
estimator=self._make_sk_trainable("rfc"),
X=X,
y=y,
scoring=make_scorer(sk_accuracy_score),
cv=_BatchTestingKFold(n_batches, n_splits),
return_estimator=True,
)
rasl_scr = rasl_cross_validate(
pipeline=self._make_rasl_trainable("rfc"),
batches=mockup_data_loader(X, y, n_batches, "pandas"),
scoring=rasl_get_scorer("accuracy"),
cv=KFold(n_splits),
unique_class_labels=list(y.unique()),
max_resident=None,
prio=PrioBatch(),
same_fold=True,
return_estimator=True,
verbose=0,
)
for sk_s, rasl_s in zip(sk_scr["test_score"], rasl_scr["test_score"]):
self.assertAlmostEqual(cast(float, sk_s), cast(float, rasl_s))
for sk_e, rasl_e in zip(sk_scr["estimator"], rasl_scr["estimator"]):
_check_trained_ordinal_encoder(
self,
sk_e.steps[0][1],
cast(TrainedPipeline, rasl_e).steps[0][1].impl,
n_batches,
)
_check_trained_min_max_scaler(
self,
sk_e.steps[1][1],
cast(TrainedPipeline, rasl_e).steps[1][1].impl,
n_batches,
)
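# Same task-graph checks, but with a pipeline that splits the credit-g columns into
# a categorical and a numeric branch and recombines them with ConcatFeatures,
# mirroring an sklearn ColumnTransformer.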
class TestTaskGraphsWithConcat(unittest.TestCase):
@classmethod
def setUpClass(cls):
(train_X, train_y), (test_X, test_y) = lale.datasets.openml.fetch(
"credit-g", "classification", preprocess=False
)
cls.cat_columns = [
"checking_status",
"credit_history",
"purpose",
"savings_status",
"employment",
"personal_status",
"other_parties",
"property_magnitude",
"other_payment_plans",
"housing",
"job",
"own_telephone",
"foreign_worker",
]
cls.num_columns = [
"duration",
"credit_amount",
"installment_commitment",
"residence_since",
"age",
"existing_credits",
"num_dependents",
]
cls.creditg = (train_X, train_y), (test_X, test_y)
@classmethod
def _make_sk_trainable(cls, final_est):
from sklearn.compose import ColumnTransformer as SkColumnTransformer
from sklearn.ensemble import RandomForestClassifier as SkRandomForestClassifier
from sklearn.linear_model import SGDClassifier as SkSGDClassifier
if final_est == "sgd":
est = SkSGDClassifier(random_state=97)
elif final_est == "rfc":
est = SkRandomForestClassifier(random_state=97)
else:
assert False, final_est
return sk_make_pipeline(
SkColumnTransformer(
[
(
"prep_cat",
SkOrdinalEncoder(
handle_unknown="use_encoded_value", unknown_value=-1
),
cls.cat_columns,
),
(
"prep_num",
sk_make_pipeline(
SkSimpleImputer(strategy="mean"),
SkMinMaxScaler(),
"passthrough",
),
cls.num_columns,
),
]
),
est,
)
@classmethod
def _make_rasl_trainable(cls, final_est):
if final_est == "sgd":
est = SGDClassifier(random_state=97)
elif final_est == "rfc":
est = RandomForestClassifier(random_state=97)
else:
assert False, final_est
return (
(
(
Project(columns=cls.cat_columns)
>> RaslOrdinalEncoder(
handle_unknown="use_encoded_value", unknown_value=-1
)
)
& (
Project(columns=cls.num_columns)
>> RaslSimpleImputer(strategy="mean")
>> RaslMinMaxScaler()
)
)
>> ConcatFeatures()
>> est
)
def test_fit_no_batching(self):
(train_X, train_y), _ = self.creditg
sk_trainable = self._make_sk_trainable("sgd")
sk_trained = sk_trainable.fit(train_X, train_y)
rasl_trainable = self._make_rasl_trainable("sgd")
rasl_trained = rasl_trainable.fit(train_X, train_y)
_check_trained_ordinal_encoder(
self,
sk_trained.steps[0][1].transformers_[0][1],
rasl_trained.steps[1][1].impl,
"pandas",
)
_check_trained_min_max_scaler(
self,
sk_trained.steps[0][1].transformers_[1][1].steps[1][1],
rasl_trained.steps[4][1].impl,
"pandas",
)
def test_fit_batching(self):
(train_X, train_y), _ = self.creditg
train_data_space = train_X.memory_usage().sum() + train_y.memory_usage()
sk_trainable = self._make_sk_trainable("sgd")
sk_trained = sk_trainable.fit(train_X, train_y)
unique_class_labels = list(train_y.unique())
for n_batches in [1, 3]:
for prio in [PrioStep(), PrioBatch()]:
batches = create_data_loader(
train_X, train_y, math.ceil(len(train_y) / n_batches)
)
self.assertEqual(n_batches, len(batches))
rasl_trainable = self._make_rasl_trainable("sgd")
rasl_trained = fit_with_batches(
pipeline=rasl_trainable,
batches_train=batches, # type: ignore
batches_valid=None,
scoring=None,
unique_class_labels=unique_class_labels,
max_resident=3 * math.ceil(train_data_space / n_batches),
prio=prio,
partial_transform=False,
verbose=0,
progress_callback=None,
)
_check_trained_ordinal_encoder(
self,
sk_trained.steps[0][1].transformers_[0][1],
rasl_trained.steps[1][1].impl,
(n_batches, type(prio)),
)
_check_trained_min_max_scaler(
self,
sk_trained.steps[0][1].transformers_[1][1].steps[1][1],
rasl_trained.steps[4][1].impl,
(n_batches, type(prio)),
)
def test_cross_val_score(self):
(X, y), _ = self.creditg
sk_scores = sk_cross_val_score(
estimator=self._make_sk_trainable("rfc"),
X=X,
y=y,
scoring=make_scorer(sk_accuracy_score),
cv=KFold(3),
)
for n_batches in [1, 3]:
rasl_scores = rasl_cross_val_score(
pipeline=self._make_rasl_trainable("rfc"),
batches=mockup_data_loader(X, y, n_batches, "pandas"),
scoring=rasl_get_scorer("accuracy"),
cv=KFold(3),
unique_class_labels=list(y.unique()),
max_resident=None,
prio=PrioBatch(),
same_fold=True,
verbose=0,
)
if n_batches == 1:
for sk_s, rasl_s in zip(sk_scores, rasl_scores):
self.assertAlmostEqual(sk_s, rasl_s)
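# Variant of the previous concat tests that selects the categorical branch with the
# categorical(11) predicate (dropping numeric columns) and the numeric branch with a
# type-based projection, instead of explicit column lists.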
class TestTaskGraphsWithCategoricalConcat(unittest.TestCase):
@classmethod
def setUpClass(cls):
(train_X, train_y), (test_X, test_y) = lale.datasets.openml.fetch(
"credit-g", "classification", preprocess=False
)
cls.cat_columns = [
"checking_status",
"credit_history",
"purpose",
"savings_status",
"employment",
"personal_status",
"other_parties",
"property_magnitude",
"other_payment_plans",
"housing",
"job",
"own_telephone",
"foreign_worker",
]
cls.num_columns = [
"duration",
"credit_amount",
"installment_commitment",
"residence_since",
"age",
"existing_credits",
"num_dependents",
]
cls.creditg = (train_X, train_y), (test_X, test_y)
@classmethod
def _make_sk_trainable(cls, final_est):
from sklearn.compose import ColumnTransformer as SkColumnTransformer
from sklearn.ensemble import RandomForestClassifier as SkRandomForestClassifier
from sklearn.linear_model import SGDClassifier as SkSGDClassifier
if final_est == "sgd":
est = SkSGDClassifier(random_state=97)
elif final_est == "rfc":
est = SkRandomForestClassifier(random_state=97)
else:
assert False, final_est
return sk_make_pipeline(
SkColumnTransformer(
[
(
"prep_cat",
SkOrdinalEncoder(
handle_unknown="use_encoded_value", unknown_value=-1
),
cls.cat_columns,
),
(
"prep_num",
sk_make_pipeline(
SkSimpleImputer(strategy="mean"),
SkMinMaxScaler(),
"passthrough",
),
cls.num_columns,
),
]
),
est,
)
@classmethod
def _make_rasl_trainable(cls, final_est):
if final_est == "sgd":
est = SGDClassifier(random_state=97)
elif final_est == "rfc":
est = RandomForestClassifier(random_state=97)
else:
assert False, final_est
return (
(
(
Project(columns=categorical(11), drop_columns={"type": "number"})
>> RaslOrdinalEncoder(
handle_unknown="use_encoded_value", unknown_value=-1
)
)
& (
Project(columns={"type": "number"})
>> RaslSimpleImputer(strategy="mean")
>> RaslMinMaxScaler()
)
)
>> ConcatFeatures()
>> est
)
def test_fit_no_batching(self):
(train_X, train_y), _ = self.creditg
sk_trainable = self._make_sk_trainable("sgd")
sk_trained = sk_trainable.fit(train_X, train_y)
rasl_trainable = self._make_rasl_trainable("sgd")
rasl_trained = rasl_trainable.fit(train_X, train_y)
_check_trained_ordinal_encoder(
self,
sk_trained.steps[0][1].transformers_[0][1],
rasl_trained.steps[1][1].impl,
"pandas",
)
_check_trained_min_max_scaler(
self,
sk_trained.steps[0][1].transformers_[1][1].steps[1][1],
rasl_trained.steps[4][1].impl,
"pandas",
)
def test_fit_batching(self):
(train_X, train_y), _ = self.creditg
train_data_space = train_X.memory_usage().sum() + train_y.memory_usage()
sk_trainable = self._make_sk_trainable("sgd")
sk_trained = sk_trainable.fit(train_X, train_y)
unique_class_labels = list(train_y.unique())
for n_batches in [1, 3]:
for prio in [PrioStep(), PrioBatch()]:
batches = create_data_loader(
train_X, train_y, math.ceil(len(train_y) / n_batches)
)
self.assertEqual(n_batches, len(batches))
rasl_trainable = self._make_rasl_trainable("sgd")
rasl_trained = fit_with_batches(
pipeline=rasl_trainable,
batches_train=batches, # type: ignore
batches_valid=None,
scoring=None,
unique_class_labels=unique_class_labels,
max_resident=3 * math.ceil(train_data_space / n_batches),
prio=prio,
partial_transform=False,
verbose=0,
progress_callback=None,
)
_check_trained_ordinal_encoder(
self,
sk_trained.steps[0][1].transformers_[0][1],
rasl_trained.steps[1][1].impl,
(n_batches, type(prio)),
)
_check_trained_min_max_scaler(
self,
sk_trained.steps[0][1].transformers_[1][1].steps[1][1],
rasl_trained.steps[4][1].impl,
(n_batches, type(prio)),
)
def test_cross_val_score(self):
(X, y), _ = self.creditg
sk_scores = sk_cross_val_score(
estimator=self._make_sk_trainable("rfc"),
X=X,
y=y,
scoring=make_scorer(sk_accuracy_score),
cv=KFold(3),
)
for n_batches in [1, 3]:
rasl_scores = rasl_cross_val_score(
pipeline=self._make_rasl_trainable("rfc"),
batches=mockup_data_loader(X, y, n_batches, "pandas"),
scoring=rasl_get_scorer("accuracy"),
cv=KFold(3),
unique_class_labels=list(y.unique()),
max_resident=None,
prio=PrioBatch(),
same_fold=True,
verbose=0,
)
if n_batches == 1:
for sk_s, rasl_s in zip(sk_scores, rasl_scores):
self.assertAlmostEqual(sk_s, rasl_s)
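# Task-graph tests where the batches may be Spark DataFrames; the RASL prefix runs
# on the chosen backend and the data is converted back to pandas before the final
# RandomForestClassifier.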
class TestTaskGraphsSpark(unittest.TestCase):
@classmethod
def setUpClass(cls):
X, y, _ = lale.lib.aif360.fetch_creditg_df(preprocess=False)
X = Project(columns=categorical()).fit(X).transform(X)
cls.creditg = X, y
@classmethod
def _make_sk_trainable(cls):
return sk_make_pipeline(
SkOrdinalEncoder(handle_unknown="use_encoded_value", unknown_value=-1),
SkMinMaxScaler(),
Convert(astype="pandas"),
RandomForestClassifier(random_state=97),
)
@classmethod
def _make_rasl_trainable(cls):
return (
RaslOrdinalEncoder(handle_unknown="use_encoded_value", unknown_value=-1)
>> RaslMinMaxScaler()
>> Convert(astype="pandas")
>> RandomForestClassifier(random_state=97)
)
def test_fit_with_batches(self):
train_X, train_y = self.creditg
sk_trainable = self._make_sk_trainable()
sk_trained = sk_trainable.fit(train_X, train_y)
unique_class_labels = list(train_y.unique())
datatype_list: List[datatype_param_type] = ["pandas", "spark"]
for tgt, n_batches in itertools.product(datatype_list, [1, 3]):
rasl_trained = fit_with_batches(
pipeline=self._make_rasl_trainable(),
batches_train=mockup_data_loader(train_X, train_y, n_batches, tgt),
batches_valid=None,
scoring=None,
unique_class_labels=unique_class_labels,
max_resident=None,
prio=PrioBatch(),
partial_transform=False,
verbose=0,
progress_callback=None,
)
_check_trained_ordinal_encoder(
self, sk_trained.steps[0][1], rasl_trained.steps[0][1].impl, tgt
)
_check_trained_min_max_scaler(
self, sk_trained.steps[1][1], rasl_trained.steps[1][1].impl, tgt
)
def test_cross_val_score(self):
X, y = self.creditg
with self.assertWarnsRegex(DeprecationWarning, "trainable operator"):
sk_scores = sk_cross_val_score(
estimator=self._make_sk_trainable(),
X=X,
y=y,
scoring=make_scorer(sk_accuracy_score),
cv=KFold(3),
)
unique_class_labels = list(y.unique())
datatype_list: List[datatype_param_type] = ["pandas", "spark"]
for tgt, n_batches in itertools.product(datatype_list, [1, 3]):
rasl_scores = rasl_cross_val_score(
pipeline=self._make_rasl_trainable(),
batches=mockup_data_loader(X, y, n_batches, tgt),
scoring=rasl_get_scorer("accuracy"),
cv=KFold(3),
unique_class_labels=unique_class_labels,
max_resident=None,
prio=PrioBatch(),
same_fold=True,
verbose=0,
)
if n_batches == 1:
for sk_s, rasl_s in zip(sk_scores, rasl_scores):
self.assertAlmostEqual(sk_s, rasl_s, msg=(tgt, n_batches))
def test_cross_validate(self):
X, y = self.creditg
with self.assertWarnsRegex(DeprecationWarning, "trainable operator"):
sk_scores = sk_cross_validate(
estimator=self._make_sk_trainable(),
X=X,
y=y,
scoring=make_scorer(sk_accuracy_score),
cv=KFold(3),
return_estimator=True,
)
unique_class_labels = list(y.unique())
datatype_list: List[datatype_param_type] = ["pandas", "spark"]
for tgt, n_batches in itertools.product(datatype_list, [1, 3]):
rasl_scores = rasl_cross_validate(
pipeline=self._make_rasl_trainable(),
batches=mockup_data_loader(X, y, n_batches, tgt),
scoring=rasl_get_scorer("accuracy"),
cv=KFold(3),
unique_class_labels=unique_class_labels,
max_resident=None,
prio=PrioBatch(),
same_fold=True,
return_estimator=True,
verbose=0,
)
msg = tgt, n_batches
if n_batches == 1:
for sk_s, rasl_s in zip(
sk_scores["test_score"], rasl_scores["test_score"]
):
self.assertAlmostEqual(
cast(float, sk_s), cast(float, rasl_s), msg=msg
)
for sk_e, rasl_e in zip(
sk_scores["estimator"], rasl_scores["estimator"]
):
rasl_steps = cast(TrainedPipeline, rasl_e).steps
_check_trained_ordinal_encoder(
self,
sk_e.steps[0][1],
rasl_steps[0][1].impl,
msg=msg,
)
_check_trained_min_max_scaler(
self,
sk_e.steps[1][1],
rasl_steps[1][1].impl,
msg=msg,
)
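# Each metric test checks three things: the RASL metric function matches the sklearn
# metric, the RASL scorer applied to the trained estimator matches it as well, and
# score_estimator_batched over three mockup batches yields the same value.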
class TestMetrics(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.creditg = lale.datasets.openml.fetch(
"credit-g", "classification", preprocess=True, astype="pandas"
)
def test_accuracy(self):
(train_X, train_y), (test_X, test_y) = self.creditg
est = LogisticRegression().fit(train_X, train_y)
y_pred = est.predict(test_X)
sk_score = sk_accuracy_score(test_y, y_pred)
self.assertAlmostEqual(sk_score, rasl_accuracy_score(test_y, y_pred))
rasl_scorer = rasl_get_scorer("accuracy")
self.assertAlmostEqual(sk_score, rasl_scorer(est, test_X, test_y))
batches = mockup_data_loader(test_X, test_y, 3, "pandas")
self.assertAlmostEqual(
sk_score, rasl_scorer.score_estimator_batched(est, batches)
)
def test_balanced_accuracy(self):
(train_X, train_y), (test_X, test_y) = self.creditg
est = LogisticRegression().fit(train_X, train_y)
y_pred = est.predict(test_X)
sk_score = sk_balanced_accuracy_score(test_y, y_pred)
self.assertAlmostEqual(sk_score, rasl_balanced_accuracy_score(test_y, y_pred))
rasl_scorer = rasl_get_scorer("balanced_accuracy")
self.assertAlmostEqual(sk_score, rasl_scorer(est, test_X, test_y))
batches = mockup_data_loader(test_X, test_y, 3, "pandas")
self.assertAlmostEqual(
sk_score, rasl_scorer.score_estimator_batched(est, batches)
)
def test_f1(self):
(train_X, train_y), (test_X, test_y) = self.creditg
est = LogisticRegression().fit(train_X, train_y)
y_pred = est.predict(test_X)
sk_score = sk_f1_score(test_y, y_pred, pos_label=1)
self.assertAlmostEqual(sk_score, rasl_f1_score(test_y, y_pred, pos_label=1))
rasl_scorer = rasl_get_scorer("f1", pos_label=1)
self.assertAlmostEqual(sk_score, rasl_scorer(est, test_X, test_y))
batches = mockup_data_loader(test_X, test_y, 3, "pandas")
self.assertAlmostEqual(
sk_score, rasl_scorer.score_estimator_batched(est, batches)
)
def test_r2_score(self):
(train_X, train_y), (test_X, test_y) = self.creditg
est = LogisticRegression().fit(train_X, train_y)
y_pred = est.predict(test_X)
sk_score = sk_r2_score(test_y, y_pred)
self.assertAlmostEqual(sk_score, rasl_r2_score(test_y, y_pred))
rasl_scorer = rasl_get_scorer("r2")
self.assertAlmostEqual(sk_score, rasl_scorer(est, test_X, test_y))
batches = mockup_data_loader(test_X, test_y, 3, "pandas")
self.assertAlmostEqual(
sk_score, rasl_scorer.score_estimator_batched(est, batches)
)
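# BatchedBaggingClassifier tests: schema validity, basic fit/predict, Hyperopt and
# GridSearchCV integration, cross-validation, and (for a single batch) equivalence
# of batched fitting with an ordinary sklearn decision-tree pipeline.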
class TestBatchedBaggingClassifier(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.creditg = lale.datasets.openml.fetch(
"credit-g", "classification", preprocess=False, astype="pandas"
)
@classmethod
def _make_sk_trainable(cls):
from sklearn.tree import DecisionTreeClassifier as SkDecisionTreeClassifier
return sk_make_pipeline(
SkOrdinalEncoder(handle_unknown="use_encoded_value", unknown_value=-1),
SkMinMaxScaler(),
SkDecisionTreeClassifier(random_state=97, max_features="auto"),
)
@classmethod
def _make_rasl_trainable(cls, final_est):
if final_est == "bagging_monoid":
est = BatchedBaggingClassifier(
base_estimator=DecisionTreeClassifier(
random_state=97, max_features="auto"
)
)
else:
assert False, final_est
return (
RaslOrdinalEncoder(handle_unknown="use_encoded_value", unknown_value=-1)
>> RaslMinMaxScaler()
>> est
)
def test_classifier(self):
(X_train, y_train), (X_test, _y_test) = self.creditg
import warnings
clf = BatchedBaggingClassifier()
# test_schemas_are_schemas
lale.type_checking.validate_is_schema(clf.input_schema_fit())
lale.type_checking.validate_is_schema(clf.input_schema_predict())
lale.type_checking.validate_is_schema(clf.output_schema_predict())
lale.type_checking.validate_is_schema(clf.hyperparam_schema())
# test_init_fit_predict
pipeline = self._make_rasl_trainable("bagging_monoid")
trained = pipeline.fit(X_train, y_train)
_ = trained.predict(X_test)
# test_with_hyperopt
from lale.lib.lale import Hyperopt
(X_train, y_train), (X_test, _) = lale.datasets.openml.fetch(
"credit-g", "classification", preprocess=True, astype="pandas"
)
hyperopt = Hyperopt(estimator=pipeline, max_evals=1, verbose=True)
trained = hyperopt.fit(X_train, y_train)
_ = trained.predict(X_test)
# test_cross_validation
from lale.helpers import cross_val_score
cv_results = cross_val_score(pipeline, X_train, y_train, cv=2)
self.assertEqual(len(cv_results), 2)
# test_with_gridsearchcv_auto_wrapped
with warnings.catch_warnings():
warnings.simplefilter("ignore")
from lale.lib.lale import GridSearchCV
grid_search = GridSearchCV(
estimator=pipeline,
lale_num_samples=1,
lale_num_grids=1,
cv=2,
scoring=make_scorer(sk_accuracy_score),
)
grid_search.fit(X_train, y_train)
# test_predict_on_trainable
trained = clf.fit(X_train, y_train)
clf.predict(X_train)
# test_to_json
clf.to_json()
def test_fit_batching(self):
(train_X, train_y), (test_X, test_y) = self.creditg
train_data_space = train_X.memory_usage().sum() + train_y.memory_usage()
unique_class_labels = list(train_y.unique())
for n_batches in [1, 3]:
for prio in [PrioStep(), PrioBatch()]:
batches = mockup_data_loader(train_X, train_y, n_batches, "pandas")
rasl_trainable = self._make_rasl_trainable("bagging_monoid")
rasl_trained = fit_with_batches(
pipeline=rasl_trainable,
batches_train=batches,
batches_valid=None,
scoring=None,
unique_class_labels=unique_class_labels,
max_resident=3 * math.ceil(train_data_space / n_batches),
prio=prio,
partial_transform=False,
verbose=0,
progress_callback=None,
)
predictions = rasl_trained.predict(test_X)
rasl_acc = sk_accuracy_score(test_y, predictions)
if n_batches == 1:
sk_pipeline = self._make_sk_trainable()
sk_pipeline.fit(train_X, train_y)
predictions = sk_pipeline.predict(test_X)
sk_acc = sk_accuracy_score(test_y, predictions)
self.assertEqual(rasl_acc, sk_acc)
def test_cross_val_score(self):
(train_X, train_y), (_, _) = self.creditg
for n_batches in [1, 3]:
_ = rasl_cross_val_score(
pipeline=self._make_rasl_trainable("bagging_monoid"),
batches=mockup_data_loader(train_X, train_y, n_batches, "pandas"),
scoring=rasl_get_scorer("accuracy"),
cv=KFold(3),
unique_class_labels=list(train_y.unique()),
max_resident=None,
prio=PrioBatch(),
same_fold=True,
verbose=0,
)
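# Smoke tests: the XGBoost (and, below, LightGBM) wrappers should support
# incremental training via partial_fit over mockup batches.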
class TestXGBoost(unittest.TestCase):
def test_partial_fit_xgb_classifier(self):
X, y = sklearn.datasets.load_iris(return_X_y=True, as_frame=True)
est = XGBClassifier(verbosity=0)
for bX, by in mockup_data_loader(X, y, 3, "pandas"):
est = est.partial_fit(bX, by)
_ = est.predict(bX)
def test_partial_fit_xgb_regressor(self):
X, y = sklearn.datasets.load_diabetes(return_X_y=True, as_frame=True)
est = XGBRegressor(verbosity=0)
for bX, by in mockup_data_loader(X, y, 3, "pandas"):
est = est.partial_fit(bX, by)
_ = est.predict(bX)
class TestLightGBM(unittest.TestCase):
def test_partial_fit_lgbm_classifier(self):
X, y = sklearn.datasets.load_iris(return_X_y=True, as_frame=True)
est = LGBMClassifier()
for bX, by in mockup_data_loader(X, y, 3, "pandas"):
est = est.partial_fit(bX, by)
_ = est.predict(bX)
def test_partial_fit_lgbm_regressor(self):
X, y = sklearn.datasets.load_diabetes(return_X_y=True, as_frame=True)
est = LGBMRegressor()
for bX, by in mockup_data_loader(X, y, 3, "pandas"):
est = est.partial_fit(bX, by)
_ = est.predict(bX)
| 103,433 | 42.29594 | 162 |
py
|
lale
|
lale-master/test/test_json_pretty_viz.py
|
# Copyright 2019-2023 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import lale.operators
import lale.pretty_print
class TestToGraphviz(unittest.TestCase):
def test_with_operator_choice(self):
from lale.lib.lale import NoOp
from lale.lib.sklearn import (
PCA,
KNeighborsClassifier,
LogisticRegression,
Nystroem,
)
from lale.operators import make_choice
kernel_tfm_or_not = NoOp | Nystroem
tfm = PCA
clf = make_choice(LogisticRegression, KNeighborsClassifier)
clf.visualize(ipython_display=False)
optimizable = kernel_tfm_or_not >> tfm >> clf
optimizable.visualize(ipython_display=False)
def test_invalid_input(self):
from sklearn.linear_model import LogisticRegression as SklearnLR
scikit_lr = SklearnLR()
from lale.helpers import to_graphviz
with self.assertRaises(TypeError):
to_graphviz(scikit_lr)
class TestPrettyPrint(unittest.TestCase):
# pylint:disable=reimported,redefined-outer-name
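    # Round-trip check: compare the pretty-printed pipeline source with the expected
    # string (ignoring sklearn_version_family), then exec the printed code and verify
    # that it rebuilds a lale PlannedOperator or sklearn Pipeline named `pipeline`.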
def _roundtrip(self, expected, printed):
self.maxDiff = None
        # sklearn_version_family depends on both the Python and the sklearn version,
        # so remove that hyperparameter, if present, before comparing.
import re
expected = re.sub(r"""sklearn_version_family=.\d*.,""", "", expected)
printed = re.sub(r"""sklearn_version_family=.\d*.,""", "", printed)
self.assertEqual(expected, printed)
globals2 = {}
locals2 = {}
try:
exec(printed, globals2, locals2)
except Exception as e:
import pprint
print("error during exec(printed, globals2, locals2) where:")
print(f'printed = """{printed}"""')
print(f"globals2 = {pprint.pformat(globals2)}")
print(f"locals2 = {pprint.pformat(locals2)}")
raise e
pipeline2 = locals2["pipeline"]
import sklearn.pipeline
self.assertIsInstance(
pipeline2, (lale.operators.PlannedOperator, sklearn.pipeline.Pipeline)
)
def test_distance_threshold_validation_error(self):
import jsonschema
from lale.lib.sklearn import FeatureAgglomeration, LogisticRegression
with self.assertRaises(jsonschema.ValidationError):
_ = (
FeatureAgglomeration(
distance_threshold=0.5, n_clusters=None, compute_full_tree=True
)
>> LogisticRegression()
)
def test_indiv_op_1(self):
from lale.lib.sklearn import LogisticRegression
pipeline = LogisticRegression(solver=LogisticRegression.enum.solver.saga, C=0.9)
expected = """from sklearn.linear_model import LogisticRegression
import lale
lale.wrap_imported_operators()
pipeline = LogisticRegression(solver="saga", C=0.9)"""
self._roundtrip(expected, lale.pretty_print.to_string(pipeline))
def test_indiv_op_2(self):
from lale.lib.sklearn import LogisticRegression
pipeline = LogisticRegression()
expected = """from sklearn.linear_model import LogisticRegression
import lale
lale.wrap_imported_operators()
pipeline = LogisticRegression()"""
self._roundtrip(expected, lale.pretty_print.to_string(pipeline))
def test_reducible(self):
from lale.lib.lale import NoOp
from lale.lib.rasl import ConcatFeatures
from lale.lib.sklearn import (
PCA,
KNeighborsClassifier,
LogisticRegression,
MinMaxScaler,
Nystroem,
)
from lale.lib.xgboost import XGBClassifier as XGB
pca = PCA(copy=False)
logistic_regression = LogisticRegression(solver="saga", C=0.9)
pipeline = (
(MinMaxScaler | NoOp)
>> (pca & Nystroem)
>> ConcatFeatures
>> (KNeighborsClassifier | logistic_regression | XGB)
)
expected = """from sklearn.preprocessing import MinMaxScaler
from lale.lib.lale import NoOp
from sklearn.decomposition import PCA
from sklearn.kernel_approximation import Nystroem
from lale.lib.rasl import ConcatFeatures
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
from xgboost import XGBClassifier as XGB
import lale
lale.wrap_imported_operators()
pca = PCA(copy=False)
logistic_regression = LogisticRegression(solver="saga", C=0.9)
pipeline = (
(MinMaxScaler | NoOp)
>> (pca & Nystroem)
>> ConcatFeatures
>> (KNeighborsClassifier | logistic_regression | XGB)
)"""
self._roundtrip(expected, lale.pretty_print.to_string(pipeline))
def test_no_combinators(self):
from lale.lib.lale import NoOp
from lale.lib.rasl import ConcatFeatures
from lale.lib.sklearn import (
PCA,
KNeighborsClassifier,
LogisticRegression,
MinMaxScaler,
Nystroem,
)
pca = PCA(copy=False)
logistic_regression = LogisticRegression(solver="saga", C=0.9)
pipeline = (
(MinMaxScaler | NoOp)
>> (pca & Nystroem & NoOp)
>> ConcatFeatures
>> (KNeighborsClassifier | logistic_regression)
)
expected = """from sklearn.preprocessing import MinMaxScaler
from lale.lib.lale import NoOp
from lale.operators import make_choice
from sklearn.decomposition import PCA
from sklearn.kernel_approximation import Nystroem
from lale.operators import make_union
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
from lale.operators import make_pipeline
choice_0 = make_choice(MinMaxScaler, NoOp)
pca = PCA(copy=False)
union = make_union(pca, Nystroem, NoOp)
logistic_regression = LogisticRegression(solver="saga", C=0.9)
choice_1 = make_choice(KNeighborsClassifier, logistic_regression)
pipeline = make_pipeline(choice_0, union, choice_1)"""
printed = lale.pretty_print.to_string(pipeline, combinators=False)
self._roundtrip(expected, printed)
def test_astype_sklearn(self):
from lale.lib.rasl import ConcatFeatures
from lale.lib.sklearn import PCA, LogisticRegression, MinMaxScaler, Nystroem
pca = PCA(copy=False)
logistic_regression = LogisticRegression(solver="saga", C=0.9)
pipeline = (
MinMaxScaler()
>> (pca & Nystroem())
>> ConcatFeatures
>> logistic_regression
)
expected = """from sklearn.preprocessing import MinMaxScaler
from sklearn.decomposition import PCA
from sklearn.kernel_approximation import Nystroem
from sklearn.pipeline import make_union
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import make_pipeline
pca = PCA(copy=False)
union = make_union(pca, Nystroem())
logistic_regression = LogisticRegression(solver="saga", C=0.9)
pipeline = make_pipeline(MinMaxScaler(), union, logistic_regression)"""
printed = lale.pretty_print.to_string(pipeline, astype="sklearn")
self._roundtrip(expected, printed)
def test_import_as_1(self):
from lale.lib.sklearn import LogisticRegression as LR
pipeline = LR(solver="saga", C=0.9)
expected = """from sklearn.linear_model import LogisticRegression as LR
import lale
lale.wrap_imported_operators()
pipeline = LR(solver="saga", C=0.9)"""
self._roundtrip(expected, lale.pretty_print.to_string(pipeline))
def test_import_as_2(self):
from lale.lib.lale import NoOp
from lale.lib.rasl import ConcatFeatures as Concat
from lale.lib.sklearn import PCA
from lale.lib.sklearn import KNeighborsClassifier as KNN
from lale.lib.sklearn import LogisticRegression as LR
from lale.lib.sklearn import MinMaxScaler as Scaler
from lale.lib.sklearn import Nystroem
pca = PCA(copy=False)
lr = LR(solver="saga", C=0.9)
pipeline = (Scaler | NoOp) >> (pca & Nystroem) >> Concat >> (KNN | lr)
expected = """from sklearn.preprocessing import MinMaxScaler as Scaler
from lale.lib.lale import NoOp
from sklearn.decomposition import PCA
from sklearn.kernel_approximation import Nystroem
from lale.lib.rasl import ConcatFeatures as Concat
from sklearn.neighbors import KNeighborsClassifier as KNN
from sklearn.linear_model import LogisticRegression as LR
import lale
lale.wrap_imported_operators()
pca = PCA(copy=False)
lr = LR(solver="saga", C=0.9)
pipeline = (Scaler | NoOp) >> (pca & Nystroem) >> Concat >> (KNN | lr)"""
self._roundtrip(expected, lale.pretty_print.to_string(pipeline))
def test_operator_choice(self):
from lale.lib.sklearn import PCA
from lale.lib.sklearn import MinMaxScaler as Scl
pipeline = PCA | Scl
expected = """from sklearn.decomposition import PCA
from sklearn.preprocessing import MinMaxScaler as Scl
import lale
lale.wrap_imported_operators()
pipeline = PCA | Scl"""
self._roundtrip(expected, lale.pretty_print.to_string(pipeline))
def test_higher_order(self):
from lale.lib.lale import Both
from lale.lib.sklearn import PCA, Nystroem
pipeline = Both(op1=PCA(n_components=2), op2=Nystroem)
expected = """from lale.lib.lale import Both
from sklearn.decomposition import PCA
from sklearn.kernel_approximation import Nystroem
import lale
lale.wrap_imported_operators()
pca = PCA(n_components=2)
pipeline = Both(op1=pca, op2=Nystroem)"""
self._roundtrip(expected, lale.pretty_print.to_string(pipeline))
def test_higher_order_2(self):
from lale.lib.sklearn import PCA
from lale.lib.sklearn import KNeighborsClassifier as KNN
from lale.lib.sklearn import LogisticRegression as LR
from lale.lib.sklearn import VotingClassifier as Vote
pipeline = Vote(
estimators=[("knn", KNN), ("pipeline", PCA() >> LR)], voting="soft"
)
expected = """from sklearn.ensemble import VotingClassifier as Vote
from sklearn.neighbors import KNeighborsClassifier as KNN
from sklearn.decomposition import PCA
from sklearn.linear_model import LogisticRegression as LR
import lale
lale.wrap_imported_operators()
pipeline = Vote(
estimators=[("knn", KNN), ("pipeline", PCA() >> LR)], voting="soft"
)"""
self._roundtrip(expected, lale.pretty_print.to_string(pipeline))
def test_multimodal(self):
from lale.lib.rasl import ConcatFeatures as Cat
from lale.lib.rasl import Project
from lale.lib.sklearn import LinearSVC
from lale.lib.sklearn import Normalizer as Norm
from lale.lib.sklearn import OneHotEncoder as OneHot
project_0 = Project(columns={"type": "number"})
project_1 = Project(columns={"type": "string"})
linear_svc = LinearSVC(C=29617.4, dual=False, tol=0.005266)
pipeline = (
((project_0 >> Norm()) & (project_1 >> OneHot())) >> Cat >> linear_svc
)
expected = """from lale.lib.rasl import Project
from sklearn.preprocessing import Normalizer as Norm
from sklearn.preprocessing import OneHotEncoder as OneHot
from lale.lib.rasl import ConcatFeatures as Cat
from sklearn.svm import LinearSVC
import lale
lale.wrap_imported_operators()
project_0 = Project(columns={"type": "number"})
project_1 = Project(columns={"type": "string"})
linear_svc = LinearSVC(C=29617.4, dual=False, tol=0.005266)
pipeline = (
((project_0 >> Norm()) & (project_1 >> OneHot())) >> Cat >> linear_svc
)"""
self._roundtrip(expected, lale.pretty_print.to_string(pipeline))
def test_irreducible_1(self):
from lale.lib.sklearn import (
PCA,
KNeighborsClassifier,
LogisticRegression,
MinMaxScaler,
Nystroem,
)
from lale.operators import make_pipeline_graph
choice = PCA | Nystroem
pipeline = make_pipeline_graph(
steps=[choice, MinMaxScaler, LogisticRegression, KNeighborsClassifier],
edges=[
(choice, LogisticRegression),
(MinMaxScaler, LogisticRegression),
(MinMaxScaler, KNeighborsClassifier),
],
)
expected = """from sklearn.decomposition import PCA
from sklearn.kernel_approximation import Nystroem
from sklearn.preprocessing import MinMaxScaler
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from lale.operators import make_pipeline_graph
import lale
lale.wrap_imported_operators()
choice = PCA | Nystroem
pipeline = make_pipeline_graph(
steps=[choice, MinMaxScaler, LogisticRegression, KNeighborsClassifier],
edges=[
(choice, LogisticRegression),
(MinMaxScaler, LogisticRegression),
(MinMaxScaler, KNeighborsClassifier),
],
)"""
self._roundtrip(expected, lale.pretty_print.to_string(pipeline))
def test_irreducible_2(self):
from lale.lib.rasl import ConcatFeatures as HStack
from lale.lib.sklearn import PCA
from lale.lib.sklearn import KNeighborsClassifier as KNN
from lale.lib.sklearn import LogisticRegression as LR
from lale.lib.sklearn import MinMaxScaler as MMS
from lale.operators import make_pipeline_graph
pipeline_0 = HStack >> LR
pipeline = make_pipeline_graph(
steps=[PCA, MMS, KNN, pipeline_0],
edges=[(PCA, KNN), (PCA, pipeline_0), (MMS, pipeline_0)],
)
expected = """from sklearn.decomposition import PCA
from sklearn.preprocessing import MinMaxScaler as MMS
from sklearn.neighbors import KNeighborsClassifier as KNN
from lale.lib.rasl import ConcatFeatures as HStack
from sklearn.linear_model import LogisticRegression as LR
from lale.operators import make_pipeline_graph
import lale
lale.wrap_imported_operators()
pipeline_0 = HStack >> LR
pipeline = make_pipeline_graph(
steps=[PCA, MMS, KNN, pipeline_0],
edges=[(PCA, KNN), (PCA, pipeline_0), (MMS, pipeline_0)],
)"""
self._roundtrip(expected, lale.pretty_print.to_string(pipeline))
def test_nested(self):
from lale.lib.lale import NoOp
from lale.lib.sklearn import PCA
from lale.lib.sklearn import LogisticRegression as LR
lr_0 = LR(C=0.09)
lr_1 = LR(C=0.19)
pipeline = PCA >> (lr_0 | NoOp >> lr_1)
expected = """from sklearn.decomposition import PCA
from sklearn.linear_model import LogisticRegression as LR
from lale.lib.lale import NoOp
import lale
lale.wrap_imported_operators()
lr_0 = LR(C=0.09)
lr_1 = LR(C=0.19)
pipeline = PCA >> (lr_0 | NoOp >> lr_1)"""
self._roundtrip(expected, lale.pretty_print.to_string(pipeline))
def test_autoai_libs_cat_encoder(self):
import numpy as np
from autoai_libs.transformers.exportable import CatEncoder
from lale.lib.sklearn import LogisticRegression as LR
cat_encoder = CatEncoder(
encoding="ordinal",
categories="auto",
dtype=np.float64,
handle_unknown="error",
)
pipeline = cat_encoder >> LR()
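        # Note: the expected output below also prints sklearn_version_family="23",
        # which was not passed above; the lale wrapper presumably fills it in based
        # on the installed scikit-learn version.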
expected = """from autoai_libs.transformers.exportable import CatEncoder
import numpy as np
from sklearn.linear_model import LogisticRegression as LR
import lale
lale.wrap_imported_operators()
cat_encoder = CatEncoder(
encoding="ordinal",
categories="auto",
dtype=np.float64,
handle_unknown="error",
sklearn_version_family="23",
)
pipeline = cat_encoder >> LR()"""
self._roundtrip(expected, lale.pretty_print.to_string(pipeline))
def test_autoai_libs_cat_encoder_defaults(self):
import numpy as np
from lale.lib.autoai_libs import CatEncoder
from lale.lib.sklearn import LogisticRegression as LR
cat_encoder = CatEncoder(
dtype=np.float64, handle_unknown="error", sklearn_version_family="1"
)
pipeline = cat_encoder >> LR()
expected = """from autoai_libs.transformers.exportable import CatEncoder
import numpy as np
from sklearn.linear_model import LogisticRegression as LR
from sklearn.pipeline import make_pipeline
cat_encoder = CatEncoder(
dtype=np.float64,
handle_unknown="error",
sklearn_version_family="1",
encoding="ordinal",
categories="auto",
)
pipeline = make_pipeline(cat_encoder, LR())"""
self._roundtrip(
expected, lale.pretty_print.to_string(pipeline, astype="sklearn")
)
def test_autoai_libs_fs1(self):
from autoai_libs.cognito.transforms.transform_utils import FS1
from lale.lib.sklearn import LogisticRegression as LR
fs1 = FS1(
cols_ids_must_keep=range(0, 7),
additional_col_count_to_keep=8,
ptype="classification",
)
pipeline = fs1 >> LR()
expected = """from autoai_libs.cognito.transforms.transform_utils import FS1
from sklearn.linear_model import LogisticRegression as LR
import lale
lale.wrap_imported_operators()
fs1 = FS1(
cols_ids_must_keep=range(0, 7),
additional_col_count_to_keep=8,
ptype="classification",
)
pipeline = fs1 >> LR()"""
self._roundtrip(expected, lale.pretty_print.to_string(pipeline))
def test_autoai_libs_numpy_replace_missing_values(self):
from autoai_libs.transformers.exportable import NumpyReplaceMissingValues
from lale.lib.sklearn import LogisticRegression as LR
numpy_replace_missing_values = NumpyReplaceMissingValues(
filling_values=float("nan"), missing_values=["?"]
)
pipeline = numpy_replace_missing_values >> LR()
expected = """from autoai_libs.transformers.exportable import NumpyReplaceMissingValues
from sklearn.linear_model import LogisticRegression as LR
import lale
lale.wrap_imported_operators()
numpy_replace_missing_values = NumpyReplaceMissingValues(
missing_values=["?"], filling_values=float("nan")
)
pipeline = numpy_replace_missing_values >> LR()"""
self._roundtrip(expected, lale.pretty_print.to_string(pipeline))
def test_autoai_libs_numpy_replace_unknown_values1(self):
from autoai_libs.transformers.exportable import NumpyReplaceUnknownValues
from lale.lib.sklearn import LogisticRegression as LR
numpy_replace_unknown_values = NumpyReplaceUnknownValues(
filling_values=float("nan"),
filling_values_list=[float("nan")],
known_values_list=[[36, 45, 56, 67, 68, 75, 78, 89]],
missing_values_reference_list=["", "-", "?", float("nan")],
)
pipeline = numpy_replace_unknown_values >> LR()
expected = """from autoai_libs.transformers.exportable import NumpyReplaceUnknownValues
from sklearn.linear_model import LogisticRegression as LR
import lale
lale.wrap_imported_operators()
numpy_replace_unknown_values = NumpyReplaceUnknownValues(
filling_values=float("nan"),
filling_values_list=[float("nan")],
missing_values_reference_list=["", "-", "?", float("nan")],
)
pipeline = numpy_replace_unknown_values >> LR()"""
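        # Depending on the installed autoai_libs version, the default
        # missing_values_reference_list apparently round-trips as either a list or a
        # tuple, so fall back to a second expected string if the first comparison fails.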
try:
self._roundtrip(expected, lale.pretty_print.to_string(pipeline))
except BaseException:
expected = """from autoai_libs.transformers.exportable import NumpyReplaceUnknownValues
from sklearn.linear_model import LogisticRegression as LR
import lale
lale.wrap_imported_operators()
numpy_replace_unknown_values = NumpyReplaceUnknownValues(
filling_values=float("nan"),
filling_values_list=[float("nan")],
missing_values_reference_list=("", "-", "?", float("nan")),
)
pipeline = numpy_replace_unknown_values >> LR()"""
self._roundtrip(expected, lale.pretty_print.to_string(pipeline))
def test_autoai_libs_numpy_replace_unknown_values2(self):
from lale.lib.autoai_libs import NumpyReplaceUnknownValues
from lale.lib.sklearn import LogisticRegression as LR
CustomOp = NumpyReplaceUnknownValues.customize_schema(
known_values_list={
"anyOf": [
{"type": "array", "items": {"laleType": "Any"}},
{"enum": [None]},
],
"default": None,
}
)
numpy_replace_unknown_values = CustomOp(
filling_values=float("nan"),
filling_values_list=[float("nan")],
known_values_list=[[36, 45, 56, 67, 68, 75, 78, 89]],
missing_values_reference_list=["", "-", "?", float("nan")],
)
pipeline = numpy_replace_unknown_values >> LR()
expected = """from autoai_libs.transformers.exportable import (
NumpyReplaceUnknownValues as CustomOp,
)
from sklearn.linear_model import LogisticRegression as LR
import lale
lale.wrap_imported_operators()
custom_op = CustomOp(
filling_values=float("nan"),
filling_values_list=[float("nan")],
known_values_list=[[36, 45, 56, 67, 68, 75, 78, 89]],
missing_values_reference_list=["", "-", "?", float("nan")],
)
pipeline = custom_op >> LR()"""
try:
self._roundtrip(expected, lale.pretty_print.to_string(pipeline))
except BaseException:
expected = """from autoai_libs.transformers.exportable import (
NumpyReplaceUnknownValues as CustomOp,
)
from sklearn.linear_model import LogisticRegression as LR
import lale
lale.wrap_imported_operators()
custom_op = CustomOp(
filling_values=float("nan"),
filling_values_list=[float("nan")],
known_values_list=[[36, 45, 56, 67, 68, 75, 78, 89]],
missing_values_reference_list=("", "-", "?", float("nan")),
)
pipeline = custom_op >> LR()"""
self._roundtrip(expected, lale.pretty_print.to_string(pipeline))
def test_autoai_libs_tam_1(self):
import autoai_libs.cognito.transforms.transform_extras
import numpy as np
from autoai_libs.cognito.transforms.transform_utils import TAM
from lale.lib.sklearn import LogisticRegression as LR
tam = TAM(
tans_class=autoai_libs.cognito.transforms.transform_extras.IsolationForestAnomaly,
name="isoforestanomaly",
col_names=["a", "b", "c"],
col_dtypes=[np.dtype("float32"), np.dtype("float32"), np.dtype("float32")],
)
pipeline = tam >> LR()
expected = """from autoai_libs.cognito.transforms.transform_utils import TAM
import autoai_libs.cognito.transforms.transform_extras
import numpy as np
from sklearn.linear_model import LogisticRegression as LR
from sklearn.pipeline import make_pipeline
tam = TAM(
tans_class=autoai_libs.cognito.transforms.transform_extras.IsolationForestAnomaly,
name="isoforestanomaly",
col_names=["a", "b", "c"],
col_dtypes=[
np.dtype("float32"), np.dtype("float32"), np.dtype("float32"),
],
)
pipeline = make_pipeline(tam, LR())"""
self._roundtrip(
expected, lale.pretty_print.to_string(pipeline, astype="sklearn")
)
def test_autoai_libs_tam_2(self):
import numpy as np
from lightgbm import LGBMClassifier
from sklearn.decomposition import PCA
from lale.lib.autoai_libs import TAM
from lale.operators import make_pipeline
pca = PCA(copy=False)
tam = TAM(
tans_class=pca,
name="pca",
col_names=["a", "b", "c"],
col_dtypes=[np.dtype("float32"), np.dtype("float32"), np.dtype("float32")],
)
lgbm_classifier = LGBMClassifier(class_weight="balanced", learning_rate=0.18)
pipeline = make_pipeline(tam, lgbm_classifier)
expected = """from autoai_libs.cognito.transforms.transform_utils import TAM
import sklearn.decomposition
import numpy as np
from lightgbm import LGBMClassifier
from lale.operators import make_pipeline
tam = TAM(
tans_class=sklearn.decomposition.PCA(copy=False),
name="pca",
col_names=["a", "b", "c"],
col_dtypes=[
np.dtype("float32"), np.dtype("float32"), np.dtype("float32"),
],
)
lgbm_classifier = LGBMClassifier(
class_weight="balanced", learning_rate=0.18, n_estimators=100
)
pipeline = make_pipeline(tam, lgbm_classifier)"""
self._roundtrip(
expected, lale.pretty_print.to_string(pipeline, combinators=False)
)
def test_autoai_libs_tam_3(self):
import autoai_libs.cognito.transforms.transform_utils
import numpy as np
import sklearn.cluster
import sklearn.linear_model
import sklearn.pipeline
import lale.helpers
import lale.operators
import lale.pretty_print
sklearn_pipeline = sklearn.pipeline.make_pipeline(
autoai_libs.cognito.transforms.transform_utils.TAM(
tans_class=sklearn.cluster.FeatureAgglomeration(
compute_full_tree="auto",
connectivity=None,
linkage="ward",
memory=None,
n_clusters=2,
pooling_func=np.mean,
),
name="featureagglomeration",
col_names=["a", "b", "c"],
col_dtypes=[
np.dtype("float32"),
np.dtype("float32"),
np.dtype("float32"),
],
),
sklearn.linear_model.LogisticRegression(
solver="liblinear", multi_class="ovr"
),
)
pipeline = lale.helpers.import_from_sklearn_pipeline(sklearn_pipeline)
expected = """from autoai_libs.cognito.transforms.transform_utils import TAM
from sklearn.cluster import FeatureAgglomeration
import numpy as np
from sklearn.linear_model import LogisticRegression
import lale
lale.wrap_imported_operators()
tam = TAM(
tans_class=FeatureAgglomeration(),
name="featureagglomeration",
col_names=["a", "b", "c"],
col_dtypes=[
np.dtype("float32"), np.dtype("float32"), np.dtype("float32"),
],
)
logistic_regression = LogisticRegression(
multi_class="ovr", solver="liblinear"
)
pipeline = tam >> logistic_regression"""
self._roundtrip(expected, lale.pretty_print.to_string(pipeline))
def test_autoai_libs_tam_4(self):
import autoai_libs.cognito.transforms.transform_utils
import numpy as np
import sklearn.decomposition
import sklearn.linear_model
import sklearn.pipeline
import lale.helpers
import lale.operators
import lale.pretty_print
sklearn_pipeline = sklearn.pipeline.make_pipeline(
autoai_libs.cognito.transforms.transform_utils.TAM(
tans_class=sklearn.decomposition.PCA(),
name="pca",
col_names=["a", "b", "c"],
col_dtypes=[
np.dtype("float32"),
np.dtype("float32"),
np.dtype("float32"),
],
),
sklearn.linear_model.LogisticRegression(
solver="liblinear", multi_class="ovr"
),
)
pipeline = lale.helpers.import_from_sklearn_pipeline(
sklearn_pipeline, fitted=False
)
assert isinstance(pipeline, lale.operators.TrainableOperator)
expected = """from autoai_libs.cognito.transforms.transform_utils import TAM
from sklearn.decomposition import PCA
import numpy as np
from sklearn.linear_model import LogisticRegression
import lale
lale.wrap_imported_operators()
tam = TAM(
tans_class=PCA(),
name="pca",
col_names=["a", "b", "c"],
col_dtypes=[
np.dtype("float32"), np.dtype("float32"), np.dtype("float32"),
],
)
logistic_regression = LogisticRegression(
multi_class="ovr", solver="liblinear"
)
pipeline = tam >> logistic_regression"""
self._roundtrip(expected, lale.pretty_print.to_string(pipeline))
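        # Beyond the pretty-print round-trip, exercise the imported pipeline on a
        # small random DataFrame to confirm it is still trainable and can predict.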
import numpy as np
import pandas as pd
test = pd.DataFrame(
np.random.randint(0, 100, size=(15, 3)),
columns=["a", "b", "c"],
dtype=np.dtype("float32"),
)
trained = pipeline.fit(
test.to_numpy(), [0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 0, 1]
)
trained.predict(test.to_numpy())
def test_autoai_libs_ta1(self):
import autoai_libs.utils.fc_methods
import numpy as np
from autoai_libs.cognito.transforms.transform_utils import TA1
from lale.lib.sklearn import LogisticRegression as LR
ta1 = TA1(
fun=np.rint,
name="round",
datatypes=["numeric"],
feat_constraints=[autoai_libs.utils.fc_methods.is_not_categorical],
col_names=[
"a____________",
"b____________",
"c____________",
"d____________",
"e____________",
],
col_dtypes=[
np.dtype("float32"),
np.dtype("float32"),
np.dtype("float32"),
np.dtype("float32"),
np.dtype("float32"),
],
)
pipeline = ta1 >> LR()
expected = """from autoai_libs.cognito.transforms.transform_utils import TA1
import numpy as np
import autoai_libs.utils.fc_methods
from sklearn.linear_model import LogisticRegression as LR
import lale
lale.wrap_imported_operators()
ta1 = TA1(
fun=np.rint,
name="round",
datatypes=["numeric"],
feat_constraints=[autoai_libs.utils.fc_methods.is_not_categorical],
col_names=[
"a____________", "b____________", "c____________", "d____________",
"e____________",
],
col_dtypes=[
np.dtype("float32"), np.dtype("float32"), np.dtype("float32"),
np.dtype("float32"), np.dtype("float32"),
],
)
pipeline = ta1 >> LR()"""
self._roundtrip(expected, lale.pretty_print.to_string(pipeline))
def test_autoai_libs_t_no_op(self):
from lightgbm import LGBMClassifier
from lale.lib.autoai_libs import TNoOp
from lale.operators import make_pipeline
t_no_op = TNoOp(
fun="fun",
name="no_action",
datatypes="x",
feat_constraints=[],
tgraph="tgraph",
)
lgbm_classifier = LGBMClassifier(class_weight="balanced", learning_rate=0.18)
pipeline = make_pipeline(t_no_op, lgbm_classifier)
expected = """from autoai_libs.cognito.transforms.transform_utils import TNoOp
from lightgbm import LGBMClassifier
from lale.operators import make_pipeline
t_no_op = TNoOp(
fun="fun",
name="no_action",
datatypes="x",
feat_constraints=[],
tgraph="tgraph",
)
lgbm_classifier = LGBMClassifier(
class_weight="balanced", learning_rate=0.18, n_estimators=100
)
pipeline = make_pipeline(t_no_op, lgbm_classifier)"""
self._roundtrip(
expected, lale.pretty_print.to_string(pipeline, combinators=False)
)
def test_autoai_libs_two_ops_with_combinator(self):
from autoai_libs.transformers.exportable import (
CompressStrings,
NumpyColumnSelector,
)
numpy_column_selector = NumpyColumnSelector(columns=[0, 2, 3, 5])
compress_strings = CompressStrings(
compress_type="hash",
dtypes_list=["char_str", "char_str", "char_str", "char_str"],
misslist_list=[[], [], [], []],
)
pipeline = lale.operators.make_pipeline(numpy_column_selector, compress_strings)
expected = """from autoai_libs.transformers.exportable import NumpyColumnSelector
from autoai_libs.transformers.exportable import CompressStrings
import lale
lale.wrap_imported_operators()
numpy_column_selector = NumpyColumnSelector(columns=[0, 2, 3, 5])
compress_strings = CompressStrings(
compress_type="hash",
dtypes_list=["char_str", "char_str", "char_str", "char_str"],
missing_values_reference_list=["?", "", "-", float("nan")],
misslist_list=[[], [], [], []],
)
pipeline = numpy_column_selector >> compress_strings"""
printed = lale.pretty_print.to_string(pipeline, combinators=True)
try:
self._roundtrip(expected, printed)
except BaseException:
expected = """from autoai_libs.transformers.exportable import NumpyColumnSelector
from autoai_libs.transformers.exportable import CompressStrings
import lale
lale.wrap_imported_operators()
numpy_column_selector = NumpyColumnSelector(columns=[0, 2, 3, 5])
compress_strings = CompressStrings(
compress_type="hash",
dtypes_list=["char_str", "char_str", "char_str", "char_str"],
missing_values_reference_list=("?", "", "-", float("nan")),
misslist_list=[[], [], [], []],
)
pipeline = numpy_column_selector >> compress_strings"""
self._roundtrip(expected, printed)
def test_expression(self):
from lale.expressions import it, mean
from lale.lib.rasl import Aggregate, Join, Scan
scan1 = Scan(table=it["table1.csv"])
scan2 = Scan(table=it["table2.csv"])
join = Join(pred=[it["table1.csv"].k1 == it["table2.csv"].k2])
aggregate = Aggregate(columns={"talk_time|mean": mean(it.talk_time)})
pipeline = (scan1 & scan2) >> join >> aggregate
expected = """from lale.lib.rasl import Scan
from lale.expressions import it
from lale.lib.rasl import Join
from lale.lib.rasl import Aggregate
from lale.expressions import mean
import lale
lale.wrap_imported_operators()
scan_0 = Scan(table=it["table1.csv"])
scan_1 = Scan(table=it["table2.csv"])
join = Join(pred=[it["table1.csv"].k1 == it["table2.csv"].k2])
aggregate = Aggregate(columns={"talk_time|mean": mean(it.talk_time)})
pipeline = (scan_0 & scan_1) >> join >> aggregate"""
self._roundtrip(expected, lale.pretty_print.to_string(pipeline))
def test_sklearn_pipeline(self):
from lale.lib.sklearn import PCA, LogisticRegression, Pipeline
pipeline = Pipeline(steps=[("pca", PCA), ("lr", LogisticRegression(C=0.1))])
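        # In the expected output, the still-planned PCA prints as the bare class name,
        # while the configured LogisticRegression is bound to a local variable first.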
expected = """from sklearn.pipeline import Pipeline
from sklearn.decomposition import PCA
from sklearn.linear_model import LogisticRegression
import lale
lale.wrap_imported_operators()
logistic_regression = LogisticRegression(C=0.1)
pipeline = Pipeline(steps=[("pca", PCA), ("lr", logistic_regression)])"""
self._roundtrip(expected, lale.pretty_print.to_string(pipeline))
def test_sklearn_pipeline_2(self):
from lale.lib.sklearn import PCA, LogisticRegression, Pipeline
pipeline = Pipeline(steps=[("pca", PCA), ("lr", LogisticRegression(C=0.1))])
expected = """from sklearn.pipeline import Pipeline
from sklearn.decomposition import PCA
from sklearn.linear_model import LogisticRegression
logistic_regression = LogisticRegression(C=0.1)
pipeline = Pipeline(steps=[("pca", PCA), ("lr", logistic_regression)])"""
printed = lale.pretty_print.to_string(pipeline, astype="sklearn")
self._roundtrip(expected, printed)
def test_customize_schema_enum_and_number(self):
from lale.lib.sklearn import LogisticRegression
pipeline = LogisticRegression.customize_schema(
solver={"enum": ["lbfgs", "liblinear"], "default": "liblinear"},
tol={
"type": "number",
"minimum": 0.00001,
"maximum": 0.1,
"default": 0.0001,
},
)(solver="lbfgs")
expected = """from sklearn.linear_model import LogisticRegression
import lale
lale.wrap_imported_operators()
pipeline = LogisticRegression.customize_schema(
solver={"enum": ["lbfgs", "liblinear"], "default": "liblinear"},
tol={
"type": "number",
"minimum": 1e-05,
"maximum": 0.1,
"default": 0.0001,
},
)(solver="lbfgs")"""
self._roundtrip(expected, pipeline.pretty_print(customize_schema=True))
def test_customize_schema_none_and_boolean(self):
from lale.lib.sklearn import RandomForestRegressor
pipeline = RandomForestRegressor.customize_schema(
bootstrap={"type": "boolean", "default": True},
random_state={
"anyOf": [
{"laleType": "numpy.random.RandomState"},
{
"description": "RandomState used by np.random",
"enum": [None],
},
{"description": "Explicit seed.", "type": "integer"},
],
"default": 33,
},
)(n_estimators=50)
expected = """from sklearn.ensemble import RandomForestRegressor
import lale
lale.wrap_imported_operators()
pipeline = RandomForestRegressor.customize_schema(
bootstrap={"type": "boolean", "default": True},
random_state={
"anyOf": [
{"laleType": "numpy.random.RandomState"},
{"description": "RandomState used by np.random", "enum": [None]},
{"description": "Explicit seed.", "type": "integer"},
],
"default": 33,
},
)(n_estimators=50)"""
# this should not include "random_state=33" because that would be
# redundant with the schema, and would prevent automated search
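        # (the changed default is instead captured inside the printed customize_schema
        # call above; contrast with test_customize_schema_print_defaults below, which
        # does print random_state=33 once customize_schema=False drops that information)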
self._roundtrip(expected, pipeline.pretty_print(customize_schema=True))
def test_customize_schema_print_defaults(self):
from lale.lib.sklearn import RandomForestRegressor
pipeline = RandomForestRegressor.customize_schema(
bootstrap={"type": "boolean", "default": True}, # default unchanged
random_state={
"anyOf": [
{"laleType": "numpy.random.RandomState"},
{"enum": [None]},
{"type": "integer"},
],
"default": 33, # default changed
},
)(n_estimators=50)
expected = """from sklearn.ensemble import RandomForestRegressor
import lale
lale.wrap_imported_operators()
pipeline = RandomForestRegressor(n_estimators=50, random_state=33)"""
# print exactly those defaults that changed
self._roundtrip(expected, pipeline.pretty_print(customize_schema=False))
def test_user_operator_in_toplevel_module(self):
import importlib
import os.path
import sys
import tempfile
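        # Write a throwaway top-level module defining a custom operator, import it
        # from a temp directory added to sys.path, and check that pretty-printing
        # imports the operator by that module name and passes the module name to
        # lale.wrap_imported_operators.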
with tempfile.NamedTemporaryFile(mode="w", suffix=".py") as tmp_py_file:
file_contents = """import numpy as np
import lale.operators
class _MockClassifierImpl:
def __init__(self, int_hp=0):
self.int_hp = int_hp
def fit(self, X, y):
self.some_y = list(y)[0]
def predict(self, X):
return self.some_y
MockClassifier = lale.operators.make_operator(_MockClassifierImpl)
"""
tmp_py_file.write(file_contents)
tmp_py_file.flush()
dir_name = os.path.dirname(tmp_py_file.name)
            old_pythonpath = list(sys.path)  # copy, so the append below can be undone in `finally`
try:
sys.path.append(dir_name)
module_name = os.path.basename(tmp_py_file.name)[: -len(".py")]
module = importlib.import_module(module_name)
MockClf = getattr(module, "MockClassifier")
self.assertIsInstance(MockClf, lale.operators.PlannedIndividualOp)
self.assertEqual(MockClf.name(), "MockClassifier")
pipeline = MockClf(int_hp=42)
expected = f"""from {module_name} import MockClassifier as MockClf
import lale
lale.wrap_imported_operators(["{module_name}"])
pipeline = MockClf(int_hp=42)"""
self._roundtrip(expected, pipeline.pretty_print())
finally:
sys.path = old_pythonpath
def test_nonlib_operator(self):
from test.mock_custom_operators import CustomOrigOperator
from lale.lib.sklearn import LogisticRegression
pipeline = CustomOrigOperator() >> LogisticRegression()
expected = """from test.mock_module import CustomOrigOperator
from sklearn.linear_model import LogisticRegression
import lale
lale.wrap_imported_operators(["test.mock_module"])
pipeline = CustomOrigOperator() >> LogisticRegression()"""
self._roundtrip(expected, pipeline.pretty_print())
@unittest.skip("TODO: avoid spurious 'name' keys in printed dictionaries")
def test_fairness_info(self):
from lale.lib.aif360 import DisparateImpactRemover, fetch_creditg_df
from lale.lib.lale import Hyperopt, Project
from lale.lib.sklearn import KNeighborsClassifier
X, y, fairness_info = fetch_creditg_df()
disparate_impact_remover = DisparateImpactRemover(
**fairness_info,
preparation=Project(columns={"type": "number"}),
)
planned = disparate_impact_remover >> KNeighborsClassifier()
frozen = planned.freeze_trainable()
pipeline = frozen.auto_configure(X, y, optimizer=Hyperopt, cv=2, max_evals=1)
expected = """from aif360.algorithms.preprocessing import DisparateImpactRemover
from lale.lib.rasl import Project
from sklearn.neighbors import KNeighborsClassifier
import lale
lale.wrap_imported_operators()
project = Project(columns={"type": "number"})
disparate_impact_remover = DisparateImpactRemover(
favorable_labels=["good"],
protected_attributes=[
{
"reference_group": [
"male div/sep", "male mar/wid", "male single",
],
"feature": "personal_status",
},
{
"reference_group": [[26, 1000]],
"feature": "age",
},
],
preparation=project,
)
pipeline = disparate_impact_remover >> KNeighborsClassifier()"""
self._roundtrip(expected, pipeline.pretty_print())
def test_snap_logistic_regression_1(self):
# force printing arguments via "transient": "alwaysPrint", case True
from lale.lib.snapml import SnapLogisticRegression
pipeline = SnapLogisticRegression(normalize=True)
expected = """from snapml import SnapLogisticRegression
import lale
lale.wrap_imported_operators()
pipeline = SnapLogisticRegression(fit_intercept=True, normalize=True)"""
self._roundtrip(expected, lale.pretty_print.to_string(pipeline))
def test_snap_logistic_regression_2(self):
# force printing arguments via "transient": "alwaysPrint", case False
from lale.lib.snapml import SnapLogisticRegression
pipeline = SnapLogisticRegression(normalize=False)
expected = """from snapml import SnapLogisticRegression
import lale
lale.wrap_imported_operators()
pipeline = SnapLogisticRegression(normalize=False, fit_intercept=True)"""
self._roundtrip(expected, lale.pretty_print.to_string(pipeline))
class TestToAndFromJSON(unittest.TestCase):
def test_trainable_individual_op(self):
self.maxDiff = None
from lale.json_operator import from_json, to_json
from lale.lib.sklearn import LogisticRegression as LR
operator = LR(LR.enum.solver.sag, C=0.1)
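        # LR.enum.solver.sag is lale's enum accessor for the "sag" value of the solver
        # hyperparameter; the expected JSON below records it simply as solver="sag".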
json_expected = {
"class": LR.class_name(),
"state": "trainable",
"operator": "LogisticRegression",
"label": "LR",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.sklearn.logistic_regression.html",
"hyperparams": {"C": 0.1, "solver": "sag"},
"is_frozen_trainable": False,
}
json = to_json(operator)
self.assertEqual(json, json_expected)
operator_2 = from_json(json)
json_2 = to_json(operator_2)
self.assertEqual(json_2, json_expected)
def test_operator_choice(self):
self.maxDiff = None
from lale.json_operator import from_json, to_json
from lale.lib.sklearn import PCA
from lale.lib.sklearn import MinMaxScaler as Scl
operator = PCA | Scl
json_expected = {
"class": "lale.operators.OperatorChoice",
"operator": "OperatorChoice",
"state": "planned",
"steps": {
"pca": {
"class": PCA.class_name(),
"state": "planned",
"operator": "PCA",
"label": "PCA",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.sklearn.pca.html",
},
"scl": {
"class": Scl.class_name(),
"state": "planned",
"operator": "MinMaxScaler",
"label": "Scl",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.sklearn.min_max_scaler.html",
},
},
}
json = to_json(operator)
self.assertEqual(json, json_expected)
operator_2 = from_json(json)
json_2 = to_json(operator_2)
self.assertEqual(json_2, json_expected)
def test_pipeline_1(self):
self.maxDiff = None
from lale.json_operator import from_json, to_json
from lale.lib.lale import NoOp
from lale.lib.rasl import ConcatFeatures
from lale.lib.sklearn import PCA
from lale.lib.sklearn import LogisticRegression as LR
operator = (PCA & NoOp) >> ConcatFeatures >> LR
json_expected = {
"class": "lale.operators.PlannedPipeline",
"state": "planned",
"edges": [
["pca", "concat_features"],
["no_op", "concat_features"],
["concat_features", "lr"],
],
"steps": {
"pca": {
"class": PCA.class_name(),
"state": "planned",
"operator": "PCA",
"label": "PCA",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.sklearn.pca.html",
},
"no_op": {
"class": NoOp.class_name(),
"state": "trained",
"operator": "NoOp",
"label": "NoOp",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.lale.no_op.html",
"hyperparams": None,
"coefs": None,
"is_frozen_trainable": True,
"is_frozen_trained": True,
},
"concat_features": {
"class": ConcatFeatures.class_name(),
"state": "trained",
"operator": "ConcatFeatures",
"label": "ConcatFeatures",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.rasl.concat_features.html",
"hyperparams": None,
"coefs": None,
"is_frozen_trainable": True,
"is_frozen_trained": True,
},
"lr": {
"class": LR.class_name(),
"state": "planned",
"operator": "LogisticRegression",
"label": "LR",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.sklearn.logistic_regression.html",
},
},
}
json = to_json(operator)
self.assertEqual(json, json_expected)
operator_2 = from_json(json)
json_2 = to_json(operator_2)
self.assertEqual(json, json_2)
def test_pipeline_2(self):
from lale.json_operator import from_json, to_json
from lale.lib.lale import NoOp
from lale.lib.sklearn import (
PCA,
KNeighborsClassifier,
LogisticRegression,
Nystroem,
)
from lale.operators import make_choice, make_pipeline
kernel_tfm_or_not = make_choice(NoOp, Nystroem)
tfm = PCA
clf = make_choice(LogisticRegression, KNeighborsClassifier)
operator = make_pipeline(kernel_tfm_or_not, tfm, clf)
json = to_json(operator)
operator_2 = from_json(json)
json_2 = to_json(operator_2)
self.assertEqual(json, json_2)
def test_higher_order_1(self):
from lale.json_operator import from_json
from lale.lib.lale import Both
from lale.lib.sklearn import PCA, Nystroem
operator = Both(op1=PCA(n_components=2), op2=Nystroem)
json_expected = {
"class": Both.class_name(),
"state": "trainable",
"operator": "Both",
"label": "Both",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.lale.both.html",
"hyperparams": {
"op1": {"$ref": "../steps/pca"},
"op2": {"$ref": "../steps/nystroem"},
},
"steps": {
"pca": {
"class": PCA.class_name(),
"state": "trainable",
"operator": "PCA",
"label": "PCA",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.sklearn.pca.html",
"hyperparams": {"n_components": 2},
"is_frozen_trainable": False,
},
"nystroem": {
"class": Nystroem.class_name(),
"state": "planned",
"operator": "Nystroem",
"label": "Nystroem",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.sklearn.nystroem.html",
},
},
"is_frozen_trainable": False,
}
json = operator.to_json()
self.assertEqual(json, json_expected)
operator_2 = from_json(json)
json_2 = operator_2.to_json()
self.assertEqual(json, json_2)
def test_higher_order_2(self):
self.maxDiff = None
from lale.json_operator import from_json
from lale.lib.sklearn import PCA
from lale.lib.sklearn import KNeighborsClassifier as KNN
from lale.lib.sklearn import LogisticRegression as LR
from lale.lib.sklearn import VotingClassifier as Vote
operator = Vote(
estimators=[("knn", KNN), ("pipeline", PCA() >> LR)], voting="soft"
)
json_expected = {
"class": Vote.class_name(),
"state": "trainable",
"operator": "VotingClassifier",
"is_frozen_trainable": True,
"label": "Vote",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.sklearn.voting_classifier.html",
"hyperparams": {
"estimators": [
("knn", {"$ref": "../steps/knn"}),
("pipeline", {"$ref": "../steps/pipeline"}),
],
"voting": "soft",
},
"steps": {
"knn": {
"class": KNN.class_name(),
"state": "planned",
"operator": "KNeighborsClassifier",
"label": "KNN",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.sklearn.k_neighbors_classifier.html",
},
"pipeline": {
"class": "lale.operators.PlannedPipeline",
"state": "planned",
"edges": [["pca", "lr"]],
"steps": {
"pca": {
"class": PCA.class_name(),
"state": "trainable",
"operator": "PCA",
"label": "PCA",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.sklearn.pca.html",
"hyperparams": {},
"is_frozen_trainable": False,
},
"lr": {
"class": LR.class_name(),
"state": "planned",
"operator": "LogisticRegression",
"label": "LR",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.sklearn.logistic_regression.html",
},
},
},
},
}
json = operator.to_json()
self.assertEqual(json, json_expected)
operator_2 = from_json(json)
json_2 = operator_2.to_json()
self.assertEqual(json, json_2)
def test_nested(self):
self.maxDiff = None
from lale.json_operator import from_json, to_json
from lale.lib.lale import NoOp
from lale.lib.sklearn import PCA
from lale.lib.sklearn import LogisticRegression as LR
operator = PCA >> (LR(C=0.09) | NoOp >> LR(C=0.19))
json_expected = {
"class": "lale.operators.PlannedPipeline",
"state": "planned",
"edges": [["pca", "choice"]],
"steps": {
"pca": {
"class": PCA.class_name(),
"state": "planned",
"operator": "PCA",
"label": "PCA",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.sklearn.pca.html",
},
"choice": {
"class": "lale.operators.OperatorChoice",
"state": "planned",
"operator": "OperatorChoice",
"steps": {
"lr_0": {
"class": LR.class_name(),
"state": "trainable",
"operator": "LogisticRegression",
"label": "LR",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.sklearn.logistic_regression.html",
"hyperparams": {"C": 0.09},
"is_frozen_trainable": False,
},
"pipeline_1": {
"class": "lale.operators.TrainablePipeline",
"state": "trainable",
"edges": [["no_op", "lr_1"]],
"steps": {
"no_op": {
"class": NoOp.class_name(),
"state": "trained",
"operator": "NoOp",
"label": "NoOp",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.lale.no_op.html",
"hyperparams": None,
"coefs": None,
"is_frozen_trainable": True,
"is_frozen_trained": True,
},
"lr_1": {
"class": LR.class_name(),
"state": "trainable",
"operator": "LogisticRegression",
"label": "LR",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.sklearn.logistic_regression.html",
"hyperparams": {"C": 0.19},
"is_frozen_trainable": False,
},
},
},
},
},
},
}
json = to_json(operator)
self.assertEqual(json, json_expected)
operator_2 = from_json(json)
json_2 = to_json(operator_2)
self.assertEqual(json, json_2)
def test_customize_schema(self):
from lale.json_operator import from_json, to_json
from lale.lib.sklearn import LogisticRegression as LR
operator = LR.customize_schema(
solver={"enum": ["lbfgs", "liblinear"], "default": "liblinear"},
tol={
"type": "number",
"minimum": 0.00001,
"maximum": 0.1,
"default": 0.0001,
},
)
json_expected = {
"class": LR.class_name(),
"state": "planned",
"operator": "LogisticRegression",
"label": "LR",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.sklearn.logistic_regression.html",
"customize_schema": {
"properties": {
"hyperparams": {
"allOf": [
{
"type": "object",
"properties": {
"solver": {
"default": "liblinear",
"enum": ["lbfgs", "liblinear"],
},
"tol": {
"type": "number",
"minimum": 0.00001,
"maximum": 0.1,
"default": 0.0001,
},
},
}
]
}
}
},
}
json = to_json(operator)
self.maxDiff = None
self.assertEqual(json, json_expected)
operator_2 = from_json(json)
json_2 = to_json(operator_2)
self.assertEqual(json, json_2)
class TestDiff(unittest.TestCase):
def test_single_op(self):
from lale.lib.sklearn import LogisticRegression
single_op = LogisticRegression()
single_op_param = LogisticRegression(solver="saga")
expected_diff = (
" from sklearn.linear_model import LogisticRegression\n"
" import lale\n"
" \n"
" lale.wrap_imported_operators()\n"
"- pipeline = LogisticRegression()\n"
'+ pipeline = LogisticRegression(solver="saga")\n'
"? +++++++++++++\n"
)
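        # The diff is difflib-style: '-'/'+' mark removed/added lines, and the '?'
        # guide line uses '+' (or '-') characters to point at the columns that changed.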
diff_str = single_op.diff(single_op_param, ipython_display=False)
self.assertEqual(diff_str, expected_diff)
expected_diff_reverse = (
" from sklearn.linear_model import LogisticRegression\n"
" import lale\n"
" \n"
" lale.wrap_imported_operators()\n"
'- pipeline = LogisticRegression(solver="saga")\n'
"? -------------\n\n"
"+ pipeline = LogisticRegression()"
)
diff_str_reverse = single_op_param.diff(single_op, ipython_display=False)
self.assertEqual(diff_str_reverse, expected_diff_reverse)
def test_pipeline(self):
from lale.lib.lale import NoOp
from lale.lib.sklearn import PCA, LogisticRegression, SelectKBest
pipeline_simple = PCA >> SelectKBest >> LogisticRegression
pipeline_choice = (PCA | NoOp) >> SelectKBest >> LogisticRegression
expected_diff = (
" from sklearn.decomposition import PCA\n"
"+ from lale.lib.lale import NoOp\n"
" from sklearn.feature_selection import SelectKBest\n"
" from sklearn.linear_model import LogisticRegression\n"
" import lale\n"
" \n"
" lale.wrap_imported_operators()\n"
"- pipeline = PCA >> SelectKBest >> LogisticRegression\n"
"+ pipeline = (PCA | NoOp) >> SelectKBest >> LogisticRegression\n"
"? + ++++++++\n"
)
diff_str = pipeline_simple.diff(pipeline_choice, ipython_display=False)
self.assertEqual(diff_str, expected_diff)
expected_diff_reverse = (
" from sklearn.decomposition import PCA\n"
"- from lale.lib.lale import NoOp\n"
" from sklearn.feature_selection import SelectKBest\n"
" from sklearn.linear_model import LogisticRegression\n"
" import lale\n"
" \n"
" lale.wrap_imported_operators()\n"
"- pipeline = (PCA | NoOp) >> SelectKBest >> LogisticRegression\n"
"? - --------\n\n"
"+ pipeline = PCA >> SelectKBest >> LogisticRegression"
)
diff_str_reverse = pipeline_choice.diff(pipeline_simple, ipython_display=False)
self.assertEqual(diff_str_reverse, expected_diff_reverse)
def test_single_op_pipeline(self):
from lale.lib.sklearn import PCA, LogisticRegression, SelectKBest
single_op = LogisticRegression()
pipeline = PCA >> SelectKBest >> LogisticRegression
expected_diff = (
"+ from sklearn.decomposition import PCA\n"
"+ from sklearn.feature_selection import SelectKBest\n"
" from sklearn.linear_model import LogisticRegression\n"
" import lale\n"
" \n"
" lale.wrap_imported_operators()\n"
"- pipeline = LogisticRegression()\n"
"+ pipeline = PCA >> SelectKBest >> LogisticRegression"
)
diff_str = single_op.diff(pipeline, ipython_display=False)
self.assertEqual(expected_diff, diff_str)
expected_diff_reverse = (
"- from sklearn.decomposition import PCA\n"
"- from sklearn.feature_selection import SelectKBest\n"
" from sklearn.linear_model import LogisticRegression\n"
" import lale\n"
" \n"
" lale.wrap_imported_operators()\n"
"- pipeline = PCA >> SelectKBest >> LogisticRegression\n"
"+ pipeline = LogisticRegression()"
)
diff_str_reverse = pipeline.diff(single_op, ipython_display=False)
self.assertEqual(expected_diff_reverse, diff_str_reverse)
def test_options(self):
from lale.lib.sklearn import LogisticRegression
single_op = LogisticRegression()
single_op_schema = single_op.customize_schema(solver={"enum": ["saga"]})
expected_diff_no_imports = " pipeline = LogisticRegression()"
diff_str_no_imports = single_op.diff(
single_op_schema, show_imports=False, ipython_display=False
)
self.assertEqual(diff_str_no_imports, expected_diff_no_imports)
        expected_diff_with_schema = (
" from sklearn.linear_model import LogisticRegression\n"
" import lale\n"
" \n"
" lale.wrap_imported_operators()\n"
"- pipeline = LogisticRegression()\n"
'+ pipeline = LogisticRegression.customize_schema(solver={"enum": ["saga"]})()'
)
        diff_str_with_schema = single_op.diff(
single_op_schema, customize_schema=True, ipython_display=False
)
        self.assertEqual(diff_str_with_schema, expected_diff_with_schema)
| 64,209 | 37.106825 | 147 | py |
lale | lale-master/test/test_interoperability.py |
# Copyright 2019-2023 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from test import EnableSchemaValidation
from jsonschema.exceptions import ValidationError
import lale.datasets.openml
import lale.lib.aif360
import lale.type_checking
from lale.lib.lale import ConcatFeatures, NoOp, Project
from lale.lib.sklearn import PCA, LogisticRegression, Nystroem, OrdinalEncoder
class TestResamplers(unittest.TestCase):
def setUp(self):
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
X, y = make_classification(
n_classes=2,
class_sep=2,
weights=[0.1, 0.9],
n_informative=3,
n_redundant=1,
flip_y=0,
n_features=20,
n_clusters_per_class=1,
n_samples=1000,
random_state=10,
)
self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(X, y)
def create_function_test_resampler(res_name):
def test_resampler(self):
X_train, y_train = self.X_train, self.y_train
X_test = self.X_test
import importlib
module_name = ".".join(res_name.split(".")[0:-1])
class_name = res_name.split(".")[-1]
module = importlib.import_module(module_name)
class_ = getattr(module, class_name)
with EnableSchemaValidation():
with self.assertRaises(ValidationError):
_ = class_()
# test_schemas_are_schemas
lale.type_checking.validate_is_schema(class_.input_schema_fit())
lale.type_checking.validate_is_schema(class_.input_schema_predict())
lale.type_checking.validate_is_schema(class_.output_schema_predict())
lale.type_checking.validate_is_schema(class_.hyperparam_schema())
# test_init_fit_predict
from lale.operators import make_pipeline
pipeline1 = PCA() >> class_(operator=make_pipeline(LogisticRegression()))
trained = pipeline1.fit(X_train, y_train)
_ = trained.predict(X_test)
pipeline2 = class_(operator=make_pipeline(PCA(), LogisticRegression()))
trained = pipeline2.fit(X_train, y_train)
_ = trained.predict(X_test)
# test_with_hyperopt
from lale.lib.lale import Hyperopt
optimizer = Hyperopt(
estimator=PCA >> class_(operator=make_pipeline(LogisticRegression())),
max_evals=1,
show_progressbar=False,
)
trained_optimizer = optimizer.fit(X_train, y_train)
_ = trained_optimizer.predict(X_test)
pipeline3 = class_(
operator=PCA()
>> (Nystroem & NoOp)
>> ConcatFeatures
>> LogisticRegression()
)
optimizer = Hyperopt(estimator=pipeline3, max_evals=1, show_progressbar=False)
trained_optimizer = optimizer.fit(X_train, y_train)
_ = trained_optimizer.predict(X_test)
pipeline4 = (
(
PCA >> class_(operator=make_pipeline(Nystroem()))
& class_(operator=make_pipeline(Nystroem()))
)
>> ConcatFeatures
>> LogisticRegression()
)
optimizer = Hyperopt(
estimator=pipeline4, max_evals=1, scoring="roc_auc", show_progressbar=False
)
trained_optimizer = optimizer.fit(X_train, y_train)
_ = trained_optimizer.predict(X_test)
# test_cross_validation
from lale.helpers import cross_val_score
cv_results = cross_val_score(pipeline1, X_train, y_train, cv=2)
self.assertEqual(len(cv_results), 2)
# test_to_json
pipeline1.to_json()
test_resampler.__name__ = f"test_{res_name.split('.')[-1]}"
return test_resampler
resamplers = [
"lale.lib.imblearn.SMOTE",
"lale.lib.imblearn.SMOTEENN",
"lale.lib.imblearn.ADASYN",
"lale.lib.imblearn.BorderlineSMOTE",
"lale.lib.imblearn.SVMSMOTE",
"lale.lib.imblearn.RandomOverSampler",
"lale.lib.imblearn.RandomUnderSampler",
"lale.lib.imblearn.CondensedNearestNeighbour",
"lale.lib.imblearn.EditedNearestNeighbours",
"lale.lib.imblearn.RepeatedEditedNearestNeighbours",
"lale.lib.imblearn.AllKNN",
"lale.lib.imblearn.InstanceHardnessThreshold",
]
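# Dynamically attach one generated test method per imblearn resampler to TestResamplers.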
for res in resamplers:
setattr(
TestResamplers,
f"test_{res.rsplit('.', maxsplit=1)[-1]}",
create_function_test_resampler(res),
)
class TestImblearn(unittest.TestCase):
def setUp(self):
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
X, y = make_classification(
n_classes=2,
class_sep=2,
weights=[0.1, 0.9],
n_informative=3,
n_redundant=1,
flip_y=0,
n_features=20,
n_clusters_per_class=1,
n_samples=1000,
random_state=10,
)
self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(X, y)
def test_decision_function(self):
from lale.lib.imblearn import SMOTE
from lale.lib.sklearn import RandomForestClassifier
from lale.operators import make_pipeline
smote = SMOTE(operator=make_pipeline(RandomForestClassifier()))
trained = smote.fit(self.X_train, self.y_train)
trained.predict(self.X_test)
with self.assertRaises(AttributeError):
trained.decision_function(self.X_test)
def test_string_labels(self):
from lale.lib.imblearn import CondensedNearestNeighbour
print(type(CondensedNearestNeighbour))
from lale.operators import make_pipeline
y_train = ["low" if label == 0 else "high" for label in self.y_train]
pipeline = CondensedNearestNeighbour(
operator=make_pipeline(PCA(), LogisticRegression()),
sampling_strategy=["high"],
)
trained = pipeline.fit(self.X_train, y_train)
_ = trained.predict(self.X_test)
def test_smoten(self):
from lale.lib.imblearn import SMOTEN
(train_X, train_y), (test_X, _) = lale.datasets.openml.fetch(
"breast-cancer", "classification", astype="pandas", preprocess=False
)
# SMOTEN can only use Nominal features
pipeline = SMOTEN(operator=OrdinalEncoder() >> LogisticRegression())
trained = pipeline.fit(train_X, train_y)
_ = trained.predict(test_X)
def test_smotenc(self):
from lale.lib.imblearn import SMOTENC
(train_X, train_y), (test_X, _) = lale.datasets.openml.fetch(
"ricci", "classification", astype="pandas", preprocess=False
)
# SMOTENC can handle both Nominal and Continuous features
pipeline = SMOTENC(
operator=(
Project(columns={"type": "number"})
& (Project(columns={"type": "string"}) >> OrdinalEncoder())
)
>> ConcatFeatures
>> LogisticRegression()
)
trained = pipeline.fit(train_X, train_y)
_ = trained.predict(test_X)
def test_smotenc_with_disparate_impact_remover(self):
from lale.lib.aif360 import DisparateImpactRemover
from lale.lib.imblearn import SMOTENC
X, y, fairness_info = lale.lib.aif360.fetch_ricci_df(preprocess=False)
pipeline = SMOTENC(
operator=(
DisparateImpactRemover(
**fairness_info,
preparation=(
Project(columns={"type": "number"})
& (Project(columns={"type": "string"}) >> OrdinalEncoder())
)
>> ConcatFeatures,
)
>> LogisticRegression()
)
)
(
train_X,
test_X,
train_y,
_,
) = lale.lib.aif360.fair_stratified_train_test_split(X, y, **fairness_info)
trained = pipeline.fit(train_X, train_y)
_ = trained.predict(test_X)
| 8,620 | 33.62249 | 87 | py |
lale | lale-master/test/test_optimizers.py |
# Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tempfile
import unittest
import warnings
from test import EnableSchemaValidation
import jsonschema
import numpy as np
import pandas as pd
from sklearn.datasets import load_iris
from sklearn.metrics import accuracy_score, make_scorer
from sklearn.model_selection import train_test_split
from lale import schemas
from lale.lib.lale import (
SMAC,
ConcatFeatures,
GridSearchCV,
Hyperopt,
NoOp,
OptimizeLast,
Project,
TopKVotingClassifier,
)
from lale.lib.sklearn import (
PCA,
SVC,
KNeighborsClassifier,
KNeighborsRegressor,
LinearRegression,
LogisticRegression,
MinMaxScaler,
Normalizer,
Nystroem,
OneHotEncoder,
RandomForestClassifier,
RandomForestRegressor,
SimpleImputer,
StandardScaler,
TfidfVectorizer,
)
from lale.operators import TrainedIndividualOp, TrainedPipeline
from lale.search.lale_smac import get_smac_space, lale_op_smac_tae
from lale.search.op2hp import hyperopt_search_space
from .mock_custom_operators import OpThatWorksWithFiles
def f_min(op, X, y, num_folds=5):
from lale.helpers import cross_val_score
# try:
scores = cross_val_score(op, X, y, cv=num_folds)
return 1 - np.mean(scores) # Minimize!
# except BaseException as e:
# print(e)
# return
def iris_f_min(op, num_folds=5):
from sklearn import datasets
iris = datasets.load_iris()
return f_min(op, iris.data, iris.target, num_folds=num_folds)
def iris_f_min_for_folds(num_folds=5):
return lambda op: iris_f_min(op, num_folds=num_folds)
def iris_fmin_tae(op, num_folds=5):
return lale_op_smac_tae(op, iris_f_min_for_folds(num_folds=num_folds))
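# The helpers above turn cross-validated iris accuracy into a loss (1 - mean score)
# and wrap it, via lale_op_smac_tae, into a target-algorithm-evaluation callable
# that SMAC can minimize.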
class TestSMAC(unittest.TestCase):
def setUp(self):
X, y = load_iris(return_X_y=True)
self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(X, y)
def test_smac(self):
# Import ConfigSpace and different types of parameters
from smac.configspace import ConfigurationSpace
from smac.facade.smac_facade import SMAC as orig_SMAC
from smac.scenario.scenario import Scenario
# Import SMAC-utilities
lr = LogisticRegression()
cs: ConfigurationSpace = get_smac_space(lr)
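        # get_smac_space builds a SMAC ConfigurationSpace from the lale operator's
        # hyperparameter search space, which the optimizer then samples from.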
# Scenario object
scenario = Scenario(
{
"run_obj": "quality", # we optimize quality (alternatively runtime)
"runcount-limit": 1, # maximum function evaluations
"cs": cs, # configuration space
"deterministic": "true",
"abort_on_first_run_crash": False,
}
)
# Optimize, using a SMAC-object
tae = iris_fmin_tae(lr, num_folds=2)
print("Optimizing! Depending on your machine, this might take a few minutes.")
smac = orig_SMAC(
scenario=scenario, rng=np.random.RandomState(42), tae_runner=tae
)
incumbent = smac.optimize()
inc_value = tae(incumbent)
print(f"Optimized Value: {inc_value:.2f}")
def dont_test_smac_choice(self):
# Import ConfigSpace and different types of parameters
from smac.configspace import ConfigurationSpace
from smac.facade.smac_facade import SMAC as orig_SMAC
from smac.scenario.scenario import Scenario
# Import SMAC-utilities
tfm = PCA() | Nystroem() | NoOp()
planned_pipeline1 = (
(OneHotEncoder(handle_unknown="ignore", sparse=False) | NoOp())
>> tfm
>> (LogisticRegression() | KNeighborsClassifier())
)
cs: ConfigurationSpace = get_smac_space(planned_pipeline1, lale_num_grids=1)
# Scenario object
scenario = Scenario(
{
"run_obj": "quality", # we optimize quality (alternatively runtime)
"runcount-limit": 1, # maximum function evaluations
"cs": cs, # configuration space
"deterministic": "true",
}
)
# Optimize, using a SMAC-object
tae = iris_fmin_tae(planned_pipeline1, num_folds=2)
print("Optimizing! Depending on your machine, this might take a few minutes.")
smac = orig_SMAC(
scenario=scenario, rng=np.random.RandomState(42), tae_runner=tae
)
incumbent = smac.optimize()
inc_value = tae(incumbent)
print(f"Optimized Value: {inc_value:.2f}")
def test_smac1(self):
planned_pipeline = (PCA | NoOp) >> LogisticRegression
opt = SMAC(estimator=planned_pipeline, max_evals=1)
# run optimizer
res = opt.fit(self.X_train, self.y_train)
_ = res.predict(self.X_test)
def test_smac2(self):
from test.mock_module import BadClassifier
import lale.operators
BadClf = lale.operators.make_operator(BadClassifier)
planned_pipeline = (PCA | NoOp) >> BadClf()
opt = SMAC(estimator=planned_pipeline, max_evals=1)
# run optimizer
res = opt.fit(self.X_train, self.y_train)
        # Get the trials object and make sure that SMAC assigned cost_for_crash (MAXINT by
        # default) to at least one trial, corresponding to the crashing BadClf step.
trials = res._impl.get_trials()
assert 2147483647.0 in trials.cost_per_config.values()
def test_smac_timeout_zero_classification(self):
planned_pipeline = (MinMaxScaler | Normalizer) >> (
LogisticRegression | KNeighborsClassifier
)
opt = SMAC(estimator=planned_pipeline, max_evals=1, max_opt_time=0.0)
# run optimizer
res = opt.fit(self.X_train, self.y_train)
assert res.get_pipeline() is None
def test_smac_timeout_zero_regression(self):
planned_pipeline = (MinMaxScaler | Normalizer) >> LinearRegression
from lale.datasets.util import load_boston
X, y = load_boston(return_X_y=True)
opt = SMAC(
estimator=planned_pipeline, scoring="r2", max_evals=1, max_opt_time=0.0
)
# run optimizer
res = opt.fit(X[:500, :], y[:500])
assert res.get_pipeline() is None
def test_smac_timeout_classification(self):
import time
planned_pipeline = (MinMaxScaler | Normalizer) >> (
LogisticRegression | KNeighborsClassifier
)
max_opt_time = 4.0
opt = SMAC(estimator=planned_pipeline, max_evals=1, max_opt_time=max_opt_time)
start = time.time()
_ = opt.fit(self.X_train, self.y_train)
end = time.time()
opt_time = end - start
rel_diff = (opt_time - max_opt_time) / max_opt_time
assert (
rel_diff < 1.2
), f"Max time: {max_opt_time}, Actual time: {opt_time}, relative diff: {rel_diff}"
def test_smac_timeout_regression(self):
import time
from lale.datasets.util import load_boston
planned_pipeline = (MinMaxScaler | Normalizer) >> LinearRegression
X, y = load_boston(return_X_y=True)
max_opt_time = 2.0
opt = SMAC(
estimator=planned_pipeline,
scoring="r2",
max_evals=1,
max_opt_time=max_opt_time,
)
start = time.time()
_ = opt.fit(X[:500, :], y[:500])
end = time.time()
opt_time = end - start
rel_diff = (opt_time - max_opt_time) / max_opt_time
assert (
rel_diff < 0.5
), f"Max time: {max_opt_time}, Actual time: {opt_time}, relative diff: {rel_diff}"
def run_hyperopt_on_planned_pipeline(planned_pipeline, max_iters=1):
# data
features, labels = load_iris(return_X_y=True)
# set up optimizer
opt = Hyperopt(estimator=planned_pipeline, max_evals=max_iters)
# run optimizer
_ = opt.fit(features, labels)
class TestVisitorErrors(unittest.TestCase):
def test_empty_schema(self):
pca = PCA().customize_schema(whiten=schemas.Schema())
plan = (
(pca & (MinMaxScaler | Normalizer))
>> ConcatFeatures()
>> (MinMaxScaler | Normalizer)
>> (LogisticRegression | KNeighborsClassifier)
)
from lale.search.schema2search_space import OperatorSchemaError
with self.assertRaises(OperatorSchemaError):
run_hyperopt_on_planned_pipeline(plan)
# print(str(ctxt.exception))
def test_no_max_schema(self):
pca = PCA().customize_schema(n_components=schemas.Float(minimum=0.0))
plan = (
(pca & (MinMaxScaler | Normalizer))
>> ConcatFeatures()
>> (MinMaxScaler | Normalizer)
>> (LogisticRegression | KNeighborsClassifier)
)
from lale.search.search_space import SearchSpaceError
with self.assertRaises(SearchSpaceError):
run_hyperopt_on_planned_pipeline(plan)
# print(str(ctxt.exception))
class TestHyperoptOperatorDuplication(unittest.TestCase):
def test_planned_pipeline_1(self):
plan = (
(PCA & (MinMaxScaler | Normalizer))
>> ConcatFeatures()
>> (MinMaxScaler | Normalizer)
>> (LogisticRegression | KNeighborsClassifier)
)
run_hyperopt_on_planned_pipeline(plan)
def test_planned_pipeline_2(self):
plan = (
(MinMaxScaler() & NoOp())
>> ConcatFeatures()
>> (NoOp() & MinMaxScaler())
>> ConcatFeatures()
>> (LogisticRegression | KNeighborsClassifier)
)
run_hyperopt_on_planned_pipeline(plan)
def test_planned_pipeline_3(self):
plan = (
(MinMaxScaler() & NoOp())
>> ConcatFeatures()
>> (StandardScaler & (NoOp() | MinMaxScaler()))
>> ConcatFeatures()
>> (LogisticRegression | KNeighborsClassifier)
)
run_hyperopt_on_planned_pipeline(plan)
class TestHyperopt(unittest.TestCase):
def setUp(self):
data = load_iris()
X, y = data.data, data.target
self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(X, y)
def test_loguniform_search_space(self):
from lale.search.schema2search_space import op_to_search_space
from lale.search.search_space import SearchSpaceObject
svc = SVC()
space = op_to_search_space(svc)
self.assertIsInstance(space, SearchSpaceObject)
strobj = str(space)
self.assertRegex(strobj, r"gamma->\[[^[]*,<log>,8]")
def test_using_scoring(self):
lr = LogisticRegression()
clf = Hyperopt(estimator=lr, scoring="accuracy", cv=5, max_evals=1)
trained = clf.fit(self.X_train, self.y_train)
predictions = trained.predict(self.X_test)
predictions_1 = clf.predict(self.X_test)
assert np.array_equal(predictions_1, predictions)
def test_custom_scoring(self):
from sklearn.metrics import f1_score
lr = LogisticRegression()
clf = Hyperopt(
estimator=lr,
scoring=make_scorer(f1_score, average="macro"),
cv=5,
max_evals=1,
)
trained = clf.fit(self.X_train, self.y_train)
predictions = trained.predict(self.X_test)
predictions_1 = clf.predict(self.X_test)
assert np.array_equal(predictions_1, predictions)
def test_runtime_limit_hoc(self):
import time
planned_pipeline = (MinMaxScaler | Normalizer) >> (
LogisticRegression | KNeighborsClassifier
)
X, y = load_iris(return_X_y=True)
max_opt_time = 10.0
hoc = Hyperopt(
estimator=planned_pipeline,
max_evals=1,
cv=3,
scoring="accuracy",
max_opt_time=max_opt_time,
)
start = time.time()
_ = hoc.fit(X, y)
end = time.time()
opt_time = end - start
rel_diff = (opt_time - max_opt_time) / max_opt_time
assert (
rel_diff < 0.7
), f"Max time: {max_opt_time}, Actual time: {opt_time}, relative diff: {rel_diff}"
def test_runtime_limit_zero_time_hoc(self):
planned_pipeline = (MinMaxScaler | Normalizer) >> (
LogisticRegression | KNeighborsClassifier
)
X, y = load_iris(return_X_y=True)
hoc = Hyperopt(
estimator=planned_pipeline,
max_evals=1,
cv=3,
scoring="accuracy",
max_opt_time=0.0,
)
hoc_fitted = hoc.fit(X, y)
assert hoc_fitted.get_pipeline() is None
def test_runtime_limit_hor(self):
import time
planned_pipeline = (MinMaxScaler | Normalizer) >> LinearRegression
from lale.datasets.util import load_boston
X, y = load_boston(return_X_y=True)
max_opt_time = 3.0
hor = Hyperopt(
estimator=planned_pipeline,
max_evals=1,
cv=3,
max_opt_time=max_opt_time,
scoring="r2",
)
start = time.time()
_ = hor.fit(X[:500, :], y[:500])
end = time.time()
opt_time = end - start
rel_diff = (opt_time - max_opt_time) / max_opt_time
assert (
rel_diff < 0.2
), f"Max time: {max_opt_time}, Actual time: {opt_time}, relative diff: {rel_diff}"
def test_runtime_limit_zero_time_hor(self):
planned_pipeline = (MinMaxScaler | Normalizer) >> LinearRegression
from lale.datasets.util import load_boston
X, y = load_boston(return_X_y=True)
hor = Hyperopt(
estimator=planned_pipeline,
max_evals=1,
cv=3,
max_opt_time=0.0,
scoring="r2",
)
hor_fitted = hor.fit(X, y)
assert hor_fitted.get_pipeline() is None
def test_hyperparam_overriding_with_hyperopt(self):
pca1 = PCA(n_components=3)
pca2 = PCA()
search_space1 = hyperopt_search_space(pca1)
search_space2 = hyperopt_search_space(pca2)
self.assertNotEqual(search_space1, search_space2)
def test_nested_pipeline1(self):
data = load_iris()
X, y = data.data, data.target
# pipeline = KNeighborsClassifier() | (OneHotEncoder(handle_unknown = 'ignore') >> LogisticRegression())
pipeline = KNeighborsClassifier() | (SimpleImputer() >> LogisticRegression())
clf = Hyperopt(estimator=pipeline, max_evals=1)
trained = clf.fit(X, y)
predictions = trained.predict(X)
print(accuracy_score(y, predictions))
def test_with_concat_features1(self):
warnings.filterwarnings("ignore")
data = load_iris()
X, y = data.data, data.target
pca = PCA(n_components=3)
nys = Nystroem(n_components=10)
concat = ConcatFeatures()
lr = LogisticRegression(random_state=42, C=0.1)
pipeline = ((pca & nys) >> concat >> lr) | KNeighborsClassifier()
clf = Hyperopt(estimator=pipeline, max_evals=1)
trained = clf.fit(X, y)
predictions = trained.predict(X)
print(accuracy_score(y, predictions))
warnings.resetwarnings()
def test_with_concat_features2(self):
warnings.filterwarnings("ignore")
data = load_iris()
X, y = data.data, data.target
pca = PCA(n_components=3)
nys = Nystroem(n_components=10)
concat = ConcatFeatures()
lr = LogisticRegression(random_state=42, C=0.1)
from lale.operators import make_pipeline
pipeline = make_pipeline(
((((SimpleImputer() | NoOp()) >> pca) & nys) >> concat >> lr)
| KNeighborsClassifier()
)
clf = Hyperopt(estimator=pipeline, max_evals=1, handle_cv_failure=True)
trained = clf.fit(X, y)
predictions = trained.predict(X)
print(accuracy_score(y, predictions))
warnings.resetwarnings()
def test_preprocessing_union(self):
from lale.datasets import openml
(train_X, train_y), (_test_X, _test_y) = openml.fetch(
"credit-g", "classification", preprocess=False
)
prep_num = Project(columns={"type": "number"}) >> Normalizer
prep_cat = Project(columns={"not": {"type": "number"}}) >> OneHotEncoder(
sparse=False
)
planned = (prep_num & prep_cat) >> ConcatFeatures >> RandomForestClassifier
hyperopt_classifier = Hyperopt(estimator=planned, max_evals=1)
_ = hyperopt_classifier.fit(train_X, train_y)
def test_text_and_structured(self):
from lale.datasets.uci.uci_datasets import fetch_drugscom
train_X_all, train_y_all, _test_X, _test_y = fetch_drugscom()
# subset to speed up debugging
train_X, _train_X, train_y, _train_y = train_test_split(
train_X_all, train_y_all, train_size=0.01, random_state=42
)
prep_text = Project(columns=["review"]) >> TfidfVectorizer(max_features=100)
prep_nums = Project(columns={"type": "number"})
planned = (
(prep_text & prep_nums)
>> ConcatFeatures
>> (LinearRegression | RandomForestRegressor)
)
hyperopt_classifier = Hyperopt(estimator=planned, max_evals=1, scoring="r2")
_ = hyperopt_classifier.fit(train_X, train_y)
def test_custom_scorer(self):
pipeline = PCA() >> LogisticRegression()
def custom_scorer(estimator, X, y, factor=0.1):
# This is a custom scorer for demonstrating the use of kwargs
# Just applies some factor to the accuracy
predictions = estimator.predict(X)
self.assertEqual(factor, 0.5)
return factor * accuracy_score(y, predictions)
clf = Hyperopt(
estimator=pipeline,
scoring=custom_scorer,
cv=5,
max_evals=1,
args_to_scorer={"factor": 0.5},
)
trained = clf.fit(self.X_train, self.y_train)
predictions = trained.predict(self.X_test)
predictions_1 = clf.predict(self.X_test)
assert np.array_equal(predictions_1, predictions)
def test_other_algorithms(self):
for alg in ["rand", "tpe", "atpe", "anneal"]:
hyperopt = Hyperopt(
estimator=LogisticRegression, algo=alg, cv=3, max_evals=3
)
trained = hyperopt.fit(self.X_train, self.y_train)
predictions = trained.predict(self.X_test)
predictions_1 = hyperopt.predict(self.X_test)
self.assertTrue(np.array_equal(predictions_1, predictions), alg)
def test_args_to_cv(self):
from sklearn.model_selection import GroupKFold
data = pd.DataFrame(
{
"x_0": ["a"] * 10 + ["b"] * 10,
"x_1": ["c"] * 18 + ["d"] * 2,
"y": [1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0],
}
)
train_y = data.pop("y")
train_x = data
group_cols = train_x["x_0"]
group_cols = [col_value[0] for col_value in group_cols.tolist()]
group_kfold = GroupKFold(n_splits=2)
pipeline = OneHotEncoder(handle_unknown="ignore") >> RandomForestClassifier()
optimizer = Hyperopt(
estimator=pipeline, cv=group_kfold, max_evals=1, verbose=True
)
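        # forward the group labels required by GroupKFold via args_to_cv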
trained_optimizer = optimizer.fit(
train_x, train_y, args_to_cv={"groups": group_cols}
)
trained_optimizer.predict(train_x)
def test_string_X(self):
from sklearn.datasets import fetch_20newsgroups
cats = ["alt.atheism", "sci.space"]
newsgroups_train = fetch_20newsgroups(subset="train", categories=cats)
self.train_X, self.train_y = (
np.array(newsgroups_train.data),
newsgroups_train.target,
)
self.train_X = np.reshape(self.train_X, (self.train_X.shape[0], 1))
newsgroups_test = fetch_20newsgroups(subset="test", categories=cats)
self.test_X, self.test_y = (
np.array(newsgroups_test.data),
newsgroups_test.target,
)
self.test_X = np.reshape(self.test_X, (self.test_X.shape[0], 1))
op = OpThatWorksWithFiles()
optimizer = Hyperopt(estimator=op, cv=None, max_evals=2, verbose=True)
with tempfile.NamedTemporaryFile(mode="w") as tmp:
pd.DataFrame(self.train_X).to_csv(tmp.name, header=None, index=None)
# np.savetxt(tmp.name, self.train_X, fmt="%s")
with tempfile.NamedTemporaryFile(mode="w") as tmp1:
pd.DataFrame(self.test_X).to_csv(tmp1.name, header=None, index=None)
# np.savetxt(tmp1.name, self.test_X, fmt="%s")
_ = optimizer.fit(
tmp.name,
y=self.train_y,
X_valid=tmp1.name,
y_valid=self.test_y,
sample_weight=[],
)
class TestAutoConfigureClassification(unittest.TestCase):
def setUp(self):
data = load_iris()
X, y = data.data, data.target
self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(X, y)
def test_with_Hyperopt(self):
planned_pipeline = (PCA | NoOp) >> LogisticRegression
best_pipeline = planned_pipeline.auto_configure(
self.X_train,
self.y_train,
optimizer=Hyperopt,
cv=3,
scoring="accuracy",
max_evals=1,
)
_ = best_pipeline.predict(self.X_test)
assert isinstance(best_pipeline, TrainedPipeline)
def test_with_Hyperopt_2(self):
choice = LogisticRegression | KNeighborsClassifier
best = choice.auto_configure(
self.X_train, self.y_train, optimizer=Hyperopt, cv=3, max_evals=3
)
_ = best.predict(self.X_test)
def test_with_Hyperopt_3(self):
planned_pipeline = (PCA() | Nystroem()) >> (
LogisticRegression() | KNeighborsClassifier()
)
best_pipeline = planned_pipeline.auto_configure(
self.X_train,
self.y_train,
optimizer=Hyperopt,
cv=3,
scoring="accuracy",
max_evals=10,
frac_evals_with_defaults=0.2,
)
_ = best_pipeline.predict(self.X_test)
assert isinstance(best_pipeline, TrainedPipeline)
def test_with_gridsearchcv(self):
warnings.simplefilter("ignore")
planned_pipeline = (PCA | NoOp) >> LogisticRegression
best_pipeline = planned_pipeline.auto_configure(
self.X_train,
self.y_train,
optimizer=GridSearchCV,
cv=3,
scoring="accuracy",
lale_num_samples=1,
lale_num_grids=1,
)
_ = best_pipeline.predict(self.X_test)
assert best_pipeline is not None
def test_with_smaccv(self):
planned_pipeline = (PCA | NoOp) >> LogisticRegression
best_pipeline = planned_pipeline.auto_configure(
self.X_train,
self.y_train,
optimizer=SMAC,
cv=3,
scoring="accuracy",
max_evals=1,
)
_ = best_pipeline.predict(self.X_test)
assert isinstance(best_pipeline, TrainedPipeline)
class TestAutoConfigureRegression(unittest.TestCase):
def setUp(self):
from lale.datasets.util import load_boston
X, y = load_boston(return_X_y=True)
self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(X, y)
def test_with_Hyperopt(self):
planned_pipeline = (MinMaxScaler | Normalizer) >> LinearRegression
best_pipeline = planned_pipeline.auto_configure(
self.X_train,
self.y_train,
optimizer=Hyperopt,
cv=3,
scoring="r2",
max_evals=1,
)
_ = best_pipeline.predict(self.X_test)
assert isinstance(best_pipeline, TrainedPipeline)
def test_with_gridsearchcv(self):
warnings.simplefilter("ignore")
planned_pipeline = (MinMaxScaler | Normalizer) >> LinearRegression
best_pipeline = planned_pipeline.auto_configure(
self.X_train,
self.y_train,
optimizer=GridSearchCV,
cv=3,
scoring="r2",
lale_num_samples=1,
lale_num_grids=1,
)
_ = best_pipeline.predict(self.X_test)
assert best_pipeline is not None
class TestGridSearchCV(unittest.TestCase):
def test_manual_grid(self):
warnings.simplefilter("ignore")
from lale import wrap_imported_operators
wrap_imported_operators()
iris = load_iris()
parameters = {"kernel": ("linear", "rbf"), "C": [1, 10]}
svc = SVC()
clf = GridSearchCV(estimator=svc, param_grid=parameters)
clf.fit(iris.data, iris.target)
clf.predict(iris.data)
def test_with_gridsearchcv_auto_wrapped_pipe1(self):
lr = LogisticRegression()
pca = PCA()
trainable = pca >> lr
with warnings.catch_warnings():
warnings.simplefilter("ignore")
clf = GridSearchCV(
estimator=trainable,
lale_num_samples=1,
lale_num_grids=1,
cv=2,
scoring=make_scorer(accuracy_score),
)
iris = load_iris()
clf.fit(iris.data, iris.target)
def test_with_gridsearchcv_auto_wrapped_pipe2(self):
lr = LogisticRegression()
pca1 = PCA()
pca1._name = "PCA1"
pca2 = PCA()
pca2._name = "PCA2"
trainable = (pca1 | pca2) >> lr
with warnings.catch_warnings():
warnings.simplefilter("ignore")
clf = GridSearchCV(
estimator=trainable,
lale_num_samples=1,
lale_num_grids=1,
cv=2,
scoring=make_scorer(accuracy_score),
)
iris = load_iris()
clf.fit(iris.data, iris.target)
def test_runtime_limit_hoc(self):
import time
planned_pipeline = (MinMaxScaler | Normalizer) >> (
LogisticRegression | KNeighborsClassifier
)
X, y = load_iris(return_X_y=True)
max_opt_time = 10.0
hoc = GridSearchCV(
estimator=planned_pipeline,
cv=3,
scoring="accuracy",
max_opt_time=max_opt_time,
)
start = time.time()
with self.assertRaises(BaseException):
_ = hoc.fit(X, y)
end = time.time()
opt_time = end - start
rel_diff = (opt_time - max_opt_time) / max_opt_time
assert (
rel_diff < 0.7
), f"Max time: {max_opt_time}, Actual time: {opt_time}, relative diff: {rel_diff}"
@unittest.skip("This test has finicky timing")
def test_runtime_limit_hor(self):
import time
planned_pipeline = (MinMaxScaler | Normalizer) >> LinearRegression
from lale.datasets.util import load_boston
X, y = load_boston(return_X_y=True)
max_opt_time = 3
hor = GridSearchCV(
estimator=planned_pipeline,
cv=3,
max_opt_time=max_opt_time,
scoring="r2",
)
start = time.time()
with self.assertRaises(BaseException):
_ = hor.fit(X[:500, :], y[:500])
end = time.time()
opt_time = end - start
print(opt_time)
rel_diff = (opt_time - max_opt_time) / max_opt_time
assert (
rel_diff < 0.2
), f"Max time: {max_opt_time}, Actual time: {opt_time}, relative diff: {rel_diff}"
class TestCrossValidation(unittest.TestCase):
def test_cv_folds(self):
trainable_lr = LogisticRegression(n_jobs=1)
iris = load_iris()
from sklearn.model_selection import KFold
from lale.helpers import cross_val_score
cv_results = cross_val_score(trainable_lr, iris.data, iris.target, cv=KFold(2))
self.assertEqual(len(cv_results), 2)
def test_cv_scoring(self):
trainable_lr = LogisticRegression(n_jobs=1)
iris = load_iris()
from sklearn.metrics import confusion_matrix
from lale.helpers import cross_val_score
cv_results = cross_val_score(
trainable_lr, iris.data, iris.target, scoring=confusion_matrix
)
self.assertEqual(len(cv_results), 5)
def test_cv_folds_scikit(self):
trainable_lr = LogisticRegression(n_jobs=1)
iris = load_iris()
from sklearn.model_selection import KFold, cross_val_score
with warnings.catch_warnings():
warnings.simplefilter("ignore")
cv_results = cross_val_score(
trainable_lr,
iris.data,
iris.target,
cv=KFold(2),
scoring=make_scorer(accuracy_score),
)
self.assertEqual(len(cv_results), 2)
class TestHigherOrderOperators(unittest.TestCase):
def setUp(self):
data = load_iris()
X, y = data.data, data.target
self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(X, y)
def test_ada_boost(self):
from lale.lib.sklearn import AdaBoostClassifier, DecisionTreeClassifier
clf = AdaBoostClassifier(base_estimator=DecisionTreeClassifier())
trained = clf.auto_configure(
self.X_train, self.y_train, optimizer=Hyperopt, max_evals=1
)
# Checking that the inner decision tree does not get the default value for min_samples_leaf, not sure if this will always pass
self.assertNotEqual(
trained.hyperparams()["base_estimator"].hyperparams()["min_samples_leaf"], 1
)
def test_ada_boost_pipe(self):
from lale.lib.sklearn import AdaBoostClassifier, DecisionTreeClassifier
clf = AdaBoostClassifier(base_estimator=NoOp >> DecisionTreeClassifier())
trained = clf.auto_configure(
self.X_train, self.y_train, optimizer=Hyperopt, max_evals=1
)
# Checking that the inner decision tree does not get the default value for min_samples_leaf, not sure if this will always pass
self.assertNotEqual(
trained.hyperparams()["base_estimator"]
.steps_list()[1]
.hyperparams()["min_samples_leaf"],
1,
)
def test_ada_boost1(self):
from sklearn.tree import DecisionTreeClassifier
from lale.lib.sklearn import AdaBoostClassifier
clf = AdaBoostClassifier(base_estimator=DecisionTreeClassifier())
clf.fit(self.X_train, self.y_train)
def test_ada_boost_regressor(self):
from lale.datasets.util import load_boston
X, y = load_boston(return_X_y=True)
X_train, _X_test, y_train, _y_test = train_test_split(X, y)
from lale.lib.sklearn import AdaBoostRegressor, DecisionTreeRegressor
reg = AdaBoostRegressor(base_estimator=DecisionTreeRegressor())
trained = reg.auto_configure(
X_train, y_train, optimizer=Hyperopt, max_evals=1, scoring="r2"
)
# Checking that the inner decision tree does not get the default value for min_samples_leaf, not sure if this will always pass
self.assertNotEqual(
trained.hyperparams()["base_estimator"].hyperparams()["min_samples_leaf"], 1
)
def test_ada_boost_regressor_pipe(self):
from lale.datasets.util import load_boston
X, y = load_boston(return_X_y=True)
X_train, _X_test, y_train, _y_test = train_test_split(X, y)
from lale.lib.sklearn import AdaBoostRegressor, DecisionTreeRegressor
reg = AdaBoostRegressor(base_estimator=NoOp >> DecisionTreeRegressor())
trained = reg.auto_configure(
X_train, y_train, optimizer=Hyperopt, max_evals=1, scoring="r2"
)
# Checking that the inner decision tree does not get the default value for min_samples_leaf, not sure if this will always pass
self.assertNotEqual(
trained.hyperparams()["base_estimator"]
.steps_list()[1]
.hyperparams()["min_samples_leaf"],
1,
)
class TestSelectKBestTransformer(unittest.TestCase):
def setUp(self):
data = load_iris()
X, y = data.data, data.target
self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(X, y)
def test_hyperopt(self):
from lale.lib.sklearn import SelectKBest
planned = SelectKBest >> LogisticRegression
trained = planned.auto_configure(
self.X_train,
self.y_train,
cv=3,
optimizer=Hyperopt,
max_evals=3,
verbose=True,
)
_ = trained.predict(self.X_test)
class TestTopKVotingClassifier(unittest.TestCase):
def setUp(self):
data = load_iris()
X, y = data.data, data.target
self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(X, y)
def test_fit_predict(self):
ensemble = TopKVotingClassifier(
estimator=(PCA() | Nystroem())
>> (LogisticRegression() | KNeighborsClassifier()),
args_to_optimizer={"max_evals": 3},
k=2,
)
trained = ensemble.fit(self.X_train, self.y_train)
trained.predict(self.X_test)
def test_fit_args(self):
ensemble = TopKVotingClassifier(
estimator=(PCA() | Nystroem())
>> (LogisticRegression() | KNeighborsClassifier()),
k=2,
)
trained = ensemble.fit(self.X_train, self.y_train)
trained.predict(self.X_test)
def test_fit_smaller_trials(self):
ensemble = TopKVotingClassifier(
estimator=(PCA() | Nystroem())
>> (LogisticRegression() | KNeighborsClassifier()),
args_to_optimizer={"max_evals": 3},
k=20,
)
trained = ensemble.fit(self.X_train, self.y_train)
final_ensemble = trained._impl._best_estimator
self.assertLessEqual(len(final_ensemble._impl_instance().estimators), 3)
def test_fit_default_args(self):
with self.assertRaises(ValueError):
_ = TopKVotingClassifier()
class TestKNeighborsClassifier(unittest.TestCase):
def setUp(self):
all_X, all_y = load_iris(return_X_y=True)
# 15 samples, small enough so folds are likely smaller than n_neighbors
self.train_X, self.test_X, self.train_y, self.test_y = train_test_split(
all_X, all_y, train_size=15, test_size=None, shuffle=True, random_state=42
)
def test_schema_validation(self):
trainable_16 = KNeighborsClassifier(n_neighbors=16)
with EnableSchemaValidation():
with self.assertRaises(jsonschema.ValidationError):
_ = trainable_16.fit(self.train_X, self.train_y)
trainable_15 = KNeighborsClassifier(n_neighbors=15)
trained_15 = trainable_15.fit(self.train_X, self.train_y)
_ = trained_15.predict(self.test_X)
def test_hyperopt(self):
planned = KNeighborsClassifier
trained = planned.auto_configure(
self.train_X,
self.train_y,
cv=3,
optimizer=Hyperopt,
max_evals=3,
verbose=True,
)
_ = trained.predict(self.test_X)
def test_gridsearch(self):
planned = KNeighborsClassifier
with warnings.catch_warnings():
warnings.simplefilter("ignore")
trained = planned.auto_configure(
self.train_X, self.train_y, optimizer=GridSearchCV, cv=3
)
_ = trained.predict(self.test_X)
def test_smac(self):
planned = KNeighborsClassifier
trained = planned.auto_configure(
self.train_X, self.train_y, cv=3, optimizer=SMAC, max_evals=3
)
_ = trained.predict(self.test_X)
class TestKNeighborsRegressor(unittest.TestCase):
def setUp(self):
from sklearn.datasets import load_diabetes
all_X, all_y = load_diabetes(return_X_y=True)
# 15 samples, small enough so folds are likely smaller than n_neighbors
self.train_X, self.test_X, self.train_y, self.test_y = train_test_split(
all_X, all_y, train_size=15, test_size=None, shuffle=True, random_state=42
)
def test_schema_validation(self):
trainable_16 = KNeighborsRegressor(n_neighbors=16)
with EnableSchemaValidation():
with self.assertRaises(jsonschema.ValidationError):
_ = trainable_16.fit(self.train_X, self.train_y)
trainable_15 = KNeighborsRegressor(n_neighbors=15)
trained_15 = trainable_15.fit(self.train_X, self.train_y)
_ = trained_15.predict(self.test_X)
def test_hyperopt(self):
planned = KNeighborsRegressor
trained = planned.auto_configure(
self.train_X,
self.train_y,
cv=3,
optimizer=Hyperopt,
max_evals=3,
verbose=True,
scoring="r2",
)
_ = trained.predict(self.test_X)
def test_gridsearch(self):
planned = KNeighborsRegressor
with warnings.catch_warnings():
warnings.simplefilter("ignore")
trained = planned.auto_configure(
self.train_X, self.train_y, optimizer=GridSearchCV, cv=3, scoring="r2"
)
_ = trained.predict(self.test_X)
def test_smac(self):
planned = KNeighborsRegressor
trained = planned.auto_configure(
self.train_X, self.train_y, cv=3, optimizer=SMAC, max_evals=3, scoring="r2"
)
_ = trained.predict(self.test_X)
class TestStandardScaler(unittest.TestCase):
def setUp(self):
import scipy.sparse
# from lale.datasets.data_schemas import add_schema
all_X, all_y = load_iris(return_X_y=True)
denseTrainX, self.test_X, self.train_y, self.test_y = train_test_split(
all_X, all_y, train_size=0.8, test_size=0.2, shuffle=True, random_state=42
)
# self.train_X = add_schema(scipy.sparse.csr_matrix(denseTrainX))
self.train_X = scipy.sparse.csr_matrix(denseTrainX)
def test_schema_validation(self):
trainable_okay = StandardScaler(with_mean=False) >> LogisticRegression()
_ = trainable_okay.fit(self.train_X, self.train_y)
trainable_bad = StandardScaler(with_mean=True) >> LogisticRegression()
with EnableSchemaValidation():
with self.assertRaises(jsonschema.ValidationError):
_ = trainable_bad.fit(self.train_X, self.train_y)
def test_hyperopt(self):
planned = StandardScaler >> LogisticRegression().freeze_trainable()
trained = planned.auto_configure(
self.train_X,
self.train_y,
cv=3,
optimizer=Hyperopt,
max_evals=3,
verbose=True,
scoring="r2",
)
_ = trained.predict(self.test_X)
def test_gridsearch(self):
planned = StandardScaler >> LogisticRegression().freeze_trainable()
with warnings.catch_warnings():
warnings.simplefilter("ignore")
trained = planned.auto_configure(
self.train_X, self.train_y, optimizer=GridSearchCV, cv=3, scoring="r2"
)
_ = trained.predict(self.test_X)
class TestOptimizeLast(unittest.TestCase):
def setUp(self):
X, y = load_iris(return_X_y=True)
self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(X, y)
def test_using_individual_operator(self):
lr = LogisticRegression() # Individual Operator
trained_operator = lr.fit(self.X_train, self.y_train)
# Now let's use Hyperopt to optimize the classifier
hyperopt_args = {"scoring": "accuracy", "cv": 3, "max_evals": 2}
opt_last = OptimizeLast(
estimator=trained_operator,
last_optimizer=Hyperopt,
optimizer_args=hyperopt_args,
)
res_last = opt_last.fit(self.X_train, self.y_train)
predictions = res_last.predict(self.X_test)
predictions_1 = opt_last.predict(self.X_test)
best_pipeline = res_last.get_pipeline()
self.assertEqual(type(trained_operator), type(best_pipeline))
assert np.array_equal(predictions_1, predictions)
def test_using_pipeline(self):
from sklearn.metrics import f1_score
planned_pipeline = (PCA | NoOp) >> LogisticRegression
# Let's first use Hyperopt to find the best pipeline
opt = Hyperopt(estimator=planned_pipeline, max_evals=1)
# run optimizer
res = opt.fit(self.X_train, self.y_train)
best_pipeline = res.get_pipeline()
# Now let's use Hyperopt to optimize only the
# last step (i.e., classifier) in the best pipeline
hyperopt_args = {
"scoring": make_scorer(f1_score, average="macro"),
"cv": 3,
"max_evals": 2,
}
opt_last = OptimizeLast(
estimator=best_pipeline,
last_optimizer=Hyperopt,
optimizer_args=hyperopt_args,
)
res_last = opt_last.fit(self.X_train, self.y_train)
predictions = res_last.predict(self.X_test)
predictions_1 = opt_last.predict(self.X_test)
best_pipeline2 = res_last.get_pipeline()
self.assertEqual(type(best_pipeline), type(best_pipeline2))
assert np.array_equal(predictions_1, predictions)
def test_get_named_pipeline(self):
pipeline = MinMaxScaler() >> KNeighborsClassifier()
trained_pipeline = pipeline.fit(self.X_train, self.y_train)
hyperopt_args = {"cv": 3, "max_evals": 2}
opt_last = OptimizeLast(
estimator=trained_pipeline,
last_optimizer=Hyperopt,
optimizer_args=hyperopt_args,
)
res_last = opt_last.fit(self.X_train, self.y_train)
pipeline2 = res_last.get_pipeline(pipeline_name="p1")
if pipeline2 is not None:
trained_pipeline2 = pipeline2.fit(self.X_train, self.y_train)
_ = trained_pipeline2.predict(self.X_test)
self.assertEqual(type(trained_pipeline), type(trained_pipeline2))
def test_unspecified_arguments(self):
opt = OptimizeLast(optimizer_args={"max_evals": 1}) # No arguments
res = opt.fit(self.X_train, self.y_train)
predictions = res.predict(self.X_test)
predictions_1 = opt.predict(self.X_test)
best_pipeline = res.get_pipeline()
assert np.array_equal(predictions_1, predictions)
self.assertEqual(type(best_pipeline), TrainedIndividualOp)
| 43,526 | 33.905373 | 134 |
py
|
lale
|
lale-master/test/test_sklearn_compat.py
|
# Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from typing import Any, Dict
from sklearn.base import clone
from lale.lib.lale import ConcatFeatures as Concat
from lale.operators import make_operator
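# Mock operator implementation whose fit and predict assert that they run at
# most once per instance; the clone tests below use it to detect state that is
# shared between an operator and its clone.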
class _MutatingOpImpl:
fit_counter: int
predict_counter: int
def __init__(self, k=0):
self.fit_counter = 0
self.predict_counter = 0
self.k = k
def fit(self, X, y=None):
assert self.fit_counter == 0
self.fit_counter = self.fit_counter + 1
return self
def predict(self, X, y=None):
assert self.predict_counter == 0
self.predict_counter = self.predict_counter + 1
return [[1] for x in X]
def get_params(self, deep: bool = True) -> Dict[str, Any]:
out: Dict[str, Any] = {}
out["k"] = self.k
return out
def set_params(self, **impl_params):
self.k = impl_params["k"]
return self
# def transform(self, X, y = None):
# return X, y
_input_schema_fit = {"$schema": "http://json-schema.org/draft-04/schema#"}
_input_predict_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
}
_output_predict_schema = {"$schema": "http://json-schema.org/draft-04/schema#"}
_hyperparam_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"allOf": [
{
"description": "This first sub-object lists all constructor arguments with their "
"types, one at a time, omitting cross-argument constraints.",
"type": "object",
"additionalProperties": False,
"relevantToOptimizer": [],
"properties": {"k": {"type": "number"}},
}
],
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Combined schema for expected data and hyperparameters.",
"type": "object",
"tags": {"pre": [], "op": ["estimator"], "post": []},
"properties": {
"hyperparams": _hyperparam_schema,
"input_fit": _input_schema_fit,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
},
}
MutatingOp = make_operator(_MutatingOpImpl, _combined_schemas)
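# Fit an operator, clone it with sklearn.base.clone, then fit the clone; the
# counters in _MutatingOpImpl trip an assertion if the clone shares fitted state.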
def fit_clone_fit(op):
op1 = op
op1.fit(X=[1, 2], y=[1, 2])
op2 = clone(op1)
fit2 = op2.fit(X=[3, 4], y=[3, 4])
print(fit2)
class TestClone(unittest.TestCase):
def test_clone_clones_op(self):
op = MutatingOp(k=1)
fit_clone_fit(op)
def test_clone_clones_seq(self):
op = MutatingOp(k=1) >> MutatingOp(k=2)
fit_clone_fit(op)
def test_clone_clones_and(self):
op = MutatingOp(k=1) & MutatingOp(k=2)
fit_clone_fit(op)
def test_clone_clones_concat(self):
_ = ((MutatingOp(k=1) & MutatingOp(k=2))) >> Concat | MutatingOp(k=4)
def test_clone_clones_choice(self):
op = MutatingOp(k=1) | MutatingOp(k=2)
fit_clone_fit(op)
def test_clone_clones_complex(self):
op = (
(MutatingOp(k=1) | ((MutatingOp(k=2) & MutatingOp(k=3)) >> Concat))
>> MutatingOp(k=4)
) | MutatingOp(k=5)
fit_clone_fit(op)
| 3,697 | 28.349206 | 94 |
py
|
lale
|
lale-master/test/test_snapml.py
|
# Copyright 2020,2021 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import sklearn.datasets
import sklearn.metrics
class TestSnapMLClassifiers(unittest.TestCase):
def setUp(self):
from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import train_test_split
X, y = load_breast_cancer(return_X_y=True)
self.train_X, self.test_X, self.train_y, self.test_y = train_test_split(X, y)
def test_without_lale(self):
import snapml # type: ignore
clf = snapml.RandomForestClassifier()
self.assertIsInstance(clf, snapml.RandomForestClassifier)
fit_result = clf.fit(self.train_X, self.train_y)
self.assertIsInstance(fit_result, snapml.RandomForestClassifier)
for metric in [sklearn.metrics.accuracy_score, sklearn.metrics.roc_auc_score]:
scorer = sklearn.metrics.make_scorer(metric)
_ = scorer(clf, self.test_X, self.test_y)
def test_decision_tree_classifier(self):
import snapml
import lale.lib.snapml
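        # exercise both default construction and construction from the native
        # snapml estimator's get_params()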
for params in [{}, snapml.SnapDecisionTreeClassifier().get_params()]:
trainable = lale.lib.snapml.SnapDecisionTreeClassifier(**params)
trained = trainable.fit(self.train_X, self.train_y)
for metric in [
sklearn.metrics.accuracy_score,
sklearn.metrics.roc_auc_score,
]:
scorer = sklearn.metrics.make_scorer(metric)
_ = scorer(trained, self.test_X, self.test_y)
def test_random_forest_classifier(self):
import snapml
import lale.lib.snapml
for params in [{}, snapml.SnapRandomForestClassifier().get_params()]:
trainable = lale.lib.snapml.SnapRandomForestClassifier(**params)
trained = trainable.fit(self.train_X, self.train_y)
for metric in [
sklearn.metrics.accuracy_score,
sklearn.metrics.roc_auc_score,
]:
scorer = sklearn.metrics.make_scorer(metric)
_ = scorer(trained, self.test_X, self.test_y)
def test_boosting_machine_classifier(self):
import snapml
import lale.lib.snapml
for params in [{}, snapml.SnapBoostingMachineClassifier().get_params()]:
trainable = lale.lib.snapml.SnapBoostingMachineClassifier(**params)
trained = trainable.fit(self.train_X, self.train_y)
for metric in [
sklearn.metrics.accuracy_score,
sklearn.metrics.roc_auc_score,
]:
scorer = sklearn.metrics.make_scorer(metric)
_ = scorer(trained, self.test_X, self.test_y)
def test_logistic_regression(self):
import snapml
import lale.lib.snapml
for params in [{}, snapml.SnapLogisticRegression().get_params()]:
trainable = lale.lib.snapml.SnapLogisticRegression(**params)
trained = trainable.fit(self.train_X, self.train_y)
for metric in [
sklearn.metrics.accuracy_score,
sklearn.metrics.roc_auc_score,
]:
scorer = sklearn.metrics.make_scorer(metric)
_ = scorer(trained, self.test_X, self.test_y)
def test_support_vector_machine(self):
import snapml
import lale.lib.snapml
for params in [{}, snapml.SnapSVMClassifier().get_params()]:
trainable = lale.lib.snapml.SnapSVMClassifier(**params)
trained = trainable.fit(self.train_X, self.train_y)
for metric in [
sklearn.metrics.accuracy_score,
sklearn.metrics.roc_auc_score,
]:
scorer = sklearn.metrics.make_scorer(metric)
_ = scorer(trained, self.test_X, self.test_y)
def test_batched_tree_ensemble_classifier(self):
# import snapml
import lale.lib.snapml
# for params in [{}, snapml.BatchedTreeEnsembleClassifier().get_params()]:
for params in [{}]:
trainable = lale.lib.snapml.BatchedTreeEnsembleClassifier(**params)
trained = trainable.fit(self.train_X, self.train_y)
for metric in [
sklearn.metrics.accuracy_score,
sklearn.metrics.roc_auc_score,
]:
scorer = sklearn.metrics.make_scorer(metric)
_ = scorer(trained, self.test_X, self.test_y)
trainable = lale.lib.snapml.BatchedTreeEnsembleClassifier(**params)
trained = trainable.partial_fit(self.train_X, self.train_y, classes=[0, 1])
for metric in [
sklearn.metrics.accuracy_score,
sklearn.metrics.roc_auc_score,
]:
scorer = sklearn.metrics.make_scorer(metric)
_ = scorer(trained, self.test_X, self.test_y)
class TestSnapMLRegressors(unittest.TestCase):
def setUp(self):
from sklearn.datasets import load_diabetes
from sklearn.model_selection import train_test_split
X, y = load_diabetes(return_X_y=True)
self.train_X, self.test_X, self.train_y, self.test_y = train_test_split(X, y)
def test_decision_tree_regressor(self):
import snapml
import lale.lib.snapml
for params in [{}, snapml.SnapDecisionTreeRegressor().get_params()]:
trainable = lale.lib.snapml.SnapDecisionTreeRegressor(**params)
trained = trainable.fit(self.train_X, self.train_y)
scorer = sklearn.metrics.make_scorer(sklearn.metrics.r2_score)
_ = scorer(trained, self.test_X, self.test_y)
def test_linear_regression(self):
import snapml
import lale.lib.snapml
for params in [{}, snapml.LinearRegression().get_params()]:
trainable = lale.lib.snapml.SnapLinearRegression(**params)
trained = trainable.fit(self.train_X, self.train_y)
scorer = sklearn.metrics.make_scorer(sklearn.metrics.r2_score)
_ = scorer(trained, self.test_X, self.test_y)
def test_random_forest_regressor(self):
import snapml
import lale.lib.snapml
for params in [{}, snapml.SnapRandomForestRegressor().get_params()]:
trainable = lale.lib.snapml.SnapRandomForestRegressor(**params)
trained = trainable.fit(self.train_X, self.train_y)
scorer = sklearn.metrics.make_scorer(sklearn.metrics.r2_score)
_ = scorer(trained, self.test_X, self.test_y)
def test_boosting_machine_regressor(self):
import snapml
import lale.lib.snapml
for params in [{}, snapml.SnapBoostingMachineRegressor().get_params()]:
trainable = lale.lib.snapml.SnapBoostingMachineRegressor(**params)
trained = trainable.fit(self.train_X, self.train_y)
scorer = sklearn.metrics.make_scorer(sklearn.metrics.r2_score)
_ = scorer(trained, self.test_X, self.test_y)
| 7,525 | 38.403141 | 87 |
py
|
lale
|
lale-master/test/test_grammar.py
|
import unittest
import lale.datasets
from lale.grammar import Grammar
from lale.lib.lale import ConcatFeatures as Concat
from lale.lib.lale import Hyperopt, NoOp
from lale.lib.sklearn import PCA
from lale.lib.sklearn import AdaBoostClassifier as Boost
from lale.lib.sklearn import KNeighborsClassifier as KNN
from lale.lib.sklearn import LogisticRegression as LR
from lale.lib.sklearn import StandardScaler as Scaler
from lale.operators import PlannedOperator, PlannedPipeline, TrainedOperator
class TestGrammar(unittest.TestCase):
def setUp(self):
(
(self.train_X, self.train_y),
(self.test_X, self.test_y),
) = lale.datasets.load_iris_df()
def test_grammar_simple(self):
g = Grammar()
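        # recursive grammar: an estimator is an optional transformer chain
        # followed by a primitive estimator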
g.start = g.estimator
g.estimator = (NoOp | g.transformer) >> g.prim_est
g.transformer = (NoOp | g.transformer) >> g.prim_tfm
g.prim_est = LR | KNN
g.prim_tfm = PCA | Scaler
generated = g.unfold(6)
sample = g.sample(6)
# unfold and sample return a PlannedOperator
assert isinstance(generated, PlannedOperator)
assert isinstance(sample, PlannedOperator)
# test getter for methods other than Nonterminal
if isinstance(generated, PlannedPipeline):
assert generated._name.startswith("pipeline")
try:
gtrainer = Hyperopt(estimator=generated, max_evals=3, scoring="r2")
gtrained = gtrainer.fit(self.train_X, self.train_y)
assert isinstance(gtrained.get_pipeline(), TrainedOperator)
except ValueError:
# None of the trials succeeded
pass
try:
strainer = Hyperopt(estimator=sample, max_evals=3, scoring="r2")
strained = strainer.fit(self.train_X, self.train_y)
assert isinstance(strained.get_pipeline(), TrainedOperator)
except ValueError:
# None of the trials succeeded
pass
def test_grammar_all_combinator(self):
g = Grammar()
g.start = g.estimator
g.estimator = g.term_est | g.transformer >> g.term_est
g.term_est = g.prim_est | g.ensemble
g.ensemble = Boost(base_estimator=LR)
g.transformer = g.union_tfm | g.union_tfm >> g.transformer
g.union_tfm = g.prim_tfm | g.union_body >> Concat
g.union_body = g.transformer | g.transformer & g.union_body
g.prim_est = LR | KNN
g.prim_tfm = PCA | Scaler
g.ensembler = Boost
generated = g.unfold(7)
sample = g.sample(7)
assert isinstance(generated, PlannedOperator)
assert isinstance(sample, PlannedOperator)
# Train
try:
gtrainer = Hyperopt(estimator=generated, max_evals=3, scoring="r2")
gtrained = gtrainer.fit(self.train_X, self.train_y)
assert isinstance(gtrained.get_pipeline(), TrainedOperator)
except ValueError:
# None of the trials succeeded
pass
try:
strainer = Hyperopt(estimator=sample, max_evals=3, scoring="r2")
strained = strainer.fit(self.train_X, self.train_y)
assert isinstance(strained.get_pipeline(), TrainedOperator)
except ValueError:
# None of the trials succeeded
pass
| 3,332 | 34.457447 | 79 |
py
|
lale
|
lale-master/test/mock_module.py
|
# Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sklearn.neighbors
# class that follows scikit-learn conventions but lacks schemas,
# for the purpose of testing how to wrap an operator without schemas
class UnknownOp:
def __init__(self, n_neighbors=5, algorithm="auto"):
self._hyperparams = {"n_neighbors": n_neighbors, "algorithm": algorithm}
def get_params(self, deep: bool = False):
return self._hyperparams
    def fit(self, X, y):
        self._wrapped_model = sklearn.neighbors.KNeighborsClassifier(
            **self._hyperparams
        )
        self._wrapped_model.fit(X, y)
        return self
def predict(self, X, **predict_params):
return self._wrapped_model.predict(X, **predict_params)
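# classifier whose fit and predict methods always fail, for exercising error
# paths when a wrapped operator misbehaves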
class BadClassifier:
def __init__(self, is_good=False):
self._hyperparams = {"is_good": is_good}
def get_params(self, deep: bool = False):
return self._hyperparams
def fit(self, X, y):
assert False, "Bad fit method."
def predict(self, X):
assert False, "Bad predict method."
class CustomOrigOperator:
def __init__(self):
pass
def fit(self, X, y=None, **kwargs):
return self
def predict(self, X, **predict_params):
self._predict_params = predict_params
@classmethod
def _get_lale_operator(cls):
from .mock_custom_operators import CustomOrigOperator as wrapped_custom_operator
return wrapped_custom_operator
| 1,945 | 28.484848 | 88 |
py
|
lale
|
lale-master/test/test_nlp_operators.py
|
import unittest
import lale.type_checking
class TestTextEncoders(unittest.TestCase):
def setUp(self):
self.X_train = [
"Boston locates in the East Coast",
"Boston Celtics is part of the East conference of NBA",
"Cambridge is part of the Greater Boston Area",
"Manhattan is located in the lower part of NYC",
"People worked at New York city usually lives in New Jersey Area"
"The financial center in the world is New York",
]
self.y_train = [0, 0, 0, 1, 1, 1]
def create_function_test_encoder(encoder_name):
def test_encoder(self):
import importlib
module_name = ".".join(encoder_name.split(".")[0:-1])
class_name = encoder_name.split(".")[-1]
module = importlib.import_module(module_name)
class_ = getattr(module, class_name)
encoder = class_()
# test_schemas_are_schemas
lale.type_checking.validate_is_schema(encoder.input_schema_fit())
lale.type_checking.validate_is_schema(encoder.input_schema_transform())
lale.type_checking.validate_is_schema(encoder.output_schema_transform())
lale.type_checking.validate_is_schema(encoder.hyperparam_schema())
# test_init_fit_transform
trained = encoder.fit(self.X_train, self.y_train)
_ = trained.transform(self.X_train)
test_encoder.__name__ = f"test_{encoder_name.split('.')[-1]}"
return test_encoder
encoders = ["lale.lib.tensorflow.USEPretrainedEncoder"]
for encoder_to_test in encoders:
setattr(
TestTextEncoders,
f"test_{encoder_to_test.rsplit('.', maxsplit=1)[-1]}",
create_function_test_encoder(encoder_to_test),
)
| 1,733 | 32.346154 | 80 |
py
|
lale
|
lale-master/lale/operators.py
|
# Copyright 2019-2022 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes for Lale operators including individual operators, pipelines, and operator choice.
This module declares several functions for constructing individual
operators, pipelines, and operator choices.
- Functions `make_pipeline`_ and `Pipeline`_ compose linear sequential
pipelines, where each step has an edge to the next step. Instead of
these functions you can also use the `>>` combinator.
- Functions `make_union_no_concat`_ and `make_union`_ compose
pipelines that operate over the same data without edges between
their steps. Instead of these functions you can also use the `&`
combinator.
- Function `make_choice`_ creates an operator choice. Instead of this
function you can also use the `|` combinator.
- Function `make_pipeline_graph`_ creates a pipeline from
steps and edges, thus supporting any arbitrary acyclic directed
graph topology.
- Function `make_operator`_ creates an individual Lale operator from a
schema and an implementation class or object. This is called for each
of the operators in module lale.lib when it is being imported.
- Functions `get_available_operators`_, `get_available_estimators`_,
and `get_available_transformers`_ return lists of individual
operators previously registered by `make_operator`.
.. _make_operator: lale.operators.html#lale.operators.make_operator
.. _get_available_operators: lale.operators.html#lale.operators.get_available_operators
.. _get_available_estimators: lale.operators.html#lale.operators.get_available_estimators
.. _get_available_transformers: lale.operators.html#lale.operators.get_available_transformers
.. _make_pipeline_graph: lale.operators.html#lale.operators.make_pipeline_graph
.. _make_pipeline: lale.operators.html#lale.operators.make_pipeline
.. _Pipeline: lale.operators.html#lale.operators.Pipeline
.. _make_union_no_concat: lale.operators.html#lale.operators.make_union_no_concat
.. _make_union: lale.operators.html#lale.operators.make_union
.. _make_choice: lale.operators.html#lale.operators.make_choice
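As a small illustrative sketch (not a normative part of this module
documentation), the combinators build the same structures as the functions
listed above::

    from lale.lib.lale import ConcatFeatures, NoOp
    from lale.lib.sklearn import PCA, Nystroem, LogisticRegression, KNeighborsClassifier

    seq = PCA >> LogisticRegression                                     # pipe combinator
    union = (PCA & Nystroem) >> ConcatFeatures >> LogisticRegression    # and combinator
    plan = (PCA | NoOp) >> (LogisticRegression | KNeighborsClassifier)  # or combinator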
The root of the hierarchy is the abstract class Operator_; all other
Lale operators inherit from this class, either directly or indirectly.
- The abstract classes Operator_, PlannedOperator_,
TrainableOperator_, and TrainedOperator_ correspond to lifecycle
states.
- The concrete classes IndividualOp_, PlannedIndividualOp_,
TrainableIndividualOp_, and TrainedIndividualOp_ inherit from the
corresponding abstract operator classes and encapsulate
implementations of individual operators from machine-learning
libraries such as scikit-learn.
- The concrete classes BasePipeline_, PlannedPipeline_,
TrainablePipeline_, and TrainedPipeline_ inherit from the
corresponding abstract operator classes and represent directed
acyclic graphs of operators. The steps of a pipeline can be any
operators, including individual operators, other pipelines, or
operator choices, whose lifecycle state is at least that of the
pipeline.
- The concrete class OperatorChoice_ represents a planned operator
that offers a choice for automated algorithm selection. The steps of
a choice can be any planned operators, including individual
operators, pipelines, or other operator choices.
The following picture illustrates the core operator class hierarchy.
.. image:: ../../docs/img/operator_classes.png
:alt: operators class hierarchy
.. _BasePipeline: lale.operators.html#lale.operators.BasePipeline
.. _IndividualOp: lale.operators.html#lale.operators.IndividualOp
.. _Operator: lale.operators.html#lale.operators.Operator
.. _OperatorChoice: lale.operators.html#lale.operators.OperatorChoice
.. _PlannedIndividualOp: lale.operators.html#lale.operators.PlannedIndividualOp
.. _PlannedOperator: lale.operators.html#lale.operators.PlannedOperator
.. _PlannedPipeline: lale.operators.html#lale.operators.PlannedPipeline
.. _TrainableIndividualOp: lale.operators.html#lale.operators.TrainableIndividualOp
.. _TrainableOperator: lale.operators.html#lale.operators.TrainableOperator
.. _TrainablePipeline: lale.operators.html#lale.operators.TrainablePipeline
.. _TrainedIndividualOp: lale.operators.html#lale.operators.TrainedIndividualOp
.. _TrainedOperator: lale.operators.html#lale.operators.TrainedOperator
.. _TrainedPipeline: lale.operators.html#lale.operators.TrainedPipeline
scikit-learn compatibility:
---------------------------
Lale operators attempt to behave like reasonable scikit-learn operators when possible.
In particular, operators support:
- get_params to return the hyperparameter settings for an operator.
- set_params for updating them (in-place). This is only supported by TrainableIndividualOps and Pipelines.
  Note that while set_params is supported for
  compatibility, its use is not encouraged, since it mutates the operator in-place.
Instead, we recommend using with_params, a functional alternative that is supported by all
operators. It returns a new operator with updated parameters.
- sklearn.base.clone works for Lale operators, cloning them as expected.
Note that cloning a TrainedOperator will return a TrainableOperator, since
the cloned version does not have the result of training.
There also some known differences (that we are not currently planning on changing):
- Lale operators do not inherit from any sklearn base class.
- The Operator class constructors do not explicitly declare their set of hyperparameters.
  However, they do implement get_params (just not using sklearn-style reflection).
There may also be other incompatibilities: our testing currently focuses on ensuring that clone works.
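As an illustrative sketch only (the exact contents of the returned dictionary
are glossed over here), the scikit-learn style entry points look like this::

    from sklearn.base import clone
    from lale.lib.sklearn import LogisticRegression

    trainable = LogisticRegression(C=0.1)
    settings = trainable.get_params()            # current hyperparameter settings
    fresh_copy = clone(trainable)                # unfitted copy of the operator
    reconfigured = trainable.with_params(C=1.0)  # new operator; the original is unchanged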
parameter path format:
^^^^^^^^^^^^^^^^^^^^^^
scikit-learn uses a simple addressing scheme to refer to nested hyperparameters: `name__param` refers to the
`param` hyperparameter nested under the `name` object.
Since lale supports richer structures, we conservatively extend this scheme as follows:
* `__` : separates nested components (as-in sklearn).
* `?` : is the discriminant (choice made) for a choice.
* `?` : is also a prefix for the nested parts of the chosen branch.
* `x@n` : In a pipeline, if multiple components have identical names,
  all but the first are suffixed with a number (starting with 1)
indicating which one we are talking about.
For example, given `(x >> y >> x)`, we would treat this much the same as
`(x >> y >> x@1)`.
* `$` : is used in the rare case that sklearn would expect the key of an object,
but we allow (and have) a non-object schema. In that case, $ is used as the key.
This should only happen at the top level, since nested occurrences should be removed.
* `#` : is a structure indicator, and the value should be one of 'list', 'tuple', or 'dict'.
* `n` : is used to represent the nth component in an array or tuple.
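As a purely illustrative sketch, reusing the abstract step names from the
`x@n` rule above: for a pipeline `(x >> y >> x)` whose steps each have a
hyperparameter `p`, the flattened parameter paths would be::

    x__p       (p of the first x)
    y__p       (p of y)
    x@1__p     (p of the second x)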
"""
import copy
import difflib
import enum as enumeration
import importlib
import inspect
import itertools
import logging
import os
import sys
import warnings
from abc import abstractmethod
from types import MappingProxyType
from typing import (
AbstractSet,
Any,
Dict,
Generic,
Iterable,
List,
Mapping,
Optional,
Set,
Text,
Tuple,
Type,
TypeVar,
Union,
cast,
overload,
)
import jsonschema
import pandas as pd
import sklearn
import sklearn.base
from packaging import version
from sklearn.base import clone
import lale.datasets.data_schemas
import lale.json_operator
import lale.pretty_print
from lale import schema2enums as enum_gen
from lale.datasets.data_schemas import (
NDArrayWithSchema,
_to_schema,
add_schema,
strip_schema,
)
from lale.helpers import (
append_batch,
are_hyperparameters_equal,
assignee_name,
astype_type,
fold_schema,
get_name_and_index,
is_empty_dict,
is_numeric_structure,
make_degen_indexed_name,
make_indexed_name,
nest_HPparams,
partition_sklearn_choice_params,
partition_sklearn_params,
structure_type_name,
to_graphviz,
val_wrapper,
)
from lale.json_operator import JSON_TYPE
from lale.schemas import Schema
from lale.search.PGO import remove_defaults_dict
from lale.type_checking import (
SubschemaError,
get_default_schema,
has_data_constraints,
is_subschema,
join_schemas,
replace_data_constraints,
validate_is_schema,
validate_method,
validate_schema,
validate_schema_directly,
)
from lale.util.VisitorMeta import AbstractVisitorMeta
sklearn_version = version.parse(getattr(sklearn, "__version__"))
if sys.version_info >= (3, 8):
from typing import Literal # raises a mypy error for <3.8
else:
from typing_extensions import Literal
try:
from sklearn.pipeline import ( # pylint:disable=ungrouped-imports
if_delegate_has_method,
)
except ImportError as imp_exc:
if sklearn_version >= version.Version("1.0"):
from sklearn.utils.metaestimators import if_delegate_has_method
else:
raise imp_exc
logger = logging.getLogger(__name__)
_LALE_SKL_PIPELINE = "lale.lib.sklearn.pipeline._PipelineImpl"
_combinators_docstrings = """
Methods
-------
step_1 >> step_2 -> PlannedPipeline
Pipe combinator, create two-step pipeline with edge from step_1 to step_2.
If step_1 is a pipeline, create edges from all of its sinks.
If step_2 is a pipeline, create edges to all of its sources.
Parameters
^^^^^^^^^^
step_1 : Operator
The origin of the edge(s).
step_2 : Operator
The destination of the edge(s).
Returns
^^^^^^^
BasePipeline
Pipeline with edge from step_1 to step_2.
step_1 & step_2 -> PlannedPipeline
And combinator, create two-step pipeline without an edge between step_1 and step_2.
Parameters
^^^^^^^^^^
step_1 : Operator
The first step.
step_2 : Operator
The second step.
Returns
^^^^^^^
BasePipeline
Pipeline without any additional edges beyond those already inside of step_1 or step_2.
step_1 | step_2 -> OperatorChoice
Or combinator, create operator choice between step_1 and step_2.
Parameters
^^^^^^^^^^
step_1 : Operator
The first step.
step_2 : Operator
The second step.
Returns
^^^^^^^
OperatorChoice
        Algorithmic choice between step_1 and step_2."""
class Operator(metaclass=AbstractVisitorMeta):
"""Abstract base class for all Lale operators.
Pipelines and individual operators extend this."""
_name: str
def __and__(self, other: Union[Any, "Operator"]) -> "PlannedPipeline":
return make_union_no_concat(self, other)
def __rand__(self, other: Union[Any, "Operator"]) -> "PlannedPipeline":
return make_union_no_concat(other, self)
def __rshift__(self, other: Union[Any, "Operator"]) -> "PlannedPipeline":
return make_pipeline(self, other)
def __rrshift__(self, other: Union[Any, "Operator"]) -> "PlannedPipeline":
return make_pipeline(other, self)
def __or__(self, other: Union[Any, "Operator"]) -> "OperatorChoice":
return make_choice(self, other)
def __ror__(self, other: Union[Any, "Operator"]) -> "OperatorChoice":
return make_choice(other, self)
def name(self) -> str:
"""Get the name of this operator instance."""
return self._name
def _set_name(self, name: str):
"""Set the name of this operator instance."""
self._name = name
def class_name(self) -> str:
"""Fully qualified Python class name of this operator."""
cls = self.__class__
return cls.__module__ + "." + cls.__name__ # type: ignore
@abstractmethod
def validate_schema(self, X: Any, y: Any = None):
"""Validate that X and y are valid with respect to the input schema of this operator.
Parameters
----------
X :
Features.
y :
Target class labels or None for unsupervised operators.
Raises
------
ValueError
If X or y are invalid as inputs."""
pass
@abstractmethod
def transform_schema(self, s_X: JSON_TYPE) -> JSON_TYPE:
"""Return the output schema given the input schema.
Parameters
----------
s_X :
Input dataset or schema.
Returns
-------
JSON schema
Schema of the output data given the input data schema."""
pass
@abstractmethod
def input_schema_fit(self) -> JSON_TYPE:
"""Input schema for the fit method."""
pass
def to_json(self) -> JSON_TYPE:
"""Returns the JSON representation of the operator.
Returns
-------
JSON document
JSON representation that describes this operator and is valid with respect to lale.json_operator.SCHEMA.
"""
return lale.json_operator.to_json(self, call_depth=2)
def get_forwards(self) -> Union[bool, List[str]]:
"""Returns the list of attributes (methods/properties)
the schema has asked to be forwarded. A boolean value is a blanket
        opt-in or opt-out of forwarding.
"""
return False
@abstractmethod
def get_params(self, deep: bool = True) -> Dict[str, Any]:
"""For scikit-learn compatibility"""
pass
def visualize(self, ipython_display: bool = True):
"""Visualize the operator using graphviz (use in a notebook).
Parameters
----------
ipython_display : bool, default True
If True, proactively ask Jupyter to render the graph.
Otherwise, the graph will only be rendered when visualize()
was called in the last statement in a notebook cell.
Returns
-------
Digraph
Digraph object from the graphviz package.
"""
return to_graphviz(self, ipython_display, call_depth=2)
@overload
def pretty_print(
self,
*,
show_imports: bool = True,
combinators: bool = True,
assign_nested: bool = True,
customize_schema: bool = False, # pylint:disable=redefined-outer-name
astype: astype_type = "lale",
ipython_display: Literal[False] = False,
) -> str:
...
@overload
def pretty_print(
self,
*,
show_imports: bool = True,
combinators: bool = True,
assign_nested: bool = True,
customize_schema: bool = False, # pylint:disable=redefined-outer-name
astype: astype_type = "lale",
ipython_display: Union[bool, Literal["input"]] = False,
) -> Optional[str]:
...
def pretty_print(
self,
*,
show_imports: bool = True,
combinators: bool = True,
assign_nested: bool = True,
customize_schema: bool = False, # pylint:disable=redefined-outer-name
astype: astype_type = "lale",
ipython_display: Union[bool, Literal["input"]] = False,
) -> Optional[str]:
"""Returns the Python source code representation of the operator.
Parameters
----------
show_imports : bool, default True
Whether to include import statements in the pretty-printed code.
combinators : bool, default True
If True, pretty-print with combinators (`>>`, `|`, `&`). Otherwise, pretty-print with functions (`make_pipeline`, `make_choice`, `make_union`) instead. Always False when astype is 'sklearn'.
assign_nested : bool, default True
If True, then nested operators, such as the base estimator for an ensemble, get assigned to fresh intermediate variables if configured with non-trivial arguments of their own.
customize_schema : bool, default False
If True, then individual operators whose schema differs from the lale.lib version of the operator will be printed with calls to `customize_schema` that reproduce this difference.
astype : union type, default 'lale'
- 'lale'
                Use `lale.operators.make_pipeline` and `lale.operators.make_union` when pretty-printing with functions.
- 'sklearn'
Set combinators to False and use `sklearn.pipeline.make_pipeline` and `sklearn.pipeline.make_union` for pretty-printed functions.
ipython_display : union type, default False
- False
Return the pretty-printed code as a plain old Python string.
- True
Pretty-print in notebook cell output with syntax highlighting.
- 'input'
Create a new notebook cell with pretty-printed code as input.
Returns
-------
str or None
If called with ipython_display=False, return pretty-printed Python source code as a Python string.
"""
result = lale.pretty_print.to_string(
self,
show_imports=show_imports,
combinators=combinators,
customize_schema=customize_schema,
assign_nested=assign_nested,
astype=astype,
call_depth=2,
)
if ipython_display is False:
return result
elif ipython_display == "input":
import IPython.core
ipython = IPython.core.getipython.get_ipython()
comment = "# generated by pretty_print(ipython_display='input') from previous cell\n"
ipython.set_next_input(comment + result, replace=False)
return None
else:
assert ipython_display in [True, "output"]
import IPython.display
markdown = IPython.display.Markdown(f"```python\n{result}\n```")
IPython.display.display(markdown)
return None
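# Illustrative sketch (not part of the source): a typical pretty_print call.
# Assumes the PCA and LogisticRegression wrappers from lale.lib.sklearn.
#
#     from lale.lib.sklearn import PCA, LogisticRegression
#     pipeline = PCA() >> LogisticRegression(C=0.1)
#     code = pipeline.pretty_print(ipython_display=False)
#     print(code)  # Python source code that reconstructs the pipeline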
@overload
def diff(
self,
other: "Operator",
show_imports: bool = True,
customize_schema: bool = False, # pylint:disable=redefined-outer-name
ipython_display: Literal[False] = False,
) -> str:
...
@overload
def diff(
self,
other: "Operator",
show_imports: bool = True,
customize_schema: bool = False, # pylint:disable=redefined-outer-name
ipython_display: bool = False,
) -> Optional[str]:
...
def diff(
self,
other: "Operator",
show_imports: bool = True,
customize_schema: bool = False, # pylint:disable=redefined-outer-name
ipython_display: bool = False,
) -> Optional[str]:
"""Displays a diff between this operator and the given other operator.
Parameters
----------
other: Operator
Operator to diff against
show_imports : bool, default True
Whether to include import statements in the pretty-printed code.
customize_schema : bool, default False
If True, then individual operators whose schema differs from the lale.lib version of the operator will be printed with calls to `customize_schema` that reproduce this difference.
ipython_display : bool, default False
If True, will display Markdown-formatted diff string in Jupyter notebook.
If False, returns pretty-printing diff as Python string.
Returns
-------
str or None
If called with ipython_display=False, return pretty-printed diff as a Python string.
"""
self_str = self.pretty_print(
customize_schema=customize_schema,
show_imports=show_imports,
ipython_display=False,
)
self_lines = self_str.splitlines()
other_str = other.pretty_print(
customize_schema=customize_schema,
show_imports=show_imports,
ipython_display=False,
)
other_lines = other_str.splitlines()
differ = difflib.Differ()
compare = differ.compare(self_lines, other_lines)
compare_str = "\n".join(compare)
if not ipython_display:
return compare_str
else:
import IPython.display
markdown = IPython.display.Markdown(f"```diff\n{compare_str}\n```")
IPython.display.display(markdown)
return None
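# Illustrative sketch: diffing two variants of the same pipeline. Assumes the
# lale.lib.sklearn wrappers; the exact text depends on the pretty_print output.
#
#     from lale.lib.sklearn import PCA, LogisticRegression
#     p1 = PCA() >> LogisticRegression(C=0.1)
#     p2 = PCA() >> LogisticRegression(C=1.0)
#     print(p1.diff(p2))  # difflib-style output marking changed lines with -/+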
@abstractmethod
def _has_same_impl(self, other: "Operator") -> bool:
"""Checks if the type of the operator implementations are compatible"""
pass
@abstractmethod
def is_supervised(self) -> bool:
"""Checks if this operator needs labeled data for learning.
Returns
-------
bool
True if the fit method requires a y argument.
"""
pass
@abstractmethod
def is_classifier(self) -> bool:
"""Checks if this operator is a clasifier.
Returns
-------
bool
True if the classifier tag is set.
"""
pass
def is_frozen_trainable(self) -> bool:
"""Return true if all hyperparameters are bound, in other words,
search spaces contain no free hyperparameters to be tuned.
"""
return False
def is_frozen_trained(self) -> bool:
"""Return true if all learnable coefficients are bound, in other
words, there are no free parameters to be learned by fit.
"""
return False
@property
def _final_individual_op(self) -> Optional["IndividualOp"]:
return None
@property
def _final_estimator(self) -> Any:
op: Optional[IndividualOp] = self._final_individual_op
model = None
if op is not None:
model = op.impl
return "passthrough" if model is None else model
@property
def classes_(self):
return self._final_estimator.classes_
@property
def n_classes_(self):
return self._final_estimator.n_classes_
@property
def _get_tags(self):
return self._final_estimator._get_tags
@property
def coef_(self):
return self._final_estimator.coef_
@property
def feature_importances_(self):
return self._final_estimator.feature_importances_
def get_param_ranges(self) -> Tuple[Dict[str, Any], Dict[str, Any]]:
"""Returns two dictionaries, ranges and cat_idx, for hyperparameters.
The ranges dictionary has two kinds of entries. Entries for
numeric and Boolean hyperparameters are tuples of the form
(min, max, default). Entries for categorical hyperparameters
are lists of their values.
The cat_idx dictionary has (min, max, default) entries of indices
into the corresponding list of values.
Warning: ignores side constraints and unions."""
op: Optional[IndividualOp] = self._final_individual_op
if op is None:
raise ValueError("This pipeline does not end with an individual operator")
return op.get_param_ranges()
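# Illustrative sketch of the two dictionaries returned by get_param_ranges.
# Assumes the LogisticRegression wrapper from lale.lib.sklearn; the concrete
# entries depend on that operator's hyperparameter schema.
#
#     from lale.lib.sklearn import LogisticRegression
#     ranges, cat_idx = LogisticRegression.get_param_ranges()
#     # a numeric hyperparameter such as "C" maps to a (min, max, default) tuple,
#     # a categorical one such as "solver" maps to a list of values, and
#     # cat_idx["solver"] holds (min, max, default) indices into that list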
def get_param_dist(self, size=10) -> Dict[str, List[Any]]:
"""Returns a dictionary for discretized hyperparameters.
Each entry is a list of values. For continuous hyperparameters,
it returns up to `size` uniformly distributed values.
Warning: ignores side constraints, unions, and distributions."""
op: Optional[IndividualOp] = self._final_individual_op
if op is None:
raise ValueError("This pipeline does not end with an individual operator")
return op.get_param_dist(size=size)
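# Illustrative sketch: discretized values from get_param_dist, again assuming
# the LogisticRegression wrapper from lale.lib.sklearn.
#
#     dist = LogisticRegression.get_param_dist(size=5)
#     # a continuous hyperparameter maps to up to 5 evenly spaced values,
#     # a categorical one maps to the list of its choices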
# should this be abstract? what do we do for grammars?
def get_defaults(self) -> Mapping[str, Any]:
return {}
def clone(self) -> "Operator":
"""Return a copy of this operator, with the same hyper-parameters but without training data
This behaves the same as calling sklearn.base.clone(self)
"""
cp = clone(self)
return cp
def replace(
self, original_op: "Operator", replacement_op: "Operator"
) -> "Operator":
"""Replaces an original operator with a replacement operator for the given operator.
Replacement also occurs for all operators within the given operator's steps (i.e. pipelines and
choices). If a planned operator is given as original_op, all derived operators (including
trainable and trained versions) will be replaced. Otherwise, only the exact operator
instance will be replaced.
Parameters
----------
original_op :
Operator to replace within given operator. If operator is a planned operator,
all derived operators (including trainable and trained versions) will be
replaced. Otherwise, only the exact operator instance will be replaced.
replacement_op :
Operator to replace the original with.
Returns
-------
modified_operator :
Modified operator where original operator is replaced with replacement throughout.
"""
def _check_match(subject, original_op):
if (
not isinstance(original_op, TrainableOperator)
and isinstance(subject, IndividualOp)
and isinstance(original_op, IndividualOp)
):
# is planned operator, so replace any matching downstream operator
if isinstance(subject, original_op): # type: ignore
return True
else:
# is trainable or trained operator, only check exact instance match
if subject == original_op:
return True
return False
@overload
def _replace(
subject: "Operator", original_op: "Operator", replacement_op: "Operator"
) -> "Operator":
...
@overload
def _replace(
subject: list, original_op: "Operator", replacement_op: "Operator"
) -> list:
...
@overload
def _replace(
subject: dict, original_op: "Operator", replacement_op: "Operator"
) -> dict:
...
def _replace(subject, original_op: "Operator", replacement_op: "Operator"):
# if operator has steps, recursively iterate through steps and recombine
if hasattr(subject, "steps"):
# special case if original_op has steps, check if it matches subject first
if hasattr(original_op, "steps"):
if _check_match(subject, original_op):
return replacement_op
new_steps: List[Operator] = []
if isinstance(subject, BasePipeline):
# first convert pipeline edges to index-based representation
index_edges = []
for edge in subject.edges():
index_edges.append(
(
subject.steps_list().index(edge[0]),
subject.steps_list().index(edge[1]),
)
)
for step in subject.steps_list():
new_steps.append(_replace(step, original_op, replacement_op))
# use previous index-based representation to reconstruct edges
new_edges: List[Tuple[Operator, Operator]] = []
for index_tuple in index_edges:
new_edges.append(
(new_steps[index_tuple[0]], new_steps[index_tuple[1]])
)
return make_pipeline_graph(new_steps, new_edges)
elif isinstance(subject, OperatorChoice):
for step in subject.steps_list():
new_steps.append(_replace(step, original_op, replacement_op))
return make_choice(*new_steps)
else:
raise NotImplementedError(
"replace() needs to implement recombining this operator with steps"
)
else:
# base case for recursion: operator with no steps, returns replacement if applicable, original otherwise
if _check_match(subject, original_op):
return replacement_op
# special case of subject being in a collection
if isinstance(subject, list):
return [_replace(s, original_op, replacement_op) for s in subject]
elif isinstance(subject, tuple):
return tuple(
_replace(s, original_op, replacement_op) for s in subject
)
elif isinstance(subject, dict):
return {
k: _replace(v, original_op, replacement_op)
for k, v in subject.items()
}
# special case of hyperparams containing operators, usually referring to an estimator
if hasattr(subject, "hyperparams") and subject.hyperparams():
modified_hyperparams = subject.hyperparams().copy()
for hyperparam, param_value in modified_hyperparams.items():
modified_hyperparams[hyperparam] = _replace(
param_value, original_op, replacement_op
)
return subject(**modified_hyperparams)
return subject
return _replace(self, original_op, replacement_op) # type: ignore
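# Illustrative sketch: swapping one step of a pipeline with replace(). Assumes
# the lale.lib.sklearn wrappers; passing the planned operator PCA (rather than
# a configured instance) replaces every PCA-derived step.
#
#     from lale.lib.sklearn import PCA, Nystroem, LogisticRegression
#     pipeline = PCA() >> LogisticRegression()
#     swapped = pipeline.replace(PCA, Nystroem())
#     # swapped is Nystroem() >> LogisticRegression(); pipeline itself is unchanged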
def with_params(self, **impl_params) -> "Operator":
"""This implements a functional version of set_params
which returns a new operator instead of modifying the original
"""
return self._with_params(False, **impl_params)
@abstractmethod
def _with_params(self, try_mutate: bool, **impl_params) -> "Operator":
"""
This method updates the parameters of the operator.
If try_mutate is set, it will attempt to update the operator in place,
although this may not always be possible
"""
pass
def to_lale(self):
"""This is a deprecated method for backward compatibility and will be removed soon"""
warnings.warn(
"Operator.to_lale exists for backwards compatibility with make_sklearn_compat and will be removed soon",
DeprecationWarning,
)
return self
def __getattr__(self, name: str) -> Any:
if name == "_cached_masked_attr_list":
raise AttributeError()
predict_methods = [
"get_pipeline",
"summary",
"transform",
"predict",
"predict_proba",
"decision_function",
"score",
"score_samples",
"predict_log_proba",
]
if name in predict_methods:
if isinstance(self, TrainedIndividualOp) or (
isinstance(self, TrainableIndividualOp) and hasattr(self, "_trained")
):
raise AttributeError(
f"The underlying operator implementation class does not define {name}"
)
if isinstance(self, TrainableIndividualOp) and not hasattr(
self, "_trained"
):
raise AttributeError(
f"{self.name()} is not trained. Note that in lale, the result of fit is a new trained operator that should be used with {name}."
)
if isinstance(self, PlannedOperator) and not isinstance(
self, TrainableOperator
):
pass # as the plannedOperators are handled in a separate block next
else:
raise AttributeError(
f"Calling {name} on a {type(self)} is deprecated. It needs to be trained by calling fit. Note that in lale, the result of fit is a new TrainedOperator that should be used with {name}."
)
if name == "fit" or name in predict_methods:
def get_error_msg(op, i):
if isinstance(op, OperatorChoice):
error_msg = f"""[A.{i}] Please remove the operator choice `|` from `{op.name()}` and keep only one of those operators.\n"""
elif isinstance(op, PlannedIndividualOp) and not isinstance(
op, TrainableIndividualOp
):
error_msg = f"[A.{i}] Please use `{op.name()}()` instead of `{op.name()}.`\n"
else:
return ""
return error_msg
def add_error_msg_for_predict_methods(op, error_msg):
if name in [
"get_pipeline",
"summary",
"transform",
"predict",
"predict_proba",
"decision_function",
"score",
"score_samples",
"predict_log_proba",
]:
error_msg = (
error_msg
+ """\nAfter applying the suggested fixes the operator might need to be trained by calling fit ."""
)
return error_msg
# This method is called only when `name` is not found on the object, so
# we don't need to account for the case when self is trainable or trained.
if isinstance(self, PlannedIndividualOp):
error_msg = f"""Please use `{self.name()}()` instead of `{self.name()}` to make it trainable.
Alternatively, you could use `auto_configure(X, y, Hyperopt, max_evals=5)` on the operator to use Hyperopt for
`max_evals` iterations for hyperparameter tuning. `Hyperopt` can be imported as `from lale.lib.lale import Hyperopt`."""
error_msg = add_error_msg_for_predict_methods(self, error_msg)
raise AttributeError(error_msg)
if isinstance(self, (PlannedPipeline, OperatorChoice)):
error_msg = f"""The pipeline is not trainable, which means you can not call {name} on it.\n
Suggested fixes:\nFix [A]: You can make the following changes in the pipeline in order to make it trainable:\n"""
i = 1
if isinstance(self, PlannedPipeline):
for step in self.steps_list():
step_err = get_error_msg(step, i)
if step_err != "":
error_msg = error_msg + step_err
i += 1
elif isinstance(self, OperatorChoice):
error_msg = error_msg + get_error_msg(self, i)
error_msg = (
error_msg
+ """\nFix [B]: Alternatively, you could use `auto_configure(X, y, Hyperopt, max_evals=5)` on the pipeline
to use Hyperopt for `max_evals` iterations for hyperparameter tuning. `Hyperopt` can be imported as `from lale.lib.lale import Hyperopt`."""
)
error_msg = add_error_msg_for_predict_methods(self, error_msg)
raise AttributeError(error_msg)
forwards = self.get_forwards()
if (
forwards is True
or (
name.endswith("_")
and not (name.startswith("__") and name.endswith("__"))
)
or (isinstance(forwards, list) and name in forwards)
):
# we should try forwarding it.
# first, a sanity check to prevent confusing behaviour where
# forwarding works on a plannedoperator and then fails on a trainedoperator
trained_ops = self._get_masked_attr_list()
if name not in trained_ops:
# ok, let us try to forward it
# first we try the "shallow" wrapper,
# and then we try each successive wrapped model
model = self.shallow_impl
while model is not None:
if hasattr(model, name):
return getattr(model, name)
old_model = model
model = getattr(model, "_wrapped_model", None)
if model is old_model:
model = None
raise AttributeError(f"Attribute {name} not found for {self}")
Operator.__doc__ = cast(str, Operator.__doc__) + "\n" + _combinators_docstrings
class PlannedOperator(Operator):
"""Abstract class for Lale operators in the planned lifecycle state."""
# pylint:disable=abstract-method
def auto_configure(
self,
X: Any,
y: Any = None,
optimizer: "Optional[PlannedIndividualOp]" = None,
cv: Any = None,
scoring: Any = None,
**kwargs,
) -> "TrainedOperator":
"""
Perform combined algorithm selection and hyperparameter tuning on this planned operator.
Parameters
----------
X:
Features that conform to the X property of input_schema_fit.
y: optional
Labels that conform to the y property of input_schema_fit.
Default is None.
optimizer:
lale.lib.lale.Hyperopt or lale.lib.lale.GridSearchCV.
Default is None.
cv:
cross-validation option that is valid for the optimizer.
Default is None, which will use the optimizer's default value.
scoring:
scoring option that is valid for the optimizer.
Default is None, which will use the optimizer's default value.
kwargs:
Other keyword arguments to be passed to the optimizer.
Returns
-------
TrainedOperator
Best operator discovered by the optimizer.
Raises
------
ValueError
If an invalid optimizer is provided
"""
if optimizer is None:
raise ValueError("Please provide a valid optimizer for auto_configure.")
if kwargs is None:
kwargs = {}
if cv is not None:
kwargs["cv"] = cv
if scoring is not None:
kwargs["scoring"] = scoring
optimizer_obj = optimizer(estimator=self, **kwargs)
trained = optimizer_obj.fit(X, y)
ret_pipeline = trained.get_pipeline()
assert ret_pipeline is not None
return ret_pipeline
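# Illustrative sketch: combined algorithm selection and hyperparameter tuning.
# Assumes lale.lib.lale.Hyperopt and wrappers from lale.lib.sklearn; X and y are
# a feature matrix and labels, and max_evals is forwarded to Hyperopt via kwargs.
#
#     from lale.lib.lale import Hyperopt
#     from lale.lib.sklearn import PCA, Nystroem, LogisticRegression
#     planned = (PCA | Nystroem) >> LogisticRegression
#     trained = planned.auto_configure(X, y, optimizer=Hyperopt, max_evals=10)
#     predictions = trained.predict(X)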
PlannedOperator.__doc__ = (
cast(str, PlannedOperator.__doc__) + "\n" + _combinators_docstrings
)
class TrainableOperator(PlannedOperator):
"""Abstract class for Lale operators in the trainable lifecycle state."""
@overload
def __and__(self, other: "TrainedOperator") -> "TrainablePipeline":
...
@overload
def __and__(self, other: "TrainableOperator") -> "TrainablePipeline":
...
@overload
def __and__(self, other: Union[Any, "Operator"]) -> "PlannedPipeline":
...
def __and__(self, other): # type: ignore
return make_union_no_concat(self, other)
@overload
def __rshift__(self, other: "TrainedOperator") -> "TrainablePipeline":
...
@overload
def __rshift__(self, other: "TrainableOperator") -> "TrainablePipeline":
...
@overload
def __rshift__(self, other: Union[Any, "Operator"]) -> "PlannedPipeline":
...
def __rshift__(self, other): # type: ignore
return make_pipeline(self, other)
@abstractmethod
def fit(self, X: Any, y: Any = None, **fit_params) -> "TrainedOperator":
"""Train the learnable coefficients of this operator, if any.
Return a trained version of this operator. If this operator
has free learnable coefficients, bind them to values that fit
the data according to the operator's algorithm. Do nothing if
the operator implementation lacks a `fit` method or if the
operator has been marked as `is_frozen_trained`.
Parameters
----------
X:
Features that conform to the X property of input_schema_fit.
y: optional
Labels that conform to the y property of input_schema_fit.
Default is None.
fit_params: Dictionary, optional
A dictionary of keyword parameters to be used during training.
Returns
-------
TrainedOperator
A new copy of this operator that is the same except that its
learnable coefficients are bound to their trained values.
"""
pass
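# Illustrative sketch: fit returns a new TrainedOperator instead of mutating
# self. Assumes the LogisticRegression wrapper from lale.lib.sklearn and some
# training data X_train, y_train.
#
#     trainable = LogisticRegression(C=0.1)
#     trained = trainable.fit(X_train, y_train)
#     y_pred = trained.predict(X_test)  # predict on the trained copy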
def fit_transform(self, X: Any, y: Any = None, **fit_params):
"""
Fit to data, then transform it.
Fits transformer to `X` and `y` with optional parameters `fit_params`
and returns a transformed version of `X`.
Parameters
----------
X:
Features that conform to the X property of input_schema_fit.
y: optional
Labels that conform to the y property of input_schema_fit.
Default is None.
fit_params: Dictionary, optional
A dictionary of keyword parameters to be used during training.
Returns
-------
result :
Transformed features; see output_transform schema of the operator.
"""
return self.fit(X, y, **fit_params).transform(X)
@abstractmethod
def freeze_trainable(self) -> "TrainableOperator":
"""Return a copy of the trainable parts of this operator that is the same except
that all hyperparameters are bound and none are free to be tuned.
If there is an operator choice, it is kept as is.
"""
pass
@abstractmethod
def is_transformer(self) -> bool:
"""Checks if the operator is a transformer"""
pass
TrainableOperator.__doc__ = (
cast(str, TrainableOperator.__doc__) + "\n" + _combinators_docstrings
)
class TrainedOperator(TrainableOperator):
"""Abstract class for Lale operators in the trained lifecycle state."""
@overload
def __and__(self, other: "TrainedOperator") -> "TrainedPipeline":
...
@overload
def __and__(self, other: "TrainableOperator") -> "TrainablePipeline":
...
@overload
def __and__(self, other: Union[Any, "Operator"]) -> "PlannedPipeline":
...
def __and__(self, other): # type: ignore
return make_union_no_concat(self, other)
@overload
def __rshift__(self, other: "TrainedOperator") -> "TrainedPipeline":
...
@overload
def __rshift__(self, other: "TrainableOperator") -> "TrainablePipeline":
...
@overload
def __rshift__(self, other: Union[Any, "Operator"]) -> "PlannedPipeline":
...
def __rshift__(self, other): # type: ignore
return make_pipeline(self, other)
@abstractmethod
def transform(self, X: Any, y: Any = None) -> Any:
"""Transform the data.
Parameters
----------
X :
Features; see input_transform schema of the operator.
y : None
Returns
-------
result :
Transformed features; see output_transform schema of the operator.
"""
pass
@abstractmethod
def _predict(self, X: Any) -> Any:
pass
@abstractmethod
def predict(self, X: Any, **predict_params) -> Any:
"""Make predictions.
Parameters
----------
X :
Features; see input_predict schema of the operator.
predict_params:
Additional parameters that should be passed to the predict method
Returns
-------
result :
Predictions; see output_predict schema of the operator.
"""
pass
@abstractmethod
def predict_proba(self, X: Any):
"""Probability estimates for all classes.
Parameters
----------
X :
Features; see input_predict_proba schema of the operator.
Returns
-------
result :
Probabilities; see output_predict_proba schema of the operator.
"""
pass
@abstractmethod
def decision_function(self, X: Any):
"""Confidence scores for all classes.
Parameters
----------
X :
Features; see input_decision_function schema of the operator.
Returns
-------
result :
Confidences; see output_decision_function schema of the operator.
"""
pass
@abstractmethod
def score_samples(self, X: Any):
"""Scores for each sample in X. The type of scores depends on the operator.
Parameters
----------
X :
Features.
Returns
-------
result :
scores per sample.
"""
pass
@abstractmethod
def score(self, X: Any, y: Any, **score_params):
"""Performance evaluation with a default metric.
Parameters
----------
X :
Features.
y:
Ground truth labels.
score_params:
Any additional parameters expected by the score function of
the underlying operator.
Returns
-------
score :
performance metric value
"""
pass
@abstractmethod
def predict_log_proba(self, X: Any):
"""Predicted class log-probabilities for X.
Parameters
----------
X :
Features.
Returns
-------
result :
Class log probabilities.
"""
pass
@abstractmethod
def freeze_trained(self) -> "TrainedOperator":
"""Return a copy of this trainable operator that is the same except
that all learnable coefficients are bound and thus fit is a no-op.
"""
pass
TrainedOperator.__doc__ = (
cast(str, TrainedOperator.__doc__) + "\n" + _combinators_docstrings
)
_schema_derived_attributes = ["_enum_attributes", "_hyperparam_defaults"]
class _DictionaryObjectForEnum:
_d: Dict[str, enumeration.Enum]
def __init__(self, d: Dict[str, enumeration.Enum]):
self._d = d
def __contains__(self, key: str) -> bool:
return key in self._d
# This method in fact always returns an enumeration;
# however, the values of the enumeration are not known, which causes
# the type checker to complain about a common (and desired) idiom
# such as LogisticRegression.enum.solver.saga,
# so we weaken the type to Any for pragmatic reasons.
def __getattr__(self, key: str) -> Any: # enumeration.Enum:
if key in self._d:
return self._d[key]
else:
raise AttributeError("No enumeration found for hyper-parameter: " + key)
# This method in fact always returns an enumeration;
# however, the values of the enumeration are not known, which causes
# the type checker to complain about a common (and desired) idiom
# such as LogisticRegression.enum.solver.saga,
# so we weaken the type to Any for pragmatic reasons.
def __getitem__(self, key: str) -> Any: # enumeration.Enum:
if key in self._d:
return self._d[key]
else:
raise KeyError("No enumeration found for hyper-parameter: " + key)
class _WithoutGetParams:
"""This is a wrapper class whose job is to *NOT* have a get_params method,
causing sklearn clone to call deepcopy on it (and its contents).
This is currently used, for example, to wrap the impl class instance
returned by an individual operator's get_params (since the class itself may have
a get_params method defined, causing problems if this wrapper is not used).
"""
@classmethod
def unwrap(cls, obj):
while isinstance(obj, _WithoutGetParams):
obj = obj.klass
return obj
@classmethod
def wrap(cls, obj):
if isinstance(obj, _WithoutGetParams):
return obj
else:
return _WithoutGetParams(obj)
klass: type
def __init__(self, klass: type):
self.klass = klass
class IndividualOp(Operator):
"""
This is a concrete class that can instantiate a new individual
operator and provide access to its metadata.
The enum property can be used to access enumerations for hyper-parameters,
auto-generated from the operator's schema.
For example, `LinearRegression.enum.solver.saga`
As a short-hand, if the hyper-parameter name does not conflict with
any fields of this class, the auto-generated enums can also be accessed
directly.
For example, `LinearRegression.solver.saga`"""
_impl: Any
_impl_class_: Union[type, _WithoutGetParams]
_hyperparams: Optional[Dict[str, Any]]
_frozen_hyperparams: Optional[List[str]]
# this attribute may not be defined
_hyperparam_defaults: Mapping[str, Any]
def __init__(
self,
_lale_name: str,
_lale_impl,
_lale_schemas,
_lale_frozen_hyperparameters=None,
**hp,
) -> None:
"""Create a new IndividualOp.
Parameters
----------
_lale_name : string
Name of the operator.
_lale_impl :
An instance of the operator implementation class, or the class itself.
This is a class that contains fit and predict/transform methods
implementing the underlying algorithm.
_lale_schemas : dict
A dictionary of JSON schemas for the operator.
"""
self._name = _lale_name
self._enum_attributes = None
if _lale_schemas:
self._schemas = _lale_schemas
else:
self._schemas = get_default_schema(_lale_impl)
# if we are given a class instance, we need to preserve it
# so that get_params can return the same exact one that we got
# this is important for scikit-learn's clone to work correctly
unwrapped: Any = _WithoutGetParams.unwrap(_lale_impl)
self._impl = unwrapped
if inspect.isclass(unwrapped):
self._impl_class_ = _lale_impl
else:
self._impl_class_ = unwrapped.__class__
self._frozen_hyperparams = _lale_frozen_hyperparameters
self._hyperparams = hp
def _is_instantiated(self):
return not inspect.isclass(self._impl)
def _get_masked_attr_list(self):
prev_cached_value = getattr(self, "_cached_masked_attr_list", None)
if prev_cached_value is not None:
return prev_cached_value
found_ops = [
"get_pipeline",
"summary",
"transform",
"predict",
"predict_proba",
"decision_function",
"score",
"score_samples",
"predict_log_proba",
"_schemas",
"_impl",
"_impl_class",
"_hyperparams",
"_frozen_hyperparams",
"_trained",
"_enum_attributes",
"_cached_masked_attr_list",
]
found_ops.extend(dir(TrainedIndividualOp))
found_ops.extend(dir(self))
self._cached_masked_attr_list = found_ops
return found_ops
def _check_schemas(self):
from lale.settings import disable_hyperparams_schema_validation
if disable_hyperparams_schema_validation:
return
validate_is_schema(self._schemas)
from lale.pretty_print import json_to_string
assert (
self.has_tag("transformer") == self.is_transformer()
), f"{self.class_name()}: {json_to_string(self._schemas)}"
assert self.has_tag("estimator") == self.has_method(
"predict"
), f"{self.class_name()}: {json_to_string(self._schemas)}"
if self.has_tag("classifier") or self.has_tag("regressor"):
assert self.has_tag(
"estimator"
), f"{self.class_name()}: {json_to_string(self._schemas)}"
forwards = self.get_forwards()
# if it is a boolean, there is nothing to check
if isinstance(forwards, list):
trained_ops = self._get_masked_attr_list()
for f in forwards:
assert (
f not in trained_ops
), f"""This operator specified the {f} attribute to be forwarded.
Unfortunately, this method is also provided for some lale operator wrapper classes, so this
is invalid.
It is possible that this method/property is new to lale, and an older version of lale supported
forwarding it. However, to be compatible with this version of lale, the attribute needs
to be removed from the forwards list, and code that calls this method/property (on an object op)
needs to be changed from op.{f} to op.impl.{f}
"""
# Add enums from the hyperparameter schema to the object as fields
# so that their usage looks like LogisticRegression.penalty.l1
# enum_gen.addSchemaEnumsAsFields(self, self.hyperparam_schema())
_enum_attributes: Optional[_DictionaryObjectForEnum]
@classmethod
def _add_nested_params(cls, output: Dict[str, Any], k: str, v: Any):
nested_params = cls._get_nested_params(v)
if nested_params:
output.update(nest_HPparams(k, nested_params))
@classmethod
def _get_nested_params(cls, v: Any) -> Optional[Dict[str, Any]]:
# TODO: design question. This seems like the right thing,
# but sklearn does not currently do this, as is apparent with,
# e.g VotingClassifier
# if isinstance(v, list) or isinstance(v, tuple):
# output: Dict[str, Any] = {}
# for i, elem in enumerate(v):
# nested = cls._get_nested_params(elem)
# if nested:
# output.update(nest_HPparams(str(i)), nested)
# return output
# elif isinstance(v, dict):
# output: Dict[str, Any] = {}
# for sub_k, sub_v in v.items():
# nested = cls._get_nested_params(sub_v)
# if nested:
# output.update(nest_HPparams(sub_k), nested)
# return output
# else:
try:
return v.get_params(deep=True)
except AttributeError:
return None
def _get_params_all(self, deep: bool = False) -> Dict[str, Any]:
output: Dict[str, Any] = {}
hps = self.hyperparams_all()
if hps is not None:
output.update(hps)
defaults = self.get_defaults()
for k in defaults.keys():
if k not in output:
output[k] = defaults[k]
if deep:
deep_stuff: Dict[str, Any] = {}
for k, v in output.items():
self._add_nested_params(deep_stuff, k, v)
output.update(deep_stuff)
return output
def get_params(self, deep: Union[bool, Literal[0]] = True) -> Dict[str, Any]:
"""Get parameters for this operator.
This method follows scikit-learn's convention that all operators
have a constructor which takes a list of keyword arguments.
This is not required for operator impls which do not desire
scikit-compatibility.
Parameters
----------
deep : boolean, optional
If True, will return the parameters for this operator and their nested parameters.
If False, will return the parameters for this operator, along with `_lale_XXX` fields needed to support cloning.
Returns
-------
params : mapping of string to any
Parameter names mapped to their values.
"""
out: Dict[str, Any] = {}
if deep is False:
out["_lale_name"] = self._name
out["_lale_schemas"] = self._schemas
out["_lale_impl"] = _WithoutGetParams.wrap(self._wrapped_impl_class())
# we need to wrap the class object, since the class object
# has a get_params method (the instance method), which causes problems for
# sklearn clone
if self._is_instantiated():
impl = self._impl_instance()
if hasattr(impl, "get_params"):
out.update(impl.get_params(deep=deep))
elif hasattr(impl, "_wrapped_model") and hasattr(
impl._wrapped_model, "get_params"
):
out.update(impl._wrapped_model.get_params(deep=bool(deep)))
else:
out.update(self._get_params_all(deep=bool(deep)))
else:
out.update(self._get_params_all(deep=bool(deep)))
if deep is False and self.frozen_hyperparams() is not None:
out["_lale_frozen_hyperparameters"] = self.frozen_hyperparams()
return out
def _with_params(self, try_mutate: bool, **impl_params) -> "IndividualOp":
main_params, partitioned_sub_params = partition_sklearn_params(impl_params)
hyper = self.hyperparams()
# we set the sub params first
for sub_key, sub_params in partitioned_sub_params.items():
with_structured_params(try_mutate, sub_key, sub_params, hyper)
# we have now updated any nested operators
# (if this is a higher order operator)
# and can work on the main operator
all_params = {**hyper, **main_params}
filtered_impl_params = _fixup_hyperparams_dict(all_params)
# These are used by lale. Since they are returned by get_params,
# they may show up here (if the user calls get_params, changes
# a value, and then calls set_params), so we remove them here
filtered_impl_params.pop("_lale_name", None)
filtered_impl_params.pop("_lale_impl", None)
filtered_impl_params.pop("_lale_schemas", None)
filtered_impl_params.pop("_lale_frozen_hyperparameters", None)
return self._with_op_params(try_mutate, **filtered_impl_params)
def _with_op_params(
self, try_mutate: bool, **impl_params
) -> "TrainableIndividualOp":
# for an individual (and planned individual) operator,
# we don't mutate the operator itself even if try_mutate is True
res = self._configure(**impl_params)
return res
# we have different views on the hyperparameters
def hyperparams_all(self) -> Optional[Dict[str, Any]]:
"""This is the hyperparameters that are currently set.
Some of them may not have been set explicitly
(e.g. if this is a clone of an operator,
some of these may be defaults.
To get the hyperparameters that were actually set,
use :meth:`hyperparams`
"""
return getattr(self, "_hyperparams", None)
def frozen_hyperparams(self) -> Optional[List[str]]:
return self._frozen_hyperparams
def _hyperparams_helper(self) -> Optional[Dict[str, Any]]:
actuals = self.hyperparams_all()
if actuals is None:
return None
frozen_params = self.frozen_hyperparams()
if frozen_params is None:
return None
params = {k: actuals[k] for k in frozen_params}
return params
def hyperparams(self) -> Dict[str, Any]:
params = self._hyperparams_helper()
if params is None:
return {}
else:
return params
def reduced_hyperparams(self):
actuals = self._hyperparams_helper()
if actuals is None:
return None
defaults = self.get_defaults()
actuals_minus_defaults = {
k: actuals[k]
for k in actuals
if k not in defaults
or not are_hyperparameters_equal(actuals[k], defaults[k])
}
if not hasattr(self, "_hyperparam_positionals"):
sig = inspect.signature(self._impl_class().__init__)
positionals = {
name: defaults[name]
for name, param in sig.parameters.items()
if name != "self"
and param.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD
and param.default == inspect.Parameter.empty
}
self._hyperparam_positionals = positionals
result = {**self._hyperparam_positionals, **actuals_minus_defaults}
return result
def _configure(self, *args, **kwargs) -> "TrainableIndividualOp":
class_ = self._impl_class()
hyperparams = {}
for arg in args:
k, v = self._enum_to_strings(arg)
hyperparams[k] = v
for k, v in _fixup_hyperparams_dict(kwargs).items():
if k in hyperparams:
raise ValueError(f"Duplicate argument {k}.")
v = val_wrapper.unwrap(v)
if isinstance(v, enumeration.Enum):
k2, v2 = self._enum_to_strings(v)
if k != k2:
raise ValueError(f"Invalid keyword {k2} for argument {v2}.")
else:
v2 = v
hyperparams[k] = v2
frozen_hyperparams = list(hyperparams.keys())
# using params_all instead of hyperparams to ensure the construction is consistent with schema
trainable_to_get_params = TrainableIndividualOp(
_lale_name=self.name(),
_lale_impl=class_,
_lale_schemas=self._schemas,
_lale_frozen_hyperparameters=frozen_hyperparams,
**hyperparams,
)
# TODO: improve this code
params_all = trainable_to_get_params._get_params_all()
self._validate_hyperparams(
hyperparams, params_all, self.hyperparam_schema(), class_
)
# TODO: delay creating the impl here
if len(params_all) == 0:
impl = class_()
else:
impl = class_(**params_all)
if self._should_configure_trained(impl):
result: TrainableIndividualOp = TrainedIndividualOp(
_lale_name=self.name(),
_lale_impl=impl,
_lale_schemas=self._schemas,
_lale_frozen_hyperparameters=frozen_hyperparams,
_lale_trained=True,
**hyperparams,
)
else:
result = TrainableIndividualOp(
_lale_name=self.name(),
_lale_impl=impl,
_lale_schemas=self._schemas,
_lale_frozen_hyperparameters=frozen_hyperparams,
**hyperparams,
)
return result
@property
def enum(self) -> _DictionaryObjectForEnum:
ea = getattr(self, "_enum_attributes", None)
if ea is None:
nea = enum_gen.schemaToPythonEnums(self.hyperparam_schema())
doe = _DictionaryObjectForEnum(nea)
self._enum_attributes = doe
return doe
else:
return ea
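# Illustrative sketch: configuring a hyperparameter with the auto-generated
# enums, as described in the class docstring. Assumes the LogisticRegression
# wrapper from lale.lib.sklearn.
#
#     lr = LogisticRegression(solver=LogisticRegression.enum.solver.saga)
#     # equivalent short-hand when the name does not clash with a class field:
#     lr = LogisticRegression(solver=LogisticRegression.solver.saga)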
def _invalidate_enum_attributes(self) -> None:
for k in _schema_derived_attributes:
try:
delattr(self, k)
except AttributeError:
pass
def __getattr__(self, name: str) -> Any:
if name in _schema_derived_attributes or name in ["__setstate__", "_schemas"]:
raise AttributeError
if name == "_estimator_type":
if self.is_classifier():
return "classifier" # satisfy sklearn.base.is_classifier(op)
elif self.is_regressor():
return "regressor" # satisfy sklearn.base.is_regressor(op)
return super().__getattr__(name)
def __getstate__(self):
state = self.__dict__.copy()
# Remove entries that can't be pickled
for k in _schema_derived_attributes:
state.pop(k, None)
return state
def get_schema(self, schema_kind: str) -> Dict[str, Any]:
"""Return a schema of the operator.
Parameters
----------
schema_kind : string, 'hyperparams' or 'input_fit' or 'input_partial_fit' or 'input_transform' or 'input_transform_X_y' or 'input_predict' or 'input_predict_proba' or 'input_decision_function' or 'output_transform' or 'output_transform_X_y' or 'output_predict' or 'output_predict_proba' or 'output_decision_function'
Type of the schema to be returned.
Returns
-------
dict
The Python object containing the JSON schema of the operator.
For all the schemas currently present, this would be a dictionary.
"""
props = self._schemas["properties"]
assert (
schema_kind in props
), f"missing schema {schema_kind} for operator {self.name()} with class {self.class_name()}"
result = props[schema_kind]
return result
def has_schema(self, schema_kind: str) -> bool:
"""Return true if the operator has the schema kind.
Parameters
----------
schema_kind : string, 'hyperparams' or 'input_fit' or 'input_partial_fit' or 'input_transform' or 'input_transform_X_y' or 'input_predict' or 'input_predict_proba' or 'input_decision_function' or 'output_transform' or 'output_transform_X_y' or 'output_predict' or 'output_predict_proba' or 'output_decision_function' or 'input_score_samples' or 'output_score_samples'
Type of the schema to be returned.
Returns
-------
bool
True if the JSON schema is present, False otherwise.
"""
props = self._schemas["properties"]
return schema_kind in props
def documentation_url(self):
if "documentation_url" in self._schemas:
return self._schemas["documentation_url"]
return None
def get_forwards(self) -> Union[bool, List[str]]:
"""Returns the list of attributes (methods/properties)
the schema has asked to be forwarded. A boolean value is a blanket
opt-in or out of forwarding
"""
forwards = self._schemas.get("forwards", False)
assert isinstance(
forwards, (bool, list)
), f"the schema forward declaration {forwards} must be either a boolean or a list of strings"
return forwards
def get_tags(self) -> Dict[str, List[str]]:
"""Return the tags of an operator.
Returns
-------
dict
A dictionary mapping tag categories to lists of tags describing the operator.
"""
return self._schemas.get("tags", {})
def has_tag(self, tag: str) -> bool:
"""Check the presence of a tag for an operator.
Parameters
----------
tag : string
Returns
-------
boolean
Flag indicating the presence or absence of the given tag
in this operator's schemas.
"""
tags = [t for ll in self.get_tags().values() for t in ll]
return tag in tags
def input_schema_fit(self) -> JSON_TYPE:
"""Input schema for the fit method."""
return self.get_schema("input_fit")
def input_schema_partial_fit(self) -> JSON_TYPE:
"""Input schema for the partial_fit method."""
return self.get_schema("input_partial_fit")
def input_schema_transform(self) -> JSON_TYPE:
"""Input schema for the transform method."""
return self.get_schema("input_transform")
def input_schema_transform_X_y(self) -> JSON_TYPE:
"""Input schema for the transform_X_y method."""
return self.get_schema("input_transform_X_y")
def input_schema_predict(self) -> JSON_TYPE:
"""Input schema for the predict method."""
return self.get_schema("input_predict")
def input_schema_predict_proba(self) -> JSON_TYPE:
"""Input schema for the predict_proba method."""
return self.get_schema("input_predict_proba")
def input_schema_predict_log_proba(self) -> JSON_TYPE:
"""Input schema for the predict_log_proba method.
We assume that it is the same as the predict_proba method if none has been defined explicitly.
"""
if self.has_schema("input_predict_log_proba"):
return self.get_schema("input_predict_log_proba")
else:
return self.get_schema("input_predict_proba")
def input_schema_decision_function(self) -> JSON_TYPE:
"""Input schema for the decision_function method."""
return self.get_schema("input_decision_function")
def input_schema_score_samples(self) -> JSON_TYPE:
"""Input schema for the score_samples method.
We assume that it is the same as the predict method if none has been defined explicitly.
"""
if self.has_schema("input_score_samples"):
return self.get_schema("input_score_samples")
else:
return self.get_schema("input_predict")
def output_schema_transform(self) -> JSON_TYPE:
"""Oputput schema for the transform method."""
return self.get_schema("output_transform")
def output_schema_transform_X_y(self) -> JSON_TYPE:
"""Oputput schema for the transform_X_y method."""
return self.get_schema("output_transform_X_y")
def output_schema_predict(self) -> JSON_TYPE:
"""Output schema for the predict method."""
return self.get_schema("output_predict")
def output_schema_predict_proba(self) -> JSON_TYPE:
"""Output schema for the predict_proba method."""
return self.get_schema("output_predict_proba")
def output_schema_decision_function(self) -> JSON_TYPE:
"""Output schema for the decision_function method."""
return self.get_schema("output_decision_function")
def output_schema_score_samples(self) -> JSON_TYPE:
"""Output schema for the score_samples method.
We assume that it is the same as the predict method if none has been defined explicitly.
"""
if self.has_schema("output_score_samples"):
return self.get_schema("output_score_samples")
else:
return self.get_schema("output_predict")
def output_schema_predict_log_proba(self) -> JSON_TYPE:
"""Output schema for the predict_log_proba method.
We assume that it is the same as the predict_proba method if none has been defined explicitly.
"""
if self.has_schema("output_predict_log_proba"):
return self.get_schema("output_predict_log_proba")
else:
return self.get_schema("output_predict_proba")
def hyperparam_schema(self, name: Optional[str] = None) -> JSON_TYPE:
"""Returns the hyperparameter schema for the operator.
Parameters
----------
name : string, optional
Name of the hyperparameter.
Returns
-------
dict
Full hyperparameter schema for this operator or part of the schema
corresponding to the hyperparameter given by parameter `name`.
"""
hp_schema = self.get_schema("hyperparams")
if name is None:
return hp_schema
else:
params = next(iter(hp_schema.get("allOf", [])))
return params.get("properties", {}).get(name)
def get_defaults(self) -> Mapping[str, Any]:
"""Returns the default values of hyperparameters for the operator.
Returns
-------
dict
A dictionary with names of the hyperparameters as keys and
their default values as values.
"""
if not hasattr(self, "_hyperparam_defaults"):
schema = self.hyperparam_schema()
props_container: Dict[str, Any] = next(iter(schema.get("allOf", [])), {})
props: Dict[str, Any] = props_container.get("properties", {})
# since we want to share this, we don't want callers
# to modify the returned dictionary, thereby modifying the defaults
defaults: MappingProxyType[str, Any] = MappingProxyType(
{k: props[k].get("default") for k in props.keys()}
)
self._hyperparam_defaults = defaults
return self._hyperparam_defaults
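# Illustrative sketch: get_defaults returns a read-only mapping derived from the
# hyperparameter schema. Assumes the LogisticRegression wrapper from
# lale.lib.sklearn.
#
#     defaults = LogisticRegression.get_defaults()
#     defaults["C"]  # the schema default for that hyperparameter, e.g. 1.0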
def get_param_ranges(self) -> Tuple[Dict[str, Any], Dict[str, Any]]:
"""Returns two dictionaries, ranges and cat_idx, for hyperparameters.
The ranges dictionary has two kinds of entries. Entries for
numeric and Boolean hyperparameters are tuples of the form
(min, max, default). Entries for categorical hyperparameters
are lists of their values.
The cat_idx dictionary has (min, max, default) entries of indices
into the corresponding list of values.
Warning: ignores side constraints and unions."""
hyperparam_obj = next(iter(self.hyperparam_schema().get("allOf", [])))
original = hyperparam_obj.get("properties")
def is_for_optimizer(s) -> bool:
return ("forOptimizer" not in s) or s["forOptimizer"]
def is_relevant(hp, s):
if "relevantToOptimizer" in hyperparam_obj:
return hp in hyperparam_obj["relevantToOptimizer"]
return True
relevant = {hp: s for hp, s in original.items() if is_relevant(hp, s)}
def pick_one_type(schema):
if not is_for_optimizer(schema):
return None
if "anyOf" in schema:
def by_type(typ):
for s in schema["anyOf"]:
if "type" in s and s["type"] == typ:
if is_for_optimizer(s):
return s
return None
s = None
for typ in ["number", "integer", "string"]:
s = by_type(typ)
if s:
return s
if s is None:
enums = []
for s in schema["anyOf"]:
if "enum" in s:
if is_for_optimizer(s):
enums.append(s)
elif s.get("type", None) == "boolean":
if is_for_optimizer(s):
bool_s = {"enum": [False, True]}
d = s.get("default", None)
if d is not None:
bool_s["default"] = d
enums.append(bool_s)
if len(enums) == 1:
return enums[0]
elif enums:
# combine them, and see if there is an anyOf default that we want to use as well
vals = [item for s in enums for item in s["enum"]]
new_s = {
"enum": vals,
}
if "default" in schema and schema["default"] in vals:
new_s["default"] = schema["default"]
else:
for s in enums:
if "default" in s:
new_s["default"] = s["default"]
break
return new_s
if len(schema["anyOf"]) > 0 and is_for_optimizer(schema["anyOf"][0]):
return schema["anyOf"][0]
else:
return None
return schema
unityped_with_none = {hp: pick_one_type(relevant[hp]) for hp in relevant}
unityped = {k: v for k, v in unityped_with_none.items() if v is not None}
def add_default(schema):
if "type" in schema:
minimum, maximum = 0.0, 1.0
if "minimumForOptimizer" in schema:
minimum = schema["minimumForOptimizer"]
elif "minimum" in schema:
minimum = schema["minimum"]
if "maximumForOptimizer" in schema:
maximum = schema["maximumForOptimizer"]
elif "maximum" in schema:
maximum = schema["maximum"]
result = {**schema}
if schema["type"] in ["number", "integer"]:
if "default" not in schema:
schema["default"] = None
if "minimumForOptimizer" not in schema:
result["minimumForOptimizer"] = minimum
if "maximumForOptimizer" not in schema:
result["maximumForOptimizer"] = maximum
return result
elif "enum" in schema:
if "default" in schema:
return schema
return {"default": schema["enum"][0], **schema}
return schema
defaulted = {hp: add_default(unityped[hp]) for hp in unityped}
def get_range(hp, schema):
if "enum" in schema:
default = schema["default"]
non_default = [v for v in schema["enum"] if v != default]
return [*non_default, default]
elif schema["type"] == "boolean":
return (False, True, schema["default"])
else:
def get(schema, key):
return schema[key] if key in schema else None
keys = ["minimumForOptimizer", "maximumForOptimizer", "default"]
return tuple(get(schema, key) for key in keys)
def get_cat_idx(schema):
if "enum" not in schema:
return None
return (0, len(schema["enum"]) - 1, len(schema["enum"]) - 1)
autoai_ranges = {hp: get_range(hp, s) for hp, s in defaulted.items()}
if "min_samples_split" in autoai_ranges and "min_samples_leaf" in autoai_ranges:
if self._name not in (
"_GradientBoostingRegressorImpl",
"_GradientBoostingClassifierImpl",
"_ExtraTreesClassifierImpl",
):
autoai_ranges["min_samples_leaf"] = (1, 5, 1)
autoai_ranges["min_samples_split"] = (2, 5, 2)
autoai_cat_idx = {
hp: get_cat_idx(s) for hp, s in defaulted.items() if "enum" in s
}
return autoai_ranges, autoai_cat_idx
def get_param_dist(self, size=10) -> Dict[str, List[Any]]:
"""Returns a dictionary for discretized hyperparameters.
Each entry is a list of values. For continuous hyperparameters,
it returns up to `size` uniformly distributed values.
Warning: ignores side constraints, unions, and distributions."""
autoai_ranges, _autoai_cat_idx = self.get_param_ranges()
def one_dist(key: str) -> List[Any]:
one_range = autoai_ranges[key]
if isinstance(one_range, tuple):
minimum, maximum, default = one_range
if minimum is None:
dist = [default]
elif isinstance(minimum, bool):
if minimum == maximum:
dist = [minimum]
else:
dist = [minimum, maximum]
elif isinstance(minimum, int) and isinstance(maximum, int):
step = float(maximum - minimum) / (size - 1)
fdist = [minimum + i * step for i in range(size)]
dist = list(set(round(f) for f in fdist))
dist.sort()
elif isinstance(minimum, (int, float)):
# just in case the minimum or maximum is exclusive
epsilon = (maximum - minimum) / (100 * size)
minimum += epsilon
maximum -= epsilon
step = (maximum - minimum) / (size - 1)
dist = [minimum + i * step for i in range(size)]
else:
assert False, f"key {key}, one_range {one_range}"
else:
dist = [*one_range]
return dist
autoai_dists = {k: one_dist(k) for k in autoai_ranges.keys()}
return autoai_dists
def _enum_to_strings(self, arg: "enumeration.Enum") -> Tuple[str, Any]:
"""[summary]
Parameters
----------
arg : [type]
[description]
Raises
------
ValueError
[description]
Returns
-------
[type]
[description]
"""
if not isinstance(arg, enumeration.Enum):
raise ValueError(f"Missing keyword on argument {arg}.")
return arg.__class__.__name__, arg.value
def _wrapped_impl_class(self):
if not hasattr(self, "_impl_class_"):
if inspect.isclass(self._impl):
self._impl_class_ = self._impl
else:
self._impl_class_ = self._impl.__class__
return self._impl_class_
def _impl_class(self):
return _WithoutGetParams.unwrap(self._wrapped_impl_class())
def _impl_instance(self) -> Any:
hyperparams: Mapping[str, Any]
if not self._is_instantiated():
defaults = self.get_defaults()
all_hps = self.hyperparams_all()
if all_hps:
hyperparams = {**defaults, **all_hps}
else:
hyperparams = defaults
class_ = self._impl_class()
try:
instance = class_(
**hyperparams
) # always with default values of hyperparams
except TypeError as e:
logger.debug(
f"Constructor for {class_.__module__}.{class_.__name__} "
f"threw exception {e}"
)
# TODO: Is this really a reasonable fallback?
instance = class_.__new__() # type:ignore
self._impl = instance
return self._impl
@property
def impl(self) -> Any:
"""Returns the underlying impl. This can be used to access additional
fields and methods not exposed by Lale. If only the type of the
impl is needed, please use self.impl_class instead, as it can be more efficient.
If the found impl has a _wrapped_model, it will be returned instead.
"""
model = self.shallow_impl
if model is None:
return None
while True:
base_model = getattr(model, "_wrapped_model", model)
if base_model is None or base_model is model:
return model
model = base_model
return model
@property
def shallow_impl(self) -> Any:
"""Returns the underlying impl. This can be used to access additional
fields and methods not exposed by Lale. If only the type of the
impl is needed, please use self.impl_class instead, as it can be more efficient.
"""
# if fit was called, we want to use trained result
# even if the code uses the original operator
# since sklearn assumes that fit mutates the operator
op = self
if hasattr(op, "_trained"):
tr_op: Any = op._trained
if tr_op is not None:
assert isinstance(tr_op, TrainedIndividualOp)
op = tr_op
return op._impl_instance()
@property
def impl_class(self) -> type:
"""Returns the class of the underlying impl. This should return the same thing
as self.impl.__class__, but can be more efficient.
"""
return self._impl_class()
# This allows the user, for example, to check isinstance(LR().fit(...), LR)
def __instancecheck__(self, other):
if isinstance(other, IndividualOp):
return issubclass(other.impl_class, self.impl_class)
else:
return False
def class_name(self) -> str:
module = None
if self._impl is not None:
module = self._impl.__module__
if module is None or module == str.__class__.__module__: # type: ignore
class_name = self.name()
else:
class_name = module + "." + self._impl_class().__name__
return class_name
def __str__(self) -> str:
return self.name()
# # sklearn calls __repr__ instead of __str__
def __repr__(self):
name = self.name()
return name
def _has_same_impl(self, other: Operator) -> bool:
"""Checks if the type of the operator implementations are compatible"""
if not isinstance(other, IndividualOp):
return False
return self._impl_class() == other._impl_class()
def _propose_fixed_hyperparams(
self, key_candidates, hp_all, hp_schema, max_depth=2
):
defaults = self.get_defaults()
explicit_defaults: Dict[str, Any] = {k: defaults[k] for k in key_candidates}
found: bool = False
for depth in range(0, max_depth):
if found:
return
candidate_replacements: Any = list(
itertools.combinations(explicit_defaults.items(), depth + 1)
)
for replacements in candidate_replacements:
new_values = dict(replacements)
fixed_hp = {**hp_all, **new_values}
try:
validate_schema_directly(fixed_hp, hp_schema)
found = True
yield new_values
except jsonschema.ValidationError:
pass
MAX_FIX_DEPTH: int = 2
MAX_FIX_SUGGESTIONS: int = 3
def _validate_hyperparams(self, hp_explicit, hp_all, hp_schema, class_):
from lale.settings import disable_hyperparams_schema_validation
if disable_hyperparams_schema_validation:
return
try:
validate_schema_directly(hp_all, hp_schema)
except jsonschema.ValidationError as e_orig:
e = e_orig if e_orig.parent is None else e_orig.parent
validate_is_schema(e.schema)
schema = lale.pretty_print.to_string(e.schema)
defaults = self.get_defaults()
extra_keys = [k for k in hp_explicit.keys() if k not in defaults]
trimmed_valid: bool = False
if extra_keys:
trimmed_hp_all = {
k: v for k, v in hp_all.items() if k not in extra_keys
}
trimmed_hp_explicit_keys = {
k for k in hp_explicit.keys() if k not in extra_keys
}
remove_recommendation = (
"unknown key "
+ ("s" if len(extra_keys) > 1 else "")
+ ", ".join(("'" + k + "'" for k in extra_keys))
)
try:
validate_schema_directly(trimmed_hp_all, hp_schema)
trimmed_valid = True
except jsonschema.ValidationError:
pass
else:
trimmed_hp_all = hp_all
trimmed_hp_explicit_keys = hp_explicit.keys()
remove_recommendation = ""
proposed_fix: str = ""
if trimmed_valid and remove_recommendation:
proposed_fix = "To fix, please remove " + remove_recommendation + "\n"
else:
find_fixed_hyperparam_iter = self._propose_fixed_hyperparams(
trimmed_hp_explicit_keys,
trimmed_hp_all,
hp_schema,
max_depth=self.MAX_FIX_DEPTH,
)
fix_suggestions: List[Dict[str, Any]] = list(
itertools.islice(
find_fixed_hyperparam_iter, self.MAX_FIX_SUGGESTIONS
)
)
if fix_suggestions:
from lale.pretty_print import hyperparams_to_string
if remove_recommendation:
remove_recommendation = (
"remove " + remove_recommendation + " and "
)
proposed_fix = "Some possible fixes include:\n" + "".join(
(
"- "
+ remove_recommendation
+ "set "
+ hyperparams_to_string(d)
+ "\n"
for d in fix_suggestions
)
)
if [*e.schema_path][:3] == ["allOf", 0, "properties"]:
arg = e.schema_path[3]
reason = f"invalid value {arg}={e.instance}"
schema_path = f"argument {arg}"
elif [*e.schema_path][:3] == ["allOf", 0, "additionalProperties"]:
pref, suff = "Additional properties are not allowed (", ")"
assert e.message.startswith(pref) and e.message.endswith(suff)
reason = "argument " + e.message[len(pref) : -len(suff)]
schema_path = "arguments and their defaults"
schema = self.get_defaults()
elif e.schema_path[0] == "allOf" and int(e.schema_path[1]) != 0:
assert e.schema_path[2] == "anyOf"
descr = e.schema["description"]
if descr.endswith("."):
descr = descr[:-1]
reason = f"constraint {descr[0].lower()}{descr[1:]}"
schema_path = "failing constraint"
if self.documentation_url() is not None:
schema = f"{self.documentation_url()}#constraint-{e.schema_path[1]}"
else:
reason = e.message
schema_path = e.schema_path
msg = (
f"Invalid configuration for {self.name()}("
+ f"{lale.pretty_print.hyperparams_to_string(hp_explicit if hp_explicit else {})}) "
+ f"due to {reason}.\n"
+ proposed_fix
+ f"Schema of {schema_path}: {schema}\n"
+ f"Invalid value: {e.instance}"
)
raise jsonschema.ValidationError(msg)
user_validator = getattr(class_, "validate_hyperparams", None)
if user_validator:
user_validator(**hp_all)
def validate_schema(self, X: Any, y: Any = None):
if self.has_method("fit"):
X = self._validate_input_schema("X", X, "fit")
method = "transform" if self.is_transformer() else "predict"
self._validate_input_schema("X", X, method)
if self.is_supervised(default_if_missing=False):
if y is None:
raise ValueError(f"{self.name()}.fit() y cannot be None")
if self.has_method("fit"):
y = self._validate_input_schema("y", y, "fit")
self._validate_input_schema("y", y, method)
def _validate_input_schema(self, arg_name: str, arg, method: str):
from lale.settings import disable_data_schema_validation
if disable_data_schema_validation:
return arg
if not is_empty_dict(arg):
if method == "fit":
schema = self.input_schema_fit()
elif method == "partial_fit":
schema = self.input_schema_partial_fit()
elif method == "transform":
schema = self.input_schema_transform()
elif method == "transform_X_y":
schema = self.input_schema_transform_X_y()
elif method == "predict":
schema = self.input_schema_predict()
elif method == "predict_proba":
schema = self.input_schema_predict_proba()
elif method == "predict_log_proba":
schema = self.input_schema_predict_log_proba()
elif method == "decision_function":
schema = self.input_schema_decision_function()
elif method == "score_samples":
schema = self.input_schema_score_samples()
else:
raise ValueError(f"Unexpected method argument: {method}")
if "properties" in schema and arg_name in schema["properties"]:
arg = add_schema(arg)
try:
sup: JSON_TYPE = schema["properties"][arg_name]
validate_schema(arg, sup)
except SubschemaError as e:
sub_str: str = lale.pretty_print.json_to_string(e.sub)
sup_str: str = lale.pretty_print.json_to_string(e.sup)
raise ValueError(
f"{self.name()}.{method}() invalid {arg_name}, the schema of the actual data is not a subschema of the expected schema of the argument.\nactual_schema = {sub_str}\nexpected_schema = {sup_str}"
) from None
except Exception as e:
exception_type = f"{type(e).__module__}.{type(e).__name__}"
raise ValueError(
f"{self.name()}.{method}() invalid {arg_name}: {exception_type}: {e}"
) from None
return arg
def _validate_output_schema(self, result, method):
from lale.settings import disable_data_schema_validation
if disable_data_schema_validation:
return result
if method == "transform":
schema = self.output_schema_transform()
elif method == "transform_X_y":
schema = self.output_schema_transform_X_y()
elif method == "predict":
schema = self.output_schema_predict()
elif method == "predict_proba":
schema = self.output_schema_predict_proba()
elif method == "predict_log_proba":
schema = self.output_schema_predict_log_proba()
elif method == "decision_function":
schema = self.output_schema_decision_function()
elif method == "score_samples":
schema = self.output_schema_score_samples()
else:
raise ValueError(f"Unexpected method argument: {method}")
result = add_schema(result)
try:
validate_schema(result, schema)
except Exception as e:
print(f"{self.name()}.{method}() invalid result: {e}")
raise ValueError(f"{self.name()}.{method}() invalid result: {e}") from e
return result
def transform_schema(self, s_X: JSON_TYPE) -> JSON_TYPE:
from lale.settings import disable_data_schema_validation
if disable_data_schema_validation:
return {}
elif self.is_transformer():
return self.output_schema_transform()
elif self.has_method("predict_proba"):
return self.output_schema_predict_proba()
elif self.has_method("decision_function"):
return self.output_schema_decision_function()
else:
return self.output_schema_predict()
def is_supervised(self, default_if_missing=True) -> bool:
if self.has_method("fit"):
schema_fit = self.input_schema_fit()
# first we try a fast path, since subschema checking can be a bit slow
if (
schema_fit is not None
and isinstance(schema_fit, dict)
and all(
                    k not in schema_fit for k in ["allOf", "anyOf", "oneOf", "not"]
)
):
req = schema_fit.get("required", None)
return req is not None and "y" in req
else:
return is_subschema(schema_fit, _is_supervised_schema)
return default_if_missing
def is_classifier(self) -> bool:
return self.has_tag("classifier")
def is_regressor(self) -> bool:
return self.has_tag("regressor")
def has_method(self, method_name: str) -> bool:
return hasattr(self._impl, method_name)
def is_transformer(self) -> bool:
"""Checks if the operator is a transformer"""
return self.has_method("transform")
@property
def _final_individual_op(self) -> Optional["IndividualOp"]:
return self
_is_supervised_schema = {"type": "object", "required": ["y"]}
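# Usage sketch for the supervision check above (operator names are illustrative
# and assume that lale.lib.sklearn is installed):
#
#   from lale.lib.sklearn import LogisticRegression, MinMaxScaler
#   LogisticRegression.is_supervised()  # True: input_schema_fit requires "y"
#   MinMaxScaler.is_supervised()        # False: fit only requires "X"
#
# The fast path simply inspects the "required" list of input_schema_fit; schemas
# that use combinators such as "allOf" fall back to the is_subschema check
# against _is_supervised_schema.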
class PlannedIndividualOp(IndividualOp, PlannedOperator):
"""
This is a concrete class that returns a trainable individual
operator through its __call__ method. A configure method can use
an optimizer and return the best hyperparameter combination.
"""
_hyperparams: Optional[Dict[str, Any]]
def __init__(
self,
_lale_name: str,
_lale_impl,
_lale_schemas,
_lale_frozen_hyperparameters=None,
_lale_trained=False,
**hp,
) -> None:
super().__init__(
_lale_name, _lale_impl, _lale_schemas, _lale_frozen_hyperparameters, **hp
)
def _should_configure_trained(self, impl):
# TODO: may also want to do this for other higher-order operators
if self.class_name() == _LALE_SKL_PIPELINE:
return isinstance(impl._pipeline, TrainedPipeline)
else:
return not hasattr(impl, "fit")
# give it a more precise type: if the input is an individual op, the output is as well
def auto_configure(
self, X: Any, y: Any = None, optimizer=None, cv=None, scoring=None, **kwargs
) -> "TrainedIndividualOp":
trained = super().auto_configure(
X, y=y, optimizer=optimizer, cv=cv, scoring=scoring, **kwargs
)
assert isinstance(trained, TrainedIndividualOp)
return trained
def __call__(self, *args, **kwargs) -> "TrainableIndividualOp":
return self._configure(*args, **kwargs)
def _hyperparam_schema_with_hyperparams(
self, data_schema: Optional[Dict[str, Any]] = None
):
def fix_hyperparams(schema):
hyperparams = self.hyperparams()
if not hyperparams:
return schema
props = {k: {"enum": [v]} for k, v in hyperparams.items()}
obj = {"type": "object", "properties": props}
obj["relevantToOptimizer"] = list(hyperparams.keys())
obj["required"] = list(hyperparams.keys())
top = {"allOf": [schema, obj]}
return top
s_1 = self.hyperparam_schema()
s_2 = fix_hyperparams(s_1)
if data_schema is None:
data_schema = {}
s_3 = replace_data_constraints(s_2, data_schema)
return s_3
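    # Sketch of what _hyperparam_schema_with_hyperparams produces (hypothetical
    # hyperparameter values, shown only to illustrate the shape): if an operator
    # was constructed with solver="saga", the bound value is pinned via a
    # singleton enum, roughly
    #
    #   {"allOf": [<original hyperparam schema>,
    #              {"type": "object",
    #               "properties": {"solver": {"enum": ["saga"]}},
    #               "relevantToOptimizer": ["solver"],
    #               "required": ["solver"]}]}
    #
    # so that search spaces derived from the schema keep the chosen values fixed,
    # while data constraints are resolved against the given data_schema.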
def freeze_trainable(self) -> "TrainableIndividualOp":
return self._configure().freeze_trainable()
def free_hyperparams(self):
hyperparam_schema = self.hyperparam_schema()
if (
"allOf" in hyperparam_schema
and "relevantToOptimizer" in hyperparam_schema["allOf"][0]
):
to_bind = hyperparam_schema["allOf"][0]["relevantToOptimizer"]
else:
to_bind = []
bound = self.frozen_hyperparams()
if bound is None:
return set(to_bind)
else:
return set(to_bind) - set(bound)
def is_frozen_trainable(self) -> bool:
free = self.free_hyperparams()
return len(free) == 0
def customize_schema(
self,
schemas: Optional[Schema] = None,
relevantToOptimizer: Optional[List[str]] = None,
constraint: Union[
Schema, JSON_TYPE, List[Union[Schema, JSON_TYPE]], None
] = None,
tags: Optional[Dict] = None,
forwards: Union[bool, List[str], None] = None,
set_as_available: bool = False,
**kwargs: Union[Schema, JSON_TYPE, None],
) -> "PlannedIndividualOp":
return customize_schema(
self,
schemas,
relevantToOptimizer,
constraint,
tags,
forwards,
set_as_available,
**kwargs,
)
def _mutation_warning(method_name: str) -> str:
    msg = (
        "The `{}` method is deprecated on a trainable "
        "operator, because the learned coefficients could be "
        "accidentally overwritten by retraining. Call `{}` "
        "on the trained operator returned by `fit` instead."
    )
return msg.format(method_name, method_name)
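# The deprecation pattern implemented below in TrainableIndividualOp: calling
# transform, predict, and friends on a *trainable* operator warns and delegates to
# the result of the most recent fit. The recommended style is (sketch; operator
# and variable names are illustrative):
#
#   from lale.lib.sklearn import LogisticRegression
#   trainable = LogisticRegression(C=0.1)
#   trained = trainable.fit(train_X, train_y)   # returns a TrainedIndividualOp
#   predictions = trained.predict(test_X)       # call methods on the trained copy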
class TrainableIndividualOp(PlannedIndividualOp, TrainableOperator):
def __init__(
self,
_lale_name,
_lale_impl,
_lale_schemas,
_lale_frozen_hyperparameters=None,
**hp,
):
super().__init__(
_lale_name, _lale_impl, _lale_schemas, _lale_frozen_hyperparameters, **hp
)
def set_params(self, **impl_params):
"""This implements the set_params, as per the scikit-learn convention,
extended as documented in the module docstring"""
return self._with_params(True, **impl_params)
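    # set_params sketch (hypothetical values): the new values are merged with the
    # existing hyperparameters and, if an impl instance already exists, pushed into
    # it via its own set_params (or into its _wrapped_model) where available:
    #
    #   op = LogisticRegression(C=1.0)
    #   op.set_params(C=0.1)   # mutates op in place, per the scikit-learn convention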
def _with_op_params(
self, try_mutate, **impl_params: Dict[str, Any]
) -> "TrainableIndividualOp":
if not try_mutate:
return super()._with_op_params(try_mutate, **impl_params)
hps = self.hyperparams_all()
if hps is not None:
hyperparams = {**hps, **impl_params}
else:
hyperparams = impl_params
frozen = self.frozen_hyperparams()
self._hyperparams = hyperparams
if frozen:
frozen.extend((k for k in impl_params if k not in frozen))
else:
self._frozen_hyperparams = list(impl_params.keys())
if self._is_instantiated():
# if we already have an instance impl, we need to update it
impl = self._impl
if hasattr(impl, "set_params"):
new_impl = impl.set_params(**hyperparams)
self._impl = new_impl
self._impl_class_ = new_impl.__class__
elif hasattr(impl, "_wrapped_model") and hasattr(
impl._wrapped_model, "set_params"
):
impl._wrapped_model.set_params(**hyperparams)
else:
hyper_d = {**self.get_defaults(), **hyperparams}
self._impl = self._impl_class()(**hyper_d)
return self
def _clone_impl(self):
impl_instance = self._impl_instance()
if hasattr(impl_instance, "get_params"):
result = sklearn.base.clone(impl_instance)
else:
try:
result = copy.deepcopy(impl_instance)
except Exception:
impl_class = self._impl_class()
params_all = self._get_params_all()
result = impl_class(**params_all)
return result
def _trained_hyperparams(self, trained_impl) -> Optional[Dict[str, Any]]:
hp = self.hyperparams()
if not hp:
return None
# TODO: may also want to do this for other higher-order operators
if self.class_name() != _LALE_SKL_PIPELINE:
return hp
names_list = [name for name, op in hp["steps"]]
steps_list = trained_impl._pipeline.steps_list()
trained_steps = list(zip(names_list, steps_list))
result = {**hp, "steps": trained_steps}
return result
def _validate_hyperparam_data_constraints(self, X: Any, y: Any = None):
from lale.settings import disable_hyperparams_schema_validation
if disable_hyperparams_schema_validation:
return
hp_schema = self.hyperparam_schema()
if not hasattr(self, "__has_data_constraints"):
has_dc = has_data_constraints(hp_schema)
self.__has_data_constraints = has_dc
if self.__has_data_constraints:
hp_explicit = self.hyperparams()
hp_all = self._get_params_all()
data_schema = fold_schema(X, y)
hp_schema_2 = replace_data_constraints(hp_schema, data_schema)
self._validate_hyperparams(
hp_explicit, hp_all, hp_schema_2, self.impl_class
)
def fit(self, X: Any, y: Any = None, **fit_params) -> "TrainedIndividualOp":
# logger.info("%s enter fit %s", time.asctime(), self.name())
X = self._validate_input_schema("X", X, "fit")
y = self._validate_input_schema("y", y, "fit")
self._validate_hyperparam_data_constraints(X, y)
filtered_fit_params = _fixup_hyperparams_dict(fit_params)
if isinstance(self, TrainedIndividualOp):
trainable_impl = self._impl_instance()
else:
trainable_impl = self._clone_impl()
if filtered_fit_params is None:
trained_impl = trainable_impl.fit(X, y)
else:
trained_impl = trainable_impl.fit(X, y, **filtered_fit_params)
        # if the trainable's fit method returns None, assume that
        # the trainable should be used as the trained impl as well
if trained_impl is None:
trained_impl = trainable_impl
hps = self._trained_hyperparams(trained_impl)
frozen: Optional[List[str]] = list(hps.keys()) if hps is not None else None
if hps is None:
hps = {}
result = TrainedIndividualOp(
self.name(),
trained_impl,
self._schemas,
_lale_trained=True,
_lale_frozen_hyperparameters=frozen,
**hps,
)
if not isinstance(self, TrainedIndividualOp):
self._trained = result
# logger.info("%s exit fit %s", time.asctime(), self.name())
return result
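    # fit sketch: for a not-yet-trained operator the impl is cloned, fitted, and
    # wrapped in a fresh TrainedIndividualOp, so the trainable keeps its
    # hyperparameters but no learned state (illustrative names):
    #
    #   trained = trainable.fit(X, y)
    #   assert isinstance(trained, TrainedIndividualOp)
    #   trained is trainable   # False; learned coefficients live on `trained`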
def partial_fit(self, X: Any, y: Any = None, **fit_params) -> "TrainedIndividualOp":
if not self.has_method("partial_fit"):
raise AttributeError(f"{self.name()} has no partial_fit implemented.")
X = self._validate_input_schema("X", X, "partial_fit")
y = self._validate_input_schema("y", y, "partial_fit")
self._validate_hyperparam_data_constraints(X, y)
filtered_fit_params = _fixup_hyperparams_dict(fit_params)
# if the operator is trainable but has been trained before, use the _trained to
# call partial fit, and update ._trained
if hasattr(self, "_trained"):
self._trained = self._trained.partial_fit(X, y, **fit_params)
return self._trained
else:
trainable_impl = self._clone_impl()
if filtered_fit_params is None:
trained_impl = trainable_impl.partial_fit(X, y)
else:
trained_impl = trainable_impl.partial_fit(X, y, **filtered_fit_params)
if trained_impl is None:
trained_impl = trainable_impl
hps = self.hyperparams_all()
if hps is None:
hps = {}
result = TrainedIndividualOp(
self.name(),
trained_impl,
self._schemas,
_lale_trained=True,
_lale_frozen_hyperparameters=self.frozen_hyperparams(),
**hps,
)
if not isinstance(self, TrainedIndividualOp):
self._trained = result
return result
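    # partial_fit sketch for incremental training over batches (illustrative names;
    # requires an impl that provides partial_fit, such as an SGD-style estimator):
    #
    #   op = trainable
    #   for batch_X, batch_y in batches:
    #       op = op.partial_fit(batch_X, batch_y)
    #   predictions = op.predict(test_X)
    #
    # The first call returns a TrainedIndividualOp; the override further below in
    # TrainedIndividualOp then updates that trained operator in place.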
def freeze_trained(self) -> "TrainedIndividualOp":
"""
.. deprecated:: 0.0.0
The `freeze_trained` method is deprecated on a trainable
operator, because the learned coefficients could be
accidentally overwritten by retraining. Call `freeze_trained`
on the trained operator returned by `fit` instead.
"""
warnings.warn(_mutation_warning("freeze_trained"), DeprecationWarning)
try:
return self._trained.freeze_trained()
except AttributeError as exc:
raise ValueError("Must call `fit` before `freeze_trained`.") from exc
def __repr__(self):
name = self.name()
hps = self.reduced_hyperparams()
hyp_string: str
if hps is None:
hyp_string = ""
else:
hyp_string = lale.pretty_print.hyperparams_to_string(hps)
return name + "(" + hyp_string + ")"
@if_delegate_has_method(delegate="_impl")
def get_pipeline(
self, pipeline_name: Optional[str] = None, astype: astype_type = "lale"
) -> Optional[TrainableOperator]:
"""
.. deprecated:: 0.0.0
The `get_pipeline` method is deprecated on a trainable
operator, because the learned coefficients could be
accidentally overwritten by retraining. Call `get_pipeline`
on the trained operator returned by `fit` instead.
"""
warnings.warn(_mutation_warning("get_pipeline"), DeprecationWarning)
try:
return self._trained.get_pipeline(pipeline_name, astype)
except AttributeError as exc:
raise ValueError("Must call `fit` before `get_pipeline`.") from exc
@if_delegate_has_method(delegate="_impl")
def summary(self) -> pd.DataFrame:
"""
.. deprecated:: 0.0.0
The `summary` method is deprecated on a trainable
operator, because the learned coefficients could be
accidentally overwritten by retraining. Call `summary`
on the trained operator returned by `fit` instead.
"""
warnings.warn(_mutation_warning("summary"), DeprecationWarning)
try:
return self._trained.summary()
except AttributeError as exc:
raise ValueError("Must call `fit` before `summary`.") from exc
@if_delegate_has_method(delegate="_impl")
def transform(self, X: Any, y: Any = None) -> Any:
"""
.. deprecated:: 0.0.0
The `transform` method is deprecated on a trainable
operator, because the learned coefficients could be
accidentally overwritten by retraining. Call `transform`
on the trained operator returned by `fit` instead.
"""
warnings.warn(_mutation_warning("transform"), DeprecationWarning)
try:
return self._trained.transform(X, y)
except AttributeError as exc:
raise ValueError("Must call `fit` before `transform`.") from exc
@if_delegate_has_method(delegate="_impl")
def predict(self, X=None, **predict_params) -> Any:
"""
.. deprecated:: 0.0.0
The `predict` method is deprecated on a trainable
operator, because the learned coefficients could be
accidentally overwritten by retraining. Call `predict`
on the trained operator returned by `fit` instead.
"""
warnings.warn(_mutation_warning("predict"), DeprecationWarning)
try:
            return self._trained.predict(X, **predict_params)
except AttributeError as exc:
raise ValueError("Must call `fit` before `predict`.") from exc
@if_delegate_has_method(delegate="_impl")
def predict_proba(self, X=None):
"""
.. deprecated:: 0.0.0
The `predict_proba` method is deprecated on a trainable
operator, because the learned coefficients could be
accidentally overwritten by retraining. Call `predict_proba`
on the trained operator returned by `fit` instead.
"""
warnings.warn(_mutation_warning("predict_proba"), DeprecationWarning)
try:
return self._trained.predict_proba(X)
except AttributeError as exc:
raise ValueError("Must call `fit` before `predict_proba`.") from exc
@if_delegate_has_method(delegate="_impl")
def decision_function(self, X=None):
"""
.. deprecated:: 0.0.0
The `decision_function` method is deprecated on a trainable
operator, because the learned coefficients could be
accidentally overwritten by retraining. Call `decision_function`
on the trained operator returned by `fit` instead.
"""
warnings.warn(_mutation_warning("decision_function"), DeprecationWarning)
try:
return self._trained.decision_function(X)
except AttributeError as exc:
raise ValueError("Must call `fit` before `decision_function`.") from exc
@if_delegate_has_method(delegate="_impl")
def score(self, X, y, **score_params) -> Any:
"""
.. deprecated:: 0.0.0
The `score` method is deprecated on a trainable
operator, because the learned coefficients could be
accidentally overwritten by retraining. Call `score`
on the trained operator returned by `fit` instead.
"""
warnings.warn(_mutation_warning("score"), DeprecationWarning)
try:
if score_params is None:
return self._trained.score(X, y)
else:
return self._trained.score(X, y, **score_params)
except AttributeError as exc:
raise ValueError("Must call `fit` before `score`.") from exc
@if_delegate_has_method(delegate="_impl")
def score_samples(self, X=None):
"""
.. deprecated:: 0.0.0
The `score_samples` method is deprecated on a trainable
operator, because the learned coefficients could be
accidentally overwritten by retraining. Call `score_samples`
on the trained operator returned by `fit` instead.
"""
warnings.warn(_mutation_warning("score_samples"), DeprecationWarning)
try:
return self._trained.score_samples(X)
except AttributeError as exc:
raise ValueError("Must call `fit` before `score_samples`.") from exc
@if_delegate_has_method(delegate="_impl")
def predict_log_proba(self, X=None):
"""
.. deprecated:: 0.0.0
The `predict_log_proba` method is deprecated on a trainable
operator, because the learned coefficients could be
accidentally overwritten by retraining. Call `predict_log_proba`
on the trained operator returned by `fit` instead.
"""
warnings.warn(_mutation_warning("predict_log_proba"), DeprecationWarning)
try:
return self._trained.predict_log_proba(X)
except AttributeError as exc:
raise ValueError("Must call `fit` before `predict_log_proba`.") from exc
def free_hyperparams(self) -> Set[str]:
hyperparam_schema = self.hyperparam_schema()
to_bind: List[str]
if (
"allOf" in hyperparam_schema
and "relevantToOptimizer" in hyperparam_schema["allOf"][0]
):
to_bind = hyperparam_schema["allOf"][0]["relevantToOptimizer"]
else:
to_bind = []
bound = self.frozen_hyperparams()
if bound is None:
return set(to_bind)
else:
return set(to_bind) - set(bound)
def _freeze_trainable_bindings(self) -> Dict[str, Any]:
old_bindings = self.hyperparams_all()
if old_bindings is None:
old_bindings = {}
free = self.free_hyperparams()
defaults: Mapping[str, Any] = self.get_defaults()
new_bindings: Dict[str, Any] = {name: defaults[name] for name in free}
bindings: Dict[str, Any] = {**old_bindings, **new_bindings}
return bindings
def freeze_trainable(self) -> "TrainableIndividualOp":
bindings = self._freeze_trainable_bindings()
result = self._configure(**bindings)
assert result.is_frozen_trainable(), str(result.free_hyperparams())
return result
def transform_schema(self, s_X: JSON_TYPE):
from lale.settings import disable_data_schema_validation
if disable_data_schema_validation:
return {}
if self.has_method("transform_schema"):
try:
return self._impl_instance().transform_schema(s_X)
except BaseException as exc:
raise ValueError(
f"unexpected error in {self.name()}.transform_schema({lale.pretty_print.to_string(s_X)}"
) from exc
else:
return super().transform_schema(s_X)
def input_schema_fit(self) -> JSON_TYPE:
if self.has_method("input_schema_fit"):
return self._impl_instance().input_schema_fit()
else:
return super().input_schema_fit()
def customize_schema(
self,
schemas: Optional[Schema] = None,
relevantToOptimizer: Optional[List[str]] = None,
constraint: Union[
Schema, JSON_TYPE, List[Union[Schema, JSON_TYPE]], None
] = None,
tags: Optional[Dict] = None,
forwards: Union[bool, List[str], None] = None,
set_as_available: bool = False,
**kwargs: Union[Schema, JSON_TYPE, None],
) -> "TrainableIndividualOp":
return customize_schema(
self,
schemas,
relevantToOptimizer,
constraint,
tags,
forwards,
set_as_available,
**kwargs,
)
def convert_to_trained(self) -> "TrainedIndividualOp":
trained_op = TrainedIndividualOp(
_lale_name=self._name,
_lale_impl=self.impl,
_lale_schemas=self._schemas,
_lale_frozen_hyperparameters=self.frozen_hyperparams(),
_lale_trained=True,
)
if hasattr(self, "_frozen_trained"):
trained_op._frozen_trained = self._frozen_trained
if hasattr(self, "_hyperparams"):
trained_op._hyperparams = self._hyperparams
return trained_op
class TrainedIndividualOp(TrainableIndividualOp, TrainedOperator):
_frozen_trained: bool
def __new__(cls, *args, _lale_trained=False, _lale_impl=None, **kwargs):
if (
"_lale_name" not in kwargs
or _lale_trained
or (_lale_impl is not None and not hasattr(_lale_impl, "fit"))
):
obj = super().__new__(TrainedIndividualOp)
return obj
else:
# unless _lale_trained=True, we actually want to return a Trainable
obj = super().__new__(TrainableIndividualOp)
            # apparently Python does not call __init__ if the type returned is not
            # the expected type
obj.__init__(*args, **kwargs)
return obj
def __init__(
self,
_lale_name,
_lale_impl,
_lale_schemas,
_lale_frozen_hyperparameters=None,
_lale_trained=False,
**hp,
):
super().__init__(
_lale_name, _lale_impl, _lale_schemas, _lale_frozen_hyperparameters, **hp
)
self._frozen_trained = not self.has_method("fit")
def __call__(self, *args, **kwargs) -> "TrainedIndividualOp":
filtered_kwargs_params = _fixup_hyperparams_dict(kwargs)
trainable = self._configure(*args, **filtered_kwargs_params)
hps = trainable.hyperparams_all()
if hps is None:
hps = {}
instance = TrainedIndividualOp(
trainable._name,
trainable._impl,
trainable._schemas,
_lale_trained=True,
_lale_frozen_hyperparameters=trainable.frozen_hyperparams(),
**hps,
)
return instance
def fit(self, X: Any, y: Any = None, **fit_params) -> "TrainedIndividualOp":
if self.has_method("fit") and not self.is_frozen_trained():
filtered_fit_params = _fixup_hyperparams_dict(fit_params)
try:
return super().fit(X, y, **filtered_fit_params)
except AttributeError:
return self # for Project with static columns after clone()
else:
return self
@if_delegate_has_method(delegate="_impl")
def transform(self, X: Any, y: Any = None) -> Any:
"""Transform the data.
Parameters
----------
X :
Features; see input_transform schema of the operator.
y: None
Returns
-------
result :
Transformed features; see output_transform schema of the operator.
"""
# logger.info("%s enter transform %s", time.asctime(), self.name())
X = self._validate_input_schema("X", X, "transform")
if "y" in [
required_property.lower()
for required_property in self.input_schema_transform().get("required", [])
]:
y = self._validate_input_schema("y", y, "transform")
raw_result = self._impl_instance().transform(X, y)
else:
raw_result = self._impl_instance().transform(X)
result = self._validate_output_schema(raw_result, "transform")
# logger.info("%s exit transform %s", time.asctime(), self.name())
return result
@if_delegate_has_method(delegate="_impl")
def transform_X_y(self, X: Any, y: Any) -> Any:
"""Transform the data and target.
Parameters
----------
X :
            Features; see input_transform_X_y schema of the operator.
        y :
            Target; see input_transform_X_y schema of the operator.
        Returns
        -------
        result :
            Transformed features and target; see output_transform_X_y schema of the operator.
"""
X = self._validate_input_schema("X", X, "transform_X_y")
y = self._validate_input_schema("y", y, "transform_X_y")
output_X, output_y = self._impl_instance().transform_X_y(X, y)
output_X, output_y = self._validate_output_schema(
(output_X, output_y), "transform_X_y"
)
return output_X, output_y
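    # transform_X_y sketch: unlike transform, this returns an (X, y) pair, which is
    # what resampling or relabeling operators need (illustrative names):
    #
    #   resampled_X, resampled_y = trained_resampler.transform_X_y(X, y)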
def _predict(self, X, **predict_params):
X = self._validate_input_schema("X", X, "predict")
raw_result = self._impl_instance().predict(X, **predict_params)
result = self._validate_output_schema(raw_result, "predict")
return result
@if_delegate_has_method(delegate="_impl")
def predict(self, X: Any = None, **predict_params) -> Any:
"""Make predictions.
Parameters
----------
X :
Features; see input_predict schema of the operator.
predict_params:
Additional parameters that should be passed to the predict method
Returns
-------
result :
Predictions; see output_predict schema of the operator.
"""
# logger.info("%s enter predict %s", time.asctime(), self.name())
result = self._predict(X, **predict_params)
# logger.info("%s exit predict %s", time.asctime(), self.name())
if isinstance(result, NDArrayWithSchema):
return strip_schema(result) # otherwise scorers return zero-dim array
return result
@if_delegate_has_method(delegate="_impl")
def predict_proba(self, X: Any = None):
"""Probability estimates for all classes.
Parameters
----------
X :
Features; see input_predict_proba schema of the operator.
Returns
-------
result :
Probabilities; see output_predict_proba schema of the operator.
"""
# logger.info("%s enter predict_proba %s", time.asctime(), self.name())
X = self._validate_input_schema("X", X, "predict_proba")
raw_result = self._impl_instance().predict_proba(X)
result = self._validate_output_schema(raw_result, "predict_proba")
# logger.info("%s exit predict_proba %s", time.asctime(), self.name())
return result
@if_delegate_has_method(delegate="_impl")
def decision_function(self, X: Any = None):
"""Confidence scores for all classes.
Parameters
----------
X :
Features; see input_decision_function schema of the operator.
Returns
-------
result :
Confidences; see output_decision_function schema of the operator.
"""
# logger.info("%s enter decision_function %s", time.asctime(), self.name())
X = self._validate_input_schema("X", X, "decision_function")
raw_result = self._impl_instance().decision_function(X)
result = self._validate_output_schema(raw_result, "decision_function")
# logger.info("%s exit decision_function %s", time.asctime(), self.name())
return result
@if_delegate_has_method(delegate="_impl")
def score(self, X: Any, y: Any, **score_params) -> Any:
"""Performance evaluation with a default metric.
Parameters
----------
X :
Features.
y:
Ground truth labels.
score_params:
Any additional parameters expected by the score function of
the underlying operator.
Returns
-------
score :
performance metric value
"""
# Use the input schema of predict as in most cases it applies to score as well.
X = self._validate_input_schema("X", X, "predict")
if score_params is None:
result = self._impl_instance().score(X, y)
else:
result = self._impl_instance().score(X, y, **score_params)
# We skip output validation for score for now
return result
@if_delegate_has_method(delegate="_impl")
def score_samples(self, X: Any = None):
"""Scores for each sample in X. The type of scores depends on the operator.
Parameters
----------
X :
Features.
Returns
-------
result :
scores per sample.
"""
X = self._validate_input_schema("X", X, "score_samples")
raw_result = self._impl_instance().score_samples(X)
result = self._validate_output_schema(raw_result, "score_samples")
return result
@if_delegate_has_method(delegate="_impl")
def predict_log_proba(self, X: Any = None):
"""Predicted class log-probabilities for X.
Parameters
----------
X :
Features.
Returns
-------
result :
Class log probabilities.
"""
X = self._validate_input_schema("X", X, "predict_log_proba")
raw_result = self._impl_instance().predict_log_proba(X)
result = self._validate_output_schema(raw_result, "predict_log_proba")
return result
def freeze_trainable(self) -> "TrainedIndividualOp":
result = copy.deepcopy(self)
new_bindings = self._freeze_trainable_bindings()
result._hyperparams = new_bindings
result._frozen_hyperparams = list(new_bindings)
assert result.is_frozen_trainable(), str(result.free_hyperparams())
assert isinstance(result, TrainedIndividualOp)
return result
def is_frozen_trained(self) -> bool:
return self._frozen_trained
def freeze_trained(self) -> "TrainedIndividualOp":
if self.is_frozen_trained():
return self
result = copy.deepcopy(self)
result._frozen_trained = True
assert result.is_frozen_trained()
return result
@overload
def get_pipeline(
self, pipeline_name: None = None, astype: astype_type = "lale"
) -> Optional[TrainedOperator]:
...
@overload
def get_pipeline( # pylint:disable=signature-differs
self, pipeline_name: str, astype: astype_type = "lale"
) -> Optional[TrainableOperator]:
...
@if_delegate_has_method(delegate="_impl")
def get_pipeline(self, pipeline_name=None, astype: astype_type = "lale"):
result = self._impl_instance().get_pipeline(pipeline_name, astype)
return result
@if_delegate_has_method(delegate="_impl")
def summary(self) -> pd.DataFrame:
return self._impl_instance().summary()
def customize_schema(
self,
schemas: Optional[Schema] = None,
relevantToOptimizer: Optional[List[str]] = None,
constraint: Union[
Schema, JSON_TYPE, List[Union[Schema, JSON_TYPE]], None
] = None,
tags: Optional[Dict] = None,
forwards: Union[bool, List[str], None] = None,
set_as_available: bool = False,
**kwargs: Union[Schema, JSON_TYPE, None],
) -> "TrainedIndividualOp":
return customize_schema(
self,
schemas,
relevantToOptimizer,
constraint,
tags,
forwards,
set_as_available,
**kwargs,
)
def partial_fit(self, X: Any, y: Any = None, **fit_params) -> "TrainedIndividualOp":
if not self.has_method("partial_fit"):
raise AttributeError(f"{self.name()} has no partial_fit implemented.")
X = self._validate_input_schema("X", X, "partial_fit")
y = self._validate_input_schema("y", y, "partial_fit")
self._validate_hyperparam_data_constraints(X, y)
filtered_fit_params = _fixup_hyperparams_dict(fit_params)
# Since this is a trained operator and we are calling partial_fit,
# we allow the trained op to be mutated by using the same impl to
# call partial_fit
trainable_impl = self.shallow_impl
if filtered_fit_params is None:
trained_impl = trainable_impl.partial_fit(X, y)
else:
trained_impl = trainable_impl.partial_fit(X, y, **filtered_fit_params)
if trained_impl is None:
trained_impl = trainable_impl
self._impl = trained_impl
return self
_all_available_operators: List[PlannedOperator] = []
def wrap_operator(impl) -> Operator:
if isinstance(impl, Operator):
return impl
else:
return make_operator(impl)
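# wrap_operator sketch: an existing lale Operator passes through unchanged, while a
# raw impl class or instance is wrapped via make_operator (assumes scikit-learn is
# installed):
#
#   import sklearn.decomposition
#   op = wrap_operator(sklearn.decomposition.PCA)   # becomes a PlannedIndividualOp
#   op is wrap_operator(op)                         # True: already an Operator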
# variant of make_operator for impls that are already trained (don't have a fit method)
def make_pretrained_operator(
impl, schemas=None, name: Optional[str] = None
) -> TrainedIndividualOp:
x = make_operator(impl, schemas, name)
assert isinstance(x, TrainedIndividualOp)
return x
def get_op_from_lale_lib(impl_class, wrapper_modules=None) -> Optional[IndividualOp]:
assert inspect.isclass(impl_class)
assert not issubclass(impl_class, Operator)
assert hasattr(impl_class, "predict") or hasattr(impl_class, "transform")
result = None
if impl_class.__module__.startswith("lale.lib"):
assert impl_class.__name__.endswith("Impl"), impl_class.__name__
assert impl_class.__name__.startswith("_"), impl_class.__name__
module = importlib.import_module(impl_class.__module__)
class_name = impl_class.__name__[1 : -len("Impl")]
result = getattr(module, class_name)
else:
try:
module_name = impl_class.__module__.split(".")[0]
module = importlib.import_module("lale.lib." + module_name)
result = getattr(module, impl_class.__name__)
except (ModuleNotFoundError, AttributeError):
try:
module = importlib.import_module("lale.lib.autogen")
result = getattr(module, impl_class.__name__)
except (ModuleNotFoundError, AttributeError):
if wrapper_modules is not None:
for wrapper_module in wrapper_modules:
try:
module = importlib.import_module(wrapper_module)
result = getattr(module, impl_class.__name__)
if result is not None:
break
except (ModuleNotFoundError, AttributeError):
pass
else:
result = None
if result is not None:
result._check_schemas()
return result
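# Resolution order used by get_op_from_lale_lib (sketch): an impl class _FooImpl
# defined inside lale.lib resolves to the operator Foo exported by its own module;
# any other class is looked up by name in lale.lib.<top-level package of the impl>,
# then in lale.lib.autogen, and finally in any user-supplied wrapper_modules:
#
#   import sklearn.linear_model
#   op = get_op_from_lale_lib(sklearn.linear_model.LogisticRegression)
#   # resolves to lale.lib.sklearn.LogisticRegression when that wrapper is available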
def get_lib_schemas(impl_class) -> Optional[JSON_TYPE]:
operator = get_op_from_lale_lib(impl_class)
return None if operator is None else operator._schemas
def make_operator(
impl, schemas=None, name: Optional[str] = None, set_as_available: bool = True
) -> PlannedIndividualOp:
if name is None:
name = assignee_name(level=2)
if name is None:
if inspect.isclass(impl):
n: str = impl.__name__
if n.startswith("_"):
n = n[1:]
if n.endswith("Impl"):
n = n[: -len("Impl")]
name = n
else:
name = "Unknown"
if schemas is None:
if isinstance(impl, IndividualOp):
schemas = impl._schemas
elif inspect.isclass(impl):
schemas = get_lib_schemas(impl)
else:
schemas = get_lib_schemas(impl.__class__)
if inspect.isclass(impl):
if hasattr(impl, "fit"):
operatorObj = PlannedIndividualOp(
name, impl, schemas, _lale_frozen_hyperparameters=None
)
else:
operatorObj = TrainedIndividualOp(
name,
impl,
schemas,
_lale_trained=True,
_lale_frozen_hyperparameters=None,
)
else:
hps: Dict[str, Any] = {}
frozen: Optional[List[str]] = None
impl_get_params = getattr(impl, "get_params", None)
if impl_get_params is not None:
hps = impl_get_params(deep=False)
frozen = list(hps.keys())
if hasattr(impl, "fit"):
operatorObj = TrainableIndividualOp(
name, impl, schemas, _lale_frozen_hyperparameters=frozen, **hps
)
else:
operatorObj = TrainedIndividualOp(
name,
impl,
schemas,
_lale_trained=True,
_lale_frozen_hyperparameters=frozen,
**hps,
)
operatorObj._check_schemas()
if set_as_available:
_all_available_operators.append(operatorObj)
return operatorObj
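# make_operator sketch: wrapping a class yields a planned (or pretrained) operator,
# while wrapping an instance yields a trainable (or trained) operator whose
# get_params(deep=False) output becomes its frozen hyperparameters (assumes
# scikit-learn is installed):
#
#   import sklearn.tree
#   Planned = make_operator(sklearn.tree.DecisionTreeClassifier)      # from a class
#   trainable = make_operator(sklearn.tree.DecisionTreeClassifier())  # from an instance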
def get_available_operators(
tag: str, more_tags: Optional[AbstractSet[str]] = None
) -> List[PlannedOperator]:
singleton = set([tag])
tags = singleton if (more_tags is None) else singleton.union(more_tags)
def filter_by_tags(op):
tags_dict = op.get_tags()
if tags_dict is None:
return False
tags_set = {tag for prefix in tags_dict for tag in tags_dict[prefix]}
return tags.issubset(tags_set)
return [op for op in _all_available_operators if filter_by_tags(op)]
def get_available_estimators(
tags: Optional[AbstractSet[str]] = None,
) -> List[PlannedOperator]:
return get_available_operators("estimator", tags)
def get_available_transformers(
tags: Optional[AbstractSet[str]] = None,
) -> List[PlannedOperator]:
return get_available_operators("transformer", tags)
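# Sketch of querying the registry populated by make_operator (the results depend on
# which wrapper modules have been imported in the current session, and the tag
# names below are illustrative):
#
#   import lale.lib.sklearn  # importing a wrapper module registers its operators
#   classifiers = get_available_operators("classifier")
#   interpretable = get_available_operators("classifier", {"interpretable"})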
OpType_co = TypeVar("OpType_co", bound=Operator, covariant=True)
class BasePipeline(Operator, Generic[OpType_co]):
"""
This is a concrete class that can instantiate a new pipeline operator and provide access to its meta data.
"""
_steps: List[OpType_co]
_preds: Dict[OpType_co, List[OpType_co]]
_cached_preds: Optional[Dict[int, List[int]]]
_name: str
def _steps_to_indices(self) -> Dict[OpType_co, int]:
return {op: i for i, op in enumerate(self._steps)}
def _preds_to_indices(self) -> Dict[int, List[int]]:
step_map = self._steps_to_indices()
return {
step_map[k]: ([step_map[v] for v in vs]) for (k, vs) in self._preds.items()
}
def _get_preds_indices(self) -> Dict[int, List[int]]:
p: Dict[int, List[int]]
if self._cached_preds is None:
p = self._preds_to_indices()
self._cached_preds = p
else:
p = self._cached_preds
return p
@property
def _estimator_type(self):
estimator = self._final_individual_op
if estimator is not None:
return estimator._estimator_type
else:
raise ValueError(
"Cannot determine the _estimator_type, since this pipeline does not have a unique final operator"
)
@classmethod
def _indices_to_preds(
cls, _steps: List[OpType_co], _pred_indices: Dict[int, List[int]]
) -> Dict[OpType_co, List[OpType_co]]:
return {
_steps[k]: ([_steps[v] for v in vs]) for (k, vs) in _pred_indices.items()
}
def get_params(self, deep: Union[bool, Literal[0]] = True) -> Dict[str, Any]:
"""
If deep is False, additional '_lale_XXX' fields are added to support
        cloning. If these are not desired, deep=0 can be used to disable this.
"""
out: Dict[str, Any] = {}
out["steps"] = self._steps
if deep is False:
out["_lale_preds"] = self._get_preds_indices()
indices: Dict[str, int] = {}
def make_indexed(name: str) -> str:
idx = 0
if name in indices:
idx = indices[name] + 1
indices[name] = idx
else:
indices[name] = 0
return make_indexed_name(name, idx)
if deep:
for op in self._steps:
name = make_indexed(op.name())
nested_params = op.get_params(deep=deep)
if nested_params:
out.update(nest_HPparams(name, nested_params))
return out
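    # get_params sketch: with deep=True, each step's parameters are re-exported
    # under an indexed form of the step name (via make_indexed_name, so repeated
    # operators stay distinguishable) and nested with nest_HPparams; with
    # deep=False, the extra '_lale_preds' entry records the edge structure so that
    # clone() can rebuild the graph, and deep=0 suppresses it:
    #
    #   shallow = pipe.get_params(deep=False)   # includes 'steps' and '_lale_preds'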
def set_params(self, **impl_params):
"""This implements the set_params, as per the scikit-learn convention,
extended as documented in the module docstring"""
return self._with_params(True, **impl_params)
def _with_params(
self, try_mutate: bool, **impl_params
) -> "BasePipeline[OpType_co]":
steps = self.steps_list()
main_params, partitioned_sub_params = partition_sklearn_params(impl_params)
assert not main_params, f"Unexpected non-nested arguments {main_params}"
found_names: Dict[str, int] = {}
step_map: Dict[OpType_co, OpType_co] = {}
for s in steps:
name = s.name()
name_index = 0
params: Dict[str, Any] = {}
if name in found_names:
name_index = found_names[name] + 1
found_names[name] = name_index
uname = make_indexed_name(name, name_index)
params = partitioned_sub_params.get(uname, params)
else:
found_names[name] = 0
uname = make_degen_indexed_name(name, 0)
if uname in partitioned_sub_params:
params = partitioned_sub_params[uname]
assert name not in partitioned_sub_params
else:
params = partitioned_sub_params.get(name, params)
new_s = s._with_params(try_mutate, **params)
if s != new_s:
# getting this to statically type check would be very complicated
# if even possible
step_map[s] = new_s # type: ignore
# make sure that no parameters were passed in for operations
# that are not actually part of this pipeline
for k in partitioned_sub_params:
n, i = get_name_and_index(k)
assert n in found_names and i <= found_names[n]
if try_mutate:
if step_map:
self._subst_steps(step_map)
pipeline_graph_class = _pipeline_graph_class(self.steps_list())
self.__class__ = pipeline_graph_class # type: ignore
return self
else:
needs_copy = False
if step_map:
needs_copy = True
else:
pipeline_graph_class = _pipeline_graph_class(self.steps_list())
if pipeline_graph_class != self.__class__: # type: ignore
needs_copy = True
if needs_copy:
# it may be better practice to change the steps/edges ahead of time
# and then create the correct class
op_copy = make_pipeline_graph(self.steps_list(), self.edges(), ordered=True) # type: ignore
op_copy._subst_steps(step_map)
pipeline_graph_class = _pipeline_graph_class(op_copy.steps_list())
op_copy.__class__ = pipeline_graph_class # type: ignore
return op_copy
else:
return self
def __init__(
self,
steps: List[OpType_co],
edges: Optional[Iterable[Tuple[OpType_co, OpType_co]]] = None,
_lale_preds: Optional[
Union[Dict[int, List[int]], Dict[OpType_co, List[OpType_co]]]
] = None,
ordered: bool = False,
) -> None:
self._name = "pipeline_" + str(id(self))
self._preds = {}
for step in steps:
assert isinstance(step, Operator)
if _lale_preds is not None:
# this is a special case that is meant for use with cloning
# if preds is set, we assume that it is ordered as well
assert edges is None
self._steps = steps
if _lale_preds:
# TODO: improve typing situation
keys: Iterable[Any] = _lale_preds.keys()
first_key = next(iter(keys))
if isinstance(first_key, int):
self._preds = self._indices_to_preds(steps, _lale_preds) # type: ignore
self._cached_preds = _lale_preds # type: ignore
else:
self._preds = _lale_preds # type: ignore
self._cached_preds = None # type: ignore
else:
self._cached_preds = _lale_preds # type: ignore
return
self._cached_preds = None
if edges is None:
            # This means it is a linear pipeline. TODO: test extensively with clone and get_params.
            # This constructor is mostly called due to cloning. Make sure the objects are kept the same.
self.__constructor_for_cloning(steps)
else:
self._steps = []
for step in steps:
if step in self._steps:
raise ValueError(
f"Same instance of {step.name()} already exists in the pipeline. "
f"This is not allowed."
)
if isinstance(step, BasePipeline):
# PIPELINE_TYPE_INVARIANT_NOTE
# we use tstep (typed step) here to help pyright
# with some added information we have:
# Since the step is an OpType, if it is a pipeline,
# then its steps must all be at least OpType as well
# this invariant is not expressible in the type system due to
# the open world assumption, but is intended to hold
tstep: BasePipeline[OpType_co] = step
# Flatten out the steps and edges
self._steps.extend(tstep.steps_list())
# from step's edges, find out all the source and sink nodes
source_nodes = [
dst
for dst in tstep.steps_list()
if (step._preds[dst] is None or step._preds[dst] == [])
]
sink_nodes = tstep._find_sink_nodes()
                    # Replace edges into the inner pipeline with edges to its source
                    # nodes, and edges out of it with edges from its sink nodes
new_edges: List[Tuple[OpType_co, OpType_co]] = tstep.edges()
# list comprehension at the cost of iterating edges thrice
new_edges.extend(
[
(node, edge[1])
for edge in edges
if edge[0] == tstep
for node in sink_nodes
]
)
new_edges.extend(
[
(edge[0], node)
for edge in edges
if edge[1] == tstep
for node in source_nodes
]
)
new_edges.extend(
edge for edge in edges if tstep not in (edge[0], edge[1])
)
edges = new_edges
else:
self._steps.append(step)
self._preds = {step: [] for step in self._steps}
for src, dst in edges:
self._preds[dst].append(src) # type: ignore
if not ordered:
self.__sort_topologically()
assert self.__is_in_topological_order()
def __constructor_for_cloning(self, steps: List[OpType_co]):
edges: List[Tuple[OpType_co, OpType_co]] = []
prev_op: Optional[OpType_co] = None
# This is due to scikit base's clone method that needs the same list object
self._steps = steps
prev_leaves: List[OpType_co]
curr_roots: List[OpType_co]
for curr_op in self._steps:
if isinstance(prev_op, BasePipeline):
# using tprev_op as per PIPELINE_TYPE_INVARIANT_NOTE above
tprev_op: BasePipeline[OpType_co] = prev_op
prev_leaves = tprev_op._find_sink_nodes()
else:
prev_leaves = [] if prev_op is None else [prev_op]
prev_op = curr_op
if isinstance(curr_op, BasePipeline):
# using tcurr_op as per PIPELINE_TYPE_INVARIANT_NOTE above
tcurr_op: BasePipeline[OpType_co] = curr_op
curr_roots = tcurr_op._find_source_nodes()
self._steps.extend(tcurr_op.steps_list())
edges.extend(tcurr_op.edges())
else:
curr_roots = [curr_op]
edges.extend([(src, tgt) for src in prev_leaves for tgt in curr_roots])
seen_steps: List[OpType_co] = []
for step in self._steps:
if step in seen_steps:
raise ValueError(
f"Same instance of {step.name()} already exists in the pipeline. "
f"This is not allowed."
)
seen_steps.append(step)
self._preds = {step: [] for step in self._steps}
for src, dst in edges:
self._preds[dst].append(src)
# Since this case is only allowed for linear pipelines, it is always
# expected to be in topological order
assert self.__is_in_topological_order()
def edges(self) -> List[Tuple[OpType_co, OpType_co]]:
return [(src, dst) for dst in self._steps for src in self._preds[dst]]
def __is_in_topological_order(self) -> bool:
seen: Dict[OpType_co, bool] = {}
for operator in self._steps:
for pred in self._preds[operator]:
if pred not in seen:
return False
seen[operator] = True
return True
def steps_list(self) -> List[OpType_co]:
return self._steps
@property
def steps(self) -> List[Tuple[str, OpType_co]]:
"""This is meant to function similarly to the scikit-learn steps property
and for linear pipelines, should behave the same
"""
return [(s.name(), s) for s in self._steps]
def _subst_steps(self, m: Dict[OpType_co, OpType_co]) -> None:
if m:
# for i, s in enumerate(self._steps):
# self._steps[i] = m.get(s,s)
self._steps = [m.get(s, s) for s in self._steps]
self._preds = {
m.get(k, k): [m.get(s, s) for s in v] for k, v in self._preds.items()
}
def __sort_topologically(self) -> None:
class state(enumeration.Enum):
TODO = (enumeration.auto(),)
DOING = (enumeration.auto(),)
DONE = enumeration.auto()
states: Dict[OpType_co, state] = {op: state.TODO for op in self._steps}
result: List[OpType_co] = []
        # Since OpType is covariant, this is disallowed by mypy for safety.
        # In this case it is safe: while the value of result will be written
        # into _steps, all the values in result came from _steps originally.
def dfs(operator: OpType_co) -> None: # type: ignore
if states[operator] is state.DONE:
return
if states[operator] is state.DOING:
raise ValueError("Cycle detected.")
states[operator] = state.DOING
for pred in self._preds[operator]:
dfs(pred)
states[operator] = state.DONE
result.append(operator)
for operator in self._steps:
if states[operator] is state.TODO:
dfs(operator)
self._steps = result
def _has_same_impl(self, other: Operator) -> bool:
"""Checks if the type of the operator imnplementations are compatible"""
if not isinstance(other, BasePipeline):
return False
my_steps = self.steps_list()
other_steps = other.steps_list()
if len(my_steps) != len(other_steps):
return False
for m, o in zip(my_steps, other_steps):
if not m._has_same_impl(o):
return False
return True
def _find_sink_nodes(self) -> List[OpType_co]:
is_sink = {s: True for s in self.steps_list()}
for src, _ in self.edges():
is_sink[src] = False
result = [s for s in self.steps_list() if is_sink[s]]
return result
def _find_source_nodes(self) -> List[OpType_co]:
is_source = {s: True for s in self.steps_list()}
for _, dst in self.edges():
is_source[dst] = False
result = [s for s in self.steps_list() if is_source[s]]
return result
def _validate_or_transform_schema(self, X: Any, y: Any = None, validate=True):
def combine_schemas(schemas):
n_datasets = len(schemas)
if n_datasets == 1:
result = schemas[0]
else:
result = {
"type": "array",
"minItems": n_datasets,
"maxItems": n_datasets,
"items": [_to_schema(i) for i in schemas],
}
return result
outputs: Dict[OpType_co, Any] = {}
for operator in self._steps:
preds = self._preds[operator]
if len(preds) == 0:
input_X, input_y = X, y
else:
input_X = combine_schemas([outputs[pred][0] for pred in preds])
input_y = outputs[preds[0]][1]
if validate:
operator.validate_schema(X=input_X, y=input_y)
if operator.has_method("transform_X_y"):
output_Xy = operator.output_schema_transform_X_y()
output_X, output_y = output_Xy["items"]
else:
output_X = operator.transform_schema(input_X)
output_y = input_y
outputs[operator] = output_X, output_y
if not validate:
sinks = self._find_sink_nodes()
pipeline_outputs = [outputs[sink][0] for sink in sinks]
return combine_schemas(pipeline_outputs)
def validate_schema(self, X: Any, y: Any = None):
self._validate_or_transform_schema(X, y, validate=True)
def transform_schema(self, s_X: JSON_TYPE):
from lale.settings import disable_data_schema_validation
if disable_data_schema_validation:
return {}
else:
return self._validate_or_transform_schema(s_X, validate=False)
def input_schema_fit(self) -> JSON_TYPE:
sources = self._find_source_nodes()
pipeline_inputs = [source.input_schema_fit() for source in sources]
result = join_schemas(*pipeline_inputs)
return result
def is_supervised(self) -> bool:
s = self.steps_list()
if len(s) == 0:
return False
return self.steps_list()[-1].is_supervised()
def remove_last(self, inplace: bool = False) -> "BasePipeline[OpType_co]":
sink_nodes = self._find_sink_nodes()
if len(sink_nodes) > 1:
raise ValueError(
"This pipeline has more than 1 sink nodes, can not remove last step meaningfully."
)
if not inplace:
modified_pipeline = copy.deepcopy(self)
old_clf = modified_pipeline._steps[-1]
modified_pipeline._steps.remove(old_clf)
del modified_pipeline._preds[old_clf]
return modified_pipeline
else:
old_clf = self._steps[-1]
self._steps.remove(old_clf)
del self._preds[old_clf]
return self
def get_last(self) -> Optional[OpType_co]:
sink_nodes = self._find_sink_nodes()
if len(sink_nodes) > 1:
return None
else:
old_clf = self._steps[-1]
return old_clf
def export_to_sklearn_pipeline(self):
from sklearn.pipeline import FeatureUnion
from sklearn.pipeline import make_pipeline as sklearn_make_pipeline
from lale.lib.lale.no_op import NoOp
from lale.lib.rasl.concat_features import ConcatFeatures
from lale.lib.rasl.relational import Relational
def convert_nested_objects(node):
for element in dir(node): # Looking at only 1 level for now.
try:
value = getattr(node, element)
if isinstance(value, IndividualOp):
if isinstance(value.shallow_impl, sklearn.base.BaseEstimator):
setattr(node, element, value.shallow_impl)
if hasattr(value.shallow_impl, "_wrapped_model"):
# node is a higher order operator
setattr(node, element, value.shallow_impl._wrapped_model)
stripped = strip_schema(value)
if value is stripped:
continue
setattr(node, element, stripped)
except BaseException:
                # This processing is optional, so if there is any exception, continue.
                # For example, some scikit-learn classes will fail at getattr because
                # they have that property defined conditionally.
pass
def create_pipeline_from_sink_node(sink_node):
# Ensure that the pipeline is either linear or has a "union followed by concat" construct
# Translate the "union followed by concat" constructs to "featureUnion"
# Inspect the node and convert any data with schema objects to original data types
if isinstance(sink_node, OperatorChoice):
raise ValueError(
f"A pipeline that has an OperatorChoice can not be converted to "
f" a scikit-learn pipeline:{self.to_json()}"
)
if sink_node.impl_class == Relational.impl_class:
return None
convert_nested_objects(sink_node._impl)
if sink_node.impl_class == ConcatFeatures.impl_class:
list_of_transformers = []
for pred in self._preds[sink_node]:
pred_transformer = create_pipeline_from_sink_node(pred)
list_of_transformers.append(
(
pred.name() + "_" + str(id(pred)),
sklearn_make_pipeline(*pred_transformer)
if isinstance(pred_transformer, list)
else pred_transformer,
)
)
return FeatureUnion(list_of_transformers)
else:
preds = self._preds[sink_node]
if preds is not None and len(preds) > 1:
raise ValueError(
f"A pipeline graph that has operators other than ConcatFeatures with "
f"multiple incoming edges is not a valid scikit-learn pipeline:{self.to_json()}"
)
if hasattr(sink_node.shallow_impl, "_wrapped_model"):
sklearn_op = sink_node.shallow_impl._wrapped_model
convert_nested_objects(
sklearn_op
) # This case needs one more level of conversion
else:
sklearn_op = sink_node.shallow_impl
sklearn_op = copy.deepcopy(sklearn_op)
if preds is None or len(preds) == 0:
return sklearn_op
else:
output_pipeline_steps = []
previous_sklearn_op = create_pipeline_from_sink_node(preds[0])
if previous_sklearn_op is not None and not isinstance(
previous_sklearn_op, NoOp.impl_class
):
if isinstance(previous_sklearn_op, list):
output_pipeline_steps = previous_sklearn_op
else:
output_pipeline_steps.append(previous_sklearn_op)
if not isinstance(
sklearn_op, NoOp.impl_class
): # Append the current op only if not NoOp
output_pipeline_steps.append(sklearn_op)
return output_pipeline_steps
sklearn_steps_list = []
# Finding the sink node so that we can do a backward traversal
sink_nodes = self._find_sink_nodes()
# For a trained pipeline that is scikit compatible, there should be only one sink node
if len(sink_nodes) != 1:
raise ValueError(
f"A pipeline graph that ends with more than one estimator is not a"
f" valid scikit-learn pipeline:{self.to_json()}"
)
sklearn_steps_list = create_pipeline_from_sink_node(sink_nodes[0])
# not checking for isinstance(sklearn_steps_list, NoOp) here as there is no valid sklearn pipeline with just one NoOp.
try:
sklearn_pipeline = (
sklearn_make_pipeline(*sklearn_steps_list)
if isinstance(sklearn_steps_list, list)
else sklearn_make_pipeline(sklearn_steps_list)
)
except TypeError as exc:
raise TypeError(
"Error creating a scikit-learn pipeline, most likely because the steps are not scikit compatible."
) from exc
return sklearn_pipeline
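    # export_to_sklearn_pipeline sketch (illustrative operators from
    # lale.lib.sklearn; requires a trained, scikit-learn compatible pipeline): a
    # linear lale pipeline becomes a sklearn.pipeline.Pipeline, and a ConcatFeatures
    # join of several transformers becomes a FeatureUnion step.
    #
    #   trained = (MinMaxScaler() >> LogisticRegression()).fit(train_X, train_y)
    #   skl_pipe = trained.export_to_sklearn_pipeline()
    #   skl_pipe.predict(test_X)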
def is_classifier(self) -> bool:
sink_nodes = self._find_sink_nodes()
for op in sink_nodes:
if not op.is_classifier():
return False
return True
def get_defaults(self) -> Dict[str, Any]:
defaults_list: Iterable[Dict[str, Any]] = (
nest_HPparams(s.name(), s.get_defaults()) for s in self.steps_list()
)
# TODO: could this just be dict(defaults_list)
defaults: Dict[str, Any] = {}
for d in defaults_list:
defaults.update(d)
return defaults
@property
def _final_individual_op(self) -> Optional["IndividualOp"]:
op = self.get_last()
if op is None:
return None
else:
return op._final_individual_op
PlannedOpType_co = TypeVar("PlannedOpType_co", bound=PlannedOperator, covariant=True)
class PlannedPipeline(BasePipeline[PlannedOpType_co], PlannedOperator):
def __init__(
self,
steps: List[PlannedOpType_co],
edges: Optional[Iterable[Tuple[PlannedOpType_co, PlannedOpType_co]]] = None,
_lale_preds: Optional[Dict[int, List[int]]] = None,
ordered: bool = False,
) -> None:
super().__init__(steps, edges=edges, _lale_preds=_lale_preds, ordered=ordered)
# give it a more precise type: if the input is a pipeline, the output is as well
def auto_configure(
self, X: Any, y: Any = None, optimizer=None, cv=None, scoring=None, **kwargs
) -> "TrainedPipeline":
trained = super().auto_configure(
X, y=y, optimizer=optimizer, cv=cv, scoring=scoring, **kwargs
)
assert isinstance(trained, TrainedPipeline)
return trained
def remove_last(self, inplace: bool = False) -> "PlannedPipeline[PlannedOpType_co]":
pipe = super().remove_last(inplace=inplace)
assert isinstance(pipe, PlannedPipeline)
return pipe
def is_frozen_trainable(self) -> bool:
return all(step.is_frozen_trainable() for step in self.steps_list())
def is_frozen_trained(self) -> bool:
return all(step.is_frozen_trained() for step in self.steps_list())
TrainableOpType_co = TypeVar(
"TrainableOpType_co", bound=TrainableIndividualOp, covariant=True # type: ignore
)
class TrainablePipeline(PlannedPipeline[TrainableOpType_co], TrainableOperator):
def __init__(
self,
steps: List[TrainableOpType_co],
edges: Optional[Iterable[Tuple[TrainableOpType_co, TrainableOpType_co]]] = None,
_lale_preds: Optional[Dict[int, List[int]]] = None,
ordered: bool = False,
_lale_trained=False,
) -> None:
super().__init__(steps, edges=edges, _lale_preds=_lale_preds, ordered=ordered)
def remove_last(
self, inplace: bool = False
) -> "TrainablePipeline[TrainableOpType_co]":
pipe = super().remove_last(inplace=inplace)
assert isinstance(pipe, TrainablePipeline)
return pipe
def fit(
self, X: Any, y: Any = None, **fit_params
) -> "TrainedPipeline[TrainedIndividualOp]":
# filtered_fit_params = _fixup_hyperparams_dict(fit_params)
X = add_schema(X)
y = add_schema(y)
self.validate_schema(X, y)
trained_steps: List[TrainedIndividualOp] = []
outputs: Dict[Operator, Tuple[Any, Any]] = {}
meta_outputs: Dict[Operator, Any] = {}
edges: List[Tuple[TrainableOpType_co, TrainableOpType_co]] = self.edges()
trained_map: Dict[TrainableOpType_co, TrainedIndividualOp] = {}
sink_nodes = self._find_sink_nodes()
for operator in self._steps:
preds = self._preds[operator]
if len(preds) == 0:
inputs = [(X, y)]
meta_data_inputs: Dict[Operator, Any] = {}
else:
inputs = [outputs[pred] for pred in preds]
# we create meta_data_inputs as a dictionary with metadata from all previous steps
# Note that if multiple previous steps generate the same key, it will retain only one of those.
meta_data_inputs = {
key: meta_outputs[pred][key]
for pred in preds
if meta_outputs[pred] is not None
for key in meta_outputs[pred]
}
trainable = operator
assert isinstance(inputs, list) and len(inputs) >= 1
if len(inputs) == 1:
input_X, input_y = inputs[0]
else:
input_X = [iX for iX, _ in inputs]
input_y = next(iy for _, iy in inputs)
if operator.has_method("set_meta_data"):
operator._impl_instance().set_meta_data(meta_data_inputs)
meta_output: Dict[Operator, Any] = {}
trained: TrainedOperator
if trainable.is_supervised():
trained = trainable.fit(input_X, input_y)
else:
trained = trainable.fit(input_X)
trained_map[operator] = trained
trained_steps.append(trained)
if (
trainable not in sink_nodes
): # There is no need to transform/predict on the last node during fit
if trained.is_transformer():
if trained.has_method("transform_X_y"):
output = trained.transform_X_y(input_X, input_y)
else:
output = trained.transform(input_X), input_y
if trained.has_method("get_transform_meta_output"):
meta_output = (
trained._impl_instance().get_transform_meta_output()
)
else:
                    # This is ok because trainable pipeline steps
                    # must only be individual operators
if trained.has_method("predict_proba"): # type: ignore
output = trained.predict_proba(input_X), input_y
elif trained.has_method("decision_function"): # type: ignore
output = trained.decision_function(input_X), input_y
else:
output = trained._predict(input_X), input_y
if trained.has_method("get_predict_meta_output"):
meta_output = trained._impl_instance().get_predict_meta_output()
outputs[operator] = output
meta_output_so_far = {
key: meta_outputs[pred][key]
for pred in preds
if meta_outputs[pred] is not None
for key in meta_outputs[pred]
}
meta_output_so_far.update(
meta_output
) # So newest gets preference in case of collisions
meta_outputs[operator] = meta_output_so_far
trained_edges = [(trained_map[a], trained_map[b]) for a, b in edges]
result: TrainedPipeline[TrainedIndividualOp] = TrainedPipeline(
trained_steps, trained_edges, ordered=True, _lale_trained=True
)
self._trained = result
return result
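    # fit sketch: each step is trained on the outputs of its predecessors; interior
    # transformers push the data forward through transform (or transform_X_y),
    # while interior estimators forward predict_proba, decision_function, or
    # predict output. Illustrative usage with lale.lib.sklearn operators:
    #
    #   trainable = MinMaxScaler() >> PCA() >> LogisticRegression()
    #   trained = trainable.fit(train_X, train_y)   # a TrainedPipeline
    #   predictions = trained.predict(test_X)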
def transform(self, X: Any, y=None) -> Any:
"""
.. deprecated:: 0.0.0
The `transform` method is deprecated on a trainable
operator, because the learned coefficients could be
accidentally overwritten by retraining. Call `transform`
on the trained operator returned by `fit` instead.
"""
warnings.warn(_mutation_warning("transform"), DeprecationWarning)
try:
return self._trained.transform(X, y=y)
except AttributeError as exc:
raise ValueError("Must call `fit` before `transform`.") from exc
def predict(self, X, **predict_params) -> Any:
"""
.. deprecated:: 0.0.0
The `predict` method is deprecated on a trainable
operator, because the learned coefficients could be
accidentally overwritten by retraining. Call `predict`
on the trained operator returned by `fit` instead.
"""
warnings.warn(_mutation_warning("predict"), DeprecationWarning)
try:
return self._trained.predict(X, **predict_params)
except AttributeError as exc:
raise ValueError("Must call `fit` before `predict`.") from exc
def predict_proba(self, X):
"""
.. deprecated:: 0.0.0
The `predict_proba` method is deprecated on a trainable
operator, because the learned coefficients could be
accidentally overwritten by retraining. Call `predict_proba`
on the trained operator returned by `fit` instead.
"""
warnings.warn(_mutation_warning("predict_proba"), DeprecationWarning)
try:
return self._trained.predict_proba(X)
except AttributeError as exc:
raise ValueError("Must call `fit` before `predict_proba`.") from exc
def decision_function(self, X):
"""
.. deprecated:: 0.0.0
The `decision_function` method is deprecated on a trainable
operator, because the learned coefficients could be
accidentally overwritten by retraining. Call `decision_function`
on the trained operator returned by `fit` instead.
"""
warnings.warn(_mutation_warning("decision_function"), DeprecationWarning)
try:
return self._trained.decision_function(X)
except AttributeError as exc:
raise ValueError("Must call `fit` before `decision_function`.") from exc
def score(self, X, y, **score_params):
"""
.. deprecated:: 0.0.0
The `score` method is deprecated on a trainable
operator, because the learned coefficients could be
accidentally overwritten by retraining. Call `score`
on the trained operator returned by `fit` instead.
"""
warnings.warn(_mutation_warning("score"), DeprecationWarning)
try:
return self._trained.score(X, y, **score_params)
except AttributeError as exc:
raise ValueError("Must call `fit` before `score`.") from exc
def score_samples(self, X=None):
"""
.. deprecated:: 0.0.0
The `score_samples` method is deprecated on a trainable
operator, because the learned coefficients could be
accidentally overwritten by retraining. Call `score_samples`
on the trained operator returned by `fit` instead.
"""
warnings.warn(_mutation_warning("score_samples"), DeprecationWarning)
try:
return self._trained.score_samples(X)
except AttributeError as exc:
raise ValueError("Must call `fit` before `score_samples`.") from exc
def predict_log_proba(self, X):
"""
.. deprecated:: 0.0.0
The `predict_log_proba` method is deprecated on a trainable
operator, because the learned coefficients could be
accidentally overwritten by retraining. Call `predict_log_proba`
on the trained operator returned by `fit` instead.
"""
warnings.warn(_mutation_warning("predict_log_proba"), DeprecationWarning)
try:
return self._trained.predict_log_proba(X)
except AttributeError as exc:
raise ValueError("Must call `fit` before `predict_log_proba`.") from exc
def freeze_trainable(self) -> "TrainablePipeline":
frozen_steps: List[TrainableOperator] = []
frozen_map: Dict[Operator, Operator] = {}
for liquid in self._steps:
frozen = liquid.freeze_trainable()
frozen_map[liquid] = frozen
frozen_steps.append(frozen)
frozen_edges = [(frozen_map[x], frozen_map[y]) for x, y in self.edges()]
result = cast(
TrainablePipeline,
make_pipeline_graph(frozen_steps, frozen_edges, ordered=True),
)
assert result.is_frozen_trainable()
return result
def is_transformer(self) -> bool:
"""Checks if the operator is a transformer"""
sink_nodes = self._find_sink_nodes()
all_transformers = [
bool(operator.has_method("transform")) for operator in sink_nodes
]
return all(all_transformers)
def convert_to_trained(self) -> "TrainedPipeline[TrainedIndividualOp]":
trained_steps: List[TrainedIndividualOp] = []
trained_map: Dict[TrainableOpType_co, TrainedIndividualOp] = {}
for step in self.steps_list():
trained_step = step.convert_to_trained()
trained_steps.append(trained_step)
trained_map[step] = trained_step
trained_edges = [(trained_map[x], trained_map[y]) for (x, y) in self.edges()]
return TrainedPipeline(trained_steps, trained_edges, _lale_trained=True)
def partial_fit(
self,
X: Any,
y: Any = None,
freeze_trained_prefix: bool = True,
unsafe: bool = False,
**fit_params,
) -> "TrainedPipeline[TrainedIndividualOp]":
"""partial_fit for a pipeline.
This method assumes that all but the last node of a pipeline are frozen_trained and
only the last node needs to be fit using its partial_fit method.
If that is not the case and `freeze_trained_prefix` is True, it freezes the prefix
of the pipeline (all nodes except the last), provided they are trained.
Parameters
----------
X :
Features; see partial_fit schema of the last node.
y:
Labels/target
freeze_trained_prefix:
If True, all but the last node are freeze_trained and only
the last node is partial_fit.
unsafe:
boolean.
This flag allows users to override the validation that throws an error when
the operators in the prefix of this pipeline are not tagged with `has_partial_transform`.
Setting unsafe to True performs the transform as if it were row-wise, even when it may not be.
fit_params:
dict
Additional keyword arguments to be passed to partial_fit of the estimator
Returns
-------
TrainedPipeline :
A partially trained pipeline, which can be trained further by other calls to partial_fit
Raises
------
ValueError
The pipeline has a non-frozen prefix
"""
estimator_only = True
for operator in self._steps[:-1]:
if not operator.is_frozen_trained():
estimator_only = False
if not estimator_only and not freeze_trained_prefix:
raise ValueError(
"""partial_fit is only supported on pipelines when all but the last node are frozen_trained and
only the last node needs to be fit using its partial_fit method. The parameter `freeze_trained_prefix`
can be set to True if the prefix is trained and needs to be frozen during partial_fit."""
)
if hasattr(self, "_trained"):
# This is the case where partial_fit has been called before,
# so the partially fit pipeline is stored in _trained.
# update that object
self._trained = self._trained.partial_fit(X, y, **fit_params)
return self._trained
else:
# if this is the first time partial_fit is called on this pipeline,
# we would not have a _trained obj, so convert the prefix to a trained pipeline
# explicitly and do a transform and partial_fit as expected.
sink_node = self._steps[-1]
pipeline_prefix = self.remove_last()
if not estimator_only and freeze_trained_prefix:
pipeline_prefix = pipeline_prefix.freeze_trained()
trained_pipeline_prefix = pipeline_prefix.convert_to_trained()
transformed_output = trained_pipeline_prefix.transform(X, y)
if isinstance(transformed_output, tuple):
transformed_X, transformed_y = transformed_output
else:
transformed_X = transformed_output
transformed_y = y
trained_sink_node = sink_node.partial_fit(
transformed_X, transformed_y, **fit_params
)
new_pipeline = trained_pipeline_prefix >> trained_sink_node
self._trained = new_pipeline
return new_pipeline
def freeze_trained(self) -> "TrainedPipeline":
frozen_steps = []
frozen_map = {}
for liquid in self._steps:
frozen = liquid.freeze_trained()
frozen_map[liquid] = frozen
frozen_steps.append(frozen)
frozen_edges = [(frozen_map[x], frozen_map[y]) for x, y in self.edges()]
result = TrainedPipeline(
frozen_steps, frozen_edges, ordered=True, _lale_trained=True
)
assert result.is_frozen_trained()
return result
TrainedOpType_co = TypeVar("TrainedOpType_co", bound=TrainedIndividualOp, covariant=True) # type: ignore
class TrainedPipeline(TrainablePipeline[TrainedOpType_co], TrainedOperator):
def __new__(cls, *args, _lale_trained=False, **kwargs):
if "steps" not in kwargs or _lale_trained:
obj = super().__new__(TrainedPipeline)
return obj
else:
# unless _lale_trained=True, we actually want to return a Trainable
obj = super().__new__(TrainablePipeline)
# apparently python does not call __init__ if the type returned is not the
# expected type
obj.__init__(*args, **kwargs)
return obj
def __init__(
self,
steps: List[TrainedOpType_co],
edges: Optional[List[Tuple[TrainedOpType_co, TrainedOpType_co]]] = None,
_lale_preds: Optional[Dict[int, List[int]]] = None,
ordered: bool = False,
_lale_trained=False,
) -> None:
super().__init__(steps, edges=edges, _lale_preds=_lale_preds, ordered=ordered)
def remove_last(self, inplace: bool = False) -> "TrainedPipeline[TrainedOpType_co]":
pipe = super().remove_last(inplace)
assert isinstance(pipe, TrainedPipeline)
return pipe
def _predict(self, X: Any, y: Any = None, **predict_params):
return self._predict_based_on_type(
"predict", "_predict", X, y, **predict_params
)
def predict(self, X, **predict_params) -> Any:
result = self._predict(X, **predict_params)
if isinstance(result, NDArrayWithSchema):
return strip_schema(result) # otherwise scorers return zero-dim array
return result
def transform(self, X: Any, y: Any = None) -> Any:
# TODO: What does a transform on a pipeline mean, if the last step is not a transformer
# can it be just the output of predict of the last step?
# If this implementation changes, check to make sure that the implementation of
# self.is_transformer is kept in sync with the new assumptions.
return self._predict_based_on_type("transform", "transform", X, y)
def transform_X_y(self, X: Any, y: Any = None) -> Any:
return self._predict_based_on_type("transform_X_y", "transform_X_y", X, y)
def _predict_based_on_type(
self, impl_method_name, operator_method_name, X=None, y=None, **kwargs
):
outputs = {}
meta_outputs = {}
sink_nodes = self._find_sink_nodes()
for operator in self._steps:
preds = self._preds[operator]
if len(preds) == 0:
inputs = [(X, y)]
meta_data_inputs = {}
else:
inputs = [outputs[pred] for pred in preds]
# we create meta_data_inputs as a dictionary with metadata from all previous steps
# Note that if multiple previous steps generate the same key, it will retain only one of those.
meta_data_inputs = {
key: meta_outputs[pred][key]
for pred in preds
if meta_outputs[pred] is not None
for key in meta_outputs[pred]
}
assert isinstance(inputs, list) and len(inputs) >= 1
if len(inputs) == 1:
input_X, input_y = inputs[0]
else:
input_X = [iX for iX, _ in inputs]
input_y = next(iy for _, iy in inputs)
if operator.has_method("set_meta_data"):
operator._impl_instance().set_meta_data(meta_data_inputs)
meta_output = {}
if operator in sink_nodes:
if operator.has_method(
impl_method_name
): # Since this is pipeline's predict, we should invoke predict from sink nodes
method_to_call_on_operator = getattr(operator, operator_method_name)
if operator_method_name == "score":
output = (
method_to_call_on_operator(input_X, input_y, **kwargs),
input_y,
)
elif operator_method_name == "transform_X_y":
output = method_to_call_on_operator(input_X, input_y, **kwargs)
else:
output = method_to_call_on_operator(input_X, **kwargs), input_y
else:
raise AttributeError(
f"The sink node {type(operator.impl)} of the pipeline does not support {operator_method_name}"
)
elif operator.is_transformer():
if operator.has_method("transform_X_y"):
output = operator.transform_X_y(input_X, input_y)
else:
output = operator.transform(input_X), input_y
if hasattr(operator._impl, "get_transform_meta_output"):
meta_output = operator._impl_instance().get_transform_meta_output()
elif operator.has_method(
"predict_proba"
): # For estimator as a transformer, use predict_proba if available
output = operator.predict_proba(input_X), input_y
elif operator.has_method(
"decision_function"
): # For estimator as a transformer, use decision_function if available
output = operator.decision_function(input_X), input_y
else:
output = operator._predict(input_X), input_y
if operator.has_method("get_predict_meta_output"):
meta_output = operator._impl_instance().get_predict_meta_output()
outputs[operator] = output
meta_output_so_far = {
key: meta_outputs[pred][key]
for pred in preds
if meta_outputs[pred] is not None
for key in meta_outputs[pred]
}
meta_output_so_far.update(
meta_output
) # So newest gets preference in case of collisions
meta_outputs[operator] = meta_output_so_far
result_X, result_y = outputs[self._steps[-1]]
if operator_method_name == "transform_X_y":
return result_X, result_y
return result_X
def predict_proba(self, X: Any):
"""Probability estimates for all classes.
Parameters
----------
X :
Features; see input_predict_proba schema of the operator.
Returns
-------
result :
Probabilities; see output_predict_proba schema of the operator.
"""
return self._predict_based_on_type("predict_proba", "predict_proba", X)
def decision_function(self, X: Any):
"""Confidence scores for all classes.
Parameters
----------
X :
Features; see input_decision_function schema of the operator.
Returns
-------
result :
Confidences; see output_decision_function schema of the operator.
"""
return self._predict_based_on_type("decision_function", "decision_function", X)
def score(self, X: Any, y: Any, **score_params):
"""Performance evaluation with a default metric based on the final estimator.
Parameters
----------
X :
Features.
y:
Ground truth labels.
score_params:
Any additional parameters expected by the score function of
the final estimator. These will be ignored for now.
Returns
-------
score :
Performance metric value.
"""
return self._predict_based_on_type("score", "score", X, y)
def score_samples(self, X: Any = None):
"""Scores for each sample in X. There type of scores is based on the last operator in the pipeline.
Parameters
----------
X :
Features.
Returns
-------
result :
Scores per sample.
"""
return self._predict_based_on_type("score_samples", "score_samples", X)
def predict_log_proba(self, X: Any):
"""Predicted class log-probabilities for X.
Parameters
----------
X :
Features.
Returns
-------
result :
Class log probabilities.
"""
return self._predict_based_on_type("predict_log_proba", "predict_log_proba", X)
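# Editor's sketch of how the predict-style methods above are typically used on a
# trained pipeline (assuming imports from lale.lib.sklearn; names are illustrative):
#
#   trained = (PCA() >> LogisticRegression()).fit(train_X, train_y)
#   labels = trained.predict(test_X)
#   probas = trained.predict_proba(test_X)   # dispatched to the sink node
#   accuracy = trained.score(test_X, test_y)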
def transform_with_batches(self, X: Any, y: Any = None, serialize: bool = True):
"""[summary]
Parameters
----------
X : Any
[description]
y : [type], optional
by default None
serialize: boolean
should data be serialized if needed
Returns
-------
[type]
[description]
"""
outputs: Dict[TrainedOpType_co, tuple] = {}
serialization_out_dir: Text = ""
if serialize:
serialization_out_dir = os.path.join(
os.path.dirname(__file__), "temp_serialized"
)
if not os.path.exists(serialization_out_dir):
os.mkdir(serialization_out_dir)
sink_nodes = self._find_sink_nodes()
sink_node = sink_nodes[0]
operator_idx = 0
inputs: Any
output = None
for batch_data in X: # batching_transformer will output only one obj
if isinstance(batch_data, tuple):
batch_X, batch_y = batch_data
else:
batch_X = batch_data
batch_y = None
for operator in self._steps:
preds = self._preds[operator]
if len(preds) == 0:
inputs = batch_X
else:
inputs = [
outputs[pred][0]
if isinstance(outputs[pred], tuple)
else outputs[pred]
for pred in preds
]
if len(inputs) == 1:
inputs = inputs[0]
trained = operator
if trained.is_transformer():
assert not trained.has_method("transform_X_y"), "TODO"
batch_output = trained.transform(inputs, batch_y)
else:
if trained in sink_nodes:
batch_output = trained._predict(
X=inputs
) # We don't support y for predict yet as there is no compelling case
else:
# This is ok because trainable pipeline steps
# must only be individual operators
if trained.has_method("predict_proba"): # type: ignore
batch_output = trained.predict_proba(X=inputs)
elif trained.has_method("decision_function"): # type: ignore
batch_output = trained.decision_function(X=inputs)
else:
batch_output = trained._predict(X=inputs)
if trained == sink_node:
if isinstance(batch_output, tuple):
output = append_batch(
output, (batch_output[0], batch_output[1])
)
else:
output = append_batch(output, batch_output)
outputs[operator] = batch_output
operator_idx += 1
# if serialize:
# output = lale.helpers.write_batch_output_to_file(
# output,
# os.path.join(
# serialization_out_dir,
# "fit_with_batches" + str(operator_idx) + ".hdf5",
# ),
# len(inputs.dataset),
# batch_idx,
# batch_X,
# batch_y,
# batch_out_X,
# batch_out_y,
# )
# else:
# if batch_out_y is not None:
# output = lale.helpers.append_batch(
# output, (batch_output, batch_out_y)
# )
# else:
# output = lale.helpers.append_batch(output, batch_output)
# if serialize:
# output.close() # type: ignore
# output = lale.helpers.create_data_loader(
# os.path.join(
# serialization_out_dir,
# "fit_with_batches" + str(operator_idx) + ".hdf5",
# ),
# batch_size=inputs.batch_size,
# )
# else:
# if isinstance(output, tuple):
# output = lale.helpers.create_data_loader(
# X=output[0], y=output[1], batch_size=inputs.batch_size
# )
# else:
# output = lale.helpers.create_data_loader(
# X=output, y=None, batch_size=inputs.batch_size
# )
# outputs[operator] = output
# operator_idx += 1
return_data = output # outputs[self._steps[-1]]#.dataset.get_data()
# if serialize:
# shutil.rmtree(serialization_out_dir)
return return_data
def freeze_trainable(self) -> "TrainedPipeline":
result = super().freeze_trainable()
return cast(TrainedPipeline, result)
def partial_fit(
self,
X: Any,
y: Any = None,
freeze_trained_prefix: bool = True,
unsafe: bool = False,
classes: Any = None,
**fit_params,
) -> "TrainedPipeline[TrainedIndividualOp]":
"""partial_fit for a pipeline.
This method assumes that all but the last node of a pipeline are frozen_trained and
only the last node needs to be fit using its partial_fit method.
If that is not the case and `freeze_trained_prefix` is True, it freezes the prefix
of the pipeline (all nodes except the last), provided they are trained.
Parameters
----------
X :
Features; see partial_fit schema of the last node.
y:
Labels/target
freeze_trained_prefix:
If True, all but the last node are freeze_trained and only
the last node is partial_fit.
unsafe:
boolean.
This flag allows users to override the validation that throws an error when
the operators in the prefix of this pipeline are not tagged with `has_partial_transform`.
Setting unsafe to True performs the transform as if it were row-wise, even when it may not be.
fit_params:
dict
Additional keyword arguments to be passed to partial_fit of the estimator
classes: Any
The set of possible target classes; passed through to partial_fit of the final
estimator when it accepts a `classes` argument.
Returns
-------
TrainedPipeline :
A partially trained pipeline, which can be trained further by other calls to partial_fit
Raises
------
ValueError
The pipeline has a non-frozen prefix
"""
estimator_only = True
for operator in self._steps[:-1]:
if not operator.is_frozen_trained():
estimator_only = False
if not estimator_only and not freeze_trained_prefix:
raise ValueError(
"""partial_fit is only supported on pipelines when all but the last node are frozen_trained and
only the last node needs to be fit using its partial_fit method. The parameter `freeze_trained_prefix`
can be set to True if the prefix is trained and needs to be frozen during partial_fit."""
)
sink_node = self._steps[-1]
pipeline_prefix = self.remove_last()
if not estimator_only and freeze_trained_prefix:
pipeline_prefix = pipeline_prefix.freeze_trained()
transformed_output = pipeline_prefix.transform(X, y)
if isinstance(transformed_output, tuple):
transformed_X, transformed_y = transformed_output
else:
transformed_X = transformed_output
transformed_y = y
try:
trained_sink_node = sink_node.partial_fit(
transformed_X, transformed_y, classes=classes, **fit_params
)
except TypeError: # occurs when `classes` is not expected
trained_sink_node = sink_node.partial_fit(
transformed_X, transformed_y, **fit_params
)
trained_pipeline = pipeline_prefix >> trained_sink_node
return trained_pipeline
OperatorChoiceType_co = TypeVar("OperatorChoiceType_co", bound=Operator, covariant=True)
class OperatorChoice(PlannedOperator, Generic[OperatorChoiceType_co]):
_name: str
_steps: List[OperatorChoiceType_co]
def get_params(self, deep: bool = True) -> Dict[str, Any]:
out: Dict[str, Any] = {}
out["steps"] = self._steps
out["name"] = self._name
indices: Dict[str, int] = {}
def make_indexed(name: str) -> str:
idx = 0
if name in indices:
idx = indices[name] + 1
indices[name] = idx
else:
indices[name] = 0
return make_indexed_name(name, idx)
if deep:
for op in self._steps:
name = make_indexed(op.name())
nested_params = op.get_params(deep=deep)
if nested_params:
out.update(nest_HPparams(name, nested_params))
return out
def set_params(self, **impl_params):
"""This implements the set_params, as per the scikit-learn convention,
extended as documented in the module docstring"""
return self._with_params(True, **impl_params)
# TODO: enhance to support setting params of a choice without picking a choice
# TODO: also, enhance to support mutating it in place?
def _with_params(self, try_mutate: bool, **impl_params) -> Operator:
"""
This method updates the parameters of the operator.
If try_mutate is set, it will attempt to update the operator in place,
although this may not always be possible.
"""
choices = self.steps_list()
choice_index: int
chosen_params: Dict[str, Any]
if len(choices) == 1:
choice_index = 0
chosen_params = impl_params
else:
(choice_index, chosen_params) = partition_sklearn_choice_params(impl_params)
assert 0 <= choice_index < len(choices)
choice: Operator = choices[choice_index]
new_step = choice._with_params(try_mutate, **chosen_params)
# in the functional case
# we remove the OperatorChoice, replacing it with the branch that was taken
# TODO: in the mutating case, we could update this choice
return new_step
def __init__(self, steps, name: Optional[str] = None) -> None:
if name is None or name == "":
name = assignee_name(level=2)
if name is None or name == "":
name = "OperatorChoice"
self._name = name
self._steps = steps
def steps_list(self) -> List[OperatorChoiceType_co]:
return self._steps
@property
def steps(self) -> List[Tuple[str, OperatorChoiceType_co]]:
"""This is meant to function similarly to the scikit-learn steps property
and, for linear pipelines, should behave the same.
"""
return [(s.name(), s) for s in self._steps]
def fit(self, X: Any, y: Any = None, **fit_params):
if len(self.steps_list()) == 1:
s = self.steps_list()[0]
if s is not None:
f = getattr(s, "fit", None)
if f is not None:
return f(X, y, **fit_params)
else:
return None
else:
return None
else:
# This call is to get the correct error message
# calling getattr(self, "fit") would result in
# infinite recursion, but this explicit call works
return self.__getattr__("fit") # pylint:disable=unnecessary-dunder-call
def _has_same_impl(self, other: Operator) -> bool:
"""Checks if the type of the operator imnplementations are compatible"""
if not isinstance(other, OperatorChoice):
return False
my_steps = self.steps_list()
other_steps = other.steps_list()
if len(my_steps) != len(other_steps):
return False
for m, o in zip(my_steps, other_steps):
if not m._has_same_impl(o):
return False
return True
def is_supervised(self) -> bool:
s = self.steps_list()
if len(s) == 0:
return False
return self.steps_list()[-1].is_supervised()
def validate_schema(self, X: Any, y: Any = None):
for step in self.steps_list():
step.validate_schema(X, y)
def transform_schema(self, s_X: JSON_TYPE):
from lale.settings import disable_data_schema_validation
if disable_data_schema_validation:
return {}
else:
transformed_schemas = [st.transform_schema(s_X) for st in self.steps_list()]
result = join_schemas(*transformed_schemas)
return result
def input_schema_fit(self) -> JSON_TYPE:
pipeline_inputs = [s.input_schema_fit() for s in self.steps_list()]
result = join_schemas(*pipeline_inputs)
return result
def is_frozen_trainable(self) -> bool:
return all(step.is_frozen_trainable() for step in self.steps_list())
def is_classifier(self) -> bool:
for op in self.steps_list():
if not op.is_classifier():
return False
return True
def get_defaults(self) -> Mapping[str, Any]:
defaults_list: Iterable[Mapping[str, Any]] = (
s.get_defaults() for s in self.steps_list()
)
defaults: Dict[str, Any] = {}
for d in defaults_list:
defaults.update(d)
return defaults
class _PipelineFactory:
def __init__(self):
pass
def __call__(self, steps: List[Any]):
warnings.warn(
"lale.operators.Pipeline is deprecated, use sklearn.pipeline.Pipeline or lale.lib.sklearn.Pipeline instead",
DeprecationWarning,
)
for i in range(len(steps)): # pylint:disable=consider-using-enumerate
op = steps[i]
if isinstance(op, tuple):
assert isinstance(op[1], Operator)
op[1]._set_name(op[0])
steps[i] = op[1]
return make_pipeline(*steps)
Pipeline = _PipelineFactory()
def _pipeline_graph_class(steps) -> Type[PlannedPipeline]:
isTrainable: bool = True
isTrained: bool = True
for operator in steps:
if not isinstance(operator, TrainedOperator):
isTrained = False # Even if a single step is not trained, the pipeline can't be used for predict/transform
# without training it first
if isinstance(operator, OperatorChoice) or not isinstance(
operator, TrainableOperator
):
isTrainable = False
if isTrained:
return TrainedPipeline
elif isTrainable:
return TrainablePipeline
else:
return PlannedPipeline
@overload
def make_pipeline_graph(
steps: List[TrainedOperator],
edges: List[Tuple[Operator, Operator]],
ordered: bool = False,
) -> TrainedPipeline:
...
@overload
def make_pipeline_graph(
steps: List[TrainableOperator],
edges: List[Tuple[Operator, Operator]],
ordered: bool = False,
) -> TrainablePipeline:
...
@overload
def make_pipeline_graph(
steps: List[Operator],
edges: List[Tuple[Operator, Operator]],
ordered: bool = False,
) -> PlannedPipeline:
...
def make_pipeline_graph(steps, edges, ordered=False) -> PlannedPipeline:
"""
Based on the state of the steps, it is important to decide an appropriate type for
a new Pipeline. This method will decide the type, create a new Pipeline of that type and return it.
#TODO: If multiple independently trained components are composed together in a pipeline,
should it be of type TrainedPipeline?
Currently, it will be a TrainablePipeline, i.e., it will have to be trained again.
"""
pipeline_class = _pipeline_graph_class(steps)
if pipeline_class is TrainedPipeline:
return TrainedPipeline(steps, edges, ordered=ordered, _lale_trained=True)
else:
return pipeline_class(steps, edges, ordered=ordered)
@overload
def make_pipeline(*orig_steps: TrainedOperator) -> TrainedPipeline:
...
@overload
def make_pipeline(*orig_steps: TrainableOperator) -> TrainablePipeline:
...
@overload
def make_pipeline(*orig_steps: Union[Operator, Any]) -> PlannedPipeline:
...
def make_pipeline(*orig_steps):
steps: List[Operator] = []
edges: List[Tuple[Operator, Operator]] = []
prev_op: Optional[Operator] = None
for curr_op in orig_steps:
if isinstance(prev_op, BasePipeline):
prev_leaves: List[Operator] = prev_op._find_sink_nodes()
else:
prev_leaves = [] if prev_op is None else [prev_op]
if isinstance(curr_op, BasePipeline):
curr_roots: List[Operator] = curr_op._find_source_nodes()
steps.extend(curr_op.steps_list())
edges.extend(curr_op.edges())
else:
if not isinstance(curr_op, Operator):
curr_op = make_operator(curr_op, name=curr_op.__class__.__name__)
curr_roots = [curr_op]
steps.append(curr_op)
edges.extend([(src, tgt) for src in prev_leaves for tgt in curr_roots])
prev_op = curr_op
return make_pipeline_graph(steps, edges, ordered=True)
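# Editor's sketch: two equivalent ways to build a linear pipeline (assuming
# imports from lale.lib.sklearn):
#
#   from lale.lib.sklearn import MinMaxScaler, LogisticRegression
#   pipe_a = make_pipeline(MinMaxScaler(), LogisticRegression())
#   pipe_b = MinMaxScaler() >> LogisticRegression()   # same DAG via the >> combinator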
@overload
def make_union_no_concat(*orig_steps: TrainedOperator) -> TrainedPipeline:
...
@overload
def make_union_no_concat(*orig_steps: TrainableOperator) -> TrainablePipeline:
...
@overload
def make_union_no_concat(*orig_steps: Union[Operator, Any]) -> PlannedPipeline:
...
def make_union_no_concat(*orig_steps): # type: ignore
steps, edges = [], []
for curr_op in orig_steps:
if isinstance(curr_op, BasePipeline):
steps.extend(curr_op._steps)
edges.extend(curr_op.edges())
else:
if not isinstance(curr_op, Operator):
curr_op = make_operator(curr_op, name=curr_op.__class__.__name__)
steps.append(curr_op)
return make_pipeline_graph(steps, edges, ordered=True)
@overload
def make_union(*orig_steps: TrainedOperator) -> TrainedPipeline:
...
@overload
def make_union(*orig_steps: TrainableOperator) -> TrainablePipeline:
...
@overload
def make_union(*orig_steps: Union[Operator, Any]) -> PlannedPipeline:
...
def make_union(*orig_steps): # type: ignore
from lale.lib.rasl import ConcatFeatures
return make_union_no_concat(*orig_steps) >> ConcatFeatures()
def make_choice(
*orig_steps: Union[Operator, Any], name: Optional[str] = None
) -> OperatorChoice:
if name is None:
name = ""
name_: str = name # to make mypy happy
steps: List[Operator] = []
for operator in orig_steps:
if isinstance(operator, OperatorChoice):
steps.extend(operator.steps_list())
else:
if not isinstance(operator, Operator):
operator = make_operator(operator, name=operator.__class__.__name__)
steps.append(operator)
name_ = name_ + " | " + operator.name()
return OperatorChoice(steps, name_[3:])
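# Editor's sketch: make_choice corresponds to the `|` combinator; the resulting
# OperatorChoice is typically resolved by an optimizer (assuming imports from
# lale.lib.sklearn):
#
#   from lale.lib.sklearn import LogisticRegression, RandomForestClassifier
#   choice_a = make_choice(LogisticRegression, RandomForestClassifier)
#   choice_b = LogisticRegression | RandomForestClassifier   # equivalent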
def _fixup_hyperparams_dict(d):
d1 = remove_defaults_dict(d)
d2 = {k: val_wrapper.unwrap(v) for k, v in d1.items()}
return d2
CustomizeOpType = TypeVar("CustomizeOpType", bound=PlannedIndividualOp)
def customize_schema( # pylint: disable=differing-param-doc,differing-type-doc
op: CustomizeOpType,
schemas: Optional[Schema] = None,
relevantToOptimizer: Optional[List[str]] = None,
constraint: Union[Schema, JSON_TYPE, List[Union[Schema, JSON_TYPE]], None] = None,
tags: Optional[Dict] = None,
forwards: Union[bool, List[str], None] = None,
set_as_available: bool = False,
**kwargs: Union[Schema, JSON_TYPE, None],
) -> CustomizeOpType:
"""Return a new operator with a customized schema
Parameters
----------
op: Operator
The base operator to customize
schemas : Schema
A dictionary of json schemas for the operator. Override the entire schema and ignore other arguments
input : Schema
(or `input_*`) override the input schema for method `*`.
`input_*` must be an existing method (already defined in the schema for lale operators, existing method for external operators)
output : Schema
(or `output_*`) override the output schema for method `*`.
`output_*` must be an existing method (already defined in the schema for lale operators, existing method for external operators)
relevantToOptimizer : String list
update the set of parameters that will be optimized.
constraint : Schema
Add a constraint in JSON schema format.
tags : Dict
Override the tags of the operator.
forwards: boolean or a list of strings
Which methods/properties to forward to the underlying impl. (False for none, True for all).
set_as_available: bool
Override the list of available operators so `get_available_operators` returns this customized operator.
kwargs : Schema
Override the schema of the hyperparameter.
`param` must be an existing parameter (already defined in the schema for lale operators, __init__ parameter for external operators)
Returns
-------
PlannedIndividualOp
Copy of the operator with a customized schema
"""
op_index = -1
try:
op_index = _all_available_operators.index(op)
except ValueError:
pass
# TODO: why are we doing a deepcopy here?
op = copy.deepcopy(op)
methods = ["fit", "transform", "predict", "predict_proba", "decision_function"]
# explicitly enable the hyperparams schema check because it is important
from lale.settings import (
disable_hyperparams_schema_validation,
set_disable_hyperparams_schema_validation,
)
existing_disable_hyperparams_schema_validation = (
disable_hyperparams_schema_validation
)
set_disable_hyperparams_schema_validation(False)
if schemas is not None:
schemas.schema["$schema"] = "http://json-schema.org/draft-04/schema#"
validate_is_schema(schemas.schema)
op._schemas = schemas.schema
else:
if relevantToOptimizer is not None:
assert isinstance(relevantToOptimizer, list)
op._schemas["properties"]["hyperparams"]["allOf"][0][
"relevantToOptimizer"
] = relevantToOptimizer
if constraint is not None:
cl: List[Union[Schema, JSON_TYPE]]
if isinstance(constraint, list):
cl = constraint
else:
cl = [constraint]
for c in cl:
if isinstance(c, Schema):
c = c.schema
op._schemas["properties"]["hyperparams"]["allOf"].append(c)
if tags is not None:
assert isinstance(tags, dict)
op._schemas["tags"] = tags
if forwards is not None:
assert isinstance(forwards, (bool, list))
op._schemas["forwards"] = forwards
for arg, value in kwargs.items():
if value is not None and isinstance(value, Schema):
value = value.schema
if value is not None:
validate_is_schema(value)
if arg in [p + n for p in ["input_", "output_"] for n in methods]:
# multiple input types (e.g., fit, predict)
assert value is not None
validate_method(op, arg)
op._schemas["properties"][arg] = value
elif value is None:
scm = op._schemas["properties"]["hyperparams"]["allOf"][0]
scm["required"] = [k for k in scm["required"] if k != arg]
scm["relevantToOptimizer"] = [
k for k in scm["relevantToOptimizer"] if k != arg
]
scm["properties"] = {
k: scm["properties"][k] for k in scm["properties"] if k != arg
}
else:
op._schemas["properties"]["hyperparams"]["allOf"][0]["properties"][
arg
] = value
# since the schema has changed, we need to invalidate any
# cached enum attributes
op._invalidate_enum_attributes()
set_disable_hyperparams_schema_validation(
existing_disable_hyperparams_schema_validation
)
# we also need to prune the hyperparameter, if any, removing defaults (which may have changed)
op._hyperparams = op.hyperparams()
if set_as_available and op_index >= 0:
_all_available_operators[op_index] = op
return op
CloneOpType = TypeVar("CloneOpType", bound=Operator)
def clone_op(op: CloneOpType, name: Optional[str] = None) -> CloneOpType:
"""Clone any operator."""
nop = clone(op)
if name:
nop._set_name(name)
return nop
def with_structured_params(
try_mutate: bool, k, params: Dict[str, Any], hyper_parent
) -> None:
# need to handle the different encoding schemes used
if params is None:
return
if structure_type_name in params:
# this is a structured type
structure_type = params[structure_type_name]
type_params, sub_params = partition_sklearn_params(params)
hyper = None
if isinstance(hyper_parent, dict):
hyper = hyper_parent.get(k, None)
elif isinstance(hyper_parent, list) and k < len(hyper_parent):
hyper = hyper_parent[k]
if hyper is None:
hyper = {}
elif isinstance(hyper, tuple):
# to make it mutable
hyper = list(hyper)
del type_params[structure_type_name]
actual_key: Union[str, int]
for elem_key, elem_value in type_params.items():
if elem_value is not None:
if not isinstance(hyper, dict):
assert is_numeric_structure(structure_type)
actual_key = int(elem_key)
# we may need to extend the array
try:
hyper[actual_key] = elem_value
except IndexError:
assert 0 <= actual_key
hyper.extend((actual_key - len(hyper)) * [None])
hyper.append(elem_value)
else:
actual_key = elem_key
hyper[actual_key] = elem_value
for elem_key, elem_params in sub_params.items():
if not isinstance(hyper, dict):
assert is_numeric_structure(structure_type)
actual_key = int(elem_key)
else:
actual_key = elem_key
with_structured_params(try_mutate, actual_key, elem_params, hyper)
if isinstance(hyper, dict) and is_numeric_structure(structure_type):
max_key = max((int(x) for x in hyper.keys()))
hyper = [hyper.get(str(x), None) for x in range(max_key)]
if structure_type == "tuple":
hyper = tuple(hyper)
hyper_parent[k] = hyper
else:
# if it is not a structured parameter
# then it must be a nested higher order operator
sub_op = hyper_parent[k]
if isinstance(sub_op, list):
if len(sub_op) == 1:
sub_op = sub_op[0]
else:
(disc, chosen_params) = partition_sklearn_choice_params(params)
assert 0 <= disc < len(sub_op)
sub_op = sub_op[disc]
params = chosen_params
trainable_sub_op = sub_op._with_params(try_mutate, **params)
hyper_parent[k] = trainable_sub_op
| 213,691 | 37.023488 | 376 | py |
lale
lale-master/lale/schema_simplifier.py
# Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import logging
from typing import (
Any,
Dict,
Generic,
Iterable,
List,
Optional,
Set,
Tuple,
TypeVar,
Union,
)
import jsonschema
from .schema_ranges import SchemaRange
from .schema_utils import (
JsonSchema,
SFalse,
STrue,
is_false_schema,
is_lale_any_schema,
is_true_schema,
isForOptimizer,
makeAllOf,
makeAnyOf,
makeOneOf,
)
from .type_checking import always_validate_schema
logger = logging.getLogger(__name__)
# Goal: given a json schema, convert it into an equivalent json-schema
# in "grouped-dnf" form:
# allOf: [anyOf: nochoice], where
# nochoice is a schema with no further anyOf/allOf choices
#
# initial version, which does not try to group things intelligently:
# allOf [anyOf [P1 P2], anyOf[Q1 Q2]] ==
# anyOf [map allOf [Ps]x[Qs]]
# Note that P1 == anyOf [P] == allOf [P]
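# For example (editor's illustration), the cross product turns
#   allOf: [ anyOf: [P1, P2], anyOf: [Q1, Q2] ]
# into
#   anyOf: [ allOf: [P1, Q1], allOf: [P1, Q2], allOf: [P2, Q1], allOf: [P2, Q2] ]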
# Given a schema, if it is an anyof, return the list of choices.
# Otherwise, return a singleton choice -- the schema
# enumerations should logically be sets.
# However, the keys are not hashable
VV = TypeVar("VV")
class set_with_str_for_keys(Generic[VV]):
"""This mimicks a set, but uses the string representation
of the elements for comparison tests.
It can be used for unhashable elements, as long
as the str function is injective
"""
_elems: Dict[str, VV]
def __init__(self, elems: Union[Dict[str, VV], Iterable[VV]]):
if isinstance(elems, dict):
# The type hint is needed since technically a Dict[str, something_else]
# is an Iterable[str], which could match the latter type,
# but pass this type guard
self._elems = elems # type: ignore
else:
self._elems = {str(v): v for v in elems}
def __iter__(self):
return iter(self._elems.values())
def __bool__(self):
return bool(self._elems)
def __str__(self):
return str(list(self._elems.values()))
def __contains__(self, key):
return key in self._elems
def union(self, *others):
return set_with_str_for_keys(
[elem for subl in [self] + list(others) for elem in subl]
)
def intersection(self, *others: "set_with_str_for_keys[VV]"):
d: Dict[str, VV] = dict(self._elems)
for ssk in others:
for k in list(d.keys()):
if k not in ssk:
del d[k]
return set_with_str_for_keys(d)
def difference(self, *others):
d: Dict[str, VV] = dict(self._elems)
for ssk in others:
for k in list(d.keys()):
if k in ssk:
del d[k]
return set_with_str_for_keys(d)
def toAnyOfList(schema: JsonSchema) -> List[JsonSchema]:
if "anyOf" in schema:
return schema["anyOf"]
else:
return [schema]
def toAllOfList(schema: JsonSchema) -> List[JsonSchema]:
if "allOf" in schema:
return schema["allOf"]
else:
return [schema]
def liftAllOf(schemas: List[JsonSchema]) -> Iterable[JsonSchema]:
"""Given a list of schemas, if any of them are
allOf schemas, lift them out to the top level
"""
for sch in schemas:
schs2 = toAllOfList(sch)
for s in schs2:
yield s
def liftAnyOf(schemas: List[JsonSchema]) -> Iterable[JsonSchema]:
"""Given a list of schemas, if any of them are
anyOf schemas, lift them out to the top level
"""
for sch in schemas:
schs2 = toAnyOfList(sch)
for s in schs2:
yield s
# This is a great function for a breakpoint :-)
def impossible() -> JsonSchema:
return SFalse
def enumValues(
es: set_with_str_for_keys[Any], s: JsonSchema
) -> set_with_str_for_keys[Any]:
"""Given an enumeration set and a schema, return all the consistent values of the enumeration."""
# TODO: actually check. This should call the json schema validator
ret = []
for e in es:
try:
always_validate_schema(e, s)
ret.append(e)
except jsonschema.ValidationError:
logger.debug(
f"enumValues: {e} removed from {es} because it does not validate according to {s}"
)
return set_with_str_for_keys(iter(ret))
# invariants for all the simplify* functions:
# - invariant: if floatAny then at most the top level return value will be 'anyOf'
# - invariant: if there is no (nested or top level) 'anyOf' then the result will not have any either
extra_field_names: List[str] = ["default", "description"]
def hasAllOperatorSchemas(schemas: List[JsonSchema]) -> bool:
if not schemas:
return False
for s in schemas:
if "anyOf" in s:
if not hasAnyOperatorSchemas(s["anyOf"]):
return False
elif "allOf" in s:
if not hasAllOperatorSchemas(s["allOf"]):
return False
else:
to = s.get("laleType", None)
if to != "operator":
return False
return True
def hasAnyOperatorSchemas(schemas: List[JsonSchema]) -> bool:
for s in schemas:
if "anyOf" in s:
if hasAnyOperatorSchemas(s["anyOf"]):
return True
elif "allOf" in s:
if hasAllOperatorSchemas(s["allOf"]):
return True
else:
to = s.get("laleType", None)
if to == "operator":
return True
return False
def simplifyAll(schemas: List[JsonSchema], floatAny: bool) -> JsonSchema:
# First, we partition the schemas into the different types
# that we care about
combined_original_schema: JsonSchema = {"allOf": schemas}
s_all: List[JsonSchema] = schemas
s_any: List[List[JsonSchema]] = []
s_one: List[JsonSchema] = []
s_not: List[JsonSchema] = []
s_not_number_list: List[
JsonSchema
] = (
[]
) # a list of schemas that are a top level 'not' with a type='integer' or 'number' under it
s_not_enum_list: List[set_with_str_for_keys[Any]] = []
s_enum_list: List[set_with_str_for_keys[Any]] = []
s_type: Optional[str] = None
s_type_for_optimizer: Optional[str] = None
s_typed: List[JsonSchema] = []
s_other: List[JsonSchema] = []
s_not_for_optimizer: List[JsonSchema] = []
s_extra: Dict[str, Any] = {}
while s_all:
l: List[JsonSchema] = s_all
s_all = []
s: JsonSchema
for s in l:
if s is None:
continue
s = simplify(s, floatAny)
if s is None:
continue
if not isForOptimizer(s):
logger.info(
f"simplifyAll: skipping not for optimizer {s} (after simplification)"
)
s_not_for_optimizer.append(s)
continue
if is_true_schema(s):
continue
if is_false_schema(s):
return SFalse
if is_lale_any_schema(s):
continue
if "allOf" in s:
s_all.extend(s["allOf"])
elif "anyOf" in s:
s_any.append(s["anyOf"])
elif "oneOf" in s:
s_one.append(s)
elif "not" in s:
snot = s["not"]
if snot is None:
continue
if "enum" in snot:
ev = enumValues(
set_with_str_for_keys(snot["enum"]),
{"not": combined_original_schema},
)
s_not_enum_list.append(ev)
elif "type" in snot and (
snot["type"] == "number" or snot["type"] == "integer"
):
s_not_number_list.append(s)
else:
s_not.append(s)
elif "enum" in s:
ev = enumValues(
set_with_str_for_keys(s["enum"]), combined_original_schema
)
if ev:
s_enum_list.append(ev)
for k in extra_field_names:
if k in s:
d = s[k]
if k in s_extra and s_extra[k] != d:
logger.info(
f"mergeAll: conflicting {k} fields: {s_extra[k]} and {d} found when merging schemas {schemas}"
)
else:
s_extra[k] = d
else:
logger.info(
f"simplifyAll: {schemas} is not a satisfiable list of conjoined schemas because the enumeration {list(s['enum'])} has no elements that are satisfiable by the conjoined schemas"
)
return impossible()
elif "type" in s:
t = s.get("type", None)
to = s.get("laleType", None)
if t == "array":
# tuples are distinct from arrays
if to is not None and to == "tuple":
t = to
if s_type:
# handle subtyping relation between integers and numbers
if (
s_type == "number"
and t == "integer"
or s_type == "integer"
and t == "number"
):
s_type = "integer"
elif s_type != t:
logger.info(
f"simplifyAll: {schemas} is not a satisfiable list of conjoined schemas because {s} has type '{t}' and a previous schema had type '{s_type}'"
)
return impossible()
else:
s_type = t
s_typed.append(s)
elif "XXX TODO XXX" in s and len(s) == 1:
# Ignore missing constraints
pass
else:
to = s.get("laleType", None)
if to is None:
logger.warning(f"simplifyAll: '{s}' has unknown type")
s_other.append(s)
to = s.get("laleType", None)
if to == "operator":
if (
s_type_for_optimizer is not None
and s_type_for_optimizer != "operator"
):
logger.error(
f"simplifyAll: '{s}' has operator type for optimizer, but we also have another type for optimizer saved"
)
s_type_for_optimizer = to
# Now that we have partitioned things
# Note: I am sure some of our assumptions here are not correct :-(, but this should do for now :-)
# let's try to find a quick contradiction
if s_not or s_not_number_list:
# a bit of a special case here (which should eventually be replaced by more principled logic):
# if one of the not cases is identical to one of the extra cases
# then this entire case is impossible.
# This provides a workaround to #42 amongst other problems
# first gather the set of extras
pos_k: Set[str] = set()
pk: JsonSchema
for pk in s_typed:
pos_k.add(str(pk))
for sn in itertools.chain(s_not, s_not_number_list):
snn = sn["not"]
if str(snn) in pos_k:
logger.info(
f"simplifyAll: Contradictory schema {str(combined_original_schema)} contains both {str(snn)} and its negation"
)
return impossible()
# first, we simplify enumerations
s_enum: Optional[set_with_str_for_keys[Any]] = None
s_not_enum: Optional[set_with_str_for_keys[Any]] = None
if s_enum_list:
# if there are enumeration constraints, we want their intersection
# pylint note: s_enum_list must be non-empty, and the first element will be used as self
s_enum = (
set_with_str_for_keys.intersection( # pylint:disable=no-value-for-parameter
*s_enum_list
)
)
if not s_enum:
# This means that enumeration values were specified
# but none are possible, so this schema is impossible to satisfy
logger.info(
f"simplifyAll: {schemas} is not a satisfiable list of conjoined schemas because the conjugation of these enumerations {list(s_enum_list)} is unsatisfiable (the intersection is empty)"
)
return impossible()
if s_not_enum_list:
# pylint note: s_not_enum_list must be non-empty, and the first element will be used as self
s_not_enum = (
set_with_str_for_keys.union( # pylint:disable=no-value-for-parameter
*s_not_enum_list
)
)
if s_enum and s_not_enum:
s_enum_diff = set_with_str_for_keys.difference(s_enum, s_not_enum)
if not s_enum_diff:
# This means that enumeration values were specified
# but none are possible, so this schema is impossible to satisfy
logger.info(
f"simplifyAll: {schemas} is not a satisfiable list of conjoined schemas because the conjugation of the enumerations is {s_enum} all of which are excluded by the conjugation of the disallowed enumerations {s_not_enum}"
)
return impossible()
s_enum = s_enum_diff
s_not_enum = None
# break out, combine, and keep 'extra' fields, like description
if s_typed:
s_typed = [s.copy() for s in s_typed]
for o in s_typed:
for k in extra_field_names:
if k in o:
d = o[k]
if k in s_extra and s_extra[k] != d:
logger.info(
f"mergeAll: conflicting {k} fields: {s_extra[k]} and {d} found when merging schemas {schemas}"
)
else:
s_extra[k] = d
del o[k]
s_typed = [s for s in s_typed if s]
if s_type in ["number", "integer"]:
# First we combine all the positive number range schemas
s_range = SchemaRange()
s_range_for_optimizer = SchemaRange()
for o in s_typed:
o_range = SchemaRange.fromSchema(o)
s_range &= o_range
o_range_for_optimizer = SchemaRange.fromSchemaForOptimizer(o)
s_range_for_optimizer &= o_range_for_optimizer
# now let us look at negative number ranges
# for now, we will not handle cases that would require splitting ranges
# TODO: 42 is about handling more reasoning
s_not_list = s_not_number_list
s_not_number_list = []
for s in s_not_list:
snot = s["not"]
o_range = SchemaRange.fromSchema(snot)
success = s_range.diff(o_range)
if success is None:
logger.info(
f"simplifyAll: [range]: {s} is not a satisfiable schema, since it negates everything, falsifying the entire combined schema {combined_original_schema}"
)
return impossible()
o_range_for_optimizer = SchemaRange.fromSchemaForOptimizer(snot)
success2 = s_range_for_optimizer.diff(o_range_for_optimizer)
if success2 is None:
logger.info(
f"simplifyAll: [range]: {s} is not a satisfiable schema for the optimizer, since it negates everything, falsifying the entire combined schema {combined_original_schema}"
)
return impossible()
elif success is False or success2 is False:
s_not_number_list.append(s)
# Now we look at negative enumerations.
# for now, we will not handle cases that would require splitting ranges
# TODO: 42 is about handling more reasoning
if s_not_enum:
s_cur_not_enum_list: set_with_str_for_keys[Any] = s_not_enum
s_not_enum_l: List[Any] = []
for s in s_cur_not_enum_list:
if isinstance(s, (int, float)):
success = s_range.remove_point(s)
if success is None:
logger.info(
f'simplifyAll: [range]: {{"not": {{"enum": [{s}]}}}} is not a satisfiable schema, since it negates everything, falsifying the entire combined schema {combined_original_schema}'
)
return impossible()
success2 = s_range_for_optimizer.remove_point(s)
if success2 is None:
logger.info(
f'simplifyAll: [range]: {{"not": {{"enum": [{s}]}}}} is not a satisfiable schema for the optimizer, since it negates everything, falsifying the entire combined schema {combined_original_schema}'
)
return impossible()
elif success is False or success2 is False:
s_not_enum_l.append(s)
s_not_enum = set_with_str_for_keys(iter(s_not_enum_l))
# now let us put everything back together
number_schema = SchemaRange.to_schema_with_optimizer(
s_range, s_range_for_optimizer
)
if SchemaRange.is_empty2(s_range, s_range):
logger.info(
f"simplifyAll: [range]: range simplification determined that the required minimum is greater than the required maximum, so the entire thing is unsatisfiable {combined_original_schema}"
)
# if the actual range is empty, the entire schema is invalid
return impossible()
elif SchemaRange.is_empty2(s_range_for_optimizer, s_range):
number_schema["forOptimizer"] = SFalse
logger.info(
f"simplifyAll: [range]: range simplification determined that the required minimum for the optimizer is greater than the required maximum, so the range is being marked as not for the optimizer: {number_schema}"
)
elif SchemaRange.is_empty2(s_range, s_range_for_optimizer):
number_schema["forOptimizer"] = SFalse
logger.info(
f"simplifyAll: [range]: range simplification determined that the required minimum is greater than the required maximum for the optimizer, so the range is being marked as not for the optimizer: {number_schema}"
)
elif SchemaRange.is_empty2(s_range_for_optimizer, s_range_for_optimizer):
logger.info(
f"simplifyAll: [range]: range simplification determined that the required minimum for the optimizer is greater than the required maximum for the optimizer, so the range is being marked as not for the optimizer: {number_schema}"
)
number_schema["forOptimizer"] = SFalse
s_typed = [number_schema]
elif s_type == "object":
# if this is an object type, we want to merge the properties
s_required: Set[str] = set()
s_props: Dict[str, List[JsonSchema]] = {}
# TODO: generalize this to handle schema types here
s_additionalProperties = True
# propertyNames = []
for o in s_typed:
o_required = o.get("required", None)
if o_required:
s_required = s_required.union(o_required)
# TODO: handle empty/absent properties case
if "properties" in o:
o_props = o["properties"]
else:
o_props = {}
o_additionalProperties = (
"additionalProperties" not in o or o["additionalProperties"]
)
# safety check:
if not o_additionalProperties:
for p in s_required:
if p not in o_props:
# There is a required key, but our schema
# does not contain that key and does not allow additional properties
# This schema can never be satisfied, so we can simplify this whole thing to the False schema
logger.info(
f"simplifyAll: {s_typed} is not a mergable list of schemas because {o} does not have the required key '{p}' and excludes additional properties"
)
return impossible()
# If we do not allow additional properties
# Remove all existing properties that are
# not in our schema
if not o_additionalProperties:
for p in s_props: # pylint:disable=consider-using-dict-items
if p not in o_props:
del s_props[p]
# now go through our properties and add them
for p, pv in o_props.items():
if p in s_props:
s_props[p].append(pv)
elif s_additionalProperties:
s_props[p] = [pv]
s_additionalProperties = s_additionalProperties and o_additionalProperties
# at this point, we have aggregated the object schemas
# for all the properties in them
if s_required and not s_additionalProperties:
for k in s_required:
if k not in s_props:
logger.info(
f"simplifyAll: {s_typed} is not a mergable list of schemas because one of the schemas requires key '{k}', which is not in the other schemas, and a different schema excluded additional properties"
)
return impossible()
merged_props = {p: simplifyAll(v, False) for p, v in s_props.items()}
if s_required:
for k in s_required:
# if the schema is not present, it could be in another branch (such as an anyOf conjunct)
if is_false_schema(merged_props.get(k, STrue)):
logger.info(
f"simplifyAll: required key {k} is False, so the entire conjugation of schemas {schemas} is False"
)
return impossible()
obj: Dict[Any, Any] = {}
obj["type"] = "object"
if merged_props:
obj["properties"] = merged_props
if not s_additionalProperties:
obj["additionalProperties"] = False
if len(s_required) != 0:
obj["required"] = list(s_required)
s_typed = [obj]
elif s_type in ["array", "tuple"]:
is_tuple = s_type == "tuple"
min_size: int = 0
max_size: Optional[int] = None
min_size_for_optimizer: int = 0
max_size_for_optimizer: Optional[int] = None
longest_item_list: int = 0
items_schemas: List[JsonSchema] = []
item_list_entries: List[Tuple[List[JsonSchema], Optional[JsonSchema]]] = []
for arr in s_typed:
arr_min_size = arr.get("minItems", 0)
min_size = max(min_size, arr_min_size)
arr_min_size_for_optimizer = arr.get("minItemsForOptimizer", 0)
min_size_for_optimizer = max(
min_size_for_optimizer, arr_min_size_for_optimizer
)
arr_max_size = arr.get("maxItems", None)
if arr_max_size is not None:
if max_size is None:
max_size = arr_max_size
else:
max_size = min(max_size, arr_max_size)
arr_max_size_for_optimizer = arr.get("maxItemsForOptimizer", None)
if arr_max_size_for_optimizer is not None:
if max_size_for_optimizer is None:
max_size_for_optimizer = arr_max_size_for_optimizer
else:
max_size_for_optimizer = min(
max_size_for_optimizer, arr_max_size_for_optimizer
)
arr_item = arr.get("items", None)
if arr_item is not None:
if isinstance(arr_item, list):
arr_item_len = len(arr_item)
longest_item_list = max(longest_item_list, arr_item_len)
arr_additional = arr.get("additionalItems", None)
item_list_entries.append((arr_item, arr_additional))
if arr_additional is False:
# If we are not allowed additional elements,
# that effectively sets the maximum allowed length
if max_size is None:
max_size = arr_item_len
else:
max_size = min(max_size, arr_item_len)
else:
items_schemas.append(arr_item)
# We now have accurate min/max bounds, and if there are item lists
# we know how long the longest one is
# additionally, we have gathered up all the item (object) schemas
ret_arr: Dict[str, Any] = {"type": "array"}
if is_tuple:
ret_arr["laleType"] = "tuple"
if min_size > 0:
ret_arr["minItems"] = min_size
if min_size_for_optimizer > min_size:
ret_arr["minItemsForOptimizer"] = min_size_for_optimizer
all_items_schema: Optional[JsonSchema] = None
if items_schemas:
all_items_schema = simplifyAll(items_schemas, floatAny=floatAny)
if not item_list_entries:
# there are no list items schemas
assert longest_item_list == 0
if all_items_schema:
# deal with False schemas
if is_false_schema(all_items_schema):
if min_size > 0 or min_size_for_optimizer > 0:
return impossible()
else:
max_size = 0
max_size_for_optimizer = None
ret_arr["items"] = all_items_schema
else:
ret_item_list_list: List[List[JsonSchema]] = [
[] for _ in range(longest_item_list)
]
additional_schemas: List[JsonSchema] = []
for arr_item_list, arr_additional_schema in item_list_entries:
for x in range(longest_item_list):
ils = ret_item_list_list[x]
if x < len(arr_item_list):
ils.append(arr_item_list[x])
elif arr_additional_schema:
ils.append(arr_additional_schema)
if all_items_schema:
ils.append(all_items_schema)
if arr_additional_schema:
additional_schemas.append(arr_additional_schema)
if max_size is None or max_size > longest_item_list:
# if it is possible to have more elements
# we constrain them as specified
if additional_schemas:
if all_items_schema is not None:
additional_schemas.append(all_items_schema)
all_items_schema = simplifyAll(
additional_schemas, floatAny=floatAny
)
if all_items_schema is not None:
ret_arr["additionalItems"] = all_items_schema
ret_item_list: List[JsonSchema] = [
simplifyAll(x, floatAny=True) for x in ret_item_list_list
]
first_false: Optional[int] = None
for i, s in enumerate(ret_item_list):
if is_false_schema(s):
first_false = i
break
if first_false is not None:
if min_size > first_false or min_size_for_optimizer > first_false:
return impossible()
else:
if max_size is None:
max_size = first_false
else:
max_size = min(max_size, first_false)
if max_size_for_optimizer is not None:
if max_size_for_optimizer >= max_size:
max_size_for_optimizer = None
ret_item_list = ret_item_list[0:first_false]
ret_arr["items"] = ret_item_list
if max_size is not None:
ret_arr["maxItems"] = max_size
if max_size_for_optimizer is not None:
if max_size is None or max_size_for_optimizer < max_size:
ret_arr["maxItemsForOptimizer"] = max_size_for_optimizer
s_typed = [ret_arr]
# TODO: more!
assert not s_all
ret_all = []
ret_main: JsonSchema = s_extra if s_extra else {}
if s_type_for_optimizer is not None:
ret_main["laleType"] = s_type_for_optimizer
if s_enum:
# we should simplify these as for s_not_enum
ret_main["enum"] = list(s_enum)
# now, we do some extra work to keep 'laleType':'operator' annotations
if s_type_for_optimizer is None:
from lale.operators import Operator
if all(isinstance(x, Operator) for x in s_enum):
# All the enumeration values are operators
# This means it is probably an operator schema
# which might have been missed if
# this is being allOf'ed with an anyOfList
if s_any and all(hasAnyOperatorSchemas(s) for s in s_any):
ret_main["laleType"] = "operator"
return ret_main
if ret_main:
if s_typed:
s_typed[0] = {**ret_main, **s_typed[0]}
elif s_other:
s_other[0] = {**ret_main, **s_other[0]}
else:
ret_all.append(ret_main)
if s_typed:
ret_all.extend(s_typed)
if s_other:
ret_all.extend(s_other)
if s_not_for_optimizer:
ret_all.extend(s_not_for_optimizer)
if s_one:
ret_all.extend(s_one)
if s_not_number_list:
ret_all.extend(s_not_number_list)
if s_not:
ret_all.extend(s_not)
if s_not_enum:
# We can't do not alongside anything else
# TODO: we should validate the list against the
# other parts of ret_all (this would need to move down): if any elements don't validate
# then they already would be excluded
# we can simplify +enum's the same way
ret_all_agg = makeAllOf(ret_all)
s_not_enum_simpl = enumValues(s_not_enum, ret_all_agg)
if s_not_enum_simpl:
sne = {"not": {"enum": list(s_not_enum)}}
ret_all.append(sne)
else:
logger.debug(
f"simplifyAll: {s_not_enum} was a negated enum that was simplified away because its elements anyway don't satisfy the additional constraints {ret_all_agg}"
)
s_not_enum = s_not_enum_simpl
if not floatAny:
ret_all.extend([simplifyAny(s, False) for s in s_any])
ret_all_schema = makeAllOf(ret_all)
if floatAny and s_any:
args = list(([ret_all_schema], *tuple(s_any)))
cp = list(itertools.product(*args))
alls = [simplifyAll(list(s), False) for s in cp]
ret = simplifyAny(alls, False)
return ret
else:
return ret_all_schema
def simplifyAny(schema: List[JsonSchema], floatAny: bool) -> JsonSchema:
s_any = schema
s_enum_list: List[set_with_str_for_keys[Any]] = []
s_not_enum_list: List[set_with_str_for_keys[Any]] = []
s_other: List[JsonSchema] = []
s_not_for_optimizer: List[JsonSchema] = []
while s_any:
schema_list = s_any
s_any = []
for s in schema_list:
if s is None:
continue
s = simplify(s, floatAny)
if s is None:
continue
if not isForOptimizer(s):
logger.info(
f"simplifyAny: skipping not for optimizer {s} (after simplification)"
)
s_not_for_optimizer.append(s)
continue
if is_true_schema(s):
return STrue
if is_false_schema(s):
continue
if "anyOf" in s:
s_any.extend(s["anyOf"])
elif "enum" in s:
ev = enumValues(set_with_str_for_keys(s["enum"]), s)
if ev:
s_enum_list.append(ev)
elif "not" in s:
snot = s["not"]
if "enum" in s["not"]:
ev = enumValues(set_with_str_for_keys(snot["enum"]), snot)
if ev:
s_not_enum_list.append(ev)
else:
s_other.append(s)
s_enum: Optional[set_with_str_for_keys[Any]] = None
s_not_enum: Optional[set_with_str_for_keys[Any]] = None
if s_enum_list:
        # if there are enumeration constraints, we want their union (this is a disjunction)
# pylint note: s_enum_list must be non-empty, and the first element will be used as self
s_enum = set_with_str_for_keys.union( # pylint:disable=no-value-for-parameter
*s_enum_list
)
if s_not_enum_list:
        # pylint note: s_not_enum_list must be non-empty, and the first element will be used as self
s_not_enum = (
set_with_str_for_keys.intersection( # pylint:disable=no-value-for-parameter
*s_not_enum_list
)
)
if s_enum and s_not_enum:
s_not_enum = set_with_str_for_keys.difference(s_not_enum, s_enum)
s_enum = None
assert not s_any
ret: List[JsonSchema] = []
if s_enum:
ret.append({"enum": list(s_enum)})
if s_not_enum:
ret.append({"not": {"enum": list(s_not_enum)}})
ret.extend(s_other)
ret.extend(s_not_for_optimizer)
return makeAnyOf(ret)
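# --- Illustrative sketch, not part of the original module; the _demo_* name is hypothetical. ---
# simplifyAny merges enum disjuncts by taking the union of their values.
def _demo_simplifyAny():
    merged = simplifyAny([{"enum": [1, 2]}, {"enum": [2, 3]}], False)
    # expected shape (value order may differ): {"enum": [1, 2, 3]}
    return merged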
def simplifyNot(schema: JsonSchema, floatAny: bool) -> JsonSchema:
return simplifyNot_(schema, floatAny, alreadySimplified=False)
def simplifyNot_(
schema: JsonSchema, floatAny: bool, alreadySimplified: bool = False
) -> JsonSchema:
"""alreadySimplified=true implies that schema has already been simplified"""
if "not" in schema:
# if there is a not/not, we can just skip it
ret = simplify(schema["not"], floatAny)
return ret
elif "anyOf" in schema:
anys = schema["anyOf"]
alls = [{"not": s} for s in anys]
ret = simplifyAll(alls, floatAny)
return ret
elif "allOf" in schema:
alls = schema["allOf"]
anys = [{"not": s} for s in alls]
ret = simplifyAny(anys, floatAny)
return ret
elif not alreadySimplified:
s = simplify(schema, floatAny)
# it is possible that the result of calling simplify
# resulted in something that we can push 'not' down into
# so we call ourselves, being careful to avoid an infinite loop.
return simplifyNot_(s, floatAny, alreadySimplified=True)
else:
return {"not": schema}
def simplify(schema: JsonSchema, floatAny: bool) -> JsonSchema:
"""Tries to simplify a schema into an equivalent but
more compact/simpler one. If floatAny if true, then
the only anyOf in the return value will be at the top level.
Using this option may cause a combinatorial blowup in the size
of the schema
"""
if is_true_schema(schema):
return STrue
if is_false_schema(schema):
return SFalse
if "enum" in schema:
# TODO: simplify the schemas by removing anything that does not validate
# against the rest of the schema
return schema
if "allOf" in schema:
ret = simplifyAll(schema["allOf"], floatAny)
return ret
elif "anyOf" in schema:
ret = simplifyAny(schema["anyOf"], floatAny)
return ret
elif "not" in schema:
return simplifyNot(schema["not"], floatAny)
elif "type" in schema and schema["type"] == "object" and "properties" in schema:
schema2 = schema.copy()
props = {}
all_objs = [schema2]
# TODO: how does this interact with required?
# {k1:s_1, k2:anyOf:[s2s], k3:anyOf:[s3s]}
# If floatAny is true and any properties have an anyOf in them
# we need to float it out to the top. We can then
# give it to simplifyAll, which does the cross product to lift
# them out of the list
for k, v in schema["properties"].items():
s = simplify(v, floatAny)
            if is_false_schema(s) and "required" in schema and k in schema["required"]:
logger.info(
f"simplify: required key {k} is False, so the entire schema {schema} is False"
)
return impossible()
if (not is_true_schema(s)) and floatAny and "anyOf" in s:
all_objs.append(
{
"anyOf": [
{"type": "object", "properties": {k: vv}}
for vv in s["anyOf"]
]
}
)
# If we are disallowing additionalProperties, then we can't remove this property entirely
if not schema.get("additionalProperties", True):
props[k] = STrue
else:
props[k] = s
schema2["properties"] = props
if len(all_objs) == 1:
return schema2
else:
# The termination argument here is somewhat subtle
s = simplifyAll(all_objs, floatAny)
return s
else:
return schema
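# --- Illustrative sketch, not part of the original module; the _demo_* name is hypothetical. ---
# With floatAny=True, an anyOf nested inside an object property is floated
# to the top level (per the comments above), at the cost of a larger schema.
def _demo_simplify_floatAny():
    nested = {
        "type": "object",
        "properties": {
            "solver": {"anyOf": [{"type": "string"}, {"type": "integer"}]}
        },
    }
    flat = simplify(nested, True)
    # expected shape: a top-level anyOf whose branches each pin the
    # "solver" property to one of the alternatives
    return flat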
# TODO: semantically, allOf should force an intersection
# of relevantFields, yet union seems kinder to the user/more modular (at least if additionalProperties:True)
def findRelevantFields(schema: JsonSchema) -> Optional[Set[str]]:
"""Either returns the relevant fields for the schema, or None if there was none specified"""
if "allOf" in schema:
fields_list: List[Optional[Set[str]]] = [
findRelevantFields(s) for s in schema["allOf"]
]
real_fields_list: List[Set[str]] = [f for f in fields_list if f is not None]
if real_fields_list:
return set.union(*real_fields_list)
else:
return None
else:
if "relevantToOptimizer" in schema:
return set(schema["relevantToOptimizer"])
else:
return None
# does not handle nested objects and nested relevant fields well
def narrowToGivenRelevantFields(
schema: JsonSchema, relevantFields: Set[str]
) -> JsonSchema:
if is_true_schema(schema) or is_false_schema(schema):
return schema
if "anyOf" in schema:
return {
"anyOf": [
narrowToGivenRelevantFields(a, relevantFields) for a in schema["anyOf"]
]
}
if "allOf" in schema:
return {
"allOf": [
narrowToGivenRelevantFields(a, relevantFields) for a in schema["allOf"]
]
}
if "not" in schema:
return {"not": narrowToGivenRelevantFields(schema["not"], relevantFields)}
if "type" in schema and schema["type"] == "object" and "properties" in schema:
props = schema["properties"]
new_props = {
k: narrowToGivenRelevantFields(v, relevantFields)
for (k, v) in props.items()
if k in relevantFields
}
schema2 = schema.copy()
schema2["properties"] = new_props
if "required" in schema:
reqs = set(schema["required"])
schema2["required"] = list(reqs.intersection(relevantFields))
return schema2
else:
return schema
def narrowToRelevantFields(schema: JsonSchema) -> JsonSchema:
relevantFields: Optional[Set[str]] = findRelevantFields(schema)
if relevantFields is not None:
return narrowToGivenRelevantFields(schema, relevantFields)
else:
return schema
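# --- Illustrative sketch, not part of the original module; the _demo_* name is hypothetical. ---
# findRelevantFields unions relevantToOptimizer across allOf branches;
# narrowToRelevantFields then drops every other property.
def _demo_relevant_fields():
    hp = {
        "allOf": [
            {
                "type": "object",
                "relevantToOptimizer": ["C"],
                "properties": {"C": {"type": "number"}, "verbose": {"type": "integer"}},
            }
        ]
    }
    assert findRelevantFields(hp) == {"C"}
    narrowed = narrowToRelevantFields(hp)
    assert list(narrowed["allOf"][0]["properties"].keys()) == ["C"]
    return narrowed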
# Given a json schema, removes any elements marked as 'forOptimizer:false'
# also does some basic simplifications
def filterForOptimizer(schema: JsonSchema) -> Optional[JsonSchema]:
if schema is None or is_true_schema(schema) or is_false_schema(schema):
return schema
if not isForOptimizer(schema):
return None
if "anyOf" in schema:
subs = schema["anyOf"]
sch = [filterForOptimizer(s) for s in subs]
sch_nnil = [s for s in sch if s is not None]
if sch_nnil:
return makeAnyOf(sch_nnil)
else:
return None
if "allOf" in schema:
subs = schema["allOf"]
sch = [filterForOptimizer(s) for s in subs]
sch_nnil = [s for s in sch if s is not None]
filtered_sch = sch_nnil
if len(sch_nnil) != len(sch):
# Questionable semantics here (aka HACK!!!!)
# Since we removed something from the schema
# we will also remove negated schemas
filtered_sch = [
s for s in sch_nnil if not isinstance(s, dict) or "not" not in s
]
if filtered_sch:
return makeAllOf(filtered_sch)
else:
return None
if "oneOf" in schema:
subs = schema["oneOf"]
sch = [filterForOptimizer(s) for s in subs]
sch_nnil = [s for s in sch if s is not None]
if sch_nnil:
return makeOneOf(sch_nnil)
else:
return None
if "not" in schema:
s = filterForOptimizer(schema["not"])
if s is None:
return None
else:
return {"not": s}
if "type" in schema and schema["type"] == "object" and "properties" in schema:
# required = schema.get("required", None)
props = {}
for k, v in schema["properties"].items():
s = filterForOptimizer(v)
if s is None:
# if required and k in required:
# if this field is required (and has now been filtered)
# filter the whole object schema
return None
else:
props[k] = s
ret = schema.copy()
ret["properties"] = props
return ret
return schema
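# --- Illustrative sketch, not part of the original module; the _demo_* name is hypothetical. ---
# Branches explicitly marked forOptimizer=False are dropped from the search space.
def _demo_filterForOptimizer():
    s = {
        "anyOf": [
            {"type": "integer", "minimum": 1},
            {"enum": [None], "forOptimizer": False},
        ]
    }
    assert filterForOptimizer(s) == {"type": "integer", "minimum": 1}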
def narrowToRelevantConstraints(schema: JsonSchema) -> JsonSchema:
# only narrow in top-level conjuncts, to avoid tricky reasoning
if "allOf" not in schema:
return schema
# drop conjuncts that are explicitly marked as not relevant to
# optimizer, to reduce cost in the simplify() call that would be
# wasted when a filterForOptimizer() call later drops them anyway
result = {
**schema,
"allOf": [
narrowToRelevantConstraints(s)
for s in schema["allOf"]
if s.get("forOptimizer", True)
],
}
return result
def narrowSimplifyAndFilter(schema: JsonSchema, floatAny: bool) -> Optional[JsonSchema]:
nc_schema = narrowToRelevantConstraints(schema)
nf_schema = narrowToRelevantFields(nc_schema)
simplified_schema = simplify(nf_schema, floatAny)
filtered_schema = filterForOptimizer(simplified_schema)
return filtered_schema
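# --- Illustrative sketch, not part of the original module; the _demo_* name is hypothetical. ---
# End-to-end: drop conjuncts not meant for the optimizer, narrow to the
# relevantToOptimizer fields, simplify, and filter the result.
def _demo_narrowSimplifyAndFilter():
    hp = {
        "allOf": [
            {
                "type": "object",
                "relevantToOptimizer": ["alpha"],
                "properties": {
                    "alpha": {"type": "number", "minimum": 0.0},
                    "copy_X": {"type": "boolean"},
                },
            },
            {"type": "object", "forOptimizer": False},
        ]
    }
    search_space = narrowSimplifyAndFilter(hp, True)
    # expected: a schema that only constrains "alpha"; the conjunct marked
    # forOptimizer=False never reaches the simplifier
    return search_space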
| 43,964 | 37.330427 | 243 |
py
|
lale
|
lale-master/lale/docstrings.py
|
# Copyright 2022 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import pprint
import re
from typing import TYPE_CHECKING
import lale.helpers
if TYPE_CHECKING:
from lale.operators import IndividualOp
def _indent(prefix, string, first_prefix=None):
lines = string.splitlines()
if lines:
if first_prefix is None:
first_prefix = prefix
first_indented = (first_prefix + lines[0]).rstrip()
rest_indented = [(prefix + line).rstrip() for line in lines[1:]]
result = first_indented + "\n" + "\n".join(rest_indented)
return result
else:
return ""
def _value_docstring(value):
return pprint.pformat(value, width=10000, compact=True)
def _kind_tag(schema):
if "anyOf" in schema:
return "union type"
elif "allOf" in schema:
return "intersection type"
elif "not" in schema or "laleNot" in schema:
return "negated type"
elif "type" in schema:
if schema["type"] == "object":
return "dict"
elif schema["type"] == "number":
return "float"
elif isinstance(schema["type"], list):
return " *or* ".join(schema["type"])
else:
return schema["type"]
elif "enum" in schema:
values = schema["enum"]
assert len(values) >= 1
if len(values) == 1:
return _value_docstring(values[0])
elif len(values) == 2:
return " *or* ".join([_value_docstring(v) for v in values])
else:
prefix = ", ".join([_value_docstring(v) for v in values[:-1]])
suffix = ", *or* " + _value_docstring(values[-1])
return prefix + suffix
else:
return "any type"
def _schema_docstring(name, schema, required=True, relevant=True):
tags = []
if "laleType" in schema:
tags.append(schema["laleType"])
else:
tags.append(_kind_tag(schema))
if "minimum" in schema:
op = ">" if schema.get("exclusiveMinimum", False) else ">="
tags.append(op + _value_docstring(schema["minimum"]))
if "minimumForOptimizer" in schema:
tags.append(
">=" + _value_docstring(schema["minimumForOptimizer"]) + " for optimizer"
)
if "maximum" in schema:
op = "<" if schema.get("exclusiveMaximum", False) else "<="
tags.append(op + _value_docstring(schema["maximum"]))
if "laleMaximum" in schema:
tags.append("<=" + _value_docstring(schema["laleMaximum"]))
if "maximumForOptimizer" in schema:
tags.append(
"<=" + _value_docstring(schema["maximumForOptimizer"]) + " for optimizer"
)
if "distribution" in schema:
tags.append(schema["distribution"] + " distribution")
if "minItems" in schema:
tags.append(">=" + _value_docstring(schema["minItems"]) + " items")
if "minItemsForOptimizer" in schema:
tags.append(
">="
+ _value_docstring(schema["minItemsForOptimizer"])
+ " items for optimizer"
)
if "maxItems" in schema:
tags.append("<=" + _value_docstring(schema["maxItems"]) + " items")
if "maxItemsForOptimizer" in schema:
tags.append(
"<="
+ _value_docstring(schema["maxItemsForOptimizer"])
+ " items for optimizer"
)
if not required:
tags.append("optional")
if not relevant or schema.get("forOptimizer", True) is False:
tags.append("not for optimizer")
if "transient" in schema:
if schema["transient"] == "alwaysPrint":
tags.append("always print")
elif schema["transient"] is True:
tags.append("transient")
if "default" in schema:
tags.append("default " + _value_docstring(schema["default"]))
def item_docstring(name, item_schema, required=True):
sd = _schema_docstring(name, item_schema, required=required)
return _indent(" ", sd, " - ").rstrip()
body = None
if "anyOf" in schema:
item_docstrings = [item_docstring(None, s) for s in schema["anyOf"]]
if name is not None and name.startswith("_`constraint-"):
rexp = re.compile(r"^( - )(dict \*of\* )(.+)")
item_docstrings = [rexp.sub(r"\1\3", s) for s in item_docstrings]
if len(item_docstrings) > 1:
rexp = re.compile(r"^( - )(.+)")
rest = [rexp.sub(r"\1*or* \2", s) for s in item_docstrings[1:]]
item_docstrings = item_docstrings[:1] + rest
body = "\n\n".join(item_docstrings)
elif "allOf" in schema:
item_docstrings = [item_docstring(None, s) for s in schema["allOf"]]
if len(item_docstrings) > 1:
rexp = re.compile(r"^( - )(.+)")
rest = [rexp.sub(r"\1*and* \2", s) for s in item_docstrings[1:]]
item_docstrings = item_docstrings[:1] + rest
body = "\n\n".join(item_docstrings)
elif "not" in schema:
body = item_docstring(None, schema["not"])
elif "laleNot" in schema:
body = f" - '{schema['laleNot']}'"
elif schema.get("type", "") == "array":
if "items" in schema:
items_schemas = schema["items"]
if isinstance(items_schemas, dict):
body = item_docstring("items", items_schemas)
else:
items_docstrings = [
item_docstring(f"item {i}", s) for i, s in enumerate(items_schemas)
]
body = "\n\n".join(items_docstrings)
elif schema.get("type", "") == "object" and "properties" in schema:
item_docstrings = [
item_docstring(k, s) for k, s in schema["properties"].items()
]
body = "\n\n".join(item_docstrings)
result = name + " : " if name else ""
try:
result += ", ".join(tags)
except BaseException as e:
raise ValueError(f"Unexpected internal error for {schema}.") from e
assert len(result) > 0 and result.rstrip() == result
if result.startswith("-"):
result = "\\" + result
if body is not None and body.find("\n") == -1:
assert body.startswith(" - ")
result += " *of* " + body[4:]
if "description" in schema:
result += "\n\n" + _indent(" ", schema["description"]).rstrip()
if body is not None and body.find("\n") != -1:
result += "\n\n" + body
return result.rstrip()
def _params_docstring(params_schema, hp2constraints=None):
if params_schema is None:
return ""
params = params_schema.get("properties", {})
if len(params) == 0:
result = ""
else:
result = "Parameters\n----------\n"
for param_name, param_schema in params.items():
required = param_name in params_schema.get("required", {})
relevant = (
"relevantToOptimizer" not in params_schema
or param_name in params_schema["relevantToOptimizer"]
)
item_docstring = _schema_docstring(param_name, param_schema, required, relevant)
result += _indent(" ", item_docstring, "").rstrip()
if hp2constraints is not None and param_name in hp2constraints:
constraints = [f"`constraint-{i}`_" for i in hp2constraints[param_name]]
result += f"\n\n See also {', '.join(constraints)}."
result += "\n\n"
return result
def _arg_docstring(val):
if val is None:
return str("None")
if isinstance(val, (int, float)):
return str(val)
elif isinstance(val, list):
return [_arg_docstring(x) for x in val]
elif isinstance(val, dict):
return {_arg_docstring(k): _arg_docstring(v) for k, v in val.items()}
else:
return f'"{str(val)}"'
def _paramlist_docstring(hyperparams_schema) -> str:
params = hyperparams_schema.get("allOf", None)
if params is None:
return ""
if isinstance(params, list):
if not params:
return ""
params = params[0]
if params is None:
return ""
params = params.get("properties", {})
if len(params) == 0:
return ""
result = ", *"
for param_name, param_schema in params.items():
result += f", {param_name}"
        if "default" in param_schema:
            default = param_schema["default"]
default_str = _arg_docstring(default)
if default_str is not None:
result += f"={default_str}"
return result
def _get_hp2constraints(hyperparams_schema):
result = {}
for i in range(1, len(hyperparams_schema["allOf"])):
schema = hyperparams_schema["allOf"][i]
for disjunct in schema.get("anyOf", []):
for hyperparam in disjunct.get("properties", {}).keys():
result[hyperparam] = result.get(hyperparam, []) + [i]
return result
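# --- Illustrative sketch, not part of the original module; the _demo_* name is hypothetical. ---
# Maps each hyperparameter to the indices of the constraints (allOf[1:]) that
# mention it, so the parameter docs can cross-reference "See also constraint-i".
def _demo_get_hp2constraints():
    hyperparams_schema = {
        "allOf": [
            {"type": "object", "properties": {"penalty": {}, "solver": {}}},
            {"anyOf": [{"properties": {"penalty": {}}}, {"properties": {"solver": {}}}]},
            {"anyOf": [{"properties": {"solver": {}}}]},
        ]
    }
    assert _get_hp2constraints(hyperparams_schema) == {"penalty": [1], "solver": [1, 2]}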
def _hyperparams_docstring(hyperparams_schema):
hp2constraints = _get_hp2constraints(hyperparams_schema)
result = _params_docstring(hyperparams_schema["allOf"][0], hp2constraints)
if len(hyperparams_schema["allOf"]) > 1:
result += "Notes\n-----\n"
item_docstrings = [
_schema_docstring(f"_`constraint-{i}`", hyperparams_schema["allOf"][i])
for i in range(1, len(hyperparams_schema["allOf"]))
]
result += "\n\n".join(item_docstrings)
return result
def _method_docstring(description, ready_string, params_schema, result_schema=None):
result = description + "\n\n"
if ready_string is not None:
result += "*Note: " + ready_string + "*\n\n"
result += (
"Once this method is available, it will have the following signature: \n\n"
)
result += _params_docstring(params_schema)
if result_schema is not None:
result += "Returns\n-------\n"
item_docstring = _schema_docstring("result", result_schema)
result += _indent(" ", item_docstring, "")
result += "\n\n"
return result
def _cls_docstring(cls, combined_schemas):
descr_lines = combined_schemas["description"].splitlines()
result = descr_lines[0]
result += "\n\nThis documentation is auto-generated from JSON schemas.\n\n"
more_description = "\n".join(descr_lines[1:]).strip()
if more_description != "":
result += more_description + "\n\n"
return result
def _set_docstrings_helper(cls, lale_op, combined_schemas):
properties = combined_schemas.get("properties", None)
assert cls.__doc__ is None
impl_cls = lale_op.impl_class
cls.__doc__ = _cls_docstring(impl_cls, combined_schemas)
if properties is not None:
hyperparams_schema = properties.get("hyperparams", None)
if hyperparams_schema is not None:
doc = _hyperparams_docstring(hyperparams_schema)
try:
args = _paramlist_docstring(hyperparams_schema)
code = f"""
def __init__(self{args}):
pass
"""
import math
d = {}
# this should be safe, since the user controllable
# part is created by _paramlist_docstring.
# While this can include user (schema) specified defaults,
# they would need to be objects (that were already run),
# not code that is executed (since that would be invalid in a schema)
# so that would not cause user provided code to run here (only to be referenced)
exec(code, {"nan": math.nan, "inf": math.inf}, d) # nosec
__init__ = d["__init__"] # type: ignore
except BaseException as e:
import warnings
warnings.warn(
f"""While trying to generate a docstring for {cls.__name__}, when trying
to create an init method with the appropriate parameter list, an exception was raised: {e}"""
)
def __init__(self):
pass
__init__.__doc__ = doc
cls.__init__ = __init__
def make_fun(
fun_name,
fake_fun,
description,
ready_string,
params_schema_key,
result_schema_key=None,
):
params_schema = None
result_schema = None
if properties is not None:
if params_schema_key is not None:
params_schema = properties.get(params_schema_key, None)
if result_schema_key is not None:
result_schema = properties.get(result_schema_key, None)
if hasattr(impl_cls, fun_name):
ready_string_to_use = None
if not hasattr(cls, fun_name):
ready_string_to_use = ready_string
doc = _method_docstring(
description, ready_string_to_use, params_schema, result_schema
)
setattr(cls, fun_name, fake_fun)
            fake_fun.__name__ = fun_name
fake_fun.__doc__ = doc
def fit(self, X, y=None, **fit_params):
pass
make_fun(
"fit",
fit,
"Train the operator.",
"The fit method is not available until this operator is trainable.",
"input_fit",
)
def partial_fit(self, X, y=None, **fit_params):
pass
make_fun(
"partial_fit",
partial_fit,
"Incremental fit to train train the operator on a batch of samples.",
"The partial_fit method is not available until this operator is trainable.",
"input_partial_fit",
)
def transform(self, X, y=None):
pass
make_fun(
"transform",
transform,
"Transform the data.",
"The transform method is not available until this operator is trained.",
"input_transform",
"output_transform",
)
def transform_X_y(self, X, y):
pass
make_fun(
"transform_X_y",
transform_X_y,
"Transform the data and target.",
"The transform_X_y method is not available until this operator is trained.",
"input_transform_X_y",
"output_transform_X_y",
)
def predict(self, X, **predict_params):
pass
make_fun(
"predict",
predict,
"Make predictions.",
"The predict method is not available until this operator is trained.",
"input_predict",
"output_predict",
)
def predict_proba(self, X):
pass
make_fun(
"predict_proba",
predict_proba,
"Probability estimates for all classes.",
"The predict_proba method is not available until this operator is trained.",
"input_predict_proba",
"output_predict_proba",
)
def decision_function(self, X):
pass
make_fun(
"decision_function",
decision_function,
"Confidence scores for all classes.",
"The decision_function method is not available until this operator is trained.",
"input_decision_function",
"output_decision_function",
)
def set_docstrings(lale_op: "IndividualOp"):
"""
If we are running under sphinx, this will take
a variable whose value is a lale operator
and change it to a value of an artificial class
with appropriately documented methods.
"""
try:
if __sphinx_build__: # type: ignore
try:
# impl = lale_op.impl_class
frm = inspect.stack()[1]
module = inspect.getmodule(frm[0])
assert module is not None
combined_schemas = lale_op._schemas
name = lale.helpers.arg_name(pos=0, level=1)
assert name is not None
# we want to make sure that the Operator constructor args are not shown
def __init__():
pass
new_class = type(name, (lale_op.__class__,), {"__init__": __init__}) # type: ignore
new_class.__module__ = module.__name__
module.__dict__[name] = new_class
_set_docstrings_helper(new_class, lale_op, combined_schemas)
except NameError as exc:
raise ValueError(exc) from exc
except NameError:
pass
| 16,753 | 33.977035 | 100 |
py
|
lale
|
lale-master/lale/settings.py
|
disable_hyperparams_schema_validation = False
disable_data_schema_validation = True
def set_disable_data_schema_validation(flag: bool):
"""Lale can validate the input and output data used for fit, predict, predict_proba etc.
against the data schemas defined for an operator. This method allows users to control
whether the data schema validation should be turned on or not.
Parameters
----------
flag : bool
A value of True will disable the data schema validation, and a value of False will enable it.
It is True by default.
"""
global disable_data_schema_validation # pylint:disable=global-statement
disable_data_schema_validation = flag
def set_disable_hyperparams_schema_validation(flag: bool):
"""Lale can validate the hyperparameter values passed while creating an operator against
the json schema defined for hyperparameters of an operator. This method allows users to control
whether such validation should be turned on or not.
Parameters
----------
flag : bool
A value of True will disable the hyperparameter schema validation, and a value of False will enable it.
It is False by default.
"""
global disable_hyperparams_schema_validation # pylint:disable=global-statement
disable_hyperparams_schema_validation = flag
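# --- Illustrative sketch, not part of the original module; the _demo_* name is hypothetical. ---
# Typical usage: temporarily re-enable data schema validation while debugging,
# then restore the default.
def _demo_settings_usage():
    set_disable_data_schema_validation(False)  # turn data validation on
    try:
        pass  # fit/predict a pipeline here and let Lale check the data schemas
    finally:
        set_disable_data_schema_validation(True)  # back to the default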
| 1,337 | 39.545455 | 111 |
py
|
lale
|
lale-master/lale/schemas.py
|
# Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Dict, List, Optional, TypeVar, Union
class Undefined:
pass
undefined = Undefined()
T = TypeVar("T")
Option = Union[Undefined, T]
class Schema:
schema: Dict[str, Any]
def __init__(
self,
desc: Option[str] = undefined,
default: Option[Any] = undefined,
forOptimizer: bool = True,
):
self.schema: Dict[str, Any] = {}
if not isinstance(default, Undefined):
self.schema["default"] = default
if not isinstance(desc, Undefined):
self.schema["description"] = desc
if not forOptimizer:
self.schema["forOptimizer"] = forOptimizer
def set(self, prop: str, value: Option[Any]):
if not isinstance(value, Undefined):
self.schema[prop] = value
# Base Type
class Bool(Schema):
def __init__(
self,
desc: Option[str] = undefined,
default: Option[bool] = undefined,
forOptimizer: bool = True,
):
super().__init__(desc, default, forOptimizer)
self.set("type", "boolean")
class Enum(Schema):
def __init__(
self,
values: Optional[List[Any]] = None,
desc: Option[str] = undefined,
default: Option[Any] = undefined,
forOptimizer: bool = True,
):
super().__init__(desc, default, forOptimizer)
if values is None:
values = []
self.set("enum", values)
class Float(Schema):
def __init__(
self,
desc: Option[str] = undefined,
default: Option[float] = undefined,
forOptimizer: bool = True,
minimum: Option[float] = undefined,
exclusiveMinimum: Option[bool] = undefined,
minimumForOptimizer: Option[float] = undefined,
exclusiveMinimumForOptimizer: Option[bool] = undefined,
maximum: Option[float] = undefined,
exclusiveMaximum: Option[bool] = undefined,
maximumForOptimizer: Option[float] = undefined,
exclusiveMaximumForOptimizer: Option[bool] = undefined,
distribution: Option[str] = undefined,
):
super().__init__(desc, default, forOptimizer)
self.set("type", "number")
self.set("minimum", minimum)
self.set("exclusiveMinimum", exclusiveMinimum)
self.set("minimumForOptimizer", minimumForOptimizer)
self.set("exclusiveMinimumForOptimizer", exclusiveMinimumForOptimizer)
self.set("maximum", maximum)
self.set("exclusiveMaximum", exclusiveMaximum)
self.set("maximumForOptimizer", maximumForOptimizer)
self.set("exclusiveMaximumForOptimizer", exclusiveMaximumForOptimizer)
self.set("distribution", distribution)
class Int(Schema):
def __init__(
self,
desc: Option[str] = undefined,
default: Option[int] = undefined,
forOptimizer: bool = True,
minimum: Option[int] = undefined,
exclusiveMinimum: Option[bool] = undefined,
minimumForOptimizer: Option[int] = undefined,
exclusiveMinimumForOptimizer: Option[bool] = undefined,
maximum: Option[int] = undefined,
exclusiveMaximum: Option[bool] = undefined,
maximumForOptimizer: Option[int] = undefined,
exclusiveMaximumForOptimizer: Option[bool] = undefined,
distribution: Option[str] = undefined,
laleMaximum: Option[str] = undefined,
):
super().__init__(desc, default, forOptimizer)
self.set("type", "integer")
self.set("minimum", minimum)
self.set("exclusiveMinimum", exclusiveMinimum)
self.set("minimumForOptimizer", minimumForOptimizer)
self.set("maximum", maximum)
self.set("exclusiveMaximum", exclusiveMaximum)
self.set("exclusiveMinimumForOptimizer", exclusiveMinimumForOptimizer)
self.set("maximumForOptimizer", maximumForOptimizer)
self.set("exclusiveMaximumForOptimizer", exclusiveMaximumForOptimizer)
self.set("distribution", distribution)
self.set("laleMaximum", laleMaximum)
class Null(Schema):
def __init__(self, desc: Option[str] = undefined, forOptimizer: bool = True):
super().__init__(desc=desc, forOptimizer=forOptimizer)
self.set("enum", [None])
class Not(Schema):
def __init__(self, body: Schema):
super().__init__()
self.schema = {"not": body.schema}
class JSON(Schema):
def __init__(self, body: Dict[str, Any]):
super().__init__()
self.schema = body
# Combinator
class AnyOf(Schema):
def __init__(
self,
types: Optional[List[Schema]] = None,
desc: Option[str] = undefined,
default: Option[Any] = undefined,
forOptimizer: bool = True,
):
super().__init__(desc, default, forOptimizer)
if types is None:
types = []
self.set("anyOf", [t.schema for t in types])
class AllOf(Schema):
def __init__(
self,
types: Optional[List[Schema]] = None,
desc: Option[str] = undefined,
default: Option[Any] = undefined,
):
super().__init__(desc, default)
if types is None:
types = []
self.set("allOf", [t.schema for t in types])
class Array(Schema):
def __init__(
self,
items: Schema,
desc: Option[str] = undefined,
default: Option[List[Any]] = undefined,
forOptimizer: bool = True,
minItems: Option[int] = undefined,
minItemsForOptimizer: Option[int] = undefined,
maxItems: Option[int] = undefined,
maxItemsForOptimizer: Option[int] = undefined,
laleType: Option[str] = undefined,
):
super().__init__(desc, default, forOptimizer)
self.set("type", "array")
self.set("items", items.schema)
self.set("minItems", minItems)
self.set("minItemsForOptimizer", minItemsForOptimizer)
self.set("maxItems", maxItems)
self.set("maxItemsForOptimizer", maxItemsForOptimizer)
self.set("laleType", laleType)
class Object(Schema):
def __init__(
self,
default: Option[Any] = undefined,
desc: Option[str] = undefined,
forOptimizer: bool = True,
required: Option[List[str]] = undefined,
additionalProperties: Option[bool] = undefined,
**kwargs: Schema
):
super().__init__(desc, default, forOptimizer)
self.set("type", "object")
self.set("required", required)
self.set("additionalProperties", additionalProperties)
self.set("properties", {k: p.schema for (k, p) in kwargs.items()})
class String(Schema):
def __init__(
self,
desc: Option[str] = undefined,
default: Option[str] = undefined,
forOptimizer: bool = False,
):
super().__init__(desc, default, forOptimizer)
self.set("type", "string")
| 7,455 | 30.72766 | 81 |
py
|
lale
|
lale-master/lale/type_checking.py
|
# Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Lale uses `JSON Schema`_ to check machine-learning pipelines for correct types.
In general, there are two kinds of checks. The first is an instance
check (`v: s`), which checks whether a JSON value v is valid for a
schema s. The second is a subschema_ check (`s <: t`), which checks
whether one schema s is a subschema of another schema t.
Besides regular JSON values, Lale also supports certain JSON-like
values. For example, a ``np.ndarray`` of numbers is treated like a
JSON array of arrays of numbers. Furthermore, Lale supports an 'Any'
type for which all instance and subschema checks on the left as well
as the right side succeed. This is specified using ``{'laleType': 'Any'}``.
.. _`JSON Schema`: https://json-schema.org/understanding-json-schema/reference/
.. _subschema: https://arxiv.org/abs/1911.12651
"""
import functools
import inspect
from collections.abc import Iterable
from typing import Any, Dict, List, Optional, Tuple, overload
import jsonschema
import jsonschema.exceptions
import jsonschema.validators
import jsonsubschema
import numpy as np
import numpy.random
import sklearn.base
import lale.datasets.data_schemas
import lale.expressions
import lale.helpers
import lale.operators
JSON_TYPE = Dict[str, Any]
def _validate_lale_type(
validator, laleType, instance, schema
): # pylint:disable=unused-argument
# https://github.com/Julian/jsonschema/blob/master/jsonschema/_validators.py
if laleType == "Any":
return
elif laleType == "callable":
if not callable(instance):
yield jsonschema.exceptions.ValidationError(
f"expected {laleType}, got {type(instance)}"
)
elif laleType == "operator":
if not (
isinstance(instance, (lale.operators.Operator, sklearn.base.BaseEstimator))
or (
inspect.isclass(instance)
and issubclass(instance, sklearn.base.BaseEstimator)
)
):
yield jsonschema.exceptions.ValidationError(
f"expected {laleType}, got {type(instance)}"
)
elif laleType == "expression":
if not isinstance(instance, lale.expressions.Expr):
yield jsonschema.exceptions.ValidationError(
f"expected {laleType}, got {type(instance)}"
)
elif laleType == "numpy.random.RandomState":
if not isinstance(instance, numpy.random.RandomState):
yield jsonschema.exceptions.ValidationError(
f"expected {laleType}, got {type(instance)}"
)
elif laleType == "CrossvalGenerator":
if not (hasattr(instance, "split") or isinstance(instance, Iterable)):
yield jsonschema.exceptions.ValidationError(
f"expected {laleType}, got {type(instance)}"
)
def _is_extended_boolean(checker, instance):
# https://python-jsonschema.readthedocs.io/en/stable/validate/#jsonschema.TypeChecker
return isinstance(instance, (bool, np.bool_))
# https://github.com/Julian/jsonschema/blob/master/jsonschema/validators.py
_lale_validator = jsonschema.validators.extend(
validator=jsonschema.Draft4Validator,
validators={"laleType": _validate_lale_type},
type_checker=jsonschema.Draft4Validator.TYPE_CHECKER.redefine(
"boolean", _is_extended_boolean
),
)
def always_validate_schema(value: Any, schema: JSON_TYPE, subsample_array: bool = True):
"""Validate that the value is an instance of the schema.
Parameters
----------
value: JSON (int, float, str, list, dict) or JSON-like (tuple, np.ndarray, pd.DataFrame ...).
Left-hand side of instance check.
schema: JSON schema
Right-hand side of instance check.
subsample_array: bool
Speed up checking by doing only partial conversion to JSON.
Raises
------
jsonschema.ValidationError
The value was invalid for the schema.
"""
json_value = lale.helpers.data_to_json(value, subsample_array)
sch: Any = lale.helpers.data_to_json(schema, False)
try:
validator = _lale_validator(sch)
validator.validate(json_value)
except Exception:
jsonschema.validate(json_value, sch, _lale_validator)
def validate_schema_directly(
value: Any, schema: JSON_TYPE, subsample_array: bool = True
):
"""Validate that the value is an instance of the schema.
Parameters
----------
value: JSON (int, float, str, list, dict) or JSON-like (tuple, np.ndarray, pd.DataFrame ...).
Left-hand side of instance check.
schema: JSON schema
Right-hand side of instance check.
subsample_array: bool
Speed up checking by doing only partial conversion to JSON.
Raises
------
jsonschema.ValidationError
The value was invalid for the schema.
"""
from lale.settings import disable_hyperparams_schema_validation
if disable_hyperparams_schema_validation:
return True # if schema validation is disabled, always return as valid
return always_validate_schema(value, schema, subsample_array=subsample_array)
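# --- Illustrative sketch, not part of the original module; the _demo_* name is hypothetical. ---
# Instance checks accept JSON-like values such as numpy arrays, which are
# converted to (possibly subsampled) JSON before validation.
def _demo_validate_schema_directly():
    X = np.array([[1.0, 2.0], [3.0, 4.0]])
    schema = {"type": "array", "items": {"type": "array", "items": {"type": "number"}}}
    validate_schema_directly(X, schema)  # raises jsonschema.ValidationError on mismatch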
_JSON_META_SCHEMA_URL = "http://json-schema.org/draft-04/schema#"
def _json_meta_schema() -> Dict[str, Any]:
return jsonschema.Draft4Validator.META_SCHEMA
_validator = jsonschema.Draft4Validator(_json_meta_schema())
def validate_is_schema(value: Dict[str, Any]):
# only checking hyperparams schema validation flag because it is likely to be true and this call is cheap.
from lale.settings import disable_hyperparams_schema_validation
if disable_hyperparams_schema_validation:
return
if "$schema" in value:
assert value["$schema"] == _JSON_META_SCHEMA_URL
_validator.validate(value)
def is_schema(value) -> bool:
if isinstance(value, dict):
try:
_validator.validate(value)
except jsonschema.ValidationError:
return False
return True
return False
def _json_replace(subject, old, new):
if subject == old:
return new
if isinstance(subject, list):
result = [_json_replace(s, old, new) for s in subject]
for s, r in zip(subject, result):
if s != r:
return result
elif isinstance(subject, tuple):
result = tuple(_json_replace(s, old, new) for s in subject)
for s, r in zip(subject, result):
if s != r:
return result
elif isinstance(subject, dict):
if isinstance(old, dict):
is_sub_dict = True
for k, v in old.items():
if k not in subject or subject[k] != v:
is_sub_dict = False
break
if is_sub_dict:
return new
result = {k: _json_replace(v, old, new) for k, v in subject.items()}
for k in subject:
if subject[k] != result[k]:
return result
return subject # nothing changed so share original object (not a copy)
def is_subschema(sub_schema: JSON_TYPE, super_schema: JSON_TYPE) -> bool:
"""Is sub_schema a subschema of super_schema?
Parameters
----------
sub_schema: JSON schema
Left-hand side of subschema check.
super_schema: JSON schema
Right-hand side of subschema check.
Returns
-------
bool
True if `sub_schema <: super_schema`, False otherwise.
Raises
------
    ValueError
        An error occurred while checking the subschema relation.
"""
new_sub = _json_replace(sub_schema, {"laleType": "Any"}, {"not": {}})
try:
return jsonsubschema.isSubschema(new_sub, super_schema)
except Exception as e:
raise ValueError(
f"unexpected internal error checking ({new_sub} <: {super_schema})"
) from e
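# --- Illustrative sketch, not part of the original module; the _demo_* name is hypothetical. ---
# Subschema checks delegate to jsonsubschema; a laleType 'Any' on the left is
# first replaced by {"not": {}} (the bottom schema), which is trivially a
# subschema of everything.
def _demo_is_subschema():
    assert is_subschema({"type": "integer"}, {"type": "number"})
    assert is_subschema({"laleType": "Any"}, {"type": "string"})
    assert not is_subschema({"type": "number"}, {"type": "integer"})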
class SubschemaError(Exception):
"""Raised when a subschema check (sub `<:` sup) failed."""
def __init__(self, sub, sup, sub_name="sub", sup_name="super"):
self.sub = sub
self.sup = sup
self.sub_name = sub_name
self.sup_name = sup_name
def __str__(self):
summary = f"Expected {self.sub_name} to be a subschema of {self.sup_name}."
from lale.pretty_print import json_to_string
sub = json_to_string(self.sub)
sup = json_to_string(self.sup)
details = f"\n{self.sub_name} = {sub}\n{self.sup_name} = {sup}"
return summary + details
def _validate_subschema(
sub: JSON_TYPE, sup: JSON_TYPE, sub_name="sub", sup_name="super"
):
if not is_subschema(sub, sup):
raise SubschemaError(sub, sup, sub_name, sup_name)
def validate_schema(lhs: Any, super_schema: JSON_TYPE):
"""Validate that lhs is an instance of or a subschema of super_schema.
Parameters
----------
lhs: value
Left-hand side of instance or subschema check.
super_schema: JSON schema
Right-hand side of instance or subschema check.
Raises
------
jsonschema.ValidationError
The lhs was an invalid value for super_schema.
SubschemaError
The lhs had a schema that was not a subschema of super_schema.
"""
from lale.settings import disable_data_schema_validation
if disable_data_schema_validation:
return # If schema validation is disabled, always return as valid
sub_schema: Optional[JSON_TYPE]
try:
sub_schema = lale.datasets.data_schemas._to_schema(lhs)
except ValueError:
sub_schema = None
if sub_schema is None:
validate_schema_directly(lhs, super_schema)
else:
_validate_subschema(sub_schema, super_schema)
def join_schemas(*schemas: JSON_TYPE) -> JSON_TYPE:
"""Compute the lattice join (union type, disjunction) of the arguments.
Parameters
----------
*schemas: list of JSON schemas
Schemas to be joined.
Returns
-------
JSON schema
The joined schema.
"""
def join_two_schemas(s_a: JSON_TYPE, s_b: JSON_TYPE) -> JSON_TYPE:
if s_a is None:
return s_b
s_a = lale.helpers.dict_without(s_a, "description")
s_b = lale.helpers.dict_without(s_b, "description")
if is_subschema(s_a, s_b):
return s_b
if is_subschema(s_b, s_a):
return s_a
# we should improve the typing of the jsonsubschema API so that this ignore can be removed
return jsonsubschema.joinSchemas(s_a, s_b) # type: ignore
if len(schemas) == 0:
return {"not": {}}
result = functools.reduce(join_two_schemas, schemas)
return result
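# --- Illustrative sketch, not part of the original module; the _demo_* name is hypothetical. ---
# The join returns the larger schema directly when one side subsumes the other,
# and falls back to jsonsubschema.joinSchemas otherwise.
def _demo_join_schemas():
    assert join_schemas({"type": "integer"}, {"type": "number"}) == {"type": "number"}
    assert join_schemas() == {"not": {}}  # the join of nothing is the bottom schema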
def get_hyperparam_names(op: "lale.operators.IndividualOp") -> List[str]:
"""Names of the arguments to the constructor of the impl.
Parameters
----------
op: lale.operators.IndividualOp
Operator whose hyperparameters to get.
Returns
-------
List[str]
List of hyperparameter names.
"""
if op.impl_class.__module__.startswith("lale"):
hp_schema = op.hyperparam_schema()
params = next(iter(hp_schema.get("allOf", []))).get("properties", {})
return list(params.keys())
else:
c: Any = op.impl_class
sig = inspect.signature(c.__init__)
params = sig.parameters
return list(params.keys())
def validate_method(op: "lale.operators.IndividualOp", schema_name: str):
"""Check whether the operator has the given method schema.
Parameters
----------
op: lale.operators.IndividualOp
Operator whose methods to check.
    schema_name: 'input_fit' or 'input_predict' or 'input_predict_proba' or 'input_transform' or 'output_predict' or 'output_predict_proba' or 'output_transform'
Name of schema to check.
Raises
------
AssertionError
The operator does not have the given schema.
"""
if op._impl.__module__.startswith("lale"):
assert schema_name in op._schemas["properties"]
else:
method_name = ""
if schema_name.startswith("input_"):
method_name = schema_name[len("input_") :]
elif schema_name.startswith("output_"):
method_name = schema_name[len("output_") :]
if method_name:
assert hasattr(op._impl, method_name)
def _get_args_schema(fun):
sig = inspect.signature(fun)
result = {"type": "object", "properties": {}}
required = []
additional_properties = False
for name, param in sig.parameters.items():
ignored_kinds = [
inspect.Parameter.VAR_POSITIONAL,
inspect.Parameter.VAR_KEYWORD,
]
if name != "self":
if param.kind in ignored_kinds:
additional_properties = True
else:
if param.default == inspect.Parameter.empty:
param_schema = {"laleType": "Any"}
required.append(name)
else:
param_schema = {"default": param.default}
result["properties"][name] = param_schema
if not additional_properties:
result["additionalProperties"] = False
if len(required) > 0:
result["required"] = required
return result
def get_hyperparam_defaults(impl):
result = {}
if hasattr(impl, "__init__"):
sig = inspect.signature(impl.__init__)
for name, param in sig.parameters.items():
if param.default != inspect.Parameter.empty:
result[name] = param.default
return result
def get_default_schema(impl):
"""Creates combined schemas for a bare operator implementation class.
Used when there were no explicit combined schemas provided when
the operator was created. The default schema provides defaults by
inspecting the signature of the ``__init__`` method, and uses
'Any' types for the inputs and outputs of other methods.
Returns
-------
JSON Schema
Combined schema with properties for hyperparams and
all applicable method inputs and outputs.
"""
if hasattr(impl, "__init__"):
hyperparams_schema = _get_args_schema(impl.__init__)
else:
hyperparams_schema = {"type": "object", "properties": {}}
hyperparams_schema["relevantToOptimizer"] = []
method_schemas: Dict[str, JSON_TYPE] = {
"hyperparams": {"allOf": [hyperparams_schema]}
}
if hasattr(impl, "fit"):
method_schemas["input_fit"] = _get_args_schema(impl.fit)
for method_name in ["predict", "predict_proba", "transform"]:
if hasattr(impl, method_name):
method_args_schema = _get_args_schema(getattr(impl, method_name))
method_schemas["input_" + method_name] = method_args_schema
method_schemas["output_" + method_name] = {"laleType": "Any"}
tags = {
"pre": [],
"op": (["transformer"] if hasattr(impl, "transform") else [])
+ (["estimator"] if hasattr(impl, "predict") else []),
"post": [],
}
result = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": f"Schema for {type(impl)} auto-generated by lale.type_checking.get_default_schema().",
"type": "object",
"tags": tags,
"properties": method_schemas,
}
return result
_data_info_keys = {"laleMaximum": "maximum", "laleNot": "not"}
def has_data_constraints(hyperparam_schema: JSON_TYPE) -> bool:
def recursive_check(subject: Any) -> bool:
if isinstance(subject, (list, tuple)):
for v in subject:
if recursive_check(v):
return True
elif isinstance(subject, dict):
for k, v in subject.items():
if k in _data_info_keys or recursive_check(v):
return True
return False
result = recursive_check(hyperparam_schema)
return result
def replace_data_constraints(
hyperparam_schema: JSON_TYPE, data_schema: JSON_TYPE
) -> JSON_TYPE:
@overload
def recursive_replace(subject: JSON_TYPE) -> JSON_TYPE:
...
@overload
def recursive_replace(subject: List) -> List:
...
@overload
def recursive_replace(subject: Tuple) -> Tuple:
...
@overload
def recursive_replace(subject: Any) -> Any:
...
def recursive_replace(subject):
any_changes = False
if isinstance(subject, (list, tuple)):
result = []
for v in subject:
new_v = recursive_replace(v)
result.append(new_v)
any_changes = any_changes or v is not new_v
if isinstance(subject, tuple):
result = tuple(result)
elif isinstance(subject, dict):
result = {}
for k, v in subject.items():
if k in _data_info_keys:
new_v = lale.helpers.json_lookup("properties/" + v, data_schema)
if new_v is None:
new_k = k
new_v = v
else:
new_k = _data_info_keys[k]
else:
new_v = recursive_replace(v)
new_k = k
result[new_k] = new_v
any_changes = any_changes or k != new_k or v is not new_v
else:
return subject
return result if any_changes else subject
result = recursive_replace(hyperparam_schema)
return result
| 18,022 | 31.650362 | 158 |
py
|
lale
|
lale-master/lale/schema_utils.py
|
# Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Dict, List, Optional, Set, Union
# Type definitions
JsonSchema = Dict[str, Any]
SchemaEnum = Set[Any]
STrue: JsonSchema = {}
SFalse: JsonSchema = {"not": STrue}
forOptimizerConstant: str = "forOptimizer"
forOptimizerConstantSuffix: str = "ForOptimizer"
def is_true_schema(s: JsonSchema) -> bool:
return s is True or s == STrue
def is_false_schema(s: JsonSchema) -> bool:
return s is False or s == SFalse
def is_lale_any_schema(s: JsonSchema) -> bool:
if isinstance(s, dict):
t = s.get("laleType", None)
return t == "Any"
else:
return False
def getForOptimizer(obj, prop: str):
return obj.get(prop + forOptimizerConstantSuffix, None)
def getMinimum(obj):
prop = "minimum"
m = obj.get(prop)
mfo = getForOptimizer(obj, prop)
if mfo is None:
return m
else:
if m is not None and mfo < m:
raise ValueError(
f"A minimum ({m}) and a *smaller* minimumForOptimizer ({mfo}) was specified in {obj}"
)
return mfo
def getMaximum(obj):
prop = "maximum"
m = obj.get(prop)
mfo = getForOptimizer(obj, prop)
if mfo is None:
return m
else:
if m is not None and mfo > m:
raise ValueError(
f"A maximum ({m}) and a *greater* maximumForOptimizer ({mfo}) was specified in {obj}"
)
return mfo
def getExclusiveMinimum(obj):
prop = "exclusveMinimum"
m = obj.get(prop)
mfo = getForOptimizer(obj, prop)
if mfo is None:
return m
else:
return mfo
def getExclusiveMaximum(obj):
prop = "exclusiveMaximum"
m = obj.get(prop)
mfo = getForOptimizer(obj, prop)
if mfo is None:
return m
else:
return mfo
def isForOptimizer(s: JsonSchema) -> bool:
if isinstance(s, dict):
return s.get(forOptimizerConstant, True)
else:
return True
def makeSingleton_(k: str, schemas: List[JsonSchema]) -> JsonSchema:
if len(schemas) == 0:
return {}
if len(schemas) == 1:
return schemas[0]
else:
return {k: schemas}
def makeAllOf(schemas: List[JsonSchema]) -> JsonSchema:
return makeSingleton_("allOf", schemas)
def makeAnyOf(schemas: List[JsonSchema]) -> JsonSchema:
return makeSingleton_("anyOf", schemas)
def makeOneOf(schemas: List[JsonSchema]) -> JsonSchema:
return makeSingleton_("oneOf", schemas)
def forOptimizer(schema: JsonSchema) -> Optional[JsonSchema]:
if schema is None or schema is True or schema is False:
return schema
if not isForOptimizer(schema):
return None
if "anyOf" in schema:
subs = schema["anyOf"]
sch = [forOptimizer(s) for s in subs]
sch_nnil = [s for s in sch if s is not None]
if sch_nnil:
return makeAnyOf(sch_nnil)
else:
return None
if "allOf" in schema:
subs = schema["allOf"]
sch = [forOptimizer(s) for s in subs]
sch_nnil = [s for s in sch if s is not None]
filtered_sch = sch_nnil
if len(sch_nnil) != len(sch):
# Questionable semantics here (aka HACK!!!!)
# Since we removed something from the schema
# we will also remove negated schemas
filtered_sch = [
s for s in sch_nnil if not isinstance(s, dict) or "not" not in s
]
if filtered_sch:
return makeAllOf(filtered_sch)
else:
return None
if "oneOf" in schema:
subs = schema["oneOf"]
sch = [forOptimizer(s) for s in subs]
sch_nnil = [s for s in sch if s is not None]
if sch_nnil:
return makeOneOf(sch_nnil)
else:
return None
if "not" in schema:
s = forOptimizer(schema["not"])
if s is None:
return None
else:
return {"not": s}
transformedSchema: JsonSchema = {}
for k, v in schema.items():
if k.endswith(forOptimizerConstantSuffix):
base: str = k[: -len(forOptimizerConstantSuffix)]
transformedSchema[base] = v
elif k not in transformedSchema:
transformedSchema[k] = v
schema = transformedSchema
if "type" in schema and schema["type"] == "object" and "properties" in schema:
# required = schema.get("required", None)
props = {}
for k, v in schema["properties"].items():
s = forOptimizer(v)
if s is None:
# if required and k in required:
# if this field is required (and has now been filtered)
# filter the whole object schema
return None
else:
props[k] = s
ret = schema.copy()
ret["properties"] = props
return ret
return schema
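# --- Illustrative sketch, not part of the original module; the _demo_* name is hypothetical. ---
# forOptimizer rewrites *ForOptimizer keys onto their base keys and drops
# schemas explicitly excluded from the optimizer search space.
def _demo_forOptimizer():
    widened = {"type": "integer", "minimumForOptimizer": 2, "maximum": 10}
    assert forOptimizer(widened) == {"type": "integer", "minimum": 2, "maximum": 10}
    assert forOptimizer({"type": "string", "forOptimizer": False}) is None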
def has_operator(schema: JsonSchema) -> bool:
to = schema.get("laleType", None)
if to == "operator":
return True
if "not" in schema:
if has_operator(schema["not"]):
return True
if "anyOf" in schema:
if any(has_operator(s) for s in schema["anyOf"]):
return True
if "allOf" in schema:
if any(has_operator(s) for s in schema["allOf"]):
return True
if "oneOf" in schema:
if any(has_operator(s) for s in schema["oneOf"]):
return True
if "items" in schema:
it = schema["items"]
if isinstance(it, list):
if any(has_operator(s) for s in it):
return True
else:
if has_operator(it):
return True
if "properties" in schema:
props = schema["properties"]
if any(has_operator(s) for s in props.values()):
return True
if "patternProperties" in schema:
pattern_props = schema["patternProperties"]
if any(has_operator(s) for s in pattern_props.values()):
return True
if "additionalProperties" in schema:
add_props = schema["additionalProperties"]
if not isinstance(add_props, bool):
if has_operator(add_props):
return True
if "dependencies" in schema:
depends = schema["dependencies"]
for d in depends.values():
if not isinstance(d, list):
if has_operator(d):
return True
    # if we survived all of the checks, then the schema contains no operator
return False
def atomize_schema_enumerations(
schema: Union[None, JsonSchema, List[JsonSchema]]
) -> None:
"""Given a schema, converts structured enumeration values (records, arrays)
into schemas where the structured part is specified as a schema, with the
primitive as the enum.
"""
if schema is None:
return
if isinstance(schema, list):
for s in schema:
atomize_schema_enumerations(s)
return
if not isinstance(schema, dict):
return
for key in ["anyOf", "allOf", "oneOf", "items", "additionalProperties", "not"]:
atomize_schema_enumerations(schema.get(key, None))
for key in ["properties", "patternProperties", "dependencies"]:
v = schema.get(key, None)
if v is not None:
atomize_schema_enumerations(list(v.values()))
# now that we have handled all the recursive cases
ev = schema.get("enum", None)
if ev is not None:
simple_evs: List[Any] = []
complex_evs: List[JsonSchema] = []
for e in ev:
if isinstance(e, dict):
required: List[str] = []
props: Dict[str, JsonSchema] = {}
for k, v in e.items():
required.append(k)
vs = {"enum": [v]}
atomize_schema_enumerations(vs)
props[k] = vs
ds = {
"type": "object",
"additionalProperties": False,
"required": list(e.keys()),
"properties": props,
}
complex_evs.append(ds)
elif isinstance(e, (list, tuple)):
is_tuple = isinstance(e, tuple)
items_len = len(e)
items: List[JsonSchema] = []
for v in e:
vs = {"enum": [v]}
atomize_schema_enumerations(vs)
items.append(vs)
ls = {
"type": "array",
"items": items,
"additionalItems": False,
"minItems": items_len,
"maxItems": items_len,
}
if is_tuple:
ls["laleType"] = "tuple"
complex_evs.append(ls)
else:
                simple_evs.append(e)
if complex_evs:
del schema["enum"]
if simple_evs:
complex_evs.append({"enum": simple_evs})
if len(complex_evs) == 1:
# special case, just update in place
schema.update(complex_evs[0])
else:
schema["anyOf"] = complex_evs
def check_operators_schema(
schema: Optional[Union[List[JsonSchema], JsonSchema]], warnings: List[str]
) -> None:
"""Given a schema, collect warnings if there
are any enumeration with all Operator values
that are not marked as `'laleType':'operator'`.
This should be called after simplification.
"""
if schema is None:
return
if isinstance(schema, list):
for s in schema:
check_operators_schema(s, warnings)
return
if not isinstance(schema, dict):
return
for key in ["anyOf", "allOf", "oneOf", "items", "additionalProperties", "not"]:
v = schema.get(key, None)
if v is not None:
check_operators_schema(v, warnings)
for key in ["properties", "patternProperties", "dependencies"]:
v = schema.get(key, None)
if v is not None:
check_operators_schema(list(v.values()), warnings)
if "enum" in schema:
es = schema["enum"]
if es:
to = schema.get("laleType", None)
if to != "operator":
from lale.operators import Operator
if all(isinstance(e, Operator) for e in es):
warnings.append(
f"The enumeration {[e.name() for e in es]} is all lale operators, but the schema fragment {schema} it is part of does not stipulate that it should be 'laleType':'operator'. While legal, this likely indicates either an omission in the schema or a bug in the schema simplifier"
)
| 11,335 | 29.888283 | 300 |
py
|
lale
|
lale-master/lale/operator_wrapper.py
|
# Copyright 2019-2022 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import logging
import sys
from typing import List, Optional, Set
from lale.operators import Operator, clone_op, get_op_from_lale_lib
if sys.version_info < (3, 9):
from typing import Container # raises a mypy error for <3.8
else:
from collections.abc import Container
logger = logging.getLogger(__name__)
def _wrap_operators_in_symtab(
symtab,
exclude_classes: Optional[Container[str]] = None,
wrapper_modules: Optional[List[str]] = None,
) -> None:
for name, impl in symtab.items():
if (
inspect.isclass(impl)
and not issubclass(impl, Operator)
and (hasattr(impl, "predict") or hasattr(impl, "transform"))
):
if exclude_classes is not None:
if name in exclude_classes:
continue
operator = get_op_from_lale_lib(impl, wrapper_modules)
if operator is None:
# symtab[name] = make_operator(impl=impl, name=name)
logger.info(f"Lale:Not wrapping unknown operator:{name}")
else:
symtab[name] = clone_op(operator, name)
if operator.class_name().startswith("lale.lib.autogen"):
logger.info(f"Lale:Wrapped autogen operator:{name}")
else:
logger.info(f"Lale:Wrapped known operator:{name}")
def wrap_imported_operators(
exclude_classes: Optional[Container[str]] = None,
wrapper_modules: Optional[List[str]] = None,
) -> None:
"""Wrap the currently imported operators from the symbol table
to their lale wrappers.
Parameters
----------
    exclude_classes : container of strings, optional, default None
        Class names to exclude from wrapping; use the alias names
        if the classes were imported under aliases.
    wrapper_modules : list of strings, optional, default None
        List of Lale modules to search when wrapping operators.
"""
current_frame = inspect.currentframe()
assert (
current_frame is not None
), "Try to use inspect.stack()[1][0] to get the calling frame"
calling_frame = current_frame.f_back
assert (
calling_frame is not None
), "Try to use inspect.stack()[1][0] to get the calling frame"
if wrapper_modules is not None:
wrapper_modules.extend(get_lale_wrapper_modules())
else:
wrapper_modules = list(get_lale_wrapper_modules())
_wrap_operators_in_symtab(
calling_frame.f_globals, exclude_classes, wrapper_modules=wrapper_modules
)
if calling_frame.f_code.co_name == "<module>": # for testing with exec()
_wrap_operators_in_symtab(
calling_frame.f_locals, exclude_classes, wrapper_modules=wrapper_modules
)
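# Illustrative usage sketch (assumption: lale.lib.sklearn ships wrappers for
# the imported estimators; not part of the original module). Calling
# wrap_imported_operators() right after the scikit-learn imports replaces the
# classes in the caller's globals with their Lale wrappers, so they can be
# composed with Lale's combinators:
#
#   from sklearn.decomposition import PCA
#   from sklearn.linear_model import LogisticRegression
#   import lale
#   lale.wrap_imported_operators()
#   planned = PCA >> LogisticRegression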
_lale_wrapper_modules: Set[str] = set()
def register_lale_wrapper_modules(m: str) -> None:
"""Register a module with lale's import system
so that :meth:`lale.helpers.import_from_sklearn_pipeline` will look for replacement classes in that module.
Example: (in `__init__.py` file for the module):
.. code-block:: python
from lale import register_lale_wrapper_modules
register_lale_wrapper_modules(__name__)
Parameters
----------
    m : str
        The module name.
"""
_lale_wrapper_modules.add(m)
def get_lale_wrapper_modules() -> Set[str]:
return _lale_wrapper_modules
for builtin_lale_modules in [
"lale.lib.sklearn",
"lale.lib.autoai_libs",
"lale.lib.xgboost",
"lale.lib.lightgbm",
"lale.lib.snapml",
"autoai_ts_libs.lale",
]:
register_lale_wrapper_modules(builtin_lale_modules)
| 4,203 | 31.589147 | 111 |
py
|
lale
|
lale-master/lale/grammar.py
|
import random
from typing import Any, Dict, List, Optional, cast
from lale.helpers import nest_HPparams
from lale.lib.lale import NoOp
from lale.operators import (
BasePipeline,
IndividualOp,
Operator,
OperatorChoice,
PlannedOperator,
clone_op,
make_choice,
make_pipeline,
make_pipeline_graph,
)
class NonTerminal(Operator):
"""Abstract operator for non-terminal grammar rules."""
def get_params(self, deep: bool = True) -> Dict[str, Any]:
out = {}
out["name"] = self._name
return out
def _with_params(self, try_mutate: bool, **impl_params) -> Operator:
"""
This method updates the parameters of the operator. NonTerminals do not support
        in-place mutation.
"""
known_keys = set(["name"])
if impl_params:
new_keys = set(impl_params.keys())
if not new_keys.issubset(known_keys):
unknowns = {k: v for k, v in impl_params.items() if k not in known_keys}
raise ValueError(
f"NonTerminal._with_params called with unknown parameters: {unknowns}"
)
assert "name" in impl_params
return NonTerminal(impl_params["name"])
else:
return self
def __init__(self, name):
self._name = name
def _has_same_impl(self, other: Operator):
pass
def is_supervised(self):
return False
def validate_schema(self, X, y=None):
raise NotImplementedError() # TODO
def transform_schema(self, s_X):
raise NotImplementedError() # TODO
def input_schema_fit(self):
raise NotImplementedError() # TODO
def is_classifier(self) -> bool:
return False # TODO
class Grammar(Operator):
"""Base class for Lale grammars."""
_variables: Dict[str, Operator]
def get_params(self, deep: bool = True) -> Dict[str, Any]:
out = {}
out["variables"] = self._variables
if deep:
deep_stuff: Dict[str, Any] = {}
for k, v in self._variables.items():
deep_stuff.update(nest_HPparams(k, v.get_params(deep=deep)))
out.update(deep_stuff)
return out
def _with_params(self, try_mutate: bool, **impl_params) -> Operator:
"""
This method updates the parameters of the operator.
        If try_mutate is set, it will attempt to update the operator in place,
        though this may not always be possible.
"""
# TODO implement support
# from this point of view, Grammar is just a higher order operator
raise NotImplementedError("setting Grammar parameters is not yet supported")
def __init__(self, variables: Optional[Dict[str, Operator]] = None):
if variables is None:
variables = {}
self._variables = variables
def __getattr__(self, name):
if name.startswith("_"):
return self.__dict__[name]
if name not in self._variables:
self._variables[name] = NonTerminal(name)
return clone_op(self._variables[name])
def __setattr__(self, name, value):
if name.startswith("_"):
self.__dict__[name] = value
else:
self._variables[name] = value
def _has_same_impl(self, other: Operator):
pass
def is_supervised(self):
return False
def validate_schema(self, X, y=None):
raise NotImplementedError() # TODO
def transform_schema(self, s_X):
raise NotImplementedError() # TODO
def input_schema_fit(self):
raise NotImplementedError() # TODO
def is_classifier(self) -> bool:
raise NotImplementedError() # TODO
def _unfold(self, op: Operator, n: int) -> Optional[Operator]:
"""Unroll all possible operators from the grammar `g` starting from non-terminal `op` after `n` derivations.
Parameters
----------
op : Operator
starting rule (e.g., `g.start`)
n : int
number of derivations
Returns
-------
Optional[Operator]
"""
if isinstance(op, BasePipeline):
steps = op.steps_list()
new_maybe_steps: List[Optional[Operator]] = [
self._unfold(sop, n) for sop in op.steps_list()
]
if None not in new_maybe_steps:
new_steps: List[Operator] = cast(List[Operator], new_maybe_steps)
step_map = {steps[i]: new_steps[i] for i in range(len(steps))}
new_edges = [(step_map[s], step_map[d]) for s, d in op.edges()]
return make_pipeline_graph(new_steps, new_edges, True)
else:
return None
if isinstance(op, OperatorChoice):
steps = [s for s in (self._unfold(sop, n) for sop in op.steps_list()) if s]
return make_choice(*steps) if steps else None
if isinstance(op, NonTerminal):
return self._unfold(self._variables[op.name()], n - 1) if n > 0 else None
if isinstance(op, IndividualOp):
return op
assert False, f"Unknown operator {op}"
def unfold(self, n: int) -> PlannedOperator:
"""
        Explore this grammar starting from `self.start` and generate all possible choices after `n` derivations.
Parameters
----------
n : int
number of derivations
Returns
-------
PlannedOperator
"""
assert hasattr(self, "start"), "Rule start must be defined"
op = self._unfold(self.start, n)
return make_pipeline(op) if op else NoOp
def _sample(self, op: Operator, n: int) -> Optional[Operator]:
"""
        Sample this grammar starting from `op`, that is, choose one element at random at each choice point.
Parameters
----------
op : Operator
starting rule (e.g., `g.start`)
n : int
number of derivations
Returns
-------
Optional[Operator]
"""
if isinstance(op, BasePipeline):
steps = op.steps_list()
new_maybe_steps: List[Optional[Operator]] = [
self._sample(sop, n) for sop in op.steps_list()
]
if None not in new_maybe_steps:
new_steps: List[Operator] = cast(List[Operator], new_maybe_steps)
step_map = {steps[i]: new_steps[i] for i in range(len(steps))}
new_edges = [(step_map[s], step_map[d]) for s, d in op.edges()]
return make_pipeline_graph(new_steps, new_edges, True)
else:
return None
if isinstance(op, OperatorChoice):
# This choice does not need to be cryptographically secure or hard to predict
return self._sample(random.choice(op.steps_list()), n) # nosec
if isinstance(op, NonTerminal):
return self._sample(getattr(self, op.name()), n - 1) if n > 0 else None
if isinstance(op, IndividualOp):
return op
assert False, f"Unknown operator {op}"
def sample(self, n: int) -> PlannedOperator:
"""
        Sample this grammar starting from `self.start`, that is, choose one element at random at each choice point.
Parameters
----------
n : int
number of derivations
Returns
-------
PlannedOperator
"""
assert hasattr(self, "start"), "Rule start must be defined"
op = self._sample(self.start, n)
return make_pipeline(op) if op else NoOp
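# Illustrative sketch (assumptions: PCA, LogisticRegression, and
# KNeighborsClassifier come from lale.lib.sklearn; not part of the original
# module). A grammar is written by assigning rules to attributes; the `start`
# rule is mandatory. `unfold` enumerates every derivation up to the given
# depth, while `sample` draws a single pipeline at random.
#
#   from lale.lib.sklearn import PCA, LogisticRegression, KNeighborsClassifier
#   g = Grammar()
#   g.start = g.prep >> g.est
#   g.prep = NoOp | PCA
#   g.est = LogisticRegression | KNeighborsClassifier
#   planned_all = g.unfold(3)   # planned operator containing all the choices
#   planned_one = g.sample(3)   # one randomly chosen pipeline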
| 7,654 | 31.713675 | 120 |
py
|
lale
|
lale-master/lale/json_operator.py
|
# Copyright 2019-2023 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib
import inspect
import keyword
import logging
import re
from typing import Any, Dict, Tuple, cast
import jsonschema
import lale.operators
from lale.helpers import GenSym
logger = logging.getLogger(__name__)
JSON_TYPE = Dict[str, Any]
SCHEMA = {
"$schema": "http://json-schema.org/draft-04/schema#",
"definitions": {
"operator": {
"anyOf": [
{"$ref": "#/definitions/planned_individual_op"},
{"$ref": "#/definitions/trainable_individual_op"},
{"$ref": "#/definitions/trained_individual_op"},
{"$ref": "#/definitions/planned_pipeline"},
{"$ref": "#/definitions/trainable_pipeline"},
{"$ref": "#/definitions/trained_pipeline"},
{"$ref": "#/definitions/operator_choice"},
]
},
"individual_op": {
"type": "object",
"required": ["class", "state", "operator"],
"properties": {
"class": {
"type": "string",
"pattern": "^([A-Za-z_][A-Za-z_0-9]*[.])*[A-Za-z_][A-Za-z_0-9]*$",
},
"state": {"enum": ["metamodel", "planned", "trainable", "trained"]},
"operator": {"type": "string", "pattern": "^[A-Za-z_][A-Za-z_0-9]*$"},
"label": {"type": "string", "pattern": "^[A-Za-z_][A-Za-z_0-9]*$"},
"documentation_url": {"type": "string"},
"hyperparams": {
"anyOf": [
{"enum": [None]},
{
"type": "object",
"patternProperties": {"^[A-Za-z_][A-Za-z_0-9]*$": {}},
},
]
},
"steps": {
"description": "Nested operators in higher-order individual op.",
"type": "object",
"patternProperties": {
"^[a-z][a-z_0-9]*$": {"$ref": "#/definitions/operator"}
},
},
"is_frozen_trainable": {"type": "boolean"},
"is_frozen_trained": {"type": "boolean"},
"coefs": {"enum": [None, "coefs_not_available"]},
"customize_schema": {
"anyOf": [
{"enum": ["not_available"]},
{"type": "object"},
],
},
},
},
"planned_individual_op": {
"allOf": [
{"$ref": "#/definitions/individual_op"},
{"type": "object", "properties": {"state": {"enum": ["planned"]}}},
]
},
"trainable_individual_op": {
"allOf": [
{"$ref": "#/definitions/individual_op"},
{
"type": "object",
"required": ["hyperparams", "is_frozen_trainable"],
"properties": {"state": {"enum": ["trainable"]}},
},
]
},
"trained_individual_op": {
"allOf": [
{"$ref": "#/definitions/individual_op"},
{
"type": "object",
"required": ["hyperparams", "coefs", "is_frozen_trained"],
"properties": {"state": {"enum": ["trained"]}},
},
]
},
"pipeline": {
"type": "object",
"required": ["class", "state", "edges", "steps"],
"properties": {
"class": {
"enum": [
"lale.operators.PlannedPipeline",
"lale.operators.TrainablePipeline",
"lale.operators.TrainedPipeline",
]
},
"state": {"enum": ["planned", "trainable", "trained"]},
"edges": {
"type": "array",
"items": {
"type": "array",
"minItems": 2,
"maxItems": 2,
"items": {"type": "string", "pattern": "^[a-z][a-z_0-9]*$"},
},
},
"steps": {
"type": "object",
"patternProperties": {
"^[a-z][a-z_0-9]*$": {"$ref": "#/definitions/operator"}
},
},
},
},
"planned_pipeline": {
"allOf": [
{"$ref": "#/definitions/pipeline"},
{
"type": "object",
"properties": {
"state": {"enum": ["planned"]},
"class": {"enum": ["lale.operators.PlannedPipeline"]},
},
},
]
},
"trainable_pipeline": {
"allOf": [
{"$ref": "#/definitions/pipeline"},
{
"type": "object",
"properties": {
"state": {"enum": ["trainable"]},
"class": {"enum": ["lale.operators.TrainablePipeline"]},
"steps": {
"type": "object",
"patternProperties": {
"^[a-z][a-z_0-9]*$": {
"type": "object",
"properties": {
"state": {"enum": ["trainable", "trained"]}
},
}
},
},
},
},
]
},
"trained_pipeline": {
"allOf": [
{"$ref": "#/definitions/pipeline"},
{
"type": "object",
"properties": {
"state": {"enum": ["trained"]},
"class": {"enum": ["lale.operators.TrainedPipeline"]},
"steps": {
"type": "object",
"patternProperties": {
"^[a-z][a-z_0-9]*$": {
"type": "object",
"properties": {"state": {"enum": ["trained"]}},
}
},
},
},
},
]
},
"operator_choice": {
"type": "object",
"required": ["class", "state", "operator", "steps"],
"properties": {
"class": {"enum": ["lale.operators.OperatorChoice"]},
"state": {"enum": ["planned"]},
"operator": {"type": "string"},
"steps": {
"type": "object",
"patternProperties": {
"^[a-z][a-z_0-9]*$": {"$ref": "#/definitions/operator"}
},
},
},
},
},
"$ref": "#/definitions/operator",
}
def json_op_kind(jsn: JSON_TYPE) -> str:
if jsn["class"] == "lale.operators.OperatorChoice":
return "OperatorChoice"
if jsn["class"] in [
"lale.operators.PlannedPipeline",
"lale.operators.TrainablePipeline",
"lale.operators.TrainedPipeline",
]:
return "Pipeline"
return "IndividualOp"
def _get_state(op: "lale.operators.Operator") -> str:
if isinstance(op, lale.operators.TrainedOperator):
return "trained"
if isinstance(op, lale.operators.TrainableOperator):
return "trainable"
if isinstance(op, (lale.operators.PlannedOperator, lale.operators.OperatorChoice)):
return "planned"
if isinstance(op, lale.operators.Operator):
return "metamodel"
raise TypeError(f"Expected lale.operators.Operator, got {type(op)}.")
def _get_cls2label(call_depth: int) -> Dict[str, str]:
inspect_stack = inspect.stack()
if call_depth >= len(inspect_stack):
return {}
frame = inspect_stack[call_depth][0]
cls2label: Dict[str, str] = {}
cls2state: Dict[str, str] = {}
all_items: Dict[str, Any] = {**frame.f_locals, **frame.f_globals}
for label, op in all_items.items():
if isinstance(op, lale.operators.IndividualOp):
state = _get_state(op)
cls = op.class_name()
if cls in cls2state:
insert = (
(cls2state[cls] == "trainable" and state == "planned")
or (
cls2state[cls] == "trained"
and state in ["trainable", "planned"]
)
or (cls2state[cls] == state and label[0].isupper())
)
else:
insert = True
if insert:
if not label.islower():
cls2label[cls] = label
cls2state[cls] = state
return cls2label
def _camelCase_to_snake(name):
s1 = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", name)
return re.sub("([a-z0-9])([A-Z])", r"\1_\2", s1).lower()
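# For example (illustrative, not part of the original module):
#   _camelCase_to_snake("PCA")                 -> "pca"
#   _camelCase_to_snake("LogisticRegression")  -> "logistic_regression"
#   _camelCase_to_snake("MinMaxScaler")        -> "min_max_scaler"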
def _init_gensym(op: "lale.operators.Operator", cls2label: Dict[str, str]) -> GenSym:
label2count: Dict[str, int] = {}
def populate_label2count(op: "lale.operators.Operator"):
if isinstance(op, lale.operators.IndividualOp):
label = cls2label.get(op.class_name(), op.name())
elif isinstance(op, lale.operators.BasePipeline):
for s in op.steps_list():
populate_label2count(s)
label = "pipeline"
elif isinstance(op, lale.operators.OperatorChoice):
for s in op.steps_list():
populate_label2count(s)
label = "choice"
else:
raise ValueError(f"Unexpected argument of type: {type(op)}")
label2count[label] = label2count.get(label, 0) + 1
populate_label2count(op)
non_unique_labels = {ll for ll, c in label2count.items() if c > 1}
snakes = {_camelCase_to_snake(ll) for ll in non_unique_labels}
return GenSym(
{"lale", "make_pipeline", "make_union", "make_choice"}
| set(keyword.kwlist)
| non_unique_labels
| snakes
)
def _hps_to_json_rec(
hps,
cls2label: Dict[str, str],
gensym: GenSym,
steps,
add_custom_default: bool,
) -> Any:
if isinstance(hps, lale.operators.Operator):
step_uid, step_jsn = _op_to_json_rec(hps, cls2label, gensym, add_custom_default)
steps[step_uid] = step_jsn
return {"$ref": f"../steps/{step_uid}"}
elif isinstance(hps, dict):
return {
hp_name: _hps_to_json_rec(
hp_val, cls2label, gensym, steps, add_custom_default
)
for hp_name, hp_val in hps.items()
}
elif isinstance(hps, tuple):
return tuple(
_hps_to_json_rec(hp_val, cls2label, gensym, steps, add_custom_default)
for hp_val in hps
)
elif isinstance(hps, list):
return [
_hps_to_json_rec(hp_val, cls2label, gensym, steps, add_custom_default)
for hp_val in hps
]
else:
return hps
def _get_customize_schema(after, before):
if after == before:
return {}
if after is None or before is None:
return "not_available"
def dict_equal_modulo(d1, d2, mod):
for k in d1.keys():
if k != mod and (k not in d2 or d1[k] != d2[k]):
return False
for k in d2.keys():
if k != mod and k not in d1:
return False
return True
def list_equal_modulo(l1, l2, mod):
if len(l1) != len(l2):
return False
for i, (v1, v2) in enumerate(zip(l1, l2)):
if i != mod and v1 != v2:
return False
return True
if not dict_equal_modulo(after, before, "properties"):
return "not_available"
after = after["properties"]
before = before["properties"]
if not dict_equal_modulo(after, before, "hyperparams"):
return "not_available"
after = after["hyperparams"]["allOf"]
before = before["hyperparams"]["allOf"]
if not list_equal_modulo(after, before, 0):
return "not_available"
after = after[0]
before = before[0]
if not dict_equal_modulo(after, before, "properties"):
return "not_available"
after = after["properties"]
before = before["properties"]
# TODO: only supports customizing the schema for individual hyperparams
hp_diff = {
hp_name: hp_schema
for hp_name, hp_schema in after.items()
if hp_name not in before or hp_schema != before[hp_name]
}
result = {
"properties": {
"hyperparams": {"allOf": [{"type": "object", "properties": hp_diff}]}
}
}
return result
def _top_schemas_to_hparams(top_level_schemas) -> JSON_TYPE:
if not isinstance(top_level_schemas, dict):
return {}
return top_level_schemas.get("properties", {}).get("hyperparams", {})
def _hparams_schemas_to_props(hparams_schemas) -> JSON_TYPE:
if not isinstance(hparams_schemas, dict):
return {}
return hparams_schemas.get("allOf", [{}])[0].get("properties", {})
def _top_schemas_to_hp_props(top_level_schemas) -> JSON_TYPE:
hparams = _top_schemas_to_hparams(top_level_schemas)
props = _hparams_schemas_to_props(hparams)
return props
def _op_to_json_rec(
op: "lale.operators.Operator",
cls2label: Dict[str, str],
gensym: GenSym,
add_custom_default: bool,
) -> Tuple[str, JSON_TYPE]:
jsn: JSON_TYPE = {}
jsn["class"] = op.class_name()
jsn["state"] = _get_state(op)
if isinstance(op, lale.operators.IndividualOp):
jsn["operator"] = op.name()
jsn["label"] = cls2label.get(op.class_name(), op.name())
uid = gensym(_camelCase_to_snake(jsn["label"]))
documentation_url = op.documentation_url()
if documentation_url is not None:
jsn["documentation_url"] = documentation_url
if isinstance(op, lale.operators.TrainableIndividualOp):
if hasattr(op._impl, "viz_label"):
jsn["viz_label"] = op._impl.viz_label()
hyperparams = op.reduced_hyperparams()
if hyperparams is None:
jsn["hyperparams"] = {} if hasattr(op._impl, "fit") else None
else:
hp_schema = _hparams_schemas_to_props(op.hyperparam_schema())
hyperparams = {
k: v
for k, v in hyperparams.items()
if hp_schema.get(k, {}).get("transient", False) is not True
}
for k, s in hp_schema.items():
if s.get("transient", False) == "alwaysPrint":
if k not in hyperparams and "default" in s:
hyperparams[k] = s["default"]
steps: Dict[str, JSON_TYPE] = {}
jsn["hyperparams"] = _hps_to_json_rec(
hyperparams, cls2label, gensym, steps, add_custom_default
)
if len(steps) > 0:
jsn["steps"] = steps
jsn["is_frozen_trainable"] = op.is_frozen_trainable()
if isinstance(op, lale.operators.TrainedIndividualOp):
if hasattr(op._impl, "fit"):
jsn["coefs"] = "coefs_not_available"
else:
jsn["coefs"] = None
jsn["is_frozen_trained"] = op.is_frozen_trained()
orig_schemas = lale.operators.get_lib_schemas(op.impl_class)
if op._schemas is not orig_schemas:
jsn["customize_schema"] = _get_customize_schema(op._schemas, orig_schemas)
if add_custom_default and isinstance(
jsn.get("customize_schema", None), dict
):
if isinstance(jsn.get("hyperparams", None), dict):
assert jsn["hyperparams"] is not None # to help pyright
orig = _top_schemas_to_hp_props(orig_schemas)
cust = _top_schemas_to_hp_props(jsn["customize_schema"])
for hp_name, hp_schema in cust.items():
if "default" in hp_schema:
if hp_name not in jsn["hyperparams"]:
cust_default = hp_schema["default"]
if hp_name in orig and "default" in orig[hp_name]:
orig_default = orig[hp_name]["default"]
if cust_default != orig_default:
jsn["hyperparams"][hp_name] = cust_default
else:
jsn["hyperparams"][hp_name] = cust_default
elif isinstance(op, lale.operators.BasePipeline):
uid = gensym("pipeline")
child2uid: Dict[lale.operators.Operator, str] = {}
child2jsn: Dict[lale.operators.Operator, JSON_TYPE] = {}
for child in op.steps_list():
child_uid, child_jsn = _op_to_json_rec(
child, cls2label, gensym, add_custom_default
)
child2uid[child] = child_uid
child2jsn[child] = child_jsn
jsn["edges"] = [[child2uid[x], child2uid[y]] for x, y in op.edges()]
jsn["steps"] = {child2uid[z]: child2jsn[z] for z in op.steps_list()}
elif isinstance(op, lale.operators.OperatorChoice):
jsn["operator"] = "OperatorChoice"
uid = gensym("choice")
jsn["state"] = "planned"
jsn["steps"] = {}
for step in op.steps_list():
child_uid, child_jsn = _op_to_json_rec(
step, cls2label, gensym, add_custom_default
)
jsn["steps"][child_uid] = child_jsn
else:
raise ValueError(f"Unexpected argument of type: {type(op)}")
return uid, jsn
def to_json(
op: "lale.operators.Operator",
call_depth: int = 1,
add_custom_default: bool = False,
) -> JSON_TYPE:
from lale.settings import disable_hyperparams_schema_validation
cls2label = _get_cls2label(call_depth + 1)
gensym = _init_gensym(op, cls2label)
_uid, jsn = _op_to_json_rec(op, cls2label, gensym, add_custom_default)
if not disable_hyperparams_schema_validation:
jsonschema.validate(jsn, SCHEMA, jsonschema.Draft4Validator)
return jsn
def _hps_from_json_rec(jsn: Any, steps: JSON_TYPE) -> Any:
if isinstance(jsn, dict):
if "$ref" in jsn:
step_uid = jsn["$ref"].split("/")[-1]
step_jsn = steps[step_uid]
return _op_from_json_rec(step_jsn)
else:
return {k: _hps_from_json_rec(v, steps) for k, v in jsn.items()}
elif isinstance(jsn, tuple):
return tuple(_hps_from_json_rec(v, steps) for v in jsn)
elif isinstance(jsn, list):
return [_hps_from_json_rec(v, steps) for v in jsn]
else:
return jsn
def _op_from_json_rec(jsn: JSON_TYPE) -> "lale.operators.Operator":
kind = json_op_kind(jsn)
if kind == "Pipeline":
steps_dict = {uid: _op_from_json_rec(jsn["steps"][uid]) for uid in jsn["steps"]}
steps = [steps_dict[i] for i in steps_dict]
edges = [(steps_dict[x], steps_dict[y]) for (x, y) in jsn["edges"]]
return lale.operators.make_pipeline_graph(steps, edges)
elif kind == "OperatorChoice":
steps = [_op_from_json_rec(s) for s in jsn["steps"].values()]
name = jsn["operator"]
return lale.operators.OperatorChoice(steps, name)
else:
assert kind == "IndividualOp"
full_class_name = jsn["class"]
last_period = full_class_name.rfind(".")
module = importlib.import_module(full_class_name[:last_period])
impl = getattr(module, full_class_name[last_period + 1 :])
schemas = lale.operators.get_lib_schemas(impl)
name = jsn["operator"]
result = lale.operators.make_operator(impl, schemas, name)
if jsn.get("customize_schema", {}) != {}:
new_hps = _top_schemas_to_hp_props(jsn["customize_schema"])
result = result.customize_schema(**new_hps)
if jsn["state"] in ["trainable", "trained"]:
if _get_state(result) == "planned":
hps = jsn["hyperparams"]
if hps is None:
result = result()
else:
hps = _hps_from_json_rec(hps, jsn.get("steps", {}))
result = result(**hps)
trnbl = cast(lale.operators.TrainableIndividualOp, result)
if jsn["is_frozen_trainable"] and not trnbl.is_frozen_trainable():
trnbl = trnbl.freeze_trainable()
assert jsn["is_frozen_trainable"] == trnbl.is_frozen_trainable()
result = trnbl
if jsn["state"] == "trained":
if jsn["coefs"] == "coefs_not_available":
logger.warning(
f"Since the JSON representation of trained operator {name} lacks coefficients, from_json returns a trainable operator instead."
)
else:
assert jsn["coefs"] is None, jsn["coefs"]
assert (
_get_state(result) == jsn["state"]
or jsn["state"] == "trained"
and jsn["coefs"] == "coefs_not_available"
)
if "documentation_url" in jsn:
assert result.documentation_url() == jsn["documentation_url"]
return result
assert False, f"unexpected JSON {jsn}"
def from_json(jsn: JSON_TYPE) -> "lale.operators.Operator":
jsonschema.validate(jsn, SCHEMA, jsonschema.Draft4Validator)
result = _op_from_json_rec(jsn)
return result
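# Illustrative round-trip sketch (assumption: lale.lib.sklearn.PCA is
# installed; not part of the original module):
#
#   from lale.lib.sklearn import PCA
#   jsn = to_json(PCA(n_components=2))
#   assert json_op_kind(jsn) == "IndividualOp"
#   op = from_json(jsn)   # reconstructs a trainable PCA with n_components=2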
| 22,625 | 37.479592 | 147 |
py
|
lale
|
lale-master/lale/expressions.py
|
# Copyright 2020-2022 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ast # see also https://greentreesnakes.readthedocs.io/
import pprint
import typing
from copy import deepcopy
from io import StringIO
from typing import Any, Dict, Optional, Union, overload
import astunparse
from typing_extensions import Literal
AstLits = (ast.Num, ast.Str, ast.List, ast.Tuple, ast.Set, ast.Dict, ast.Constant)
AstLit = Union[ast.Num, ast.Str, ast.List, ast.Tuple, ast.Set, ast.Dict, ast.Constant]
AstExprs = (
*AstLits,
ast.Name,
ast.Expr,
ast.UnaryOp,
ast.BinOp,
ast.BoolOp,
ast.Compare,
ast.Call,
ast.Attribute,
ast.Subscript,
)
AstExpr = Union[
AstLit,
ast.Name,
ast.Expr,
ast.UnaryOp,
ast.BinOp,
ast.BoolOp,
ast.Compare,
ast.Call,
ast.Attribute,
ast.Subscript,
]
# !! WORKAROUND !!
# There is a bug with astunparse and Python 3.8.
# https://github.com/simonpercivall/astunparse/issues/43
# Until it is fixed (which may be never), here is a workaround,
# based on the workaround found in https://github.com/juanlao7/codeclose
class FixUnparser(astunparse.Unparser):
def _Constant(self, t):
if not hasattr(t, "kind"):
setattr(t, "kind", None)
super()._Constant(t)
# !! WORKAROUND !!
# This method should be called instead of astunparse.unparse
def fixedUnparse(tree):
v = StringIO()
FixUnparser(tree, file=v)
return v.getvalue()
class Expr:
_expr: AstExpr
@property
def expr(self):
return self._expr
def __init__(self, expr: AstExpr, istrue=None):
        # The _istrue variable is used to check the boolean nature of
        # the '==' and '!=' operators' results.
self._expr = expr
self._istrue = istrue
def __bool__(self) -> bool:
if self._istrue is not None:
return self._istrue
raise TypeError(
f"Cannot convert expression e1=`{str(self)}` to bool."
"Instead of `e1 and e2`, try writing `[e1, e2]`."
)
def __deepcopy__(self, memo):
cls = self.__class__
result = cls.__new__(cls)
memo[id(self)] = result
for k, v in self.__dict__.items():
setattr(result, k, deepcopy(v, memo))
return result
# the type: ignore statements are needed because the type of object.__eq__
# in typeshed is overly restrictive (to catch common errors)
@overload # type: ignore
def __eq__(self, other: Union["Expr", str, int, float, bool]) -> "Expr":
...
@overload # type: ignore
def __eq__(self, other: None) -> Literal[False]:
...
def __eq__(self, other: Union["Expr", str, int, float, bool, None]):
if isinstance(other, Expr):
comp = ast.Compare(
left=self._expr, ops=[ast.Eq()], comparators=[other._expr]
)
return Expr(comp, istrue=self is other)
elif other is not None:
comp = ast.Compare(
left=self._expr, ops=[ast.Eq()], comparators=[ast.Constant(value=other)]
)
return Expr(comp, istrue=False)
else:
return False
@overload
def __ge__(self, other: Union["Expr", str, int, float, bool]) -> "Expr":
...
@overload
def __ge__(self, other: None) -> Literal[False]:
...
def __ge__(self, other):
if isinstance(other, Expr):
comp = ast.Compare(
left=self._expr, ops=[ast.GtE()], comparators=[other._expr]
)
return Expr(comp)
elif other is not None:
comp = ast.Compare(
left=self._expr,
ops=[ast.GtE()],
comparators=[ast.Constant(value=other)],
)
return Expr(comp)
else:
return False
def __getattr__(self, name: str) -> "Expr":
attr = ast.Attribute(value=self._expr, attr=name)
return Expr(attr)
def __getitem__(self, key: Union[int, str, slice]) -> "Expr":
key_ast: Union[ast.Index, ast.Slice]
if isinstance(key, int):
key_ast = ast.Index(ast.Num(n=key))
elif isinstance(key, str):
key_ast = ast.Index(ast.Str(s=key))
elif isinstance(key, slice):
key_ast = ast.Slice(key.start, key.stop, key.step)
else:
raise TypeError(f"expected int, str, or slice, got {type(key)}")
subscript = ast.Subscript(value=self._expr, slice=key_ast)
return Expr(subscript)
@overload
def __gt__(self, other: Union["Expr", str, int, float, bool]) -> "Expr":
...
@overload
def __gt__(self, other: None) -> Literal[False]:
...
def __gt__(self, other):
if isinstance(other, Expr):
comp = ast.Compare(
left=self._expr, ops=[ast.Gt()], comparators=[other._expr]
)
return Expr(comp)
elif other is not None:
comp = ast.Compare(
left=self._expr, ops=[ast.Gt()], comparators=[ast.Constant(value=other)]
)
return Expr(comp)
else:
return False
@overload
def __le__(self, other: Union["Expr", str, int, float, bool]) -> "Expr":
...
@overload
def __le__(self, other: None) -> Literal[False]:
...
def __le__(self, other):
if isinstance(other, Expr):
comp = ast.Compare(
left=self._expr, ops=[ast.LtE()], comparators=[other._expr]
)
return Expr(comp)
elif other is not None:
comp = ast.Compare(
left=self._expr,
ops=[ast.LtE()],
comparators=[ast.Constant(value=other)],
)
return Expr(comp)
else:
return False
@overload
def __lt__(self, other: Union["Expr", str, int, float, bool]) -> "Expr":
...
@overload
def __lt__(self, other: None) -> Literal[False]:
...
def __lt__(self, other):
if isinstance(other, Expr):
comp = ast.Compare(
left=self._expr, ops=[ast.Lt()], comparators=[other._expr]
)
return Expr(comp)
elif other is not None:
comp = ast.Compare(
left=self._expr, ops=[ast.Lt()], comparators=[ast.Constant(value=other)]
)
return Expr(comp)
else:
return False
@overload # type: ignore
def __ne__(self, other: Union["Expr", str, int, float, bool]) -> "Expr":
...
@overload # type: ignore
def __ne__(self, other: None) -> Literal[False]:
...
def __ne__(self, other):
if isinstance(other, Expr):
comp = ast.Compare(
left=self._expr, ops=[ast.NotEq()], comparators=[other._expr]
)
return Expr(comp, istrue=self is other)
elif other is not None:
comp = ast.Compare(
left=self._expr,
ops=[ast.NotEq()],
comparators=[ast.Constant(value=other)],
)
return Expr(comp, istrue=False)
else:
return False
def __str__(self) -> str:
result = fixedUnparse(self._expr).strip()
if isinstance(self._expr, (ast.UnaryOp, ast.BinOp, ast.Compare, ast.BoolOp)):
if result.startswith("(") and result.endswith(")"):
result = result[1:-1]
return result
@overload
def __add__(self, other: Union["Expr", str, int, float, bool]) -> "Expr":
...
@overload
def __add__(self, other: None) -> Literal[False]:
...
def __add__(self, other) -> Union["Expr", Literal[False]]:
return _make_binop(ast.Add(), self._expr, other)
@overload
def __sub__(self, other: Union["Expr", str, int, float, bool]) -> "Expr":
...
@overload
def __sub__(self, other: None) -> Literal[False]:
...
def __sub__(self, other) -> Union["Expr", Literal[False]]:
return _make_binop(ast.Sub(), self._expr, other)
@overload
def __mul__(self, other: Union["Expr", str, int, float, bool]) -> "Expr":
...
@overload
def __mul__(self, other: None) -> Literal[False]:
...
def __mul__(self, other) -> Union["Expr", Literal[False]]:
return _make_binop(ast.Mult(), self._expr, other)
@overload
def __truediv__(self, other: Union["Expr", str, int, float, bool]) -> "Expr":
...
@overload
def __truediv__(self, other: None) -> Literal[False]:
...
def __truediv__(self, other) -> Union["Expr", Literal[False]]:
return _make_binop(ast.Div(), self._expr, other)
@overload
def __floordiv__(self, other: Union["Expr", str, int, float, bool]) -> "Expr":
...
@overload
def __floordiv__(self, other: None) -> Literal[False]:
...
def __floordiv__(self, other) -> Union["Expr", Literal[False]]:
return _make_binop(ast.FloorDiv(), self._expr, other)
@overload
def __mod__(self, other: Union["Expr", str, int, float, bool]) -> "Expr":
...
@overload
def __mod__(self, other: None) -> Literal[False]:
...
def __mod__(self, other) -> Union["Expr", Literal[False]]:
return _make_binop(ast.Mod(), self._expr, other)
@overload
def __pow__(self, other: Union["Expr", str, int, float, bool]) -> "Expr":
...
@overload
def __pow__(self, other: None) -> Literal[False]:
...
def __pow__(self, other) -> Union["Expr", Literal[False]]:
return _make_binop(ast.Pow(), self._expr, other)
@overload
def __and__(self, other: Union["Expr", str, int, float, bool]) -> "Expr":
...
@overload
def __and__(self, other: None) -> Literal[False]:
...
def __and__(self, other) -> Union["Expr", Literal[False]]:
return _make_binop(ast.BitAnd(), self._expr, other)
@overload
def __or__(self, other: Union["Expr", str, int, float, bool]) -> "Expr":
...
@overload
def __or__(self, other: None) -> Literal[False]:
...
def __or__(self, other) -> Union["Expr", Literal[False]]:
return _make_binop(ast.BitOr(), self._expr, other)
@overload
def _make_binop(op, left: Any, other: Union[Expr, str, int, float, bool]) -> Expr:
...
@overload
def _make_binop(op, left: Any, other: None) -> Literal[False]:
...
def _make_binop(
op, left: Any, other: Union[Expr, str, int, float, bool, None]
) -> Union["Expr", Literal[False]]:
if isinstance(other, Expr):
e = ast.BinOp(left=left, op=op, right=other.expr)
return Expr(e)
elif other is not None:
e = ast.BinOp(left=left, op=op, right=ast.Constant(value=other))
return Expr(e)
else:
return False
def _make_ast_expr(arg: Union[None, Expr, int, float, str, AstExpr]) -> AstExpr:
if arg is None:
return ast.Constant(value=None)
elif isinstance(arg, Expr):
return arg.expr
elif isinstance(arg, (int, float)):
return ast.Num(n=arg)
elif isinstance(arg, str):
return ast.Str(s=arg)
else:
assert isinstance(arg, AstExprs), type(arg)
return arg
def _make_call_expr(
name: str, *args: Union[Expr, AstExpr, int, float, bool, str, None]
) -> Expr:
func_ast = ast.Name(id=name)
args_asts = [_make_ast_expr(arg) for arg in args]
call_ast = ast.Call(func=func_ast, args=args_asts, keywords=[])
return Expr(call_ast)
def string_indexer(subject: Expr) -> Expr:
return _make_call_expr("string_indexer", subject)
def collect_set(group: Expr) -> Expr:
return _make_call_expr("collect_set", group)
def count(group: Expr) -> Expr:
return _make_call_expr("count", group)
def day_of_month(subject: Expr, fmt: Optional[str] = None) -> Expr:
if fmt is None:
return _make_call_expr("day_of_month", subject)
return _make_call_expr("day_of_month", subject, fmt)
def day_of_week(subject: Expr, fmt: Optional[str] = None) -> Expr:
if fmt is None:
return _make_call_expr("day_of_week", subject)
return _make_call_expr("day_of_week", subject, fmt)
def day_of_year(subject: Expr, fmt: Optional[str] = None) -> Expr:
if fmt is None:
return _make_call_expr("day_of_year", subject)
return _make_call_expr("day_of_year", subject, fmt)
def distinct_count(group: Expr) -> Expr:
return _make_call_expr("distinct_count", group)
def hour(subject: Expr, fmt: Optional[str] = None) -> Expr:
if fmt is None:
return _make_call_expr("hour", subject)
return _make_call_expr("hour", subject, fmt)
def item(group: Expr, value: Union[int, str]) -> Expr:
return _make_call_expr("item", group, value)
def max(group: Expr) -> Expr: # pylint:disable=redefined-builtin
return _make_call_expr("max", group)
def max_gap_to_cutoff(group: Expr, cutoff: Expr) -> Expr:
return _make_call_expr("max_gap_to_cutoff", group, cutoff)
def mean(group: Expr) -> Expr:
return _make_call_expr("mean", group)
def min(group: Expr) -> Expr: # pylint:disable=redefined-builtin
return _make_call_expr("min", group)
def minute(subject: Expr, fmt: Optional[str] = None) -> Expr:
if fmt is None:
return _make_call_expr("minute", subject)
return _make_call_expr("minute", subject, fmt)
def month(subject: Expr, fmt: Optional[str] = None) -> Expr:
if fmt is None:
return _make_call_expr("month", subject)
return _make_call_expr("month", subject, fmt)
def normalized_count(group: Expr) -> Expr:
return _make_call_expr("normalized_count", group)
def normalized_sum(group: Expr) -> Expr:
return _make_call_expr("normalized_sum", group)
def recent(series: Expr, age: int) -> Expr:
return _make_call_expr("recent", series, age)
def recent_gap_to_cutoff(series: Expr, cutoff: Expr, age: int) -> Expr:
return _make_call_expr("recent_gap_to_cutoff", series, cutoff, age)
def replace(
subject: Expr,
old2new: Dict[Any, Any],
handle_unknown: str = "identity",
unknown_value=None,
) -> Expr:
old2new_str = pprint.pformat(old2new)
module_ast = ast.parse(old2new_str)
old2new_ast = typing.cast(ast.Expr, module_ast.body[0])
assert handle_unknown in ["identity", "use_encoded_value"]
return _make_call_expr(
"replace",
subject,
old2new_ast,
handle_unknown,
unknown_value,
)
def ite(
cond: Expr,
v1: Union[Expr, int, float, bool, str],
v2: Union[Expr, int, float, bool, str],
) -> Expr:
if not isinstance(v1, Expr):
v1 = Expr(ast.Constant(value=v1))
if not isinstance(v2, Expr):
v2 = Expr(ast.Constant(value=v2))
return _make_call_expr("ite", cond, v1, v2)
def identity(subject: Expr) -> Expr:
return _make_call_expr("identity", subject)
def astype(dtype, subject: Expr) -> Expr:
return _make_call_expr("astype", dtype, subject)
def hash(hash_method: str, subject: Expr) -> Expr: # pylint:disable=redefined-builtin
return _make_call_expr("hash", hash_method, subject)
def hash_mod(hash_method: str, subject: Expr, n: Expr) -> Expr:
return _make_call_expr("hash_mod", hash_method, subject, n)
def sum(group: Expr) -> Expr: # pylint:disable=redefined-builtin
return _make_call_expr("sum", group)
def trend(series: Expr) -> Expr:
return _make_call_expr("trend", series)
def variance(group: Expr) -> Expr:
return _make_call_expr("variance", group)
def window_max(series: Expr, size: int) -> Expr:
return _make_call_expr("window_max", series, size)
def window_max_trend(series: Expr, size: int) -> Expr:
return _make_call_expr("window_max_trend", series, size)
def window_mean(series: Expr, size: int) -> Expr:
return _make_call_expr("window_mean", series, size)
def window_mean_trend(series: Expr, size: int) -> Expr:
return _make_call_expr("window_mean_trend", series, size)
def window_min(series: Expr, size: int) -> Expr:
return _make_call_expr("window_min", series, size)
def window_min_trend(series: Expr, size: int) -> Expr:
return _make_call_expr("window_min_trend", series, size)
def window_variance(series: Expr, size: int) -> Expr:
return _make_call_expr("window_variance", series, size)
def window_variance_trend(series: Expr, size: int) -> Expr:
return _make_call_expr("window_variance_trend", series, size)
def first(group: Expr) -> Expr:
return _make_call_expr("first", group)
def isnan(column: Expr) -> Expr:
return _make_call_expr("isnan", column)
def isnotnan(column: Expr) -> Expr:
return _make_call_expr("isnotnan", column)
def isnull(column: Expr) -> Expr:
return _make_call_expr("isnull", column)
def isnotnull(column: Expr) -> Expr:
return _make_call_expr("isnotnull", column)
def asc(column: Union[Expr, str]) -> Expr:
return _make_call_expr("asc", column)
def desc(column: Union[Expr, str]) -> Expr:
return _make_call_expr("desc", column)
def median(group: Expr) -> Expr:
return _make_call_expr("median", group)
def mode(group: Expr) -> Expr:
return _make_call_expr("mode", group)
it = Expr(ast.Name(id="it"))
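# Illustrative sketch (not part of the original module): `it` is the entry
# point for building column expressions, which are typically consumed by
# relational operators such as Map or Filter in lale.lib.lale (assumed
# consumers here).
#
#   from lale.expressions import it, ite, count
#   is_adult = it.age >= 18                  # builds an Expr, not a bool
#   bucket = ite(it.age >= 18, "adult", "minor")
#   n_rows = count(it.id)
#   str(is_adult)                            # 'it.age >= 18'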
def _it_column(expr):
if isinstance(expr, ast.Attribute):
if _is_ast_name_it(expr.value):
return expr.attr
else:
raise ValueError(
f"Illegal {fixedUnparse(expr)}. Only the access to `it` is supported"
)
elif isinstance(expr, ast.Subscript):
if isinstance(expr.slice, ast.Constant) or (
_is_ast_name_it(expr.value) and isinstance(expr.slice, ast.Index)
):
v = getattr(expr.slice, "value", None)
if isinstance(expr.slice, ast.Constant):
return v
elif isinstance(v, ast.Constant):
return v.value
elif isinstance(v, ast.Str):
return v.s
else:
raise ValueError(
f"Illegal {fixedUnparse(expr)}. Only the access to `it` is supported"
)
else:
raise ValueError(
f"Illegal {fixedUnparse(expr)}. Only the access to `it` is supported"
)
else:
raise ValueError(
f"Illegal {fixedUnparse(expr)}. Only the access to `it` is supported"
)
def _is_ast_name_it(expr):
return isinstance(expr, ast.Name) and expr.id == "it"
| 19,054 | 27.611111 | 89 |
py
|
lale
|
lale-master/lale/schema_ranges.py
|
# Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Optional, Union
from .schema_utils import JsonSchema
class SchemaRange:
def __init__(
self,
minimum=None,
maximum=None,
exclusive_minimum=False,
exclusive_maximum=False,
is_integer: bool = False,
distribution: Optional[str] = None,
) -> None:
self.minimum = minimum
self.maximum = maximum
self.exclusive_minimum = exclusive_minimum
self.exclusive_maximum = exclusive_maximum
self.is_integer = is_integer
self.distribution = distribution
def __str__(self):
res = ""
if self.minimum is None:
res += "(infty"
else:
if self.exclusive_minimum:
res += "("
else:
res += "["
res += str(self.minimum)
res += ","
if self.maximum is None:
res += "infty"
if self.distribution == "loguniform":
res += "//log"
res += ")"
else:
res += str(self.maximum)
if self.distribution == "loguniform":
res += "//log"
if self.exclusive_maximum:
res += ")"
else:
res += "]"
return res
@classmethod
def point(cls, pt: Union[int, float]):
return SchemaRange(
minimum=pt,
maximum=pt,
exclusive_minimum=False,
exclusive_maximum=False,
is_integer=isinstance(pt, int),
)
@classmethod
def fromSchema(cls, schema: Any) -> "SchemaRange":
return SchemaRange(
minimum=schema.get("minimum", None),
maximum=schema.get("maximum", None),
exclusive_minimum=schema.get("exclusiveMinimum", False),
exclusive_maximum=schema.get("exclusiveMaximum", False),
is_integer=schema.get("type", "number") == "integer",
distribution=schema.get("distribution", None),
)
@classmethod
def fromSchemaForOptimizer(cls, schema: Any) -> "SchemaRange":
s = cls.fromSchema(schema)
minimum = schema.get("minimumForOptimizer", None)
maximum = schema.get("maximumForOptimizer", None)
exclusive_minimum = schema.get("exclusiveMinimumForOptimizer", False)
exclusive_maximum = schema.get("exclusiveMaximumForOptimizer", False)
is_integer = (
schema.get("type", "numberForOptimizer") == "integer" or s.is_integer
)
if minimum is None:
minimum = s.minimum
if s.minimum is not None:
exclusive_minimum = exclusive_minimum or s.exclusive_minimum
elif s.minimum is not None and minimum == s.minimum:
exclusive_minimum = exclusive_minimum or s.exclusive_minimum
if maximum is None:
maximum = s.maximum
if s.maximum is not None:
exclusive_maximum = exclusive_maximum or s.exclusive_maximum
        elif s.maximum is not None and maximum == s.maximum:
exclusive_maximum = exclusive_maximum or s.exclusive_maximum
distribution = s.distribution
return SchemaRange(
minimum=minimum,
maximum=maximum,
exclusive_minimum=exclusive_minimum,
exclusive_maximum=exclusive_maximum,
is_integer=is_integer,
distribution=distribution,
)
@classmethod
def to_schema_with_optimizer(
cls, actual_range: "SchemaRange", optimizer_range: "SchemaRange"
) -> JsonSchema:
number_schema: JsonSchema = {}
if actual_range.is_integer:
number_schema["type"] = "integer"
else:
number_schema["type"] = "number"
if optimizer_range.is_integer:
number_schema["laleType"] = "integer"
if actual_range.minimum is not None:
number_schema["minimum"] = actual_range.minimum
if actual_range.exclusive_minimum:
number_schema["exclusiveMinimum"] = True
if optimizer_range.minimum is not None:
if (
actual_range.minimum is None
or actual_range.minimum < optimizer_range.minimum
or (
actual_range.minimum == optimizer_range.minimum
and optimizer_range.exclusive_minimum
and not actual_range.exclusive_minimum
)
):
number_schema["minimumForOptimizer"] = optimizer_range.minimum
if optimizer_range.exclusive_minimum:
number_schema["exclusiveMinimumForOptimizer"] = True
if actual_range.maximum is not None:
number_schema["maximum"] = actual_range.maximum
if actual_range.exclusive_maximum:
number_schema["exclusiveMaximum"] = True
if optimizer_range.maximum is not None:
if (
actual_range.maximum is None
or actual_range.maximum > optimizer_range.maximum
or (
actual_range.maximum == optimizer_range.maximum
and optimizer_range.exclusive_maximum
and not actual_range.exclusive_maximum
)
):
number_schema["maximumForOptimizer"] = optimizer_range.maximum
if optimizer_range.exclusive_maximum:
number_schema["exclusiveMaximumForOptimizer"] = True
if optimizer_range.distribution is not None:
number_schema["distribution"] = optimizer_range.distribution
return number_schema
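    # Illustrative sketch (not part of the original module): combining an
    # actual range with a tighter optimizer range keeps the hard bounds and
    # adds *ForOptimizer hints only where the optimizer range is strictly
    # tighter.
    #
    #   actual = SchemaRange(minimum=1, is_integer=True)
    #   for_opt = SchemaRange(minimum=1, maximum=100, is_integer=True)
    #   SchemaRange.to_schema_with_optimizer(actual, for_opt)
    #   # -> {"type": "integer", "laleType": "integer",
    #   #     "minimum": 1, "maximumForOptimizer": 100}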
def __iand__(self, other: "SchemaRange"):
self.is_integer = self.is_integer or other.is_integer
if other.minimum is not None:
if self.minimum is None:
self.minimum = other.minimum
self.exclusive_minimum = other.exclusive_minimum
elif self.minimum == other.minimum:
self.exclusive_minimum = (
self.exclusive_minimum or other.exclusive_minimum
)
elif self.minimum < other.minimum:
self.minimum = other.minimum
self.exclusive_minimum = other.exclusive_minimum
if other.maximum is not None:
if self.maximum is None:
self.maximum = other.maximum
self.exclusive_maximum = other.exclusive_maximum
elif self.maximum == other.maximum:
self.exclusive_maximum = (
self.exclusive_maximum or other.exclusive_maximum
)
elif self.maximum > other.maximum:
self.maximum = other.maximum
self.exclusive_maximum = other.exclusive_maximum
if self.distribution is None:
self.distribution = other.distribution
return self
def diff(self, other: "SchemaRange") -> Optional[bool]:
"""Returns None if the resulting region is impossible.
Returns True if the other constraint was completely subtracted from
self. If it could not be, then it returns False (and the caller should probably
keep the other constraint as a negated constraint)
"""
# for now, just handle simple exclusions
if not other.is_integer or other.is_integer == self.is_integer:
# the exclusion is less than the actual range
if (
self.minimum is not None
and other.maximum is not None
and (
other.maximum < self.minimum
or (
other.maximum == self.minimum
and (self.exclusive_minimum or other.exclusive_maximum)
)
)
):
return True
# the exclusion is greater than the actual range
if (
self.maximum is not None
and other.minimum is not None
and (
other.minimum > self.maximum
or (
other.minimum == self.maximum
and (self.exclusive_maximum or other.exclusive_minimum)
)
)
):
return True
if other.minimum is None:
if self.minimum is None:
# the exclusion and the range have no minimum
if other.maximum is None:
# nothing is possible
return None
else:
self.minimum = other.maximum
self.exclusive_minimum = not other.exclusive_maximum
return True
# else might create a hole, so avoid this case
else:
# ASSERT: other.minimum is not None
if (
self.minimum is None
or self.minimum < other.minimum
or (
self.minimum == other.minimum
and (not self.exclusive_minimum or other.exclusive_minimum)
)
):
if (
other.maximum is None
or self.maximum is not None
and (
other.maximum > self.maximum
or (
(
other.maximum == self.maximum
and (
not other.exclusive_maximum
or self.exclusive_maximum
)
)
)
)
):
self.maximum = other.minimum
self.exclusive_maximum = not other.exclusive_minimum
return True
# else might create a hole, so avoid this case
else:
# self.minimum >= other.minimum
if (
other.maximum is None
or self.maximum < other.maximum
or (
self.maximum == other.maximum
and (not other.exclusive_maximum or self.exclusive_maximum)
)
):
# nothing is possible
return None
else:
self.minimum = other.maximum
self.exclusive_minimum = not other.exclusive_maximum
return True
if other.maximum is None:
if self.maximum is None:
# the exclusion and the range have no maximum
if other.minimum is None:
# nothing is possible
return None
else:
self.maximum = other.minimum
self.exclusive_maximum = not other.exclusive_minimum
return True
# else might create a hole, so avoid this case
else:
# ASSERT: other.maximum is not None
if (
self.maximum is None
or self.maximum > other.maximum
or (
self.maximum == other.maximum
and (not self.exclusive_maximum or other.exclusive_maximum)
)
):
if (
other.minimum is None
or self.minimum is not None
and (
other.minimum < self.minimum
or (
(
other.minimum == self.minimum
and (
not other.exclusive_minimum
or self.exclusive_minimum
)
)
)
)
):
self.minimum = other.maximum
self.exclusive_minimum = not other.exclusive_maximum
return True
# else might create a hole, so avoid this case
else:
# self.maximum >= other.maximum
if (
other.minimum is None
or self.minimum > other.minimum
or (
self.minimum == other.minimum
and (not other.exclusive_minimum or self.exclusive_minimum)
)
):
# nothing is possible
return None
else:
self.maximum = other.minimum
self.exclusive_maximum = not other.exclusive_minimum
return True
return False
def remove_point(self, other: Union[int, float]) -> Optional[bool]:
"""Returns None if the resulting region is impossible.
Returns True if the other constraint was completely subtracted from
self. If it could not be, then it returns False (and the caller should probably
keep the other constraint as a negated constraint)
"""
return self.diff(SchemaRange.point(other))
@classmethod
def is_empty2(cls, lower: "SchemaRange", upper: "SchemaRange") -> bool:
"""Determines if the range given by taking lower bounds from lower and upper bound from upper is empty (contains nothing)
is_integer is assumed to be their disjunction
"""
is_integer = lower.is_integer or upper.is_integer
if lower.minimum is not None and upper.maximum is not None:
if lower.minimum > upper.maximum:
return True
if lower.minimum == upper.maximum and (
lower.exclusive_minimum or upper.exclusive_maximum
):
return True
if (
is_integer
and lower.exclusive_minimum
and upper.exclusive_maximum
and lower.minimum + 1 == upper.maximum
):
return True
return False
def is_empty(self) -> bool:
"""Determines if the range is empty (contains nothing)"""
return SchemaRange.is_empty2(self, self)
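# Illustrative sketch (not part of the original module): intersecting ranges
# in place with &= and subtracting an exclusion with diff().
#
#   r = SchemaRange(minimum=0, maximum=10, is_integer=True)
#   r &= SchemaRange(minimum=5)       # r is now [5, 10]
#   r.diff(SchemaRange.point(10))     # returns True; r is now [5, 10)
#   r.is_empty()                      # False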
| 15,509 | 38.465649 | 129 |
py
|
lale
|
lale-master/lale/__init__.py
|
# Copyright 2019-2023 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
__version__ = "0.7.8"
try:
# This variable is injected in the __builtins__ by the build
# process. It is used to not try to import rest of the lale packages when
# it is being installed.
__LALE_SETUP__ # type: ignore # pylint:disable=used-before-assignment
except NameError:
__LALE_SETUP__ = False
if __LALE_SETUP__: # type: ignore
sys.stderr.write("Partial import of lale during the build process.\n")
# We are not importing the rest of lale during the build
# process.
else:
# all other code will go here.
from .operator_wrapper import (
register_lale_wrapper_modules as register_lale_wrapper_modules,
)
from .operator_wrapper import wrap_imported_operators as wrap_imported_operators
| 1,341 | 35.27027 | 84 |
py
|
lale
|
lale-master/lale/sklearn_compat.py
|
# Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import Any
# This method (and the to_lale() method on the returned value)
# are the only ones intended to be exported
# This entire file is deprecated, and will be removed soon.
# Please remove all calls to make_sklearn_compat from your code
# as they are no longer needed
def make_sklearn_compat(op):
"""This is a deprecated method for backward compatibility and will be removed soon"""
warnings.warn(
"sklearn_compat.make_sklearn_compat exists for backwards compatibility and will be removed soon",
DeprecationWarning,
)
return op
def sklearn_compat_clone(impl: Any) -> Any:
"""This is a deprecated method for backward compatibility and will be removed soon.
call lale.operators.clone (or scikit-learn clone) instead"""
warnings.warn(
"sklearn_compat.sklearn_compat_clone exists for backwards compatibility and will be removed soon",
DeprecationWarning,
)
if impl is None:
return None
from sklearn.base import clone
cp = clone(impl, safe=False)
return cp
| 1,656 | 33.520833 | 106 |
py
|
lale
|
lale-master/lale/schema2enums.py
|
# Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import enum
import logging
from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple
from .schema_utils import JsonSchema, SchemaEnum
logger = logging.getLogger(__name__)
class DiscoveredEnums:
def __init__(
self,
enums: Optional[SchemaEnum] = None,
children: Optional[Dict[str, "DiscoveredEnums"]] = None,
) -> None:
self.enums = enums
self.children = children
def __str__(self) -> str:
def val_as_str(v):
if v is None:
return "null"
elif isinstance(v, str):
return f"'{v}'"
else:
return str(v)
en = ""
if self.enums:
ens = [val_as_str(v) for v in self.enums]
en = ", ".join(sorted(ens))
ch = ""
if self.children:
chs = [f"{str(k)}->{str(v)}" for k, v in self.children.items()]
ch = ",".join(chs)
if en and ch:
en = en + "; "
return "<" + en + ch + ">"
def schemaToDiscoveredEnums(schema: JsonSchema) -> Optional[DiscoveredEnums]:
"""Given a schema, returns a positive enumeration set.
This is very conservative, and even includes negated enum constants
(since the assumption is that they may, in some contexts, be valid)
"""
def combineDiscoveredEnums(
combine: Callable[[Iterable[SchemaEnum]], Optional[SchemaEnum]],
des: Iterable[Optional[DiscoveredEnums]],
) -> Optional[DiscoveredEnums]:
enums: List[SchemaEnum] = []
children: Dict[str, List[DiscoveredEnums]] = {}
for de in des:
if de is None:
continue
if de.enums is not None:
enums.append(de.enums)
if de.children is not None:
for cn, cv in de.children.items():
if cv is None:
continue
if cn in children:
children[cn].append(cv)
else:
children[cn] = [cv]
combined_enums: Optional[SchemaEnum] = None
if enums:
combined_enums = combine(enums)
if not children:
if combined_enums is None:
return None
else:
return DiscoveredEnums(enums=combined_enums)
else:
combined_children: Dict[str, DiscoveredEnums] = {}
for ccn, ccv in children.items():
if not ccv:
continue
ccvc = combineDiscoveredEnums(combine, ccv)
if ccvc is not None:
combined_children[ccn] = ccvc
return DiscoveredEnums(enums=combined_enums, children=combined_children)
def joinDiscoveredEnums(
des: Iterable[Optional[DiscoveredEnums]],
) -> Optional[DiscoveredEnums]:
def op(args: Iterable[SchemaEnum]) -> Optional[SchemaEnum]:
return set.union(*args)
return combineDiscoveredEnums(op, des)
if schema is True or schema is False:
return None
if "enum" in schema:
# TODO: we should validate the enum elements according to the schema, like schema2search_space does
return DiscoveredEnums(enums=set(schema["enum"]))
if "type" in schema:
typ = schema["type"]
if typ == "object" and "properties" in schema:
props = schema["properties"]
pret: Dict[str, DiscoveredEnums] = {}
for p, s in props.items():
pos = schemaToDiscoveredEnums(s)
if pos is not None:
pret[p] = pos
if pret:
return DiscoveredEnums(children=pret)
else:
return None
else:
return None
if "not" in schema:
neg = schemaToDiscoveredEnums(schema["not"])
return neg
if "allOf" in schema:
posl = [schemaToDiscoveredEnums(s) for s in schema["allOf"]]
return joinDiscoveredEnums(posl)
if "anyOf" in schema:
posl = [schemaToDiscoveredEnums(s) for s in schema["anyOf"]]
return joinDiscoveredEnums(posl)
if "oneOf" in schema:
posl = [schemaToDiscoveredEnums(s) for s in schema["oneOf"]]
return joinDiscoveredEnums(posl)
return None
def accumulateDiscoveredEnumsToPythonEnums(
de: Optional[DiscoveredEnums], path: List[str], acc: Dict[str, enum.Enum]
) -> None:
    def withEnumValue(e: Any) -> Tuple[str, Any]:
if isinstance(e, str):
return (e.replace("-", "_"), e)
elif isinstance(e, (int, float, complex)):
return ("num" + str(e), e)
else:
logger.info(
f"Unknown type ({type(e)}) of enumeration constant {e}, not handling very well"
)
return (str(e), e)
if de is None:
return
if de.enums is not None:
ppath, _ = withEnumValue("_".join(path))
epath = ".".join(path)
vals = (withEnumValue(x) for x in de.enums if x is not None)
# pyright does not currently understand this form
acc[ppath] = enum.Enum(epath, vals) # type: ignore
if de.children is not None:
for k in de.children:
accumulateDiscoveredEnumsToPythonEnums(de.children[k], [k] + path, acc)
def discoveredEnumsToPythonEnums(de: Optional[DiscoveredEnums]) -> Dict[str, enum.Enum]:
acc: Dict[str, enum.Enum] = {}
accumulateDiscoveredEnumsToPythonEnums(de, [], acc)
return acc
def schemaToPythonEnums(schema: JsonSchema) -> Dict[str, enum.Enum]:
de = schemaToDiscoveredEnums(schema)
enums = discoveredEnumsToPythonEnums(de)
return enums
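# Illustrative usage sketch added by the editor (not part of the original module);
# the helper name _example_schema_to_python_enums is hypothetical.
def _example_schema_to_python_enums():
    schema = {
        "type": "object",
        "properties": {"loss": {"enum": ["hinge", "squared_hinge"]}},
    }
    enums = schemaToPythonEnums(schema)
    # one Python Enum per hyperparameter that has enum constants in the schema
    assert set(enums.keys()) == {"loss"}
    assert {member.value for member in enums["loss"]} == {"hinge", "squared_hinge"}
    return enums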
def addDictAsFields(obj: Any, d: Dict[str, Any], force=False) -> None:
if d is None:
return
for k, v in d.items():
if k == "":
logger.warning(
f"There was a top level enumeration specified, so it is not being added to {getattr(obj, '_name', '???')}"
)
elif hasattr(obj, k) and not force:
logger.error(
f"The object {getattr(obj, '_name', '???')} already has the field {k}. This conflicts with our attempt at adding that key as an enumeration field"
)
else:
setattr(obj, k, v)
def addSchemaEnumsAsFields(obj: Any, schema: JsonSchema, force=False) -> None:
enums = schemaToPythonEnums(schema)
addDictAsFields(obj, enums, force)
| 7,060 | 32.784689 | 163 |
py
|
lale
|
lale-master/lale/helpers.py
|
# Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ast
import copy
import importlib
import logging
import sys
import time
import traceback
from importlib import util
from typing import (
TYPE_CHECKING,
Any,
Dict,
Iterable,
List,
Mapping,
Optional,
Set,
Tuple,
TypeVar,
Union,
overload,
)
import numpy as np
import pandas as pd
import scipy.sparse
import sklearn.pipeline
from numpy.random import RandomState
from sklearn.metrics import accuracy_score, check_scoring, log_loss
from sklearn.model_selection import StratifiedKFold
from sklearn.utils.metaestimators import _safe_split
import lale.datasets.data_schemas
if sys.version_info >= (3, 8):
from typing import Literal # raises a mypy error for <3.8
else:
from typing_extensions import Literal
try:
import torch
torch_installed = True
except ImportError:
torch_installed = False
spark_loader = util.find_spec("pyspark")
spark_installed = spark_loader is not None
if spark_installed:
from pyspark.sql.dataframe import DataFrame as spark_df
logger = logging.getLogger(__name__)
LALE_NESTED_SPACE_KEY = "__lale_nested_space"
astype_type = Literal["lale", "sklearn"]
datatype_param_type = Literal["pandas", "spark"]
randomstate_type = Union[RandomState, int, None]
def make_nested_hyperopt_space(sub_space):
return {LALE_NESTED_SPACE_KEY: sub_space}
def assignee_name(level=1) -> Optional[str]:
tb = traceback.extract_stack()
file_name, _line_number, _function_name, text = tb[-(level + 2)]
try:
tree = ast.parse(text, file_name)
except SyntaxError:
return None
assert tree is not None and isinstance(tree, ast.Module)
if len(tree.body) == 1:
stmt = tree.body[0]
if isinstance(stmt, ast.Assign):
lhs = stmt.targets
if len(lhs) == 1:
res = lhs[0]
if isinstance(res, ast.Name):
return res.id
return None
def arg_name(pos=0, level=1) -> Optional[str]:
tb = traceback.extract_stack()
file_name, _line_number, _function_name, text = tb[-(level + 2)]
try:
tree = ast.parse(text, file_name)
except SyntaxError:
return None
assert tree is not None and isinstance(tree, ast.Module)
if len(tree.body) == 1:
stmt = tree.body[0]
if isinstance(stmt, ast.Expr):
expr = stmt.value
if isinstance(expr, ast.Call):
args = expr.args
if pos < len(args):
res = args[pos]
if isinstance(res, ast.Name):
return res.id
return None
def data_to_json(data, subsample_array: bool = True) -> Union[list, dict, int, float]:
if isinstance(data, tuple):
# convert to list
return [data_to_json(elem, subsample_array) for elem in data]
if isinstance(data, list):
return [data_to_json(elem, subsample_array) for elem in data]
elif isinstance(data, dict):
return {key: data_to_json(data[key], subsample_array) for key in data}
elif isinstance(data, np.ndarray):
return ndarray_to_json(data, subsample_array)
elif isinstance(data, scipy.sparse.csr_matrix):
return ndarray_to_json(data.toarray(), subsample_array)
elif isinstance(data, (pd.DataFrame, pd.Series)):
np_array = data.values
return ndarray_to_json(np_array, subsample_array)
elif torch_installed and isinstance(data, torch.Tensor):
np_array = data.detach().numpy()
return ndarray_to_json(np_array, subsample_array)
elif isinstance(data, (np.int64, np.int32, np.int16)): # type: ignore
return int(data)
elif isinstance(data, (np.float32, np.float64)): # type: ignore
return float(data)
else:
return data
def is_empty_dict(val) -> bool:
return isinstance(val, dict) and len(val) == 0
def dict_without(orig_dict: Dict[str, Any], key: str) -> Dict[str, Any]:
if key not in orig_dict:
return orig_dict
return {k: v for k, v in orig_dict.items() if k != key}
def json_lookup(ptr, jsn, default=None):
steps = ptr.split("/")
sub_jsn = jsn
for s in steps:
if s not in sub_jsn:
return default
sub_jsn = sub_jsn[s]
return sub_jsn
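# Illustrative usage sketch added by the editor (not part of the original module);
# the helper name _example_json_lookup is hypothetical.
def _example_json_lookup():
    jsn = {"properties": {"X": {"type": "array"}}}
    assert json_lookup("properties/X/type", jsn) == "array"
    # missing pointers fall back to the supplied default
    assert json_lookup("properties/y/type", jsn, default="unknown") == "unknown"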
def ndarray_to_json(arr: np.ndarray, subsample_array: bool = True) -> Union[list, dict]:
# sample 10 rows and no limit on columns
num_subsamples: List[int]
if subsample_array:
num_subsamples = [10, np.iinfo(int).max, np.iinfo(int).max]
else:
num_subsamples = [
np.iinfo(int).max,
np.iinfo(int).max,
np.iinfo(int).max,
]
def subarray_to_json(indices: Tuple[int, ...]) -> Any:
if len(indices) == len(arr.shape):
if isinstance(arr[indices], (bool, int, float, str)):
return arr[indices]
elif np.issubdtype(arr.dtype, np.bool_):
return bool(arr[indices])
elif np.issubdtype(arr.dtype, np.integer):
return int(arr[indices])
elif np.issubdtype(arr.dtype, np.number):
return float(arr[indices])
elif arr.dtype.kind in ["U", "S", "O"]:
return str(arr[indices])
else:
raise ValueError(
f"Unexpected dtype {arr.dtype}, "
f"kind {arr.dtype.kind}, "
f"type {type(arr[indices])}."
)
else:
assert len(indices) < len(arr.shape)
return [
subarray_to_json(indices + (i,))
for i in range(
min(num_subsamples[len(indices)], arr.shape[len(indices)])
)
]
return subarray_to_json(())
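# Illustrative usage sketch added by the editor (not part of the original module);
# the helper name _example_data_to_json is hypothetical.
def _example_data_to_json():
    data = {"X": np.array([[1, 2], [3, 4]]), "n": np.int64(5)}
    # numpy containers and scalars are converted to plain JSON-friendly values
    assert data_to_json(data) == {"X": [[1, 2], [3, 4]], "n": 5}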
def split_with_schemas(estimator, all_X, all_y, indices, train_indices=None):
subset_X, subset_y = _safe_split(estimator, all_X, all_y, indices, train_indices)
if hasattr(all_X, "json_schema"):
n_rows = subset_X.shape[0]
schema = {
"type": "array",
"minItems": n_rows,
"maxItems": n_rows,
"items": all_X.json_schema["items"],
}
lale.datasets.data_schemas.add_schema(subset_X, schema)
if hasattr(all_y, "json_schema"):
n_rows = subset_y.shape[0]
schema = {
"type": "array",
"minItems": n_rows,
"maxItems": n_rows,
"items": all_y.json_schema["items"],
}
lale.datasets.data_schemas.add_schema(subset_y, schema)
return subset_X, subset_y
def fold_schema(X, y, cv=1, is_classifier=True):
def fold_schema_aux(data, n_rows):
orig_schema = lale.datasets.data_schemas._to_schema(data)
aux_result = {**orig_schema, "minItems": n_rows, "maxItems": n_rows}
return aux_result
n_splits = cv if isinstance(cv, int) else cv.get_n_splits()
try:
n_samples = X.shape[0] if hasattr(X, "shape") else len(X)
except TypeError: # raised for Spark dataframes.
n_samples = X.count() if hasattr(X, "count") else 0
if n_splits == 1:
n_rows_fold = n_samples
elif is_classifier:
n_classes = len(set(y))
n_rows_unstratified = (n_samples // n_splits) * (n_splits - 1)
# in stratified case, fold sizes can differ by up to n_classes
n_rows_fold = max(1, n_rows_unstratified - n_classes)
else:
n_rows_fold = (n_samples // n_splits) * (n_splits - 1)
schema_X = fold_schema_aux(X, n_rows_fold)
schema_y = fold_schema_aux(y, n_rows_fold)
result = {"properties": {"X": schema_X, "y": schema_y}}
return result
def cross_val_score_track_trials(
estimator,
X,
y=None,
scoring: Any = accuracy_score,
cv: Any = 5,
args_to_scorer: Optional[Dict[str, Any]] = None,
args_to_cv: Optional[Dict[str, Any]] = None,
**fit_params,
):
"""
Use the given estimator to perform fit and predict for splits defined by 'cv' and compute the given score on
each of the splits.
Parameters
----------
estimator: A valid sklearn_wrapper estimator
X: Valid data that works with the estimator
y: Valid target that works with the estimator
scoring: string or a scorer object created using
https://scikit-learn.org/stable/modules/generated/sklearn.metrics.make_scorer.html#sklearn.metrics.make_scorer.
A string from sklearn.metrics.SCORERS.keys() can be used or a scorer created from one of
sklearn.metrics (https://scikit-learn.org/stable/modules/classes.html#module-sklearn.metrics).
A completely custom scorer object can be created from a python function following the example at
https://scikit-learn.org/stable/modules/model_evaluation.html
        The metric has to return a scalar value.
cv: an integer or an object that has a split function as a generator yielding (train, test) splits as arrays of indices.
Integer value is used as number of folds in sklearn.model_selection.StratifiedKFold, default is 5.
Note that any of the iterators from https://scikit-learn.org/stable/modules/cross_validation.html#cross-validation-iterators can be used here.
args_to_scorer: A dictionary of additional keyword arguments to pass to the scorer.
Used for cases where the scorer has a signature such as ``scorer(estimator, X, y, **kwargs)``.
args_to_cv: A dictionary of additional keyword arguments to pass to the split method of cv.
This is only applicable when cv is not an integer.
fit_params: Additional parameters that should be passed when calling fit on the estimator
Returns
-------
    result: a tuple of the mean score across folds, the mean log loss across folds, and the mean execution time per fold
"""
if isinstance(cv, int):
cv = StratifiedKFold(cv)
if args_to_scorer is None:
args_to_scorer = {}
if args_to_cv is None:
args_to_cv = {}
scorer = check_scoring(estimator, scoring=scoring)
cv_results: List[float] = []
log_loss_results = []
time_results = []
for train, test in cv.split(X, y, **args_to_cv):
X_train, y_train = split_with_schemas(estimator, X, y, train)
X_test, y_test = split_with_schemas(estimator, X, y, test, train)
start = time.time()
# Not calling sklearn.base.clone() here, because:
# (1) For Lale pipelines, clone() calls the pipeline constructor
# with edges=None, so the resulting topology is incorrect.
# (2) For Lale individual operators, the fit() method already
# clones the impl object, so cloning again is redundant.
trained = estimator.fit(X_train, y_train, **fit_params)
score_value = scorer(trained, X_test, y_test, **args_to_scorer)
execution_time = time.time() - start
        # not all estimators have predict_proba
try:
y_pred_proba = trained.predict_proba(X_test)
logloss = log_loss(y_true=y_test, y_pred=y_pred_proba)
log_loss_results.append(logloss)
except BaseException:
logger.debug("Warning, log loss cannot be computed")
cv_results.append(score_value)
time_results.append(execution_time)
result = (
np.array(cv_results).mean(),
np.array(log_loss_results).mean(),
np.array(time_results).mean(),
)
return result
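# Illustrative usage sketch added by the editor (not part of the original module).
# It assumes lale.lib.sklearn.LogisticRegression and scikit-learn's iris dataset;
# the helper name _example_cross_val_score_track_trials is hypothetical.
def _example_cross_val_score_track_trials():
    from sklearn.datasets import load_iris

    from lale.lib.sklearn import LogisticRegression

    X, y = load_iris(return_X_y=True)
    mean_score, mean_log_loss, mean_time = cross_val_score_track_trials(
        LogisticRegression(), X, y, scoring="accuracy", cv=3
    )
    return mean_score, mean_log_loss, mean_time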
def cross_val_score(estimator, X, y=None, scoring: Any = accuracy_score, cv: Any = 5):
"""
Use the given estimator to perform fit and predict for splits defined by 'cv' and compute the given score on
each of the splits.
Parameters
----------
estimator: A valid sklearn_wrapper estimator
X: Valid data value that works with the estimator
y: Valid target value that works with the estimator
scoring: a scorer object from sklearn.metrics (https://scikit-learn.org/stable/modules/classes.html#module-sklearn.metrics)
Default value is accuracy_score.
cv: an integer or an object that has a split function as a generator yielding (train, test) splits as arrays of indices.
Integer value is used as number of folds in sklearn.model_selection.StratifiedKFold, default is 5.
Note that any of the iterators from https://scikit-learn.org/stable/modules/cross_validation.html#cross-validation-iterators can be used here.
Returns
-------
cv_results: a list of scores corresponding to each cross validation fold
"""
if isinstance(cv, int):
cv = StratifiedKFold(cv)
cv_results = []
for train, test in cv.split(X, y):
X_train, y_train = split_with_schemas(estimator, X, y, train)
X_test, y_test = split_with_schemas(estimator, X, y, test, train)
trained_estimator = estimator.fit(X_train, y_train)
predicted_values = trained_estimator.predict(X_test)
cv_results.append(scoring(y_test, predicted_values))
return cv_results
def create_individual_op_using_reflection(class_name, operator_name, param_dict):
instance = None
if class_name is not None:
class_name_parts = class_name.split(".")
assert (
len(class_name_parts)
) > 1, (
"The class name needs to be fully qualified, i.e. module name + class name"
)
module_name = ".".join(class_name_parts[0:-1])
class_name = class_name_parts[-1]
module = importlib.import_module(module_name)
class_ = getattr(module, class_name)
if param_dict is None:
instance = class_()
else:
instance = class_(**param_dict)
return instance
if TYPE_CHECKING:
import lale.operators
def to_graphviz(
lale_operator: "lale.operators.Operator",
ipython_display: bool = True,
call_depth: int = 1,
**dot_graph_attr,
):
import lale.json_operator
import lale.operators
import lale.visualize
if not isinstance(lale_operator, lale.operators.Operator):
raise TypeError("The input to to_graphviz needs to be a valid LALE operator.")
jsn = lale.json_operator.to_json(lale_operator, call_depth=call_depth + 1)
dot = lale.visualize.json_to_graphviz(jsn, ipython_display, dot_graph_attr)
return dot
def instantiate_from_hyperopt_search_space(obj_hyperparams, new_hyperparams):
if isinstance(new_hyperparams, dict) and LALE_NESTED_SPACE_KEY in new_hyperparams:
sub_params = new_hyperparams[LALE_NESTED_SPACE_KEY]
sub_op = obj_hyperparams
if isinstance(sub_op, list):
if len(sub_op) == 1:
sub_op = sub_op[0]
else:
step_index, step_params = list(sub_params)[0]
if step_index < len(sub_op):
sub_op = sub_op[step_index]
sub_params = step_params
return create_instance_from_hyperopt_search_space(sub_op, sub_params)
elif isinstance(new_hyperparams, (list, tuple)):
assert isinstance(obj_hyperparams, (list, tuple))
params_len = len(new_hyperparams)
assert params_len == len(obj_hyperparams)
res: Optional[List[Any]] = None
for i in range(params_len):
nhi = new_hyperparams[i]
ohi = obj_hyperparams[i]
updated_params = instantiate_from_hyperopt_search_space(ohi, nhi)
if updated_params is not None:
if res is None:
res = list(new_hyperparams)
res[i] = updated_params
if res is not None:
if isinstance(obj_hyperparams, tuple):
return tuple(res)
else:
return res
# workaround for what seems to be a hyperopt bug
# where hyperopt returns a tuple even though the
# hyperopt search space specifies a list
is_obj_tuple = isinstance(obj_hyperparams, tuple)
is_new_tuple = isinstance(new_hyperparams, tuple)
if is_obj_tuple != is_new_tuple:
if is_obj_tuple:
return tuple(new_hyperparams)
else:
return list(new_hyperparams)
return None
elif isinstance(new_hyperparams, dict):
assert isinstance(obj_hyperparams, dict)
for k, sub_params in new_hyperparams.items():
if k in obj_hyperparams:
sub_op = obj_hyperparams[k]
updated_params = instantiate_from_hyperopt_search_space(
sub_op, sub_params
)
if updated_params is not None:
new_hyperparams[k] = updated_params
return None
else:
return None
def create_instance_from_hyperopt_search_space(
lale_object, hyperparams
) -> "lale.operators.Operator":
"""
    Hyperparams is an n-tuple of dictionaries of hyper-parameters, each
dictionary corresponds to an operator in the pipeline
"""
# lale_object can either be an individual operator, a pipeline or an operatorchoice
# Validate that the number of elements in the n-tuple is the same
# as the number of steps in the current pipeline
from lale.operators import (
BasePipeline,
OperatorChoice,
PlannedIndividualOp,
TrainableOperator,
TrainablePipeline,
)
if isinstance(lale_object, PlannedIndividualOp):
new_hyperparams: Dict[str, Any] = dict_without(hyperparams, "name")
hps = lale_object.hyperparams()
if hps:
obj_hyperparams = dict(hps)
else:
obj_hyperparams = {}
for k, sub_params in new_hyperparams.items():
if k in obj_hyperparams:
sub_op = obj_hyperparams[k]
updated_params = instantiate_from_hyperopt_search_space(
sub_op, sub_params
)
if updated_params is not None:
new_hyperparams[k] = updated_params
all_hyperparams = {**obj_hyperparams, **new_hyperparams}
return lale_object(**all_hyperparams)
elif isinstance(lale_object, BasePipeline):
steps = lale_object.steps_list()
if len(hyperparams) != len(steps):
raise ValueError(
"The number of steps in the hyper-parameter space does not match the number of steps in the pipeline."
)
op_instances = []
edges = lale_object.edges()
# op_map:Dict[PlannedOpType, TrainableOperator] = {}
op_map = {}
for op_index, sub_params in enumerate(hyperparams):
sub_op = steps[op_index]
op_instance = create_instance_from_hyperopt_search_space(sub_op, sub_params)
assert isinstance(op_instance, TrainableOperator)
assert (
isinstance(sub_op, OperatorChoice)
or sub_op.class_name() == op_instance.class_name()
), f"sub_op {sub_op.class_name()}, op_instance {op_instance.class_name()}"
op_instances.append(op_instance)
op_map[sub_op] = op_instance
# trainable_edges:List[Tuple[TrainableOperator, TrainableOperator]]
try:
trainable_edges = [(op_map[x], op_map[y]) for (x, y) in edges]
except KeyError as e:
raise ValueError(
"An edge was found with an endpoint that is not a step (" + str(e) + ")"
) from e
return TrainablePipeline(op_instances, trainable_edges, ordered=True) # type: ignore
elif isinstance(lale_object, OperatorChoice):
# Hyperopt search space for an OperatorChoice is generated as a dictionary with a single element
# corresponding to the choice made, the only key is the index of the step and the value is
# the params corresponding to that step.
step_index: int
choices = lale_object.steps_list()
if len(choices) == 1:
step_index = 0
else:
step_index_str, hyperparams = list(hyperparams.items())[0]
step_index = int(step_index_str)
step_object = choices[step_index]
return create_instance_from_hyperopt_search_space(step_object, hyperparams)
else:
assert False, f"Unknown operator type: {type(lale_object)}"
def find_lale_wrapper(sklearn_obj: Any) -> Optional[Any]:
"""
:param sklearn_obj: An sklearn compatible object that may have a lale wrapper
:return: The lale wrapper type, or None if one could not be found
"""
from .operator_wrapper import get_lale_wrapper_modules
module_names = get_lale_wrapper_modules()
class_name = sklearn_obj.__class__.__name__
for module_name in module_names:
try:
module = importlib.import_module(module_name)
except ModuleNotFoundError:
continue
try:
class_ = getattr(module, class_name)
return class_
except AttributeError:
continue
return None
def _import_from_sklearn_inplace_helper(
sklearn_obj, fitted: bool = True, is_nested=False
):
"""
    This method takes an object and tries to wrap sklearn objects
(at the top level or contained within hyperparameters of other
sklearn objects).
It will modify the object to add in the appropriate lale wrappers.
It may also return a wrapper or different object than given.
:param sklearn_obj: the object that we are going to try and wrap
:param fitted: should we return a TrainedOperator
    :param is_nested: is this a nested invocation (which allows for returning
a Trainable operator even if fitted is set to True)
"""
@overload
def import_nested_params(
orig_hyperparams: dict, partial_dict: bool
) -> Optional[dict]:
...
@overload
def import_nested_params(orig_hyperparams: Any, partial_dict: bool) -> Any:
...
def import_nested_params(orig_hyperparams: Any, partial_dict: bool = False):
"""
look through lists/tuples/dictionaries for sklearn compatible objects to import.
:param orig_hyperparams: the input to recursively look through for sklearn compatible objects
:param partial_dict: If this is True and the input is a dictionary, the returned dictionary will only have the
keys with modified values
:return: Either a modified version of the input or None if nothing was changed
"""
if isinstance(orig_hyperparams, (tuple, list)):
new_list: list = []
list_modified: bool = False
for e in orig_hyperparams:
new_e = import_nested_params(e, partial_dict=False)
if new_e is None:
new_list.append(e)
else:
new_list.append(new_e)
list_modified = True
if not list_modified:
return None
if isinstance(orig_hyperparams, tuple):
return tuple(new_list)
else:
return new_list
if isinstance(orig_hyperparams, dict):
new_dict: dict = {}
dict_modified: bool = False
for k, v in orig_hyperparams.items():
new_v = import_nested_params(v, partial_dict=False)
if new_v is None:
if not partial_dict:
new_dict[k] = v
else:
new_dict[k] = new_v
dict_modified = True
if not dict_modified:
return None
return new_dict
if isinstance(orig_hyperparams, object) and hasattr(
orig_hyperparams, "get_params"
):
newobj = _import_from_sklearn_inplace_helper(
orig_hyperparams, fitted=fitted, is_nested=True
) # allow nested_op to be trainable
if newobj is orig_hyperparams:
return None
return newobj
return None
if sklearn_obj is None:
return None
if isinstance(sklearn_obj, lale.operators.TrainedIndividualOp):
        # if fitted=False, we may want to return a TrainedIndividualOp
return sklearn_obj
# if the object is a trainable operator, we clean that up
if isinstance(sklearn_obj, lale.operators.TrainableIndividualOp) and hasattr(
sklearn_obj, "_trained"
):
if fitted:
# get rid of the indirection, and just return the trained operator directly
return sklearn_obj._trained
else:
# since we are not supposed to be trained, delete the trained part
delattr(sklearn_obj, "_trained") # delete _trained before returning
return sklearn_obj
if isinstance(sklearn_obj, lale.operators.Operator):
if (
fitted and is_nested or not hasattr(sklearn_obj._impl_instance(), "fit")
): # Operators such as NoOp do not have a fit, so return them as is.
return sklearn_obj
if fitted:
raise ValueError(
f"""The input pipeline has an operator {sklearn_obj} that is not trained and fitted is set to True,
please pass fitted=False if you want a trainable pipeline as output."""
)
# the lale operator is not trained and fitted=False
return sklearn_obj
# special case for FeatureUnion.
# An alternative would be to (like for sklearn pipeline)
# create a lale wrapper for the sklearn feature union
# as a higher order operator
# and then the special case would be just to throw away the outer wrapper
# Note that lale union does not currently support weights or other features of feature union.
if isinstance(sklearn_obj, sklearn.pipeline.FeatureUnion):
transformer_list = sklearn_obj.transformer_list
concat_predecessors = [
_import_from_sklearn_inplace_helper(
transformer[1], fitted=fitted, is_nested=is_nested
)
for transformer in transformer_list
]
return lale.operators.make_union(*concat_predecessors)
if not hasattr(sklearn_obj, "get_params"):
# if it does not have a get_params method,
# then we just return it without trying to wrap it
return sklearn_obj
class_ = find_lale_wrapper(sklearn_obj)
if not class_:
return sklearn_obj # Return the original object
# next, we need to figure out what the right hyperparameters are
orig_hyperparams = sklearn_obj.get_params(deep=False)
hyperparams = import_nested_params(orig_hyperparams, partial_dict=True)
if hyperparams:
# if we have updated any of the hyperparameters then we modify them in the actual sklearn object
try:
new_obj = sklearn_obj.set_params(**hyperparams)
if new_obj is not None:
sklearn_obj = new_obj
except NotImplementedError:
# if the set_params method does not work, then do our best
pass
all_new_hyperparams = {**orig_hyperparams, **hyperparams}
else:
all_new_hyperparams = orig_hyperparams
# now, we get the lale operator for the wrapper, with the corresponding hyperparameters
if not fitted: # If fitted is False, we do not want to return a Trained operator.
lale_op_obj_base = class_
else:
lale_op_obj_base = lale.operators.TrainedIndividualOp(
class_._name,
class_._impl,
class_._schemas,
None,
_lale_trained=True,
)
lale_op_obj = lale_op_obj_base(**all_new_hyperparams)
from lale.lib.sklearn import Pipeline as LaleSKPipelineWrapper
    # If this is a scikit-learn pipeline, then we want to discard the outer wrapper
# and just return a lale pipeline
if isinstance(lale_op_obj, LaleSKPipelineWrapper): # type: ignore
return lale_op_obj.shallow_impl._pipeline
# at this point, the object's hyper-parameters are modified as needed
# and our wrapper is initialized with the correct hyperparameters.
# Now we need to replace the wrapper impl with our (possibly modified)
# sklearn object
cl_shallow_impl = lale_op_obj.shallow_impl
if hasattr(cl_shallow_impl, "_wrapped_model"):
cl_shallow_impl._wrapped_model = sklearn_obj
else:
lale_op_obj._impl = sklearn_obj
lale_op_obj._impl_class_ = sklearn_obj.__class__
return lale_op_obj
def import_from_sklearn(sklearn_obj: Any, fitted: bool = True, in_place: bool = False):
"""
    This method takes an object and tries to wrap sklearn objects
(at the top level or contained within hyperparameters of other
sklearn objects).
It will modify the object to add in the appropriate lale wrappers.
It may also return a wrapper or different object than given.
:param sklearn_obj: the object that we are going to try and wrap
:param fitted: should we return a TrainedOperator
:param in_place: should we try to mutate what we can in place, or should we
aggressively deepcopy everything
:return: The wrapped object (or the input object if we could not wrap it)
"""
obj = sklearn_obj
if in_place:
obj = sklearn_obj
else:
obj = copy.deepcopy(sklearn_obj)
return _import_from_sklearn_inplace_helper(obj, fitted=fitted, is_nested=False)
def import_from_sklearn_pipeline(sklearn_pipeline: Any, fitted: bool = True):
"""
Note: Same as import_from_sklearn. This alternative name exists for backwards compatibility.
    This method takes an object and tries to wrap sklearn objects
(at the top level or contained within hyperparameters of other
sklearn objects).
It will modify the object to add in the appropriate lale wrappers.
It may also return a wrapper or different object than given.
:param sklearn_pipeline: the object that we are going to try and wrap
:param fitted: should we return a TrainedOperator
:return: The wrapped object (or the input object if we could not wrap it)
"""
op = import_from_sklearn(sklearn_pipeline, fitted=fitted, in_place=False)
from typing import cast
from lale.operators import TrainableOperator
# simplify using the returned value in the common case
return cast(TrainableOperator, op)
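# Illustrative usage sketch added by the editor (not part of the original module).
# It assumes scikit-learn's PCA and LogisticRegression plus the iris dataset;
# the helper name _example_import_from_sklearn is hypothetical.
def _example_import_from_sklearn():
    import sklearn.decomposition
    import sklearn.linear_model
    from sklearn.datasets import load_iris
    from sklearn.pipeline import make_pipeline as sk_make_pipeline

    X, y = load_iris(return_X_y=True)
    skl_obj = sk_make_pipeline(
        sklearn.decomposition.PCA(n_components=2),
        sklearn.linear_model.LogisticRegression(),
    )
    skl_obj.fit(X, y)
    # expected to come back as a trained lale pipeline wrapping the fitted steps
    lale_pipeline = import_from_sklearn(skl_obj, fitted=True)
    return lale_pipeline.predict(X)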
class val_wrapper:
"""This is used to wrap values that cause problems for hyper-optimizer backends
lale will unwrap these when given them as the value of a hyper-parameter"""
def __init__(self, base):
self._base = base
def unwrap_self(self):
return self._base
@classmethod
def unwrap(cls, obj):
if isinstance(obj, cls):
return cls.unwrap(obj.unwrap_self())
else:
return obj
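# Illustrative usage sketch added by the editor (not part of the original module);
# the helper name _example_val_wrapper is hypothetical.
def _example_val_wrapper():
    wrapped = val_wrapper([0, 1, 2])
    assert val_wrapper.unwrap(wrapped) == [0, 1, 2]
    # non-wrapped values pass through unchanged
    assert val_wrapper.unwrap("plain") == "plain"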
def append_batch(data, batch_data):
if data is None:
return batch_data
elif isinstance(data, np.ndarray):
if isinstance(batch_data, np.ndarray):
if len(data.shape) == 1 and len(batch_data.shape) == 1:
return np.concatenate([data, batch_data])
else:
return np.vstack((data, batch_data))
elif isinstance(data, tuple):
X, y = data
if isinstance(batch_data, tuple):
batch_X, batch_y = batch_data
X = append_batch(X, batch_X)
y = append_batch(y, batch_y)
return X, y
elif torch_installed and isinstance(data, torch.Tensor):
if isinstance(batch_data, torch.Tensor):
return torch.cat((data, batch_data))
elif isinstance(data, (pd.Series, pd.DataFrame)):
return pd.concat([data, batch_data], axis=0)
try:
import h5py
if isinstance(data, h5py.File):
if isinstance(batch_data, tuple):
batch_X, batch_y = batch_data
except ModuleNotFoundError:
pass
    raise ValueError(
        f"{type(data)} is unsupported. Supported types are np.ndarray, torch.Tensor, pandas Series/DataFrame, tuples of these, and h5py files"
    )
def create_data_loader(
X: Any,
y: Any = None,
batch_size: int = 1,
num_workers: int = 0,
shuffle: bool = True,
):
"""A function that takes a dataset as input and outputs a Pytorch dataloader.
Parameters
----------
X : Input data.
The formats supported are Pandas DataFrame, Numpy array,
a sparse matrix, torch.tensor, torch.utils.data.Dataset, path to a HDF5 file,
lale.util.batch_data_dictionary_dataset.BatchDataDict,
a Python dictionary of the format `{"dataset": torch.utils.data.Dataset,
"collate_fn":collate_fn for torch.utils.data.DataLoader}`
y : Labels., optional
Supported formats are Numpy array or Pandas series, by default None
batch_size : int, optional
Number of samples in each batch, by default 1
num_workers : int, optional
Number of workers used by the data loader, by default 0
shuffle: boolean, optional, default True
Whether to use SequentialSampler or RandomSampler for creating batches
Returns
-------
torch.utils.data.DataLoader
Raises
------
TypeError
Raises a TypeError if the input format is not supported.
"""
from torch.utils.data import DataLoader, Dataset, TensorDataset
from lale.util.batch_data_dictionary_dataset import BatchDataDict
from lale.util.hdf5_to_torch_dataset import HDF5TorchDataset
from lale.util.numpy_torch_dataset import NumpyTorchDataset, numpy_collate_fn
from lale.util.pandas_torch_dataset import PandasTorchDataset, pandas_collate_fn
collate_fn = None
worker_init_fn = None
if isinstance(X, Dataset) and not isinstance(X, BatchDataDict):
dataset = X
elif isinstance(X, pd.DataFrame):
dataset = PandasTorchDataset(X, y)
collate_fn = pandas_collate_fn
elif isinstance(X, scipy.sparse.csr_matrix):
# unfortunately, NumpyTorchDataset won't accept a subclass of np.ndarray
X = X.toarray() # type: ignore
if isinstance(y, lale.datasets.data_schemas.NDArrayWithSchema):
y = y.view(np.ndarray)
dataset = NumpyTorchDataset(X, y)
collate_fn = numpy_collate_fn
elif isinstance(X, np.ndarray):
# unfortunately, NumpyTorchDataset won't accept a subclass of np.ndarray
if isinstance(X, lale.datasets.data_schemas.NDArrayWithSchema):
X = X.view(np.ndarray)
if isinstance(y, lale.datasets.data_schemas.NDArrayWithSchema):
y = y.view(np.ndarray)
dataset = NumpyTorchDataset(X, y)
collate_fn = numpy_collate_fn
elif isinstance(X, str): # Assume that this is path to hdf5 file
dataset = HDF5TorchDataset(X)
elif isinstance(X, BatchDataDict):
dataset = X
def my_collate_fn(batch):
return batch[
0
] # because BatchDataDict's get_item returns a batch, so no collate is required.
return DataLoader(
dataset, batch_size=1, collate_fn=my_collate_fn, shuffle=shuffle
)
elif isinstance(X, dict): # Assumed that it is data indexed by batch number
if "dataset" in X:
dataset = X["dataset"]
collate_fn = X.get("collate_fn", None)
worker_init_fn = getattr(dataset, "worker_init_fn", None)
else:
return [X]
elif isinstance(X, torch.Tensor) and y is not None:
if isinstance(y, np.ndarray):
y = torch.from_numpy(y)
dataset = TensorDataset(X, y)
elif isinstance(X, torch.Tensor):
dataset = TensorDataset(X)
else:
raise TypeError(
f"Can not create a data loader for a dataset with type {type(X)}"
)
return DataLoader(
dataset,
batch_size=batch_size,
collate_fn=collate_fn,
num_workers=num_workers,
worker_init_fn=worker_init_fn,
shuffle=shuffle,
)
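# Illustrative usage sketch added by the editor (not part of the original module).
# It assumes PyTorch is installed; each batch is expected to be an (X, y) pair
# produced by numpy_collate_fn. The helper name _example_create_data_loader is hypothetical.
def _example_create_data_loader():
    X = np.arange(20, dtype=float).reshape(10, 2)
    y = np.arange(10)
    loader = create_data_loader(X, y, batch_size=4, shuffle=False)
    return list(loader)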
def write_batch_output_to_file(
file_obj,
file_path,
total_len,
batch_idx,
batch_X,
batch_y,
batch_out_X,
batch_out_y,
):
if file_obj is None and file_path is None:
raise ValueError("Only one of the file object or file path can be None.")
if file_obj is None:
import h5py
file_obj = h5py.File(file_path, "w")
# estimate the size of the dataset based on the first batch output size
transform_ratio = int(len(batch_out_X) / len(batch_X))
if len(batch_out_X.shape) == 1:
h5_data_shape = (transform_ratio * total_len,)
elif len(batch_out_X.shape) == 2:
h5_data_shape = (transform_ratio * total_len, batch_out_X.shape[1])
elif len(batch_out_X.shape) == 3:
h5_data_shape = (
transform_ratio * total_len,
batch_out_X.shape[1],
batch_out_X.shape[2],
)
else:
raise ValueError(
"batch_out_X is expected to be a 1-d, 2-d or 3-d array. Any other data types are not handled."
)
dataset = file_obj.create_dataset(
name="X", shape=h5_data_shape, chunks=True, compression="gzip"
)
if batch_out_y is None and batch_y is not None:
batch_out_y = batch_y
if batch_out_y is not None:
if len(batch_out_y.shape) == 1:
h5_labels_shape = (transform_ratio * total_len,)
elif len(batch_out_y.shape) == 2:
h5_labels_shape = (transform_ratio * total_len, batch_out_y.shape[1])
else:
raise ValueError(
"batch_out_y is expected to be a 1-d or 2-d array. Any other data types are not handled."
)
dataset = file_obj.create_dataset(
name="y", shape=h5_labels_shape, chunks=True, compression="gzip"
)
dataset = file_obj["X"]
dataset[
batch_idx * len(batch_out_X) : (batch_idx + 1) * len(batch_out_X)
] = batch_out_X
if batch_out_y is not None or batch_y is not None:
labels = file_obj["y"]
if batch_out_y is not None:
labels[
batch_idx * len(batch_out_y) : (batch_idx + 1) * len(batch_out_y)
] = batch_out_y
else:
labels[batch_idx * len(batch_y) : (batch_idx + 1) * len(batch_y)] = batch_y
return file_obj
def add_missing_values(orig_X, missing_rate=0.1, seed=None):
# see scikit-learn.org/stable/auto_examples/impute/plot_missing_values.html
n_samples, n_features = orig_X.shape
n_missing_samples = int(n_samples * missing_rate)
if seed is None:
rng = np.random.RandomState()
else:
rng = np.random.RandomState(seed)
missing_samples = np.zeros(n_samples, dtype=bool)
missing_samples[:n_missing_samples] = True
rng.shuffle(missing_samples)
missing_features = rng.randint(0, n_features, n_missing_samples)
missing_X = orig_X.copy()
if isinstance(missing_X, np.ndarray):
missing_X[missing_samples, missing_features] = np.nan
else:
assert isinstance(missing_X, pd.DataFrame)
i_missing_sample = 0
for i_sample in range(n_samples):
if missing_samples[i_sample]:
i_feature = missing_features[i_missing_sample]
i_missing_sample += 1
missing_X.iloc[i_sample, i_feature] = np.nan
return missing_X
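# Illustrative usage sketch added by the editor (not part of the original module);
# the helper name _example_add_missing_values is hypothetical.
def _example_add_missing_values():
    orig_X = np.ones((10, 3))
    missing_X = add_missing_values(orig_X, missing_rate=0.3, seed=42)
    # 10 samples * 0.3 missing rate -> exactly 3 cells become NaN
    assert np.isnan(missing_X).sum() == 3
    return missing_X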
# helpers for manipulating (extended) sklearn style paths.
# documentation of the path format is part of the operators module docstring
def partition_sklearn_params(
d: Dict[str, Any]
) -> Tuple[Dict[str, Any], Dict[str, Dict[str, Any]]]:
sub_parts: Dict[str, Dict[str, Any]] = {}
main_parts: Dict[str, Any] = {}
for k, v in d.items():
ks = k.split("__", 1)
if len(ks) == 1:
assert k not in main_parts
main_parts[k] = v
else:
assert len(ks) == 2
bucket: Dict[str, Any] = {}
group: str = ks[0]
param: str = ks[1]
if group in sub_parts:
bucket = sub_parts[group]
else:
sub_parts[group] = bucket
assert param not in bucket
bucket[param] = v
return (main_parts, sub_parts)
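# Illustrative usage sketch added by the editor (not part of the original module);
# the helper name _example_partition_sklearn_params is hypothetical.
def _example_partition_sklearn_params():
    main, nested = partition_sklearn_params({"n_jobs": 1, "pca__n_components": 2})
    assert main == {"n_jobs": 1}
    assert nested == {"pca": {"n_components": 2}}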
def partition_sklearn_choice_params(d: Dict[str, Any]) -> Tuple[int, Dict[str, Any]]:
discriminant_value: int = -1
choice_parts: Dict[str, Any] = {}
for k, v in d.items():
if k == discriminant_name:
assert discriminant_value == -1
discriminant_value = int(v)
else:
k_rest = unnest_choice(k)
choice_parts[k_rest] = v
assert discriminant_value != -1
return (discriminant_value, choice_parts)
DUMMY_SEARCH_SPACE_GRID_PARAM_NAME: str = "$"
discriminant_name: str = "?"
choice_prefix: str = "?"
structure_type_name: str = "#"
structure_type_list: str = "list"
structure_type_tuple: str = "tuple"
structure_type_dict: str = "dict"
def get_name_and_index(name: str) -> Tuple[str, int]:
"""given a name of the form "name@i", returns (name, i)
if given a name of the form "name", returns (name, 0)
"""
splits = name.split("@", 1)
if len(splits) == 1:
return splits[0], 0
else:
return splits[0], int(splits[1])
def make_degen_indexed_name(name, index):
return f"{name}@{index}"
def make_indexed_name(name, index):
if index == 0:
return name
else:
return f"{name}@{index}"
def make_array_index_name(index, is_tuple: bool = False):
sep = "##" if is_tuple else "#"
return f"{sep}{str(index)}"
def is_numeric_structure(structure_type: str):
if structure_type in ["list", "tuple"]:
return True
elif structure_type == "dict":
return False
else:
assert False, f"Unknown structure type {structure_type} found"
V = TypeVar("V")
def nest_HPparam(name: str, key: str):
if key == DUMMY_SEARCH_SPACE_GRID_PARAM_NAME:
# we can get rid of the dummy now, since we have a name for it
return name
return name + "__" + key
def nest_HPparams(name: str, grid: Mapping[str, V]) -> Dict[str, V]:
return {(nest_HPparam(name, k)): v for k, v in grid.items()}
def nest_all_HPparams(
name: str, grids: Iterable[Mapping[str, V]]
) -> List[Dict[str, V]]:
"""Given the name of an operator in a pipeline, this transforms every key(parameter name) in the grids
to use the operator name as a prefix (separated by __). This is the convention in scikit-learn pipelines.
"""
return [nest_HPparams(name, grid) for grid in grids]
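# Illustrative usage sketch added by the editor (not part of the original module);
# the helper name _example_nest_all_HPparams is hypothetical.
def _example_nest_all_HPparams():
    grids = [{"n_components": 2}, {"whiten": True}]
    assert nest_all_HPparams("pca", grids) == [
        {"pca__n_components": 2},
        {"pca__whiten": True},
    ]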
def nest_choice_HPparam(key: str):
return choice_prefix + key
def nest_choice_HPparams(grid: Mapping[str, V]) -> Dict[str, V]:
return {(nest_choice_HPparam(k)): v for k, v in grid.items()}
def nest_choice_all_HPparams(grids: Iterable[Mapping[str, V]]) -> List[Dict[str, V]]:
"""this transforms every key(parameter name) in the grids
to be nested under a choice, using a ? as a prefix (separated by __). This is the convention in scikit-learn pipelines.
"""
return [nest_choice_HPparams(grid) for grid in grids]
def unnest_choice(k: str) -> str:
assert k.startswith(choice_prefix)
return k[len(choice_prefix) :]
def unnest_HPparams(k: str) -> List[str]:
return k.split("__")
def are_hyperparameters_equal(hyperparam1, hyperparam2):
if isinstance(
hyperparam1, np.ndarray
): # hyperparam2 is from schema default, so it may not always be an array
return np.all(hyperparam1 == hyperparam2)
else:
return hyperparam1 == hyperparam2
def _is_ast_subscript(expr):
return isinstance(expr, ast.Subscript)
def _is_ast_attribute(expr):
return isinstance(expr, ast.Attribute)
def _is_ast_constant(expr):
return isinstance(expr, ast.Constant)
def _is_ast_subs_or_attr(expr):
return isinstance(expr, (ast.Subscript, ast.Attribute))
def _is_ast_call(expr):
return isinstance(expr, ast.Call)
def _is_ast_name(expr):
return isinstance(expr, ast.Name)
def _ast_func_id(expr):
if isinstance(expr, ast.Name):
return expr.id
else:
raise ValueError("function name expected")
def _is_df(df):
return _is_pandas_df(df) or _is_spark_df(df)
def _is_pandas_series(df):
return isinstance(df, pd.Series)
def _is_pandas_df(df):
return isinstance(df, pd.DataFrame)
def _is_pandas(df):
return isinstance(df, (pd.Series, pd.DataFrame))
def _is_spark_df(df):
if spark_installed:
return isinstance(df, lale.datasets.data_schemas.SparkDataFrameWithIndex)
else:
return False
def _is_spark_df_without_index(df):
if spark_installed:
return isinstance(df, spark_df) and not _is_spark_df(df)
else:
return False
def _ensure_pandas(df) -> pd.DataFrame:
if _is_spark_df(df):
return df.toPandas()
assert _is_pandas(df), type(df)
return df
def _get_subscript_value(subscript_expr):
    if isinstance(subscript_expr.slice, ast.Constant):  # for Python 3.9 and later
subscript_value = subscript_expr.slice.value
else:
subscript_value = subscript_expr.slice.value.s # type: ignore
return subscript_value
class GenSym:
def __init__(self, names: Set[str]):
self._names = names
def __call__(self, prefix):
if prefix in self._names:
suffix = 0
while True:
result = f"{prefix}_{suffix}"
if result not in self._names:
break
suffix += 1
else:
result = prefix
self._names |= {result}
return result
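# Illustrative usage sketch added by the editor (not part of the original module);
# the helper name _example_gensym is hypothetical.
def _example_gensym():
    gensym = GenSym({"pipeline"})
    assert gensym("pipeline") == "pipeline_0"  # name taken, so a numeric suffix is added
    assert gensym("pca") == "pca"  # fresh name is returned unchanged
    assert gensym("pca") == "pca_0"  # now taken, so the next request is suffixed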
def get_sklearn_estimator_name() -> str:
"""Some higher order sklearn operators changed the name of the nested estimatator in later versions.
This returns the appropriate version dependent paramater name
"""
from packaging import version
import lale.operators
if lale.operators.sklearn_version < version.Version("1.2"):
return "base_estimator"
else:
return "estimator"
def get_estimator_param_name_from_hyperparams(hyperparams):
be = hyperparams.get("base_estimator", "deprecated")
if be == "deprecated" or (be is None and "estimator" in hyperparams):
return "estimator"
else:
return "base_estimator"
| 46,947 | 34.459215 | 150 |
py
|
lale
|
lale-master/lale/pretty_print.py
|
# Copyright 2019-2022 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ast
import importlib
import keyword
import logging
import math
import pprint
import re
from typing import Any, Dict, List, Optional, Set, Tuple, Union, cast
import black
import numpy as np
import sklearn.metrics
import lale.expressions
import lale.helpers
import lale.json_operator
import lale.operators
import lale.type_checking
logger = logging.getLogger(__name__)
JSON_TYPE = Dict[str, Any]
_black78 = black.FileMode(line_length=78)
class _CodeGenState:
imports: List[str]
assigns: List[str]
external_wrapper_modules: List[str]
def __init__(
self,
names: Set[str],
combinators: bool,
assign_nested: bool,
customize_schema: bool,
astype: str,
):
self.imports = []
self.assigns = []
self.external_wrapper_modules = []
self.combinators = combinators
self.assign_nested = assign_nested
self.customize_schema = customize_schema
self.astype = astype
self.gensym = lale.helpers.GenSym(
{
"make_pipeline_graph",
"lale",
"make_choice",
"make_pipeline",
"make_union",
"make_union_no_concat",
"np",
"pd",
"pipeline",
}
| set(keyword.kwlist)
| names
)
def hyperparams_to_string(
hps: JSON_TYPE,
steps: Optional[Dict[str, str]] = None,
gen: Optional[_CodeGenState] = None,
) -> str:
def sklearn_module(value):
module = value.__module__
if module.startswith("sklearn."):
i = module.rfind(".")
if module[i + 1] == "_":
module = module[:i]
return module
def value_to_string(value):
if isinstance(value, dict):
if "$ref" in value and steps is not None:
step_uid = value["$ref"].split("/")[-1]
return steps[step_uid]
else:
sl = {f"'{k}': {value_to_string(v)}" for k, v in value.items()}
return "{" + ", ".join(sl) + "}"
elif isinstance(value, tuple):
sl = [value_to_string(v) for v in value]
return "(" + ", ".join(sl) + ")"
elif isinstance(value, list):
sl = [value_to_string(v) for v in value]
return "[" + ", ".join(sl) + "]"
elif isinstance(value, range):
return str(value)
elif isinstance(value, (int, float)) and math.isnan(value):
return "float('nan')"
elif isinstance(value, np.dtype):
if gen is not None:
gen.imports.append("import numpy as np")
return f"np.{repr(value)}"
elif isinstance(value, np.ndarray):
if gen is not None:
gen.imports.append("import numpy as np")
array_expr = f"np.{repr(value)}"
# For an array string representation, numpy includes dtype for some data types
# we need to insert "np." for the dtype so that executing the pretty printed code
# does not give any error for the dtype. The following code manipulates the
# string representation given by numpy to add "np." for dtype.
dtype_indx = array_expr.find("dtype")
if dtype_indx != -1:
array_dtype_expr = array_expr[dtype_indx:]
dtype_name = array_dtype_expr.split("=")[1]
return array_expr[:dtype_indx] + "dtype=np." + dtype_name
return array_expr
elif isinstance(value, np.ufunc):
if gen is not None:
gen.imports.append("import numpy as np")
return f"np.{value.__name__}" # type: ignore
elif isinstance(value, lale.expressions.Expr):
v: lale.expressions.Expr = value
e = v.expr
if gen is not None:
gen.imports.append("from lale.expressions import it")
for node in ast.walk(e):
if isinstance(node, ast.Call):
f: Any = node.func
gen.imports.append("from lale.expressions import " + f.id)
return str(value)
elif hasattr(value, "__module__") and hasattr(value, "__name__"):
modules = {"numpy": "np", "pandas": "pd"}
module = modules.get(value.__module__, value.__module__)
if gen is not None:
if value.__module__ == module:
gen.imports.append(f"import {module}")
else:
gen.imports.append(f"import {value.__module__} as {module}")
return f"{module}.{value.__name__}" # type: ignore
elif hasattr(value, "get_params"):
module = sklearn_module(value)
if gen is not None:
gen.imports.append(f"import {module}")
actuals = value.get_params(False) # type: ignore
defaults = lale.type_checking.get_hyperparam_defaults(value)
non_defaults = {
k: v
for k, v in actuals.items()
if k not in defaults or defaults[k] != v
}
kwargs_string = hyperparams_to_string(non_defaults, steps, gen)
printed = f"{module}.{value.__class__.__name__}({kwargs_string})"
return printed
elif hasattr(sklearn.metrics, "_scorer") and isinstance(
value, sklearn.metrics._scorer._BaseScorer
):
if gen is not None:
gen.imports.append("import sklearn.metrics")
func = value._score_func # type: ignore
module = sklearn_module(func)
if gen is not None:
gen.imports.append(f"import {module}")
func_string = f"{module}.{func.__name__}"
sign_strings = [] if value._sign > 0 else ["greater_is_better=False"] # type: ignore
kwargs_strings = [
f"{k}={value_to_string(v)}" for k, v in value._kwargs.items() # type: ignore
]
args_strings = [func_string, *sign_strings, *kwargs_strings]
printed = f"sklearn.metrics.make_scorer({', '.join(args_strings)})"
return printed
else:
printed = pprint.pformat(value, width=10000, compact=True)
if printed.endswith(")"):
m = re.match(r"(\w+)\(", printed)
if m:
module = value.__module__
if gen is not None:
gen.imports.append(f"import {module}")
printed = f"{module}.{printed}"
if printed.startswith("<"):
m = re.match(r"<(\w[\w.]*)\.(\w+) object at 0x[0-9a-fA-F]+>$", printed)
if m:
module, clazz = m.group(1), m.group(2)
if gen is not None:
gen.imports.append(f"import {module}")
# logger.warning(f"bare {clazz} with unknown constructor")
printed = f"{module}.{clazz}()"
return printed
strings = [f"{k}={value_to_string(v)}" for k, v in hps.items()]
return ", ".join(strings)
def _get_module_name(op_label: str, op_name: str, class_name: str) -> str:
def find_op(module_name, sym):
module = importlib.import_module(module_name)
if hasattr(module, sym):
op = getattr(module, sym)
if isinstance(op, lale.operators.IndividualOp):
if op.class_name() == class_name:
return op
elif hasattr(op, "__init__") and hasattr(op, "fit"):
if hasattr(op, "predict") or hasattr(op, "transform"):
return op
return None
mod_name_long = class_name[: class_name.rfind(".")]
if mod_name_long.rfind(".") == -1:
mod_name_short = mod_name_long
else:
mod_name_short = mod_name_long[: mod_name_long.rfind(".")]
unqualified = class_name[class_name.rfind(".") + 1 :]
if (
class_name.startswith("lale.")
and unqualified.startswith("_")
and unqualified.endswith("Impl")
):
unqualified = unqualified[1 : -len("Impl")]
op = find_op(mod_name_short, op_name)
if op is not None:
mod = mod_name_short
else:
op = find_op(mod_name_long, op_name)
if op is not None:
mod = mod_name_long
else:
op = find_op(mod_name_short, unqualified)
if op is not None:
mod = mod_name_short
else:
op = find_op(mod_name_long, unqualified)
if op is not None:
mod = mod_name_long
else:
assert False, (op_label, op_name, class_name)
assert op is not None, (op_label, op_name, class_name)
if isinstance(op, lale.operators.IndividualOp):
if "import_from" in op._schemas:
mod = op._schemas["import_from"]
return mod
def _get_wrapper_module_if_external(impl_class_name):
# If the lale operator was not found in the list of libraries registered with
    # lale, return the operator's (i.e., the wrapper's) module name.
    # This is passed to `wrap_imported_operators` in the output of `pretty_print`.
impl_name = impl_class_name[impl_class_name.rfind(".") + 1 :]
impl_module_name = impl_class_name[: impl_class_name.rfind(".")]
module = importlib.import_module(impl_module_name)
if hasattr(module, impl_name):
wrapped_model = getattr(module, impl_name)
wrapper = lale.operators.get_op_from_lale_lib(wrapped_model)
if wrapper is None:
# TODO: The assumption here is that the operator is created in the same
# module as where the impl is defined.
# Do we have a better way to know where `make_operator` is called from instead?
return impl_module_name
else:
return None
return None
def _op_kind(op: JSON_TYPE) -> str:
assert isinstance(op, dict)
if "kind" in op:
return op["kind"]
return lale.json_operator.json_op_kind(op)
_OP_KIND_TO_COMBINATOR = {"Seq": ">>", "Par": "&", "OperatorChoice": "|"}
_OP_KIND_TO_FUNCTION = {
"Seq": "make_pipeline",
"Par": "make_union_no_concat",
"OperatorChoice": "make_choice",
"Union": "make_union",
}
def _introduce_structure(pipeline: JSON_TYPE, gen: _CodeGenState) -> JSON_TYPE:
assert _op_kind(pipeline) == "Pipeline"
def make_graph(pipeline: JSON_TYPE) -> JSON_TYPE:
steps = pipeline["steps"]
preds: Dict[str, List[str]] = {step: [] for step in steps}
succs: Dict[str, List[str]] = {step: [] for step in steps}
for src, dst in pipeline["edges"]:
preds[dst].append(src)
succs[src].append(dst)
return {"kind": "Graph", "steps": steps, "preds": preds, "succs": succs}
def find_seq(
graph: JSON_TYPE,
) -> Optional[Tuple[Dict[str, JSON_TYPE], Dict[str, JSON_TYPE]]]:
for src in graph["steps"]:
if len(graph["succs"][src]) == 1:
dst = graph["succs"][src][0]
if len(graph["preds"][dst]) == 1:
old: Dict[str, JSON_TYPE] = {
uid: graph["steps"][uid] for uid in [src, dst]
}
new_uid = None
new_steps: Dict[str, JSON_TYPE] = {}
for step_uid, step_jsn in old.items():
if _op_kind(step_jsn) == "Seq": # flatten
new_steps.update(step_jsn["steps"])
if new_uid is None:
new_uid = step_uid
else:
new_steps[step_uid] = step_jsn
if new_uid is None:
new_uid = gen.gensym("pipeline")
new = {new_uid: {"kind": "Seq", "steps": new_steps}}
return old, new
return None
def find_par(
graph: JSON_TYPE,
) -> Optional[Tuple[Dict[str, JSON_TYPE], Dict[str, JSON_TYPE]]]:
step_uids = list(graph["steps"].keys())
for i0 in range(len(step_uids)): # pylint:disable=consider-using-enumerate
for i1 in range(i0 + 1, len(step_uids)):
s0, s1 = step_uids[i0], step_uids[i1]
preds0, preds1 = graph["preds"][s0], graph["preds"][s1]
if len(preds0) == len(preds1) and set(preds0) == set(preds1):
succs0, succs1 = graph["succs"][s0], graph["succs"][s1]
if len(succs0) == len(succs1) and set(succs0) == set(succs1):
old: Dict[str, JSON_TYPE] = {
uid: graph["steps"][uid] for uid in [s0, s1]
}
new_uid = None
new_steps: Dict[str, JSON_TYPE] = {}
for step_uid, step_jsn in old.items():
if _op_kind(step_jsn) == "Par": # flatten
new_steps.update(step_jsn["steps"])
if new_uid is None:
new_uid = step_uid
else:
new_steps[step_uid] = step_jsn
if new_uid is None:
new_uid = gen.gensym("union")
new: Dict[str, JSON_TYPE] = {
new_uid: {"kind": "Par", "steps": new_steps}
}
return old, new
return None
def find_union(
graph: JSON_TYPE,
) -> Optional[Tuple[Dict[str, JSON_TYPE], Dict[str, JSON_TYPE]]]:
cat_cls = "lale.lib.rasl.concat_features._ConcatFeaturesImpl"
for seq_uid, seq_jsn in graph["steps"].items():
if _op_kind(seq_jsn) == "Seq":
seq_uids = list(seq_jsn["steps"].keys())
for i in range(len(seq_uids) - 1):
src, dst = seq_uids[i], seq_uids[i + 1]
src_jsn = seq_jsn["steps"][src]
if _op_kind(src_jsn) == "Par":
dst_jsn = seq_jsn["steps"][dst]
if dst_jsn.get("class", None) == cat_cls:
old = {seq_uid: seq_jsn}
union = {"kind": "Union", "steps": src_jsn["steps"]}
if len(seq_uids) == 2:
new = {src: union}
else:
new_steps: Dict[str, JSON_TYPE] = {}
for uid, jsn in seq_jsn["steps"].items():
if uid == src:
new_steps[uid] = union
elif uid != dst:
new_steps[uid] = jsn
new = {src: {"kind": "Seq", "steps": new_steps}}
return old, new
return None
def replace(
subject: JSON_TYPE, old: Dict[str, JSON_TYPE], new: Dict[str, JSON_TYPE]
) -> JSON_TYPE:
assert _op_kind(subject) == "Graph"
new_uid, new_jsn = list(new.items())[0]
assert _op_kind(new_jsn) in ["Seq", "Par", "Union"]
subj_steps = subject["steps"]
subj_preds = subject["preds"]
subj_succs = subject["succs"]
res_steps: Dict[str, JSON_TYPE] = {}
res_preds: Dict[str, List[str]] = {}
res_succs: Dict[str, List[str]] = {}
old_steps_uids = list(old.keys())
for step_uid in subj_steps: # careful to keep topological order
if step_uid == old_steps_uids[0]:
res_steps[new_uid] = new_jsn
res_preds[new_uid] = subj_preds[old_steps_uids[0]]
res_succs[new_uid] = subj_succs[old_steps_uids[-1]]
elif step_uid not in old_steps_uids:
res_steps[step_uid] = subj_steps[step_uid]
res_preds[step_uid] = []
for pred in subj_preds[step_uid]:
if pred == old_steps_uids[-1]:
res_preds[step_uid].append(new_uid)
elif pred not in old_steps_uids:
res_preds[step_uid].append(pred)
res_succs[step_uid] = []
for succ in subj_succs[step_uid]:
if succ == old_steps_uids[0]:
res_succs[step_uid].append(new_uid)
elif succ not in old_steps_uids:
res_succs[step_uid].append(succ)
result = {
"kind": "Graph",
"steps": res_steps,
"preds": res_preds,
"succs": res_succs,
}
return result
def find_and_replace(graph: JSON_TYPE) -> JSON_TYPE:
if len(graph["steps"]) == 1: # singleton
return {"kind": "Seq", "steps": graph["steps"]}
progress = True
while progress:
seq = find_seq(graph)
if seq is not None:
graph = replace(graph, *seq)
par = find_par(graph)
if par is not None:
graph = replace(graph, *par)
if not gen.combinators:
union = find_union(graph)
if union is not None:
graph = replace(graph, *union)
progress = seq is not None or par is not None
if len(graph["steps"]) == 1: # flatten
return list(graph["steps"].values())[0]
else:
return graph
graph = make_graph(pipeline)
result = find_and_replace(graph)
return result
def _operator_jsn_to_string_rec(uid: str, jsn: JSON_TYPE, gen: _CodeGenState) -> str:
op_expr: str
if _op_kind(jsn) == "Pipeline":
structured = _introduce_structure(jsn, gen)
return _operator_jsn_to_string_rec(uid, structured, gen)
elif _op_kind(jsn) == "Graph":
steps, succs = jsn["steps"], jsn["succs"]
step2name: Dict[str, str] = {}
for step_uid, step_val in steps.items():
expr = _operator_jsn_to_string_rec(step_uid, step_val, gen)
if re.fullmatch("[A-Za-z][A-Za-z0-9_]*", expr):
step2name[step_uid] = expr
else:
step2name[step_uid] = step_uid
gen.assigns.append(f"{step_uid} = {expr}")
make_pipeline = "make_pipeline_graph"
gen.imports.append(f"from lale.operators import {make_pipeline}")
steps_string = ", ".join([step2name[step] for step in steps])
edges_string = ", ".join(
[
f"({step2name[src]},{step2name[tgt]})"
for src in steps
for tgt in succs[src]
]
)
result = f"{make_pipeline}(steps=[{steps_string}], edges=[{edges_string}])"
return result
elif _op_kind(jsn) in ["Seq", "Par", "OperatorChoice", "Union"]:
if gen.combinators:
def print_for_comb(step_uid, step_val):
printed = _operator_jsn_to_string_rec(step_uid, step_val, gen)
parens = _op_kind(step_val) != _op_kind(jsn) and _op_kind(step_val) in [
"Seq",
"Par",
"OperatorChoice",
]
return f"({printed})" if parens else printed
printed_steps = {
step_uid: print_for_comb(step_uid, step_val)
for step_uid, step_val in jsn["steps"].items()
}
combinator = _OP_KIND_TO_COMBINATOR[_op_kind(jsn)]
if len(printed_steps.values()) == 1 and combinator == ">>":
gen.imports.append("from lale.operators import make_pipeline")
op_expr = f"make_pipeline({', '.join(printed_steps.values())})"
return op_expr
return f" {combinator} ".join(printed_steps.values())
else:
printed_steps = {
step_uid: _operator_jsn_to_string_rec(step_uid, step_val, gen)
for step_uid, step_val in jsn["steps"].items()
}
function = _OP_KIND_TO_FUNCTION[_op_kind(jsn)]
if gen.astype == "sklearn" and function in ["make_union", "make_pipeline"]:
gen.imports.append(f"from sklearn.pipeline import {function}")
else:
gen.imports.append(f"from lale.operators import {function}")
op_expr = f"{function}({', '.join(printed_steps.values())})"
gen.assigns.append(f"{uid} = {op_expr}")
return uid
elif _op_kind(jsn) == "IndividualOp":
label: str = jsn["label"]
class_name = jsn["class"]
module_name = _get_module_name(label, jsn["operator"], class_name)
if module_name.startswith("lale."):
op_name = jsn["operator"]
else:
op_name = class_name[class_name.rfind(".") + 1 :]
if op_name.startswith("_"):
op_name = op_name[1:]
if op_name.endswith("Impl"):
op_name = op_name[: -len("Impl")]
if op_name == label:
import_stmt = f"from {module_name} import {op_name}"
else:
import_stmt = f"from {module_name} import {op_name} as {label}"
if module_name != "__main__":
gen.imports.append(import_stmt)
external_module_name = _get_wrapper_module_if_external(class_name)
if external_module_name is not None:
gen.external_wrapper_modules.append(external_module_name)
printed_steps = {
step_uid: _operator_jsn_to_string_rec(step_uid, step_val, gen)
for step_uid, step_val in jsn.get("steps", {}).items()
}
op_expr = label
if "customize_schema" in jsn and gen.customize_schema:
if jsn["customize_schema"] == "not_available":
logger.warning(f"missing {label}.customize_schema(..) call")
elif jsn["customize_schema"] != {}:
new_hps = lale.json_operator._top_schemas_to_hp_props(
jsn["customize_schema"]
)
customize_schema_string = ",".join(
[
f"{hp_name}={json_to_string(hp_schema)}"
for hp_name, hp_schema in new_hps.items()
]
)
op_expr = f"{op_expr}.customize_schema({customize_schema_string})"
if "hyperparams" in jsn and jsn["hyperparams"] is not None:
hp_string = hyperparams_to_string(jsn["hyperparams"], printed_steps, gen)
op_expr = f"{op_expr}({hp_string})"
if gen.assign_nested and re.fullmatch(r".+\(.+\)", op_expr):
gen.assigns.append(f"{uid} = {op_expr}")
return uid
else:
return op_expr
else:
assert False, f"unexpected type {type(jsn)} of jsn {jsn}"
def _collect_names(jsn: JSON_TYPE) -> Set[str]:
result: Set[str] = set()
if "steps" in jsn:
steps: Dict[str, JSON_TYPE] = jsn["steps"]
for step_uid, step_jsn in steps.items():
result |= {step_uid}
result |= _collect_names(step_jsn)
if "label" in jsn:
lbl: str = jsn["label"]
result |= {lbl}
return result
def _combine_lonely_literals(printed_code):
lines = printed_code.split("\n")
regex = re.compile(
r' +("[^"]*"|\d+\.?\d*|\[\]|float\("nan"\)|np\.dtype\("[^"]+"\)),'
)
for i in range(len(lines)): # pylint:disable=consider-using-enumerate
if lines[i] is not None:
match_i = regex.fullmatch(lines[i])
if match_i is not None:
j = i + 1
while j < len(lines) and lines[j] is not None:
match_j = regex.fullmatch(lines[j])
if match_j is None:
break
candidate = lines[i] + " " + match_j.group(1) + ","
if len(candidate) > 78:
break
lines[i] = candidate
lines[j] = None
j += 1
result = "\n".join([s for s in lines if s is not None])
return result
def _format_code(printed_code):
formatted = black.format_str(printed_code, mode=_black78).rstrip()
combined = _combine_lonely_literals(formatted)
return combined
def _operator_jsn_to_string(
jsn: JSON_TYPE,
show_imports: bool,
combinators: bool,
assign_nested: bool,
customize_schema: bool,
astype: str,
) -> str:
gen = _CodeGenState(
_collect_names(jsn), combinators, assign_nested, customize_schema, astype
)
expr = _operator_jsn_to_string_rec("pipeline", jsn, gen)
if expr != "pipeline":
gen.assigns.append(f"pipeline = {expr}")
if show_imports and len(gen.imports) > 0:
if combinators:
gen.imports.append("import lale")
imports_set: Set[str] = set()
imports_list: List[str] = []
for imp in gen.imports:
if imp not in imports_set:
imports_set |= {imp}
imports_list.append(imp)
result = "\n".join(imports_list)
external_wrapper_modules_set: Set[str] = set()
external_wrapper_modules_list: List[str] = []
for module in gen.external_wrapper_modules:
if module not in external_wrapper_modules_set:
external_wrapper_modules_set |= {module}
external_wrapper_modules_list.append(module)
if combinators:
if len(external_wrapper_modules_list) > 0:
result += (
f"\nlale.wrap_imported_operators({external_wrapper_modules_list})"
)
else:
result += "\nlale.wrap_imported_operators()"
result += "\n"
result += "\n".join(gen.assigns)
else:
result = "\n".join(gen.assigns)
formatted = _format_code(result)
return formatted
def json_to_string(jsn: JSON_TYPE) -> str:
def _inner(value):
if value is None:
return "None"
elif isinstance(value, (bool, str)):
return pprint.pformat(value, width=10000, compact=True)
elif isinstance(value, (int, float)):
if math.isnan(value):
return "float('nan')"
else:
return pprint.pformat(value, width=10000, compact=True)
elif isinstance(value, list):
sl = [_inner(v) for v in value]
return "[" + ", ".join(sl) + "]"
elif isinstance(value, tuple):
sl = [_inner(v) for v in value]
return "(" + ", ".join(sl) + ")"
elif isinstance(value, dict):
sl = [f"'{k}': {_inner(v)}" for k, v in value.items()]
return "{" + ", ".join(sl) + "}"
else:
return f"<<{type(value).__qualname__}>>"
s1 = _inner(jsn)
s2 = _format_code(s1)
return s2
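# Hedged illustration (added comment): for plain JSON values this is just a
# black-formatted Python literal, e.g. json_to_string({"a": [1, 2.5, "x"]})
# returns roughly the string '{"a": [1, 2.5, "x"]}'.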
def to_string(
arg: Union[JSON_TYPE, "lale.operators.Operator"],
*,
show_imports: bool = True,
combinators: bool = True,
assign_nested: bool = True,
customize_schema: bool = False,
astype: str = "lale",
call_depth: int = 1,
) -> str:
assert astype in ["lale", "sklearn"], astype
if astype == "sklearn":
combinators = False
if lale.type_checking.is_schema(arg):
return json_to_string(cast(JSON_TYPE, arg))
elif isinstance(arg, lale.operators.Operator):
jsn = lale.json_operator.to_json(
arg,
call_depth=call_depth + 1,
add_custom_default=not customize_schema,
)
return _operator_jsn_to_string(
jsn,
show_imports,
combinators,
assign_nested,
customize_schema,
astype,
)
else:
raise ValueError(f"Unexpected argument type {type(arg)} for {arg}")
def ipython_display(
arg: Union[JSON_TYPE, "lale.operators.Operator"],
*,
show_imports: bool = True,
combinators: bool = True,
assign_nested: bool = True,
):
import IPython.display
pretty_printed = to_string(
arg,
show_imports=show_imports,
combinators=combinators,
assign_nested=assign_nested,
call_depth=3,
)
markdown = IPython.display.Markdown(f"```python\n{pretty_printed}\n```")
IPython.display.display(markdown)
| 29,239 | 38.945355 | 97 |
py
|
lale
|
lale-master/lale/visualize.py
|
# Copyright 2020 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from typing import Any, Dict, List, Optional, Tuple
import graphviz
import lale.json_operator
import lale.pretty_print
_LALE_SKL_PIPELINE = "lale.lib.sklearn.pipeline._PipelineImpl"
def _get_cluster2reps(jsn) -> Tuple[Dict[str, str], Dict[str, str]]:
"""For each cluster (Pipeline, OperatorChoice, or higher-order IndividualOp), get two representatives (first-order IndividualOps).
Lale visualizes composite operators using graphviz clusters. To
visualize an edge to (from) a cluster, we need to tell graphviz a
representative in that cluster for the edge to connect to (from).
Parameters
----------
jsn:
JSON representation of a Lale pipeline.
The steps of all sub-pipelines must be topologically ordered.
Returns
-------
reps:
Two dictionaries from cluster id fields to node id fields.
Nodes in the first dictionary are roots that are furthest left
in the visualization, suitable for incoming edges.
Nodes in the second dictionary are roots that are furthest
right in the visualization, suitable for outgoing edges.
"""
cluster2root: Dict[str, str] = {}
cluster2leaf: Dict[str, str] = {}
node2depth: Dict[str, int] = {}
def populate(uid: str, jsn, depth: int, clusters: List[str]) -> int:
kind: str = lale.json_operator.json_op_kind(jsn)
if kind == "Pipeline" or jsn["class"] == _LALE_SKL_PIPELINE:
step2idx: Dict[str, int] = {}
for step_idx, step_uid in enumerate(jsn["steps"].keys()):
step2idx[step_uid] = step_idx
if kind == "Pipeline":
edges = jsn["edges"]
else:
names = list(jsn["steps"].keys())
edges = [[names[i], names[i + 1]] for i in range(len(names) - 1)]
for tail, head in edges:
assert (
step2idx[tail] < step2idx[head]
), f"steps {tail} and {head} are not in topological order"
node2preds: Dict[str, List[str]] = {
step: [] for step in jsn["steps"].keys()
}
for tail, head in edges:
node2preds[head].append(tail)
more_clusters = [uid, *clusters]
d_max = depth
for step_uid, step_jsn in jsn["steps"].items():
d_root = max(
(node2depth[p] for p in node2preds[step_uid]), default=depth
)
d_leaf = populate(step_uid, step_jsn, d_root, more_clusters)
d_max = max(d_max, d_leaf)
elif kind == "OperatorChoice" or "steps" in jsn:
more_clusters = [uid, *clusters]
d_max = depth
for step_uid, step_jsn in jsn["steps"].items():
d_leaf = populate(step_uid, step_jsn, depth, more_clusters)
d_max = max(d_max, d_leaf)
else:
assert kind == "IndividualOp"
d_max = depth + 1
for cluster in clusters:
if (
cluster not in cluster2root
or node2depth[cluster2root[cluster]] > d_max
):
cluster2root[cluster] = uid
if (
cluster not in cluster2leaf
or node2depth[cluster2leaf[cluster]] < d_max
):
cluster2leaf[cluster] = uid
node2depth[uid] = d_max
return d_max
populate("(root)", jsn, 0, [])
return cluster2root, cluster2leaf
_STATE2COLOR = {"trained": "white", "trainable": "lightskyblue1", "planned": "skyblue2"}
def _indiv_op_tooltip(uid, jsn) -> str:
assert lale.json_operator.json_op_kind(jsn) == "IndividualOp"
tooltip = f"{uid} = {jsn['label']}"
if "hyperparams" in jsn:
hps = jsn["hyperparams"]
if hps is not None:
steps: Optional[Dict[str, Any]]
if "steps" in jsn:
steps = {step_uid: step_uid for step_uid in jsn["steps"]}
else:
steps = None
hp_string = lale.pretty_print.hyperparams_to_string(hps, steps)
if len(hp_string) > 255: # too long for graphviz
hp_string = hp_string[:252] + "..."
tooltip = f"{tooltip}({hp_string})"
return tooltip
def _url_new_tab(jsn):
url = jsn["documentation_url"]
# note the missing double-quotes before the url and after the rel:
# we are pretending to only provide the URL itself, and abusing
# non-grammatical string concatenation to piggy-back more attributes
html = f'{url}" target="_blank" rel="noopener noreferrer'
return html
def _json_to_graphviz_rec(uid, jsn, cluster2reps, is_root, dot_graph_attr):
kind = lale.json_operator.json_op_kind(jsn)
dot: graphviz.Digraph
if kind in ["Pipeline", "OperatorChoice"] or "steps" in jsn:
dot = graphviz.Digraph(name=f"cluster:{uid}")
else:
dot = graphviz.Digraph()
if is_root:
dot.attr(
"graph",
{**dot_graph_attr, "rankdir": "LR", "compound": "true", "nodesep": "0.1"},
)
dot.attr("node", fontsize="11", margin="0.06,0.03")
if kind == "Pipeline":
dot.attr(
"graph",
label="",
style="rounded,filled",
fillcolor=_STATE2COLOR[jsn["state"]],
tooltip=f"{uid} = ...",
)
nodes = jsn["steps"]
edges = jsn["edges"]
else:
if is_root:
nodes = {"(root)": jsn}
edges = []
elif kind == "OperatorChoice":
rhs = " | ".join(jsn["steps"].keys())
dot.attr(
"graph",
label="Choice",
style="filled",
fillcolor=_STATE2COLOR[jsn["state"]],
tooltip=f"{uid} = {rhs}",
)
nodes = jsn["steps"]
edges = []
else:
assert kind == "IndividualOp" and "steps" in jsn
dot.attr(
"graph",
label=jsn.get("viz_label", jsn["label"]),
style="filled",
fillcolor=_STATE2COLOR[jsn["state"]],
tooltip=_indiv_op_tooltip(uid, jsn),
)
if "documentation_url" in jsn:
dot.attr("graph", URL=_url_new_tab(jsn))
nodes = jsn["steps"]
if jsn["class"] == _LALE_SKL_PIPELINE:
names = list(nodes.keys())
edges = [[names[i], names[i + 1]] for i in range(len(names) - 1)]
else:
edges = []
for step_uid, step_jsn in nodes.items():
node_kind = lale.json_operator.json_op_kind(step_jsn)
if node_kind in ["Pipeline", "OperatorChoice"] or "steps" in step_jsn:
sub_dot = _json_to_graphviz_rec(step_uid, step_jsn, cluster2reps, False, {})
dot.subgraph(sub_dot)
else:
assert node_kind == "IndividualOp"
tooltip = _indiv_op_tooltip(step_uid, step_jsn)
attrs = {
"style": "filled",
"fillcolor": _STATE2COLOR[step_jsn["state"]],
"tooltip": tooltip,
}
if "documentation_url" in step_jsn:
attrs["URL"] = _url_new_tab(step_jsn)
label0 = step_jsn.get("viz_label", step_jsn["label"])
if "\n" in label0:
label3 = label0
else:
label1 = re.sub("(.)([A-Z][a-z]+)", r"\1-\n\2", label0)
label2 = re.sub("([a-z0-9])([A-Z])", r"\1-\n\2", label1)
label3 = re.sub(r"([^_\n-]_)([^_\n-])", r"\1-\n\2", label2)
dot.node(step_uid, label3, **attrs)
cluster2root, cluster2leaf = cluster2reps
for tail, head in edges:
tail_is_cluster = "steps" in nodes[tail]
head_is_cluster = "steps" in nodes[head]
if tail_is_cluster:
if head_is_cluster:
dot.edge(
cluster2leaf[tail],
cluster2root[head],
ltail=f"cluster:{tail}",
lhead=f"cluster:{head}",
)
else:
dot.edge(cluster2leaf[tail], head, ltail=f"cluster:{tail}")
else:
if head_is_cluster:
dot.edge(tail, cluster2root[head], lhead=f"cluster:{head}")
else:
dot.edge(tail, head)
return dot
def json_to_graphviz(jsn, ipython_display, dot_graph_attr):
cluster2reps = _get_cluster2reps(jsn)
dot = _json_to_graphviz_rec("(root)", jsn, cluster2reps, True, dot_graph_attr)
if ipython_display:
import IPython.display
IPython.display.display(dot)
return None
return dot
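# --- Hedged usage note (added comment) ---------------------------------------
# This function is normally reached indirectly: lale operators expose a
# visualize() method that serializes the operator to JSON and hands it to
# json_to_graphviz. A direct call might look roughly like
#
#     jsn = lale.json_operator.to_json(pipeline)
#     dot = json_to_graphviz(jsn, ipython_display=False, dot_graph_attr={})
#     dot.render("pipeline.gv")  # standard graphviz.Digraph API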
| 9,382 | 37.142276 | 134 |
py
|
lale
|
lale-master/lale/util/pandas_torch_dataset.py
|
# Copyright 2021 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as pd
try:
from torch.utils.data import Dataset
except ModuleNotFoundError as exc:
raise ModuleNotFoundError(
"""Your Python environment does not have torch installed. You can install it with
pip install torch
or with
pip install 'lale[full]'"""
) from exc
class PandasTorchDataset(Dataset):
"""Pytorch Dataset subclass that takes a pandas DataFrame and an optional label pandas Series."""
def __init__(self, X, y=None):
"""X and y are the dataset and labels respectively.
Parameters
----------
X : pandas DataFrame
Two dimensional dataset of input features.
y : pandas Series
Labels
"""
self.X = X
self.y = y
def __len__(self):
return self.X.shape[0]
def __getitem__(self, idx):
if self.y is not None:
return self.X.iloc[idx], self.y.iloc[idx]
else:
return self.X.iloc[idx]
def get_data(self):
if self.y is None:
return self.X
else:
return self.X, self.y
def pandas_collate_fn(batch):
return_X = None
return_y = None
for item in batch:
if isinstance(item, tuple):
if return_X is None:
return_X = [item[0].to_dict()]
else:
return_X.append(item[0].to_dict())
if return_y is None:
return_y = [item[1]]
else:
return_y.append(item[1])
else:
if return_X is None:
return_X = [item.to_dict()]
else:
return_X.append(item.to_dict())
if return_y is not None:
return (pd.DataFrame(return_X), pd.Series(return_y))
else:
return pd.DataFrame(return_X)
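# --- Hedged usage sketch (illustrative, not part of the original module) -----
if __name__ == "__main__":
    # Wrap the dataset in a torch DataLoader so that, thanks to
    # pandas_collate_fn, every batch comes back as a (DataFrame, Series) pair.
    from torch.utils.data import DataLoader
    X_demo = pd.DataFrame({"a": [1, 2, 3, 4], "b": [5.0, 6.0, 7.0, 8.0]})
    y_demo = pd.Series([0, 1, 0, 1], name="label")
    loader = DataLoader(
        PandasTorchDataset(X_demo, y_demo), batch_size=2, collate_fn=pandas_collate_fn
    )
    for batch_X, batch_y in loader:
        print(batch_X.shape, batch_y.tolist())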
| 2,475 | 28.831325 | 101 |
py
|
lale
|
lale-master/lale/util/VisitorPathError.py
|
# Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Iterator, List, Optional
class VisitorPathError(ValueError):
_path: List[Any]
def __init__(self, path: List[Any], message: Optional[str] = None):
super().__init__(message)
self._path = path
def push_parent_path(self, part: Any) -> None:
self._path.append(part)
@property
def path(self) -> Iterator[Any]:
return reversed(self._path)
def get_message_str(self) -> str:
return super().__str__()
def path_string(self) -> str:
return "->".join((str(x) for x in self.path))
def __str__(self):
pstr = self.path_string()
return f"[{pstr}]: {self.get_message_str()}"
| 1,262 | 29.071429 | 74 |
py
|
lale
|
lale-master/lale/util/hdf5_to_torch_dataset.py
|
# Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
from torch.utils.data import Dataset
except ModuleNotFoundError as import_exc:
raise ModuleNotFoundError(
"""Your Python environment does not have torch installed. You can install it with
pip install torch
or with
pip install 'lale[full]'"""
) from import_exc
try:
import h5py
except ModuleNotFoundError as import_exc:
raise ModuleNotFoundError(
"""Your Python environment does not have h5py installed. You can install it with
pip install h5py
or with
pip install 'lale[full]'"""
) from import_exc
class HDF5TorchDataset(Dataset):
"""Pytorch Dataset subclass that takes a hdf5 file pointer."""
def __init__(self, file_path):
""".
Parameters
----------
file : file is an object of class h5py.File
"""
self.file_path = file_path
h5_file = h5py.File(file_path)
self.length = h5_file["X"].shape[0]
def __len__(self):
return self.length
def __getitem__(self, idx):
with h5py.File(self.file_path) as h5_file:
X = h5_file["X"]
try:
y = h5_file["y"]
except KeyError:
y = None
if y is None:
element = X[idx]
else:
element = X[idx], y[idx]
return element
def get_data(self):
with h5py.File(self.file_path) as h5_file:
X = h5_file["X"][:]
try:
y = h5_file["y"][:]
except KeyError:
y = None
if y is None:
return X
else:
return X, y
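# --- Hedged usage sketch (illustrative, not part of the original module) -----
if __name__ == "__main__":
    import os
    import tempfile
    import numpy as np
    # Write a tiny HDF5 file with the expected "X" and optional "y" datasets,
    # then read it back through the torch Dataset wrapper.
    demo_path = os.path.join(tempfile.mkdtemp(), "demo.h5")
    with h5py.File(demo_path, "w") as f:
        f.create_dataset("X", data=np.arange(12.0).reshape(4, 3))
        f.create_dataset("y", data=np.array([0, 1, 0, 1]))
    ds = HDF5TorchDataset(demo_path)
    print(len(ds), ds[0])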
| 2,412 | 30.75 | 89 |
py
|
lale
|
lale-master/lale/util/batch_data_dictionary_dataset.py
|
# Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from torch.utils.data import Dataset
class BatchDataDict(Dataset):
"""Pytorch Dataset subclass that takes a dictionary of format {'<batch_idx>': <batch_data>}."""
def __init__(self, X, y=None):
"""X is the dictionary dataset and y is ignored.
Parameters
----------
X : dict
Dictionary of format {'<batch_idx>': <batch_data>}
y : None
Ignored.
"""
self.data_dict = X
def __len__(self):
return len(self.data_dict)
def __getitem__(self, idx):
# This returns the batch at idx instead of a single element.
return self.data_dict[idx]
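# --- Hedged usage sketch (illustrative, not part of the original module) -----
if __name__ == "__main__":
    import numpy as np
    # Each item of this dataset is an entire pre-built batch, keyed by index.
    batches = {0: np.zeros((2, 3)), 1: np.ones((2, 3))}
    ds = BatchDataDict(batches)
    print(len(ds), ds[1])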
| 1,227 | 30.487179 | 99 |
py
|
lale
|
lale-master/lale/util/VisitorMeta.py
|
# Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# A shim for compatibility across 3.7.
# pre 3.7, we need to inherit from the GenericMeta class (which inherits from ABCmeta)
# to use Generic (which we want to do)
# post 3.7, GenericMeta no longer exists
import sys
from abc import ABCMeta
class VisitorMeta(type):
"""This meta class adds a private _accept method that calls visitCLASSNAME on the visitor.
    It does not currently support inheritance: you need to define the visitCLASSNAME method
    for subclasses explicitly.
    The private _accept method should be called via the Visitor#accept method.
"""
def __init__(cls, *args, **kwargs):
super(VisitorMeta, cls).__init__(*args, **kwargs)
method_name = getattr(cls, "__name__", "???")
        # ensure that only identifiers are used
if not isinstance(method_name, str) or not method_name.isidentifier():
method_name = "???"
selector = f"""
from lale.util import VisitorPathError
try:
return visitor.visit{method_name}(self, *args, **kwargs)
except VisitorPathError as e:
e.push_parent_path(self)
raise
except BaseException as e:
raise VisitorPathError([self]) from e
"""
_accept_code = f"def _accept(self, visitor, *args, **kwargs):\n\t{selector}"
ll = {}
# This is safe since the only user manipulatable part of the code is
# cls.__name__, which we sanitize to ensure that it is a valid identifier
exec(_accept_code, globals(), ll) # nosec
setattr(cls, "_accept", ll["_accept"])
if sys.version_info < (3, 7, 0):
from typing import GenericMeta # type: ignore
else:
global GenericMeta # pylint:disable=global-at-module-level
GenericMeta = ABCMeta # type: ignore
class AbstractVisitorMeta(VisitorMeta, GenericMeta):
"""This meta class adds an _accept method that calls visitCLASSNAME on the visitor.
    It does not currently support inheritance: you need to define the visitCLASSNAME method
    for subclasses explicitly.
    The private _accept method should be called via the Visitor#accept method.
"""
def __init__(cls, *args, **kwargs):
super(AbstractVisitorMeta, cls).__init__(*args, **kwargs)
| 2,806 | 37.986111 | 98 |
py
|
lale
|
lale-master/lale/util/__init__.py
|
# Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .VisitorPathError import VisitorPathError
| 623 | 38 | 74 |
py
|
lale
|
lale-master/lale/util/numpy_to_torch_dataset.py
|
# Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
try:
from torch.utils.data import Dataset
except ModuleNotFoundError as exc:
raise ModuleNotFoundError(
"""Your Python environment does not have torch installed. You can install it with
pip install torch
or with
pip install 'lale[full]'"""
) from exc
class NumpyTorchDataset(Dataset):
"""Pytorch Dataset subclass that takes a numpy array and an optional label array."""
def __init__(self, X, y=None):
"""X and y are the dataset and labels respectively.
Parameters
----------
X : numpy array
Two dimensional dataset of input features.
y : numpy array
Labels
"""
self.X = X
self.y = y
def __len__(self):
return self.X.shape[0]
def __getitem__(self, idx):
if self.y is not None:
return self.X[idx], self.y[idx]
else:
return self.X[idx]
def get_data(self):
if self.y is None:
return self.X
else:
return self.X, self.y
def numpy_collate_fn(batch):
return_X = None
return_y = None
for item in batch:
if isinstance(item, tuple):
if return_X is None:
return_X = item[0]
else:
return_X = np.vstack((return_X, item[0]))
if return_y is None:
return_y = item[1]
else:
return_y = np.vstack((return_y, item[1])) # type: ignore
else:
if return_X is None:
return_X = item
else:
return_X = np.vstack((return_X, item)) # type: ignore
if return_y is not None:
if len(return_y.shape) > 1 and return_y.shape[1] == 1:
return_y = np.reshape(return_y, (len(return_y),))
return return_X, return_y
else:
return return_X
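# --- Hedged usage sketch (illustrative, not part of the original module) -----
if __name__ == "__main__":
    # Wrap the dataset in a torch DataLoader; numpy_collate_fn stacks the rows
    # back into 2-D feature batches and 1-D label batches.
    from torch.utils.data import DataLoader
    X_demo = np.arange(12.0).reshape(4, 3)
    y_demo = np.array([0, 1, 0, 1])
    loader = DataLoader(
        NumpyTorchDataset(X_demo, y_demo), batch_size=2, collate_fn=numpy_collate_fn
    )
    for batch_X, batch_y in loader:
        print(batch_X.shape, batch_y.shape)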
| 2,557 | 29.452381 | 89 |
py
|
lale
|
lale-master/lale/util/Visitor.py
|
# Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any
class Visitor:
def defaultVisit(self, node, *args, **kwargs):
raise NotImplementedError
def __getattr__(self, attr):
if attr.startswith("visit"):
return self.defaultVisit
return self.__getattribute__(attr)
def _visitAll(self, iterable, *args, **kwargs):
def filter_none(x):
return (x is not None) or None
return [filter_none(x) and accept(x, self, *args, **kwargs) for x in iterable]
# Because of the magic way we add accept methods, mypy does not know they exist
# so this method is important for accept calls to typecheck
def accept(obj: Any, v: Visitor, *args, **kwargs):
return obj._accept(v, *args, **kwargs)
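# --- Hedged usage sketch (illustrative, not part of the original module) -----
if __name__ == "__main__":
    from lale.util.VisitorMeta import AbstractVisitorMeta
    class Leaf(metaclass=AbstractVisitorMeta):
        """Toy node; the metaclass generates its _accept method."""
    class Printer(Visitor):
        def visitLeaf(self, node):
            return f"visited {type(node).__name__}"
    print(accept(Leaf(), Printer()))  # visited Leaf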
| 1,300 | 33.236842 | 86 |
py
|
lale
|
lale-master/lale/util/numpy_torch_dataset.py
|
# Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
try:
from torch.utils.data import Dataset
except ModuleNotFoundError as exc:
raise ModuleNotFoundError(
"""Your Python environment does not have torch installed. You can install it with
pip install torch
or with
pip install 'lale[full]'"""
) from exc
class NumpyTorchDataset(Dataset):
"""Pytorch Dataset subclass that takes a numpy array and an optional label array."""
def __init__(self, X, y=None):
"""X and y are the dataset and labels respectively.
Parameters
----------
X : numpy array
Two dimensional dataset of input features.
y : numpy array
Labels
"""
self.X = X
self.y = y
def __len__(self):
return self.X.shape[0]
def __getitem__(self, idx):
if self.y is not None:
return self.X[idx], self.y[idx]
else:
return self.X[idx]
def get_data(self):
if self.y is None:
return self.X
else:
return self.X, self.y
def numpy_collate_fn(batch):
return_X = None
return_y = None
for item in batch:
if isinstance(item, tuple):
if return_X is None:
return_X = item[0]
else:
return_X = np.vstack((return_X, item[0]))
if return_y is None:
return_y = item[1]
else:
return_y = np.vstack((return_y, item[1])) # type: ignore
else:
if return_X is None:
return_X = item
else:
return_X = np.vstack((return_X, item)) # type: ignore
if return_y is not None:
if len(return_y.shape) > 1 and return_y.shape[1] == 1:
return_y = np.reshape(return_y, (len(return_y),))
return return_X, return_y
else:
return return_X
| 2,557 | 29.452381 | 89 |
py
|
lale
|
lale-master/lale/util/pandas_to_torch_dataset.py
|
# Copyright 2021 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as pd
try:
from torch.utils.data import Dataset
except ModuleNotFoundError as exc:
raise ModuleNotFoundError(
"""Your Python environment does not have torch installed. You can install it with
pip install torch
or with
pip install 'lale[full]'"""
) from exc
class PandasTorchDataset(Dataset):
"""Pytorch Dataset subclass that takes a pandas DataFrame and an optional label pandas Series."""
def __init__(self, X, y=None):
"""X and y are the dataset and labels respectively.
Parameters
----------
X : pandas DataFrame
Two dimensional dataset of input features.
y : pandas Series
Labels
"""
self.X = X
self.y = y
def __len__(self):
return self.X.shape[0]
def __getitem__(self, idx):
if self.y is not None:
return self.X.iloc[idx], self.y.iloc[idx]
else:
return self.X.iloc[idx]
def get_data(self):
if self.y is None:
return self.X
else:
return self.X, self.y
def pandas_collate_fn(batch):
return_X = None
return_y = None
for item in batch:
if isinstance(item, tuple):
if return_X is None:
return_X = [item[0].to_dict()]
else:
return_X.append(item[0].to_dict())
if return_y is None:
return_y = [item[1]]
else:
return_y.append(item[1])
else:
if return_X is None:
return_X = [item[0].to_dict()]
else:
return_X.append(item[0].to_dict())
if return_y is not None:
return (pd.DataFrame(return_X), pd.Series(return_y))
else:
return pd.DataFrame(return_X)
| 2,481 | 28.903614 | 101 |
py
|
lale
|
lale-master/lale/datasets/sklearn_to_pandas.py
|
# Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as pd
import sklearn.datasets
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
import lale.datasets.data_schemas
from lale.datasets.util import load_boston
def _bunch_to_df(bunch, schema_X, schema_y, test_size=0.2, random_state=42):
train_X_arr, test_X_arr, train_y_arr, test_y_arr = train_test_split(
bunch.data, bunch.target, test_size=test_size, random_state=random_state
)
feature_schemas = schema_X["items"]["items"]
if isinstance(feature_schemas, list):
feature_names = [f["description"] for f in feature_schemas]
else:
feature_names = [f"x{i}" for i in range(schema_X["items"]["maxItems"])]
train_X_df = pd.DataFrame(train_X_arr, columns=feature_names)
test_X_df = pd.DataFrame(test_X_arr, columns=feature_names)
train_y_df = pd.Series(train_y_arr, name="target")
test_y_df = pd.Series(test_y_arr, name="target")
train_nrows, test_nrows = train_X_df.shape[0], test_X_df.shape[0]
train_X = lale.datasets.data_schemas.add_schema(
train_X_df, {**schema_X, "minItems": train_nrows, "maxItems": train_nrows}
)
test_X = lale.datasets.data_schemas.add_schema(
test_X_df, {**schema_X, "minItems": test_nrows, "maxItems": test_nrows}
)
train_y = lale.datasets.data_schemas.add_schema(
train_y_df, {**schema_y, "minItems": train_nrows, "maxItems": train_nrows}
)
test_y = lale.datasets.data_schemas.add_schema(
test_y_df, {**schema_y, "minItems": test_nrows, "maxItems": test_nrows}
)
return (train_X, train_y), (test_X, test_y)
def load_iris_df(test_size=0.2):
iris = sklearn.datasets.load_iris()
    target_name = "target"
    X, y = shuffle(iris.data, iris.target, random_state=42)
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=test_size, random_state=42
)
X_train_df = pd.DataFrame(X_train, columns=iris.feature_names)
y_train_df = pd.Series(y_train, name=target_name)
X_test_df = pd.DataFrame(X_test, columns=iris.feature_names)
y_test_df = pd.Series(y_test, name=target_name)
return (X_train_df, y_train_df), (X_test_df, y_test_df)
def digits_df(test_size=0.2, random_state=42):
digits = sklearn.datasets.load_digits()
ncols = digits.data.shape[1]
schema_X = {
"description": "Features of digits dataset (classification).",
"documentation_url": "https://scikit-learn.org/0.20/datasets/index.html#optical-recognition-of-handwritten-digits-dataset",
"type": "array",
"items": {
"type": "array",
"minItems": ncols,
"maxItems": ncols,
"items": {"type": "number", "minimum": 0, "maximum": 16},
},
}
schema_y = {
"$schema": "http://json-schema.org/draft-04/schema#",
"type": "array",
"items": {"type": "integer", "minimum": 0, "maximum": 9},
}
(train_X, train_y), (test_X, test_y) = _bunch_to_df(
digits, schema_X, schema_y, test_size, random_state
)
return (train_X, train_y), (test_X, test_y)
def covtype_df(test_size=0.2, random_state=42):
covtype = sklearn.datasets.fetch_covtype()
schema_X = {
"description": "Features of forest covertypes dataset (classification).",
"documentation_url": "https://scikit-learn.org/0.20/datasets/index.html#forest-covertypes",
"type": "array",
"items": {
"type": "array",
"minItems": 54,
"maxItems": 54,
"items": [
{"description": "Elevation", "type": "integer"},
{"description": "Aspect", "type": "integer"},
{"description": "Slope", "type": "integer"},
{"description": "Horizontal_Distance_To_Hydrology", "type": "integer"},
{"description": "Vertical_Distance_To_Hydrology", "type": "integer"},
{"description": "Horizontal_Distance_To_Roadways", "type": "integer"},
{"description": "Hillshade_9am", "type": "integer"},
{"description": "Hillshade_Noon", "type": "integer"},
{"description": "Hillshade_3pm", "type": "integer"},
{
"description": "Horizontal_Distance_To_Fire_Points",
"type": "integer",
},
{"description": "Wilderness_Area1", "enum": [0, 1]},
{"description": "Wilderness_Area2", "enum": [0, 1]},
{"description": "Wilderness_Area3", "enum": [0, 1]},
{"description": "Wilderness_Area4", "enum": [0, 1]},
{"description": "Soil_Type1", "enum": [0, 1]},
{"description": "Soil_Type2", "enum": [0, 1]},
{"description": "Soil_Type3", "enum": [0, 1]},
{"description": "Soil_Type4", "enum": [0, 1]},
{"description": "Soil_Type5", "enum": [0, 1]},
{"description": "Soil_Type6", "enum": [0, 1]},
{"description": "Soil_Type7", "enum": [0, 1]},
{"description": "Soil_Type8", "enum": [0, 1]},
{"description": "Soil_Type9", "enum": [0, 1]},
{"description": "Soil_Type10", "enum": [0, 1]},
{"description": "Soil_Type11", "enum": [0, 1]},
{"description": "Soil_Type12", "enum": [0, 1]},
{"description": "Soil_Type13", "enum": [0, 1]},
{"description": "Soil_Type14", "enum": [0, 1]},
{"description": "Soil_Type15", "enum": [0, 1]},
{"description": "Soil_Type16", "enum": [0, 1]},
{"description": "Soil_Type17", "enum": [0, 1]},
{"description": "Soil_Type18", "enum": [0, 1]},
{"description": "Soil_Type19", "enum": [0, 1]},
{"description": "Soil_Type20", "enum": [0, 1]},
{"description": "Soil_Type21", "enum": [0, 1]},
{"description": "Soil_Type22", "enum": [0, 1]},
{"description": "Soil_Type23", "enum": [0, 1]},
{"description": "Soil_Type24", "enum": [0, 1]},
{"description": "Soil_Type25", "enum": [0, 1]},
{"description": "Soil_Type26", "enum": [0, 1]},
{"description": "Soil_Type27", "enum": [0, 1]},
{"description": "Soil_Type28", "enum": [0, 1]},
{"description": "Soil_Type29", "enum": [0, 1]},
{"description": "Soil_Type30", "enum": [0, 1]},
{"description": "Soil_Type31", "enum": [0, 1]},
{"description": "Soil_Type32", "enum": [0, 1]},
{"description": "Soil_Type33", "enum": [0, 1]},
{"description": "Soil_Type34", "enum": [0, 1]},
{"description": "Soil_Type35", "enum": [0, 1]},
{"description": "Soil_Type36", "enum": [0, 1]},
{"description": "Soil_Type37", "enum": [0, 1]},
{"description": "Soil_Type38", "enum": [0, 1]},
{"description": "Soil_Type39", "enum": [0, 1]},
{"description": "Soil_Type40", "enum": [0, 1]},
],
},
}
schema_y = {
"description": "Target of forest covertypes dataset (classification).",
"documentation_url": "https://scikit-learn.org/0.20/datasets/index.html#forest-covertypes",
"type": "array",
"items": {
"description": "The cover type, i.e., the dominant species of trees.",
"enum": [0, 1, 2, 3, 4, 5, 6],
},
}
(train_X, train_y), (test_X, test_y) = _bunch_to_df(
covtype, schema_X, schema_y, test_size, random_state
)
return (train_X, train_y), (test_X, test_y)
def california_housing_df(test_size=0.2, random_state=42):
housing = sklearn.datasets.fetch_california_housing()
schema_X = {
"description": "Features of California housing dataset (regression).",
"documentation_url": "https://scikit-learn.org/0.20/datasets/index.html#california-housing-dataset",
"type": "array",
"items": {
"type": "array",
"minItems": 8,
"maxItems": 8,
"items": [
{"description": "MedInc", "type": "number", "minimum": 0.0},
{"description": "HouseAge", "type": "number", "minimum": 0.0},
{"description": "AveRooms", "type": "number", "minimum": 0.0},
{"description": "AveBedrms", "type": "number", "minimum": 0.0},
{"description": "Population", "type": "number", "minimum": 0.0},
{"description": "AveOccup", "type": "number", "minimum": 0.0},
{"description": "Latitude", "type": "number", "minimum": 0.0},
{"description": "Longitude", "type": "number"},
],
},
}
schema_y = {
"description": "Target of California housing dataset (regression).",
"documentation_url": "https://scikit-learn.org/0.20/datasets/index.html#california-housing-dataset",
"type": "array",
"items": {
"description": "Median house value for California districts.",
"type": "number",
"minimum": 0.0,
},
}
(train_X, train_y), (test_X, test_y) = _bunch_to_df(
housing, schema_X, schema_y, test_size, random_state
)
return (train_X, train_y), (test_X, test_y)
def boston_housing_df(test_size=0.2, random_state=42):
housing = load_boston()
schema_X = {
"description": "Features of Boston house prices dataset (regression).",
"documentation_url": "https://scikit-learn.org/0.20/datasets/index.html#boston-house-prices-dataset",
"type": "array",
"items": {
"type": "array",
"minItems": 13,
"maxItems": 13,
"items": [
{"description": "CRIM", "type": "number", "minimum": 0.0},
{"description": "ZN", "type": "number", "minimum": 0.0},
{"description": "INDUS", "type": "number", "minimum": 0.0},
{"description": "CHAS", "enum": [0, 1]},
{"description": "NOX", "type": "number", "minimum": 0.0},
{"description": "RM", "type": "number", "minimum": 1.0},
{"description": "AGE", "type": "number", "minimum": 0.0},
{"description": "DIS", "type": "number", "minimum": 0.0},
{"description": "RAD", "type": "number", "minimum": 1},
{"description": "TAX", "type": "number", "minimum": 0.0},
{"description": "PRATIO", "type": "number", "minimum": 0.0},
{"description": "B", "type": "number", "minimum": 0.0},
{"description": "LSTAT", "type": "number", "minimum": 0.0},
],
},
}
schema_y = {
"description": "Target of Boston house prices dataset (regression).",
"documentation_url": "https://scikit-learn.org/0.20/datasets/index.html#boston-house-prices-dataset",
"type": "array",
"items": {
"description": "Median value of owner-occupied homes in $1000's (MEDV)",
"type": "number",
"minimum": 0.0,
},
}
(train_X, train_y), (test_X, test_y) = _bunch_to_df(
housing, schema_X, schema_y, test_size, random_state
)
return (train_X, train_y), (test_X, test_y)
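# --- Hedged usage sketch (illustrative, not part of the original module) -----
if __name__ == "__main__":
    # load_iris_df works offline; the fetch_*-based loaders above may download
    # their data on first use.
    (train_X, train_y), (test_X, test_y) = load_iris_df()
    print(train_X.shape, test_X.shape, train_y.name)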
| 12,087 | 44.787879 | 131 |
py
|
lale
|
lale-master/lale/datasets/movie_review.py
|
# Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import tarfile
import urllib.request
import numpy as np
def load_movie_review():
"""Loads the sentiment classification from a movie reviews dataset.
Read the readme from data/movie_review for more details.
"""
download_base_url = "https://www.cs.cornell.edu/people/pabo/movie%2Dreview%2Ddata/rt-polaritydata.tar.gz"
download_data_dir = os.path.join(
os.path.dirname(__file__), "data", "movie_review", "download_data"
)
data_file_path = os.path.join(download_data_dir, "rt-polaritydata.tar.gz")
if not os.path.exists(download_data_dir):
os.makedirs(download_data_dir)
print(f"created directory {download_data_dir}")
# this request is to a hardcoded https url, so does not risk leaking local data
urllib.request.urlretrieve(download_base_url, data_file_path) # nosec
X = []
y = []
with tarfile.open(data_file_path) as data_file:
data_file.extractall(path=download_data_dir) # nosec B202
with open(
os.path.join(download_data_dir, "rt-polaritydata", "rt-polarity.neg"), "rb"
) as neg_data_file:
for line in neg_data_file.readlines():
X.append(str(line))
y.append(-1)
with open(
os.path.join(download_data_dir, "rt-polaritydata", "rt-polarity.pos"), "rb"
) as pos_data_file:
for line in pos_data_file.readlines():
X.append(str(line))
y.append(1)
X = np.asarray(X, dtype=np.string_)
y = np.asarray(y)
from sklearn.utils import shuffle
X, y = shuffle(X, y)
return X, y
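# --- Hedged usage sketch (illustrative, not part of the original module) -----
if __name__ == "__main__":
    # Requires network access on first use: the corpus is downloaded and
    # unpacked under lale/datasets/data/movie_review/download_data.
    X, y = load_movie_review()
    print(X.shape, y.shape, y[:5])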
| 2,168 | 33.428571 | 109 |
py
|
lale
|
lale-master/lale/datasets/util.py
|
# Copyright 2021 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from typing import Any, Tuple, overload
import numpy as np
import pandas as pd
from sklearn.utils import Bunch
from lale.datasets.data_schemas import add_table_name, get_table_name
if sys.version_info >= (3, 8):
from typing import Literal # raises a mypy error for <3.8
else:
from typing_extensions import Literal
try:
from pyspark.sql import SparkSession
from lale.datasets.data_schemas import ( # pylint:disable=ungrouped-imports
SparkDataFrameWithIndex,
)
spark_installed = True
except ImportError:
spark_installed = False
def pandas2spark(pandas_df):
assert spark_installed
spark_session = (
SparkSession.builder.master("local[2]") # type: ignore
.config("spark.driver.memory", "64g")
.getOrCreate()
)
name = get_table_name(pandas_df)
if isinstance(pandas_df, pd.Series):
pandas_df = pandas_df.to_frame()
index_names = pandas_df.index.names
if len(index_names) == 1 and index_names[0] is None:
index_names = ["index"]
cols = list(pandas_df.columns) + list(index_names)
pandas_df = pandas_df.reset_index().reindex(columns=cols)
spark_dataframe = spark_session.createDataFrame(pandas_df)
spark_dataframe_with_index = SparkDataFrameWithIndex(spark_dataframe, index_names)
return add_table_name(spark_dataframe_with_index, name)
@overload
def load_boston(return_X_y: Literal[True]) -> Tuple[Any, Any]:
...
@overload
def load_boston(return_X_y: Literal[False] = False) -> Bunch:
...
def load_boston(return_X_y: bool = False):
data_url = "http://lib.stat.cmu.edu/datasets/boston"
raw_df = pd.read_csv(data_url, sep=r"\s+", skiprows=22, header=None)
data = np.hstack([raw_df.values[::2, :], raw_df.values[1::2, :2]])
target = raw_df.values[1::2, 2]
if return_X_y:
return (data, target)
else:
return Bunch(data=data, target=target)
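# --- Hedged usage sketch (illustrative, not part of the original module) -----
if __name__ == "__main__":
    # Requires network access: the Boston data is fetched from lib.stat.cmu.edu.
    X, y = load_boston(return_X_y=True)
    print(X.shape, y.shape)  # expected: (506, 13) (506,)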
| 2,504 | 30.3125 | 86 |
py
|
lale
|
lale-master/lale/datasets/__init__.py
|
# Copyright 2019, 2020, 2021 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Note: all imports should be done as
# from .xxx import XXX as XXX
# this ensures that pyright considers them to be publicly available
# and not private imports (this affects lale users that use pyright)
from .movie_review import load_movie_review as load_movie_review
from .sklearn_to_pandas import boston_housing_df as boston_housing_df
from .sklearn_to_pandas import california_housing_df as california_housing_df
from .sklearn_to_pandas import covtype_df as covtype_df
from .sklearn_to_pandas import digits_df as digits_df
from .sklearn_to_pandas import load_iris_df as load_iris_df
from .util import pandas2spark as pandas2spark
| 1,224 | 44.37037 | 77 |
py
|
lale
|
lale-master/lale/datasets/data_schemas.py
|
# Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, List, Optional, Tuple, Type, Union
import numpy as np
from numpy import issubdtype, ndarray
from pandas import DataFrame, Series
from pandas.core.groupby import DataFrameGroupBy, SeriesGroupBy
from scipy.sparse import csr_matrix
import lale.type_checking
from lale.helpers import _is_spark_df
from lale.type_checking import JSON_TYPE
try:
import torch
from torch import Tensor
torch_installed = True
except ImportError:
torch_installed = False
try:
import py4j.protocol
from pyspark.sql import DataFrame as SparkDataFrame
from pyspark.sql import GroupedData as SparkGroupedData
spark_installed = True
except ImportError:
spark_installed = False
# See instructions for subclassing numpy ndarray:
# https://docs.scipy.org/doc/numpy/user/basics.subclassing.html
class NDArrayWithSchema(ndarray):
def __new__(
cls,
shape,
dtype=float,
buffer=None,
offset=0,
strides=None,
order=None,
json_schema=None,
table_name=None,
):
result = super( # pylint:disable=too-many-function-args
NDArrayWithSchema, cls
).__new__(
cls, shape, dtype, buffer, offset, strides, order # type: ignore
)
result.json_schema = json_schema
result.table_name = table_name
return result
def __array_finalize__(self, obj):
if obj is None:
return
self.json_schema = getattr(obj, "json_schema", None)
self.table_name = getattr(obj, "table_name", None)
# See instructions for subclassing pandas DataFrame:
# https://pandas.pydata.org/pandas-docs/stable/development/extending.html#extending-subclassing-pandas
class DataFrameWithSchema(DataFrame):
_internal_names = DataFrame._internal_names + ["json_schema", "table_name"]
_internal_names_set = set(_internal_names)
@property
def _constructor(self):
return DataFrameWithSchema
class SeriesWithSchema(Series):
_internal_names = DataFrame._internal_names + [
"json_schema",
"table_name",
"folds_for_monoid",
]
_internal_names_set = set(_internal_names)
@property
def _constructor(self):
return SeriesWithSchema
if spark_installed:
def _gen_index_name(df, cpt=None):
name = f"index{cpt if cpt is not None else ''}"
if name in df.columns:
return _gen_index_name(df, cpt=cpt + 1 if cpt is not None else 0)
else:
return name
class SparkDataFrameWithIndex(SparkDataFrame): # type: ignore
def __init__(self, df, index_names=None):
if index_names is not None and len(index_names) == 1:
index_name = index_names[0]
elif index_names is None or len(index_names) == 0:
index_name = _gen_index_name(df)
index_names = [index_name]
else:
index_name = None
if index_name is not None and index_name not in df.columns:
df_with_index = (
df.rdd.zipWithIndex()
.map(lambda row: row[0] + (row[1],))
.toDF(df.columns + [index_name])
)
else:
df_with_index = df
table_name = get_table_name(df)
if table_name is not None:
df_with_index = df_with_index.alias(table_name)
super().__init__(df_with_index._jdf, df_with_index.sql_ctx)
self.index_name = index_name
self.index_names = index_names
for f in df.schema.fieldNames():
self.schema[f].metadata = df.schema[f].metadata
def drop_indexes(self):
result = self.drop(*self.index_names)
result = add_table_name(result, get_table_name(self))
return result
@property
def columns_without_indexes(self):
cols = list(self.columns)
for name in self.index_names:
cols.remove(name)
return cols
def toPandas(self, *args, **kwargs):
df = super().toPandas(*args, **kwargs)
return df.set_index(self.index_names)
else:
class SparkDataFrameWithIndex: # type: ignore
def __init__(self, df, index_names=None) -> None:
raise ValueError("pyspark is not installed") # type: ignore
@property
def index_name(self) -> Union[str, None]:
raise ValueError("pyspark is not installed") # type: ignore
@property
def index_names(self) -> List[str]:
raise ValueError("pyspark is not installed") # type: ignore
def toPandas(self, *args, **kwargs) -> DataFrame:
raise ValueError("pyspark is not installed") # type: ignore
@property
def schema(self) -> Any:
raise ValueError("pyspark is not installed") # type: ignore
def add_schema(obj, schema=None, raise_on_failure=False, recalc=False) -> Any:
from lale.settings import disable_data_schema_validation
if disable_data_schema_validation:
return obj
if obj is None:
return None
if isinstance(obj, NDArrayWithSchema):
result = obj
elif isinstance(obj, ndarray):
result = obj.view(NDArrayWithSchema)
elif isinstance(obj, SeriesWithSchema):
result = obj
elif isinstance(obj, Series):
result = SeriesWithSchema(obj)
elif isinstance(obj, DataFrameWithSchema):
result = obj
elif isinstance(obj, DataFrame):
result = DataFrameWithSchema(obj)
elif is_list_tensor(obj):
obj = np.array(obj)
result = obj.view(NDArrayWithSchema)
elif raise_on_failure:
raise ValueError(f"unexpected type(obj) {type(obj)}")
else:
return obj
if recalc:
setattr(result, "json_schema", None)
if getattr(result, "json_schema", None) is None:
if schema is None:
setattr(result, "json_schema", to_schema(obj))
else:
lale.type_checking.validate_is_schema(schema)
setattr(result, "json_schema", schema)
return result
def add_schema_adjusting_n_rows(obj, schema):
assert isinstance(obj, (ndarray, DataFrame, Series)), type(obj)
assert schema.get("type", None) == "array", schema
n_rows = obj.shape[0]
mod_schema = {**schema, "minItems": n_rows, "maxItems": n_rows}
result = add_schema(obj, mod_schema)
return result
def add_table_name(obj, name) -> Any:
if obj is None:
return None
if name is None:
return obj
if spark_installed and isinstance(obj, SparkDataFrame):
# alias method documentation: https://spark.apache.org/docs/latest/api/python/reference/api/pyspark.sql.DataFrame.alias.html
# Python class DataFrame with method alias(self, alias): https://github.com/apache/spark/blob/master/python/pyspark/sql/dataframe.py
# Scala type DataFrame: https://github.com/apache/spark/blob/master/sql/core/src/main/scala/org/apache/spark/sql/package.scala
# Scala class DataSet with method as(alias: String): https://github.com/apache/spark/blob/master/sql/core/src/main/scala/org/apache/spark/sql/Dataset.scala
o = obj.alias(name)
for f in obj.schema.fieldNames():
o.schema[f].metadata = obj.schema[f].metadata
if isinstance(obj, SparkDataFrameWithIndex):
o = SparkDataFrameWithIndex(o, obj.index_names)
return o
if isinstance(obj, NDArrayWithSchema):
result = obj.view(NDArrayWithSchema)
if hasattr(obj, "json_schema"):
result.json_schema = obj.json_schema
elif isinstance(obj, ndarray):
result = obj.view(NDArrayWithSchema)
elif isinstance(obj, SeriesWithSchema):
result = obj.copy(deep=False)
if hasattr(obj, "json_schema"):
result.json_schema = obj.json_schema
elif isinstance(obj, Series):
result = SeriesWithSchema(obj)
elif isinstance(obj, DataFrameWithSchema):
result = obj.copy(deep=False)
if hasattr(obj, "json_schema"):
result.json_schema = obj.json_schema
elif isinstance(obj, DataFrame):
result = DataFrameWithSchema(obj)
elif is_list_tensor(obj):
obj = np.array(obj)
result = obj.view(NDArrayWithSchema)
elif isinstance(obj, (DataFrameGroupBy, SeriesGroupBy)):
result = obj
elif spark_installed and isinstance(obj, SparkGroupedData):
result = obj
else:
raise ValueError(f"unexpected type(obj) {type(obj)}")
setattr(result, "table_name", name)
return result
def get_table_name(obj):
if spark_installed and isinstance(obj, SparkDataFrame):
# Python class DataFrame with field self._jdf: https://github.com/apache/spark/blob/master/python/pyspark/sql/dataframe.py
# Scala type DataFrame: https://github.com/apache/spark/blob/master/sql/core/src/main/scala/org/apache/spark/sql/package.scala
# Scala class DataSet with field queryExecution: https://github.com/apache/spark/blob/master/sql/core/src/main/scala/org/apache/spark/sql/Dataset.scala
# Scala fields turn into Java nullary methods
# Py4J exposes Java methods as Python methods
# Scala class QueryExecution with field analyzed: LogicalPlan: https://github.com/apache/spark/blob/master/sql/core/src/main/scala/org/apache/spark/sql/execution/QueryExecution.scala
spark_query = obj._jdf.queryExecution().analyzed() # type: ignore
try:
# calling spark_df.explain("extended") shows the analyzed contents
# after spark_df.alias("foo"), analyzed contents should be SubqueryAlias
            # Scala class SubqueryAlias with field identifier: https://github.com/apache/spark/blob/master/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/basicLogicalOperators.scala
# str(..) converts the Java string into a Python string
result = str(spark_query.identifier())
except py4j.protocol.Py4JError:
result = None
return result
if isinstance(
obj,
(
NDArrayWithSchema,
SeriesWithSchema,
DataFrameWithSchema,
DataFrameGroupBy,
SeriesGroupBy,
),
) or (spark_installed and isinstance(obj, SparkGroupedData)):
return getattr(obj, "table_name", None)
return None
def get_index_name(obj):
result = None
if spark_installed and isinstance(obj, SparkDataFrameWithIndex):
result = obj.index_name
elif isinstance(
obj,
(
SeriesWithSchema,
DataFrameWithSchema,
DataFrameGroupBy,
SeriesGroupBy,
),
):
result = obj.index.name
return result
def get_index_names(obj):
result = None
if spark_installed and isinstance(obj, SparkDataFrameWithIndex):
result = obj.index_names
elif isinstance(
obj,
(
SeriesWithSchema,
DataFrameWithSchema,
DataFrameGroupBy,
SeriesGroupBy,
),
):
result = obj.index.names
return result
def forward_metadata(old, new):
new = add_table_name(new, get_table_name(old))
if isinstance(old, SparkDataFrameWithIndex):
new = SparkDataFrameWithIndex(new, index_names=get_index_names(old))
return new
def strip_schema(obj):
if isinstance(obj, NDArrayWithSchema):
result = np.array(obj)
assert type(result) == ndarray # pylint:disable=unidiomatic-typecheck
elif isinstance(obj, SeriesWithSchema):
result = Series(obj)
assert type(result) == Series # pylint:disable=unidiomatic-typecheck
elif isinstance(obj, DataFrameWithSchema):
result = DataFrame(obj)
assert type(result) == DataFrame # pylint:disable=unidiomatic-typecheck
else:
result = obj
return result
def _dtype_to_schema(typ) -> JSON_TYPE:
result: JSON_TYPE
if typ is bool or issubdtype(typ, np.bool_):
result = {"type": "boolean"}
elif issubdtype(typ, np.unsignedinteger):
result = {"type": "integer", "minimum": 0}
elif issubdtype(typ, np.integer):
result = {"type": "integer"}
elif issubdtype(typ, np.number):
result = {"type": "number"}
elif issubdtype(typ, np.string_) or issubdtype(typ, np.unicode_):
result = {"type": "string"}
elif isinstance(typ, np.dtype):
if typ.fields:
props = {k: _dtype_to_schema(t) for k, t in typ.fields.items()}
result = {"type": "object", "properties": props}
elif typ.shape:
result = _shape_and_dtype_to_schema(typ.shape, typ.subdtype)
elif issubdtype(typ, np.object_):
result = {"type": "string"}
else:
assert False, f"unexpected dtype {typ}"
else:
assert False, f"unexpected non-dtype {typ}"
return result
def dtype_to_schema(typ) -> JSON_TYPE:
result = _dtype_to_schema(typ)
lale.type_checking.validate_is_schema(result)
return result
def _shape_and_dtype_to_schema(shape, dtype) -> JSON_TYPE:
result = _dtype_to_schema(dtype)
for dim in reversed(shape):
result = {"type": "array", "minItems": dim, "maxItems": dim, "items": result}
return result
def shape_and_dtype_to_schema(shape, dtype) -> JSON_TYPE:
result = _shape_and_dtype_to_schema(shape, dtype)
lale.type_checking.validate_is_schema(result)
return result
def list_tensor_to_shape_and_dtype(ls) -> Optional[Tuple[Tuple[int, ...], Type]]:
if isinstance(ls, (int, float, str)):
return ((), type(ls))
if isinstance(ls, list):
sub_result: Any = "Any"
for item in ls:
item_result = list_tensor_to_shape_and_dtype(item)
if item_result is None:
return None
if sub_result == "Any":
sub_result = item_result
elif sub_result != item_result:
return None
if sub_result == "Any" and len(ls) == 0:
return ((len(ls),) + (), int)
sub_shape, sub_dtype = sub_result
return ((len(ls),) + sub_shape, sub_dtype)
return None
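# Hedged examples of the intended behavior:
#
#   list_tensor_to_shape_and_dtype([[1, 2, 3], [4, 5, 6]])  # -> ((2, 3), int)
#   list_tensor_to_shape_and_dtype([[1], [2, 3]])           # -> None (ragged list)
#   list_tensor_to_shape_and_dtype(7.5)                     # -> ((), float)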
def is_list_tensor(obj) -> bool:
if isinstance(obj, list):
shape_and_dtype = list_tensor_to_shape_and_dtype(obj)
return shape_and_dtype is not None
return False
def _list_tensor_to_schema(ls) -> Optional[JSON_TYPE]:
shape_and_dtype = list_tensor_to_shape_and_dtype(ls)
if shape_and_dtype is None:
return None
result = _shape_and_dtype_to_schema(*shape_and_dtype)
return result
def list_tensor_to_schema(ls) -> Optional[JSON_TYPE]:
result = _list_tensor_to_schema(ls)
if result is None:
return None
lale.type_checking.validate_is_schema(result)
return result
def _ndarray_to_schema(array) -> JSON_TYPE:
assert isinstance(array, ndarray)
if (
isinstance(array, NDArrayWithSchema)
and hasattr(array, "json_schema")
and array.json_schema is not None
):
return array.json_schema
return _shape_and_dtype_to_schema(array.shape, array.dtype)
def ndarray_to_schema(array) -> JSON_TYPE:
result = _ndarray_to_schema(array)
lale.type_checking.validate_is_schema(result)
return result
def _csr_matrix_to_schema(matrix) -> JSON_TYPE:
assert isinstance(matrix, csr_matrix)
result = _shape_and_dtype_to_schema(matrix.shape, matrix.dtype)
result["isSparse"] = {} # true schema
return result
def csr_matrix_to_schema(matrix) -> JSON_TYPE:
result = _csr_matrix_to_schema(matrix)
lale.type_checking.validate_is_schema(result)
return result
def _dataframe_to_schema(df) -> JSON_TYPE:
assert isinstance(df, DataFrame)
if (
isinstance(df, DataFrameWithSchema)
and hasattr(df, "json_schema")
and df.json_schema is not None
):
return df.json_schema
n_rows, n_columns = df.shape
df_dtypes = df.dtypes
assert n_columns == len(df.columns) and n_columns == len(df_dtypes)
items = [
{"description": str(col), **_dtype_to_schema(df_dtypes[col])}
for col in df.columns
]
result = {
"type": "array",
"minItems": n_rows,
"maxItems": n_rows,
"items": {
"type": "array",
"minItems": n_columns,
"maxItems": n_columns,
"items": items,
},
}
return result
def dataframe_to_schema(df) -> JSON_TYPE:
result = _dataframe_to_schema(df)
lale.type_checking.validate_is_schema(result)
return result
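# Hedged example (assuming pandas is available): for a two-row dataframe
#   pd.DataFrame({"x": [1, 2], "y": [0.5, 1.5]})
# dataframe_to_schema produces a row-oriented schema along the lines of
#   {"type": "array", "minItems": 2, "maxItems": 2,
#    "items": {"type": "array", "minItems": 2, "maxItems": 2,
#              "items": [{"description": "x", "type": "integer"},
#                        {"description": "y", "type": "number"}]}}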
def _series_to_schema(series) -> JSON_TYPE:
assert isinstance(series, Series)
if (
isinstance(series, SeriesWithSchema)
and hasattr(series, "json_schema")
and series.json_schema is not None
):
return series.json_schema
(n_rows,) = series.shape
result = {
"type": "array",
"minItems": n_rows,
"maxItems": n_rows,
"items": {"description": str(series.name), **_dtype_to_schema(series.dtype)},
}
return result
def series_to_schema(series) -> JSON_TYPE:
result = _series_to_schema(series)
lale.type_checking.validate_is_schema(result)
return result
def _torch_tensor_to_schema(tensor) -> JSON_TYPE:
assert torch_installed, """Your Python environment does not have torch installed. You can install it with
pip install torch
or with
pip install 'lale[full]'"""
assert isinstance(tensor, Tensor)
result: JSON_TYPE
# https://pytorch.org/docs/stable/tensor_attributes.html#torch-dtype
if tensor.dtype == torch.bool:
result = {"type": "boolean"}
elif tensor.dtype == torch.uint8:
result = {"type": "integer", "minimum": 0, "maximum": 255}
elif torch.is_floating_point(tensor):
result = {"type": "number"}
else:
result = {"type": "integer"}
for dim in reversed(tensor.shape):
result = {"type": "array", "minItems": dim, "maxItems": dim, "items": result}
return result
def torch_tensor_to_schema(tensor) -> JSON_TYPE:
result = _torch_tensor_to_schema(tensor)
lale.type_checking.validate_is_schema(result)
return result
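# Hedged example (only meaningful when torch is installed; torch.zeros defaults
# to a floating-point dtype):
#
#   torch_tensor_to_schema(torch.zeros(4, 2))
#   # -> {"type": "array", "minItems": 4, "maxItems": 4,
#   #     "items": {"type": "array", "minItems": 2, "maxItems": 2,
#   #               "items": {"type": "number"}}}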
def is_liac_arff(obj) -> bool:
expected_types = {
"description": str,
"relation": str,
"attributes": list,
"data": list,
}
if not isinstance(obj, dict):
return False
for k, t in expected_types.items():
if k not in obj or not isinstance(obj[k], t):
return False
return True
def _liac_arff_to_schema(larff) -> JSON_TYPE:
assert is_liac_arff(
larff
), """Your Python environment might contain an 'arff' package different from 'liac-arff'. You can install it with
pip install 'liac-arff>=2.4.0'
or with
pip install 'lale[full]'"""
n_rows, n_columns = len(larff["data"]), len(larff["attributes"])
def larff_type_to_schema(larff_type) -> JSON_TYPE:
if isinstance(larff_type, str):
a2j = {
"numeric": "number",
"real": "number",
"integer": "integer",
"string": "string",
}
return {"type": a2j[larff_type.lower()]}
assert isinstance(larff_type, list)
return {"enum": [*larff_type]}
items = [
{"description": attr[0], **larff_type_to_schema(attr[1])}
for attr in larff["attributes"]
]
result = {
"type": "array",
"minItems": n_rows,
"maxItems": n_rows,
"items": {
"type": "array",
"minItems": n_columns,
"maxItems": n_columns,
"items": items,
},
}
return result
def liac_arff_to_schema(larff) -> JSON_TYPE:
result = _liac_arff_to_schema(larff)
lale.type_checking.validate_is_schema(result)
return result
def make_optional_schema(schema: JSON_TYPE) -> JSON_TYPE:
return {"anyOf": [schema, {"enum": [None]}]}
def _spark_df_to_schema(df) -> JSON_TYPE:
assert spark_installed, """Your Python environment does not have spark installed. You can install it with
pip install pyspark
"""
assert isinstance(df, SparkDataFrameWithIndex)
import pyspark.sql.types as stypes
from pyspark.sql.types import StructField, StructType
def maybe_make_optional(schema: JSON_TYPE, is_option: bool) -> JSON_TYPE:
if is_option:
return make_optional_schema(schema)
return schema
def spark_datatype_to_json_schema(dtype: stypes.DataType) -> JSON_TYPE:
if isinstance(dtype, stypes.ArrayType):
return {
"type": "array",
"items": maybe_make_optional(
spark_datatype_to_json_schema(dtype.elementType), dtype.containsNull
),
}
if isinstance(dtype, stypes.BooleanType):
return {"type": "boolean"}
if isinstance(dtype, stypes.DoubleType):
return {"type": "number"}
if isinstance(dtype, stypes.FloatType):
return {"type": "number"}
if isinstance(dtype, stypes.IntegerType):
return {"type": "integer"}
if isinstance(dtype, stypes.LongType):
return {"type": "integer"}
if isinstance(dtype, stypes.ShortType):
return {"type": "integer"}
if isinstance(dtype, stypes.NullType):
return {"enum": [None]}
if isinstance(dtype, stypes.StringType):
return {"type": "string"}
return {}
def spark_struct_field_to_json_schema(f: StructField) -> JSON_TYPE:
type_schema = spark_datatype_to_json_schema(f.dataType)
result = maybe_make_optional(type_schema, f.nullable)
if f.name is not None:
result["description"] = f.name
return result
def spark_struct_to_json_schema(
s: StructType, index_names, table_name: Optional[str] = None
) -> JSON_TYPE:
items = [
spark_struct_field_to_json_schema(f) for f in s if f.name not in index_names
]
num_items = len(items)
result = {
"type": "array",
"items": {
"type": "array",
"description": "rows",
"minItems": num_items,
"maxItems": num_items,
"items": items,
},
}
if table_name is not None:
result["description"] = table_name
return result
return spark_struct_to_json_schema(df.schema, df.index_names, get_table_name(df))
def spark_df_to_schema(df) -> JSON_TYPE:
result = _spark_df_to_schema(df)
lale.type_checking.validate_is_schema(result)
return result
def _to_schema(obj) -> JSON_TYPE:
result = None
if obj is None:
result = {"enum": [None]}
elif isinstance(obj, ndarray):
result = _ndarray_to_schema(obj)
elif isinstance(obj, csr_matrix):
result = _csr_matrix_to_schema(obj)
elif isinstance(obj, DataFrame):
result = _dataframe_to_schema(obj)
elif isinstance(obj, Series):
result = _series_to_schema(obj)
elif torch_installed and isinstance(obj, Tensor):
result = _torch_tensor_to_schema(obj)
elif is_liac_arff(obj):
result = _liac_arff_to_schema(obj)
elif isinstance(obj, list):
result = _list_tensor_to_schema(obj)
elif _is_spark_df(obj):
result = _spark_df_to_schema(obj)
elif lale.type_checking.is_schema(obj):
result = obj
        # no need to validate the schema again, since obj already is one
return result # type: ignore
if result is None:
raise ValueError(f"to_schema(obj), type {type(obj)}, value {obj}")
return result
def to_schema(obj) -> JSON_TYPE:
result = _to_schema(obj)
lale.type_checking.validate_is_schema(result)
return result
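# Hedged sketch of the dispatch: to_schema accepts numpy arrays, scipy CSR
# matrices, pandas dataframes and series, torch tensors, liac-arff dictionaries,
# regular nested lists, Spark dataframes with index, or an existing JSON schema.
#
#   to_schema(np.zeros(3))
#   # -> {"type": "array", "minItems": 3, "maxItems": 3, "items": {"type": "number"}}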
| 24,607 | 32.389417 | 205 |
py
|
lale
|
lale-master/lale/datasets/multitable/util.py
|
# Copyright 2019, 2020, 2021 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, List, Tuple
from numpy import random
try:
from pyspark.sql import SparkSession
spark_installed = True
except ImportError:
spark_installed = False
from lale.datasets.data_schemas import add_table_name, get_table_name
from lale.helpers import _is_pandas_df, _is_spark_df, randomstate_type
def multitable_train_test_split(
dataset: List[Any],
main_table_name: str,
label_column_name: str,
test_size: float = 0.25,
random_state: randomstate_type = None,
) -> Tuple:
"""
    Splits a multi-table dataset into random train and test subsets.
    The split is computed on the rows of the main table and the labels are taken
    from its label column; the other tables are passed through unchanged.
    Behaves similarly to the `train_test_split`_ function from scikit-learn.
.. _`train_test_split`: https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html
Parameters
----------
dataset : list of either Pandas or Spark dataframes
Each dataframe in the list corresponds to an entity/table in the multi-table setting.
main_table_name : string
The name of the main table as the split is going to be based on the main table.
label_column_name : string
The name of the label column from the main table.
test_size : float or int, default=0.25
If float, should be between 0.0 and 1.0 and represent the proportion of the dataset to include in the test split.
If int, represents the absolute number of test samples.
random_state : int, RandomState instance or None, default=None
Controls the shuffling applied to the data before applying the split.
Pass an integer for reproducible output across multiple function calls.
- None
RandomState used by numpy.random
- numpy.random.RandomState
Use the provided random state, only affecting other users of that same random state instance.
- integer
Explicit seed.
Returns
-------
result : tuple
- item 0: train_X, List of datasets corresponding to the train split
- item 1: test_X, List of datasets corresponding to the test split
- item 2: train_y
- item 3: test_y
Raises
------
    ValueError
        Bad configuration. Either the table name was not found, or the provided list does not
        contain Spark or pandas dataframes.
"""
main_table_df = None
index_of_main_table = -1
for i, df in enumerate(dataset):
if get_table_name(df) == main_table_name:
main_table_df = df
index_of_main_table = i
if main_table_df is None:
table_names = [get_table_name(df) for df in dataset]
raise ValueError(
f"Could not find {main_table_name} in the given dataset, the table names are {table_names}"
)
if _is_pandas_df(main_table_df):
num_rows = len(main_table_df)
elif _is_spark_df(main_table_df):
# main_table_df = main_table_df.toPandas()
num_rows = main_table_df.count()
else:
raise ValueError(
"multitable_train_test_split can only work with a list of Pandas or Spark dataframes."
)
if 0 < test_size < 1:
num_test_rows = int(num_rows * test_size)
else:
num_test_rows = int(test_size)
    # honor the random_state argument; fall back to a fresh RandomState for None or an int seed
    if isinstance(random_state, random.RandomState):
        rng = random_state
    else:
        rng = random.RandomState(random_state)
    test_indices = rng.choice(range(num_rows), num_test_rows, replace=False)
train_indices = list(set([*range(num_rows)]) - set(test_indices.tolist()))
assert len(test_indices) + len(train_indices) == num_rows
train_dataset = list(dataset)
test_dataset = list(dataset)
if _is_pandas_df(main_table_df):
train_main_df = main_table_df.iloc[train_indices]
test_main_df = main_table_df.iloc[test_indices]
train_y = train_main_df[label_column_name]
test_y = test_main_df[label_column_name]
elif _is_spark_df(main_table_df):
spark_session = SparkSession.builder.appName( # type: ignore
"multitable_train_test_split"
).getOrCreate()
train_main_df = spark_session.createDataFrame(
data=main_table_df.toPandas().iloc[train_indices]
)
test_main_df = spark_session.createDataFrame(
data=main_table_df.toPandas().iloc[test_indices]
)
train_y = train_main_df.select(label_column_name)
test_y = test_main_df.select(label_column_name)
else:
raise ValueError(
"multitable_train_test_split can only work with a list of Pandas or Spark dataframes."
)
train_main_df = add_table_name(train_main_df, main_table_name)
test_main_df = add_table_name(test_main_df, main_table_name)
train_dataset[index_of_main_table] = train_main_df
test_dataset[index_of_main_table] = test_main_df
return train_dataset, test_dataset, train_y, test_y
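# Hedged usage sketch (the table and column names below are hypothetical placeholders):
#
#   tables = [...]  # list of pandas or spark dataframes tagged via add_table_name
#   train_X, test_X, train_y, test_y = multitable_train_test_split(
#       tables,
#       main_table_name="orders",
#       label_column_name="label",
#       test_size=0.2,
#       random_state=42,
#   )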
| 5,377 | 33.254777 | 123 |
py
|
lale
|
lale-master/lale/datasets/multitable/__init__.py
|
# Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Functions:
================
* `multitable_train_test_split`_
.. _`multitable_train_test_split`: lale.datasets.multitable.util.html#lale.datasets.multitable.util.multitable_train_test_split
"""
# Note: all imports should be done as
# from .xxx import XXX as XXX
# this ensures that pyright considers them to be publicly available
# and not private imports (this affects lale users that use pyright)
from .fetch_datasets import (
fetch_creditg_multitable_dataset as fetch_creditg_multitable_dataset,
)
from .util import multitable_train_test_split as multitable_train_test_split
| 1,166 | 33.323529 | 127 |
py
|
lale
|
lale-master/lale/datasets/multitable/fetch_datasets.py
|
# Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import urllib.request
import numpy as np
import pandas as pd
import lale.datasets.openml
from lale.datasets.data_schemas import add_table_name
from lale.helpers import datatype_param_type
logging.basicConfig()
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
try:
from pyspark.sql import SparkSession
from lale.datasets.data_schemas import ( # pylint:disable=ungrouped-imports
SparkDataFrameWithIndex,
)
spark_installed = True
except ImportError:
spark_installed = False
def get_data_from_csv(datatype: datatype_param_type, data_file_name):
datatype = datatype.casefold() # type: ignore
if datatype == "pandas":
return pd.read_csv(data_file_name)
elif datatype == "spark":
if spark_installed:
spark = SparkSession.builder.appName("GoSales Dataset").getOrCreate() # type: ignore
df = spark.read.options(inferSchema="True", delimiter=",").csv(
data_file_name, header=True
)
return SparkDataFrameWithIndex(df)
else:
raise ValueError("Spark is not installed on this machine.")
else:
raise ValueError(
"Can fetch the go_sales data in pandas or spark dataframes only. Pass either 'pandas' or 'spark' in datatype parameter."
)
def fetch_go_sales_dataset(datatype: datatype_param_type = "pandas"):
"""
Fetches the Go_Sales dataset from IBM's Watson's ML samples.
It contains information about daily sales, methods, retailers
    and products of a company in the form of 5 CSV files.
This method downloads and stores these 5 CSV files under the
    'lale/lale/datasets/multitable/go_sales_data' directory. It creates
    this directory itself if it does not exist.
Dataset URL: https://github.com/IBM/watson-machine-learning-samples/raw/master/cloud/data/go_sales/
Parameters
----------
datatype : string, optional, default 'pandas'
      If 'pandas',
        Returns a list of pandas dataframes (one per table in the dataset) after
        reading the downloaded CSV files. Each dataframe is annotated with its
        table name, retrievable via `lale.datasets.data_schemas.get_table_name`.
      If 'spark',
        Returns a list of spark dataframes (one per table in the dataset) after
        reading the downloaded CSV files. Each dataframe is annotated with its
        table name and extended with an index column.
      Else,
        Throws an error as it does not support any other return type.
    Returns
    -------
    go_sales_list : list of pandas / spark dataframes, each annotated with a table name
"""
download_data_dir = os.path.join(os.path.dirname(__file__), "go_sales_data")
base_url = "https://github.com/IBM/watson-machine-learning-samples/raw/master/cloud/data/go_sales/"
filenames = [
"go_1k.csv",
"go_daily_sales.csv",
"go_methods.csv",
"go_products.csv",
"go_retailers.csv",
]
go_sales_list = []
for file in filenames:
data_file_name = os.path.join(download_data_dir, file)
if not os.path.exists(data_file_name):
if not os.path.exists(download_data_dir):
os.makedirs(download_data_dir)
# this request is to a hardcoded https url, so does not risk leaking local data
urllib.request.urlretrieve(base_url + file, data_file_name) # nosec
logger.info(f" Created: {data_file_name}")
table_name = file.split(".", maxsplit=1)[0]
data_frame = get_data_from_csv(datatype, data_file_name)
go_sales_list.append(add_table_name(data_frame, table_name))
logger.info(" Fetched the Go_Sales dataset. Process completed.")
return go_sales_list
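# Hedged usage sketch:
#
#   from lale.datasets.data_schemas import get_table_name
#   go_sales = fetch_go_sales_dataset("pandas")
#   [get_table_name(df) for df in go_sales]
#   # -> ['go_1k', 'go_daily_sales', 'go_methods', 'go_products', 'go_retailers']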
def fetch_imdb_dataset(datatype: datatype_param_type = "pandas"):
"""
Fetches the IMDB movie dataset from Relational Dataset Repo.
It contains information about directors, actors, roles
    and genres of multiple movies in the form of 7 CSV files.
    This method expects these 7 CSV files to already be present under the
    'lale/lale/datasets/multitable/imdb_data' directory; they can be obtained
    using the lalegpl repository.
Dataset URL: https://relational.fit.cvut.cz/dataset/IMDb
Parameters
----------
datatype : string, optional, default 'pandas'
      If 'pandas',
        Returns a list of pandas dataframes (one per table in the dataset) after
        reading the already existing CSV files. Each dataframe is annotated with
        its table name, retrievable via `lale.datasets.data_schemas.get_table_name`.
      If 'spark',
        Returns a list of spark dataframes (one per table in the dataset) after
        reading the already existing CSV files. Each dataframe is annotated with
        its table name and extended with an index column.
      Else,
        Throws an error as it does not support any other return type.
    Returns
    -------
    imdb_list : list of pandas / spark dataframes, each annotated with a table name
    Raises
    ------
    ValueError
        dataset not found
"""
download_data_dir = os.path.join(os.path.dirname(__file__), "imdb_data")
imdb_list = []
if not os.path.exists(download_data_dir):
raise ValueError(
f"IMDB dataset not found at {download_data_dir}. Please download it using lalegpl repository."
)
for _root, _dirs, files in os.walk(download_data_dir):
for file in files:
filename, extension = os.path.splitext(file)
if extension == ".csv":
data_file_name = os.path.join(download_data_dir, file)
table_name = filename
data_frame = get_data_from_csv(datatype, data_file_name)
imdb_list.append(add_table_name(data_frame, table_name))
if len(imdb_list) == 7:
logger.info(" Fetched the IMDB dataset. Process completed.")
else:
raise ValueError(
f"Incomplete IMDB dataset found at {download_data_dir}. Please download complete dataset using lalegpl repository."
)
return imdb_list
def fetch_creditg_multitable_dataset(datatype: datatype_param_type = "pandas"):
"""
Fetches credit-g dataset from OpenML, but in a multi-table format.
It transforms the [credit-g](https://www.openml.org/d/31) dataset from OpenML
to a multi-table format. We split the dataset into 3 tables: `loan_application`,
`bank_account_info` and `existing_credits_info`.
The table `loan_application` serves as our primary table,
and we treat the other two tables as providing additional information related to
the applicant's bank account and existing credits. As one can see, this is very
close to a real life scenario where information is present in multiple tables in
normalized forms. We created a primary key column `id` as a proxy to the loan applicant's
identity number.
Parameters
----------
datatype : string, optional, default 'pandas'
      If 'pandas',
        Returns a list of pandas dataframes (one per table), each annotated with
        its table name, retrievable via `lale.datasets.data_schemas.get_table_name`.
    Returns
    -------
    dataframes_list : list of pandas dataframes, each annotated with a table name
"""
(train_X, train_y), (test_X, test_y) = lale.datasets.openml.fetch(
"credit-g", "classification", preprocess=False
)
# vstack train and test
X = pd.concat([train_X, test_X], axis=0)
y = pd.concat([train_y, test_y], axis=0)
bank_account_columns = ["checking_status", "savings_status"]
loan_application_columns = [
"duration",
"credit_history",
"purpose",
"credit_amount",
"employment",
"installment_commitment",
"personal_status",
"other_parties",
"residence_since",
"property_magnitude",
"age",
"other_payment_plans",
"housing",
"job",
"num_dependents",
"own_telephone",
"foreign_worker",
]
dataframes_list = []
bank_acc_df = X[bank_account_columns]
bank_acc_df = bank_acc_df.copy()
bank_acc_df.insert(0, "id", bank_acc_df.index)
dataframes_list.append(add_table_name(bank_acc_df, "bank_account_info"))
loan_application_df = X[loan_application_columns]
loan_application_df = loan_application_df.copy()
loan_application_df.insert(0, "id", loan_application_df.index)
loan_application_df["class"] = y
loan_application_df.iloc[2, 7] = "M single"
loan_application_df.iloc[996, 7] = "M single"
loan_application_df.iloc[998, 7] = "F div/dep/mar"
dataframes_list.append(add_table_name(loan_application_df, "loan_application"))
# existing credits is a fake table we are adding, so a join and count can create the `existing_credits` column
df_col = X["existing_credits"]
records = []
    # Series.iteritems was removed in newer pandas; items() works across versions
    for row in df_col.items():
row_id = row[0]
credit_count = int(row[1])
for _i in range(credit_count):
records.append(
{
"id": row_id,
"credit_number": np.random.randint(1, 1000000),
"type": "credit",
"status": "on",
}
)
existing_credits_df = pd.DataFrame.from_records(records)
dataframes_list.append(add_table_name(existing_credits_df, "existing_credits_info"))
return dataframes_list
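# Hedged usage sketch: the three tables share the synthetic `id` column and can
# be re-joined on it downstream.
#
#   from lale.datasets.data_schemas import get_table_name
#   tables = fetch_creditg_multitable_dataset("pandas")
#   [get_table_name(df) for df in tables]
#   # -> ['bank_account_info', 'loan_application', 'existing_credits_info']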
| 10,468 | 37.347985 | 132 |
py
|
lale
|
lale-master/lale/datasets/uci/uci_datasets.py
|
# Copyright 2019-2023 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import tempfile
import urllib.request
import zipfile
import numpy as np
import pandas as pd
import lale.datasets.data_schemas
download_data_dir = os.path.join(os.path.dirname(__file__), "download_data")
download_data_url = "http://archive.ics.uci.edu/static/public"
def download(dataset_id, zip_name, contents_files):
zip_url = f"{download_data_url}/{dataset_id}/{zip_name}"
data_dir = os.path.join(download_data_dir, dataset_id)
if not os.path.exists(data_dir):
os.makedirs(data_dir)
full_file_names = [os.path.join(data_dir, base) for base in contents_files]
def all_downloaded():
for full in full_file_names:
if not os.path.exists(full):
return False
return True
if not all_downloaded():
with tempfile.NamedTemporaryFile(suffix=".zip") as tmp_zip_file:
# this request is to a string that begins with a hardcoded http url, so does not risk leaking local data
urllib.request.urlretrieve(zip_url, tmp_zip_file.name) # nosec
with zipfile.ZipFile(tmp_zip_file.name) as myzip:
for full, base in zip(full_file_names, contents_files):
if not os.path.exists(full):
myzip.extract(base, data_dir)
assert all_downloaded()
return full_file_names
def tsv_to_Xy(file_name, target_col, schema_orig):
data_all = pd.read_csv(file_name, sep="\t")
row_schema_X = [
col_schema
for col_schema in schema_orig["items"]["items"]
if col_schema["description"] != target_col
]
columns_X = [col_schema["description"] for col_schema in row_schema_X]
data_X = data_all.loc[:, columns_X]
nrows, ncols_X = data_X.shape
schema_X = {
**schema_orig,
"minItems": nrows,
"maxItems": nrows,
"items": {
"type": "array",
"minItems": ncols_X,
"maxItems": ncols_X,
"items": row_schema_X,
},
}
data_X = lale.datasets.data_schemas.add_schema(data_X, schema_X)
row_schema_y = [
col_schema
for col_schema in schema_orig["items"]["items"]
if col_schema["description"] == target_col
]
data_y = data_all[target_col]
schema_y = {
**schema_orig,
"minItems": nrows,
"maxItems": nrows,
"items": row_schema_y[0],
}
data_y = lale.datasets.data_schemas.add_schema(data_y, schema_y)
return data_X, data_y
def fetch_drugscom():
files = download(
"462",
"drug+review+dataset+drugs+com.zip",
["drugsComTest_raw.tsv", "drugsComTrain_raw.tsv"],
)
target_col = "rating"
json_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"type": "array",
"items": {
"type": "array",
"minItems": 6,
"maxItems": 6,
"items": [
{"description": "drugName", "type": "string"},
{
"description": "condition",
"anyOf": [{"type": "string"}, {"enum": [np.NaN]}],
},
{"description": "review", "type": "string"},
{
"description": "rating",
"enum": [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0],
},
{"description": "date", "type": "string"},
{"description": "usefulCount", "type": "integer", "minimum": 0},
],
},
}
test_X, test_y = tsv_to_Xy(files[0], target_col, json_schema)
train_X, train_y = tsv_to_Xy(files[1], target_col, json_schema)
return train_X, train_y, test_X, test_y
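# Hedged usage sketch (columns follow the json_schema above; 'rating' is the target):
#
#   train_X, train_y, test_X, test_y = fetch_drugscom()
#   list(train_X.columns)
#   # -> ['drugName', 'condition', 'review', 'date', 'usefulCount']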
def fetch_household_power_consumption():
file_name = download(
"235",
"individual+household+electric+power+consumption.zip",
["household_power_consumption.txt"],
)
df = pd.read_csv(file_name[0], sep=";")
return df
| 4,549 | 32.703704 | 116 |
py
|
lale
|
lale-master/lale/datasets/uci/__init__.py
|
# Note: all imports should be done as
# from .xxx import XXX as XXX
# this ensures that pyright considers them to be publicly available
# and not private imports (this affects lale users that use pyright)
from .uci_datasets import fetch_drugscom as fetch_drugscom
from .uci_datasets import (
fetch_household_power_consumption as fetch_household_power_consumption,
)
| 371 | 36.2 | 75 |
py
|
lale
|
lale-master/lale/datasets/openml/__init__.py
|
# Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Note: all imports should be done as
# from .xxx import XXX as XXX
# this ensures that pyright considers them to be publicly available
# and not private imports (this affects lale users that use pyright)
from .openml_datasets import fetch as fetch
| 826 | 38.380952 | 74 |
py
|
lale
|
lale-master/lale/datasets/openml/openml_datasets.py
|
# Copyright 2019-2023 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import urllib.request
from typing import Any, Dict, Optional, Union, cast
import numpy as np
import pandas as pd
import sklearn
from packaging import version
from sklearn.compose import ColumnTransformer
from sklearn.impute import SimpleImputer
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
sklearn_version = version.parse(getattr(sklearn, "__version__"))
try:
import arff
except ModuleNotFoundError as import_exc:
raise ModuleNotFoundError(
"""Package 'arff' not found. You can install it with
pip install 'liac-arff>=2.4.0'
or with
pip install 'lale[full]'"""
) from import_exc
download_data_dir = os.path.join(os.path.dirname(__file__), "download_data")
experiments_dict: Dict[str, Dict[str, Union[str, int]]] = {}
# 1.25
experiments_dict["vehicle"] = {
"download_arff_url": "https://www.openml.org/data/download/54/dataset_54_vehicle.arff",
"download_csv_url": "https://www.openml.org/data/get_csv/54/dataset_54_vehicle.arff",
"task_type": "classification",
"target": "class",
"n_rows": 846,
}
# 1.3
experiments_dict["blood-transfusion-service-center"] = {
"download_arff_url": "https://www.openml.org/data/download/1586225/php0iVrYT",
"download_csv_url": "https://www.openml.org/data/get_csv/1586225/php0iVrYT",
"task_type": "classification",
"target": "class",
"n_rows": 748,
}
# 1.5
experiments_dict["car"] = {
"download_arff_url": "https://www.openml.org/data/download/18116966/php2jDIhh",
"download_csv_url": "https://www.openml.org/data/get_csv/18116966/php2jDIhh",
"task_type": "classification",
"target": "class",
"n_rows": 1728,
}
# 1.6
experiments_dict["kc1"] = {
"download_arff_url": "https://www.openml.org/data/download/53950/kc1.arff",
"download_csv_url": "https://www.openml.org/data/get_csv/53950/kc1.arff",
"task_type": "classification",
"target": "defects",
"n_rows": 2109,
}
# 2.6
experiments_dict["Australian"] = {
"download_arff_url": "https://www.openml.org/data/download/18151910/phpelnJ6y",
"download_csv_url": "https://www.openml.org/data/get_csv/18151910/phpelnJ6y",
"task_type": "classification",
"target": "a15",
"n_rows": 690,
}
# 3.1
experiments_dict["credit-g"] = {
"download_arff_url": "https://www.openml.org/data/download/31/dataset_31_credit-g.arff",
"download_csv_url": "https://www.openml.org/data/get_csv/31/dataset_31_credit-g.arff",
"task_type": "classification",
"target": "class",
"n_rows": 1000,
}
# 3.4
experiments_dict["phoneme"] = {
"download_arff_url": "https://www.openml.org/data/download/1592281/php8Mz7BG",
"download_csv_url": "https://www.openml.org/data/get_csv/1592281/php8Mz7BG",
"task_type": "classification",
"target": "class",
"n_rows": 5404,
}
# 3.6
experiments_dict["kr-vs-kp"] = {
"download_arff_url": "https://www.openml.org/data/download/3/dataset_3_kr-vs-kp.arff",
"download_csv_url": "https://www.openml.org/data/get_csv/3/dataset_3_kr-vs-kp.arff",
"task_type": "classification",
"target": "class",
"n_rows": 3196,
}
# 4.0
experiments_dict["mfeat-factors"] = {
"download_arff_url": "https://www.openml.org/data/download/12/dataset_12_mfeat-factors.arff",
"download_csv_url": "https://www.openml.org/data/get_csv/12/dataset_12_mfeat-factors.arff",
"task_type": "classification",
"target": "class",
"n_rows": 2000,
}
# 5.9
experiments_dict["cnae-9"] = {
"download_arff_url": "https://www.openml.org/data/download/1586233/phpmcGu2X",
"download_csv_url": "https://www.openml.org/data/get_csv/1586233/phpmcGu2X",
"task_type": "classification",
"target": "class",
"n_rows": 1080,
}
# 8.1
experiments_dict["sylvine"] = {
"download_arff_url": "https://www.openml.org/data/download/19335519/file7a97574fa9ae.arff",
"download_csv_url": "https://www.openml.org/data/get_csv/19335519/file7a97574fa9ae.arff",
"task_type": "classification",
"target": "class",
"n_rows": 5124,
}
# 17
experiments_dict["jungle_chess_2pcs_raw_endgame_complete"] = {
"download_arff_url": "https://www.openml.org/data/download/18631418/jungle_chess_2pcs_raw_endgame_complete.arff",
"download_csv_url": "https://www.openml.org/data/get_csv/18631418/jungle_chess_2pcs_raw_endgame_complete.arff",
"task_type": "classification",
"target": "class",
"n_rows": 44819,
}
# 32
experiments_dict["shuttle"] = {
"download_arff_url": "https://www.openml.org/data/download/4965262/shuttle.arff",
"download_csv_url": "https://www.openml.org/data/get_csv/4965262/shuttle.arff",
"task_type": "classification",
"target": "class",
"n_rows": 58000,
}
# 55
experiments_dict["jasmine"] = {
"download_arff_url": "https://www.openml.org/data/download/19335516/file79b563a1a18.arff",
"download_csv_url": "https://www.openml.org/data/get_csv/19335516/file79b563a1a18.arff",
"task_type": "classification",
"target": "class",
"n_rows": 2984,
}
# 118
experiments_dict["fabert"] = {
"download_arff_url": "https://www.openml.org/data/download/19335687/file1c555f4ca44d.arff",
"download_csv_url": "https://www.openml.org/data/get_csv/19335687/file1c555f4ca44d.arff",
"task_type": "classification",
"target": "class",
"n_rows": 8237,
}
# 226
experiments_dict["helena"] = {
"download_arff_url": "https://www.openml.org/data/download/19335692/file1c556677f875.arff",
"download_csv_url": "https://www.openml.org/data/get_csv/19335692/file1c556677f875.arff",
"task_type": "classification",
"target": "class",
"n_rows": 65196,
}
# 230
experiments_dict["bank-marketing"] = {
"download_arff_url": "https://www.openml.org/data/download/1586218/phpkIxskf",
"download_csv_url": "https://www.openml.org/data/get_csv/1586218/phpkIxskf",
"task_type": "classification",
"target": "class",
"n_rows": 4521,
}
# 407
experiments_dict["nomao"] = {
"download_arff_url": "https://www.openml.org/data/download/1592278/phpDYCOet",
"download_csv_url": "https://www.openml.org/data/get_csv/1592278/phpDYCOet",
"task_type": "classification",
"target": "class",
"n_rows": 34465,
}
# 425
experiments_dict["dilbert"] = {
"download_arff_url": "https://www.openml.org/data/download/19335686/file1c5552c0c4b0.arff",
"download_csv_url": "https://www.openml.org/data/get_csv/19335686/file1c5552c0c4b0.arff",
"task_type": "classification",
"target": "class",
"n_rows": 10000,
}
# 442
experiments_dict["numerai28.6"] = {
"download_arff_url": "https://www.openml.org/data/download/2160285/phpg2t68G",
"download_csv_url": "https://www.openml.org/data/get_csv/2160285/phpg2t68G",
"task_type": "classification",
"target": "attribute_21",
"n_rows": 96320,
}
# 457
experiments_dict["prnn_cushings"] = {
"task_type": "classification",
"target": "type",
"download_arff_url": "https://www.openml.org/data/download/52569/prnn_cushings.arff",
"download_csv_url": "https://www.openml.org/data/get_csv/52569/prnn_cushings.csv",
"n_rows": 27,
}
# 503
experiments_dict["adult"] = {
"download_arff_url": "https://www.openml.org/data/download/1595261/phpMawTba",
"download_csv_url": "https://www.openml.org/data/get_csv/1595261/phpMawTba",
"task_type": "classification",
"target": "class",
"n_rows": 48842,
}
# 633
experiments_dict["higgs"] = {
"download_arff_url": "https://www.openml.org/data/download/2063675/phpZLgL9q",
"download_csv_url": "https://www.openml.org/data/get_csv/2063675/phpZLgL9q",
"task_type": "classification",
"target": "class",
"n_rows": 98050,
}
# 981
experiments_dict["christine"] = {
"download_arff_url": "https://www.openml.org/data/download/19335515/file764d5d063390.arff",
"download_csv_url": "https://www.openml.org/data/get_csv/19335515/file764d5d063390.arff",
"task_type": "classification",
"target": "class",
"n_rows": 5418,
}
# 1169
experiments_dict["jannis"] = {
"download_arff_url": "https://www.openml.org/data/download/19335691/file1c558ee247d.arff",
"download_csv_url": "https://www.openml.org/data/get_csv/19335691/file1c558ee247d.arff",
"task_type": "classification",
"target": "class",
"n_rows": 83733,
}
# 1503
experiments_dict["connect-4"] = {
"download_arff_url": "https://www.openml.org/data/download/4965243/connect-4.arff",
"download_csv_url": "https://www.openml.org/data/get_csv/4965243/connect-4.arff",
"task_type": "classification",
"target": "class",
"n_rows": 67557,
}
# 1580
experiments_dict["volkert"] = {
"download_arff_url": "https://www.openml.org/data/download/19335689/file1c556e3db171.arff",
"download_csv_url": "https://www.openml.org/data/get_csv/19335689/file1c556e3db171.arff",
"task_type": "classification",
"target": "class",
"n_rows": 58310,
}
# 2112
experiments_dict["APSFailure"] = {
"download_arff_url": "https://www.openml.org/data/download/19335511/aps_failure.arff",
"download_csv_url": "https://www.openml.org/data/get_csv/19335511/aps_failure.arff",
"task_type": "classification",
"target": "class",
"n_rows": 76000,
}
# 3700
experiments_dict["riccardo"] = {
"download_arff_url": "https://www.openml.org/data/download/19335534/file7b535210a7df.arff",
"download_csv_url": "https://www.openml.org/data/get_csv/19335534/file7b535210a7df.arff",
"task_type": "classification",
"target": "class",
"n_rows": 20000,
}
# 3759
experiments_dict["guillermo"] = {
"download_arff_url": "https://www.openml.org/data/download/19335532/file7b5323e77330.arff",
"download_csv_url": "https://www.openml.org/data/get_csv/19335532/file7b5323e77330.arff",
"task_type": "classification",
"target": "class",
"n_rows": 20000,
}
experiments_dict["albert"] = {
"download_arff_url": "https://www.openml.org/data/download/19335520/file7b53746cbda2.arff",
"download_csv_url": "https://www.openml.org/data/get_csv/19335520/file7b53746cbda2.arff",
"task_type": "classification",
"target": "class",
"n_rows": 425240,
}
experiments_dict["robert"] = {
"download_arff_url": "https://www.openml.org/data/download/19335688/file1c55384ec217.arff",
"download_csv_url": "https://www.openml.org/data/get_csv/19335688/file1c55384ec217.arff",
"task_type": "classification",
"target": "class",
"n_rows": 10000,
}
experiments_dict["covertype"] = {
"download_arff_url": "https://www.openml.org/data/download/1601911/phpQOf0wY",
"download_csv_url": "https://www.openml.org/data/get_csv/1601911/phpQOf0wY",
"task_type": "classification",
"target": "class",
"n_rows": 581012,
}
# This dataset doesn't work with the pre-processing pipeline coded below, as the SimpleImputer drops some columns
# which have all missing values. There is no easy way to pass this info to the downstream ColumnTransformer.
# experiments_dict['KDDCup09_appetency'] = {}
# 'download_arff_url'] = 'https://www.openml.org/data/download/53994/KDDCup09_appetency.arff'
# 'download_csv_url'] = 'https://www.openml.org/data/get_csv/53994/KDDCup09_appetency.arff'
# 'task_type'] = 'classification'
# 'target'] = 'appetency'
experiments_dict["Amazon_employee_access"] = {
"download_arff_url": "https://www.openml.org/data/download/1681098/phpmPOD5A",
"download_csv_url": "https://www.openml.org/data/get_csv/1681098/phpmPOD5A",
"task_type": "classification",
"target": "target",
"n_rows": 32769,
}
experiments_dict["Fashion-MNIST"] = {
"download_arff_url": "https://www.openml.org/data/download/18238735/phpnBqZGZ",
"download_csv_url": "https://www.openml.org/data/get_csv/18238735/phpnBqZGZ",
"task_type": "classification",
"target": "class",
"n_rows": 70000,
}
experiments_dict["dionis"] = {
"download_arff_url": "https://www.openml.org/data/download/19335690/file1c55272d7b5b.arff",
"download_csv_url": "https://www.openml.org/data/get_csv/19335690/file1c55272d7b5b.arff",
"task_type": "classification",
"target": "class",
"n_rows": 416188,
}
experiments_dict["MiniBooNE"] = {
"download_arff_url": "https://www.openml.org/data/download/19335523/MiniBooNE.arff",
"download_csv_url": "https://www.openml.org/data/get_csv/19335523/MiniBooNE.arff",
"task_type": "classification",
"target": "signal",
"n_rows": 130064,
}
experiments_dict["airlines"] = {
"download_arff_url": "https://www.openml.org/data/download/66526/phpvcoG8S",
"download_csv_url": "https://www.openml.org/data/get_csv/66526/phpvcoG8S",
"task_type": "stream classification",
"target": "class",
"n_rows": 539383,
}
experiments_dict["diabetes"] = {
"dataset_url": "https://www.openml.org/d/37",
"download_arff_url": "https://www.openml.org/data/download/37/dataset_37_diabetes.arff",
"download_csv_url": "https://www.openml.org/data/get_csv/37/dataset_37_diabetes.arff",
"task_type": "classification",
"target": "class",
"n_rows": 768,
}
experiments_dict["spectf"] = {
"dataset_url": "https://www.openml.org/d/337",
"download_arff_url": "https://www.openml.org/data/download/52240/phpDQbeeh",
"download_csv_url": "https://www.openml.org/data/get_csv/52240/phpDQbeeh",
"task_type": "classification",
"target": "overall_diagnosis",
"n_rows": 267,
}
experiments_dict["hill-valley"] = {
"dataset_url": "https://www.openml.org/d/1479",
"download_arff_url": "https://www.openml.org/data/download/1590101/php3isjYz",
"download_csv_url": "https://www.openml.org/data/get_csv/1590101/php3isjYz",
"task_type": "classification",
"target": "class",
"n_rows": 1212,
}
experiments_dict["breast-cancer"] = {
"dataset_url": "https://www.openml.org/d/13",
"download_arff_url": "https://www.openml.org/data/download/13/dataset_13_breast-cancer.arff",
"download_csv_url": "https://www.openml.org/data/get_csv/13/dataset_13_breast-cancer.arff",
"task_type": "classification",
"target": "class",
"n_rows": 286,
}
experiments_dict["compas"] = {
"download_arff_url": "https://www.openml.org/data/download/21757035/compas.arff",
"download_csv_url": "https://www.openml.org/data/get_csv/21757035/compas.arff",
"task_type": "classification",
"target": "two_year_recid",
"n_rows": 5278,
}
experiments_dict["ricci"] = {
"download_arff_url": "https://www.openml.org/data/download/22044446/ricci_processed.arff",
"download_csv_url": "https://www.openml.org/data/get_csv/22044446/ricci_processed.arff",
"task_type": "classification",
"target": "promotion",
"n_rows": 118,
}
experiments_dict["SpeedDating"] = {
"dataset_url": "https://www.openml.org/d/40536",
"download_arff_url": "https://www.openml.org/data/download/13153954/speeddating.arff",
"download_csv_url": "https://www.openml.org/data/get_csv/13153954/speeddating.arff",
"task_type": "classification",
"target": "match",
"n_rows": 8378,
}
experiments_dict["nursery"] = {
"dataset_url": "https://www.openml.org/d/26",
"download_arff_url": "https://www.openml.org/data/download/26/dataset_26_nursery.arff",
"download_csv_url": "https://www.openml.org/data/get_csv/26/dataset_26_nursery.arff",
"task_type": "classification",
"target": "class",
"n_rows": 12960,
}
experiments_dict["titanic"] = {
"dataset_url": "https://www.openml.org/d/40945",
"download_arff_url": "https://www.openml.org/data/download/16826755/phpMYEkMl",
"download_csv_url": "https://www.openml.org/data/get_csv/16826755/phpMYEkMl",
"task_type": "classification",
"target": "survived",
"n_rows": 1309,
}
experiments_dict["tae"] = {
"dataset_url": "https://www.openml.org/d/48",
"download_arff_url": "https://www.openml.org/data/download/48/dataset_48_tae.arff",
"download_csv_url": "https://www.openml.org/data/get_csv/48/dataset_48_tae.arff",
"task_type": "classification",
"target": "class_attribute",
"n_rows": 151,
}
experiments_dict["airlines_delay"] = {
"dataset_url": "https://www.openml.org/d/42728",
"download_arff_url": "https://www.openml.org/data/download/22044760/airlines_train_regression_10000000.arff",
"download_csv_url": "https://www.openml.org/data/get_csv/22044760/airlines_train_regression_10000000.arff",
"task_type": "regression",
"target": "depdelay",
"n_rows": 10000000,
}
experiments_dict["kddcup99full"] = {
"dataset_url": "https://www.openml.org/d/42728",
"download_arff_url": "https://www.openml.org/data/download/53993/KDDCup99_full.arff",
"download_csv_url": "https://www.openml.org/data/download/53993/KDDCup99_full.arff",
"task_type": "classification",
"target": "label",
"n_rows": 4898431,
}
experiments_dict["cholesterol"] = {
"dataset_url": "https://www.openml.org/d/204",
"download_arff_url": "https://www.openml.org/data/download/3641/dataset_2190_cholesterol.arff",
"task_type": "regression",
"target": "chol",
"n_rows": 303,
}
experiments_dict["cloud"] = {
"dataset_url": "https://www.openml.org/d/210",
"download_arff_url": "https://www.openml.org/data/download/3647/dataset_2196_cloud.arff",
"task_type": "regression",
"target": "te",
"n_rows": 108,
}
experiments_dict["heart-disease"] = {
"dataset_url": "https://www.openml.org/d/43398",
"download_arff_url": "https://www.openml.org/data/download/22102223/dataset",
"task_type": "classification",
"target": "target",
"n_rows": 303,
}
experiments_dict["UCI-student-performance-mat"] = {
"dataset_url": "https://www.openml.org/d/42352",
"download_arff_url": "https://www.openml.org/data/download/21826977/mat.arff",
"task_type": "regression",
"target": "g3",
"n_rows": 395,
}
experiments_dict["UCI-student-performance-por"] = {
"dataset_url": "https://www.openml.org/d/42351",
"download_arff_url": "https://www.openml.org/data/download/21826962/por.arff",
"task_type": "regression",
"target": "g3",
"n_rows": 649,
}
experiments_dict["us_crime"] = {
"dataset_url": "https://www.openml.org/d/315",
"download_arff_url": "https://www.openml.org/data/download/52219/phpeZQVCe",
"task_type": "regression",
"target": "violentcrimesperpop",
"n_rows": 1_994,
}
experiments_dict["national-longitudinal-survey-binary"] = {
"dataset_url": "https://www.openml.org/d/43892",
"download_arff_url": "https://www.openml.org/data/download/22102800/file8627017dbe5e7.arff",
"task_type": "classification",
"target": "income96gt17",
"n_rows": 4_908,
}
experiments_dict["law-school-admission-bianry"] = {
"dataset_url": "https://www.openml.org/d/43890",
"download_arff_url": "https://www.openml.org/data/download/22102798/file862706e533fa5.arff",
"task_type": "classification",
"target": "ugpagt3",
"n_rows": 20_800,
}
experiments_dict["Default-of-Credit-Card-Clients-Dataset"] = {
"dataset_url": "https://www.openml.org/d/43435",
"download_arff_url": "https://www.openml.org/data/download/22102260/dataset",
"task_type": "classification",
"target": "default.payment.next.month",
"n_rows": 30_000,
}
def add_schemas(schema_orig, target_col, train_X, test_X, train_y, test_y):
from lale.datasets.data_schemas import add_schema
elems_X = [
item_schema
for item_schema in schema_orig["items"]["items"]
if item_schema["description"].lower() != target_col
]
elem_y = [
item_schema
for item_schema in schema_orig["items"]["items"]
if item_schema["description"].lower() == target_col
][0]
if "enum" in elem_y:
if isinstance(train_y, pd.Series):
elem_y["enum"] = list(train_y.unique())
else:
elem_y["enum"] = [*range(len(elem_y["enum"]))]
ncols_X = len(elems_X)
rows_X = {
**schema_orig["items"],
"minItems": ncols_X,
"maxItems": ncols_X,
"items": elems_X,
}
if "json_schema" not in pd.DataFrame._internal_names:
pd.DataFrame._internal_names.append("json_schema")
nrows_train, nrows_test = len(train_y), len(test_y)
train_X = add_schema(
train_X,
{
**schema_orig,
"minItems": nrows_train,
"maxItems": nrows_train,
"items": rows_X,
},
)
test_X = add_schema(
test_X,
{
**schema_orig,
"minItems": nrows_test,
"maxItems": nrows_test,
"items": rows_X,
},
)
train_y = add_schema(
train_y,
{
**schema_orig,
"minItems": nrows_train,
"maxItems": nrows_train,
"items": elem_y,
},
)
test_y = add_schema(
test_y,
{
**schema_orig,
"minItems": nrows_test,
"maxItems": nrows_test,
"items": elem_y,
},
)
return train_X, test_X, train_y, test_y
numeric_data_types_list = ["numeric", "integer", "real"]
def download_if_missing(dataset_name, verbose=False):
file_name = os.path.join(download_data_dir, dataset_name + ".arff")
is_missing = not os.path.exists(file_name)
if verbose:
print(
f"download_if_missing('{dataset_name}'): is_missing {is_missing}, file_name '{file_name}'"
)
if is_missing:
if not os.path.exists(download_data_dir):
os.makedirs(download_data_dir)
url = cast(str, experiments_dict[dataset_name]["download_arff_url"])
        # This should be safe, since all of these URL strings are explicitly listed, not user injectable
urllib.request.urlretrieve(url, file_name) # nosec
assert os.path.exists(file_name)
return file_name
def fetch(
dataset_name,
task_type,
verbose=False,
preprocess=True,
test_size=0.33,
astype=None,
seed=0,
):
# Check that the dataset name exists in experiments_dict
try:
if experiments_dict[dataset_name]["task_type"] != task_type.lower():
raise ValueError(
f"The task type {task_type} does not match with the given datasets task type {experiments_dict[dataset_name]['task_type']}"
)
except KeyError as exc:
raise KeyError(
f"Dataset name {dataset_name} not found in the supported datasets"
) from exc
data_file_name = download_if_missing(dataset_name, verbose)
with open(data_file_name) as f: # pylint:disable=unspecified-encoding
dataDictionary = arff.load(f)
from lale.datasets.data_schemas import liac_arff_to_schema
schema_orig = liac_arff_to_schema(dataDictionary)
target_col = experiments_dict[dataset_name]["target"]
y: Optional[Any] = None
if preprocess:
arffData = pd.DataFrame(dataDictionary["data"])
# arffData = arffData.fillna(0)
attributes = dataDictionary["attributes"]
if verbose:
print(f"attributes: {attributes}")
categorical_cols = []
numeric_cols = []
X_columns = []
for i, item in enumerate(attributes):
if item[0].lower() == target_col:
target_indx = i
# remove it from attributes so that the next loop indices are adjusted accordingly.
del attributes[i]
# the type stubs for pandas are not currently complete enough to type this correctly
y = arffData.iloc[:, target_indx] # type: ignore
arffData = arffData.drop(i, axis=1)
for i, item in enumerate(attributes):
X_columns.append(i)
if (
(
isinstance(item[1], str)
and item[1].lower() not in numeric_data_types_list
)
or isinstance(item[1], list)
) and (item[0].lower() != "class"):
categorical_cols.append(i)
elif (
isinstance(item[1], str) and item[1].lower() in numeric_data_types_list
) and (item[0].lower() != "class"):
numeric_cols.append(i)
if verbose:
print(f"categorical columns: {categorical_cols}")
print(f"numeric columns: {numeric_cols}")
X = arffData.iloc[:, X_columns]
# Check whether there is any error
num_classes_from_last_row = len(list(set(y))) if y is not None else 0
if verbose:
print("num_classes_from_last_row", num_classes_from_last_row)
transformers1 = [
(
"imputer_str",
SimpleImputer(missing_values=None, strategy="most_frequent"),
categorical_cols,
),
("imputer_num", SimpleImputer(strategy="mean"), numeric_cols),
]
txm1 = ColumnTransformer(transformers1, sparse_threshold=0.0)
        # scikit-learn renamed `sparse` to `sparse_output` in 1.2 and removed the old name in 1.4
        if sklearn_version >= version.Version("1.2"):
            dense_ohe = OneHotEncoder(sparse_output=False)
        else:
            dense_ohe = OneHotEncoder(sparse=False)
        transformers2 = [
            ("ohe", dense_ohe, list(range(len(categorical_cols)))),
(
"no_op",
"passthrough",
list(
range(
len(categorical_cols), len(categorical_cols) + len(numeric_cols)
)
),
),
]
txm2 = ColumnTransformer(transformers2, sparse_threshold=0.0)
if verbose:
print("Shape of X before preprocessing", X.shape)
from sklearn.pipeline import make_pipeline
preprocessing = make_pipeline(txm1, txm2)
X = preprocessing.fit(X).transform(X)
if verbose:
print(f"shape of X after preprocessing: {X.shape}")
if astype in ["pandas", "spark"]:
cat_col_names = [attributes[i][0].lower() for i in categorical_cols]
one_hot_encoder = preprocessing.steps[1][1].named_transformers_["ohe"]
if sklearn_version >= version.Version("1.0"):
encoded_names = one_hot_encoder.get_feature_names_out(cat_col_names)
else:
encoded_names = one_hot_encoder.get_feature_names(cat_col_names)
num_col_names = [attributes[i][0].lower() for i in numeric_cols]
col_names = list(encoded_names) + list(num_col_names)
if verbose:
print(f"column names after preprocessing: {col_names}")
X = pd.DataFrame(X, columns=col_names)
else:
col_names = [attr[0].lower() for attr in dataDictionary["attributes"]]
df_all = pd.DataFrame(dataDictionary["data"], columns=col_names)
assert target_col in col_names, (target_col, col_names)
y = df_all[target_col]
# the type stubs for pandas are not currently complete enough to type this correctly
y = y.squeeze() # type: ignore
cols_X = [col for col in col_names if col != target_col]
X = df_all[cols_X]
if preprocess:
labelencoder = LabelEncoder()
y = labelencoder.fit_transform(y)
if astype in ["pandas", "spark"] and not isinstance(y, pd.Series):
y = pd.Series(y, name=target_col)
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=test_size, random_state=seed
)
if verbose:
print(f"training set shapes: X {X_train.shape}, y {y_train.shape}")
print(f"test set shapes: X {X_test.shape}, y {y_test.shape}")
if preprocess:
from lale.datasets.data_schemas import add_schema
X_train = add_schema(X_train.astype(np.number), recalc=True)
y_train = add_schema(y_train.astype(int), recalc=True)
X_test = add_schema(X_test.astype(np.number), recalc=True)
y_test = add_schema(y_test.astype(int), recalc=True)
else:
X_train, X_test, y_train, y_test = add_schemas(
schema_orig, target_col, X_train, X_test, y_train, y_test
)
if astype == "spark":
from lale.datasets import pandas2spark
X_train = pandas2spark(X_train)
X_test = pandas2spark(X_test)
return (X_train, y_train), (X_test, y_test)
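# Hedged usage sketch (dataset names must be keys of experiments_dict above):
#
#   (train_X, train_y), (test_X, test_y) = fetch("credit-g", "classification")
#   train_X.shape  # about two thirds of the 1,000 credit-g rows, one-hot encoded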
| 28,780 | 34.444581 | 139 |
py
|
lale
|
lale-master/lale/search/schema2search_space.py
|
# Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import Any, Dict, Iterable, List, Optional, Set, Tuple, Union
import jsonschema
import lale.type_checking
from lale.operators import (
Operator,
OperatorChoice,
PlannedIndividualOp,
PlannedOperator,
PlannedPipeline,
)
from lale.schema_simplifier import (
filterForOptimizer,
findRelevantFields,
narrowToGivenRelevantFields,
narrowToRelevantConstraints,
simplify,
)
from lale.schema_utils import (
JsonSchema,
atomize_schema_enumerations,
check_operators_schema,
forOptimizer,
has_operator,
is_false_schema,
)
from lale.search.lale_hyperopt import search_space_to_str_for_comparison
from lale.search.PGO import PGO, Freqs
from lale.search.search_space import (
SearchSpace,
SearchSpaceArray,
SearchSpaceBool,
SearchSpaceConstant,
SearchSpaceDict,
SearchSpaceEmpty,
SearchSpaceEnum,
SearchSpaceNumber,
SearchSpaceObject,
SearchSpaceOperator,
SearchSpaceProduct,
SearchSpaceSum,
should_print_search_space,
)
from lale.util import VisitorPathError
from lale.util.Visitor import Visitor, accept
logger = logging.getLogger(__name__)
class OperatorSchemaError(VisitorPathError):
def __init__(self, sub_path: Any, message: Optional[str] = None):
super().__init__([], message)
self.sub_path = sub_path
def get_message_str(self) -> str:
msg = super().get_message_str()
if self.sub_path is None:
return msg
else:
return f"for path {self.sub_path}: {msg}"
def op_to_search_space(
op: PlannedOperator,
pgo: Optional[PGO] = None,
data_schema: Optional[Dict[str, Any]] = None,
) -> SearchSpace:
"""Given an operator, this method compiles its schemas into a SearchSpace"""
search_space = SearchSpaceOperatorVisitor.run(op, pgo=pgo, data_schema=data_schema)
if should_print_search_space("true", "all", "search_space"):
name = op.name()
if not name:
name = "an operator"
print(f"search space for {name}:\n {str(search_space)}")
return search_space
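# A minimal usage sketch of the compilation entry point above, assuming
# lale.lib.sklearn.LogisticRegression is available; it compiles the operator's
# hyperparameter schema into a SearchSpace and prints its string form.
def _example_op_to_search_space():  # hypothetical example helper
    from lale.lib.sklearn import LogisticRegression
    space = op_to_search_space(LogisticRegression)
    print(str(space))
    return space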
def get_default(schema) -> Optional[Any]:
d = schema.get("default", None)
if d is not None:
try:
s = forOptimizer(schema)
lale.type_checking._validator.validate(d, s)
return d
except jsonschema.ValidationError:
logger.debug(
f"get_default: default {d} not used because it is not valid for the schema {schema}"
)
return None
return None
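# A small sketch of the behavior above, using a made-up schema: the default is
# returned only when it validates against the optimizer-filtered schema.
def _example_get_default():  # hypothetical example helper
    return get_default({"type": "integer", "minimum": 0, "default": 5})  # yields 5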
class FreqsWrapper:
base: Optional[Dict[str, Freqs]]
def __init__(self, base: Optional[Dict[str, Freqs]]):
self.base = base
def pgo_lookup(pgo: Optional[PGO], name: str) -> Optional[FreqsWrapper]:
if pgo is None:
return None
else:
freqs: Optional[Dict[str, Freqs]] = None
if pgo is not None:
freqs = pgo.get(name, None)
return FreqsWrapper(freqs)
pgo_part = Union[FreqsWrapper, Freqs, None]
def freqs_wrapper_lookup(part: pgo_part, k: str) -> pgo_part:
if part is None:
return None
elif isinstance(part, FreqsWrapper):
f = part.base
if f is not None and k in f:
return f[k]
else:
return None
else:
return None
def asFreqs(part: pgo_part) -> Optional[Iterable[Tuple[Any, int]]]:
if part is None:
return None
elif isinstance(part, FreqsWrapper):
return None
else:
return part.items()
def add_sub_space(space, k, v):
"""Given a search space and a "key",
    if the corresponding sub-space does not already exist,
    set it to the constant space for v.
"""
# TODO!
# I should parse __ and such and walk down the schema
if isinstance(space, SearchSpaceObject):
if k not in space.keys:
space.keys.append(k)
space.choices = (c + (SearchSpaceConstant(v),) for c in space.choices)
return
# TODO: do we use 'path' above anymore?
# or do we just add the paths later as needed?
class SearchSpaceOperatorVisitor(Visitor):
pgo: Optional[PGO]
data_schema: Optional[Dict[str, Any]]
@classmethod
def run(
cls,
op: PlannedOperator,
pgo: Optional[PGO] = None,
data_schema: Optional[Dict[str, Any]] = None,
) -> SearchSpace:
visitor = cls(pgo=pgo, data_schema=data_schema)
return accept(op, visitor)
def __init__(
self, pgo: Optional[PGO] = None, data_schema: Optional[Dict[str, Any]] = None
):
super().__init__()
self.pgo = pgo
self.data_schema = data_schema
def visitPlannedIndividualOp(self, op: PlannedIndividualOp) -> SearchSpace:
schema = op._hyperparam_schema_with_hyperparams(self.data_schema)
module = op._impl.__module__
if module is None or module == str.__class__.__module__: # type: ignore
long_name = op.name()
else:
long_name = module + "." + op.name()
name = op.name()
space = self.schemaToSearchSpace(long_name, name, schema)
if space is None:
space = SearchSpaceEmpty()
# we now augment the search space as needed with the specified hyper-parameters
# even if they are marked as not relevant to the optimizer, we still want to include them now
if hasattr(op, "_hyperparams"):
hyperparams = op.hyperparams()
if hyperparams:
for k, v in hyperparams.items():
add_sub_space(space, k, v)
return space
visitTrainableIndividualOp = visitPlannedIndividualOp
visitTrainedIndividualOp = visitPlannedIndividualOp
def visitPlannedPipeline(self, op: "PlannedPipeline") -> SearchSpace:
spaces: List[Tuple[str, SearchSpace]] = [
(s.name(), accept(s, self)) for s in op.steps_list()
]
return SearchSpaceProduct(spaces)
visitTrainablePipeline = visitPlannedPipeline
visitTrainedPipeline = visitPlannedPipeline
def visitOperatorChoice(self, op: "OperatorChoice") -> SearchSpace:
spaces: List[SearchSpace] = [accept(s, self) for s in op.steps_list()]
return SearchSpaceSum(spaces)
# functions to actually convert an individual operator
# schema into a search space
def JsonSchemaToSearchSpaceHelper(
self,
longName: str,
path: str,
schema: JsonSchema,
relevantFields: Optional[Set[str]],
pgo_freqs: pgo_part = None,
sub_space: bool = True,
) -> Dict[str, SearchSpace]:
if "properties" not in schema:
return {}
props = schema["properties"]
hyp: Dict[str, SearchSpace] = {}
for p, s in props.items():
if relevantFields is None or p in relevantFields:
# We would need to specify what is correct in that case
sub_freqs = freqs_wrapper_lookup(pgo_freqs, p)
sub_sch = self.schemaToSearchSpaceHelper_(
longName, path + "_" + p, s, None, pgo_freqs=sub_freqs
)
if sub_sch is None:
# if it is a required field, this entire thing should be None
hyp[p] = SearchSpaceConstant(None)
else:
hyp[p] = sub_sch
else:
logger.debug(f"schemaToSearchSpace: skipping not relevant field {p}")
return hyp
def schemaToSearchSpaceHelper_(
self,
longName,
path: str,
schema: JsonSchema,
relevantFields: Optional[Set[str]],
pgo_freqs: pgo_part = None,
sub_space: bool = True,
) -> Optional[SearchSpace]:
# TODO: handle degenerate cases
# right now, this handles only a very fixed form
if is_false_schema(schema):
return None
typ: Optional[str] = None
typ = schema.get("laleType", None)
if typ is None:
typ = schema.get("type", None)
if "enum" in schema and typ != "operator":
vals = schema["enum"]
return SearchSpaceEnum(
vals, pgo=asFreqs(pgo_freqs), default=get_default(schema)
)
if typ is not None:
if typ == "boolean":
return SearchSpaceBool(
pgo=asFreqs(pgo_freqs), default=get_default(schema)
)
elif typ in ["number", "integer"]:
exclusive_minimum = False
minimum = schema.get("minimumForOptimizer", None)
if minimum is not None:
exclusive_minimum = schema.get(
"exclusiveMinimumForOptimizer", False
)
else:
minimum = schema.get("minimum", None)
if minimum is not None:
exclusive_minimum = schema.get("exclusiveMinimum", False)
exclusive_maximum = False
maximum = schema.get("maximumForOptimizer", None)
if maximum is not None:
exclusive_maximum = schema.get(
"exclusiveMaximumForOptimizer", False
)
else:
maximum = schema.get("maximum", None)
if maximum is not None:
exclusive_maximum = schema.get("exclusiveMaximum", False)
distribution = schema.get("distribution", "uniform")
if not isinstance(distribution, str):
raise OperatorSchemaError(
path,
f"specified distribution should be a string, not: {distribution}.",
)
laleType = schema.get("laleType", None)
if laleType is None:
laleType = typ
if laleType == "number":
discrete = False
elif laleType == "integer":
discrete = True
else:
raise OperatorSchemaError(
path,
f"specified laleType should be a number or integer, not: {laleType}.",
)
return SearchSpaceNumber(
minimum=minimum,
exclusiveMinimum=exclusive_minimum,
maximum=maximum,
exclusiveMaximum=exclusive_maximum,
discrete=discrete,
distribution=distribution,
pgo=asFreqs(pgo_freqs),
default=get_default(schema),
)
elif typ in ["array", "tuple"]:
laleType = schema.get("laleType", None)
if laleType is None:
laleType = typ
is_tuple: bool = laleType == "tuple"
min_items = schema.get("minItemsForOptimizer", None)
if min_items is None:
min_items = schema.get("minItems", None)
if min_items is None:
min_items = 0
max_items = schema.get("maxItemsForOptimizer", None)
if max_items is None:
max_items = schema.get("maxItems", None)
items_schema = schema.get("itemsForOptimizer", None)
if items_schema is None:
items_schema = schema.get("items", None)
if items_schema is None:
raise OperatorSchemaError(
path,
f"An array type was found without a provided schema for the items in the schema {schema}. Please provide a schema for the items (consider using itemsForOptimizer)",
)
# we can search an empty list even without schemas
if max_items == 0:
if is_tuple:
return SearchSpaceConstant([()])
else:
return SearchSpaceConstant([[]])
prefix: Optional[List[SearchSpace]] = None
additional: Optional[SearchSpace] = None
if isinstance(items_schema, list):
prefix = []
for i, sub_schema in enumerate(items_schema):
sub = self.schemaToSearchSpaceHelper_(
longName, path + "_" + str(i), sub_schema, relevantFields
)
if sub is None:
return None
else:
prefix.append(sub)
prefix_len = len(prefix)
additional_items_schema = schema.get(
"additionalItemsForOptimizer", None
)
if additional_items_schema is None:
additional_items_schema = schema.get("additionalItems", None)
if additional_items_schema is None:
if max_items is None or max_items > prefix_len:
raise OperatorSchemaError(
path,
f"An array type was found with provided schemas for {prefix_len} elements, but either an unspecified or too high a maxItems, and no schema for the additionalItems. Please constraing maxItems to <= {prefix_len} (you can set maxItemsForOptimizer), or provide a schema for additionalItems",
)
elif additional_items_schema is False:
if max_items is None:
max_items = prefix_len
else:
max_items = min(max_items, prefix_len)
else:
additional = self.schemaToSearchSpaceHelper_(
longName,
path + "-",
additional_items_schema,
relevantFields,
)
# if items_schema is None:
# raise ValueError(f"an array type was found without a provided schema for the items in the schema {schema}. Please provide a schema for the items (consider using itemsForOptimizer)")
else:
additional = self.schemaToSearchSpaceHelper_(
longName, path + "-", items_schema, relevantFields
)
if max_items is None:
raise OperatorSchemaError(
path,
f"An array type was found without a provided maximum number of items in the schema {schema}, and it is not a list with 'additionalItems' set to False. Please provide a maximum (consider using maxItemsForOptimizer), or, if you are using a list, set additionalItems to False",
)
return SearchSpaceArray(
prefix=prefix,
minimum=min_items,
maximum=max_items,
additional=additional,
is_tuple=is_tuple,
)
elif typ == "object":
if "properties" not in schema:
return SearchSpaceObject(longName, [], [])
o = self.JsonSchemaToSearchSpaceHelper(
longName,
path,
schema,
relevantFields,
pgo_freqs=pgo_freqs,
sub_space=sub_space,
)
if sub_space:
return SearchSpaceDict(o)
else:
all_keys = list(o.keys())
all_keys.sort()
o_choice = tuple(o.get(k, None) for k in all_keys)
return SearchSpaceObject(longName, all_keys, [o_choice])
elif typ == "string":
pass
elif typ == "operator":
# TODO: If there is a default, we could use it
vals = schema.get("enum", None)
if vals is None:
logger.error(
"An operator is required by the schema but was not provided"
)
return None
sub_schemas: List[SearchSpace] = [
accept(op, self)
if isinstance(op, Operator)
else SearchSpaceConstant(op)
for op in vals
]
combined_sub_schema: SearchSpace
if len(sub_schemas) == 1:
combined_sub_schema = sub_schemas[0]
if isinstance(combined_sub_schema, SearchSpaceConstant):
return combined_sub_schema
else:
combined_sub_schema = SearchSpaceSum(sub_schemas)
if all((isinstance(x, SearchSpaceConstant) for x in sub_schemas)):
return combined_sub_schema
return SearchSpaceOperator(combined_sub_schema)
elif typ == "Any":
raise OperatorSchemaError(
path,
f"A search space was found with laleType ({typ}), which is not searchable. Please mark the relevant hyperparameter as not relevant for the optimizer. schema: {schema}",
)
else:
raise OperatorSchemaError(
path, f"An unknown type ({typ}) was found in the schema {schema}"
)
if "anyOf" in schema:
objs = []
for s_obj in schema["anyOf"]:
if "type" in s_obj and s_obj["type"] == "object":
o = self.JsonSchemaToSearchSpaceHelper(
longName,
path,
s_obj,
relevantFields,
pgo_freqs=pgo_freqs,
sub_space=sub_space,
)
if o:
objs.append(o)
if objs:
# First, gather a list of all the properties
keys_list = [set(o.keys()) for o in objs]
# make sure the iterator is deterministic
all_keys = list(set.union(*keys_list))
# and we might as well make it sorted
all_keys.sort()
def as_str(k, c):
if c is None:
return "None"
else:
return search_space_to_str_for_comparison(c, path + "_" + k)
anys: Dict[str, Any] = {}
for o in objs:
o_choice = tuple(o.get(k, None) for k in all_keys)
k = str(
[as_str(all_keys[idx], c) for idx, c in enumerate(o_choice)]
)
if k in anys:
logger.info(f"Ignoring Duplicate SearchSpace entry {k}")
anys[k] = o_choice
return SearchSpaceObject(longName, all_keys, anys.values())
else:
return SearchSpaceObject(longName, [], [])
if "allOf" in schema:
# if all but one are negated constraints, we will just ignore them
pos_sub_schema: List[JsonSchema] = []
for sub_schema in schema["allOf"]:
if "not" not in sub_schema:
pos_sub_schema.append(sub_schema)
if len(pos_sub_schema) > 1:
raise OperatorSchemaError(
path,
f"schemaToSearchSpaceHelper does not yet know how to compile the given schema {schema}, because it is an allOf with more than one non-negated schemas ({pos_sub_schema})",
)
if len(pos_sub_schema) == 0:
raise OperatorSchemaError(
path,
f"schemaToSearchSpaceHelper does not yet know how to compile the given schema {schema}, because it is an allOf with only negated schemas",
)
logger.debug(
f"[{path}]: schemaToSearchSpaceHelper: ignoring negated schemas in the conjunction {schema}"
)
return self.schemaToSearchSpaceHelper_(
longName,
path,
pos_sub_schema[0],
relevantFields,
pgo_freqs=pgo_freqs,
sub_space=sub_space,
)
# TODO: handle degenerate cases
raise OperatorSchemaError(
path,
f"schemaToSearchSpaceHelper does not yet know how to compile the given schema {schema}",
)
def schemaToSearchSpaceHelper(
self,
longName,
schema: Optional[JsonSchema],
relevantFields: Optional[Set[str]],
pgo_freqs: pgo_part = None,
sub_space: bool = True,
) -> Optional[SearchSpace]:
if schema is None or is_false_schema(schema):
return None
else:
if sub_space:
relevantFields = None
return self.schemaToSearchSpaceHelper_(
longName,
longName,
schema,
relevantFields,
pgo_freqs=pgo_freqs,
sub_space=sub_space,
)
def schemaToSimplifiedAndSearchSpace(
self, longName: str, name: str, schema: JsonSchema
) -> Tuple[Optional[JsonSchema], Optional[SearchSpace]]:
schema = narrowToRelevantConstraints(schema)
relevantFields = findRelevantFields(schema)
if relevantFields:
schema = narrowToGivenRelevantFields(schema, relevantFields)
if has_operator(schema):
atomize_schema_enumerations(schema)
simplified_schema = simplify(schema, True)
# from .. import pretty_print
# print(f"SIMPLIFIED_{longName}: {pretty_print.to_string(simplified_schema)}")
filtered_schema: Optional[JsonSchema] = filterForOptimizer(simplified_schema)
# print(f'SIMPLIFIED_{longName}: {pretty_print.to_string(filtered_schema)}')
if logger.isEnabledFor(logging.WARNING):
op_warnings: List[str] = []
check_operators_schema(filtered_schema, op_warnings)
if op_warnings:
for w in op_warnings:
logger.warning(w)
return (
filtered_schema,
self.schemaToSearchSpaceHelper(
longName,
filtered_schema,
relevantFields,
pgo_freqs=pgo_lookup(self.pgo, name),
sub_space=False,
),
)
def schemaToSearchSpace(
self, longName: str, name: str, schema: JsonSchema
) -> Optional[SearchSpace]:
(_s, h) = self.schemaToSimplifiedAndSearchSpace(longName, name, schema)
return h
| 23,807 | 36.910828 | 320 |
py
|
lale
|
lale-master/lale/search/PGO.py
|
# Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import random
from enum import Enum
from typing import (
Any,
Dict,
Generic,
Iterable,
Iterator,
List,
Optional,
Sequence,
Tuple,
TypeVar,
Union,
cast,
overload,
)
import jsonschema
import numpy as np
Freqs = Dict[str, int]
PGO = Dict[str, Dict[str, Freqs]]
class DefaultValue(Enum):
token = 0
_default_value = DefaultValue.token
Def = TypeVar("Def")
Defaultable = Union[DefaultValue, Def]
XDK = TypeVar("XDK")
XDV = TypeVar("XDV")
def remove_defaults_dict(d: Dict[XDK, Union[DefaultValue, XDV]]) -> Dict[XDK, XDV]:
ret: Dict[XDK, XDV] = {}
for k, v in d.items():
if v is not _default_value:
assert not isinstance(v, DefaultValue)
# not sure why pyright can't figure this out
ret[k] = v # type: ignore
return ret
# utilities to load a PGO from JSON-ish data
def load_pgo_file(filepath) -> PGO:
with open(filepath) as json_file: # pylint:disable=unspecified-encoding
json_data = json.load(json_file)
return load_pgo_data(json_data)
def load_pgo_data(json_data) -> PGO:
jsonschema.validate(json_data, _input_schema, jsonschema.Draft4Validator)
norm = normalize_pgo_type(json_data)
return norm
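# A small sketch of the expected PGO input shape (operator name -> hyperparameter
# name -> observed value -> count); the operator and counts below are made up.
def _example_load_pgo_data():  # hypothetical example helper
    data = {
        "LogisticRegression": {
            "solver": {"lbfgs": 7, "liblinear": 3},
            "C": {"0.1": 4, "1.0": 6, "default": 2},
        }
    }
    pgo: PGO = load_pgo_data(data)
    return pgo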
# TODO: Add support for falling back on an underlying distribution
# with some probability
T = TypeVar("T")
class FrequencyDistribution(Generic[T]):
"""Represents the distribution implied by a histogram"""
freq_dist: np.ndarray # Array[T,int]
vals: np.ndarray # Array[T]
cumulative_freqs: np.ndarray # Array[int]
@classmethod
def asIntegerValues(
cls,
freqs: Iterable[Tuple[Any, int]],
inclusive_min: Optional[float] = None,
inclusive_max: Optional[float] = None,
) -> "FrequencyDistribution[int]":
freqs = freqsAsIntegerValues(
freqs, inclusive_min=inclusive_min, inclusive_max=inclusive_max
)
return FrequencyDistribution[int](list(freqs), dtype=int)
@classmethod
def asFloatValues(
cls,
freqs: Iterable[Tuple[Any, int]],
inclusive_min: Optional[float] = None,
inclusive_max: Optional[float] = None,
) -> "FrequencyDistribution[float]":
freqs = freqsAsFloatValues(
freqs, inclusive_min=inclusive_min, inclusive_max=inclusive_max
)
return FrequencyDistribution[float](list(freqs), dtype=float)
@classmethod
def asEnumValues(
cls, freqs: Iterable[Tuple[Any, int]], values: List[Any]
) -> "FrequencyDistribution[Any]":
freqs = freqsAsEnumValues(freqs, values=values)
return FrequencyDistribution[Any](list(freqs), dtype=object)
def __init__(self, freqs: Iterable[Tuple[Defaultable[T], int]], dtype=object):
# we need them to be sorted for locality
sorted_freq_list = sorted(
freqs,
key=(
lambda k: (
k[0] is _default_value,
None if k[0] is _default_value else k[0],
)
),
)
freqs_array = np.array(
sorted_freq_list, dtype=[("value", object), ("frequency", int)]
)
# freqs_array.sort(order='value')
self.freq_dist = freqs_array
self.vals = freqs_array["value"]
self.cumulative_freqs = np.cumsum(freqs_array["frequency"])
def __len__(self) -> int:
return cast(int, np.int_(self.cumulative_freqs[-1]))
@overload
def __getitem__(self, key: int) -> T:
...
@overload
def __getitem__(self, key: Sequence[int]) -> Sequence[T]:
...
@overload
def __getitem__(self, key: slice) -> Sequence[T]:
...
def __getitem__(
self, key: Union[int, Sequence[int], slice]
) -> Union[T, Sequence[T]]:
indices: Sequence[int]
single = False
if isinstance(key, (int, float)):
single = True
indices = [key]
elif isinstance(key, slice):
# TODO: this could be made more efficient
indices = range(key.start or 0, key.stop or len(self), key.step or 1)
else:
indices = key
val_indices = np.searchsorted(self.cumulative_freqs, indices, side="right")
values = self.vals[val_indices].tolist()
if single:
assert len(values) == 1
return values[0]
else:
return values
def sample(self) -> T:
ll = len(self)
# This choice does not need to be cryptographically secure or hard to predict
i = random.randrange(ll) # nosec
return self[i]
def samples(self, count: int) -> Sequence[T]:
ll = len(self)
# This choice does not need to be cryptographically secure or hard to predict
i: Sequence[int] = [random.randrange(ll) for _ in range(count)] # nosec
return self[i]
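# A small sketch of building a frequency distribution from (string value, count)
# pairs as they appear in PGO data, and sampling from it; the numbers are made up.
def _example_frequency_distribution():  # hypothetical example helper
    dist = FrequencyDistribution.asIntegerValues([("1", 3), ("5", 7), ("default", 2)])
    # len(dist) == 12; sample() draws a value proportionally to its frequency,
    # where "default" entries map to the module's default-value sentinel.
    return dist.sample(), dist.samples(4)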
# utilities to convert and sample from a PGO frequency distribution
DEFAULT_STR = "default"
def freqsAsIntegerValues(
freqs: Iterable[Tuple[Any, int]],
inclusive_min: Optional[float] = None,
inclusive_max: Optional[float] = None,
) -> Iterator[Tuple[Defaultable[int], int]]:
"""maps the str values to integers, and skips anything that does not look like an integer"""
for v, f in freqs:
try:
if v == DEFAULT_STR:
yield _default_value, f
continue
i = int(v)
if inclusive_min is not None and inclusive_min > i:
continue
if inclusive_max is not None and inclusive_max < i:
continue
yield i, f
except ValueError:
pass
def freqsAsFloatValues(
freqs: Iterable[Tuple[Any, int]],
inclusive_min: Optional[float] = None,
inclusive_max: Optional[float] = None,
) -> Iterator[Tuple[Defaultable[float], int]]:
"""maps the str values to integers, and skips anything that does not look like an integer"""
for v, f in freqs:
try:
if v == DEFAULT_STR:
yield _default_value, f
continue
i = float(v)
if inclusive_min is not None and inclusive_min > i:
continue
if inclusive_max is not None and inclusive_max < i:
continue
yield i, f
except ValueError:
pass
# TODO: we can get a dictionary from freqs (before items() was called)
# and then look up values in it (since values is likely smaller than freqs)
# or, of course, check which one is smaller and iterate through it
def freqsAsEnumValues(
freqs: Iterable[Tuple[Any, int]], values: List[Any]
) -> Iterator[Tuple[Defaultable[Any], int]]:
"""only keeps things that match the string representation of values in the enumeration.
converts from the string to the value as represented in the enumeration.
"""
def as_str(v) -> str:
"""There are some quirks in how the PGO files
encodes values relative to python's str method
"""
if v is None:
return "none"
elif v is True:
return "true"
elif v is False:
return "false"
else:
return str(v)
value_lookup = {as_str(k): k for k in values}
for v, f in freqs:
if v == DEFAULT_STR:
yield _default_value, f
continue
if v in value_lookup:
yield value_lookup[v], f
_input_type = Dict[str, Dict[str, Union[int, Dict[str, Union[str, int]]]]]
# For now, we skip things of the form
# alg -> {default: number}
# (i.e. without parameters)
def normalize_pgo_type(data: _input_type) -> PGO:
return {
alg: {
param_keys: {
param_values: int(param_counts)
for param_values, param_counts in v2.items()
}
for param_keys, v2 in v1.items()
if isinstance(v2, dict)
}
for alg, v1 in data.items()
}
_input_schema: Any = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Input format for pgo files. Keys are the name of the algorithm",
"type": "object",
"additionalProperties": {
"anyOf": [
{
"description": "Keys are the parameter names",
"type": "object",
"additionalProperties": {
"description": "Keys are value names",
"type": "object",
"additionalProperties": {
"anyOf": [
{
"description": "the number of times this value was found",
"type": "integer",
},
{
"description": "the number of times this value was found",
"type": "string",
},
]
},
},
},
{
"description": "default value for the optimizer",
"type": "object",
"additionalProperties": False,
"required": ["default"],
"properties": {
"default": {
"anyOf": [
{
"description": "the number of times the default was found",
"type": "integer",
},
{
"description": "the number of times the default was found",
"type": "string",
},
]
}
},
},
]
},
}
| 10,479 | 29.643275 | 96 |
py
|
lale
|
lale-master/lale/search/lale_grid_search_cv.py
|
# Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING, Any, Dict, List, Optional
import numpy as np
import sklearn.model_selection
import lale.operators as Ops
from lale.search.PGO import PGO
from lale.search.search_space import (
SearchSpace,
SearchSpaceArray,
SearchSpaceEnum,
SearchSpaceNumber,
should_print_search_space,
)
from lale.search.search_space_grid import SearchSpaceGrid, get_search_space_grids
if TYPE_CHECKING:
from lale.operators import PlannedOperator
def get_defaults_as_param_grid(op: "Ops.IndividualOp"):
defaults = op.get_defaults()
return {k: [v] for k, v in defaults.items()}
def get_lale_gridsearchcv_op(op, params, **kwargs):
g = sklearn.model_selection.GridSearchCV(op, params, **kwargs)
return g
# TODO: turn it into a Lale TrainableOperator
# name = f"GridSearchCV[{op.name()}]"
# return Ops.TrainableIndividualOp(_name=name, _impl=g, _schemas=None)
def get_parameter_grids(
op: "PlannedOperator",
num_samples: Optional[int] = None,
num_grids: Optional[float] = None,
pgo: Optional[PGO] = None,
data_schema: Optional[Dict[str, Any]] = None,
):
"""
Parameters
----------
op : The lale PlannedOperator
num_samples : integer, optional
If set, will limit the number of samples for each distribution
num_grids: integer or float, optional
        if set to an integer >= 1, it will determine how many parameter grids will be returned (at most)
        if set to a float between 0 and 1, it will determine what fraction should be returned
        note that setting it to 1 is treated as an integer. To return all results, use None
pgo: Optional profile guided optimization data that guides discretization
    data_schema: Optional schema for the input data, which is used for hyperparameter schema data constraints
"""
return get_grid_search_parameter_grids(
op,
num_samples=num_samples,
num_grids=num_grids,
pgo=pgo,
data_schema=data_schema,
)
def get_grid_search_parameter_grids(
op: "PlannedOperator",
num_samples: Optional[int] = None,
num_grids: Optional[float] = None,
pgo: Optional[PGO] = None,
data_schema: Optional[Dict[str, Any]] = None,
) -> List[Dict[str, List[Any]]]:
"""Top level function: given a lale operator, returns a list of parameter grids
suitable for passing to GridSearchCV.
Note that you will need to wrap the lale operator for sklearn compatibility to call GridSearchCV
    directly. The lale GridSearchCV wrapper takes care of that for you.
"""
hp_grids = get_search_space_grids(
op, num_grids=num_grids, pgo=pgo, data_schema=data_schema
)
grids = SearchSpaceGridstoGSGrids(hp_grids, num_samples=num_samples)
if should_print_search_space("true", "all", "backend", "gridsearchcv"):
name = op.name()
if not name:
name = "an operator"
print(f"GridSearchCV grids for {name}:\n{gridsearchcv_grids_to_string(grids)}")
return grids
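# A minimal usage sketch, assuming lale.lib.sklearn.LogisticRegression is available;
# the returned grids are plain dicts of lists, suitable for sklearn's GridSearchCV
# once the operator is wrapped for sklearn compatibility.
def _example_get_grid_search_parameter_grids():  # hypothetical example helper
    from lale.lib.sklearn import LogisticRegression
    grids = get_grid_search_parameter_grids(LogisticRegression, num_samples=3)
    print(gridsearchcv_grids_to_string(grids))
    return grids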
GSValue = Any
GSGrid = Dict[str, List[GSValue]]
DEFAULT_SAMPLES_PER_DISTRIBUTION = 2
def SearchSpaceNumberToGSValues(
key: str, hp: SearchSpaceNumber, num_samples: Optional[int] = None
) -> List[GSValue]:
"""Returns either a list of values intended to be sampled uniformly"""
samples: int
if num_samples is None:
samples = DEFAULT_SAMPLES_PER_DISTRIBUTION
else:
samples = num_samples
# Add preliminary support for PGO
if hp.pgo is not None:
ret = list(hp.pgo.samples(samples))
return ret
# if we are not doing PGO
dist = "uniform"
if hp.distribution:
dist = hp.distribution
if hp.maximum is None:
raise ValueError(
f"maximum not specified for a number with distribution {dist} for {key}"
)
space_max = hp.getInclusiveMax()
assert space_max is not None
if hp.minimum is None:
raise ValueError(
f"minimum not specified for a number with distribution {dist} for {key}"
)
space_min = hp.getInclusiveMin()
assert space_min is not None
dt: np.dtype
if hp.discrete:
dt = np.dtype(int)
else:
dt = np.dtype(float)
default = hp.default()
if default is not None:
# always use the default as one of the samples
# TODO: ensure that the default is valid according to the schema
if samples <= 1:
return [default]
samples = samples - 1
if dist in ["uniform", "integer"]:
ret = np.linspace(space_min, space_max, num=samples, dtype=dt).tolist()
elif dist == "loguniform":
ret = np.logspace(space_min, space_max, num=samples, dtype=dt).tolist()
else:
raise ValueError(f"unknown/unsupported distribution {dist} for {key}")
if default is not None:
ret.append(default)
return ret
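# A small sketch of the discretization above; the bounds and default are made up.
def _example_number_to_gs_values():  # hypothetical example helper
    hp = SearchSpaceNumber(minimum=0.0, maximum=1.0, distribution="uniform", default=0.5)
    # with a default and num_samples=3, this yields roughly [0.0, 1.0, 0.5]
    return SearchSpaceNumberToGSValues("my_param", hp, num_samples=3)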
def HPValuetoGSValue(
key: str, hp: SearchSpace, num_samples: Optional[int] = None
) -> List[GSValue]:
if isinstance(hp, SearchSpaceEnum):
return hp.vals
elif isinstance(hp, SearchSpaceNumber):
return SearchSpaceNumberToGSValues(key, hp, num_samples=num_samples)
elif isinstance(hp, SearchSpaceArray):
raise ValueError(
f"Arrays are not yet supported by the GridSearchCV backend (key: {key})"
)
else:
raise ValueError(
f"Not yet supported hp description ({type(hp)}) (key: {key}) in the GridSearchCV backend"
)
def SearchSpaceGridtoGSGrid(
hp: SearchSpaceGrid, num_samples: Optional[int] = None
) -> GSGrid:
return {k: HPValuetoGSValue(k, v, num_samples=num_samples) for k, v in hp.items()}
def SearchSpaceGridstoGSGrids(
hp_grids: List[SearchSpaceGrid], num_samples: Optional[int] = None
) -> List[GSGrid]:
return [SearchSpaceGridtoGSGrid(g, num_samples=num_samples) for g in hp_grids]
def gridsearchcv_grid_to_string(grid: GSGrid) -> str:
return "{" + ";".join(f"{k}->{str(v)}" for k, v in grid.items()) + "}"
def gridsearchcv_grids_to_string(grids: List[GSGrid]) -> str:
return "|".join(gridsearchcv_grid_to_string(grid) for grid in grids)
| 6,730 | 32.487562 | 109 |
py
|
lale
|
lale-master/lale/search/search_space_grid.py
|
# Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import math
import random
import warnings
from collections import ChainMap
from typing import Any, Dict, Iterable, List, Optional, Union
from lale.helpers import (
DUMMY_SEARCH_SPACE_GRID_PARAM_NAME,
discriminant_name,
make_indexed_name,
nest_all_HPparams,
nest_choice_all_HPparams,
structure_type_dict,
structure_type_list,
structure_type_name,
structure_type_tuple,
)
from lale.operators import PlannedOperator
from lale.search.PGO import PGO
from lale.search.schema2search_space import op_to_search_space
from lale.search.search_space import (
SearchSpace,
SearchSpaceArray,
SearchSpaceConstant,
SearchSpaceDict,
SearchSpaceEmpty,
SearchSpaceError,
SearchSpaceObject,
SearchSpaceOperator,
SearchSpacePrimitive,
SearchSpaceProduct,
SearchSpaceSum,
should_print_search_space,
)
from lale.util.Visitor import Visitor, accept
SearchSpaceGrid = Dict[str, SearchSpacePrimitive]
def search_space_grid_to_string(grid: SearchSpaceGrid) -> str:
return "{" + ";".join(f"{k}->{str(v)}" for k, v in grid.items()) + "}"
def search_space_grids_to_string(grids: List[SearchSpaceGrid]) -> str:
return "|".join(search_space_grid_to_string(grid) for grid in grids)
def get_search_space_grids(
op: "PlannedOperator",
num_grids: Optional[float] = None,
pgo: Optional[PGO] = None,
data_schema: Optional[Dict[str, Any]] = None,
) -> List[SearchSpaceGrid]:
"""Top level function: given a lale operator, returns a list of hp grids.
Parameters
----------
op : The lale PlannedOperator
num_grids: integer or float, optional
        if set to an integer >= 1, it will determine how many parameter grids will be returned (at most)
        if set to a float between 0 and 1, it will determine what fraction should be returned
        note that setting it to 1 is treated as an integer. To return all results, use None
pgo: Optional Profile Guided Optimization data that can be used when discretizing continuous parameters
    data_schema: A schema for the actual data. If provided, it is used to instantiate data-dependent schema hyperparameter specifications.
"""
all_parameters = op_to_search_space_grids(op, pgo=pgo, data_schema=data_schema)
if should_print_search_space("true", "all", "search_space_grids", "grids"):
name = op.name()
if not name:
name = "an operator"
print(
f"search space grids for {name}:\n{search_space_grids_to_string(all_parameters)}"
)
if num_grids is None:
return all_parameters
else:
if num_grids <= 0:
warnings.warn(
f"get_search_space_grids(num_grids={num_grids}) called with a non-positive value for lale_num_grids"
)
return []
if num_grids >= 1:
samples = math.ceil(num_grids)
if samples >= len(all_parameters):
return all_parameters
else:
warnings.warn(
f"get_search_space_grids(num_grids={num_grids}) sampling {math.ceil(num_grids)}/{len(all_parameters)}"
)
return random.sample(all_parameters, math.ceil(num_grids))
else:
samples = round(len(all_parameters) * num_grids)
warnings.warn(
f"get_search_space_grids(num_grids={num_grids}) sampling {samples}/{len(all_parameters)}"
)
return random.sample(all_parameters, samples)
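# A minimal usage sketch, assuming lale.lib.sklearn.LogisticRegression is available;
# each returned grid maps hyperparameter names to primitive search spaces.
def _example_get_search_space_grids():  # hypothetical example helper
    from lale.lib.sklearn import LogisticRegression
    grids = get_search_space_grids(LogisticRegression)
    print(search_space_grids_to_string(grids))
    return grids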
def search_space_to_grids(hp: SearchSpace) -> List[SearchSpaceGrid]:
return SearchSpaceToGridVisitor.run(hp)
def op_to_search_space_grids(
op: PlannedOperator,
pgo: Optional[PGO] = None,
data_schema: Optional[Dict[str, Any]] = None,
) -> List[SearchSpaceGrid]:
search_space = op_to_search_space(op, pgo=pgo, data_schema=data_schema)
grids = search_space_to_grids(search_space)
return grids
# lets handle the general case
SearchSpaceGridInternalType = Union[List[SearchSpaceGrid], SearchSpacePrimitive]
class SearchSpaceToGridVisitor(Visitor):
@classmethod
def run(cls, space: SearchSpace) -> List[SearchSpaceGrid]:
visitor = cls()
grids: SearchSpaceGridInternalType = accept(space, visitor)
fixed_grids = cls.fixupDegenerateSearchSpaces(grids)
return fixed_grids
@classmethod
def fixupDegenerateSearchSpaces(
cls, space: SearchSpaceGridInternalType
) -> List[SearchSpaceGrid]:
if isinstance(space, SearchSpacePrimitive):
return [{DUMMY_SEARCH_SPACE_GRID_PARAM_NAME: space}]
else:
return space
def visitSearchSpacePrimitive(
self, space: SearchSpacePrimitive
) -> SearchSpacePrimitive:
return space
visitSearchSpaceEnum = visitSearchSpacePrimitive
visitSearchSpaceConstant = visitSearchSpaceEnum
visitSearchSpaceBool = visitSearchSpaceEnum
visitSearchSpaceNumber = visitSearchSpacePrimitive
def _searchSpaceList(
self, space: SearchSpaceArray, *, size: int
) -> List[SearchSpaceGrid]:
sub_spaces = space.items(max_elts=size)
param_grids: List[List[SearchSpaceGrid]] = [
nest_all_HPparams(
str(index), self.fixupDegenerateSearchSpaces(accept(sub, self))
)
for index, sub in enumerate(sub_spaces)
]
param_grids_product: Iterable[Iterable[SearchSpaceGrid]] = itertools.product(
*param_grids
)
chained_grids: List[SearchSpaceGrid] = [
dict(
ChainMap(
*gridline,
)
)
for gridline in param_grids_product
]
if space.is_tuple:
st_val = structure_type_tuple
else:
st_val = structure_type_list
discriminated_grids: List[SearchSpaceGrid] = [
{**d, structure_type_name: SearchSpaceConstant(st_val)}
for d in chained_grids
]
return discriminated_grids
def visitSearchSpaceArray(self, space: SearchSpaceArray) -> List[SearchSpaceGrid]:
if space.minimum == space.maximum:
return self._searchSpaceList(space, size=space.minimum)
else:
ret: List[SearchSpaceGrid] = []
for i in range(space.minimum, space.maximum + 1):
ret.extend(self._searchSpaceList(space, size=i))
return ret
def visitSearchSpaceObject(self, space: SearchSpaceObject) -> List[SearchSpaceGrid]:
keys = space.keys
keys_len = len(keys)
final_choices: List[SearchSpaceGrid] = []
for c in space.choices:
assert keys_len == len(c)
kvs_complex: List[List[SearchSpaceGrid]] = []
kvs_simple: SearchSpaceGrid = {}
for k, v in zip(keys, c):
vspace: Union[List[SearchSpaceGrid], SearchSpacePrimitive] = accept(
v, self
)
if isinstance(vspace, SearchSpacePrimitive):
kvs_simple[k] = vspace
else:
nested_vspace: List[SearchSpaceGrid] = nest_all_HPparams(k, vspace)
if nested_vspace:
kvs_complex.append(nested_vspace)
nested_space_choices: Iterable[
Iterable[SearchSpaceGrid]
] = itertools.product(*kvs_complex)
nested_space_choices_lists: List[List[SearchSpaceGrid]] = [
list(x) for x in nested_space_choices
]
nested_space_choices_filtered: List[List[SearchSpaceGrid]] = [
ll for ll in nested_space_choices_lists if ll
]
if nested_space_choices_filtered:
chained_grids: Iterable[SearchSpaceGrid] = [
dict(ChainMap(*nested_choice, kvs_simple))
for nested_choice in nested_space_choices_filtered
]
final_choices.extend(chained_grids)
else:
final_choices.append(kvs_simple)
return final_choices
def visitSearchSpaceSum(self, op: SearchSpaceSum) -> SearchSpaceGridInternalType:
sub_spaces: List[SearchSpace] = op.sub_spaces
sub_grids: Iterable[SearchSpaceGridInternalType] = [
accept(cur_space, self) for cur_space in sub_spaces
]
if len(sub_spaces) == 1:
return list(sub_grids)[0]
else:
fixed_grids: Iterable[List[SearchSpaceGrid]] = (
SearchSpaceToGridVisitor.fixupDegenerateSearchSpaces(grid)
for grid in sub_grids
)
final_grids: List[SearchSpaceGrid] = []
for i, grids in enumerate(fixed_grids):
if not grids:
grids = [{}]
else:
# we need to add in this nesting
# in case a higher order operator directly contains
# another
grids = nest_choice_all_HPparams(grids)
discriminated_grids: List[SearchSpaceGrid] = [
{**d, discriminant_name: SearchSpaceConstant(i)} for d in grids
]
final_grids.extend(discriminated_grids)
return final_grids
def visitSearchSpaceProduct(
self, op: SearchSpaceProduct
) -> SearchSpaceGridInternalType:
sub_spaces = op.get_indexed_spaces()
param_grids: List[List[SearchSpaceGrid]] = [
nest_all_HPparams(
make_indexed_name(name, index),
self.fixupDegenerateSearchSpaces(accept(space, self)),
)
for name, index, space in sub_spaces
]
param_grids_product: Iterable[Iterable[SearchSpaceGrid]] = itertools.product(
*param_grids
)
chained_grids: List[SearchSpaceGrid] = [
dict(ChainMap(*gridline)) for gridline in param_grids_product
]
return chained_grids
def visitSearchSpaceDict(self, op: SearchSpaceDict) -> SearchSpaceGridInternalType:
sub_spaces = op.space_dict.items()
param_grids: List[List[SearchSpaceGrid]] = [
nest_all_HPparams(
name,
self.fixupDegenerateSearchSpaces(accept(space, self)),
)
for name, space in sub_spaces
]
param_grids_product: Iterable[Iterable[SearchSpaceGrid]] = itertools.product(
*param_grids
)
chained_grids: List[SearchSpaceGrid] = [
dict(ChainMap(*gridline)) for gridline in param_grids_product
]
discriminated_grids: List[SearchSpaceGrid] = [
{**d, structure_type_name: SearchSpaceConstant(structure_type_dict)}
for d in chained_grids
]
return discriminated_grids
def visitSearchSpaceOperator(
self, op: SearchSpaceOperator
) -> SearchSpaceGridInternalType:
return accept(op.sub_space, self)
def visitSearchSpaceEmpty(self, op: SearchSpaceEmpty):
raise SearchSpaceError(
None, "Grid based backends can't compile an empty (sub-) search space"
)
| 11,850 | 35.021277 | 141 |
py
|
lale
|
lale-master/lale/search/lale_smac.py
|
# Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional
from ConfigSpace.conditions import EqualsCondition
from ConfigSpace.hyperparameters import (
CategoricalHyperparameter,
Hyperparameter,
UniformFloatHyperparameter,
UniformIntegerHyperparameter,
)
from smac.configspace import ConfigurationSpace
from lale.search.PGO import PGO
from lale.search.search_space import (
SearchSpace,
SearchSpaceArray,
SearchSpaceEnum,
SearchSpaceNumber,
should_print_search_space,
)
from lale.search.search_space_grid import SearchSpaceGrid, get_search_space_grids
if TYPE_CHECKING:
import lale.operators as Ops
def lale_op_smac_tae(op: "Ops.PlannedOperator", f_min):
    # TODO: we can probably do this in a different way, but since
    # we already have these sklearn compatibility wrappers, it is easier for now to use them
op_compat = op
def f(cfg):
from sklearn.base import clone
wrapped_op = clone(op_compat)
cfg2 = smac_fixup_params(cfg)
trainable = wrapped_op.set_params(**cfg2)
return f_min(trainable)
return f
def lale_trainable_op_from_config(
op: "Ops.PlannedOperator", cfg
) -> "Ops.TrainableOperator":
from sklearn.base import clone
op_compat = op
wrapped_op = clone(op_compat)
cfg2 = smac_fixup_params(cfg)
trainable = wrapped_op.with_params(**cfg2)
return trainable
def get_smac_space(
op: "Ops.PlannedOperator",
lale_num_grids: Optional[float] = None,
lale_pgo: Optional[PGO] = None,
data_schema: Optional[Dict[str, Any]] = None,
) -> ConfigurationSpace:
"""Top level function: given a lale operator, returns a ConfigurationSpace for use with SMAC.
Parameters
----------
op : The lale PlannedOperator
lale_num_grids: integer or float, optional
        if set to an integer >= 1, it will determine how many parameter grids will be returned (at most)
        if set to a float between 0 and 1, it will determine what fraction should be returned
        note that setting it to 1 is treated as an integer. To return all results, use None
lale_pgo: Optional profile guided optimization data that guides discretization
    data_schema: Optional schema for the input data, which is used for hyperparameter schema data constraints
"""
hp_grids = get_search_space_grids(
op, num_grids=lale_num_grids, pgo=lale_pgo, data_schema=data_schema
)
cs = hp_grids_to_smac_cs(hp_grids)
if should_print_search_space("true", "all", "backend", "smac"):
name = op.name()
if not name:
name = "an operator"
print(f"SMAC configuration for {name}:\n{str(cs)}")
return cs
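# A minimal usage sketch, assuming SMAC and lale.lib.sklearn.LogisticRegression are
# available; the resulting ConfigurationSpace can be handed to a SMAC facade together
# with the objective produced by lale_op_smac_tae.
def _example_get_smac_space():  # hypothetical example helper
    from lale.lib.sklearn import LogisticRegression
    cs = get_smac_space(LogisticRegression, lale_num_grids=5)
    print(cs)
    return cs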
def smac_fixup_params(cfg):
def strip_key(k: str) -> str:
return k.rsplit("_", 1)[0]
def transform_value(v):
if v == "_lale_none":
return None
else:
return v
ret = {
strip_key(k): transform_value(v)
for (k, v) in cfg.get_dictionary().items()
if k != "disjunct_discriminant"
}
return ret
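# A small sketch of the fixup above; _FakeConfig stands in for a SMAC Configuration
# (only get_dictionary is needed) and the keys below are made up.
class _FakeConfig:  # hypothetical stand-in for illustration only
    def __init__(self, d):
        self._d = d
    def get_dictionary(self):
        return self._d
def _example_smac_fixup():  # hypothetical example helper
    cfg = _FakeConfig(
        {"solver_0": "lbfgs", "penalty_0": "_lale_none", "disjunct_discriminant": 0}
    )
    # yields {"solver": "lbfgs", "penalty": None}
    return smac_fixup_params(cfg)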
# When sampling from distributions, this is the default number of samples to take.
# Users can override this by passing in num_samples to the appropriate function
SAMPLES_PER_DISTRIBUTION = 2
# We can first convert from our search space IR
# to a more limited grid structure
# This can then be converted to the format required for SMAC
def SearchSpaceNumberToSMAC(key: str, hp: SearchSpaceNumber) -> Hyperparameter:
"""Returns either a list of values intended to be sampled uniformly or a frozen scipy.stats distribution"""
dist = "uniform"
if hp.distribution:
dist = hp.distribution
if hp.maximum is None:
raise ValueError(
f"maximum not specified for a number with distribution {dist} for {key}"
)
space_max = hp.getInclusiveMax()
if hp.minimum is None:
raise ValueError(
f"minimum not specified for a number with distribution {dist} for {key}"
)
space_min = hp.getInclusiveMin()
log: bool
if dist in ["uniform", "integer"]:
log = False
elif dist == "loguniform":
log = True
else:
raise ValueError(f"unknown/unsupported distribution {dist} for {key}")
if hp.discrete:
return UniformIntegerHyperparameter(key, space_min, space_max, log=log)
else:
return UniformFloatHyperparameter(key, space_min, space_max, log=log)
class FakeNone:
pass
MyFakeNone = FakeNone()
def HPValuetoSMAC(key: str, hp: SearchSpace) -> Hyperparameter:
def val_to_str(v):
if v is None:
return "_lale_none"
else:
return v
if isinstance(hp, SearchSpaceEnum):
return CategoricalHyperparameter(key, [val_to_str(x) for x in hp.vals])
elif isinstance(hp, SearchSpaceNumber):
return SearchSpaceNumberToSMAC(key, hp)
elif isinstance(hp, SearchSpaceArray):
raise ValueError(
f"Arrays are not yet supported by the SMAC backend (key: {key})"
)
else:
raise ValueError(
f"Not yet supported hp description ({type(hp)}) (key: {key}) in the GridSearchCV backend"
)
def SearchSpaceGridtoSMAC(hp: SearchSpaceGrid, disc: int) -> Iterable[Hyperparameter]:
return (HPValuetoSMAC(f"{k}_{disc}", v) for k, v in hp.items())
disc_str = "disjunct_discriminant"
def addSearchSpaceGrid(
hp: SearchSpaceGrid, disc: int, parent_disc: Hyperparameter, cs: ConfigurationSpace
) -> None:
smac = SearchSpaceGridtoSMAC(hp, disc)
for hyp in smac:
cs.add_hyperparameter(hyp)
cs.add_condition(EqualsCondition(child=hyp, parent=parent_disc, value=disc))
def addSearchSpaceGrids(grids: List[SearchSpaceGrid], cs: ConfigurationSpace) -> None:
parent_disc = CategoricalHyperparameter(disc_str, range(len(grids)))
cs.add_hyperparameter(parent_disc)
for i, g in enumerate(grids):
addSearchSpaceGrid(g, i, parent_disc, cs)
def hp_grids_to_smac_cs(grids: List[SearchSpaceGrid]) -> ConfigurationSpace:
cs: ConfigurationSpace = ConfigurationSpace()
addSearchSpaceGrids(grids, cs)
return cs
| 6,785 | 30.562791 | 111 |
py
|
lale
|
lale-master/lale/search/op2hp.py
|
# Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING, Any, Dict, Optional
from lale.search.lale_hyperopt import search_space_to_hp_expr, search_space_to_hp_str
from lale.search.PGO import PGO
from lale.search.schema2search_space import op_to_search_space
from lale.search.search_space import should_print_search_space
if TYPE_CHECKING:
from lale.operators import PlannedOperator
def hyperopt_search_space(
op: "PlannedOperator",
schema=None,
pgo: Optional[PGO] = None,
data_schema: Optional[Dict[str, Any]] = None,
):
search_space = op_to_search_space(op, pgo=pgo, data_schema=data_schema)
if search_space:
name = op.name()
if should_print_search_space("true", "all", "backend", "hyperopt"):
print(
f"hyperopt search space for {name}: {search_space_to_hp_str(search_space, name)}"
)
return search_space_to_hp_expr(search_space, name)
else:
return None
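# A minimal usage sketch, assuming hyperopt and lale.lib.sklearn.LogisticRegression
# are available; the returned expression can be sampled with hyperopt's
# pyll.stochastic.sample or used as the space argument of hyperopt.fmin.
def _example_hyperopt_search_space():  # hypothetical example helper
    from lale.lib.sklearn import LogisticRegression
    return hyperopt_search_space(LogisticRegression)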
| 1,517 | 34.302326 | 97 |
py
|
lale
|
lale-master/lale/search/__init__.py
|
# Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 575 | 40.142857 | 74 |
py
|
lale
|
lale-master/lale/search/search_space.py
|
# Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import os
from typing import Any, Dict, Iterable, List, Optional, Set, Tuple, Union
import numpy
from lale.search.PGO import FrequencyDistribution
from lale.util import VisitorPathError
from lale.util.VisitorMeta import AbstractVisitorMeta
PGO_input_type = Union[FrequencyDistribution, Iterable[Tuple[Any, int]], None]
class SearchSpaceError(VisitorPathError):
def __init__(self, sub_path: Any, message: Optional[str] = None):
super().__init__([], message)
self.sub_path = sub_path
def path_string(self) -> str:
return SearchSpace.focused_path_string(list(self.path))
def get_message_str(self) -> str:
msg = super().get_message_str()
if self.sub_path is None:
return msg
else:
return f"for path {self.sub_path}: {msg}"
class SearchSpace(metaclass=AbstractVisitorMeta):
def __init__(self, default: Optional[Any] = None):
self._default = default
_default: Optional[Any]
def default(self) -> Optional[Any]:
"""Return an optional default value, if None.
if not None, the default value should be in the
search space
"""
return self._default
@classmethod
def focused_path_string(cls, path: List["SearchSpace"]) -> str:
if path:
return path[0].str_with_focus(path, default="")
else:
return ""
def str_with_focus(
self, path: Optional[List["SearchSpace"]] = None, default: Any = None
) -> Union[str, Any]:
"""Given a path list, returns a string for the focused path.
If the path is None, returns everything, without focus.
        If the path does not start with self, returns the given default.
"""
if path is None:
return self._focused_str(path=None)
elif path and path[0] is self:
return self._focused_str(path=path[1:])
else:
return default
@abc.abstractmethod
def _focused_str(self, path: Optional[List["SearchSpace"]] = None) -> str:
"""Given the continuation path list, returns a string for the focused path.
If the path is None, returns everything, without focus.
Otherwise, the path is for children
"""
pass
def __str__(self) -> str:
return self.str_with_focus(path=None, default="")
class SearchSpaceEmpty(SearchSpace):
def __init__(self):
super().__init__()
def _focused_str(self, path: Optional[List[SearchSpace]] = None) -> str:
return "***EMPTY***"
class SearchSpacePrimitive(SearchSpace):
def __init__(self, default: Optional[Any] = None):
super().__init__(default=default)
class SearchSpaceEnum(SearchSpacePrimitive):
pgo: Optional[FrequencyDistribution]
vals: List[Any]
def __init__(
self,
vals: Iterable[Any],
pgo: PGO_input_type = None,
default: Optional[Any] = None,
):
super().__init__(default=default)
self.vals = sorted(vals, key=str)
if pgo is None or isinstance(pgo, FrequencyDistribution):
self.pgo = pgo
else:
self.pgo = FrequencyDistribution.asEnumValues(pgo, self.vals)
def _focused_str(self, path: Optional[List[SearchSpace]] = None) -> str:
return "<" + ",".join((str(x) for x in self.vals)) + ">"
class SearchSpaceConstant(SearchSpaceEnum):
def __init__(self, v, pgo: PGO_input_type = None):
super().__init__([v], pgo=pgo, default=v)
def _focused_str(self, path: Optional[List[SearchSpace]] = None) -> str:
return str(self.vals[0])
class SearchSpaceBool(SearchSpaceEnum):
def __init__(self, pgo: PGO_input_type = None, default: Optional[Any] = None):
super().__init__([True, False], pgo=pgo, default=default)
class SearchSpaceNumber(SearchSpacePrimitive):
minimum: Optional[float]
    exclusiveMinimum: bool
maximum: Optional[float]
exclusiveMaximum: bool
discrete: bool
distribution: str
pgo: Optional[FrequencyDistribution]
def __init__(
self,
minimum=None,
exclusiveMinimum: bool = False,
maximum=None,
exclusiveMaximum: bool = False,
discrete: bool = False,
distribution="uniform",
pgo: PGO_input_type = None,
default: Optional[Any] = None,
) -> None:
super().__init__(default=default)
self.minimum = minimum
self.exclusiveMinimum = exclusiveMinimum
self.maximum = maximum
self.exclusiveMaximum = exclusiveMaximum
self.distribution = distribution
self.discrete = discrete
if pgo is None or isinstance(pgo, FrequencyDistribution):
self.pgo = pgo
else:
if discrete:
self.pgo = FrequencyDistribution.asIntegerValues(
pgo,
inclusive_min=self.getInclusiveMin(),
inclusive_max=self.getInclusiveMax(),
)
else:
self.pgo = FrequencyDistribution.asFloatValues(
pgo,
inclusive_min=self.getInclusiveMin(),
inclusive_max=self.getInclusiveMax(),
)
def getInclusiveMax(self) -> Optional[float]:
"""Return the maximum as an inclusive maximum (exclusive maxima are adjusted accordingly)"""
schema_max = self.maximum
if schema_max is None:
return None
if self.exclusiveMaximum:
if self.discrete:
schema_max = schema_max - 1
else:
schema_max = numpy.nextafter(schema_max, float("-inf"))
return schema_max
def getInclusiveMin(self) -> Optional[float]:
"""Return the maximum as an inclusive minimum (exclusive minima are adjusted accordingly)"""
schema_min = self.minimum
if schema_min is None:
return None
if self.exclusiveMinimum:
if self.discrete:
schema_min = schema_min + 1
else:
schema_min = numpy.nextafter(schema_min, float("+inf"))
return schema_min
def _focused_str(self, path: Optional[List[SearchSpace]] = None) -> str:
ret: str = ""
if self.exclusiveMinimum or self.minimum is None:
ret += "("
else:
ret += "["
if self.discrete:
ret += "\u2308"
if self.minimum is None:
ret += "\u221E"
else:
ret += str(self.minimum)
if (
not self.distribution
or self.distribution == "uniform"
or self.distribution == "integer"
):
ret += ","
elif self.distribution == "loguniform":
ret += ",<log>,"
else:
ret += ",<" + self.distribution + ">,"
if self.maximum is None:
ret += "\u221E"
else:
ret += str(self.maximum)
if self.discrete:
ret += "\u2309"
if self.exclusiveMaximum or self.maximum is None:
ret += ")"
else:
ret += "]"
return ret
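# A small sketch of how exclusive bounds become inclusive ones: a discrete bound
# moves by 1, a continuous bound moves to the adjacent float; the bounds are made up.
def _example_inclusive_bounds():  # hypothetical example helper
    space = SearchSpaceNumber(
        minimum=0, exclusiveMinimum=True, maximum=10, exclusiveMaximum=True, discrete=True
    )
    # yields (1, 9)
    return space.getInclusiveMin(), space.getInclusiveMax()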
class SearchSpaceArray(SearchSpace):
def __init__(
self,
prefix: Optional[List[SearchSpace]],
minimum: int = 0,
*,
maximum: int,
additional: Optional[SearchSpace] = None,
is_tuple=False,
) -> None:
super().__init__()
self.minimum = minimum
self.maximum = maximum
self.prefix = prefix
self.additional = additional
self.is_tuple = is_tuple
def _focused_str(self, path: Optional[List[SearchSpace]] = None) -> str:
ret: str = ""
ret += f"Array<{self.minimum}, {self.maximum}>"
if self.is_tuple:
ret += "("
else:
ret += "["
if self.prefix is not None:
ret += ",".join(
p.str_with_focus(path=path, default="") for p in self.prefix
)
if self.additional is not None:
ret += ","
if self.additional is not None:
ret += "...,"
ret += self.additional.str_with_focus(path=path, default="")
if self.is_tuple:
ret += ")"
else:
ret += "]"
return ret
def items(self, max_elts: Optional[int] = None) -> Iterable[SearchSpace]:
prefix_len: int
if self.prefix is not None:
prefix_len = len(self.prefix)
else:
prefix_len = 0
num_elts = self.maximum
if max_elts is not None:
num_elts = min(num_elts, max_elts)
for i in range(num_elts):
if self.prefix is not None and i < prefix_len:
yield self.prefix[i]
else:
if self.additional is not None:
yield self.additional
class SearchSpaceDict(SearchSpace):
def __init__(self, d: Dict[str, SearchSpace]) -> None:
super().__init__()
self.space_dict = d
def _focused_str(self, path: Optional[List[SearchSpace]] = None) -> str:
ret: str = ""
ret += "Dict{"
dict_strs: List[str] = []
for k, v in self.space_dict.items():
dict_strs.append(k + "->" + v.str_with_focus(path=path, default=None))
ret += ",".join(dict_strs) + "}"
return ret
class SearchSpaceObject(SearchSpace):
def __init__(self, longName: str, keys: List[str], choices: Iterable[Any]) -> None:
super().__init__()
self.longName = longName
self.keys = keys
self.choices = choices
def _focused_str(self, path: Optional[List[SearchSpace]] = None) -> str:
ret: str = ""
ret += f"Object<{self.longName}>["
choice_strs: List[str] = []
for c in self.choices:
opts: List[str] = []
for k, v in zip(self.keys, c):
vv = v.str_with_focus(path=path, default=None)
if vv is not None:
opts.append(k + "->" + vv)
if opts:
ll = ";".join(opts)
choice_strs.append("{" + ll + "}")
else:
choice_strs.append("")
ret += ",".join(choice_strs) + "]"
return ret
class SearchSpaceSum(SearchSpace):
sub_spaces: List[SearchSpace]
def __init__(self, sub_spaces: List[SearchSpace], default: Optional[Any] = None):
super().__init__(default=default)
self.sub_spaces = sub_spaces
def _focused_str(self, path: Optional[List[SearchSpace]] = None) -> str:
ret: str = "\u2211["
ret += "|".join(
p.str_with_focus(path=path, default="") for p in self.sub_spaces
)
ret += "]"
return ret
class SearchSpaceOperator(SearchSpace):
sub_space: SearchSpace
def __init__(self, sub_space: SearchSpace, default: Optional[Any] = None):
super().__init__(default=default)
self.sub_space = sub_space
def _focused_str(self, path: Optional[List[SearchSpace]] = None) -> str:
ret: str = "\u00AB"
ret += self.sub_space.str_with_focus(path=path, default="")
ret += "\u00BB"
return ret
class SearchSpaceProduct(SearchSpace):
sub_spaces: List[Tuple[str, SearchSpace]]
def __init__(
self, sub_spaces: List[Tuple[str, SearchSpace]], default: Optional[Any] = None
):
super().__init__(default=default)
self.sub_spaces = sub_spaces
def get_indexed_spaces(self) -> Iterable[Tuple[str, int, SearchSpace]]:
indices: Dict[str, int] = {}
def make_indexed(name: str) -> Tuple[str, int]:
idx = 0
if name in indices:
idx = indices[name] + 1
indices[name] = idx
else:
indices[name] = 0
return (name, idx)
def enhance_tuple(
x: Tuple[str, int], space: SearchSpace
) -> Tuple[str, int, SearchSpace]:
return (x[0], x[1], space)
return [
enhance_tuple(make_indexed(name), space) for name, space in self.sub_spaces
]
def _focused_str(self, path: Optional[List[SearchSpace]] = None) -> str:
ret: str = "\u220F{"
vv: Optional[str]
parts: List[str] = []
for k, v in self.sub_spaces:
vv = v.str_with_focus(path=path, default=None)
if vv is not None:
parts.append(k + "->" + vv)
ret = ";".join(parts)
ret += "}"
return ret
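# Illustrative sketch (documentation aid, not part of the library API): how
# get_indexed_spaces disambiguates repeated sub-space names by pairing each name
# with a per-name occurrence index.
def _example_get_indexed_spaces() -> None:  # pragma: no cover
    leaf = SearchSpaceDict({})
    prod = SearchSpaceProduct([("pca", leaf), ("pca", leaf), ("lr", leaf)])
    # yields [("pca", 0, leaf), ("pca", 1, leaf), ("lr", 0, leaf)]
    print(list(prod.get_indexed_spaces()))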
# for debugging
_print_search_space_env_options: Optional[Set[str]] = None
def _get_print_search_space_options() -> Set[str]:
global _print_search_space_env_options # pylint:disable=global-statement
options: Set[str]
if _print_search_space_env_options is None:
debug = os.environ.get("LALE_PRINT_SEARCH_SPACE", None)
if debug is None:
options = set()
else:
options_raw = debug.split(",")
options = set(s.strip().lower() for s in options_raw)
_print_search_space_env_options = options
else:
options = _print_search_space_env_options
return options
def should_print_search_space(*s: str):
options: Set[str] = _get_print_search_space_options()
for x in s:
if x in options:
return True
return False
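# Illustrative sketch (documentation aid, not part of the library API): the debug
# flags above are read once from the LALE_PRINT_SEARCH_SPACE environment variable,
# as a comma-separated, case-insensitive list of option names.
def _example_print_search_space_options() -> None:  # pragma: no cover
    global _print_search_space_env_options  # pylint:disable=global-statement
    os.environ["LALE_PRINT_SEARCH_SPACE"] = "true, Optimizer"
    _print_search_space_env_options = None  # reset the cache for the example
    print(should_print_search_space("true"))  # True
    print(should_print_search_space("optimizer"))  # True (options are lowercased)
    print(should_print_search_space("backend"))  # False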
| 13,992 | 30.164811 | 100 |
py
|
lale
|
lale-master/lale/search/lale_hyperopt.py
|
# Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import re
from typing import Dict, Iterable, List, Optional
from hyperopt import hp
from hyperopt.pyll import scope
from lale import helpers
from lale.helpers import make_indexed_name
from lale.operators import Operator
from lale.search.PGO import FrequencyDistribution
from lale.search.search_space import (
SearchSpace,
SearchSpaceArray,
SearchSpaceDict,
SearchSpaceEmpty,
SearchSpaceEnum,
SearchSpaceError,
SearchSpaceNumber,
SearchSpaceObject,
SearchSpaceOperator,
SearchSpaceProduct,
SearchSpaceSum,
)
from lale.util.Visitor import Visitor, accept
def search_space_to_hp_expr(space: SearchSpace, name: str):
return SearchSpaceHPExprVisitor.run(space, name)
def search_space_to_hp_str(space: SearchSpace, name: str) -> str:
return SearchSpaceHPStrVisitor.run(space, name)
def search_space_to_str_for_comparison(space: SearchSpace, name: str) -> str:
return SearchSpaceHPStrVisitor.run(space, name, counter=None, useCounter=False)
def _mk_label(label, counter, useCounter=True):
if counter is None or not useCounter:
return label
else:
return f"{label}{counter}"
@scope.define
def pgo_sample(pgo, sample):
return pgo[sample]
@scope.define
def make_nested_hyperopt(space):
return helpers.make_nested_hyperopt_space(space)
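# Illustrative sketch (documentation aid, not part of the library API): the kind of
# hyperopt expression the visitor below emits for a discrete number in [2, 64] with
# a log-uniform distribution (the "n_estimators" label is made up for illustration).
def _example_compiled_number():  # pragma: no cover
    return scope.int(hp.qloguniform("n_estimators", math.log(2), math.log(64), 1))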
class SearchSpaceHPExprVisitor(Visitor):
names: Dict[str, int]
@classmethod
def run(cls, space: SearchSpace, name: str):
visitor = cls(name)
return accept(space, visitor, name)
def __init__(self, name: str):
super().__init__()
self.names = {}
def get_unique_name(self, name: str) -> str:
if name in self.names:
counter = self.names[name] + 1
self.names[name] = counter
return f"{name}${str(counter)}"
else:
self.names[name] = 0
return name
def mk_label(self, label, counter, useCounter=True):
return self.get_unique_name(_mk_label(label, counter, useCounter=useCounter))
def visitSearchSpaceEnum(self, space: SearchSpaceEnum, path: str, counter=None):
def as_hp_vals(v):
# Lists are not "safe" to pass to hyperopt without wrapping
if isinstance(v, (list, tuple, Operator)):
return helpers.val_wrapper(v)
else:
return v
if len(space.vals) == 1:
return as_hp_vals(space.vals[0])
else:
return hp.choice(
self.mk_label(path, counter), [as_hp_vals(v) for v in space.vals]
)
visitSearchSpaceConstant = visitSearchSpaceEnum
visitSearchSpaceBool = visitSearchSpaceEnum
def visitSearchSpaceNumber(self, space: SearchSpaceNumber, path: str, counter=None):
label = self.mk_label(path, counter)
if space.pgo is not None:
return scope.pgo_sample(
space.pgo, hp.quniform(label, 0, len(space.pgo) - 1, 1)
)
dist = "uniform"
if space.distribution:
dist = space.distribution
if space.maximum is None:
raise SearchSpaceError(
path, f"maximum not specified for a number with distribution {dist}"
)
space_max = space.getInclusiveMax()
# if the maximum is not None, the inclusive maximum should not be none
assert space_max is not None
# These distributions need only a maximum
if dist == "integer":
if not space.discrete:
raise SearchSpaceError(
path,
"integer distribution specified for a non discrete numeric type",
)
return hp.randint(label, space_max)
if space.minimum is None:
raise SearchSpaceError(
path, f"minimum not specified for a number with distribution {dist}"
)
space_min = space.getInclusiveMin()
# if the minimum is not None, the inclusive minimum should not be none
assert space_min is not None
if dist == "uniform":
if space.discrete:
return scope.int(hp.quniform(label, space_min, space_max, 1))
else:
return hp.uniform(label, space_min, space_max)
elif dist == "loguniform":
# for log distributions, hyperopt requires that we provide the log of the min/max
if space_min <= 0:
raise SearchSpaceError(
path,
f"minimum of 0 specified with a {dist} distribution. This is not allowed; please set it (possibly using minimumForOptimizer) to be positive",
)
if space_min > 0:
space_min = math.log(space_min)
if space_max > 0:
space_max = math.log(space_max)
if space.discrete:
return scope.int(hp.qloguniform(label, space_min, space_max, 1))
else:
return hp.loguniform(label, space_min, space_max)
else:
raise SearchSpaceError(path, f"Unknown distribution type: {dist}")
def array_single_expr_(self, space: SearchSpaceArray, path: str, num):
p = _mk_label(path, num) + "_"
items: Iterable[SearchSpace] = space.items()
ret = [accept(sub, self, p, counter=x) for x, sub in enumerate(items)]
return tuple(ret) if space.is_tuple else ret
def visitSearchSpaceArray(self, space: SearchSpaceArray, path: str, counter=None):
assert space.maximum >= space.minimum
p = _mk_label(path, counter)
cp = p + "_"
if space.minimum == space.maximum:
expr = self.array_single_expr_(space, cp, space.minimum)
return expr
else:
exprs = [
self.array_single_expr_(space, cp, x)
for x in range(space.minimum, space.maximum + 1)
]
res = hp.choice(p, exprs)
return res
def visitSearchSpaceObject(self, space: SearchSpaceObject, path: str, counter=None):
search_space = {}
any_path = self.get_unique_name(_mk_label(path, counter) + "_" + "combos")
search_space["name"] = space.longName
child_counter = None
def asexpr(key, e):
nonlocal child_counter
if e is None:
return None
else:
ee = accept(e, self, path + "_" + key, counter=child_counter)
if child_counter is None:
child_counter = 1
else:
child_counter = child_counter + 1
return ee
def choice_as_tuple_expr(c):
assert len(space.keys) == len(c)
ret = [asexpr(space.keys[ind], e) for ind, e in enumerate(c)]
return ret
choices = [choice_as_tuple_expr(c) for c in space.choices]
valid_hyperparam_combinations = hp.choice(any_path, choices)
i = 0
for k in space.keys:
search_space[k] = valid_hyperparam_combinations[i]
i = i + 1
return search_space
def visitSearchSpaceDict(self, sd: SearchSpaceDict, path: str, counter=None):
search_spaces = {
name: accept(space, self, path + "_" + name)
for name, space in sd.space_dict.items()
}
return search_spaces
def visitSearchSpaceProduct(
self, prod: SearchSpaceProduct, path: str, counter=None
):
search_spaces = [
accept(space, self, self.get_unique_name(make_indexed_name(name, index)))
for name, index, space in prod.get_indexed_spaces()
]
return search_spaces
def visitSearchSpaceSum(self, space_sum: SearchSpaceSum, path: str, counter=None):
if len(space_sum.sub_spaces) == 1:
return accept(space_sum.sub_spaces[0], self, "")
else:
unique_name: str = self.get_unique_name("choice")
search_spaces = hp.choice(
unique_name,
[
{str(i): accept(m, self, "")}
for i, m in enumerate(space_sum.sub_spaces)
],
)
return search_spaces
def visitSearchSpaceOperator(
self, op: SearchSpaceOperator, path: str, counter=None
):
return scope.make_nested_hyperopt(accept(op.sub_space, self, path))
def visitSearchSpaceEmpty(self, op: SearchSpaceEmpty, path: str, counter=None):
raise SearchSpaceError(
path, "The hyperopt backend can't compile an empty (sub-) search space"
)
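# Illustrative sketch (documentation aid, not part of the library API): compiling a
# small search space with the visitor above. The leaf spaces are left empty so the
# example only depends on constructors shown in lale.search.search_space.
def _example_search_space_to_hp_expr():  # pragma: no cover
    space = SearchSpaceSum([SearchSpaceDict({}), SearchSpaceDict({})])
    # builds hp.choice("choice", [{"0": {}}, {"1": {}}])
    return search_space_to_hp_expr(space, "root")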
class SearchSpaceHPStrVisitor(Visitor):
pgo_dict: Dict[str, FrequencyDistribution]
names: Dict[str, int]
pgo_header: Optional[str]
nested_header: Optional[str]
decls: str
@classmethod
def run(cls, space: SearchSpace, name: str, counter=None, useCounter=True):
visitor = cls(name)
ret: str = ""
body = accept(space, visitor, name, counter=counter, useCounter=useCounter)
if visitor.pgo_header is not None:
ret += visitor.pgo_header
if visitor.nested_header is not None:
ret += visitor.nested_header
if visitor.decls:
ret += visitor.decls + "\n"
ret += "return " + body
return ret
def get_unique_name(self, name: str) -> str:
if name in self.names:
counter = self.names[name] + 1
self.names[name] = counter
return f"{name}${str(counter)}"
else:
self.names[name] = 0
return name
def get_unique_variable_name(self, name: str) -> str:
if name in self.names:
counter = self.names[name] + 1
self.names[name] = counter
return f"{name}__{str(counter)}"
else:
self.names[name] = 0
return name
def mk_label(self, label, counter, useCounter=True):
return self.get_unique_name(_mk_label(label, counter, useCounter=useCounter))
def __init__(self, name: str):
super().__init__()
self.pgo_dict = {}
self.names = {}
self.pgo_header = None
self.nested_header = None
self.decls = ""
def visitSearchSpaceEnum(
self, space: SearchSpaceEnum, path: str, counter=None, useCounter=True
):
def val_as_str(v):
if v is None:
return "null"
elif isinstance(v, str):
return f"'{v}'"
else:
return str(v)
if len(space.vals) == 1:
return val_as_str(space.vals[0])
else:
vals_str = "[" + ", ".join([val_as_str(v) for v in space.vals]) + "]"
return (
f"hp.choice('{self.mk_label(path, counter, useCounter)}', {vals_str})"
)
visitSearchSpaceConstant = visitSearchSpaceEnum
visitSearchSpaceBool = visitSearchSpaceEnum
def visitSearchSpaceNumber(
self, space: SearchSpaceNumber, path: str, counter=None, useCounter=True
):
label = self.mk_label(path, counter, useCounter=useCounter)
if space.pgo is not None:
self.pgo_dict[label] = space.pgo
return f"scope.pgo_sample(pgo_{label}, hp.quniform('{label}', {0}, {len(space.pgo)-1}, 1))"
dist = "uniform"
if space.distribution:
dist = space.distribution
if space.maximum is None:
raise SearchSpaceError(
path, f"maximum not specified for a number with distribution {dist}"
)
space_max = space.getInclusiveMax()
# if the maximum is not None, the inclusive maximum should not be none
assert space_max is not None
# These distributions need only a maximum
if dist == "integer":
if not space.discrete:
raise SearchSpaceError(
path,
"integer distribution specified for a non discrete numeric type....",
)
return f"hp.randint('{label}', {space_max})"
if space.minimum is None:
raise SearchSpaceError(
path, f"minimum not specified for a number with distribution {dist}"
)
space_min = space.getInclusiveMin()
# if the minimum is not None, the inclusive minimum should not be none
assert space_min is not None
if dist == "uniform":
if space.discrete:
return f"hp.quniform('{label}', {space_min}, {space_max}, 1)"
else:
return f"hp.uniform('{label}', {space_min}, {space_max})"
elif dist == "loguniform":
# for log distributions, hyperopt requires that we provide the log of the min/max
if space_min <= 0:
raise SearchSpaceError(
path,
f"minimum of 0 specified with a {dist} distribution. This is not allowed; please set it (possibly using minimumForOptimizer) to be positive",
)
if space_min > 0:
space_min = math.log(space_min)
if space_max > 0:
space_max = math.log(space_max)
if space.discrete:
return f"hp.qloguniform('{label}', {space_min}, {space_max}, 1)"
else:
return f"hp.loguniform('{label}', {space_min}, {space_max})"
else:
raise SearchSpaceError(path, f"Unknown distribution type: {dist}")
def array_single_str_(
self, space: SearchSpaceArray, path: str, num, useCounter=True
) -> str:
p = _mk_label(path, num, useCounter=useCounter) + "_"
ret = "(" if space.is_tuple else "["
items: Iterable[SearchSpace] = space.items()
ret += ",".join(
(
accept(sub, self, p, counter=x, useCounter=useCounter)
for x, sub in enumerate(items)
)
)
ret += ")" if space.is_tuple else "]"
return ret
def visitSearchSpaceArray(
self, space: SearchSpaceArray, path: str, counter=None, useCounter=True
) -> str:
assert space.maximum >= space.minimum
p = _mk_label(path, counter, useCounter=useCounter)
cp = p + "_"
if space.minimum == space.maximum:
return self.array_single_str_(
space, cp, space.minimum, useCounter=useCounter
)
else:
res = "hp.choice(" + p + ", ["
res += ",".join(
(
self.array_single_str_(space, cp, x, useCounter=useCounter)
for x in range(space.minimum, space.maximum + 1)
)
)
res += "])"
return res
def visitSearchSpaceObject(
self, space: SearchSpaceObject, path: str, counter=None, useCounter=True
):
s_decls = []
space_name = self.get_unique_variable_name("search_space")
any_name = self.get_unique_variable_name("valid_hyperparam_combinations")
any_path = self.get_unique_name(
_mk_label(path, counter, useCounter=useCounter) + "_" + "combos"
)
s_decls.append(space_name + " = {}")
s_decls.append(f"{space_name}['name'] = {space.longName}")
child_counter = None
def cstr(key, x):
nonlocal child_counter
if x is None:
return "None"
else:
s = accept(
x, self, path + "_" + key, child_counter, useCounter=useCounter
)
if child_counter is None:
child_counter = 1
else:
child_counter = child_counter + 1
return s
def choice_as_tuple_str(c):
assert len(space.keys) == len(c)
ret = [cstr(space.keys[ind], e) for ind, e in enumerate(c)]
return ret
str_choices = (
"["
+ ",".join(
["(" + ",".join(choice_as_tuple_str(c)) + ")" for c in space.choices]
)
+ "]"
)
s_decls.append(f"{any_name} = hp.choice('{any_path}', {str_choices})")
i = 0
for k in space.keys:
s_decls.append(f"{space_name}['{k}'] = {any_name}[{i}]")
i = i + 1
if self.pgo_dict:
if not self.pgo_header:
self.pgo_header = """
@scope.define
def pgo_sample(pgo, sample):
return pgo[sample]
"""
# use this to sort the pgo_labels by the unique ind
# appended to the key.
# This puts them in the order they appear in the hyperopt
# expression, making it easier to read
def last_num(kv):
matches = re.search(r"(\d+)$", kv[0])
if matches is None:
return 0
else:
return int(matches.group(0))
pgo_decls: List[str] = []
for k, v in sorted(self.pgo_dict.items(), key=last_num):
fl = v.freq_dist.tolist()
pgo_decls.append(f"pgo_{k} = {fl}")
if self.decls:
self.decls = self.decls + "\n"
self.decls = self.decls + "\n".join(pgo_decls)
self.decls += "\n".join(s_decls) + "\n"
return space_name
def visitSearchSpaceDict(
self, sd: SearchSpaceDict, path: str, counter=None, useCounter=True
):
search_spaces = (
name + ":" + accept(space, self, path + "_" + name)
for name, space in sd.space_dict.items()
)
return "{" + ",".join(search_spaces) + "}"
def visitSearchSpaceProduct(
self, prod: SearchSpaceProduct, path: str, counter=None, useCounter=True
):
search_spaces = (
accept(space, self, self.get_unique_name(make_indexed_name(name, index)))
for name, index, space in prod.get_indexed_spaces()
)
return "[" + ",".join(search_spaces) + "]"
def visitSearchSpaceSum(
self, sum_space: SearchSpaceSum, path: str, counter=None, useCounter=True
):
unique_name: str = self.get_unique_name("choice")
sub_str: Iterable[str] = (
'"' + str(i) + '"' + " : " + '"' + accept(m, self, "") + '"'
for i, m in enumerate(sum_space.sub_spaces)
)
sub_spaces_str: str = "[" + ",".join(sub_str) + "]"
return f"hp.choice({unique_name}, {sub_spaces_str})"
def visitSearchSpaceOperator(
self, op: SearchSpaceOperator, path: str, counter=None, useCounter=True
):
if not self.nested_header:
self.nested_header = """
@scope.define
def make_nested_hyperopt(space):
from lale.helpers import make_nested_hyperopt_space
return make_nested_hyperopt_space(space)
"""
return f"scope.make_nested_hyperopt({accept(op.sub_space, self, path, counter=counter, useCounter=useCounter)})"
def visitSearchSpaceEmpty(
self, op: SearchSpaceEmpty, path: str, counter=None, useCounter=True
) -> str:
return "***EMPTY**"
| 19,805 | 34.054867 | 162 |
py
|
lale
|
lale-master/lale/lib/dataframe.py
|
# Copyright 2021 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Common interface to manipulate different types of dataframes supported in Lale.
"""
from typing import List, Union
import numpy as np
import pandas as pd
from lale.datasets.data_schemas import SparkDataFrameWithIndex
from lale.helpers import (
_is_pandas_df,
_is_pandas_series,
_is_spark_df,
_is_spark_df_without_index,
)
column_index = Union[str, int]
def get_columns(df) -> List[column_index]:
if _is_pandas_series(df):
return pd.Series([df.name])
if _is_pandas_df(df):
return df.columns
if _is_spark_df(df):
return pd.Series(df.columns_without_indexes)
if isinstance(df, np.ndarray):
# should have more asserts here
_, num_cols = df.shape
return list(range(num_cols))
if _is_spark_df_without_index(df):
return df.columns
assert False, type(df)
def select_col(df, col: column_index):
if isinstance(df, np.ndarray):
return df[:, col] # type: ignore
elif _is_pandas_df(df):
return df[col]
elif _is_spark_df(df):
res = df.select([col] + df.index_names)
return SparkDataFrameWithIndex(res, index_names=df.index_names)
else:
raise ValueError(f"Unsupported series type {type(df)}")
def count(df):
if isinstance(df, np.ndarray):
return df.size
if _is_pandas_df(df) or _is_pandas_series(df):
return len(df)
elif _is_spark_df(df):
return df.count()
elif _is_spark_df_without_index(df):
return df.count()
else:
return len(df)
def make_series_distinct(df):
if isinstance(df, np.ndarray):
return np.unique(df)
elif isinstance(df, pd.Series):
return df.unique()
elif _is_spark_df(df):
return df.drop_indexes().distinct()
else:
raise ValueError(f"Unsupported series type {type(df)}")
def make_series_concat(df1, df2):
if isinstance(df1, np.ndarray):
assert isinstance(df2, np.ndarray)
return np.concatenate((df1, df2))
elif isinstance(df1, pd.Series):
assert isinstance(df2, pd.Series)
return pd.concat([df1, df2])
elif _is_spark_df(df1):
assert _is_spark_df(df2)
return df1.union(df2)
else:
raise ValueError(f"Unsupported series type {type(df1)}")
| 2,863 | 27.929293 | 78 |
py
|
lale
|
lale-master/lale/lib/_common_schemas.py
|
# Copyright 2021 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Dict
JSON_TYPE = Dict[str, Any]
schema_estimator: JSON_TYPE = {
"description": "Planned Lale individual operator or pipeline.",
"anyOf": [
{"laleType": "operator"},
{
"enum": [None],
"description": "lale.lib.sklearn.LogisticRegression",
},
],
"default": None,
}
# schemas used by many optimizers
schema_scoring_item: JSON_TYPE = {
"description": "Scorer object, or known scorer named by string.",
"anyOf": [
{
"description": """Callable with signature ``scoring(estimator, X, y)`` as documented in `sklearn scoring`_.
The callable has to return a scalar value, such that a higher score is better.
This may be created from one of the `sklearn metrics`_ using `make_scorer`_.
Or it can be one of the scoring callables returned by the factory
functions in `lale.lib.aif360 metrics`_, for example,
``symmetric_disparate_impact(**fairness_info)``.
Or it can be a completely custom user-written Python callable.
.. _`sklearn scoring`: https://scikit-learn.org/stable/modules/model_evaluation.html#the-scoring-parameter-defining-model-evaluation-rules
.. _`make_scorer`: https://scikit-learn.org/stable/modules/generated/sklearn.metrics.make_scorer.html#sklearn.metrics.make_scorer.
.. _`sklearn metrics`: https://scikit-learn.org/stable/modules/classes.html#module-sklearn.metrics
.. _`lale.lib.aif360 metrics`: https://lale.readthedocs.io/en/latest/modules/lale.lib.aif360.html#metrics
""",
"laleType": "callable",
},
{
"description": "Known scorer for classification task.",
"enum": [
"accuracy",
"explained_variance",
"max_error",
"roc_auc",
"roc_auc_ovr",
"roc_auc_ovo",
"roc_auc_ovr_weighted",
"roc_auc_ovo_weighted",
"balanced_accuracy",
"average_precision",
"neg_log_loss",
"neg_brier_score",
],
},
{
"description": "Known scorer for regression task.",
"enum": [
"r2",
"neg_mean_squared_error",
"neg_mean_absolute_error",
"neg_root_mean_squared_error",
"neg_mean_squared_log_error",
"neg_median_absolute_error",
],
},
],
}
schema_scoring_single: JSON_TYPE = {
"description": "Scorer object, or known scorer named by string.",
"anyOf": [
{
"enum": [None],
"description": "When not specified, use `accuracy` for classification tasks and `r2` for regression.",
},
schema_scoring_item,
],
}
schema_scoring_list: JSON_TYPE = {
"description": "A list of Scorer objects, or known scorers named by string. The optimizer may take the order into account.",
"type": "array",
"items": schema_scoring_item,
}
schema_scoring: JSON_TYPE = {
"description": "Either a single or a list of (Scorer objects, or known scorers named by string).",
"anyOf": [schema_scoring_single, schema_scoring_list],
}
schema_best_score_single: JSON_TYPE = {
"description": """The best score for the specified scorer.
Given that higher scores are better, passing ``(best_score - score)``
as a loss to the minimizing optimizer will maximize the score.
By specifying best_score, the loss can be >=0, where 0 is the best loss.""",
"type": "number",
"default": 0.0,
}
schema_best_score: JSON_TYPE = {
"description": """The best score for the specified scorer.
Given that higher scores are better, passing ``(best_score - score)``
as a loss to the minimizing optimizer will maximize the score.
By specifying best_score, the loss can be >=0, where 0 is the best loss.""",
"default": 0.0,
"anyOf": [
schema_best_score_single,
{
"description": """The best score for each specified scorer.
If not enough are specified, the remainder are assumed to be the default.
Given that higher scores are better, passing ``(best_score - score)``
as a loss to the minimizing optimizer will maximize the score.
By specifying best_score, the loss can be >=0, where 0 is the best loss.""",
"type": "array",
"items": schema_best_score_single,
},
],
}
def check_scoring_best_score_constraint(scoring=None, best_score=0) -> None:
if isinstance(best_score, list):
if isinstance(scoring, list):
if len(scoring) < len(best_score):
raise ValueError(
f"Error: {len(best_score)} best scores were specified, but there are only {len(scoring)} scorers."
)
else:
raise ValueError(
f"Error: {len(best_score)} best scores were specified, but there is only one scorer."
)
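# Illustrative sketch (documentation aid, not part of the library API): the
# constraint above only rejects the case where more best_score values are given
# than there are scorers.
def _example_check_scoring_best_score() -> None:  # pragma: no cover
    check_scoring_best_score_constraint(scoring="accuracy", best_score=0.9)  # ok
    check_scoring_best_score_constraint(scoring=["accuracy", "r2"], best_score=[1.0])  # ok
    try:
        check_scoring_best_score_constraint(scoring=["r2"], best_score=[1.0, 0.5])
    except ValueError as e:
        print(e)  # two best scores but only one scorer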
schema_simple_cv: JSON_TYPE = {
"anyOf": [
{
"description": "Number of folds for cross-validation.",
"type": "integer",
"minimum": 2,
"default": 5,
"minimumForOptimizer": 3,
"maximumForOptimizer": 4,
"distribution": "uniform",
},
{
"enum": [None],
"description": "to use the default 5-fold cross validation",
"forOptimizer": False,
},
]
}
schema_cv: JSON_TYPE = {
"description": """Cross-validation as integer or as object that has a split function.
The fit method performs cross validation on the input dataset per
trial, and uses the mean cross validation performance for optimization.
This behavior is also impacted by the handle_cv_failure flag.
""",
"anyOf": [
schema_simple_cv,
{
"laleType": "CrossvalGenerator",
"forOptimizer": False,
"description": "Object with split function: generator yielding (train, test) splits as arrays of indices. Can use any of the iterators from https://scikit-learn.org/stable/modules/cross_validation.html#cross-validation-iterators",
},
],
"default": 5,
}
schema_cv_1_1: JSON_TYPE = {
"description": "Determines the cross-validation splitting strategy used in cross_val_predict to train final_estimator.",
"anyOf": [
schema_simple_cv,
{
"enum": ["prefit"],
"description": '"prefit" to assume the estimators are prefit. In this case, the estimators will not be refitted.',
"forOptimizer": False,
},
{
"laleType": "CrossvalGenerator",
"forOptimizer": False,
"description": "Object with split function: generator yielding (train, test) splits as arrays of indices. Can use any of the iterators from https://scikit-learn.org/stable/modules/cross_validation.html#cross-validation-iterators",
},
],
"default": 5,
}
schema_max_opt_time: JSON_TYPE = {
"description": "Maximum amount of time in seconds for the optimization.",
"anyOf": [
{"type": "number", "minimum": 0.0},
{"description": "No runtime bound.", "enum": [None]},
],
"default": None,
}
| 7,845 | 35.663551 | 242 |
py
|
lale
|
lale-master/lale/lib/__init__.py
|
# Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 575 | 40.142857 | 74 |
py
|
lale
|
lale-master/lale/lib/rasl/_eval_pandas_df.py
|
# Copyright 2021-2022 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ast
import collections
import hashlib
from typing import Any
import numpy as np
import pandas as pd
from lale.expressions import AstExpr, Expr, _it_column
from lale.helpers import _ast_func_id
def eval_expr_pandas_df(X, expr: Expr) -> pd.Series:
return _eval_ast_expr_pandas_df(X, expr.expr)
def _eval_ast_expr_pandas_df(X, expr: AstExpr) -> pd.Series:
evaluator = _PandasEvaluator(X)
evaluator.visit(expr)
return evaluator.result
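# Illustrative sketch (documentation aid, not part of the library API): evaluating a
# lale expression over a pandas dataframe. This assumes that `it.a + it.b` builds an
# `Expr`, in the way lale expressions are used elsewhere in this library.
def _example_eval_expr_pandas_df():  # pragma: no cover
    from lale.expressions import it  # local import to keep the sketch self-contained
    df = pd.DataFrame({"a": [1, 2], "b": [10, 20]})
    return eval_expr_pandas_df(df, it.a + it.b)  # -> pd.Series([11, 22])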
class _PandasEvaluator(ast.NodeVisitor):
def __init__(self, X):
self.result = None
self.df = X
def visit_Num(self, node: ast.Num):
self.result = node.n
def visit_Str(self, node: ast.Str):
self.result = node.s
def visit_Constant(self, node: ast.Constant):
self.result = node.value
def visit_Attribute(self, node: ast.Attribute):
column_name = _it_column(node)
self.result = self.df[column_name]
def visit_Subscript(self, node: ast.Subscript):
column_name = _it_column(node)
self.result = self.df[column_name]
def visit_BinOp(self, node: ast.BinOp):
self.visit(node.left)
v1 = self.result
self.visit(node.right)
v2 = self.result
if isinstance(node.op, ast.Add):
self.result = v1 + v2 # type: ignore
elif isinstance(node.op, ast.Sub):
self.result = v1 - v2 # type: ignore
elif isinstance(node.op, ast.Mult):
self.result = v1 * v2 # type: ignore
elif isinstance(node.op, ast.Div):
self.result = v1 / v2 # type: ignore
elif isinstance(node.op, ast.FloorDiv):
self.result = v1 // v2 # type: ignore
elif isinstance(node.op, ast.Mod):
self.result = v1 % v2 # type: ignore
elif isinstance(node.op, ast.Pow):
self.result = v1**v2 # type: ignore
elif isinstance(node.op, ast.BitAnd):
self.result = v1 & v2 # type: ignore
elif isinstance(node.op, ast.BitOr):
self.result = v1 | v2 # type: ignore
else:
raise ValueError(f"""Unimplemented operator {ast.dump(node.op)}""")
def visit_Compare(self, node: ast.Compare):
self.visit(node.left)
left = self.result
assert len(node.ops) == len(node.comparators)
if len(node.ops) != 1: # need chained comparison in lale.expressions.Expr
raise ValueError("Chained comparisons not supported yet.")
self.visit(node.comparators[0])
right = self.result
op = node.ops[0]
if isinstance(op, ast.Eq):
self.result = left.eq(right) # type: ignore
elif isinstance(op, ast.NotEq):
self.result = left.ne(right) # type: ignore
elif isinstance(op, ast.Lt):
self.result = left.lt(right) # type: ignore
elif isinstance(op, ast.LtE):
self.result = left.le(right) # type: ignore
elif isinstance(op, ast.Gt):
self.result = left.gt(right) # type: ignore
elif isinstance(op, ast.GtE):
self.result = left.ge(right) # type: ignore
else:
raise ValueError(f"Unimplemented operator {ast.dump(op)}")
def visit_Call(self, node: ast.Call):
function_name = _ast_func_id(node.func)
try:
map_func_to_be_called = globals()[function_name]
except KeyError as exc:
raise ValueError(f"""Unimplemented function {function_name}""") from exc
self.result = map_func_to_be_called(self.df, node)
def astype(df: Any, call: ast.Call):
dtype = ast.literal_eval(call.args[0])
column = _eval_ast_expr_pandas_df(df, call.args[1]) # type: ignore
return column.astype(dtype)
def ite(df: Any, call: ast.Call):
cond = _eval_ast_expr_pandas_df(df, call.args[0]) # type: ignore
assert isinstance(cond, pd.Series)
v1 = _eval_ast_expr_pandas_df(df, call.args[1]) # type: ignore
v2 = _eval_ast_expr_pandas_df(df, call.args[2]) # type: ignore
if not isinstance(v1, pd.Series):
if not isinstance(v2, pd.Series): # two scalars, can avoid broadcast
result = cond.map(lambda b: v1 if b else v2)
else: # pandas will implicitly broadcast v1
result = v2.mask(cond, v1)
else: # pandas will implicitly broadcast v2 if it is not a Series
result = v1.where(cond, v2)
return result
def hash(df: Any, call: ast.Call): # pylint:disable=redefined-builtin
hashing_method = ast.literal_eval(call.args[0])
column = _eval_ast_expr_pandas_df(df, call.args[1]) # type: ignore
def hash_fun(v):
hasher = hashlib.new(hashing_method)
hasher.update(bytes(str(v), "utf-8"))
return hasher.hexdigest()
return column.map(hash_fun)
def hash_mod(df: Any, call: ast.Call):
h_column = hash(df, call)
N = ast.literal_eval(call.args[2])
return h_column.map(lambda h: int(h, 16) % N)
def replace(df: Any, call: ast.Call):
column = _eval_ast_expr_pandas_df(df, call.args[0]) # type: ignore
try:
mapping_dict = ast.literal_eval(call.args[1].value) # type: ignore
except ValueError:
mapping_dict_ast = call.args[1].value # type: ignore
        # ast.literal_eval fails with a ValueError for `nan`, so we handle the case
        # where one of the keys is `nan`. This happens when using map with replace
        # for missing value imputation.
mapping_dict = {}
for i, key in enumerate(mapping_dict_ast.keys):
if hasattr(key, "id") and key.id == "nan":
mapping_dict[np.nan] = ast.literal_eval(mapping_dict_ast.values[i])
else:
mapping_dict[
ast.literal_eval(mapping_dict_ast.keys[i])
] = ast.literal_eval(mapping_dict_ast.values[i])
handle_unknown = ast.literal_eval(call.args[2])
if handle_unknown == "use_encoded_value":
unknown_value = ast.literal_eval(call.args[3])
mapping2 = collections.defaultdict(lambda: unknown_value, mapping_dict)
new_column = column.map(mapping2) # type: ignore
else:
new_column = column.replace(mapping_dict)
return new_column
def identity(df: Any, call: ast.Call):
return _eval_ast_expr_pandas_df(df, call.args[0]) # type: ignore
def time_functions(df: Any, call, pandas_func: str):
fmt = None
column = _eval_ast_expr_pandas_df(df, call.args[0])
if len(call.args) > 1:
fmt = ast.literal_eval(call.args[1])
new_column = pd.to_datetime(column, format=fmt)
return getattr(getattr(new_column, "dt"), pandas_func)
def day_of_month(df: Any, call: ast.Call):
return time_functions(df, call, "day")
def day_of_week(df: Any, call: ast.Call):
return time_functions(df, call, "weekday")
def day_of_year(df: Any, call: ast.Call):
return time_functions(df, call, "dayofyear")
def hour(df: Any, call: ast.Call):
return time_functions(df, call, "hour")
def minute(df: Any, call: ast.Call):
return time_functions(df, call, "minute")
def month(df: Any, call: ast.Call):
return time_functions(df, call, "month")
| 7,741 | 34.351598 | 84 |
py
|
lale
|
lale-master/lale/lib/rasl/filter.py
|
# Copyright 2020 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ast
import importlib
from typing import Any, Optional, Tuple
import lale.docstrings
import lale.operators
from lale.datasets.data_schemas import forward_metadata
from lale.helpers import (
_get_subscript_value,
_is_ast_attribute,
_is_ast_constant,
_is_ast_subs_or_attr,
_is_ast_subscript,
_is_pandas_df,
_is_spark_df,
)
try:
from pyspark.sql.functions import col
spark_installed = True
except ImportError:
spark_installed = False
class _FilterImpl:
def __init__(self, pred=None):
self.pred = pred
# @classmethod
# def validate_hyperparams(cls, pred=None, X=None, **hyperparams):
# for pred_element in pred:
# if not isinstance(pred_element.expr, ast.Compare):
# raise ValueError(
# (
# "Filter predicate '{}' not a comparison. All filter predicates should be comparisons."
# ).format(pred_element)
# )
# Parse the predicate element passed as input
def _get_filter_info(self, expr_to_parse, X) -> Tuple[str, Any, Optional[str]]:
col_list = X.columns
if isinstance(expr_to_parse, ast.Call):
op = expr_to_parse.func
# for now, we only support single argument predicates
if len(expr_to_parse.args) != 1:
raise ValueError(
"Filter predicate functions currently only support a single argument"
)
arg = expr_to_parse.args[0]
if _is_ast_subscript(arg):
lhs = _get_subscript_value(arg)
elif _is_ast_attribute(arg):
lhs = arg.attr # type: ignore
else:
raise ValueError(
"Filter predicate functions only supports subscript or dot notation for the argument. For example, it.col_name or it['col_name']"
)
if lhs not in col_list:
raise ValueError(
f"Cannot perform filter predicate operation as {lhs} not a column of input dataframe X."
)
return lhs, op, None
if _is_ast_subscript(expr_to_parse.left):
lhs = _get_subscript_value(expr_to_parse.left)
elif _is_ast_attribute(expr_to_parse.left):
lhs = expr_to_parse.left.attr
else:
raise ValueError(
"Filter predicate only supports subscript or dot notation for the left hand side. For example, it.col_name or it['col_name']"
)
if lhs not in col_list:
raise ValueError(
f"Cannot perform filter operation as {lhs} not a column of input dataframe X."
)
op = expr_to_parse.ops[0]
if _is_ast_subscript(expr_to_parse.comparators[0]):
rhs = _get_subscript_value(expr_to_parse.comparators[0])
elif _is_ast_attribute(expr_to_parse.comparators[0]):
rhs = expr_to_parse.comparators[0].attr
elif _is_ast_constant(expr_to_parse.comparators[0]):
rhs = expr_to_parse.comparators[0].value
else:
raise ValueError(
"Filter predicate only supports subscript or dot notation for the right hand side. For example, it.col_name or it['col_name'] or a constant value"
)
if not _is_ast_constant(expr_to_parse.comparators[0]) and rhs not in col_list:
raise ValueError(
f"Cannot perform filter operation as {rhs} not a column of input dataframe X."
)
return lhs, op, rhs
def transform(self, X):
filtered_df = X
def filter_fun(X):
if isinstance(op, ast.Name):
# currently only handles single argument predicates
functions_module = importlib.import_module("lale.lib.rasl.functions")
func = getattr(functions_module, "filter_" + op.id)
return func(X, lhs)
# Filtering spark dataframes
if _is_spark_df(X):
if isinstance(op, ast.Eq):
assert lhs is not None
assert rhs is not None
return (
X.filter(col(lhs) == col(rhs))
if _is_ast_subs_or_attr(expr_to_parse.comparators[0])
else X.filter(col(lhs) == rhs)
)
elif isinstance(op, ast.NotEq):
assert lhs is not None
assert rhs is not None
return (
X.filter(col(lhs) != col(rhs))
if _is_ast_subs_or_attr(expr_to_parse.comparators[0])
else X.filter(col(lhs) != rhs)
)
elif isinstance(op, ast.GtE):
assert lhs is not None
assert rhs is not None
return (
X.filter(col(lhs) >= col(rhs))
if _is_ast_subs_or_attr(expr_to_parse.comparators[0])
else X.filter(col(lhs) >= rhs)
)
elif isinstance(op, ast.Gt):
assert lhs is not None
assert rhs is not None
return (
X.filter(col(lhs) > col(rhs))
if _is_ast_subs_or_attr(expr_to_parse.comparators[0])
else X.filter(col(lhs) > rhs)
)
elif isinstance(op, ast.LtE):
assert lhs is not None
assert rhs is not None
return (
X.filter(col(lhs) <= col(rhs))
if _is_ast_subs_or_attr(expr_to_parse.comparators[0])
else X.filter(col(lhs) <= rhs)
)
elif isinstance(op, ast.Lt):
assert lhs is not None
assert rhs is not None
return (
X.filter(col(lhs) < col(rhs))
if _is_ast_subs_or_attr(expr_to_parse.comparators[0])
else X.filter(col(lhs) < rhs)
)
else:
raise ValueError(
f"{op} operator type found. Only ==, !=, >=, <=, >, < operators are supported"
)
# Filtering pandas dataframes
if _is_pandas_df(X):
assert lhs is not None
assert rhs is not None
if isinstance(op, ast.Eq):
return (
X[X[lhs] == X[rhs]]
if _is_ast_subs_or_attr(expr_to_parse.comparators[0])
else X[X[lhs] == rhs]
)
elif isinstance(op, ast.NotEq):
assert lhs is not None
assert rhs is not None
return (
X[X[lhs] != X[rhs]]
if _is_ast_subs_or_attr(expr_to_parse.comparators[0])
else X[X[lhs] != rhs]
)
elif isinstance(op, ast.GtE):
assert lhs is not None
assert rhs is not None
return (
X[X[lhs] >= X[rhs]]
if _is_ast_subs_or_attr(expr_to_parse.comparators[0])
else X[X[lhs] >= rhs]
)
elif isinstance(op, ast.Gt):
assert lhs is not None
assert rhs is not None
return (
X[X[lhs] > X[rhs]]
if _is_ast_subs_or_attr(expr_to_parse.comparators[0])
else X[X[lhs] > rhs]
)
elif isinstance(op, ast.LtE):
assert lhs is not None
assert rhs is not None
return (
X[X[lhs] <= X[rhs]]
if _is_ast_subs_or_attr(expr_to_parse.comparators[0])
else X[X[lhs] <= rhs]
)
elif isinstance(op, ast.Lt):
assert lhs is not None
assert rhs is not None
return (
X[X[lhs] < X[rhs]]
if _is_ast_subs_or_attr(expr_to_parse.comparators[0])
else X[X[lhs] < rhs]
)
else:
raise ValueError(
f"{op} operator type found. Only ==, !=, >=, <=, >, < operators are supported"
)
else:
raise ValueError(
"Only pandas and spark dataframes are supported by the filter operator."
)
for pred_element in self.pred if self.pred is not None else []:
expr_to_parse = pred_element.expr
lhs, op, rhs = self._get_filter_info(expr_to_parse, X)
filtered_df = filter_fun(filtered_df)
filtered_df = forward_metadata(X, filtered_df)
return filtered_df
_hyperparams_schema = {
"allOf": [
{
"description": "This first sub-object lists all constructor arguments with their "
"types, one at a time, omitting cross-argument constraints, if any.",
"type": "object",
"additionalProperties": False,
"required": ["pred"],
"relevantToOptimizer": [],
"properties": {
"pred": {
"description": "Filter predicate. Given as Python AST expression.",
"laleType": "Any",
},
},
}
]
}
_input_transform_schema = {
"type": "object",
"required": ["X"],
"additionalProperties": False,
"properties": {
"X": {
"description": "Input table or dataframe",
"type": "array",
"items": {"type": "array", "items": {"laleType": "Any"}},
"minItems": 1,
}
},
}
_output_transform_schema = {
"description": "Features; no restrictions on data type.",
"laleType": "Any",
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Relational algebra filter operator.",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.rasl.filter.html",
"type": "object",
"tags": {"pre": [], "op": ["transformer"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_transform": _input_transform_schema,
"output_transform": _output_transform_schema,
},
}
Filter = lale.operators.make_operator(_FilterImpl, _combined_schemas)
lale.docstrings.set_docstrings(Filter)
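# Illustrative sketch (documentation aid, not part of the library API): filtering a
# pandas dataframe with a comparison predicate. This assumes the operator can be
# applied directly via `.transform`, as for rasl transformers without a fit step.
def _example_filter():  # pragma: no cover
    import pandas as pd
    from lale.expressions import it
    df = pd.DataFrame({"a": [1, 2, 3], "b": [10, 20, 30]})
    return Filter(pred=[it.a >= 2]).transform(df)  # keeps the rows where a >= 2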
| 11,551 | 37.895623 | 162 |
py
|
lale
|
lale-master/lale/lib/rasl/select_k_best.py
|
# Copyright 2021 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Tuple
import numpy as np
import lale.docstrings
import lale.operators
from lale.expressions import it
from lale.lib.dataframe import count, get_columns
from lale.lib.rasl import Map
from lale.lib.sklearn import select_k_best
from .monoid import Monoid, MonoidableOperator
from .scores import FClassif
class _SelectKBestMonoid(Monoid):
def __init__(self, *, n_samples_seen_, feature_names_in_, lifted_score_):
self.n_samples_seen_ = n_samples_seen_
self.feature_names_in_ = feature_names_in_
self.lifted_score_ = lifted_score_
def combine(self, other: "_SelectKBestMonoid"):
n_samples_seen_ = self.n_samples_seen_ + other.n_samples_seen_
assert list(self.feature_names_in_) == list(other.feature_names_in_)
feature_names_in_ = self.feature_names_in_
lifted_score_ = self.lifted_score_.combine(other.lifted_score_)
return _SelectKBestMonoid(
n_samples_seen_=n_samples_seen_,
feature_names_in_=feature_names_in_,
lifted_score_=lifted_score_,
)
class _SelectKBestImpl(MonoidableOperator[_SelectKBestMonoid]):
def __init__(self, monoidable_score_func=FClassif, score_func=None, *, k=10):
self._hyperparams = {
"score_func": monoidable_score_func(),
"k": k,
}
def transform(self, X):
if self._transformer is None:
self._transformer = self._build_transformer()
return self._transformer.transform(X)
@property
def n_samples_seen_(self):
return getattr(self._monoid, "n_samples_seen_", 0)
@property
def feature_names_in_(self):
return getattr(self._monoid, "feature_names_in_", None)
def from_monoid(self, monoid: _SelectKBestMonoid):
self._monoid = monoid
score_func = self._hyperparams["score_func"]
lifted_score_ = self._monoid.lifted_score_
self.scores_, self.pvalues_ = score_func.from_monoid(lifted_score_)
self.n_features_in_ = len(self._monoid.feature_names_in_)
self._transformer = None
def _build_transformer(self):
assert self._monoid is not None
k = self._hyperparams["k"]
scores = self.scores_.copy()
scores[np.isnan(scores)] = np.finfo(scores.dtype).min
ind = np.sort(np.argpartition(scores, -min(k, len(scores)))[-k:])
kbest = self._monoid.feature_names_in_[ind]
result = Map(columns={col: it[col] for col in kbest})
return result
def to_monoid(self, batch: Tuple[Any, Any]):
X, y = batch
score_func = self._hyperparams["score_func"]
n_samples_seen_ = count(X)
feature_names_in_ = get_columns(X)
lifted_score_ = score_func.to_monoid((X, y))
return _SelectKBestMonoid(
n_samples_seen_=n_samples_seen_,
feature_names_in_=feature_names_in_,
lifted_score_=lifted_score_,
)
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Relational algebra implementation of SelectKBest.",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.rasl.select_k_best.html",
"type": "object",
"tags": {
"pre": ["~categoricals"],
"op": ["transformer", "interpretable"],
"post": [],
},
"properties": {
"hyperparams": select_k_best._hyperparams_schema,
"input_fit": select_k_best._input_fit_schema,
"input_transform": select_k_best._input_transform_schema,
"output_transform": select_k_best._output_transform_schema,
},
}
SelectKBest: lale.operators.PlannedIndividualOp
SelectKBest = lale.operators.make_operator(_SelectKBestImpl, _combined_schemas)
lale.docstrings.set_docstrings(SelectKBest)
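# Illustrative sketch (documentation aid, not part of the library API): this assumes
# the usual fit/transform protocol provided by MonoidableOperator; the column names
# and data below are made up for illustration.
def _example_select_k_best():  # pragma: no cover
    import pandas as pd
    X = pd.DataFrame({"f1": [1, 2, 3, 4], "f2": [0, 1, 0, 1], "f3": [4, 3, 2, 1]})
    y = pd.Series([0, 0, 1, 1])
    return SelectKBest(k=2).fit(X, y).transform(X)  # keeps the 2 best-scoring columns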
| 4,383 | 35.533333 | 106 |
py
|
lale
|
lale-master/lale/lib/rasl/scan.py
|
# Copyright 2020 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ast
from typing import Optional
import lale.docstrings
import lale.operators
from lale.datasets.data_schemas import get_table_name
from lale.expressions import Expr
from lale.helpers import _get_subscript_value
class _ScanImpl:
def __init__(self, table=None):
assert table is not None
if isinstance(table.expr, ast.Attribute):
self.table_name = table.expr.attr
elif isinstance(table.expr, ast.Subscript):
self.table_name = _get_subscript_value(table.expr)
@classmethod
def validate_hyperparams(cls, table: Optional[Expr] = None, X=None, **hyperparams):
assert table is not None
if isinstance(table.expr, (ast.Attribute, ast.Subscript)):
base = table.expr.value
if isinstance(base, ast.Name) and base.id == "it":
if isinstance(table.expr, ast.Subscript):
sub = table.expr.slice
if isinstance(sub, ast.Constant) or (
isinstance(sub, ast.Index)
and isinstance(getattr(sub, "value", None), ast.Str)
):
return
else:
return
raise ValueError("expected `it.table_name` or `it['table name']`")
def transform(self, X):
named_datasets = {get_table_name(d): d for d in X}
if self.table_name in named_datasets:
return named_datasets[self.table_name]
raise ValueError(
f"could not find '{self.table_name}' in {list(named_datasets.keys())}"
)
def viz_label(self) -> str:
return "Scan:\n" + self.table_name
_hyperparams_schema = {
"allOf": [
{
"description": "This first sub-object lists all constructor arguments with their "
"types, one at a time, omitting cross-argument constraints, if any.",
"type": "object",
"additionalProperties": False,
"required": ["table"],
"relevantToOptimizer": [],
"properties": {
"table": {
"description": "Which table to scan.",
"laleType": "expression",
}
},
}
]
}
_input_transform_schema = {
"type": "object",
"required": ["X"],
"additionalProperties": False,
"properties": {
"X": {
"description": "Outermost array dimension is over datasets that have table names.",
"type": "array",
"items": {
"description": "Middle array dimension is over samples (aka rows).",
"type": "array",
"items": {
"description": "Innermost array dimension is over features (aka columns).",
"type": "array",
"items": {"laleType": "Any"},
},
},
"minItems": 1,
}
},
}
_output_transform_schema = {
"type": "array",
"items": {
"type": "array",
"items": {"laleType": "Any"},
},
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Scans a database table.",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.rasl.scan.html",
"type": "object",
"tags": {"pre": [], "op": ["transformer"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_transform": _input_transform_schema,
"output_transform": _output_transform_schema,
},
}
Scan = lale.operators.make_operator(_ScanImpl, _combined_schemas)
lale.docstrings.set_docstrings(Scan)
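# Illustrative sketch (documentation aid, not part of the library API): scanning one
# named table out of a list of named dataframes. This assumes the operator can be
# applied directly via `.transform`, as for other rasl transformers without a fit step.
def _example_scan():  # pragma: no cover
    import pandas as pd
    from lale.datasets.data_schemas import add_table_name
    from lale.expressions import it
    tables = [add_table_name(pd.DataFrame({"order_id": [1, 2]}), "orders")]
    return Scan(table=it.orders).transform(tables)  # returns the "orders" table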
| 4,261 | 32.03876 | 97 |
py
|
lale
|
lale-master/lale/lib/rasl/join.py
|
# Copyright 2020 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Iterable, List, Optional, Set
import pandas as pd
import lale.docstrings
import lale.operators
from lale.datasets.data_schemas import add_table_name, get_table_name
from lale.helpers import (
_get_subscript_value,
_is_ast_attribute,
_is_ast_subscript,
_is_df,
_is_spark_df,
_is_spark_df_without_index,
)
from lale.lib.dataframe import get_columns
try:
from pyspark.sql.functions import col
from lale.datasets.data_schemas import ( # pylint:disable=ungrouped-imports
SparkDataFrameWithIndex,
)
spark_installed = True
except ImportError:
spark_installed = False
class _JoinImpl:
def __init__(
self,
*,
pred=None,
join_limit=None,
sliding_window_length=None,
join_type="inner",
name=None,
):
self.pred = pred
self.join_type = join_type
self.name = name
# Parse the predicate element passed as input
@classmethod
def _get_join_info(cls, expr_to_parse):
left_key = []
right_key = []
if _is_ast_subscript(expr_to_parse.left.value):
left_name = _get_subscript_value(expr_to_parse.left.value)
elif _is_ast_attribute(expr_to_parse.left.value):
left_name = expr_to_parse.left.value.attr
else:
raise ValueError(
"ERROR: Expression type not supported! Formats supported: it.table_name.column_name or it['table_name'].column_name"
)
if _is_ast_subscript(expr_to_parse.left):
left_key.append(_get_subscript_value(expr_to_parse.left))
elif _is_ast_attribute(expr_to_parse.left):
left_key.append(expr_to_parse.left.attr)
else:
raise ValueError(
"ERROR: Expression type not supported! Formats supported: it.table_name.column_name or it.table_name['column_name']"
)
if _is_ast_subscript(expr_to_parse.comparators[0].value):
right_name = _get_subscript_value(expr_to_parse.comparators[0].value)
elif _is_ast_attribute(expr_to_parse.comparators[0].value):
right_name = expr_to_parse.comparators[0].value.attr
else:
raise ValueError(
"ERROR: Expression type not supported! Formats supported: it.table_name.column_name or it['table_name'].column_name"
)
if _is_ast_subscript(expr_to_parse.comparators[0]):
right_key.append(_get_subscript_value(expr_to_parse.comparators[0]))
elif _is_ast_attribute(expr_to_parse.comparators[0]):
right_key.append(expr_to_parse.comparators[0].attr)
else:
raise ValueError(
"ERROR: Expression type not supported! Formats supported: it.table_name.column_name or it.table_name['column_name']"
)
return left_name, left_key, right_name, right_key
@classmethod
def validate_hyperparams(cls, pred: Optional[Iterable[Any]] = None, **hyperparams):
tables_encountered: Set[str] = set()
assert pred is not None
for key in pred:
if isinstance(key, list):
sub_list_tables: List[
str
] = [] # use an ordered list to improve error messages
for sub_key in key:
(
left_table_name,
left_key_col,
right_table_name,
right_key_col,
) = cls._get_join_info(sub_key.expr)
if sub_list_tables and not (
left_table_name in sub_list_tables
and right_table_name in sub_list_tables
):
sub_list_tables.append(left_table_name)
first_table_names = ", ".join(sub_list_tables)
raise ValueError(
f"ERROR: Composite key involving the {first_table_names}, and {right_table_name} tables is problematic, since it references more than two tables."
)
if tables_encountered and not (
left_table_name in tables_encountered
or right_table_name in tables_encountered
):
left_expr = f"it.{left_table_name}{left_key_col}"
right_expr = f"it.{right_table_name}{right_key_col}"
raise ValueError(
f"ERROR: Composite key involving {left_expr} == {right_expr} is problematic, since neither the {left_table_name} nor the {right_table_name} tables were used in a previous key. Join operations must be chained (they can't have two disconnected join conditions)"
)
sub_list_tables.append(left_table_name)
sub_list_tables.append(right_table_name)
tables_encountered.add(left_table_name)
tables_encountered.add(right_table_name)
else:
(
left_table_name,
left_key_col,
right_table_name,
right_key_col,
) = cls._get_join_info(key.expr)
if tables_encountered and not (
left_table_name in tables_encountered
or right_table_name in tables_encountered
):
left_expr = f"it.{left_table_name}{left_key_col}"
right_expr = f"it.{right_table_name}{right_key_col}"
raise ValueError(
f"ERROR: Single key involving {left_expr} == {right_expr} is problematic, since neither the {left_table_name} nor the {right_table_name} tables were used in a previous key. Join operations must be chained (they can't have two disconnected join conditions)"
)
tables_encountered.add(left_table_name)
tables_encountered.add(right_table_name)
def transform(self, X):
# X is assumed to be a list of datasets with get_table_name(d) != None
joined_df = pd.DataFrame()
tables_encountered = set()
# Implementation of join operator
def join_df(left_df, right_df):
# Joining spark dataframes
if (_is_spark_df(left_df) or _is_spark_df_without_index(left_df)) and (
_is_spark_df(right_df) or _is_spark_df_without_index(right_df)
):
on = []
drop_col = []
left_table = left_df.alias("left_table")
right_table = right_df.alias("right_table")
for k, key in enumerate(left_key_col):
on.append(
col(f"{'left_table'}.{key}").eqNullSafe(
col(f"{'right_table'}.{right_key_col[k]}")
)
)
if key == right_key_col[k]:
drop_col.append(key)
op_df = left_table.join(right_table, on, self.join_type)
for key in drop_col:
op_df = op_df.drop(getattr(right_table, key))
if _is_spark_df_without_index(op_df):
op_df = add_index(left_df, right_df, op_df)
return op_df
# Joining pandas dataframes
op_df = pd.merge(
left_df,
right_df,
how=self.join_type,
left_on=left_key_col,
right_on=right_key_col,
)
return op_df
def fetch_one_df(named_df, table_name):
if get_table_name(named_df) == table_name:
return named_df
return None
def fetch_df(left_table_name, right_table_name):
left_df = []
right_df = []
for named_df in X:
if not tables_encountered:
left_df_candidate = fetch_one_df(named_df, left_table_name)
if _is_df(left_df_candidate):
left_df = left_df_candidate
right_df_candidate = fetch_one_df(named_df, right_table_name)
if _is_df(right_df_candidate):
right_df = right_df_candidate
else:
if left_table_name in tables_encountered:
left_df = joined_df
right_df_candidate = fetch_one_df(named_df, right_table_name)
if _is_df(right_df_candidate):
right_df = right_df_candidate
elif right_table_name in tables_encountered:
right_df = joined_df
left_df_candidate = fetch_one_df(named_df, left_table_name)
if _is_df(left_df_candidate):
left_df = left_df_candidate
return left_df, right_df
def remove_implicit_col(key_col, df):
if _is_spark_df(df):
indexes = df.index_names
for index in set(indexes) - set(key_col):
df = df.drop(index)
return df
def add_index(left_df, right_df, joined_df):
joined_names = set(get_columns(joined_df))
if _is_spark_df(left_df) and _is_spark_df(right_df):
left_names = left_df.index_names
right_names = right_df.index_names
index_names = set.union(set(left_names), set(right_names))
elif _is_spark_df(left_df):
index_names = set(left_df.index_names)
elif _is_spark_df(right_df):
index_names = set(right_df.index_names)
else:
index_names = set([])
new_index_names = set.intersection(index_names, joined_names)
joined_df = SparkDataFrameWithIndex(
joined_df, index_names=list(new_index_names)
)
return joined_df
# Iterate over all the elements of the predicate
for pred_element in self.pred if self.pred is not None else []:
left_table_name = ""
left_key_col = []
right_table_name = ""
right_key_col = []
if isinstance(pred_element, list):
# Prepare composite key to apply join once for all the participating columns together
for sub_pred_element in pred_element:
(
left_table_name,
temp_left_key,
right_table_name,
temp_right_key,
) = self._get_join_info(sub_pred_element.expr)
left_key_col.extend(temp_left_key)
right_key_col.extend(temp_right_key)
else:
(
left_table_name,
left_key_col,
right_table_name,
right_key_col,
) = self._get_join_info(pred_element.expr)
left_df, right_df = fetch_df(left_table_name, right_table_name)
if not _is_df(left_df) or not _is_df(right_df):
raise ValueError(
f"ERROR: Cannot perform join operation, either '{left_table_name}' or '{right_table_name}' table not present in input X!"
)
left_df = remove_implicit_col(left_key_col, left_df)
right_df = remove_implicit_col(right_key_col, right_df)
columns_in_both_tables = set(get_columns(left_df)).intersection( # type: ignore
set(get_columns(right_df)) # type: ignore
)
if columns_in_both_tables and not set(
sorted(columns_in_both_tables)
) == set(sorted(left_key_col + right_key_col)):
raise ValueError(
"Cannot perform join operation! Non-key columns cannot be duplicate."
)
joined_df = join_df(left_df, right_df)
tables_encountered.add(left_table_name)
tables_encountered.add(right_table_name)
return add_table_name(joined_df, self.name)
def viz_label(self) -> str:
if isinstance(self.name, str):
return f"Join:\n{self.name}"
return "Join"
_hyperparams_schema = {
"allOf": [
{
"description": "This first sub-object lists all constructor arguments with their "
"types, one at a time, omitting cross-argument constraints, if any.",
"type": "object",
"additionalProperties": False,
"required": [
"pred",
"join_limit",
"sliding_window_length",
"join_type",
"name",
],
"relevantToOptimizer": [],
"properties": {
"pred": {
"description": "Join predicate. Given as Python AST expression.",
"laleType": "Any",
},
"join_limit": {
"description": """Not yet implemented!
For join paths that are one-to-many, join_limit is used to sample the joined results.
When the right hand side of the join has a timestamp column, the join_limit is applied to select the most recent rows.
When the right hand side does not have a timestamp, it randomly samples join_limit number of rows.
Sampling is applied after each pair of tables is joined.""",
"anyOf": [{"type": "number"}, {"enum": [None]}],
"default": None,
},
"sliding_window_length": {
"description": """Not yet implemented!
sliding_window_length is also used for sampling the joined results,
only rows in a recent window of length sliding_window_length seconds are used in addition to join_limit.""",
"anyOf": [{"type": "number"}, {"enum": [None]}],
"default": None,
},
"join_type": {
"description": """There are various types of SQL joins available and join_type gives the user the option
to choose which type of join the user wants to implement.""",
"enum": ["inner", "left", "right"],
"default": "inner",
},
"name": {
"description": "The table name to be given to the output dataframe.",
"anyOf": [
{
"type": "string",
"pattern": "[^ ]",
"description": "String (cannot be all spaces).",
},
{
"enum": [None],
"description": "No table name.",
},
],
"default": None,
},
},
}
]
}
_input_transform_schema = {
"type": "object",
"required": ["X"],
"additionalProperties": False,
"properties": {
"X": {
"description": "List of tables.",
"type": "array",
"items": {"type": "array", "items": {"laleType": "Any"}},
"minItems": 1,
}
},
}
_output_transform_schema = {
"description": "Features; no restrictions on data type.",
"laleType": "Any",
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Relational algebra join operator.",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.rasl.join.html",
"type": "object",
"tags": {"pre": [], "op": ["transformer"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_transform": _input_transform_schema,
"output_transform": _output_transform_schema,
},
}
Join = lale.operators.make_operator(_JoinImpl, _combined_schemas)
lale.docstrings.set_docstrings(Join)
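# --- Hypothetical usage sketch (added for illustration, not part of the original
# source). It shows one way to join two named pandas tables with the Join
# operator defined above, assuming `add_table_name` (already used in this module)
# attaches the table names and that predicates are written with the `it`
# expression builder, as in the lale documentation.
def _example_join_usage():  # pragma: no cover - illustrative only
    from lale.expressions import it
    orders = add_table_name(
        pd.DataFrame({"order_id": [1, 2], "customer_id": [10, 20]}), "orders"
    )
    customers = add_table_name(
        pd.DataFrame({"customer_id": [10, 20], "name": ["ann", "bob"]}), "customers"
    )
    join = Join(
        pred=[it.orders.customer_id == it.customers.customer_id],
        join_type="inner",
        name="orders_customers",
    )
    return join.transform([orders, customers])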
| 16,812 | 41.243719 | 287 |
py
|
lale
|
lale-master/lale/lib/rasl/hashing_encoder.py
|
# Copyright 2021-2023 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import reduce
from typing import Any, List, Optional, Tuple
import lale.docstrings
import lale.helpers
import lale.operators
from lale.expressions import Expr, hash_mod, it, ite
from lale.lib.category_encoders import hashing_encoder
from lale.lib.dataframe import count, get_columns
from ._util import get_obj_cols
from .map import Map
from .monoid import Monoid, MonoidableOperator
class _HashingEncoderMonoid(Monoid):
def __init__(self, *, n_samples_seen_, feature_names):
self.n_samples_seen_ = n_samples_seen_
self.feature_names = feature_names
def combine(self, other: "_HashingEncoderMonoid"):
assert list(self.feature_names) == list(other.feature_names)
n_samples_seen_ = self.n_samples_seen_ + other.n_samples_seen_
return _HashingEncoderMonoid(
n_samples_seen_=n_samples_seen_, feature_names=self.feature_names
)
class _HashingEncoderImpl(MonoidableOperator[_HashingEncoderMonoid]):
def __init__(
self,
*,
n_components=8,
cols: Optional[List[str]] = None,
# drop_invariant=False,
# return_df=True,
hash_method="md5",
):
self._hyperparams = {
"n_components": n_components,
"cols": cols,
# "drop_invariant": drop_invariant,
"hash_method": hash_method,
}
self._dim = None
def transform(self, X):
if self._transformer is None:
self._transformer = self._build_transformer(X)
return self._transformer.transform(X)
@property
def n_samples_seen_(self):
return getattr(self._monoid, "n_samples_seen_", 0)
@property
def feature_names(self):
return getattr(self._monoid, "feature_names", None)
def from_monoid(self, monoid: _HashingEncoderMonoid):
self._monoid = monoid
self._transformer = None
def _build_transformer(self, X):
cols = self._hyperparams["cols"]
hash_method = self._hyperparams["hash_method"]
N = self._hyperparams["n_components"]
columns_hash = {
col_name: hash_mod(hash_method, it[col_name], N) for col_name in cols
}
columns_cat = {
f"col_{i}": reduce(
Expr.__add__,
[ite(it[col_name] == i, 1, 0) for col_name in cols],
)
for i in range(N)
}
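        # `hasher` first replaces each categorical column with its hash bucket
        # index (hash value modulo n_components); `encode` then builds columns
        # col_0 .. col_{N-1}, each counting how many of the hashed columns fell
        # into that bucket, while passing the remaining columns through.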
hasher = Map(columns=columns_hash, remainder="passthrough")
encode = Map(columns=columns_cat, remainder="passthrough")
return hasher >> encode
def to_monoid(self, batch: Tuple[Any, Any]):
X, _y = batch
cols = self._hyperparams["cols"]
if cols is None:
cols = get_obj_cols(X)
self._hyperparams["cols"] = cols
N = self._hyperparams["n_components"]
feature_names_cat = [f"col_{i}" for i in range(N)]
feature_names_num = [col for col in get_columns(X) if col not in cols]
feature_names = feature_names_cat + feature_names_num # type: ignore
n_samples_seen_ = count(X)
return _HashingEncoderMonoid(
n_samples_seen_=n_samples_seen_, feature_names=feature_names
)
# https://github.com/scikit-learn-contrib/category_encoders/blob/master/category_encoders/hashing.py
def get_feature_names(self):
"""
Returns the names of all transformed / added columns.
Returns
-------
feature_names: list
A list with all feature names transformed or added.
Note: potentially dropped features are not included!
Raises
------
        ValueError
This function was called before fit was called.
"""
if not isinstance(self.feature_names, list):
raise ValueError(
"Must fit data first. Affected feature names are not known before."
)
return self.feature_names
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """Relational algebra reimplementation of scikit-learn contrib's `HashingEncoder`_ transformer.
Works on both pandas and Spark dataframes by using `Map`_ for `transform`, which in turn uses the appropriate backend.
.. _`HashingEncoder`: https://contrib.scikit-learn.org/category_encoders/hashing.html
.. _`Map`: https://lale.readthedocs.io/en/latest/modules/lale.lib.rasl.map.html
""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.rasl.hashing_encoder.html",
"type": "object",
"tags": {
"pre": ["categoricals"],
"op": ["transformer", "interpretable"],
"post": [],
},
"properties": {
"hyperparams": hashing_encoder._hyperparams_schema,
"input_fit": hashing_encoder._input_fit_schema,
"input_transform": hashing_encoder._input_transform_schema,
"output_transform": hashing_encoder._output_transform_schema,
},
}
HashingEncoder = lale.operators.make_operator(_HashingEncoderImpl, _combined_schemas)
lale.docstrings.set_docstrings(HashingEncoder)
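# --- Hypothetical usage sketch (added for illustration, not part of the original
# source). It assumes the default `fit` wiring provided through the
# MonoidableOperator machinery (to_monoid / from_monoid), so the exact call
# pattern is an assumption rather than documented behavior.
def _example_hashing_encoder_usage():  # pragma: no cover - illustrative only
    import pandas as pd
    df = pd.DataFrame({"color": ["red", "blue", "red"], "size": [1, 2, 3]})
    trained = HashingEncoder(n_components=4).fit(df)
    return trained.transform(df)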
| 5,707 | 34.453416 | 117 |
py
|
lale
|
lale-master/lale/lib/rasl/_eval_spark_df.py
|
# Copyright 2021-2022 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ast
from lale.expressions import AstExpr, Expr, _it_column
from lale.helpers import _ast_func_id
try:
import pyspark.sql.functions
# noqa in the imports here because those get used dynamically and flake fails.
from pyspark.sql.functions import col # noqa
from pyspark.sql.functions import lit # noqa
from pyspark.sql.functions import to_timestamp # noqa
from pyspark.sql.functions import hour as spark_hour # noqa
from pyspark.sql.functions import isnan, isnull # noqa
from pyspark.sql.functions import minute as spark_minute # noqa
from pyspark.sql.functions import month as spark_month # noqa
from pyspark.sql.types import LongType
from pyspark.sql.functions import ( # noqa; isort: skip
dayofmonth,
dayofweek,
dayofyear,
floor as spark_floor,
md5 as spark_md5,
udf as spark_udf,
when as spark_when,
)
spark_installed = True
except ImportError:
spark_installed = False
def eval_expr_spark_df(expr: Expr):
return _eval_ast_expr_spark_df(expr.expr)
def _eval_ast_expr_spark_df(expr: AstExpr):
evaluator = _SparkEvaluator()
evaluator.visit(expr)
return evaluator.result
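# --- Hypothetical usage sketch (added for illustration, not part of the original
# source). Given an active SparkSession, a lale expression such as
# `it.price * 2 + 1` is turned into a pyspark Column that can be attached to a
# dataframe; the column names used here are made up for the example.
def _example_eval_expr(spark_df):  # pragma: no cover - illustrative only
    from lale.expressions import it
    new_col = eval_expr_spark_df(it.price * 2 + 1)
    return spark_df.withColumn("price_times_2_plus_1", new_col)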
class _SparkEvaluator(ast.NodeVisitor):
def __init__(self):
self.result = None
def visit_Num(self, node: ast.Num):
self.result = lit(node.n)
def visit_Str(self, node: ast.Str):
self.result = lit(node.s)
def visit_Constant(self, node: ast.Constant):
self.result = lit(node.value)
def visit_Attribute(self, node: ast.Attribute):
column_name = _it_column(node)
self.result = col(column_name) # type: ignore
def visit_Subscript(self, node: ast.Subscript):
column_name = _it_column(node)
self.result = col(column_name) # type: ignore
def visit_BinOp(self, node: ast.BinOp):
self.visit(node.left)
v1 = self.result
self.visit(node.right)
v2 = self.result
assert v1 is not None
assert v2 is not None
if isinstance(node.op, ast.Add):
self.result = v1 + v2
elif isinstance(node.op, ast.Sub):
self.result = v1 - v2
elif isinstance(node.op, ast.Mult):
self.result = v1 * v2
elif isinstance(node.op, ast.Div):
self.result = v1 / v2
elif isinstance(node.op, ast.FloorDiv):
self.result = spark_floor(v1 / v2)
elif isinstance(node.op, ast.Mod):
self.result = v1 % v2
elif isinstance(node.op, ast.Pow):
self.result = v1**v2
elif isinstance(node.op, ast.BitAnd):
self.result = v1 & v2
elif isinstance(node.op, ast.BitOr):
self.result = v1 | v2
else:
raise ValueError(f"""Unimplemented operator {ast.dump(node.op)}""")
def visit_Compare(self, node: ast.Compare):
self.visit(node.left)
left = self.result
assert len(node.ops) == len(node.comparators)
if len(node.ops) != 1: # need chained comparison in lale.expressions.Expr
raise ValueError("Chained comparisons not supported yet.")
self.visit(node.comparators[0])
right = self.result
op = node.ops[0]
if isinstance(op, ast.Eq):
self.result = left == right # type: ignore
elif isinstance(op, ast.NotEq):
self.result = left != right # type: ignore
elif isinstance(op, ast.Lt):
self.result = left < right # type: ignore
elif isinstance(op, ast.LtE):
self.result = left <= right # type: ignore
elif isinstance(op, ast.Gt):
self.result = left > right # type: ignore
elif isinstance(op, ast.GtE):
self.result = left >= right # type: ignore
else:
raise ValueError(f"Unimplemented operator {ast.dump(op)}")
def visit_Call(self, node: ast.Call):
function_name = _ast_func_id(node.func)
try:
map_func_to_be_called = globals()[function_name]
except KeyError as exc:
raise ValueError(f"""Unimplemented function {function_name}""") from exc
self.result = map_func_to_be_called(node)
def astype(call: ast.Call):
dtype = ast.literal_eval(call.args[0])
column = _eval_ast_expr_spark_df(call.args[1]) # type: ignore
assert column is not None
return column.astype(dtype)
def ite(call: ast.Call):
cond = _eval_ast_expr_spark_df(call.args[0]) # type: ignore
v1 = _eval_ast_expr_spark_df(call.args[1]) # type: ignore
v2 = _eval_ast_expr_spark_df(call.args[2]) # type: ignore
return spark_when(cond, v1).otherwise(v2) # type: ignore
def hash(call: ast.Call): # pylint:disable=redefined-builtin
hashing_method = ast.literal_eval(call.args[0])
column = _eval_ast_expr_spark_df(call.args[1]) # type: ignore
if hashing_method == "md5":
hash_fun = spark_md5(column) # type: ignore
else:
raise ValueError(f"Unimplementade hash function in Spark: {hashing_method}")
return hash_fun
def hash_mod(call: ast.Call):
h_column = hash(call)
N = ast.literal_eval(call.args[2])
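    # The digest returned by `hash` is a hex string; parse it as a base-16 integer
    # and reduce it modulo N so every value lands in one of the N hash buckets.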
int16_mod_N = spark_udf((lambda x: int(x, 16) % N), LongType())
return int16_mod_N(h_column)
def replace(call: ast.Call):
column = _eval_ast_expr_spark_df(call.args[0]) # type: ignore
mapping_dict = {}
try:
mapping_dict = ast.literal_eval(call.args[1].value) # type: ignore
except ValueError:
mapping_dict_ast = call.args[1].value # type: ignore
        # ast.literal_eval fails for `nan` with ValueError, so we handle the case
        # where one of the keys is a `nan`. This happens when using map with
        # replace in missing value imputation.
for i, key in enumerate(mapping_dict_ast.keys):
if hasattr(key, "id") and key.id == "nan":
mapping_dict["nan"] = ast.literal_eval(mapping_dict_ast.values[i])
else:
mapping_dict[
ast.literal_eval(mapping_dict_ast.keys[i])
] = ast.literal_eval(mapping_dict_ast.values[i])
handle_unknown = ast.literal_eval(call.args[2])
chain_of_whens = None
for key, value in mapping_dict.items():
if key == "nan":
when_expr = isnan(column) # type: ignore
elif key is None:
when_expr = isnull(column) # type: ignore
else:
when_expr = column == key # type: ignore
if chain_of_whens is None:
chain_of_whens = pyspark.sql.functions.when(when_expr, value)
else:
chain_of_whens = chain_of_whens.when(when_expr, value)
if handle_unknown == "use_encoded_value":
fallback = lit(ast.literal_eval(call.args[3]))
else:
fallback = column
if chain_of_whens is None:
result = fallback
else:
result = chain_of_whens.otherwise(fallback)
return result
def identity(call: ast.Call):
return _eval_ast_expr_spark_df(call.args[0]) # type: ignore
def time_functions(call, spark_func):
column = _eval_ast_expr_spark_df(call.args[0])
if len(call.args) > 1:
fmt = ast.literal_eval(call.args[1])
return spark_func(to_timestamp(column, format=fmt)) # type: ignore
return spark_func(to_timestamp(column)) # type: ignore
def day_of_month(call: ast.Call):
return time_functions(call, dayofmonth)
def day_of_week(call: ast.Call):
return time_functions(call, dayofweek)
def day_of_year(call: ast.Call):
return time_functions(call, dayofyear)
def hour(call: ast.Call):
return time_functions(call, spark_hour)
def minute(call: ast.Call):
return time_functions(call, spark_minute)
def month(call: ast.Call):
return time_functions(call, spark_month)
| 8,433 | 33.145749 | 84 |
py
|
lale
|
lale-master/lale/lib/rasl/_util.py
|
# Copyright 2023 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as pd
from lale.helpers import _is_pandas_df, _is_spark_df
# From https://github.com/scikit-learn-contrib/category_encoders/blob/master/category_encoders/utils.py
def _is_category(dtype):
return pd.api.types.is_categorical_dtype(dtype)
# Based on https://github.com/scikit-learn-contrib/category_encoders/blob/master/category_encoders/utils.py
def get_obj_cols(df):
"""
Returns names of 'object' columns in the DataFrame.
"""
obj_cols = []
if _is_pandas_df(df):
for idx, dt in enumerate(df.dtypes):
if dt == "object" or _is_category(dt):
obj_cols.append(df.columns.values[idx])
elif _is_spark_df(df):
for idx, (col, dt) in enumerate(df.dtypes):
if dt == "string":
obj_cols.append(col)
else:
assert False
return obj_cols
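# --- Hypothetical usage sketch (added for illustration, not part of the original
# source): on a pandas frame, only string/categorical columns are reported.
def _example_get_obj_cols():  # pragma: no cover - illustrative only
    df = pd.DataFrame({"name": ["ann", "bob"], "age": [34, 29]})
    return get_obj_cols(df)  # expected: ["name"]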
| 1,431 | 33.095238 | 107 |
py
|
lale
|
lale-master/lale/lib/rasl/task_graphs.py
|
# Copyright 2021, 2022 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import enum
import functools
import itertools
import logging
import pathlib
import sys
import tempfile
import time
from abc import ABC, abstractmethod
from typing import (
Any,
Callable,
Dict,
Iterable,
List,
Optional,
Set,
Tuple,
Type,
Union,
cast,
)
import graphviz
import numpy as np
import pandas as pd
import sklearn.model_selection
import sklearn.tree
import lale.helpers
import lale.json_operator
import lale.pretty_print
from lale.datasets import pandas2spark
from lale.operators import (
TrainableIndividualOp,
TrainablePipeline,
TrainedIndividualOp,
TrainedPipeline,
)
from .metrics import MetricMonoid, MetricMonoidFactory
from .monoid import Monoid, MonoidFactory
if lale.helpers.spark_installed:
from pyspark.sql.dataframe import DataFrame as SparkDataFrame
logger = logging.getLogger(__name__)
logger.setLevel(logging.WARNING)
_BatchStatus = enum.Enum("BatchStatus", "RESIDENT SPILLED")
_TaskStatus = enum.Enum("_TaskStatus", "FRESH READY WAITING DONE")
_Operation = enum.Enum(
"_Operation", "SCAN SPLIT TRANSFORM PREDICT FIT PARTIAL_FIT TO_MONOID COMBINE"
)
_DUMMY_INPUT_STEP = -1
_DUMMY_SCORE_STEP = sys.maxsize
_ALL_FOLDS = "*"
_ALL_BATCHES = -1
def is_pretrained(op: TrainableIndividualOp) -> bool:
"""Is the operator frozen-trained or does it lack a fit method?"""
return isinstance(op, TrainedIndividualOp) and (
op.is_frozen_trained() or not hasattr(op.impl, "fit")
)
def is_incremental(op: TrainableIndividualOp) -> bool:
"""Does the operator have a partial_fit method or is it pre-trained?"""
return op.has_method("partial_fit") or is_pretrained(op)
def is_associative(op: TrainableIndividualOp) -> bool:
"""Is the operator pre-trained or does it implement MonoidFactory?"""
return is_pretrained(op) or isinstance(op.impl, MonoidFactory)
def _batch_id(fold: str, idx: int) -> str:
return fold + ("*" if idx == _ALL_BATCHES else str(idx))
def _get_fold(batch_id: str) -> str:
return batch_id[0]
def _get_idx(batch_id: str) -> int:
return _ALL_BATCHES if batch_id[1] == "*" else int(batch_id[1:])
class _Batch:
def __init__(self, X, y, task: Optional["_ApplyTask"]):
self.X = X
self.y = y
self.task = task
if isinstance(X, pd.DataFrame) and isinstance(y, pd.Series):
space_X = int(cast(pd.DataFrame, X).memory_usage().sum())
space_y = cast(pd.Series, y).memory_usage()
self.space = space_X + space_y
else:
self.space = 1 # place-holder value for Spark
def spill(self, spill_dir: pathlib.Path) -> None:
name_X = spill_dir / f"X_{self}.pkl"
name_y = spill_dir / f"y_{self}.pkl"
if isinstance(self.X, pd.DataFrame):
cast(pd.DataFrame, self.X).to_pickle(name_X)
elif isinstance(self.X, np.ndarray):
np.save(name_X, self.X, allow_pickle=True)
else:
raise ValueError(
f"""Spilling of {type(self.X)} is not supported.
Supported types are: pandas DataFrame, numpy ndarray."""
)
if isinstance(self.y, pd.Series):
cast(pd.Series, self.y).to_pickle(name_y)
elif isinstance(self.y, np.ndarray):
np.save(name_y, self.y, allow_pickle=True)
else:
raise ValueError(
f"""Spilling of {type(self.y)} is not supported.
                Supported types are: pandas Series and numpy ndarray."""
)
self.X, self.y = name_X, name_y
def load_spilled(self) -> None:
assert isinstance(self.X, pathlib.Path) and isinstance(self.y, pathlib.Path)
# we know these are pickles written by us, so we can trust them
try:
data_X = pd.read_pickle(self.X) # nosec B301
except FileNotFoundError:
data_X = np.load(f"{self.X}" + ".npy", allow_pickle=True)
try:
data_y = pd.read_pickle(self.y) # nosec B301
except FileNotFoundError:
data_y = np.load(f"{self.y}" + ".npy", allow_pickle=True)
self.X, self.y = data_X, data_y
def delete_if_spilled(self) -> None:
if isinstance(self.X, pathlib.Path) and isinstance(self.y, pathlib.Path):
self.X.unlink()
self.y.unlink()
def __str__(self) -> str:
assert self.task is not None
assert len(self.task.batch_ids) == 1 and not self.task.has_all_batches()
batch_id = self.task.batch_ids[0]
return f"{self.task.step_id}_{batch_id}_{self.task.held_out}"
@property
def Xy(self) -> Tuple[Any, Any]:
assert self.status == _BatchStatus.RESIDENT
return self.X, self.y
@property
def status(self) -> _BatchStatus:
if isinstance(self.X, pathlib.Path) and isinstance(self.y, pathlib.Path):
return _BatchStatus.SPILLED
return _BatchStatus.RESIDENT
_MemoKey = Tuple[Type["_Task"], int, Tuple[str, ...], Optional[str]]
class _Task:
preds: List["_Task"]
succs: List["_Task"]
def __init__(
self, step_id: int, batch_ids: Tuple[str, ...], held_out: Optional[str]
):
assert len(batch_ids) >= 1
self.step_id = step_id
self.batch_ids = batch_ids
self.held_out = held_out
self.status = _TaskStatus.FRESH
self.preds = []
self.succs = []
self.deletable_output = True
@abstractmethod
def get_operation(
self, pipeline: TrainablePipeline[TrainableIndividualOp]
) -> _Operation:
pass
def add_pred(self, pred):
if pred not in self.preds:
self.preds.append(pred)
pred.succs.append(self)
def has_all_batches(self) -> bool:
return any(b[1] == "*" for b in self.batch_ids)
def can_be_ready(self, end_of_scanned_batches) -> bool:
if any(p.status is not _TaskStatus.DONE for p in self.preds):
return False
if end_of_scanned_batches:
return True
return not self.has_all_batches()
def expand_batches(self, up_to) -> Tuple[str, ...]:
if self.has_all_batches():
result = tuple(
itertools.chain.from_iterable(
(
(_batch_id(_get_fold(b), i) for i in range(up_to))
if b[1] == "*"
else [b]
)
for b in self.batch_ids
)
)
else:
result = self.batch_ids
return result
def memo_key(self) -> _MemoKey:
return type(self), self.step_id, self.batch_ids, self.held_out
class _TrainTask(_Task):
monoid: Optional[Monoid]
trained: Optional[TrainedIndividualOp]
def __init__(self, step_id: int, batch_ids: Tuple[str, ...], held_out: str):
super().__init__(step_id, batch_ids, held_out)
self.monoid = None
self.trained = None
def get_operation(
self, pipeline: TrainablePipeline[TrainableIndividualOp]
) -> _Operation:
step = pipeline.steps_list()[self.step_id]
if is_pretrained(step):
return _Operation.FIT
if is_associative(step):
if len(self.batch_ids) == 1 and not self.has_all_batches():
return _Operation.TO_MONOID
return _Operation.COMBINE
if is_incremental(step):
return _Operation.PARTIAL_FIT
return _Operation.FIT
def get_trained(
self, pipeline: TrainablePipeline[TrainableIndividualOp]
) -> TrainedIndividualOp:
if self.trained is None:
assert self.monoid is not None
trainable = pipeline.steps_list()[self.step_id]
self.trained = trainable.convert_to_trained()
hyperparams = trainable.impl._hyperparams
self.trained._impl = trainable._impl_class()(**hyperparams)
if trainable.has_method("_set_fit_attributes"):
self.trained._impl._set_fit_attributes(self.monoid)
elif trainable.has_method("from_monoid"):
self.trained._impl.from_monoid(self.monoid)
else:
assert False, self.trained
return self.trained
class _ApplyTask(_Task):
batch: Optional[_Batch]
splits: Optional[List[Tuple[List[int], List[int]]]]
def __init__(self, step_id: int, batch_ids: Tuple[str, ...], held_out: str):
super().__init__(step_id, batch_ids, held_out)
assert len(batch_ids) == 1 and not self.has_all_batches()
self.batch = None
self.splits = None # for cross validation with scan tasks
def get_operation(self, pipeline: TrainablePipeline) -> _Operation:
if self.step_id == _DUMMY_INPUT_STEP:
return _Operation.SCAN if len(self.preds) == 0 else _Operation.SPLIT
step = pipeline.steps_list()[self.step_id]
return _Operation.TRANSFORM if step.is_transformer() else _Operation.PREDICT
class _MetricTask(_Task):
    mmonoid: Optional[MetricMonoid]
def __init__(self, step_id: int, batch_ids: Tuple[str, ...], held_out: str):
super().__init__(step_id, batch_ids, held_out)
self.mmonoid = None
def get_operation(self, pipeline: TrainablePipeline) -> _Operation:
if len(self.batch_ids) == 1 and not self.has_all_batches():
return _Operation.TO_MONOID
return _Operation.COMBINE
def _task_type_prio(task: _Task) -> int:
if isinstance(task, _TrainTask):
return 0
if isinstance(task, _ApplyTask):
return 1
assert isinstance(task, _MetricTask), type(task)
return 2
class Prio(ABC):
"""Abstract base class for scheduling priority in task graphs."""
arity: int
def bottom(self) -> Any: # tuple of "inf" means all others are more important
return self.arity * (float("inf"),)
def batch_priority(self, batch: _Batch) -> Any: # prefer to keep resident if lower
assert batch.task is not None
return min(
(
self.task_priority(s)
for s in batch.task.succs
if s.status in [_TaskStatus.READY, _TaskStatus.WAITING]
),
default=self.bottom(),
)
@abstractmethod
def task_priority(self, task: _Task) -> Any: # prefer to do first if lower
pass
class PrioStep(Prio):
"""Execute tasks from earlier steps first, like nested-loop algorithm."""
arity = 6
def task_priority(self, task: _Task) -> Any:
if task.has_all_batches():
max_batch_idx = sys.maxsize
else:
max_batch_idx = max(_get_idx(b) for b in task.batch_ids)
result = (
task.status.value,
task.step_id,
max_batch_idx,
len(task.batch_ids),
task.batch_ids,
_task_type_prio(task),
)
assert len(result) == self.arity
return result
class PrioBatch(Prio):
"""Execute tasks from earlier batches first."""
arity = 6
def task_priority(self, task: _Task) -> Any:
if task.has_all_batches():
max_batch_idx = sys.maxsize
else:
max_batch_idx = max(_get_idx(b) for b in task.batch_ids)
result = (
task.status.value,
max_batch_idx,
len(task.batch_ids),
task.batch_ids,
task.step_id,
_task_type_prio(task),
)
assert len(result) == self.arity
return result
class PrioResourceAware(Prio):
"""Execute tasks with less non-resident data first."""
arity = 5
def task_priority(self, task: _Task) -> Any:
non_res = sum(
p.batch.space
for p in task.preds
if isinstance(p, _ApplyTask) and p.batch is not None
if p.batch.status != _BatchStatus.RESIDENT
)
result = (
task.status.value,
non_res,
task.batch_ids,
task.step_id,
_task_type_prio(task),
)
assert len(result) == self.arity
return result
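# Note (added for illustration, not part of the original source): each Prio
# subclass returns a plain tuple, and Python compares tuples lexicographically,
# so e.g. PrioResourceAware orders READY tasks first by how much of their input
# data is currently non-resident, breaking ties by batch ids and step id.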
def _step_id_to_string(
step_id: int,
pipeline: TrainablePipeline,
cls2label: Optional[Dict[str, str]] = None,
) -> str:
if step_id == _DUMMY_INPUT_STEP:
return "INP"
if step_id == _DUMMY_SCORE_STEP:
return "SCR"
step = pipeline.steps_list()[step_id]
cls = step.class_name()
return cls2label[cls] if cls2label and cls in cls2label else step.name()
def _task_to_string(
task: _Task,
pipeline: TrainablePipeline,
cls2label: Optional[Dict[str, str]] = None,
sep: str = "\n",
trace_id: Optional[int] = None,
) -> str:
trace_id_s = "" if trace_id is None else f"{trace_id} "
operation_s = task.get_operation(pipeline).name.lower()
step_s = _step_id_to_string(task.step_id, pipeline, cls2label)
batches_s = ",".join(task.batch_ids)
held_out_s = "" if task.held_out is None else f"\\\\{task.held_out}"
return f"{trace_id_s}{operation_s}{sep}{step_s}({batches_s}){held_out_s}"
# TODO: Maybe we can address this another way?
# pylint: disable=E1101
class _RunStats:
_values: Dict[str, float]
def __init__(self):
object.__setattr__(
self,
"_values",
{
"spill_count": 0,
"load_count": 0,
"spill_space": 0,
"load_space": 0,
"min_resident": 0,
"max_resident": 0,
"train_count": 0,
"apply_count": 0,
"metric_count": 0,
"train_time": 0,
"apply_time": 0,
"metric_time": 0,
"critical_count": 0,
"critical_time": 0,
},
)
def __getattr__(self, name: str) -> float:
if name in self._values:
return self._values[name]
raise AttributeError(f"'{name}' not in {self._values.keys()}")
def __setattr__(self, name: str, value: float) -> None:
if name in self._values:
self._values[name] = value
else:
raise AttributeError(f"'{name}' not in {self._values.keys()}")
def __repr__(self) -> str:
return lale.pretty_print.json_to_string(self._values)
class _TraceRecord:
task: _Task
time: float
def __init__(self, task: _Task, task_time: float):
self.task = task
self.time = task_time
if isinstance(task, _ApplyTask) and task.batch is not None:
self.space = task.batch.space
else:
self.space = 0 # TODO: size for train tasks and metrics tasks
class _TaskGraph:
step_ids: Dict[TrainableIndividualOp, int]
step_id_preds: Dict[int, List[int]]
fresh_tasks: List[_Task]
all_tasks: Dict[_MemoKey, _Task]
tasks_with_all_batches: List[_Task]
def __init__(
self,
pipeline: TrainablePipeline[TrainableIndividualOp],
folds: List[str],
partial_transform: Union[bool, str],
same_fold: bool,
):
self.pipeline = pipeline
self.folds = folds
self.partial_transform = partial_transform
self.same_fold = same_fold
self.step_ids = {step: i for i, step in enumerate(pipeline.steps_list())}
self.step_id_preds = {
self.step_ids[s]: (
[_DUMMY_INPUT_STEP]
if len(pipeline._preds[s]) == 0
else [self.step_ids[p] for p in pipeline._preds[s]]
)
for s in pipeline.steps_list()
}
self.fresh_tasks = []
self.all_tasks = {}
self.tasks_with_all_batches = []
def __enter__(self) -> "_TaskGraph":
return self
def __exit__(self, exc_value, exc_type, traceback) -> None:
for task in self.all_tasks.values():
# preds form a garbage collection cycle with succs
task.preds.clear()
task.succs.clear()
# tasks form a garbage collection cycle with batches
if isinstance(task, _ApplyTask) and task.batch is not None:
task.batch.task = None
task.batch = None
self.all_tasks.clear()
def extract_scores(self, scoring: MetricMonoidFactory) -> List[float]:
def extract_score(held_out: str) -> float:
batch_ids = (_batch_id(held_out, _ALL_BATCHES),)
task = self.all_tasks[(_MetricTask, _DUMMY_SCORE_STEP, batch_ids, held_out)]
assert isinstance(task, _MetricTask) and task.mmonoid is not None
return scoring.from_monoid(task.mmonoid)
scores = [extract_score(held_out) for held_out in self.folds]
return scores
def extract_trained_pipeline(
self, held_out: Optional[str], up_to: int
) -> TrainedPipeline:
if up_to == _ALL_BATCHES:
batch_ids = _batch_ids_except(self.folds, held_out)
else:
assert len(self.folds) == 1 and held_out is None
batch_ids = tuple(_batch_id(self.folds[0], i) for i in range(up_to))
def extract_trained_step(step_id: int) -> TrainedIndividualOp:
task = cast(
_TrainTask, self.all_tasks[(_TrainTask, step_id, batch_ids, held_out)]
)
return task.get_trained(self.pipeline)
step_map = {
old_step: extract_trained_step(step_id)
for step_id, old_step in enumerate(self.pipeline.steps_list())
}
trained_edges = [(step_map[x], step_map[y]) for x, y in self.pipeline.edges()]
result = TrainedPipeline(
list(step_map.values()), trained_edges, ordered=True, _lale_trained=True
)
return result
def find_or_create(
self,
task_class: Type["_Task"],
step_id: int,
batch_ids: Tuple[str, ...],
held_out: Optional[str],
) -> _Task:
memo_key = task_class, step_id, batch_ids, held_out
if memo_key not in self.all_tasks:
task = task_class(step_id, batch_ids, held_out)
self.all_tasks[memo_key] = task
self.fresh_tasks.append(task)
if task.has_all_batches():
self.tasks_with_all_batches.append(task)
return self.all_tasks[memo_key]
def visualize(
self, prio: Prio, call_depth: int, trace: Optional[List[_TraceRecord]]
) -> None:
cls2label = lale.json_operator._get_cls2label(call_depth + 1)
dot = graphviz.Digraph()
dot.attr("graph", rankdir="LR", nodesep="0.1")
dot.attr("node", fontsize="11", margin="0.03,0.03", shape="box", height="0.1")
next_task = min(self.all_tasks.values(), key=prio.task_priority)
task_key2trace_id: Dict[_MemoKey, int] = {}
if trace is not None:
task_key2trace_id = {r.task.memo_key(): i for i, r in enumerate(trace)}
for task in self.all_tasks.values():
if task.status is _TaskStatus.FRESH:
color = "white"
elif task.status is _TaskStatus.READY:
color = "lightgreen" if task is next_task else "yellow"
elif task.status is _TaskStatus.WAITING:
color = "coral"
else:
assert task.status is _TaskStatus.DONE
color = "lightgray"
# https://www.graphviz.org/doc/info/shapes.html
if isinstance(task, _TrainTask):
style = "filled,rounded"
elif isinstance(task, _ApplyTask):
style = "filled"
elif isinstance(task, _MetricTask):
style = "filled,diagonals"
else:
assert False, type(task)
trace_id = task_key2trace_id.get(task.memo_key(), None)
task_s = _task_to_string(task, self.pipeline, cls2label, trace_id=trace_id)
dot.node(task_s, style=style, fillcolor=color)
for task in self.all_tasks.values():
trace_id = task_key2trace_id.get(task.memo_key(), None)
task_s = _task_to_string(task, self.pipeline, cls2label, trace_id=trace_id)
for succ in task.succs:
succ_id = task_key2trace_id.get(succ.memo_key(), None)
succ_s = _task_to_string(
succ, self.pipeline, cls2label, trace_id=succ_id
)
dot.edge(task_s, succ_s)
import IPython.display
IPython.display.display(dot)
def _batch_ids_except(folds: List[str], held_out: Optional[str]) -> Tuple[str, ...]:
return tuple(_batch_id(f, _ALL_BATCHES) for f in folds if f != held_out)
def _create_initial_tasks(
tg: _TaskGraph, need_metrics: bool, keep_estimator: bool
) -> None:
held_out: Optional[str]
_ = tg.find_or_create(
_ApplyTask,
_DUMMY_INPUT_STEP,
(_batch_id(tg.folds[0] if len(tg.folds) == 1 else _ALL_FOLDS, 0),),
None,
)
if need_metrics:
for held_out in tg.folds:
task = tg.find_or_create(
_MetricTask,
_DUMMY_SCORE_STEP,
(_batch_id(held_out, _ALL_BATCHES),),
None if len(tg.folds) == 1 else held_out,
)
task.deletable_output = False
if keep_estimator:
for step_id in tg.step_ids.values():
held_outs = cast(
List[Optional[str]], [None] if len(tg.folds) == 1 else tg.folds
)
for held_out in held_outs:
task = tg.find_or_create(
_TrainTask,
step_id,
_batch_ids_except(tg.folds, held_out),
held_out,
)
assert isinstance(task, _TrainTask)
task.deletable_output = False
trainable = tg.pipeline.steps_list()[task.step_id]
if is_pretrained(trainable):
task.trained = cast(TrainedIndividualOp, trainable)
task.status = _TaskStatus.DONE
def _backward_chain_tasks(
tg: _TaskGraph, n_batches_scanned: int, end_of_scanned_batches: bool
) -> None:
def apply_pred_ho(task, pred_batch_id, pred_step_id):
assert isinstance(task, _TrainTask), type(task)
if len(tg.folds) == 1 or pred_step_id == _DUMMY_INPUT_STEP:
result = None
elif tg.same_fold:
result = task.held_out
else:
result = _get_fold(pred_batch_id)
return result
def train_pred_ho(task, pred_batch_ids):
assert isinstance(task, _TrainTask), type(task)
if len(pred_batch_ids) == 1 and (
tg.step_id_preds[task.step_id] == [_DUMMY_INPUT_STEP] or not tg.same_fold
):
result = None
else:
result = task.held_out
return result
pred_batch_ids: Tuple[str, ...]
while len(tg.fresh_tasks) > 0:
task = tg.fresh_tasks.pop()
if isinstance(task, _TrainTask):
step = tg.pipeline.steps_list()[task.step_id]
if is_pretrained(step):
pass
elif len(task.batch_ids) == 1 and not task.has_all_batches():
for pred_step_id in tg.step_id_preds[task.step_id]:
task.add_pred(
tg.find_or_create(
_ApplyTask,
pred_step_id,
task.batch_ids,
apply_pred_ho(task, task.batch_ids[0], pred_step_id),
)
)
else:
if is_associative(step):
if tg.partial_transform in ["score", True]:
if task.has_all_batches():
if n_batches_scanned > 0:
expanded_batch_ids = task.expand_batches(
n_batches_scanned
)
last_combine_task = tg.find_or_create(
_TrainTask,
task.step_id,
expanded_batch_ids,
train_pred_ho(task, expanded_batch_ids),
)
last_combine_task.deletable_output = False
if end_of_scanned_batches:
task.add_pred(last_combine_task)
else:
if len(task.batch_ids) > 1:
pred_batch_ids = task.batch_ids[:-1]
task.add_pred(
tg.find_or_create(
_TrainTask,
task.step_id,
pred_batch_ids,
train_pred_ho(task, pred_batch_ids),
)
)
pred_batch_ids = task.batch_ids[-1:]
task.add_pred(
tg.find_or_create(
_TrainTask,
task.step_id,
pred_batch_ids,
train_pred_ho(task, pred_batch_ids),
)
)
else:
for batch_id in task.expand_batches(n_batches_scanned):
pred_batch_ids = (batch_id,)
task.add_pred(
tg.find_or_create(
_TrainTask,
task.step_id,
pred_batch_ids,
train_pred_ho(task, pred_batch_ids),
)
)
elif is_incremental(step):
if task.has_all_batches():
if n_batches_scanned > 0:
expanded_batch_ids = task.expand_batches(n_batches_scanned)
last_partial_fit_task = tg.find_or_create(
_TrainTask,
task.step_id,
expanded_batch_ids,
train_pred_ho(task, expanded_batch_ids),
)
last_partial_fit_task.deletable_output = False
if end_of_scanned_batches:
task.add_pred(last_partial_fit_task)
else:
if len(task.batch_ids) > 1:
pred_batch_ids = task.batch_ids[:-1]
task.add_pred(
tg.find_or_create(
_TrainTask,
task.step_id,
pred_batch_ids,
train_pred_ho(task, pred_batch_ids),
)
)
pred_batch_id = task.batch_ids[-1]
for pred_step_id in tg.step_id_preds[task.step_id]:
task.add_pred(
tg.find_or_create(
_ApplyTask,
pred_step_id,
(pred_batch_id,),
apply_pred_ho(task, pred_batch_id, pred_step_id),
)
)
else:
for pred_step_id in tg.step_id_preds[task.step_id]:
for pred_batch_id in task.expand_batches(n_batches_scanned):
task.add_pred(
tg.find_or_create(
_ApplyTask,
pred_step_id,
(pred_batch_id,),
apply_pred_ho(task, pred_batch_id, pred_step_id),
)
)
elif isinstance(task, _ApplyTask):
assert len(task.batch_ids) == 1 and not task.has_all_batches()
if task.step_id == _DUMMY_INPUT_STEP:
assert task.held_out is None, task.held_out
batch_id = task.batch_ids[0]
if len(tg.folds) > 1 and _get_fold(batch_id) != _ALL_FOLDS:
task.add_pred(
tg.find_or_create(
_ApplyTask,
task.step_id,
(_batch_id(_ALL_FOLDS, _get_idx(batch_id)),),
None,
)
)
else:
if (
tg.partial_transform is True
or tg.partial_transform == "score"
and all(isinstance(s, _MetricTask) for s in task.succs)
):
fit_upto = _get_idx(task.batch_ids[0])
if end_of_scanned_batches and fit_upto == n_batches_scanned - 1:
pred_batch_ids = _batch_ids_except(tg.folds, task.held_out)
else:
pred_batch_ids = tuple(
_batch_id(fold, idx)
for fold in tg.folds
if fold != task.held_out
for idx in range(fit_upto + 1)
)
else:
pred_batch_ids = _batch_ids_except(tg.folds, task.held_out)
task.add_pred(
tg.find_or_create(
_TrainTask,
task.step_id,
pred_batch_ids,
task.held_out,
)
)
for pred_step_id in tg.step_id_preds[task.step_id]:
if len(tg.folds) == 1 or pred_step_id == _DUMMY_INPUT_STEP:
pred_held_out = None
else:
pred_held_out = task.held_out
task.add_pred(
tg.find_or_create(
_ApplyTask, pred_step_id, task.batch_ids, pred_held_out
)
)
elif isinstance(task, _MetricTask):
if len(task.batch_ids) == 1 and not task.has_all_batches():
task.add_pred(
tg.find_or_create(
_ApplyTask, _DUMMY_INPUT_STEP, task.batch_ids, None
)
)
sink = tg.pipeline.get_last()
assert sink is not None
task.add_pred(
tg.find_or_create(
_ApplyTask, tg.step_ids[sink], task.batch_ids, task.held_out
)
)
else:
for batch_id in task.expand_batches(n_batches_scanned):
task.add_pred(
tg.find_or_create(
_MetricTask, task.step_id, (batch_id,), task.held_out
)
)
else:
assert False, type(task)
if task.status is not _TaskStatus.DONE:
if task.can_be_ready(end_of_scanned_batches):
task.status = _TaskStatus.READY
else:
task.status = _TaskStatus.WAITING
def _create_tasks(
pipeline: TrainablePipeline[TrainableIndividualOp],
folds: List[str],
need_metrics: bool,
keep_estimator: bool,
partial_transform: Union[bool, str],
same_fold: bool,
) -> _TaskGraph:
tg = _TaskGraph(pipeline, folds, partial_transform, same_fold)
_create_initial_tasks(tg, need_metrics, keep_estimator)
_backward_chain_tasks(tg, 0, False)
return tg
def _analyze_run_trace(stats: _RunStats, trace: List[_TraceRecord]) -> _RunStats:
memo_key2critical_count: Dict[_MemoKey, int] = {}
memo_key2critical_time: Dict[_MemoKey, float] = {}
for record in trace:
if isinstance(record.task, _TrainTask):
stats.train_count += 1
stats.train_time += record.time
elif isinstance(record.task, _ApplyTask):
stats.apply_count += 1
stats.apply_time += record.time
elif isinstance(record.task, _MetricTask):
stats.metric_count += 1
stats.metric_time += record.time
else:
assert False, type(record.task)
critical_count = 1 + max(
(
memo_key2critical_count[p.memo_key()]
for p in record.task.preds
if p in memo_key2critical_count
),
default=0,
)
stats.critical_count = max(critical_count, stats.critical_count)
memo_key2critical_count[record.task.memo_key()] = critical_count
critical_time = record.time + max(
(
memo_key2critical_time[p.memo_key()]
for p in record.task.preds
if p in memo_key2critical_time
),
default=0,
)
stats.critical_time = max(critical_time, stats.critical_time)
memo_key2critical_time[record.task.memo_key()] = critical_time
return stats
class _BatchCache:
spill_dir: Optional[tempfile.TemporaryDirectory]
spill_path: Optional[pathlib.Path]
def __init__(
self,
tasks: Dict[_MemoKey, _Task],
max_resident: Optional[int],
prio: Prio,
verbose: int,
):
self.tasks = tasks
self.max_resident = sys.maxsize if max_resident is None else max_resident
self.prio = prio
self.spill_dir = None
self.spill_path = None
self.verbose = verbose
self.stats = _RunStats()
self.stats.max_resident = self.max_resident
def __enter__(self) -> "_BatchCache":
if self.max_resident < sys.maxsize:
self.spill_dir = tempfile.TemporaryDirectory()
self.spill_path = pathlib.Path(self.spill_dir.name)
return self
def __exit__(self, exc_value, exc_type, traceback) -> None:
if self.spill_dir is not None:
self.spill_dir.cleanup()
def _get_apply_preds(self, task: _Task) -> List[_ApplyTask]:
result = [t for t in task.preds if isinstance(t, _ApplyTask)]
assert all(t.batch is not None for t in result)
return result
def estimate_space(self, task: _ApplyTask) -> int:
other_tasks_with_similar_output = (
t
for t in self.tasks.values()
if t is not task and isinstance(t, _ApplyTask)
if t.step_id == task.step_id and t.batch is not None
)
try:
surrogate = next(other_tasks_with_similar_output)
assert isinstance(surrogate, _ApplyTask) and surrogate.batch is not None
return surrogate.batch.space
except StopIteration: # the iterator was empty
if task.step_id == _DUMMY_INPUT_STEP:
return 1 # safe to underestimate on first batch scanned
apply_preds = self._get_apply_preds(task)
return sum(cast(_Batch, t.batch).space for t in apply_preds)
def ensure_space(self, amount_needed: int, no_spill_set: Set[_Batch]) -> None:
no_spill_space = sum(b.space for b in no_spill_set)
min_resident = amount_needed + no_spill_space
self.stats.min_resident = max(self.stats.min_resident, min_resident)
resident_batches = [
t.batch
for t in self.tasks.values()
if isinstance(t, _ApplyTask) and t.batch is not None
if t.batch.status == _BatchStatus.RESIDENT
]
resident_batches.sort(key=self.prio.batch_priority)
resident_batches_space = sum(b.space for b in resident_batches)
while resident_batches_space + amount_needed > self.max_resident:
if len(resident_batches) == 0:
logger.warning(
f"ensure_space() failed, amount_needed {amount_needed}, no_spill_space {no_spill_space}, min_resident {min_resident}, max_resident {self.max_resident}"
)
break
batch = resident_batches.pop()
assert batch.status == _BatchStatus.RESIDENT and batch.task is not None
if batch in no_spill_set:
logger.warning(f"aborted spill of batch {batch}")
else:
assert self.spill_path is not None, self.max_resident
batch.spill(self.spill_path)
self.stats.spill_count += 1
self.stats.spill_space += batch.space
if self.verbose >= 2:
print(f"spill {batch.X} {batch.y}")
resident_batches_space -= batch.space
def load_input_batches(self, task: _Task) -> None:
apply_preds = self._get_apply_preds(task)
no_spill_set = cast(Set[_Batch], set(t.batch for t in apply_preds))
for pred in apply_preds:
assert pred.batch is not None
if pred.batch.status == _BatchStatus.SPILLED:
self.ensure_space(pred.batch.space, no_spill_set)
if self.verbose >= 2:
print(f"load {pred.batch.X} {pred.batch.y}")
pred.batch.load_spilled()
self.stats.load_count += 1
self.stats.load_space += pred.batch.space
for pred in apply_preds:
assert pred.batch is not None
assert pred.batch.status == _BatchStatus.RESIDENT
def _run_tasks_inner(
tg: _TaskGraph,
batches_train: Iterable[Tuple[Any, Any]],
batches_valid: Optional[List[Tuple[Any, Any]]],
scoring: Optional[MetricMonoidFactory],
cv,
unique_class_labels: List[Union[str, int, float]],
cache: _BatchCache,
prio: Prio,
verbose: int,
progress_callback: Optional[Callable[[float, float, int, bool], None]],
call_depth: int,
) -> None:
for task in tg.all_tasks.values():
assert task.status is not _TaskStatus.FRESH
n_batches_scanned = 0
end_of_scanned_batches = False
ready_keys = {k for k, t in tg.all_tasks.items() if t.status is _TaskStatus.READY}
def find_task(
task_class: Type["_Task"], task_list: List[_Task]
) -> Union[_Task, List[_Task]]:
task_list = [t for t in task_list if isinstance(t, task_class)]
if len(task_list) == 1:
return task_list[0]
else:
return task_list
def try_to_delete_output(task: _Task) -> None:
if task.deletable_output:
if all(s.status is _TaskStatus.DONE for s in task.succs):
if isinstance(task, _ApplyTask):
if task.batch is not None:
task.batch.delete_if_spilled()
task.batch = None
elif isinstance(task, _TrainTask):
task.monoid = None
if batches_valid is None:
task.trained = None
elif isinstance(task, _MetricTask):
task.mmonoid = None
else:
assert False, type(task)
def mark_done(task: _Task) -> None:
try_to_delete_output(task)
if task.status is _TaskStatus.DONE:
return
if task.status is _TaskStatus.READY:
ready_keys.remove(task.memo_key())
task.status = _TaskStatus.DONE
for succ in task.succs:
if succ.status is _TaskStatus.WAITING:
if succ.can_be_ready(end_of_scanned_batches):
succ.status = _TaskStatus.READY
ready_keys.add(succ.memo_key())
for pred in task.preds:
if all(s.status is _TaskStatus.DONE for s in pred.succs):
mark_done(pred)
if isinstance(task, _TrainTask):
if task.get_operation(tg.pipeline) is _Operation.TO_MONOID:
if task.monoid is not None and task.monoid.is_absorbing:
def is_moot(task2): # same modulo batch_ids
type1, step1, _, hold1 = task.memo_key()
type2, step2, _, hold2 = task2.memo_key()
return type1 == type2 and step1 == step2 and hold1 == hold2
task_monoid = task.monoid # prevent accidental None assignment
for task2 in tg.all_tasks.values():
if task2.status is not _TaskStatus.DONE and is_moot(task2):
assert isinstance(task2, _TrainTask)
task2.monoid = task_monoid
mark_done(task2)
trace: Optional[List[_TraceRecord]] = [] if verbose >= 2 else None
batches_iterator = iter(batches_train)
while len(ready_keys) > 0:
task = tg.all_tasks[
min(ready_keys, key=lambda k: prio.task_priority(tg.all_tasks[k]))
]
if verbose >= 3:
tg.visualize(prio, call_depth + 1, trace)
print(_task_to_string(task, tg.pipeline, sep=" "))
operation = task.get_operation(tg.pipeline)
start_time = time.time() if verbose >= 2 else float("nan")
if operation is _Operation.SCAN:
assert not end_of_scanned_batches
assert isinstance(task, _ApplyTask)
assert len(task.batch_ids) == 1 and len(task.preds) == 0
cache.ensure_space(cache.estimate_space(task), set())
try:
X, y = next(batches_iterator)
task.batch = _Batch(X, y, task)
n_batches_scanned += 1
_ = tg.find_or_create(
_ApplyTask,
_DUMMY_INPUT_STEP,
(_batch_id(_get_fold(task.batch_ids[0]), n_batches_scanned),),
None,
)
except StopIteration:
end_of_scanned_batches = True
assert n_batches_scanned >= 1
for task_with_ab in tg.tasks_with_all_batches:
if task_with_ab.status is _TaskStatus.WAITING:
task_with_ab.status = _TaskStatus.FRESH
tg.fresh_tasks.append(task_with_ab)
else:
assert task_with_ab.status is _TaskStatus.DONE
_backward_chain_tasks(tg, n_batches_scanned, end_of_scanned_batches)
ready_keys = {
k for k, t in tg.all_tasks.items() if t.status is _TaskStatus.READY
}
elif operation is _Operation.SPLIT:
assert isinstance(task, _ApplyTask)
assert len(task.batch_ids) == 1 and len(task.preds) == 1
batch_id = task.batch_ids[0]
scan_pred = cast(_ApplyTask, task.preds[0])
cache.load_input_batches(task)
assert scan_pred.batch is not None
cache.ensure_space(cache.estimate_space(task), {scan_pred.batch})
input_X, input_y = scan_pred.batch.Xy
is_sparky = lale.helpers.spark_installed and isinstance(
input_X, SparkDataFrame
)
if is_sparky: # TODO: use Spark native split instead
input_X, input_y = input_X.toPandas(), input_y.toPandas().squeeze()
if scan_pred.splits is None:
scan_pred.splits = list(cv.split(input_X, input_y))
train, test = scan_pred.splits[ord(_get_fold(batch_id)) - ord("d")]
dummy_estimator = sklearn.tree.DecisionTreeClassifier()
output_X, output_y = lale.helpers.split_with_schemas(
dummy_estimator, input_X, input_y, test, train
)
if is_sparky: # TODO: use Spark native split instead
output_X, output_y = pandas2spark(output_X), pandas2spark(output_y)
task.batch = _Batch(output_X, output_y, task)
elif operation in [_Operation.TRANSFORM, _Operation.PREDICT]:
assert isinstance(task, _ApplyTask)
assert len(task.batch_ids) == 1
train_pred = cast(_TrainTask, find_task(_TrainTask, task.preds))
trained = train_pred.get_trained(tg.pipeline)
apply_preds = [t for t in task.preds if isinstance(t, _ApplyTask)]
cache.load_input_batches(task)
if len(apply_preds) == 1:
assert apply_preds[0].batch is not None
input_X, input_y = apply_preds[0].batch.Xy
else:
assert not any(pred.batch is None for pred in apply_preds)
input_X = [cast(_Batch, pred.batch).X for pred in apply_preds]
# The assumption is that input_y is not changed by the preds, so we can
# use it from any one of them.
input_y = cast(_Batch, apply_preds[0].batch).y
no_spill_set = cast(Set[_Batch], set(t.batch for t in apply_preds))
cache.ensure_space(cache.estimate_space(task), no_spill_set)
if operation is _Operation.TRANSFORM:
if trained.has_method("transform_X_y"):
output_X, output_y = trained.transform_X_y(input_X, input_y)
else:
output_X, output_y = trained.transform(input_X), input_y
task.batch = _Batch(output_X, output_y, task)
else:
y_pred = trained.predict(input_X)
if isinstance(y_pred, np.ndarray):
y_pred = pd.Series(
y_pred,
cast(pd.Series, input_y).index,
cast(pd.Series, input_y).dtype,
"y_pred",
)
task.batch = _Batch(input_X, y_pred, task)
elif operation is _Operation.FIT:
assert isinstance(task, _TrainTask)
assert all(isinstance(p, _ApplyTask) for p in task.preds)
apply_preds = [cast(_ApplyTask, p) for p in task.preds]
assert not any(p.batch is None for p in apply_preds)
trainable = tg.pipeline.steps_list()[task.step_id]
if is_pretrained(trainable):
assert len(task.preds) == 0
if task.trained is None:
task.trained = cast(TrainedIndividualOp, trainable)
else:
cache.load_input_batches(task)
if len(task.preds) == 1:
input_X, input_y = cast(_Batch, apply_preds[0].batch).Xy
else:
assert not is_incremental(trainable)
list_X = [cast(_Batch, p.batch).X for p in apply_preds]
list_y = [cast(_Batch, p.batch).y for p in apply_preds]
if all(isinstance(X, pd.DataFrame) for X in list_X):
input_X = pd.concat(list_X)
input_y = pd.concat(list_y)
elif lale.helpers.spark_installed and all(
isinstance(X, SparkDataFrame) for X in list_X
):
input_X = functools.reduce(lambda a, b: a.union(b), list_X) # type: ignore
input_y = functools.reduce(lambda a, b: a.union(b), list_y) # type: ignore
elif all(isinstance(X, np.ndarray) for X in list_X):
input_X = np.concatenate(list_X)
input_y = np.concatenate(list_y)
else:
raise ValueError(
f"""Input of {type(list_X[0])} is not supported for
fit on a non-incremental operator.
Supported types are: pandas DataFrame, numpy ndarray, and spark DataFrame."""
)
task.trained = trainable.fit(input_X, input_y)
elif operation is _Operation.PARTIAL_FIT:
assert isinstance(task, _TrainTask)
if task.has_all_batches():
assert len(task.preds) == 1, (
_task_to_string(task, tg.pipeline, sep=" "),
len(task.preds),
)
train_pred = cast(_TrainTask, task.preds[0])
task.trained = train_pred.get_trained(tg.pipeline)
else:
assert len(task.preds) in [1, 2]
if len(task.preds) == 1:
trainee = tg.pipeline.steps_list()[task.step_id]
else:
train_pred = cast(_TrainTask, find_task(_TrainTask, task.preds))
trainee = train_pred.get_trained(tg.pipeline)
apply_pred = cast(_ApplyTask, find_task(_ApplyTask, task.preds))
assert apply_pred.batch is not None
cache.load_input_batches(task)
input_X, input_y = apply_pred.batch.Xy
if trainee.is_supervised() and trainee.is_classifier():
task.trained = trainee.partial_fit(
input_X, input_y, classes=unique_class_labels
)
else:
task.trained = trainee.partial_fit(input_X, input_y)
elif operation is _Operation.TO_MONOID:
assert len(task.batch_ids) == 1
assert all(isinstance(p, _ApplyTask) for p in task.preds)
assert all(cast(_ApplyTask, p).batch is not None for p in task.preds)
cache.load_input_batches(task)
if isinstance(task, _TrainTask):
assert len(task.preds) == 1
trainable = tg.pipeline.steps_list()[task.step_id]
input_X, input_y = task.preds[0].batch.Xy # type: ignore
task.monoid = trainable.impl.to_monoid((input_X, input_y))
elif isinstance(task, _MetricTask):
assert len(task.preds) == 2
assert task.preds[0].step_id == _DUMMY_INPUT_STEP
assert scoring is not None
X, y_true = task.preds[0].batch.Xy # type: ignore
y_pred = task.preds[1].batch.y # type: ignore
task.mmonoid = scoring.to_monoid((y_true, y_pred, X))
if progress_callback is not None:
if batches_valid is None or len(batches_valid) == 0:
score_valid = float("nan")
else:
partially_trained = tg.extract_trained_pipeline(
None, n_batches_scanned
)
score_valid = scoring.score_estimator_batched(
partially_trained, batches_valid
)
progress_callback(
scoring.from_monoid(task.mmonoid),
score_valid,
n_batches_scanned,
end_of_scanned_batches,
)
else:
assert False, type(task)
elif operation is _Operation.COMBINE:
cache.load_input_batches(task)
if isinstance(task, _TrainTask):
assert all(isinstance(p, _TrainTask) for p in task.preds)
trainable = tg.pipeline.steps_list()[task.step_id]
monoids = (cast(_TrainTask, p).monoid for p in task.preds)
task.monoid = functools.reduce(lambda a, b: a.combine(b), monoids) # type: ignore
elif isinstance(task, _MetricTask):
scores = (cast(_MetricTask, p).mmonoid for p in task.preds)
task.mmonoid = functools.reduce(lambda a, b: a.combine(b), scores) # type: ignore
else:
assert False, type(task)
else:
assert False, operation
if verbose >= 2:
finish_time = time.time()
assert trace is not None
trace.append(_TraceRecord(task, finish_time - start_time))
mark_done(task)
if verbose >= 2:
tg.visualize(prio, call_depth + 1, trace)
assert trace is not None
print(_analyze_run_trace(cache.stats, trace))
def _run_tasks(
tg: _TaskGraph,
batches_train: Iterable[Tuple[Any, Any]],
batches_valid: Optional[List[Tuple[Any, Any]]],
scoring: Optional[MetricMonoidFactory],
cv,
unique_class_labels: List[Union[str, int, float]],
max_resident: Optional[int],
prio: Prio,
verbose: int,
progress_callback: Optional[Callable[[float, float, int, bool], None]],
call_depth: int,
) -> None:
if scoring is None and progress_callback is not None:
logger.warning("progress_callback only gets called if scoring is not None")
with _BatchCache(tg.all_tasks, max_resident, prio, verbose) as cache:
_run_tasks_inner(
tg,
batches_train,
batches_valid,
scoring,
cv,
unique_class_labels,
cache,
prio,
verbose,
progress_callback,
call_depth + 1,
)
def fit_with_batches(
pipeline: TrainablePipeline[TrainableIndividualOp],
batches_train: Iterable[Tuple[Any, Any]],
batches_valid: Optional[List[Tuple[Any, Any]]],
scoring: Optional[MetricMonoidFactory],
unique_class_labels: List[Union[str, int, float]],
max_resident: Optional[int],
prio: Prio,
partial_transform: Union[bool, str],
verbose: int,
progress_callback: Optional[Callable[[float, float, int, bool], None]],
) -> TrainedPipeline[TrainedIndividualOp]:
"""Replacement for the `fit` method on a pipeline (early interface, subject to change)."""
assert partial_transform in [False, "score", True]
need_metrics = scoring is not None
folds = ["d"]
with _create_tasks(
pipeline, folds, need_metrics, True, partial_transform, False
) as tg:
_run_tasks(
tg,
batches_train,
batches_valid,
scoring,
None,
unique_class_labels,
max_resident,
prio,
verbose,
progress_callback,
call_depth=2,
)
trained_pipeline = tg.extract_trained_pipeline(None, _ALL_BATCHES)
return trained_pipeline
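# Illustrative usage sketch, added for exposition only (not part of the original
# module). It shows how fit_with_batches might be called on a small batched
# dataset; `batches`, `X1`, `y1`, `X2`, `y2`, and the label set are made up, and
# PrioStep is assumed to be one of the Prio subclasses defined earlier in this module.
#
# >>> from lale.lib.sklearn import MinMaxScaler, SGDClassifier
# >>> trainable = MinMaxScaler() >> SGDClassifier()
# >>> batches = [(X1, y1), (X2, y2)]  # any iterable of (X, y) pairs
# >>> trained = fit_with_batches(
# ...     trainable, batches, None, scoring=None,
# ...     unique_class_labels=[0, 1], max_resident=None, prio=PrioStep(),
# ...     partial_transform=False, verbose=0, progress_callback=None)
# >>> y_hat = trained.predict(X2)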
def cross_val_score(
pipeline: TrainablePipeline[TrainableIndividualOp],
batches: Iterable[Tuple[Any, Any]],
scoring: MetricMonoidFactory,
cv,
unique_class_labels: List[Union[str, int, float]],
max_resident: Optional[int],
prio: Prio,
same_fold: bool,
verbose: int,
) -> List[float]:
"""Replacement for sklearn's `cross_val_score`_ function (early interface, subject to change).
.. _`cross_val_score`: https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.cross_val_score.html
"""
cv = sklearn.model_selection.check_cv(cv)
folds = [chr(ord("d") + i) for i in range(cv.get_n_splits())]
with _create_tasks(pipeline, folds, True, False, False, same_fold) as tg:
_run_tasks(
tg,
batches,
None,
scoring,
cv,
unique_class_labels,
max_resident,
prio,
verbose,
None,
call_depth=2,
)
scores = tg.extract_scores(scoring)
return scores
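# Illustrative usage sketch, added for exposition only (not part of the original
# module). `my_pipeline` and `batches` are hypothetical, `accuracy` stands for a
# MetricMonoidFactory (for example, one obtained from this package's metrics
# helpers), and PrioBatch is assumed to be one of the Prio subclasses defined
# earlier in this module.
#
# >>> scores = cross_val_score(
# ...     my_pipeline, batches, scoring=accuracy, cv=3,
# ...     unique_class_labels=[0, 1], max_resident=None, prio=PrioBatch(),
# ...     same_fold=True, verbose=0)
# >>> len(scores)  # one score per cross-validation fold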
def cross_validate(
pipeline: TrainablePipeline[TrainableIndividualOp],
batches: Iterable[Tuple[Any, Any]],
scoring: MetricMonoidFactory,
cv,
unique_class_labels: List[Union[str, int, float]],
max_resident: Optional[int],
prio: Prio,
same_fold: bool,
return_estimator: bool,
verbose: int,
) -> Dict[str, Union[List[float], List[TrainedPipeline]]]:
"""Replacement for sklearn's `cross_validate`_ function (early interface, subject to change).
.. _`cross_validate`: https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.cross_validate.html
"""
cv = sklearn.model_selection.check_cv(cv)
folds = [chr(ord("d") + i) for i in range(cv.get_n_splits())]
with _create_tasks(pipeline, folds, True, return_estimator, False, same_fold) as tg:
_run_tasks(
tg,
batches,
None,
scoring,
cv,
unique_class_labels,
max_resident,
prio,
verbose,
None,
call_depth=2,
)
result: Dict[str, Union[List[float], List[TrainedPipeline]]] = {}
result["test_score"] = tg.extract_scores(scoring)
if return_estimator:
result["estimator"] = [
tg.extract_trained_pipeline(held_out, _ALL_BATCHES)
for held_out in tg.folds
]
return result
| 57,980 | 38.123482 | 171 |
py
|
lale
|
lale-master/lale/lib/rasl/functions.py
|
# Copyright 2020 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
from typing import Any, Callable, Dict, Generic, List, Optional, TypeVar
import numpy as np
from typing_extensions import Protocol
from lale.helpers import _is_pandas_df, _is_spark_df
from ..dataframe import (
column_index,
count,
get_columns,
make_series_concat,
make_series_distinct,
select_col,
)
from .monoid import Monoid, MonoidFactory
try:
import pyspark.sql.functions
spark_installed = True
except ImportError:
spark_installed = False
class _column_distinct_count_data(Monoid):
def __init__(self, df, limit: Optional[int] = None):
self.limit = limit
self.df = make_series_distinct(df)
def __len__(self):
return count(self.df)
@property
def is_absorbing(self):
if self.limit is None:
return False
else:
return len(self) > self.limit
def combine(self, other: "_column_distinct_count_data"):
if self.is_absorbing:
return self
elif other.is_absorbing:
return other
else:
c = make_series_concat(self.df, other.df)
return _column_distinct_count_data(c, limit=self.limit)
# numpy or sparkdf or pandas
_Batch = Any
class count_distinct_column(MonoidFactory[_Batch, int, _column_distinct_count_data]):
"""
Counts the number of distinct elements in a given column. If a limit is specified,
then, once the limit is reached, the count may no longer be accurate
(but will always remain over the limit).
"""
def __init__(self, col: column_index, limit: Optional[int] = None):
self._col = col
self._limit = limit
def to_monoid(self, batch) -> _column_distinct_count_data:
c = select_col(batch, self._col)
return _column_distinct_count_data(c, limit=self._limit)
def from_monoid(self, monoid: _column_distinct_count_data) -> int:
return len(monoid)
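# Illustrative usage sketch, added for exposition only (not part of the original
# module): counting the distinct values of a hypothetical "state" column
# incrementally over two pandas batches `batch1` and `batch2`.
#
# >>> counter = count_distinct_column("state")
# >>> monoid = counter.to_monoid(batch1).combine(counter.to_monoid(batch2))
# >>> n_distinct = counter.from_monoid(monoid)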
class categorical_column(MonoidFactory[_Batch, bool, _column_distinct_count_data]):
"""
    Determines whether a column should be considered categorical,
    i.e., whether it has at most `threshold` distinct values.
"""
def __init__(self, col: column_index, threshold: int = 5):
self._col = col
self._threshold = threshold
def to_monoid(self, batch) -> _column_distinct_count_data:
c = select_col(batch, self._col)
return _column_distinct_count_data(c, limit=self._threshold)
def from_monoid(self, monoid: _column_distinct_count_data) -> bool:
return not monoid.is_absorbing
class make_categorical_column:
def __init__(self, threshold=5):
self._threshold = threshold
def __call__(self, col):
return categorical_column(col, threshold=self._threshold)
_D = TypeVar("_D", bound=Monoid)
class DictMonoid(Generic[_D], Monoid):
"""
Given a monoid, this class lifts it to a dictionary pointwise
"""
def __init__(self, m: Dict[Any, _D]):
self._m = m
def combine(self, other: "DictMonoid[_D]"):
r = {k: self._m[k].combine(other._m[k]) for k in self._m.keys()}
return DictMonoid(r)
@property
def is_absorbing(self):
return all(v.is_absorbing for v in self._m.values())
class ColumnSelector(MonoidFactory[_Batch, List[column_index], _D], Protocol):
def __call__(self, df) -> List[column_index]:
return self.from_monoid(self.to_monoid(df))
class ColumnMonoidFactory(ColumnSelector[DictMonoid[_D]]):
"""
Given a MonoidFactory for deciding if a given column is valid,
    this returns the list of valid columns.
"""
_makers: Optional[Dict[column_index, MonoidFactory[_Batch, bool, _D]]]
def __init__(
self, col_maker: Callable[[column_index], MonoidFactory[_Batch, bool, _D]]
):
self._col_maker = col_maker
self._makers = None
def _get_makers(self, df):
makers = self._makers
if makers is None:
indices = get_columns(df)
makers = {k: self._col_maker(k) for k in indices}
self._makers = makers
return makers
def to_monoid(self, batch):
makers = self._get_makers(batch)
return DictMonoid({k: v.to_monoid(batch) for k, v in makers.items()})
def from_monoid(self, monoid: DictMonoid[_D]) -> List[column_index]:
makers = self._makers
assert makers is not None
return [k for k, v in makers.items() if v.from_monoid(monoid._m[k])]
class categorical(ColumnMonoidFactory):
"""Creates a MonoidFactory (and callable) for projecting categorical columns with sklearn's ColumnTransformer or Lale's Project operator.
Parameters
----------
max_values : int
Maximum number of unique values in a column for it to be considered categorical.
Returns
-------
callable
Function that, given a dataset X, returns a list of columns,
containing either string column names or integer column indices."""
def __init__(self, max_values: int = 5):
super().__init__(make_categorical_column(max_values))
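# Illustrative usage sketch, added for exposition only (not part of the original
# module). As the docstring above says, `categorical` can be passed to Lale's
# Project operator; `df` is a hypothetical dataframe.
#
# >>> from lale.lib.rasl import Project
# >>> keep_cats = Project(columns=categorical(max_values=10))
# >>> keep_cats.fit(df).transform(df)  # keeps columns with at most 10 distinct values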
class date_time:
"""Creates a callable for projecting date/time columns with sklearn's ColumnTransformer or Lale's Project operator.
Parameters
----------
fmt : str
Format string for `strptime()`, see https://docs.python.org/3/library/datetime.html#strftime-strptime-behavior
Returns
-------
callable
Function that, given a dataset X, returns a list of columns,
containing either string column names or integer column indices."""
def __init__(self, fmt):
self._fmt = fmt
def __repr__(self):
return f"lale.lib.rasl.date_time(fmt={self._fmt})"
def __call__(self, X):
def is_date_time(column_values):
try:
for val in column_values:
if isinstance(val, str):
datetime.datetime.strptime(val, self._fmt)
else:
return False
except ValueError:
return False
return True
if _is_pandas_df(X):
result = [c for c in X.columns if is_date_time(X[c])]
elif isinstance(X, np.ndarray):
result = [c for c in range(X.shape[1]) if is_date_time(X[:, c])]
else:
raise TypeError(f"unexpected type {type(X)}")
return result
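# Illustrative usage sketch, added for exposition only (not part of the original
# module): keeping only date/time columns of a hypothetical dataframe `df`
# whose date strings look like "2021-03-01 10:30:00".
#
# >>> from lale.lib.rasl import Project
# >>> keep_dates = Project(columns=date_time(fmt="%Y-%m-%d %H:%M:%S"))
# >>> keep_dates.fit(df).transform(df)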
# functions for filter
def filter_isnan(df: Any, column_name: str):
if _is_pandas_df(df):
return df[df[column_name].isnull()]
elif spark_installed and _is_spark_df(df):
return df.filter(pyspark.sql.functions.isnan(df[column_name]))
else:
raise ValueError(
"the filter isnan supports only Pandas dataframes or spark dataframes."
)
def filter_isnotnan(df: Any, column_name: str):
if _is_pandas_df(df):
return df[df[column_name].notnull()]
elif spark_installed and _is_spark_df(df):
return df.filter(~pyspark.sql.functions.isnan(df[column_name]))
else:
raise ValueError(
"the filter isnotnan supports only Pandas dataframes or spark dataframes."
)
def filter_isnull(df: Any, column_name: str):
if _is_pandas_df(df):
return df[df[column_name].isnull()]
elif spark_installed and _is_spark_df(df):
return df.filter(pyspark.sql.functions.isnull(df[column_name]))
else:
        raise ValueError(
            "the filter isnull supports only Pandas dataframes or spark dataframes."
        )
def filter_isnotnull(df: Any, column_name: str):
if _is_pandas_df(df):
return df[df[column_name].notnull()]
elif spark_installed and _is_spark_df(df):
return df.filter(~pyspark.sql.functions.isnull(df[column_name]))
else:
        raise ValueError(
            "the filter isnotnull supports only Pandas dataframes or spark dataframes."
        )
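# Illustrative usage sketch, added for exposition only (not part of the original
# module): applying the filter helpers directly to a hypothetical pandas or
# Spark dataframe `df` with a possibly-missing "age" column.
#
# >>> rows_missing_age = filter_isnull(df, "age")
# >>> rows_with_age = filter_isnotnull(df, "age")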
| 8,505 | 29.818841 | 141 |
py
|
lale
|
lale-master/lale/lib/rasl/alias.py
|
# Copyright 2020, 2021 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import lale.datasets.data_schemas
import lale.docstrings
import lale.operators
class _AliasImpl:
def __init__(self, name=None):
self.name = name
@classmethod
def validate_hyperparams(cls, name=None, **hyperparams):
if name is None or not name.strip():
raise ValueError("Alias hyperparam 'name' cannot be None or empty.")
def transform(self, X):
return lale.datasets.data_schemas.add_table_name(X, self.name)
def viz_label(self) -> str:
return "Alias:\n" + str(self.name)
_hyperparams_schema = {
"allOf": [
{
"description": "This first sub-object lists all constructor arguments with their "
"types, one at a time, omitting cross-argument constraints, if any.",
"type": "object",
"additionalProperties": False,
"required": ["name"],
"relevantToOptimizer": [],
"properties": {
"name": {
"description": "The table name to be given to the output dataframe.",
"type": "string",
"pattern": "[^ ]",
},
},
}
]
}
_input_transform_schema = {
"type": "object",
"required": ["X"],
"additionalProperties": False,
"properties": {
"X": {
"description": "Input table or dataframe",
"type": "array",
"items": {"type": "array", "items": {"laleType": "Any"}},
"minItems": 1,
}
},
}
_output_transform_schema = {
"description": "Features; no restrictions on data type.",
"laleType": "Any",
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Relational algebra alias operator.",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.rasl.alias.html",
"type": "object",
"tags": {"pre": [], "op": ["transformer"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_transform": _input_transform_schema,
"output_transform": _output_transform_schema,
},
}
Alias = lale.operators.make_operator(_AliasImpl, _combined_schemas)
lale.docstrings.set_docstrings(Alias)
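# Illustrative usage sketch, added for exposition only (not part of the original
# module): tagging a hypothetical dataframe `df` with a table name that
# downstream relational operators can refer to.
#
# >>> from lale.datasets.data_schemas import get_table_name
# >>> aliased = Alias(name="customers").transform(df)
# >>> get_table_name(aliased)
# 'customers'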
| 2,837 | 29.847826 | 98 |
py
|
lale
|
lale-master/lale/lib/rasl/project.py
|
# Copyright 2019-2022 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, List, Optional
import numpy as np
from typing_extensions import Protocol
import lale.datasets.data_schemas
import lale.docstrings
import lale.operators
import lale.type_checking
from lale.expressions import it
from lale.lib.dataframe import column_index, get_columns
from lale.type_checking import is_schema, validate_is_schema
from .map import Map
from .monoid import Monoid, MonoidFactory
def _columns_schema_to_list(X, schema) -> List[column_index]:
s_all = lale.datasets.data_schemas._to_schema(X)
s_row = s_all["items"]
n_columns = s_row["minItems"]
assert n_columns == s_row["maxItems"]
s_cols = s_row["items"]
if isinstance(s_cols, dict):
if lale.type_checking.is_subschema(s_cols, schema):
result = get_columns(X)
else:
result = []
else:
assert isinstance(s_cols, list)
cols = get_columns(X)
result = [
cols[i]
for i in range(n_columns)
if lale.type_checking.is_subschema(s_cols[i], schema)
]
return result
class _ProjectMonoid(Monoid):
def __init__(self, columns: Monoid, drop_columns: Monoid):
self._columns = columns
self._drop_columns = drop_columns
def combine(self, other: "_ProjectMonoid"):
c = self._columns.combine(other._columns)
dc = self._drop_columns.combine(other._drop_columns)
return _ProjectMonoid(c, dc)
@property
def is_absorbing(self):
return self._columns.is_absorbing and self._drop_columns.is_absorbing
class _StaticMonoid(Monoid):
def __init__(self, v):
self._v = v
def combine(self, other: "_StaticMonoid"):
assert self._v == other._v
return self
@property
def is_absorbing(self):
return True
class _StaticMonoidFactory(MonoidFactory[Any, List[column_index], _StaticMonoid]):
def __init__(self, cl):
self._cl = cl
def to_monoid(self, batch):
cl = self._cl
if cl is None:
cl = get_columns(batch)
self._cl = cl
return _StaticMonoid(cl)
def from_monoid(self, monoid: _StaticMonoid):
return monoid._v
class _DynamicMonoidFactory(
MonoidFactory[Any, List[column_index], _StaticMonoid], Protocol
):
pass
class _CallableMonoidFactory(_DynamicMonoidFactory):
def __init__(self, c):
self._c = c
def to_monoid(self, batch):
c = self._c
if not isinstance(c, list):
assert callable(c)
c = c(batch)
self._c = c
return _StaticMonoid(c)
def from_monoid(self, monoid: _StaticMonoid):
return monoid._v
class _SchemaMonoidFactory(_DynamicMonoidFactory):
def __init__(self, c):
self._c = c
def to_monoid(self, batch):
c = self._c
if not isinstance(c, list):
assert isinstance(c, dict)
c = _columns_schema_to_list(batch, c)
self._c = c
return _StaticMonoid(c)
def from_monoid(self, monoid: _StaticMonoid):
return monoid._v
class _AllDataMonoidFactory(_CallableMonoidFactory):
"""This really needs all the data, and should not be considered a monoid.
    It is used to simplify the code, but does not enable monoidal fitting.
"""
def __init__(self, c):
super().__init__(c)
def get_column_factory(columns, kind) -> MonoidFactory:
if columns is None:
if kind == "passthrough":
return _StaticMonoidFactory(None)
else:
return _StaticMonoidFactory([])
elif isinstance(columns, list):
return _StaticMonoidFactory(columns)
elif isinstance(columns, MonoidFactory):
return columns
elif callable(columns):
return _AllDataMonoidFactory(columns)
elif isinstance(columns, dict):
validate_is_schema(columns)
return _SchemaMonoidFactory(columns)
else:
raise TypeError(f"type {type(columns)}, columns {columns}")
class _ProjectImpl:
def __init__(self, columns=None, drop_columns=None):
self._columns = get_column_factory(columns, "passthrough")
self._drop_columns = get_column_factory(drop_columns, "drop")
self._monoid = None
self._hyperparams = {"columns": columns, "drop_columns": drop_columns}
def __getattribute__(self, item):
        # we want to remove fit if static columns and drop_columns are available,
        # since the operator should then be considered already trained
if item == "fit":
omit_fit = False
try:
cols = super().__getattribute__("_columns")
drop_cols = super().__getattribute__("_drop_columns")
if isinstance(cols, _StaticMonoidFactory) and isinstance(
drop_cols, _StaticMonoidFactory
):
omit_fit = True
except AttributeError:
pass
if omit_fit:
                raise AttributeError(
                    "fit cannot be called on a Project that has static columns and drop_columns or has already been fit"
                )
elif item in ["to_monoid", "from_monoid", "partial_fit"]:
omit_monoid = False
try:
cols = super().__getattribute__("_columns")
drop_cols = super().__getattribute__("_drop_columns")
if isinstance(cols, _AllDataMonoidFactory) or isinstance(
drop_cols, _AllDataMonoidFactory
):
omit_monoid = True
except AttributeError:
pass
if omit_monoid:
raise AttributeError("monoidal operations not available")
return super().__getattribute__(item)
def _to_monoid_internal(self, xy):
df, _ = xy
col = self._columns.to_monoid(df)
dcol = self._drop_columns.to_monoid(df)
return _ProjectMonoid(col, dcol)
def to_monoid(self, batch):
return self._to_monoid_internal(batch)
def _from_monoid_internal(self, pm: _ProjectMonoid):
col = self._columns.from_monoid(pm._columns)
dcol = self._drop_columns.from_monoid(pm._drop_columns)
self._fit_columns = [c for c in col if c not in dcol]
def from_monoid(self, monoid: _ProjectMonoid):
self._from_monoid_internal(monoid)
_monoid: Optional[_ProjectMonoid]
def partial_fit(self, X, y=None):
if self._monoid is None or not self._monoid.is_absorbing:
lifted = self._to_monoid_internal((X, y))
if self._monoid is not None: # not first fit
lifted = self._monoid.combine(lifted)
self._from_monoid_internal(lifted)
return self
def _fit_internal(self, X, y=None):
lifted = self._to_monoid_internal((X, y))
self._from_monoid_internal(lifted)
return self
def fit(self, X, y=None):
return self._fit_internal(X, y)
def transform(self, X):
fitcols = getattr(self, "_fit_columns", None)
if fitcols is None:
self._fit_internal(X)
fitcols = getattr(self, "_fit_columns", None)
assert fitcols is not None
if isinstance(X, np.ndarray):
result = X[:, self._fit_columns] # type: ignore
else:
# use the rasl backend
cols = [
X.columns[x] if isinstance(x, int) else x for x in self._fit_columns
]
exprs = {c: it[c] for c in cols}
m = Map(columns=exprs)
assert isinstance(m, lale.operators.TrainedOperator)
result = m.transform(X)
# elif isinstance(X, pd.DataFrame):
# if len(self._fit_columns) == 0 or isinstance(self._fit_columns[0], int):
# result = X.iloc[:, self._fit_columns]
# else:
# result = X[self._fit_columns]
# else:
# raise TypeError(f"type {type(X)}")
s_X = lale.datasets.data_schemas._to_schema(X)
s_result = self._transform_schema_nocheck(s_X)
result = lale.datasets.data_schemas.add_schema(result, s_result, recalc=True)
result = lale.datasets.data_schemas.add_table_name(
result, lale.datasets.data_schemas.get_table_name(X)
)
return result
def _transform_schema_nocheck(self, s_X):
if hasattr(self, "_fit_columns"):
return self._transform_schema_fit_columns(s_X)
keep_cols = self._columns
drop_cols = self._drop_columns
known_keep_cols = False
known_drop_cols = False
if keep_cols is None:
known_keep_cols = True
elif isinstance(keep_cols, _SchemaMonoidFactory):
kc = keep_cols._c
keep_cols = kc
known_keep_cols = True
if drop_cols is None:
known_drop_cols = True
elif isinstance(drop_cols, _SchemaMonoidFactory):
dc = drop_cols._c
drop_cols = dc
known_drop_cols = True
if known_keep_cols and known_drop_cols:
return self._transform_schema_schema(s_X, keep_cols, drop_cols)
return s_X
def transform_schema(self, s_X):
"""Used internally by Lale for type-checking downstream operators."""
if is_schema(s_X):
return self._transform_schema_nocheck(s_X)
else:
X = lale.datasets.data_schemas.add_schema(s_X)
assert X is not None
self.fit(X)
return self._transform_schema_fit_columns(X.json_schema)
def _transform_schema_fit_columns(self, s_X):
s_X = lale.datasets.data_schemas._to_schema(s_X)
s_row = s_X["items"]
s_cols = s_row["items"]
n_columns = len(self._fit_columns)
if isinstance(s_cols, dict):
s_cols_result = s_cols
else:
name2i = {s_cols[i]["description"]: i for i in range(len(s_cols))}
keep_cols_i = [
name2i[col] if isinstance(col, str) else col
for col in self._fit_columns
]
s_cols_result = [s_cols[i] for i in keep_cols_i]
s_result = {
**s_X,
"items": {
**s_row,
"minItems": n_columns,
"maxItems": n_columns,
"items": s_cols_result,
},
}
return s_result
def _transform_schema_schema(self, s_X, s_keep, s_drop):
def is_keeper(column_schema):
if s_keep is not None:
if not lale.type_checking.is_subschema(column_schema, s_keep):
return False
if s_drop is not None:
if lale.type_checking.is_subschema(column_schema, s_drop):
return False
return True
s_X = lale.datasets.data_schemas._to_schema(s_X)
s_row = s_X["items"]
s_cols = s_row["items"]
if isinstance(s_cols, dict):
if is_keeper(s_cols):
s_row_result = s_row
else:
s_row_result = {"type": "array", "minItems": 0, "maxItems": 0}
else:
assert isinstance(s_cols, list)
s_cols_result = [s for s in s_cols if is_keeper(s)]
n_columns = len(s_cols_result)
s_row_result = {
"type": "array",
"minItems": n_columns,
"maxItems": n_columns,
"items": s_cols_result,
}
return {"type": "array", "items": s_row_result}
_hyperparams_schema = {
"allOf": [
{
"description": "This first sub-object lists all constructor arguments with their types, one at a time, omitting cross-argument constraints, if any.",
"type": "object",
"additionalProperties": False,
"required": ["columns", "drop_columns"],
"relevantToOptimizer": [],
"properties": {
"columns": {
"description": """The subset of columns to retain.
The supported column specification formats include some of the ones
from scikit-learn's ColumnTransformer_, and in addition, filtering by
using a JSON subschema_ check.
.. _ColumnTransformer: https://scikit-learn.org/stable/modules/generated/sklearn.compose.ColumnTransformer.html
.. _subschema: https://github.com/IBM/jsonsubschema""",
"anyOf": [
{
"enum": [None],
"description": "If not specified, keep all columns.",
},
{
"type": "array",
"items": {"type": "integer"},
"description": "Multiple columns by index.",
},
{
"type": "array",
"items": {"type": "string"},
"description": "Multiple Dataframe columns by names.",
},
{
"laleType": "callable",
"description": "Callable that is passed the input data X and can return a list of column names or indices.",
},
{
"type": "object",
"description": "Keep columns whose schema is a subschema of this JSON schema.",
},
],
"default": None,
},
"drop_columns": {
"description": """The subset of columns to remove.
The `drop_columns` argument supports the same formats as `columns`.
If both are specified, keep everything from `columns` that is not
also in `drop_columns`.""",
"anyOf": [
{
"enum": [None],
"description": "If not specified, drop no further columns.",
},
{
"type": "array",
"items": {"type": "integer"},
"description": "Multiple columns by index.",
},
{
"type": "array",
"items": {"type": "string"},
"description": "Multiple Dataframe columns by names.",
},
{
"laleType": "callable",
"description": "Callable that is passed the input data X and can return a list of column names or indices.",
},
{
"type": "object",
"description": "Remove columns whose schema is a subschema of this JSON schema.",
},
],
"default": None,
},
},
}
]
}
_input_fit_schema = {
"type": "object",
"required": ["X"],
"additionalProperties": False,
"properties": {
"X": {
"description": "Features; the outer array is over samples.",
"type": "array",
"items": {
"type": "array",
"items": {"anyOf": [{"type": "number"}, {"type": "string"}]},
},
},
"y": {"description": "Target for supervised learning (ignored)."},
},
}
_input_transform_schema = {
"type": "object",
"required": ["X"],
"additionalProperties": False,
"properties": {
"X": {
"description": "Features; the outer array is over samples.",
"type": "array",
"items": {
"type": "array",
"items": {"anyOf": [{"type": "number"}, {"type": "string"}]},
},
}
},
}
_output_transform_schema = {
"description": "Features; the outer array is over samples.",
"type": "array",
"items": {
"type": "array",
"items": {"anyOf": [{"type": "number"}, {"type": "string"}]},
},
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """Projection keeps a subset of the columns, like in relational algebra.
Examples
--------
>>> df = pd.DataFrame(data={'A': [1,2], 'B': ['x','y'], 'C': [3,4]})
>>> keep_numbers = Project(columns={'type': 'number'})
>>> keep_numbers.fit(df).transform(df)
NDArrayWithSchema([[1, 3],
[2, 4]])
""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.rasl.project.html",
"import_from": "lale.lib.rasl",
"type": "object",
"tags": {"pre": ["categoricals"], "op": ["transformer"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_transform": _input_transform_schema,
"output_transform": _output_transform_schema,
},
}
Project = lale.operators.make_operator(_ProjectImpl, _combined_schemas)
lale.docstrings.set_docstrings(Project)
| 17,851 | 33.596899 | 161 |
py
|
lale
|
lale-master/lale/lib/rasl/simple_imputer.py
|
# Copyright 2021 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numbers
import typing
from typing import Any, Tuple
import numpy as np
import pandas as pd
import lale.docstrings
import lale.operators
from lale.expressions import sum # pylint:disable=redefined-builtin
from lale.expressions import count, it, median, mode, replace
from lale.helpers import _is_df, _is_pandas_df, _is_spark_df
from lale.lib.dataframe import get_columns
from lale.lib.sklearn import simple_imputer
from lale.schemas import Enum
from .aggregate import Aggregate
from .map import Map
from .monoid import Monoid, MonoidableOperator
def _is_numeric_df(X):
if _is_pandas_df(X):
return X.shape[1] == X.select_dtypes(include=np.number).shape[1]
elif _is_spark_df(X):
from pyspark.sql.types import NumericType
numeric_cols = [
f.name for f in X.schema.fields if isinstance(f.dataType, NumericType)
]
for index_name in X.index_names:
if index_name in numeric_cols:
numeric_cols.remove(index_name)
return len(get_columns(X)) == len(numeric_cols)
else:
return False
def _is_string_df(X):
if _is_pandas_df(X):
return X.shape[1] == X.select_dtypes(include="object").shape[1]
elif _is_spark_df(X):
from pyspark.sql.types import StringType
numeric_cols = [
f.name for f in X.schema.fields if isinstance(f.dataType, StringType)
]
return len(get_columns(X)) == len(numeric_cols)
else:
return False
class _SimpleImputerMonoid(Monoid):
def __init__(self, *, feature_names_in_, lifted_statistics, strategy):
self.feature_names_in_ = feature_names_in_
self.lifted_statistics = lifted_statistics
self.strategy = strategy
def combine(self, other: "_SimpleImputerMonoid"):
assert list(self.feature_names_in_) == list(other.feature_names_in_)
if self.strategy == "constant":
assert self.lifted_statistics.equals(other.lifted_statistics)
combined_statistic = self.lifted_statistics
elif self.strategy == "mean":
combined_statistic = {}
combined_statistic["sum"] = (
self.lifted_statistics["sum"] + other.lifted_statistics["sum"]
)
combined_statistic["count"] = (
self.lifted_statistics["count"] + other.lifted_statistics["count"]
)
else:
            raise ValueError(
                "combine is only supported for imputation strategies `mean` and `constant`."
            )
return _SimpleImputerMonoid(
feature_names_in_=self.feature_names_in_,
lifted_statistics=combined_statistic,
strategy=self.strategy,
)
class _SimpleImputerImpl(MonoidableOperator[_SimpleImputerMonoid]):
def __init__(
self,
missing_values=np.nan,
strategy="mean",
fill_value=None,
verbose=0,
copy=True,
add_indicator=False,
):
self._hyperparams = {}
self._hyperparams["missing_values"] = missing_values
self._hyperparams["strategy"] = strategy
self._hyperparams["fill_value"] = fill_value
self._hyperparams["verbose"] = verbose
if not copy:
raise ValueError("This implementation only supports `copy=True`.")
self._hyperparams["copy"] = copy
if add_indicator:
raise ValueError("This implementation only supports `add_indicator=False`.")
self._hyperparams["add_indicator"] = add_indicator
        # the `indicator_` property is always None as we do not support `add_indicator=True`
self.indicator_ = None
def to_monoid(self, batch: Tuple[Any, Any]):
hyperparams = self._hyperparams
X, _ = batch
feature_names_in_ = get_columns(X)
agg_data = None
# learn the values to be imputed
strategy = hyperparams["strategy"]
if strategy == "constant":
fill_value = _SimpleImputerImpl._get_fill_value(X, hyperparams)
agg_data = [[fill_value for col in get_columns(X)]]
lifted_statistics = pd.DataFrame(agg_data, columns=get_columns(X))
elif strategy == "mean":
agg_op_sum = Aggregate(
columns={c: sum(it[c]) for c in get_columns(X)},
exclude_value=hyperparams["missing_values"],
)
agg_op_count = Aggregate(
columns={c: count(it[c]) for c in get_columns(X)},
exclude_value=hyperparams["missing_values"],
)
lifted_statistics = {}
agg_sum = agg_op_sum.transform(X)
if agg_sum is not None and _is_spark_df(agg_sum):
agg_sum = agg_sum.toPandas()
agg_count = agg_op_count.transform(X)
if agg_count is not None and _is_spark_df(agg_count):
agg_count = agg_count.toPandas()
lifted_statistics["sum"] = agg_sum
lifted_statistics["count"] = agg_count
else:
            raise ValueError(
                "SimpleImputer can create a Monoid only for imputation strategies `mean` and `constant`."
            )
return _SimpleImputerMonoid(
feature_names_in_=feature_names_in_,
lifted_statistics=lifted_statistics,
strategy=strategy,
)
def from_monoid(self, monoid: _SimpleImputerMonoid):
self._monoid = monoid
self.feature_names_in_ = monoid.feature_names_in_
self.n_features_in_ = len(self.feature_names_in_)
_lifted_statistics = monoid.lifted_statistics
strategy = self._hyperparams["strategy"]
if strategy == "constant":
self.statistics_ = _lifted_statistics.to_numpy()[0]
elif strategy == "mean":
self.statistics_ = (
_lifted_statistics["sum"] / _lifted_statistics["count"]
).to_numpy()[0]
self._transformer = None
def fit(self, X, y=None):
self._validate_input(X)
agg_op = None
agg_data = None
# learn the values to be imputed
if self._hyperparams["strategy"] in ["mean", "constant"]:
lifted = self.to_monoid((X, y))
self.from_monoid(lifted)
return self
elif self._hyperparams["strategy"] == "median":
agg_op = Aggregate(
columns={c: median(it[c]) for c in get_columns(X)},
exclude_value=self._hyperparams["missing_values"],
)
elif self._hyperparams["strategy"] == "most_frequent":
agg_op = Aggregate(
columns={c: mode(it[c]) for c in get_columns(X)},
exclude_value=self._hyperparams["missing_values"],
)
if agg_data is None and agg_op is not None:
agg_data = agg_op.transform(X)
self.feature_names_in_ = get_columns(X)
self.n_features_in_ = len(self.feature_names_in_)
if agg_data is not None and _is_spark_df(agg_data):
agg_data = agg_data.toPandas()
if agg_data is not None and _is_pandas_df(agg_data):
self.statistics_ = agg_data.to_numpy()[
0
] # Converting from a 2-d array to 1-d
self._transformer = None
return self
def _build_transformer(self):
# prepare the transformer
transformer = Map(
columns={
col_name: replace(
it[col_name],
{self._hyperparams["missing_values"]: self.statistics_[col_idx]},
)
for col_idx, col_name in enumerate(self.feature_names_in_)
}
)
return transformer
def transform(self, X):
if self._transformer is None:
self._transformer = self._build_transformer()
return self._transformer.transform(X)
@staticmethod
def _get_fill_value(X, hyperparams):
# assign appropriate value to fill_value depending on the datatype.
# default fill_value is 0 for numerical input and "missing_value"
# otherwise
if hyperparams["fill_value"] is None:
if _is_numeric_df(X):
fill_value = 0
else:
fill_value = "missing_value"
else:
fill_value = hyperparams["fill_value"]
# validate that fill_value is numerical for numerical data
if (
hyperparams["strategy"] == "constant"
and _is_numeric_df(X)
and not isinstance(fill_value, numbers.Real)
):
raise ValueError(
f"'fill_value'={fill_value} is invalid. Expected a "
f"numerical value when imputing numerical "
f"data"
)
return fill_value
def _validate_input(self, X):
# validate that the dataset is either a pandas dataframe or spark.
# For example, sparse matrix is not allowed.
if not _is_df(X):
raise ValueError(
f"""Unsupported type(X) {type(X)} for SimpleImputer.
Only pandas.DataFrame or pyspark.sql.DataFrame are allowed."""
)
# validate input to check the correct dtype and strategy
# `mean` and `median` are not applicable to string inputs
if not _is_numeric_df(X) and self._hyperparams["strategy"] in [
"mean",
"median",
]:
raise ValueError(
f"Cannot use {self._hyperparams['strategy']} strategy with non-numeric data."
)
# Check that missing_values are the right type
if _is_numeric_df(X) and not isinstance(
self._hyperparams["missing_values"], numbers.Real
):
raise ValueError(
f"'X' and 'missing_values' types are expected to be"
f" both numerical. Got X.dtypes={X.dtypes} and "
f" type(missing_values)={type(self._hyperparams['missing_values'])}."
)
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """Relational algebra reimplementation of scikit-learn's `SimpleImputer`_.
Works on both pandas and Spark dataframes by using `Aggregate`_ for `fit` and `Map`_ for `transform`, which in turn use the appropriate backend.
.. _`SimpleImputer`: https://scikit-learn.org/stable/modules/generated/sklearn.impute.SimpleImputer.html
.. _`Aggregate`: https://lale.readthedocs.io/en/latest/modules/lale.lib.rasl.aggregate.html
.. _`Map`: https://lale.readthedocs.io/en/latest/modules/lale.lib.rasl.map.html
""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.rasl.simple_imputer.html",
"type": "object",
"tags": {
"pre": [],
"op": ["transformer", "interpretable"],
"post": [],
},
"properties": {
"hyperparams": simple_imputer._hyperparams_schema,
"input_fit": simple_imputer._input_fit_schema,
"input_transform": simple_imputer._input_transform_schema,
"output_transform": simple_imputer._output_transform_schema,
},
}
SimpleImputer = lale.operators.make_operator(_SimpleImputerImpl, _combined_schemas)
SimpleImputer = typing.cast(
lale.operators.PlannedIndividualOp,
SimpleImputer.customize_schema(
copy=Enum(
values=[True],
desc="`copy=True` is the only value currently supported by this implementation",
default=True,
),
add_indicator=Enum(
values=[False],
desc="`add_indicator=False` is the only value currently supported by this implementation",
default=False,
),
),
)
lale.docstrings.set_docstrings(SimpleImputer)
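# Illustrative usage sketch, added for exposition only (not part of the original
# module): batched (monoidal) fitting on two hypothetical pandas batches
# `batch1` and `batch2` that contain missing values, assuming the usual rasl
# partial_fit idiom applies here.
#
# >>> imp = SimpleImputer(strategy="mean")
# >>> trained = imp.partial_fit(batch1)
# >>> trained = trained.partial_fit(batch2)  # statistics combined via the monoid
# >>> imputed = trained.transform(batch2)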
| 12,353 | 37.36646 | 144 |
py
|
lale
|
lale-master/lale/lib/rasl/spark_explainer.py
|
# Copyright 2020 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import Optional, Union
from lale.helpers import _is_spark_df
logger = logging.getLogger(__name__)
# from typing import Literal # raises a mypy error for <3.8, doesn't for >=3.8
#
# MODE_type = Union[
# Literal["simple", "extended", "codegen", "cost", "formatted"],
# ]
MODE_type = str
class SparkExplainer:
def __init__(
self, extended: Union[bool, MODE_type] = False, mode: Optional[MODE_type] = None
):
self._extended = extended
self._mode = mode
def __call__(self, X, y=None):
if not _is_spark_df(X):
            logger.warning(f"SparkExplainer called with non-Spark data of type {type(X)}")
else:
X.explain(extended=self._extended, mode=self._mode)
| 1,329 | 30.666667 | 88 |
py
|
lale
|
lale-master/lale/lib/rasl/sort_index.py
|
# Copyright 2022 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import lale.docstrings
import lale.operators
from lale.datasets.data_schemas import forward_metadata, get_index_names
from lale.helpers import _is_pandas_df, _is_pandas_series, _is_spark_df
class _SortIndexImpl:
def __init__(self, ascending=True):
self.ascending = ascending
def transform_schema(self, s_X):
return s_X
def transform(self, X):
if _is_pandas_df(X):
ordered_df = X.sort_index(ascending=self.ascending)
elif _is_spark_df(X):
index_cols = get_index_names(X) # type:ignore
ordered_df = X.orderBy(index_cols, ascending=self.ascending)
else:
            raise ValueError(
                "Only Pandas or Spark dataframes are supported as inputs. Please check that pyspark is installed if you see this error for a Spark dataframe."
            )
ordered_df = forward_metadata(X, ordered_df)
return ordered_df
def transform_X_y(self, X, y=None):
result_y = None
if y is not None:
assert _is_pandas_df(y) or _is_pandas_series(
y
), "transform_X_y is supported only when y is a Pandas Series or DataFrame."
result_y = y.sort_index(
ascending=self.ascending
) # assumes that y is always Pandas
result_X = self.transform(X)
return result_X, result_y
_hyperparams_schema = {
"allOf": [
{
"description": "This first sub-object lists all constructor arguments with their "
"types, one at a time, omitting cross-argument constraints, if any.",
"type": "object",
"additionalProperties": False,
"required": ["ascending"],
"relevantToOptimizer": [],
"properties": {
"ascending": {
"description": "Sort by index of the dataframe.",
"type": "boolean",
"default": True,
}
},
}
]
}
_input_transform_schema = {
"type": "object",
"required": ["X"],
"additionalProperties": False,
"properties": {
"X": {
"description": "The outer array is over rows.",
"anyOf": [
{"laleType": "Any"},
{
"type": "array",
"items": {
"description": "The inner array is over columns.",
"type": "array",
"items": {"laleType": "Any"},
},
},
],
}
},
}
_output_transform_schema = {
"description": "The outer array is over rows.",
"anyOf": [
{
"type": "array",
"items": {
"description": "The inner array is over columns.",
"type": "array",
"items": {"laleType": "Any"},
},
},
{"laleType": "Any"},
],
}
_input_transform_X_y_schema = {
"type": "object",
"required": ["X", "y"],
"additionalProperties": False,
"properties": {
"X": {
"description": "Input features as numpy, pandas, or PySpark.",
"type": "array",
"items": {"type": "array", "items": {"laleType": "Any"}},
},
"y": {
"anyOf": [
{"enum": [None]},
{
"description": "Input labels as numpy, pandas, or PySpark.",
"type": "array",
"items": {"laleType": "Any"},
},
],
},
},
}
_output_transform_X_y_schema = {
"type": "array",
"laleType": "tuple",
"items": [
{
"description": "X",
"type": "array",
"items": {"type": "array", "items": {"laleType": "Any"}},
},
{
"anyOf": [
{"enum": [None]},
{
"description": "Input labels as numpy, pandas, or PySpark.",
"type": "array",
"items": {"laleType": "Any"},
},
],
},
],
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "SortIndex operator.",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.rasl.sort_index.html",
"type": "object",
"tags": {"pre": [], "op": ["transformer"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_transform": _input_transform_schema,
"output_transform": _output_transform_schema,
"input_transform_X_y": _input_transform_X_y_schema,
"output_transform_X_y": _output_transform_X_y_schema,
},
}
SortIndex = lale.operators.make_operator(_SortIndexImpl, _combined_schemas)
lale.docstrings.set_docstrings(SortIndex)
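# Illustrative usage sketch, added for exposition only (not part of the original
# module): sorting a hypothetical pandas dataframe `X` and label series `y` by
# their index while keeping them aligned.
#
# >>> op = SortIndex(ascending=True)
# >>> X_sorted = op.transform(X)
# >>> X_sorted, y_sorted = op.transform_X_y(X, y)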
| 5,491 | 30.204545 | 157 |
py
|
lale
|
lale-master/lale/lib/rasl/orderby.py
|
# Copyright 2020 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ast
from typing import List, Tuple
import lale.docstrings
import lale.operators
from lale.datasets.data_schemas import forward_metadata
from lale.expressions import Expr
from lale.helpers import (
_is_ast_attribute,
_is_ast_subscript,
_is_pandas_df,
_is_spark_df,
)
class _OrderByImpl:
def __init__(self, by=None):
self.by = by
def transform_schema(self, s_X):
return s_X
def _get_order_key(self, expr_to_parse) -> Tuple[str, bool]:
order_asc: bool = True
col: str
if isinstance(expr_to_parse, Expr):
expr_to_parse = expr_to_parse.expr
if isinstance(expr_to_parse, ast.Call):
op = expr_to_parse.func
if isinstance(op, ast.Name):
name = op.id
if name == "asc":
order_asc = True
elif name == "desc":
order_asc = False
else:
raise ValueError(
"OrderBy descriptor expressions must be either asc or desc"
)
else:
raise ValueError(
"OrderBy expressions must be a string or an order descriptor (asc, desc)"
)
# for now, we only support single argument predicates
if len(expr_to_parse.args) != 1:
                    raise ValueError(
                        "OrderBy predicates do not support multiple arguments",
                    )
arg = expr_to_parse.args[0]
else:
arg = expr_to_parse
else:
arg = expr_to_parse
if isinstance(arg, str):
col = arg
elif isinstance(arg, ast.Name):
col = arg.id # type: ignore
elif hasattr(ast, "Constant") and isinstance(arg, ast.Constant):
col = arg.value # type: ignore
elif hasattr(ast, "Str") and isinstance(arg, ast.Str):
col = arg.s
elif _is_ast_subscript(arg):
col = arg.slice.value.s # type: ignore
elif _is_ast_attribute(arg):
col = arg.attr # type: ignore
else:
raise ValueError(
"OrderBy parameters only support string, subscript or dot notation for the column name. For example, it.col_name or it['col_name']."
)
return col, order_asc
def transform(self, X):
by = self.by
orders: List[Tuple[str, bool]]
if isinstance(by, list):
orders = [self._get_order_key(k) for k in by]
else:
orders = [self._get_order_key(by)]
cols: List[str] = [col for col, _ in orders]
ascs: List[bool] = [asc for _, asc in orders]
if _is_pandas_df(X):
ordered_df = X.sort_values(by=cols, ascending=ascs)
elif _is_spark_df(X):
ordered_df = X.orderBy(cols, ascending=ascs)
else:
            raise ValueError(
                "Only Pandas or Spark dataframes are supported as inputs. Please check that pyspark is installed if you see this error for a Spark dataframe."
            )
ordered_df = forward_metadata(X, ordered_df)
return ordered_df
_hyperparams_schema = {
"allOf": [
{
"description": "This first sub-object lists all constructor arguments with their "
"types, one at a time, omitting cross-argument constraints, if any.",
"type": "object",
"additionalProperties": False,
"required": ["by"],
"relevantToOptimizer": [],
"properties": {
"by": {"description": "OrderBy key(s).", "laleType": "Any"},
},
}
]
}
_input_transform_schema = {
"type": "object",
"required": ["X"],
"additionalProperties": False,
"properties": {
"X": {
"description": "The outer array is over rows.",
"anyOf": [
{"laleType": "Any"},
{
"type": "array",
"items": {
"description": "The inner array is over columns.",
"type": "array",
"items": {"laleType": "Any"},
},
},
],
}
},
}
_output_transform_schema = {
"description": "The outer array is over rows.",
"anyOf": [
{
"type": "array",
"items": {
"description": "The inner array is over columns.",
"type": "array",
"items": {"laleType": "Any"},
},
},
{"laleType": "Any"},
],
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Relational algebra OrderBy (sort) operator.",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.rasl.orderby.html",
"type": "object",
"tags": {"pre": [], "op": ["transformer"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_transform": _input_transform_schema,
"output_transform": _output_transform_schema,
},
}
OrderBy = lale.operators.make_operator(_OrderByImpl, _combined_schemas)
lale.docstrings.set_docstrings(OrderBy)
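# Illustrative usage sketch, added for exposition only (not part of the original
# module): ordering a hypothetical dataframe `df` by an ascending and a
# descending key, assuming the asc/desc helpers exported by lale.expressions.
#
# >>> from lale.expressions import it, desc
# >>> ordered = OrderBy(by=[it.age, desc(it.salary)]).transform(df)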
| 5,973 | 32.188889 | 157 |
py
|
lale
|
lale-master/lale/lib/rasl/scores.py
|
# Copyright 2021 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Tuple, TypeVar
import numpy as np
from scipy import special
from typing_extensions import Protocol
from lale.expressions import count as agg_count
from lale.expressions import it
from lale.expressions import sum as agg_sum
from lale.helpers import _ensure_pandas, _is_pandas_series
from lale.lib.dataframe import get_columns
from lale.lib.rasl import Aggregate, ConcatFeatures, GroupBy, Map
from .monoid import Monoid, MonoidFactory
ScoreMonoid = Monoid
_InputType = Tuple[Any, Any] # TODO: be more precise?
_OutputType = Tuple[float, float]
_M = TypeVar("_M", bound=ScoreMonoid)
class ScoreMonoidFactory(MonoidFactory[_InputType, _OutputType, _M], Protocol):
def score(self, X, y) -> Tuple[float, float]:
return self.from_monoid(self.to_monoid((X, y)))
class FOnewayData(Monoid):
def __init__(
self,
*,
classes,
n_samples_per_class,
n_samples,
ss_alldata,
sums_samples,
sums_alldata,
):
"""
Parameters
----------
classes: list
The list of classes.
n_samples_per_class: dictionary
The number of samples in each class.
n_samples: number
The total number of samples.
ss_alldata: array
            The sum of squares of each feature.
        sums_samples: dictionary
            The sum of each feature per class.
        sums_alldata: array
            The sum of each feature.
"""
self.classes = classes
self.n_samples_per_class = n_samples_per_class
self.n_samples = n_samples
self.ss_alldata = ss_alldata
self.sums_samples = sums_samples
self.sums_alldata = sums_alldata
def combine(self, other: "FOnewayData"):
classes_a = self.classes
n_samples_per_class_a = self.n_samples_per_class
n_samples_a = self.n_samples
ss_alldata_a = self.ss_alldata
sums_samples_a = self.sums_samples
sums_alldata_a = self.sums_alldata
classes_b = other.classes
n_samples_per_class_b = other.n_samples_per_class
n_samples_b = other.n_samples
ss_alldata_b = other.ss_alldata
sums_samples_b = other.sums_samples
sums_alldata_b = other.sums_alldata
classes = list(set(classes_a + classes_b))
n_samples_per_class = {
k: (n_samples_per_class_a[k] if k in n_samples_per_class_a else 0)
+ (n_samples_per_class_b[k] if k in n_samples_per_class_b else 0)
for k in classes
}
n_samples = n_samples_a + n_samples_b
ss_alldata = ss_alldata_a + ss_alldata_b
sums_samples = {
k: (sums_samples_a[k] if k in sums_samples_a else 0)
+ (sums_samples_b[k] if k in sums_samples_b else 0)
for k in classes
}
sums_alldata = sums_alldata_a + sums_alldata_b
return FOnewayData(
classes=classes,
n_samples_per_class=n_samples_per_class,
n_samples=n_samples,
ss_alldata=ss_alldata,
sums_samples=sums_samples,
sums_alldata=sums_alldata,
)
def _gen_name(base, avoid):
if base not in avoid:
return base
cpt = 0
while f"{base}{cpt}" in avoid:
cpt += 1
return f"{base}{cpt}"
# The following function is a rewriting of sklearn.feature_selection.f_oneway
# Compared to the sklearn.feature_selection.f_oneway implementation it
# takes as input the dataset and the target vector.
# Moreover, the function is split into two parts: `_f_oneway_lift` and
# `_f_oneway_lower`.
def _f_oneway_lift(X, y) -> FOnewayData:
"""Prepare the data for a 1-way ANOVA.
Parameters
----------
X: array
The sample measurements.
y: array
The target vector.
Returns
-------
monoid: FOnewayData
        The intermediate data that can be combined for incremental computation.
"""
if get_columns(y)[0] is None:
if _is_pandas_series(y):
y = y.rename(_gen_name("target", get_columns(X)))
Xy = ConcatFeatures().transform([X, y])
X_by_y = GroupBy(by=[it[get_columns(y)[0]]]).transform(Xy)
agg_sum_cols = Aggregate(columns={col: agg_sum(it[col]) for col in get_columns(X)})
sums_samples = _ensure_pandas(agg_sum_cols.transform(X_by_y))
n_samples_per_class = Aggregate(
columns={"n_samples_per_class": agg_count(it[get_columns(X)[0]])}
).transform(X_by_y)
n_samples = _ensure_pandas(
Aggregate(columns={"sum": agg_sum(it["n_samples_per_class"])}).transform(
n_samples_per_class
)
)["sum"][0]
sqr_cols = Map(columns={col: it[col] ** 2 for col in get_columns(X)})
ss_alldata = _ensure_pandas((sqr_cols >> agg_sum_cols).transform(X)).loc[0]
sums_alldata = _ensure_pandas(agg_sum_cols.transform(X)).loc[0].to_numpy()
n_samples_per_class = _ensure_pandas(n_samples_per_class).to_dict()[
"n_samples_per_class"
]
classes = list(n_samples_per_class.keys())
sums_samples = {k: sums_samples.loc[k].to_numpy() for k in classes}
return FOnewayData(
classes=classes,
n_samples_per_class=n_samples_per_class,
n_samples=n_samples,
ss_alldata=ss_alldata,
sums_samples=sums_samples,
sums_alldata=sums_alldata,
)
def _f_oneway_lower(lifted: FOnewayData):
"""Performs a 1-way ANOVA.
Parameters
----------
lifted : FOnewayData
The result of `to_monoid`.
Returns
-------
F-value : float
The computed F-value of the test.
p-value : float
The associated p-value from the F-distribution.
"""
classes = lifted.classes
n_samples_per_class = lifted.n_samples_per_class
n_samples = lifted.n_samples
ss_alldata = lifted.ss_alldata
sums_samples = lifted.sums_samples
sums_alldata = lifted.sums_alldata
n_classes = len(classes)
square_of_sums_alldata = sums_alldata**2
square_of_sums_args = {k: s**2 for k, s in sums_samples.items()}
sstot = ss_alldata - square_of_sums_alldata / float(n_samples)
ssbn = 0.0
for k in n_samples_per_class:
ssbn += square_of_sums_args[k] / n_samples_per_class[k]
ssbn -= square_of_sums_alldata / float(n_samples)
sswn = sstot - ssbn
dfbn = n_classes - 1
dfwn = n_samples - n_classes
msb = ssbn / float(dfbn)
msw = sswn / float(dfwn)
# constant_features_idx = np.where(msw == 0.0)[0]
# if (np.nonzero(msb)[0].size != msb.size and constant_features_idx.size):
# warnings.warn("Features %s are constant." % constant_features_idx,
# UserWarning)
f = msb / msw
# flatten matrix to vector in sparse case
f = np.asarray(f).ravel()
prob = special.fdtrc(dfbn, dfwn, f)
return f, prob
class FClassif(ScoreMonoidFactory[FOnewayData]):
"""Compute the ANOVA F-value for the provided sample."""
def to_monoid(self, batch: Tuple[Any, Any]) -> FOnewayData:
X, y = batch
return _f_oneway_lift(X, y)
def from_monoid(self, monoid: FOnewayData):
return _f_oneway_lower(monoid)
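# Illustrative usage sketch, added for exposition only (not part of the original
# module): computing ANOVA F-values incrementally over two hypothetical
# (X, y) batches.
#
# >>> fc = FClassif()
# >>> monoid = fc.to_monoid((X1, y1)).combine(fc.to_monoid((X2, y2)))
# >>> f_values, p_values = fc.from_monoid(monoid)
# >>> f_values, p_values = fc.score(X1, y1)  # single-batch shortcut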
| 7,753 | 32.5671 | 87 |
py
|
lale
|
lale-master/lale/lib/rasl/standard_scaler.py
|
# Copyright 2021 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import typing
from typing import Any, Tuple
import numpy as np
import lale.docstrings
import lale.helpers
import lale.operators
from lale.expressions import it
from lale.expressions import sum as agg_sum
from lale.lib.dataframe import count, get_columns
from lale.lib.sklearn import standard_scaler
from .aggregate import Aggregate
from .map import Map
from .monoid import Monoid, MonoidableOperator
def scale(X, **kwargs):
return StandardScaler(**kwargs).fit(X).transform(X)
class _StandardScalerMonoid(Monoid):
def __init__(self, *, feature_names_in_, n_samples_seen_, _sum1, _sum2):
self.feature_names_in_ = feature_names_in_
self.n_samples_seen_ = n_samples_seen_
self._sum1 = _sum1
self._sum2 = _sum2
def combine(self, other: "_StandardScalerMonoid"):
assert list(self.feature_names_in_) == list(other.feature_names_in_)
combined_feat = self.feature_names_in_
combined_n_samples_seen = self.n_samples_seen_ + other.n_samples_seen_
if self._sum1 is None:
combined_sum1 = None
else:
assert other._sum1 is not None and len(self._sum1) == len(other._sum1)
combined_sum1 = self._sum1 + other._sum1
if self._sum2 is None:
combined_sum2 = None
else:
assert other._sum2 is not None and len(self._sum2) == len(other._sum2)
combined_sum2 = self._sum2 + other._sum2
return _StandardScalerMonoid(
feature_names_in_=combined_feat,
n_samples_seen_=combined_n_samples_seen,
_sum1=combined_sum1,
_sum2=combined_sum2,
)
class _StandardScalerImpl(MonoidableOperator[_StandardScalerMonoid]):
def __init__(self, *, copy=True, with_mean=True, with_std=True):
self._hyperparams = {"copy": copy, "with_mean": with_mean, "with_std": with_std}
self.with_mean = with_mean
def transform(self, X, copy=None):
if self._transformer is None:
self._transformer = self._build_transformer()
return self._transformer.transform(X)
def get_feature_names_out(self, input_features):
assert input_features == self.feature_names_in_
return self.feature_names_in_
@property
def n_samples_seen_(self):
return getattr(self._monoid, "n_samples_seen_", 0)
@property
def feature_names_in_(self):
return getattr(self._monoid, "feature_names_in_", None)
def from_monoid(self, monoid: _StandardScalerMonoid):
self._monoid = monoid
n = monoid.n_samples_seen_
if self._hyperparams["with_std"]:
# Table 1 of http://www.vldb.org/pvldb/vol8/p702-tangwongsan.pdf
self.var_ = (monoid._sum2 - monoid._sum1 * monoid._sum1 / n) / n
self.scale_ = np.where(self.var_ == 0.0, 1.0, np.sqrt(self.var_))
else:
self.var_ = None
self.scale_ = None
if self._hyperparams["with_mean"]:
self.mean_ = monoid._sum1 / n
else:
self.mean_ = None
self.n_features_in_ = len(monoid.feature_names_in_)
self._transformer = None
def _build_transformer(self):
def scale_expr(col_idx, col_name):
expr = it[col_name]
if self.mean_ is not None:
expr = expr - self.mean_[col_idx]
if self.scale_ is not None:
expr = expr / self.scale_[col_idx]
return expr
assert self._monoid is not None
result = Map(
columns={
col_name: scale_expr(col_idx, col_name)
for col_idx, col_name in enumerate(self._monoid.feature_names_in_)
}
)
return result
def to_monoid(self, batch: Tuple[Any, Any]):
X, _ = batch
hyperparams = self._hyperparams
feature_names_in = get_columns(X)
n_samples_seen = count(X)
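        # sum1 holds the per-column sums and sum2 the per-column sums of squares;
        # together with n_samples_seen they are the sufficient statistics that
        # from_monoid needs to recover mean_ and var_.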
if hyperparams["with_mean"] or hyperparams["with_std"]:
sum1_op = Aggregate(columns={c: agg_sum(it[c]) for c in feature_names_in})
sum1_data = lale.helpers._ensure_pandas(sum1_op.transform(X))
sum1 = sum1_data[feature_names_in].values[0]
else:
sum1 = None
if hyperparams["with_std"]:
sum2_op = Map(
columns={c: it[c] * it[c] for c in feature_names_in}
) >> Aggregate(columns={c: agg_sum(it[c]) for c in feature_names_in})
sum2_data = lale.helpers._ensure_pandas(sum2_op.transform(X))
sum2 = sum2_data[feature_names_in].values[0]
else:
sum2 = None
return _StandardScalerMonoid(
feature_names_in_=feature_names_in,
n_samples_seen_=n_samples_seen,
_sum1=sum1,
_sum2=sum2,
)
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """Relational algebra reimplementation of scikit-learn's `StandardScaler`_ transformer that standardizes features by removing the mean and scaling to unit variance.
Works on both pandas and Spark dataframes by using `Aggregate`_ for `fit` and `Map`_ for `transform`, which in turn use the appropriate backend.
.. _`StandardScaler`: https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.StandardScaler.html
.. _`Aggregate`: https://lale.readthedocs.io/en/latest/modules/lale.lib.rasl.aggregate.html
.. _`Map`: https://lale.readthedocs.io/en/latest/modules/lale.lib.rasl.map.html
""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.rasl.standard_scaler.html",
"type": "object",
"tags": {
"pre": ["~categoricals"],
"op": ["transformer", "interpretable"],
"post": [],
},
"properties": {
"hyperparams": standard_scaler._hyperparams_schema,
"input_fit": standard_scaler._input_fit_schema,
"input_transform": standard_scaler._input_transform_schema,
"output_transform": standard_scaler._output_transform_schema,
},
}
StandardScaler = lale.operators.make_operator(_StandardScalerImpl, _combined_schemas)
StandardScaler = typing.cast(
lale.operators.PlannedIndividualOp,
StandardScaler.customize_schema(
copy={
"enum": [True],
"description": "This implementation only supports `copy=True`.",
"default": True,
},
),
)
lale.docstrings.set_docstrings(StandardScaler)
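# Minimal usage sketch, assuming a pandas dataframe with a single numeric
# column "x" (Spark dataframes work the same way via the Aggregate/Map backends):
#
#     import pandas as pd
#     df = pd.DataFrame({"x": [1.0, 2.0, 3.0, 4.0]})
#     trained = StandardScaler().fit(df)
#     standardized = trained.transform(df)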
| 7,048 | 36.494681 | 183 |
py
|
lale
|
lale-master/lale/lib/rasl/aggregate.py
|
# Copyright 2020 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ast
import numpy as np
import pandas as pd
import lale.docstrings
import lale.expressions
import lale.operators
from lale.datasets.data_schemas import (
SparkDataFrameWithIndex,
add_table_name,
get_table_name,
)
try:
import pyspark.sql
import pyspark.sql.functions
from pyspark.sql.functions import col, isnan, when
spark_installed = True
except ImportError:
spark_installed = False
class _AggregateImpl:
def __init__(self, columns, group_by=None, exclude_value=None):
if group_by is None:
group_by = []
self.columns = columns
self.group_by = group_by
self.exclude_value = exclude_value
    # The validation is commented out for now to pass the OBM test cases.
    # It can be re-enabled once OBM supports the new format of the Aggregate operator.
# @classmethod
# def validate_hyperparams(cls, group_by=None, **hyperparams):
# if group_by is not None:
# raise ValueError(
# "The use of group_by in Aggregate is deprecated. Please use the GroupBy operator instead."
# )
def transform(self, X):
if not isinstance(self.columns, dict):
raise ValueError(
"Aggregate 'columns' parameter should be of dictionary type."
)
agg_info = []
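        # Each entry of agg_info is (output column, input column, aggregation name);
        # a plain `it["col"]` reference (no function call) is treated as "first".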
for new_col_name, expr in self.columns.items():
if isinstance(expr.expr, ast.Call):
agg_func_name = expr.expr.func.id # type: ignore
old_col_name = lale.expressions._it_column(expr.expr.args[0])
else:
agg_func_name = "first"
old_col_name = lale.expressions._it_column(expr.expr)
agg_info.append((new_col_name, old_col_name, agg_func_name))
if isinstance(X, (pd.DataFrame, pd.core.groupby.generic.DataFrameGroupBy)):
aggregated_df = self._transform_pandas(X, agg_info)
elif isinstance(X, (pyspark.sql.DataFrame, pyspark.sql.GroupedData)): # type: ignore
aggregated_df = self._transform_spark(X, agg_info)
else:
raise ValueError(f"Unsupported type(X) {type(X)} for Aggregate.")
named_aggregated_df = add_table_name(aggregated_df, get_table_name(X))
return named_aggregated_df
def _transform_pandas(self, X, agg_info):
is_grouped = isinstance(X, pd.core.groupby.generic.DataFrameGroupBy)
if is_grouped:
_, first_group = next(iter(X)) # TODO: what if zero groups?
value_columns = first_group.columns
else:
value_columns = X.columns
def eval_agg_pandas(old_col_name, agg_func_name):
if agg_func_name == "collect_set":
agg_func_name = "unique"
elif agg_func_name == "mode":
agg_func_name = (
lambda x: x.value_counts() # pylint:disable=unnecessary-lambda-assignment
.sort_index(ascending=False)
.sort_values(ascending=False)
.index[0]
) # noqa
if is_grouped and old_col_name not in value_columns:
idx = X.count().index
if old_col_name not in idx.names:
raise KeyError(old_col_name, value_columns, idx.names)
if agg_func_name != "first":
raise ValueError(
"Expected plain group-by column access it['{old_col_name}'], found function '{agg_func_name}'"
)
return idx.get_level_values(old_col_name)
X_old_col = X[old_col_name]
if self.exclude_value is not None:
return X_old_col[~X_old_col.isin([self.exclude_value])].agg(
agg_func_name
)
else:
return X_old_col.agg(agg_func_name)
aggregated_columns = {
new_col_name: eval_agg_pandas(old_col_name, agg_func_name)
for new_col_name, old_col_name, agg_func_name in agg_info
}
if is_grouped:
aggregated_df = pd.DataFrame(aggregated_columns)
else:
aggregated_df = pd.DataFrame.from_records([aggregated_columns])
return aggregated_df
def _transform_spark(self, X, agg_info):
def create_spark_agg_expr(new_col_name, old_col_name, agg_func_name):
if agg_func_name == "median":
agg_func_name = "percentile_approx"
func = getattr(pyspark.sql.functions, agg_func_name)
if agg_func_name == "percentile_approx":
if self.exclude_value is not None:
result = func(self._get_exclude_when_expr(old_col_name), 0.5).alias(
new_col_name
)
else:
result = func(old_col_name, 0.5).alias(new_col_name)
else:
if self.exclude_value is not None:
result = func(self._get_exclude_when_expr(old_col_name)).alias(
new_col_name
)
else:
result = func(old_col_name).alias(new_col_name)
return result
agg_expr = []
mode_column_names = []
for new_col_name, old_col_name, agg_func_name in agg_info:
if agg_func_name != "mode":
agg_expr.append(
create_spark_agg_expr(new_col_name, old_col_name, agg_func_name)
)
else:
mode_column_names.append((new_col_name, old_col_name))
if len(agg_expr) == 0:
# This means that all the aggregate expressions were mode.
# For that case, compute the mean first, so that the dataframe has the right shape
# and replace the mean with mode next
agg_expr = [
create_spark_agg_expr(new_col_name, old_col_name, "mean")
for new_col_name, old_col_name, _ in agg_info
]
aggregated_df = X.agg(*agg_expr)
if len(mode_column_names) > 0:
if isinstance(X, pyspark.sql.GroupedData):
raise ValueError(
"Mode is not supported as an aggregate immediately after GroupBy for Spark dataframes."
)
from pyspark.sql.functions import lit
for new_col_name, old_col_name in mode_column_names:
if self.exclude_value is not None:
if self.exclude_value in [np.nan, "nan"]:
filter_expr = ~isnan(old_col_name)
else:
filter_expr = col(old_col_name) != self.exclude_value
aggregated_df = aggregated_df.withColumn(
new_col_name,
lit(
X.filter(filter_expr)
.groupby(old_col_name)
.count()
.orderBy("count", ascending=False)
.first()[0]
),
)
else:
aggregated_df = aggregated_df.withColumn(
new_col_name,
lit(
X.groupby(old_col_name)
.count()
.orderBy("count", ascending=False)
.first()[0]
),
)
keep_columns = [new_col_name for new_col_name, _, _ in agg_info]
        drop_columns = [c for c in aggregated_df.columns if c not in keep_columns]
aggregated_df = SparkDataFrameWithIndex(aggregated_df, index_names=drop_columns)
return aggregated_df
def _get_exclude_when_expr(self, col_name):
if self.exclude_value is not None:
if self.exclude_value in [np.nan, "nan"]:
when_expr = when(~isnan(col_name), col(col_name))
else:
when_expr = when(
col(col_name) != self.exclude_value,
col(col_name),
)
else:
when_expr = None
return when_expr
_hyperparams_schema = {
"allOf": [
{
"description": "This first sub-object lists all constructor arguments with their types, one at a time, omitting cross-argument constraints, if any.",
"type": "object",
"additionalProperties": False,
"relevantToOptimizer": [],
"properties": {
"columns": {
"description": "Aggregations for producing output columns.",
"anyOf": [
{
"description": "Dictionary of output column names and aggregation expressions.",
"type": "object",
"additionalProperties": {"laleType": "expression"},
},
{
"description": "List of aggregation expressions. The output column name is determined by a heuristic based on the input column name and the transformation function.",
"type": "array",
"items": {"laleType": "expression"},
},
],
"default": [],
},
"group_by": {
"description": "Group by columns for aggregates.",
"anyOf": [
{
"description": "Expressions for columns name if there is a single column.",
"laleType": "expression",
},
{
"description": "List of expressions for columns.",
"type": "array",
"items": {"laleType": "expression"},
},
],
"default": [],
},
"exclude_value": {
"description": "Exclude this value in computation of aggregates. Useful for missing value imputation.",
"laleType": "Any",
"default": None,
},
},
}
]
}
_input_transform_schema = {
"type": "object",
"required": ["X"],
"additionalProperties": False,
"properties": {
"X": {
"description": "Output of the group by operator - Pandas / Pyspark grouped dataframe",
"type": "array",
"items": {"type": "array", "items": {"laleType": "Any"}},
"minItems": 1,
}
},
}
_output_transform_schema = {
"description": "The outer array is over rows.",
"type": "array",
"items": {
"description": "The inner array is over columns.",
"type": "array",
"items": {"laleType": "Any"},
},
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Relational algebra aggregate operator.",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.rasl.aggregate.html",
"type": "object",
"tags": {"pre": [], "op": ["transformer"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_transform": _input_transform_schema,
"output_transform": _output_transform_schema,
},
}
Aggregate = lale.operators.make_operator(_AggregateImpl, _combined_schemas)
lale.docstrings.set_docstrings(Aggregate)
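# Minimal usage sketch, assuming a pandas dataframe with a numeric column
# "amount"; the same expressions work on Spark dataframes and on grouped
# dataframes produced by the GroupBy operator:
#
#     import pandas as pd
#     from lale.expressions import it, sum as agg_sum
#     df = pd.DataFrame({"amount": [1.0, 2.0, 3.0]})
#     totals = Aggregate(columns={"total": agg_sum(it["amount"])}).transform(df)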
| 12,271 | 38.587097 | 194 |
py
|
lale
|
lale-master/lale/lib/rasl/datasets.py
|
# Copyright 2022 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Iterable, Tuple, Union, cast, overload
import pandas as pd
import sklearn.model_selection
import sklearn.tree
from typing_extensions import Literal, TypeAlias
import lale.helpers
from lale.datasets import pandas2spark
from lale.helpers import datatype_param_type
from .split_xy import SplitXy
_PandasBatch: TypeAlias = Tuple[pd.DataFrame, pd.Series]
if lale.helpers.spark_installed:
from pyspark.sql.dataframe import DataFrame as SparkDataFrame
_SparkBatch: TypeAlias = Tuple[SparkDataFrame, SparkDataFrame]
_PandasOrSparkBatchAux = Union[
_PandasBatch,
_SparkBatch,
]
else:
_PandasOrSparkBatchAux = _PandasBatch # type: ignore
# pyright does not currently accept a TypeAlias with conditional definitions
_PandasOrSparkBatch: TypeAlias = _PandasOrSparkBatchAux # type: ignore
try:
import arff
from lale.datasets.openml import openml_datasets # pylint:disable=ungrouped-imports
liac_arff_installed = True
except ModuleNotFoundError:
liac_arff_installed = False
def arff_data_loader(
file_name: str, label_name: str, rows_per_batch: int
) -> Iterable[_PandasBatch]:
"""Incrementally load an ARFF file and yield it one (X, y) batch at a time."""
assert liac_arff_installed
split_x_y = SplitXy(label_name=label_name)
def make_batch():
start = n_batches * rows_per_batch
stop = start + len(row_list)
df = pd.DataFrame(row_list, range(start, stop), column_names)
X, y = split_x_y.transform_X_y(df, None)
return X, y
with open(file_name) as f: # pylint:disable=unspecified-encoding
arff_dict = arff.load(f, return_type=arff.DENSE_GEN)
column_names = [name.lower() for name, _ in arff_dict["attributes"]]
row_list = []
n_batches = 0
for row in arff_dict["data"]:
row_list.append(row)
if len(row_list) >= rows_per_batch:
yield make_batch()
row_list = []
n_batches += 1
if len(row_list) > 0: # last chunk
yield make_batch()
def csv_data_loader(
file_name: str, label_name: str, rows_per_batch: int
) -> Iterable[_PandasBatch]:
"""Incrementally load an CSV file and yield it one (X, y) batch at a time."""
split_x_y = SplitXy(label_name=label_name)
with pd.read_csv(file_name, chunksize=rows_per_batch) as reader:
for df in reader:
X, y = split_x_y.transform_X_y(df, None)
yield X, y
@overload
def mockup_data_loader(
X: pd.DataFrame, y: pd.Series, n_batches: int, astype: Literal["pandas"]
) -> Iterable[_PandasBatch]:
...
@overload
def mockup_data_loader(
X: pd.DataFrame, y: pd.Series, n_batches: int, astype: datatype_param_type
) -> Iterable[_PandasOrSparkBatch]:
...
def mockup_data_loader(
X: pd.DataFrame, y: pd.Series, n_batches: int, astype: datatype_param_type
) -> Iterable[_PandasOrSparkBatch]:
"""Split (X, y) into batches to emulate loading them incrementally.
Only intended for testing purposes, because if X and y are already
materialized in-memory, there is little reason to batch them.
"""
pandas_gen: Iterable[_PandasBatch]
if n_batches == 1:
pandas_gen = [(X, y)]
else:
cv = sklearn.model_selection.KFold(n_batches)
estimator = sklearn.tree.DecisionTreeClassifier()
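        # KFold's test folds serve as the successive batches; split_with_schemas
        # keeps lale schema annotations attached to each split.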
pandas_gen = (
lale.helpers.split_with_schemas(estimator, X, y, test, train)
for train, test in cv.split(X, y)
)
if astype == "pandas":
return pandas_gen
elif astype == "spark":
return ((pandas2spark(X), pandas2spark(y)) for X, y in pandas_gen)
raise ValueError(f"expected astype in ['pandas', 'spark'], got {astype}")
def openml_data_loader(dataset_name: str, batch_size: int) -> Iterable[_PandasBatch]:
"""Download the OpenML dataset, incrementally load it, and yield it one (X,y) batch at a time."""
assert liac_arff_installed
metadata = openml_datasets.experiments_dict[dataset_name]
label_name = cast(str, metadata["target"]).lower()
file_name = openml_datasets.download_if_missing(dataset_name)
return arff_data_loader(file_name, label_name, batch_size)
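# Minimal usage sketch, assuming an in-memory pandas (X, y) pair; the loader
# yields (X_batch, y_batch) pairs that can be fed to a batched fit loop such as
# lale.lib.rasl.Batching or fit_with_batches:
#
#     import pandas as pd
#     X = pd.DataFrame({"f0": range(10), "f1": range(10)})
#     y = pd.Series([i % 2 for i in range(10)], name="label")
#     for X_batch, y_batch in mockup_data_loader(X, y, n_batches=2, astype="pandas"):
#         ...  # consume each batch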
| 4,815 | 32.915493 | 101 |
py
|
lale
|
lale-master/lale/lib/rasl/batching.py
|
# Copyright 2019-2022 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import lale.docstrings
import lale.helpers
import lale.operators
from .task_graphs import PrioBatch, PrioResourceAware, PrioStep, fit_with_batches
class _BatchingImpl:
def __init__(
self,
operator=None,
batch_size=32,
shuffle=True,
num_workers=0,
inmemory=False,
num_epochs=None,
max_resident=None,
scoring=None,
progress_callback=None,
partial_transform=False,
priority="resource_aware",
verbose=0,
):
self.operator = operator
self.batch_size = batch_size
self.shuffle = shuffle
self.num_workers = num_workers
self.inmemory = inmemory
self.num_epochs = num_epochs
self.max_resident = max_resident
self.scoring = scoring
self.progress_callback = progress_callback
self.partial_transform = partial_transform
self.priority = priority
self.verbose = verbose
def fit(self, X, y=None, classes=None):
if self.operator is None:
raise ValueError("The pipeline object can't be None at the time of fit.")
if hasattr(X, "__next__") and hasattr(
X, "__iter__"
): # allow an iterable that is not a torch data loader
assert y is None, "When X is an Iterable, y should be None"
data_loader = X
else:
try:
from torch.utils.data import DataLoader
except ImportError as exc:
raise ImportError(
"""Batching uses Pytorch for data loading. It is not
installed in the current environment, please install
the package and try again."""
) from exc
if isinstance(X, DataLoader):
assert (
y is None
), "When X is a torch.utils.data.DataLoader, y should be None"
data_loader = X
else:
data_loader = lale.helpers.create_data_loader(
X=X,
y=y,
batch_size=self.batch_size,
num_workers=self.num_workers,
shuffle=self.shuffle,
)
if y is not None and classes is None:
classes = np.unique(y)
if self.priority == "batch":
prio = PrioBatch()
elif self.priority == "step":
prio = PrioStep()
else:
prio = PrioResourceAware()
self.operator = fit_with_batches(
pipeline=self.operator,
batches_train=data_loader, # type:ignore
batches_valid=None,
unique_class_labels=classes, # type:ignore
max_resident=self.max_resident,
prio=prio,
partial_transform=self.partial_transform,
scoring=self.scoring,
progress_callback=self.progress_callback,
verbose=self.verbose,
)
return self
def transform(self, X, y=None):
if hasattr(X, "__next__") and hasattr(
X, "__iter__"
): # allow an iterable that is not a torch data loader
assert y is None, "When X is an Iterable, y should be None"
data_loader = X
else:
try:
from torch.utils.data import DataLoader
except ImportError as exc:
raise ImportError(
"""Batching uses Pytorch for data loading. It is not
installed in the current environment, please install
the package and try again."""
) from exc
if isinstance(X, DataLoader):
assert (
y is None
), "When X is a torch.utils.data.DataLoader, y should be None"
data_loader = X
else:
data_loader = lale.helpers.create_data_loader(
X=X,
y=y,
batch_size=self.batch_size,
num_workers=self.num_workers,
shuffle=self.shuffle,
)
op = self.operator
assert op is not None
transformed_data = op.transform_with_batches(
data_loader, serialize=self.inmemory
)
return transformed_data
def predict(self, X, y=None):
return self.transform(X, y)
_input_fit_schema = {
"description": "Input data schema for fit.",
"type": "object",
"required": ["X", "y"],
"additionalProperties": False,
"properties": {
"X": {
"description": "Features; the outer array is over samples.",
"anyOf": [
{
"type": "array",
"items": {
"anyOf": [
{"type": "number"},
{"type": "string"},
{"type": "boolean"},
]
},
},
{
"type": "array",
"items": {
"type": "array",
"items": {
"anyOf": [
{"type": "number"},
{"type": "string"},
{"type": "boolean"},
]
},
},
},
{"type": "object"},
],
},
"y": {
"anyOf": [
{
"type": "array",
"items": {
"anyOf": [
{"type": "integer"},
{"type": "number"},
{"type": "string"},
]
},
},
{"enum": [None]},
],
},
"classes": {
"anyOf": [
{
"type": "array",
"items": {
"anyOf": [
{"type": "number"},
{"type": "string"},
{"type": "boolean"},
]
},
},
{"enum": [None]},
],
"description": """The total number of classes in the entire training dataset.""",
},
},
}
_input_predict_transform_schema = { # TODO: separate predict vs. transform
"description": "Input data schema for predictions.",
"type": "object",
"required": ["X"],
"additionalProperties": False,
"properties": {
"X": {
"description": "Features; the outer array is over samples.",
"anyOf": [
{
"type": "array",
"items": {
"anyOf": [
{"type": "number"},
{"type": "string"},
{"type": "boolean"},
]
},
},
{
"type": "array",
"items": {
"type": "array",
"items": {
"anyOf": [
{"type": "number"},
{"type": "string"},
{"type": "boolean"},
]
},
},
},
{},
],
},
"y": {
"type": "array",
"items": {"anyOf": [{"type": "integer"}, {"type": "number"}]},
},
},
}
_output_schema = { # TODO: separate predict vs. transform
"description": "Output data schema for transformed data.",
"laleType": "Any",
}
_hyperparams_schema = {
"description": "Hyperparameter schema.",
"allOf": [
{
"description": "This first sub-object lists all constructor arguments with their "
"types, one at a time, omitting cross-argument constraints.",
"type": "object",
"additionalProperties": False,
"relevantToOptimizer": ["batch_size"],
"properties": {
"operator": {
"description": "A lale pipeline object to be used inside of batching",
"laleType": "operator",
},
"batch_size": {
"description": "Batch size used for transform.",
"type": "integer",
"default": 64,
"minimum": 1,
"distribution": "uniform",
"minimumForOptimizer": 32,
"maximumForOptimizer": 128,
},
"shuffle": {
"type": "boolean",
"default": False,
"description": "Shuffle dataset before batching or not.",
},
"num_workers": {
"type": "integer",
"default": 0,
"description": "Number of workers for pytorch dataloader.",
},
"inmemory": {
"type": "boolean",
"default": False,
"description": """Whether all the computations are done in memory
or intermediate outputs are serialized. Only applies to transform/predict.
For fit, use the `max_resident` argument.""",
},
"num_epochs": {
"anyOf": [{"type": "integer"}, {"enum": [None]}],
"default": None,
"description": "Number of epochs. If the operator has `num_epochs` as a parameter, that takes precedence.",
},
"max_resident": {
"anyOf": [{"type": "integer"}, {"enum": [None]}],
"default": None,
"description": "Amount of memory to be used in bytes.",
},
"scoring": {
"anyOf": [{"laleType": "callable"}, {"enum": [None]}],
"default": None,
"description": "Batch-wise scoring metrics from `lale.lib.rasl`.",
},
"progress_callback": {
"anyOf": [{"laleType": "callable"}, {"enum": [None]}],
"default": None,
"description": "Callback function to get performance metrics per batch.",
},
"partial_transform": {
"type": "boolean",
"default": False,
"description": """Whether to allow partially-trained upstream operators
to transform data for training downstream operators even before the upstream operator has been fully trained.""",
},
"priority": {
"description": """Scheduling priority in task graphs.
"batch" will execute tasks from earlier batches first.
"step" will execute tasks from earlier steps first, like nested-loop algorithm.
And "resource_aware" will execute tasks with less non-resident data first.""",
"enum": ["batch", "step", "resource_aware"],
"default": "resource_aware",
},
"verbose": {
"type": "integer",
"default": 0,
"description": "Verbosity level, higher values mean more information.",
},
},
}
],
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """Batching trains the given pipeline using batches.
The batch_size is used across all steps of the pipeline, serializing
the intermediate outputs if specified.""",
"type": "object",
"tags": {"pre": [], "op": ["estimator", "transformer"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_predict": _input_predict_transform_schema,
"output_predict": _output_schema,
"input_transform": _input_predict_transform_schema,
"output_transform": _output_schema,
},
}
Batching = lale.operators.make_operator(_BatchingImpl, _combined_schemas)
lale.docstrings.set_docstrings(Batching)
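# Minimal usage sketch, assuming a trainable lale pipeline `pipeline` (for
# example, a rasl preprocessor followed by an estimator) and in-memory training
# data; Batching streams (X, y) through the pipeline in mini-batches:
#
#     batched = Batching(operator=pipeline, batch_size=64)
#     trained = batched.fit(X_train, y_train)
#     predictions = trained.predict(X_test)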
| 13,230 | 34.953804 | 127 |
py
|