code | docstring | func_name | language | repo | path | url | license
---|---|---|---|---|---|---|---|
def test_ovo_consistent_binary_classification():
    """Check that ovo is consistent with the binary classifier.

    Non-regression test for #13617.
    """
    X, y = load_breast_cancer(return_X_y=True)
    clf = KNeighborsClassifier(n_neighbors=8, weights="distance")
    ovo = OneVsOneClassifier(clf)
    clf.fit(X, y)
    ovo.fit(X, y)
    assert_array_equal(clf.predict(X), ovo.predict(X))
|
Check that ovo is consistent with the binary classifier.
Non-regression test for #13617.
|
test_ovo_consistent_binary_classification
|
python
|
scikit-learn/scikit-learn
|
sklearn/tests/test_multiclass.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tests/test_multiclass.py
|
BSD-3-Clause
|
def test_multiclass_estimator_attribute_error():
    """Check that we raise the proper AttributeError when the final estimator
    does not implement the `partial_fit` method, which is decorated with
    `available_if`.

    Non-regression test for:
    https://github.com/scikit-learn/scikit-learn/issues/28108
    """
    iris = datasets.load_iris()
    # LogisticRegression does not implement 'partial_fit' and should raise an
    # AttributeError
    clf = OneVsRestClassifier(estimator=LogisticRegression(random_state=42))

    outer_msg = "This 'OneVsRestClassifier' has no attribute 'partial_fit'"
    inner_msg = "'LogisticRegression' object has no attribute 'partial_fit'"
    with pytest.raises(AttributeError, match=outer_msg) as exec_info:
        clf.partial_fit(iris.data, iris.target)
    assert isinstance(exec_info.value.__cause__, AttributeError)
    assert inner_msg in str(exec_info.value.__cause__)
|
Check that we raise the proper AttributeError when the final estimator
does not implement the `partial_fit` method, which is decorated with
`available_if`.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/28108
|
test_multiclass_estimator_attribute_error
|
python
|
scikit-learn/scikit-learn
|
sklearn/tests/test_multiclass.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tests/test_multiclass.py
|
BSD-3-Clause
|
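The test above exercises scikit-learn's `available_if` mechanism, which exposes a delegated method on the meta-estimator only when a check passes and otherwise raises an AttributeError whose `__cause__` carries the inner error. A minimal sketch of that delegation pattern follows; the `Wrapper` class and `_inner_has` helper are hypothetical names for illustration, not scikit-learn API.

# A minimal sketch of available_if-style delegation. Wrapper and
# _inner_has are hypothetical illustration names.
from sklearn.linear_model import LogisticRegression
from sklearn.utils.metaestimators import available_if


def _inner_has(method):
    def check(self):
        # Raises AttributeError if the inner estimator lacks the method;
        # available_if turns that into the outer AttributeError.
        getattr(self.estimator, method)
        return True

    return check


class Wrapper:
    def __init__(self, estimator):
        self.estimator = estimator

    @available_if(_inner_has("partial_fit"))
    def partial_fit(self, X, y):
        return self.estimator.partial_fit(X, y)


w = Wrapper(LogisticRegression())
# LogisticRegression has no partial_fit, so the check fails and the
# delegated method is simply not available on the wrapper:
assert not hasattr(w, "partial_fit")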
def test_multi_output_not_fitted_error(response_method):
    """Check that we raise the proper error when the estimator is not fitted"""
    moc = MultiOutputClassifier(LogisticRegression())
    with pytest.raises(NotFittedError):
        getattr(moc, response_method)(X)
|
Check that we raise the proper error when the estimator is not fitted
|
test_multi_output_not_fitted_error
|
python
|
scikit-learn/scikit-learn
|
sklearn/tests/test_multioutput.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tests/test_multioutput.py
|
BSD-3-Clause
|
def test_multi_output_delegate_predict_proba():
    """Check the behavior for the delegation of predict_proba to the underlying
    estimator"""
    # A base estimator with `predict_proba` should expose the method even before fit
    moc = MultiOutputClassifier(LogisticRegression())
    assert hasattr(moc, "predict_proba")
    moc.fit(X, y)
    assert hasattr(moc, "predict_proba")

    # A base estimator without `predict_proba` should raise an AttributeError
    moc = MultiOutputClassifier(LinearSVC())
    assert not hasattr(moc, "predict_proba")

    outer_msg = "'MultiOutputClassifier' has no attribute 'predict_proba'"
    inner_msg = "'LinearSVC' object has no attribute 'predict_proba'"
    with pytest.raises(AttributeError, match=outer_msg) as exec_info:
        moc.predict_proba(X)
    assert isinstance(exec_info.value.__cause__, AttributeError)
    assert inner_msg == str(exec_info.value.__cause__)

    moc.fit(X, y)
    assert not hasattr(moc, "predict_proba")
    with pytest.raises(AttributeError, match=outer_msg) as exec_info:
        moc.predict_proba(X)
    assert isinstance(exec_info.value.__cause__, AttributeError)
    assert inner_msg == str(exec_info.value.__cause__)
|
Check the behavior for the delegation of predict_proba to the underlying
estimator
|
test_multi_output_delegate_predict_proba
|
python
|
scikit-learn/scikit-learn
|
sklearn/tests/test_multioutput.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tests/test_multioutput.py
|
BSD-3-Clause
|
def test_multioutputregressor_ducktypes_fitted_estimator():
    """Test that MultiOutputRegressor checks the fitted estimator for
    predict. Non-regression test for #16549."""
    X, y = load_linnerud(return_X_y=True)
    stacker = StackingRegressor(
        estimators=[("sgd", SGDRegressor(random_state=1))],
        final_estimator=Ridge(),
        cv=2,
    )

    reg = MultiOutputRegressor(estimator=stacker).fit(X, y)

    # Does not raise
    reg.predict(X)
|
Test that MultiOutputRegressor checks the fitted estimator for
predict. Non-regression test for #16549.
|
test_multioutputregressor_ducktypes_fitted_estimator
|
python
|
scikit-learn/scikit-learn
|
sklearn/tests/test_multioutput.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tests/test_multioutput.py
|
BSD-3-Clause
|
def test_fit_params_no_routing(Cls, method):
    """Check that we raise an error when passing metadata not requested by the
    underlying classifier.
    """
    X, y = make_classification(n_samples=50)
    clf = Cls(PassiveAggressiveClassifier())

    with pytest.raises(ValueError, match="is only supported if"):
        getattr(clf, method)(X, y, test=1)
|
Check that we raise an error when passing metadata not requested by the
underlying classifier.
|
test_fit_params_no_routing
|
python
|
scikit-learn/scikit-learn
|
sklearn/tests/test_multioutput.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tests/test_multioutput.py
|
BSD-3-Clause
|
def test_base_estimator_deprecation(Estimator):
    """Check that we warn about the deprecation of `base_estimator`."""
    X = np.array([[1, 2], [3, 4]])
    y = np.array([[1, 0], [0, 1]])

    estimator = LogisticRegression()

    with pytest.warns(FutureWarning):
        Estimator(base_estimator=estimator).fit(X, y)

    with pytest.raises(ValueError):
        Estimator(base_estimator=estimator, estimator=estimator).fit(X, y)
|
Check that we warn about the deprecation of `base_estimator`.
|
test_base_estimator_deprecation
|
python
|
scikit-learn/scikit-learn
|
sklearn/tests/test_multioutput.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tests/test_multioutput.py
|
BSD-3-Clause
|
def test_gnb_sample_weight(global_random_seed):
    """Test whether sample weights are properly used in GNB."""
    # Sample weights all being 1 should not change results
    sw = np.ones(6)
    clf = GaussianNB().fit(X, y)
    clf_sw = GaussianNB().fit(X, y, sw)

    assert_array_almost_equal(clf.theta_, clf_sw.theta_)
    assert_array_almost_equal(clf.var_, clf_sw.var_)

    # Fitting twice with half sample-weights should result
    # in same result as fitting once with full weights
    rng = np.random.RandomState(global_random_seed)
    sw = rng.rand(y.shape[0])
    clf1 = GaussianNB().fit(X, y, sample_weight=sw)
    clf2 = GaussianNB().partial_fit(X, y, classes=[1, 2], sample_weight=sw / 2)
    clf2.partial_fit(X, y, sample_weight=sw / 2)

    assert_array_almost_equal(clf1.theta_, clf2.theta_)
    assert_array_almost_equal(clf1.var_, clf2.var_)

    # Check that duplicate entries and correspondingly increased sample
    # weights yield the same result
    ind = rng.randint(0, X.shape[0], 20)
    sample_weight = np.bincount(ind, minlength=X.shape[0])

    clf_dupl = GaussianNB().fit(X[ind], y[ind])
    clf_sw = GaussianNB().fit(X, y, sample_weight)

    assert_array_almost_equal(clf_dupl.theta_, clf_sw.theta_)
    assert_array_almost_equal(clf_dupl.var_, clf_sw.var_)

    # non-regression test for gh-24140 where a division by zero was
    # occurring when a single class was present
    sample_weight = (y == 1).astype(np.float64)
    clf = GaussianNB().fit(X, y, sample_weight=sample_weight)
|
Test whether sample weights are properly used in GNB.
|
test_gnb_sample_weight
|
python
|
scikit-learn/scikit-learn
|
sklearn/tests/test_naive_bayes.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tests/test_naive_bayes.py
|
BSD-3-Clause
|
def test_gnb_neg_priors():
    """Test whether an error is raised in case of negative priors"""
    clf = GaussianNB(priors=np.array([-1.0, 2.0]))

    msg = "Priors must be non-negative"
    with pytest.raises(ValueError, match=msg):
        clf.fit(X, y)
|
Test whether an error is raised in case of negative priors
|
test_gnb_neg_priors
|
python
|
scikit-learn/scikit-learn
|
sklearn/tests/test_naive_bayes.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tests/test_naive_bayes.py
|
BSD-3-Clause
|
def test_gnb_priors():
    """Test whether the class prior override is properly used"""
    clf = GaussianNB(priors=np.array([0.3, 0.7])).fit(X, y)

    assert_array_almost_equal(
        clf.predict_proba([[-0.1, -0.1]]),
        np.array([[0.825303662161683, 0.174696337838317]]),
        8,
    )
    assert_array_almost_equal(clf.class_prior_, np.array([0.3, 0.7]))
|
Test whether the class prior override is properly used
|
test_gnb_priors
|
python
|
scikit-learn/scikit-learn
|
sklearn/tests/test_naive_bayes.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tests/test_naive_bayes.py
|
BSD-3-Clause
|
def test_gnb_wrong_nb_priors():
    """Test whether an error is raised if the number of priors is different
    from the number of classes"""
    clf = GaussianNB(priors=np.array([0.25, 0.25, 0.25, 0.25]))

    msg = "Number of priors must match number of classes"
    with pytest.raises(ValueError, match=msg):
        clf.fit(X, y)
|
Test whether an error is raised if the number of priors is different
from the number of classes
|
test_gnb_wrong_nb_priors
|
python
|
scikit-learn/scikit-learn
|
sklearn/tests/test_naive_bayes.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tests/test_naive_bayes.py
|
BSD-3-Clause
|
def test_gnb_prior_greater_one():
    """Test if an error is raised if the sum of the priors is greater than one"""
    clf = GaussianNB(priors=np.array([2.0, 1.0]))

    msg = "The sum of the priors should be 1"
    with pytest.raises(ValueError, match=msg):
        clf.fit(X, y)
|
Test if an error is raised if the sum of the priors is greater than one
|
test_gnb_prior_greater_one
|
python
|
scikit-learn/scikit-learn
|
sklearn/tests/test_naive_bayes.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tests/test_naive_bayes.py
|
BSD-3-Clause
|
def test_gnb_prior_large_bias():
    """Test for good prediction when the class prior largely favors one class"""
    clf = GaussianNB(priors=np.array([0.01, 0.99]))
    clf.fit(X, y)
    assert clf.predict([[-0.1, -0.1]]) == np.array([2])
|
Test for good prediction when the class prior largely favors one class
|
test_gnb_prior_large_bias
|
python
|
scikit-learn/scikit-learn
|
sklearn/tests/test_naive_bayes.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tests/test_naive_bayes.py
|
BSD-3-Clause
|
def test_gnb_check_update_with_no_data():
    """Test when the partial fit is called without any data"""
    # Create an empty array
    prev_points = 100
    mean = 0.0
    var = 1.0
    x_empty = np.empty((0, X.shape[1]))
    tmean, tvar = GaussianNB._update_mean_variance(prev_points, mean, var, x_empty)
    assert tmean == mean
    assert tvar == var
|
Test when the partial fit is called without any data
|
test_gnb_check_update_with_no_data
|
python
|
scikit-learn/scikit-learn
|
sklearn/tests/test_naive_bayes.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tests/test_naive_bayes.py
|
BSD-3-Clause
|
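For context on the `_update_mean_variance` call above: GaussianNB keeps running per-feature means and variances so `partial_fit` can fold a new batch into previously seen statistics, and an empty batch must leave them unchanged. A minimal unweighted sketch of that merge follows (the `merge_mean_var` name is ours; the actual implementation also handles sample weights):

import numpy as np

def merge_mean_var(n_past, mu, var, X_new):
    # Empty batch: statistics are unchanged (the case the test checks).
    if X_new.shape[0] == 0:
        return mu, var
    n_new = X_new.shape[0]
    mu_new = X_new.mean(axis=0)
    var_new = X_new.var(axis=0)

    n_total = n_past + n_new
    mu_total = (n_past * mu + n_new * mu_new) / n_total
    # Combined sum of squared deviations: the two within-batch parts plus
    # a correction for the shift between the batch means.
    ssd = (
        n_past * var
        + n_new * var_new
        + (n_past * n_new / n_total) * (mu - mu_new) ** 2
    )
    return mu_total, ssd / n_total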
def test_check_alpha():
    """The provided value for alpha must only be
    used if alpha < _ALPHA_MIN and force_alpha is True.

    Non-regression test for:
    https://github.com/scikit-learn/scikit-learn/issues/10772
    """
    _ALPHA_MIN = 1e-10
    b = BernoulliNB(alpha=0, force_alpha=True)
    assert b._check_alpha() == 0

    alphas = np.array([0.0, 1.0])
    b = BernoulliNB(alpha=alphas, force_alpha=True)
    # We manually set `n_features_in_` not to have `_check_alpha` err
    b.n_features_in_ = alphas.shape[0]
    assert_array_equal(b._check_alpha(), alphas)

    msg = (
        "alpha too small will result in numeric errors, setting alpha = %.1e"
        % _ALPHA_MIN
    )
    b = BernoulliNB(alpha=0, force_alpha=False)
    with pytest.warns(UserWarning, match=msg):
        assert b._check_alpha() == _ALPHA_MIN

    b = BernoulliNB(alpha=0, force_alpha=False)
    with pytest.warns(UserWarning, match=msg):
        assert b._check_alpha() == _ALPHA_MIN

    b = BernoulliNB(alpha=alphas, force_alpha=False)
    # We manually set `n_features_in_` not to have `_check_alpha` err
    b.n_features_in_ = alphas.shape[0]
    with pytest.warns(UserWarning, match=msg):
        assert_array_equal(b._check_alpha(), np.array([_ALPHA_MIN, 1.0]))
|
The provided value for alpha must only be
used if alpha < _ALPHA_MIN and force_alpha is True.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/10772
|
test_check_alpha
|
python
|
scikit-learn/scikit-learn
|
sklearn/tests/test_naive_bayes.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tests/test_naive_bayes.py
|
BSD-3-Clause
|
def create_mock_transformer(base_name, n_features=3):
    """Helper to create a mock transformer with custom feature names."""
    mock = Transf()
    mock.get_feature_names_out = lambda input_features: [
        f"{base_name}{i}" for i in range(n_features)
    ]
    return mock
|
Helper to create a mock transformer with custom feature names.
|
create_mock_transformer
|
python
|
scikit-learn/scikit-learn
|
sklearn/tests/test_pipeline.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tests/test_pipeline.py
|
BSD-3-Clause
|
def test_pipeline_estimator_type(pipeline, check_estimator_type):
    """Check that the estimator type returned by the pipeline is correct.

    Non-regression test as part of:
    https://github.com/scikit-learn/scikit-learn/issues/30197
    """
    # Smoke test the repr
    repr(pipeline)
    assert check_estimator_type(pipeline)
|
Check that the estimator type returned by the pipeline is correct.
Non-regression test as part of:
https://github.com/scikit-learn/scikit-learn/issues/30197
|
test_pipeline_estimator_type
|
python
|
scikit-learn/scikit-learn
|
sklearn/tests/test_pipeline.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tests/test_pipeline.py
|
BSD-3-Clause
|
def test_sklearn_tags_with_empty_pipeline():
    """Check that we properly propagate the tags in a Pipeline.

    Non-regression test as part of:
    https://github.com/scikit-learn/scikit-learn/issues/30197
    """
    empty_pipeline = Pipeline(steps=[])
    be = BaseEstimator()

    expected_tags = be.__sklearn_tags__()
    assert empty_pipeline.__sklearn_tags__() == expected_tags
|
Check that we properly propagate the tags in a Pipeline.
Non-regression test as part of:
https://github.com/scikit-learn/scikit-learn/issues/30197
|
test_sklearn_tags_with_empty_pipeline
|
python
|
scikit-learn/scikit-learn
|
sklearn/tests/test_pipeline.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tests/test_pipeline.py
|
BSD-3-Clause
|
def test_set_feature_union_passthrough():
    """Check the behaviour of setting a transformer to `"passthrough"`."""
    mult2 = Mult(2)
    mult3 = Mult(3)

    # We only test get_feature_names_out, as get_feature_names is unsupported by
    # FunctionTransformer, and hence unsupported by FeatureUnion passthrough.
    mult2.get_feature_names_out = lambda input_features: ["x2"]
    mult3.get_feature_names_out = lambda input_features: ["x3"]

    X = np.asarray([[1]])

    ft = FeatureUnion([("m2", mult2), ("m3", mult3)])
    assert_array_equal([[2, 3]], ft.fit(X).transform(X))
    assert_array_equal([[2, 3]], ft.fit_transform(X))
    assert_array_equal(["m2__x2", "m3__x3"], ft.get_feature_names_out())

    ft.set_params(m2="passthrough")
    assert_array_equal([[1, 3]], ft.fit(X).transform(X))
    assert_array_equal([[1, 3]], ft.fit_transform(X))
    assert_array_equal(["m2__myfeat", "m3__x3"], ft.get_feature_names_out(["myfeat"]))

    ft.set_params(m3="passthrough")
    assert_array_equal([[1, 1]], ft.fit(X).transform(X))
    assert_array_equal([[1, 1]], ft.fit_transform(X))
    assert_array_equal(
        ["m2__myfeat", "m3__myfeat"], ft.get_feature_names_out(["myfeat"])
    )

    # check we can change back
    ft.set_params(m3=mult3)
    assert_array_equal([[1, 3]], ft.fit(X).transform(X))
    assert_array_equal([[1, 3]], ft.fit_transform(X))
    assert_array_equal(["m2__myfeat", "m3__x3"], ft.get_feature_names_out(["myfeat"]))

    # Check 'passthrough' step at construction time
    ft = FeatureUnion([("m2", "passthrough"), ("m3", mult3)])
    assert_array_equal([[1, 3]], ft.fit(X).transform(X))
    assert_array_equal([[1, 3]], ft.fit_transform(X))
    assert_array_equal(["m2__myfeat", "m3__x3"], ft.get_feature_names_out(["myfeat"]))

    X = iris.data
    columns = X.shape[1]
    pca = PCA(n_components=2, svd_solver="randomized", random_state=0)

    ft = FeatureUnion([("passthrough", "passthrough"), ("pca", pca)])
    assert_array_equal(X, ft.fit(X).transform(X)[:, :columns])
    assert_array_equal(X, ft.fit_transform(X)[:, :columns])
    assert_array_equal(
        [
            "passthrough__f0",
            "passthrough__f1",
            "passthrough__f2",
            "passthrough__f3",
            "pca__pca0",
            "pca__pca1",
        ],
        ft.get_feature_names_out(["f0", "f1", "f2", "f3"]),
    )

    ft.set_params(pca="passthrough")
    X_ft = ft.fit(X).transform(X)
    assert_array_equal(X_ft, np.hstack([X, X]))
    X_ft = ft.fit_transform(X)
    assert_array_equal(X_ft, np.hstack([X, X]))
    assert_array_equal(
        [
            "passthrough__f0",
            "passthrough__f1",
            "passthrough__f2",
            "passthrough__f3",
            "pca__f0",
            "pca__f1",
            "pca__f2",
            "pca__f3",
        ],
        ft.get_feature_names_out(["f0", "f1", "f2", "f3"]),
    )

    ft.set_params(passthrough=pca)
    assert_array_equal(X, ft.fit(X).transform(X)[:, -columns:])
    assert_array_equal(X, ft.fit_transform(X)[:, -columns:])
    assert_array_equal(
        [
            "passthrough__pca0",
            "passthrough__pca1",
            "pca__f0",
            "pca__f1",
            "pca__f2",
            "pca__f3",
        ],
        ft.get_feature_names_out(["f0", "f1", "f2", "f3"]),
    )

    ft = FeatureUnion(
        [("passthrough", "passthrough"), ("pca", pca)],
        transformer_weights={"passthrough": 2},
    )
    assert_array_equal(X * 2, ft.fit(X).transform(X)[:, :columns])
    assert_array_equal(X * 2, ft.fit_transform(X)[:, :columns])
    assert_array_equal(
        [
            "passthrough__f0",
            "passthrough__f1",
            "passthrough__f2",
            "passthrough__f3",
            "pca__pca0",
            "pca__pca1",
        ],
        ft.get_feature_names_out(["f0", "f1", "f2", "f3"]),
    )
|
Check the behaviour of setting a transformer to `"passthrough"`.
|
test_set_feature_union_passthrough
|
python
|
scikit-learn/scikit-learn
|
sklearn/tests/test_pipeline.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tests/test_pipeline.py
|
BSD-3-Clause
|
def test_feature_union_passthrough_get_feature_names_out_false_errors():
    """Check get_feature_names_out with non-verbose names and colliding names."""
    pd = pytest.importorskip("pandas")
    X = pd.DataFrame([[1, 2], [2, 3]], columns=["a", "b"])

    select_a = FunctionTransformer(
        lambda X: X[["a"]], feature_names_out=lambda self, _: np.asarray(["a"])
    )
    union = FeatureUnion(
        [("t1", StandardScaler()), ("t2", select_a)],
        verbose_feature_names_out=False,
    )
    union.fit(X)

    msg = re.escape(
        "Output feature names: ['a'] are not unique. "
        "Please set verbose_feature_names_out=True to add prefixes to feature names"
    )
    with pytest.raises(ValueError, match=msg):
        union.get_feature_names_out()
|
Check get_feature_names_out with non-verbose names and colliding names.
|
test_feature_union_passthrough_get_feature_names_out_false_errors
|
python
|
scikit-learn/scikit-learn
|
sklearn/tests/test_pipeline.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tests/test_pipeline.py
|
BSD-3-Clause
|
def test_feature_union_passthrough_get_feature_names_out_false_errors_overlap_over_5():
    """Check get_feature_names_out with non-verbose names and >= 5 colliding names."""
    pd = pytest.importorskip("pandas")
    X = pd.DataFrame([list(range(10))], columns=[f"f{i}" for i in range(10)])

    union = FeatureUnion(
        [("t1", "passthrough"), ("t2", "passthrough")],
        verbose_feature_names_out=False,
    )
    union.fit(X)

    msg = re.escape(
        "Output feature names: ['f0', 'f1', 'f2', 'f3', 'f4', ...] "
        "are not unique. Please set verbose_feature_names_out=True to add prefixes to"
        " feature names"
    )
    with pytest.raises(ValueError, match=msg):
        union.get_feature_names_out()
|
Check get_feature_names_out with non-verbose names and >= 5 colliding names.
|
test_feature_union_passthrough_get_feature_names_out_false_errors_overlap_over_5
|
python
|
scikit-learn/scikit-learn
|
sklearn/tests/test_pipeline.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tests/test_pipeline.py
|
BSD-3-Clause
|
def test_pipeline_feature_names_out_error_without_definition():
    """Check that error is raised when a transformer does not define
    `get_feature_names_out`."""
    pipe = Pipeline(steps=[("notrans", NoTrans())])
    iris = load_iris()
    pipe.fit(iris.data, iris.target)

    msg = "does not provide get_feature_names_out"
    with pytest.raises(AttributeError, match=msg):
        pipe.get_feature_names_out()
|
Check that error is raised when a transformer does not define
`get_feature_names_out`.
|
test_pipeline_feature_names_out_error_without_definition
|
python
|
scikit-learn/scikit-learn
|
sklearn/tests/test_pipeline.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tests/test_pipeline.py
|
BSD-3-Clause
|
def test_pipeline_get_feature_names_out_passes_names_through():
    """Check that pipeline passes names through.

    Non-regression test for #21349.
    """
    X, y = iris.data, iris.target

    class AddPrefixStandardScalar(StandardScaler):
        def get_feature_names_out(self, input_features=None):
            names = super().get_feature_names_out(input_features=input_features)
            return np.asarray([f"my_prefix_{name}" for name in names], dtype=object)

    pipe = make_pipeline(AddPrefixStandardScalar(), StandardScaler())
    pipe.fit(X, y)

    input_names = iris.feature_names
    feature_names_out = pipe.get_feature_names_out(input_names)
    assert_array_equal(feature_names_out, [f"my_prefix_{name}" for name in input_names])
|
Check that pipeline passes names through.
Non-regression test for #21349.
|
test_pipeline_get_feature_names_out_passes_names_through
|
python
|
scikit-learn/scikit-learn
|
sklearn/tests/test_pipeline.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tests/test_pipeline.py
|
BSD-3-Clause
|
def test_feature_union_getitem_error(key):
    """Raise error when __getitem__ gets a non-string input."""
    union = FeatureUnion([("scalar", StandardScaler()), ("pca", PCA())])

    msg = "Only string keys are supported"
    with pytest.raises(KeyError, match=msg):
        union[key]
|
Raise error when __getitem__ gets a non-string input.
|
test_feature_union_getitem_error
|
python
|
scikit-learn/scikit-learn
|
sklearn/tests/test_pipeline.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tests/test_pipeline.py
|
BSD-3-Clause
|
def test_feature_union_feature_names_in_():
    """Ensure feature union has `.feature_names_in_` attribute if `X` has a
    `columns` attribute.

    Test for #24754.
    """
    pytest.importorskip("pandas")
    X, _ = load_iris(as_frame=True, return_X_y=True)

    # FeatureUnion should have the feature_names_in_ attribute if the
    # first transformer also has it
    scaler = StandardScaler()
    scaler.fit(X)
    union = FeatureUnion([("scale", scaler)])
    assert hasattr(union, "feature_names_in_")
    assert_array_equal(X.columns, union.feature_names_in_)
    assert_array_equal(scaler.feature_names_in_, union.feature_names_in_)

    # fit with pandas.DataFrame
    union = FeatureUnion([("pass", "passthrough")])
    union.fit(X)
    assert hasattr(union, "feature_names_in_")
    assert_array_equal(X.columns, union.feature_names_in_)

    # fit with numpy array
    X_array = X.to_numpy()
    union = FeatureUnion([("pass", "passthrough")])
    union.fit(X_array)
    assert not hasattr(union, "feature_names_in_")
|
Ensure feature union has `.feature_names_in_` attribute if `X` has a
`columns` attribute.
Test for #24754.
|
test_feature_union_feature_names_in_
|
python
|
scikit-learn/scikit-learn
|
sklearn/tests/test_pipeline.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tests/test_pipeline.py
|
BSD-3-Clause
|
def test_transform_input_pipeline(method):
    """Test that with transform_input, data is correctly transformed for each step."""

    def get_transformer(registry, sample_weight, metadata):
        """Get a transformer with requests set."""
        return (
            ConsumingTransformer(registry=registry)
            .set_fit_request(sample_weight=sample_weight, metadata=metadata)
            .set_transform_request(sample_weight=sample_weight, metadata=metadata)
        )

    def get_pipeline():
        """Get a pipeline and corresponding registries.

        The pipeline has 4 steps, with different request values set to test different
        cases. One is aliased.
        """
        registry_1, registry_2, registry_3, registry_4 = (
            _Registry(),
            _Registry(),
            _Registry(),
            _Registry(),
        )
        pipe = make_pipeline(
            get_transformer(registry_1, sample_weight=True, metadata=True),
            get_transformer(registry_2, sample_weight=False, metadata=False),
            get_transformer(registry_3, sample_weight=True, metadata=True),
            get_transformer(registry_4, sample_weight="other_weights", metadata=True),
            transform_input=["sample_weight"],
        )
        return pipe, registry_1, registry_2, registry_3, registry_4

    def check_metadata(registry, methods, **metadata):
        """Check that the right metadata was recorded for the given methods."""
        assert registry
        for estimator in registry:
            for method in methods:
                check_recorded_metadata(
                    estimator,
                    method=method,
                    parent=method,
                    **metadata,
                )

    X = np.array([[1, 2], [3, 4]])
    y = np.array([0, 1])
    sample_weight = np.array([[1, 2]])
    other_weights = np.array([[30, 40]])
    metadata = np.array([[100, 200]])

    pipe, registry_1, registry_2, registry_3, registry_4 = get_pipeline()
    pipe.fit(
        X,
        y,
        sample_weight=sample_weight,
        other_weights=other_weights,
        metadata=metadata,
    )

    check_metadata(
        registry_1, ["fit", "transform"], sample_weight=sample_weight, metadata=metadata
    )
    check_metadata(registry_2, ["fit", "transform"])
    check_metadata(
        registry_3,
        ["fit", "transform"],
        sample_weight=sample_weight + 2,
        metadata=metadata,
    )
    check_metadata(
        registry_4,
        method.split("_"),  # ["fit", "transform"] if "fit_transform", ["fit"] otherwise
        sample_weight=other_weights + 3,
        metadata=metadata,
    )
|
Test that with transform_input, data is correctly transformed for each step.
|
test_transform_input_pipeline
|
python
|
scikit-learn/scikit-learn
|
sklearn/tests/test_pipeline.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tests/test_pipeline.py
|
BSD-3-Clause
|
def get_pipeline():
    """Get a pipeline and corresponding registries.

    The pipeline has 4 steps, with different request values set to test different
    cases. One is aliased.
    """
    registry_1, registry_2, registry_3, registry_4 = (
        _Registry(),
        _Registry(),
        _Registry(),
        _Registry(),
    )
    pipe = make_pipeline(
        get_transformer(registry_1, sample_weight=True, metadata=True),
        get_transformer(registry_2, sample_weight=False, metadata=False),
        get_transformer(registry_3, sample_weight=True, metadata=True),
        get_transformer(registry_4, sample_weight="other_weights", metadata=True),
        transform_input=["sample_weight"],
    )
    return pipe, registry_1, registry_2, registry_3, registry_4
|
Get a pipeline and corresponding registries.
The pipeline has 4 steps, with different request values set to test different
cases. One is aliased.
|
get_pipeline
|
python
|
scikit-learn/scikit-learn
|
sklearn/tests/test_pipeline.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tests/test_pipeline.py
|
BSD-3-Clause
|
def check_metadata(registry, methods, **metadata):
    """Check that the right metadata was recorded for the given methods."""
    assert registry
    for estimator in registry:
        for method in methods:
            check_recorded_metadata(
                estimator,
                method=method,
                parent=method,
                **metadata,
            )
|
Check that the right metadata was recorded for the given methods.
|
check_metadata
|
python
|
scikit-learn/scikit-learn
|
sklearn/tests/test_pipeline.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tests/test_pipeline.py
|
BSD-3-Clause
|
def test_transform_input_explicit_value_check():
    """Test that the right transformed values are passed to `fit`."""

    class Transformer(TransformerMixin, BaseEstimator):
        def fit(self, X, y):
            self.fitted_ = True
            return self

        def transform(self, X):
            return X + 1

    class Estimator(ClassifierMixin, BaseEstimator):
        def fit(self, X, y, X_val=None, y_val=None):
            assert_array_equal(X, np.array([[1, 2]]))
            assert_array_equal(y, np.array([0, 1]))
            assert_array_equal(X_val, np.array([[2, 3]]))
            assert_array_equal(y_val, np.array([0, 1]))
            return self

    X = np.array([[0, 1]])
    y = np.array([0, 1])
    X_val = np.array([[1, 2]])
    y_val = np.array([0, 1])
    pipe = Pipeline(
        [
            ("transformer", Transformer()),
            ("estimator", Estimator().set_fit_request(X_val=True, y_val=True)),
        ],
        transform_input=["X_val"],
    )
    pipe.fit(X, y, X_val=X_val, y_val=y_val)
|
Test that the right transformed values are passed to `fit`.
|
test_transform_input_explicit_value_check
|
python
|
scikit-learn/scikit-learn
|
sklearn/tests/test_pipeline.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tests/test_pipeline.py
|
BSD-3-Clause
|
def test_transform_input_no_slep6():
    """Make sure the right error is raised if slep6 is not enabled."""
    X = np.array([[1, 2], [3, 4]])
    y = np.array([0, 1])

    msg = "The `transform_input` parameter can only be set if metadata"
    with pytest.raises(ValueError, match=msg):
        make_pipeline(DummyTransf(), transform_input=["blah"]).fit(X, y)
|
Make sure the right error is raised if slep6 is not enabled.
|
test_transform_input_no_slep6
|
python
|
scikit-learn/scikit-learn
|
sklearn/tests/test_pipeline.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tests/test_pipeline.py
|
BSD-3-Clause
|
def test_transform_tuple_input():
    """Test that if metadata is a tuple of arrays, both arrays are transformed."""

    class Estimator(ClassifierMixin, BaseEstimator):
        def fit(self, X, y, X_val=None, y_val=None):
            assert isinstance(X_val, tuple)
            assert isinstance(y_val, tuple)
            # Here we make sure that each X_val is transformed by the transformer
            assert_array_equal(X_val[0], np.array([[2, 3]]))
            assert_array_equal(y_val[0], np.array([0, 1]))
            assert_array_equal(X_val[1], np.array([[11, 12]]))
            assert_array_equal(y_val[1], np.array([1, 2]))
            self.fitted_ = True
            return self

    class Transformer(TransformerMixin, BaseEstimator):
        def fit(self, X, y):
            self.fitted_ = True
            return self

        def transform(self, X):
            return X + 1

    X = np.array([[1, 2]])
    y = np.array([0, 1])
    X_val0 = np.array([[1, 2]])
    y_val0 = np.array([0, 1])
    X_val1 = np.array([[10, 11]])
    y_val1 = np.array([1, 2])
    pipe = Pipeline(
        [
            ("transformer", Transformer()),
            ("estimator", Estimator().set_fit_request(X_val=True, y_val=True)),
        ],
        transform_input=["X_val"],
    )
    pipe.fit(X, y, X_val=(X_val0, X_val1), y_val=(y_val0, y_val1))
|
Test that if metadata is a tuple of arrays, both arrays are transformed.
|
test_transform_tuple_input
|
python
|
scikit-learn/scikit-learn
|
sklearn/tests/test_pipeline.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tests/test_pipeline.py
|
BSD-3-Clause
|
def test_metadata_routing_for_pipeline(method):
    """Test that metadata is routed correctly for pipelines."""

    def set_request(est, method, **kwarg):
        """Set requests for a given method.

        If the given method is a composite method, set the same requests for
        all the methods that compose it.
        """
        if method in COMPOSITE_METHODS:
            methods = COMPOSITE_METHODS[method]
        else:
            methods = [method]

        for method in methods:
            getattr(est, f"set_{method}_request")(**kwarg)
        return est

    X, y = np.array([[1]]), np.array([1])
    sample_weight, prop, metadata = [1], "a", "b"

    # test that metadata is routed correctly for pipelines when requested
    est = SimpleEstimator()
    est = set_request(est, method, sample_weight=True, prop=True)
    est = set_request(est, "fit", sample_weight=True, prop=True)
    trs = (
        ConsumingTransformer()
        .set_fit_request(sample_weight=True, metadata=True)
        .set_transform_request(sample_weight=True, metadata=True)
        .set_inverse_transform_request(sample_weight=True, metadata=True)
    )
    pipeline = Pipeline([("trs", trs), ("estimator", est)])

    if "fit" not in method:
        pipeline = pipeline.fit(X, y, sample_weight=sample_weight, prop=prop)

    try:
        getattr(pipeline, method)(
            X, y, sample_weight=sample_weight, prop=prop, metadata=metadata
        )
    except TypeError:
        # Some methods don't accept y
        getattr(pipeline, method)(
            X, sample_weight=sample_weight, prop=prop, metadata=metadata
        )

    # Make sure the transformer has received the metadata
    # For the transformer, always only `fit` and `transform` are called.
    check_recorded_metadata(
        obj=trs,
        method="fit",
        parent="fit",
        sample_weight=sample_weight,
        metadata=metadata,
    )
    check_recorded_metadata(
        obj=trs,
        method="transform",
        parent="transform",
        sample_weight=sample_weight,
        metadata=metadata,
    )
|
Test that metadata is routed correctly for pipelines.
|
test_metadata_routing_for_pipeline
|
python
|
scikit-learn/scikit-learn
|
sklearn/tests/test_pipeline.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tests/test_pipeline.py
|
BSD-3-Clause
|
def set_request(est, method, **kwarg):
    """Set requests for a given method.

    If the given method is a composite method, set the same requests for
    all the methods that compose it.
    """
    if method in COMPOSITE_METHODS:
        methods = COMPOSITE_METHODS[method]
    else:
        methods = [method]

    for method in methods:
        getattr(est, f"set_{method}_request")(**kwarg)
    return est
|
Set requests for a given method.
If the given method is a composite method, set the same requests for
all the methods that compose it.
|
set_request
|
python
|
scikit-learn/scikit-learn
|
sklearn/tests/test_pipeline.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tests/test_pipeline.py
|
BSD-3-Clause
|
def test_metadata_routing_error_for_pipeline(method):
    """Test that metadata is not routed for pipelines when not requested."""
    X, y = [[1]], [1]
    sample_weight, prop = [1], "a"
    est = SimpleEstimator()
    # here not setting sample_weight request and leaving it as None
    pipeline = Pipeline([("estimator", est)])

    error_message = (
        "[sample_weight, prop] are passed but are not explicitly set as requested"
        f" or not requested for SimpleEstimator.{method}"
    )
    with pytest.raises(ValueError, match=re.escape(error_message)):
        try:
            # passing X, y positional as the first two arguments
            getattr(pipeline, method)(X, y, sample_weight=sample_weight, prop=prop)
        except TypeError:
            # not all methods accept y (like `predict`), so here we only
            # pass X as a positional arg.
            getattr(pipeline, method)(X, sample_weight=sample_weight, prop=prop)
|
Test that metadata is not routed for pipelines when not requested.
|
test_metadata_routing_error_for_pipeline
|
python
|
scikit-learn/scikit-learn
|
sklearn/tests/test_pipeline.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tests/test_pipeline.py
|
BSD-3-Clause
|
def test_routing_passed_metadata_not_supported(method):
    """Test that the right error message is raised when metadata is passed while
    not supported when `enable_metadata_routing=False`."""
    pipe = Pipeline([("estimator", SimpleEstimator())])

    with pytest.raises(
        ValueError, match="is only supported if enable_metadata_routing=True"
    ):
        getattr(pipe, method)([[1]], sample_weight=[1], prop="a")
|
Test that the right error message is raised when metadata is passed while
not supported when `enable_metadata_routing=False`.
|
test_routing_passed_metadata_not_supported
|
python
|
scikit-learn/scikit-learn
|
sklearn/tests/test_pipeline.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tests/test_pipeline.py
|
BSD-3-Clause
|
def test_pipeline_with_estimator_with_len():
    """Test that pipeline works with estimators that have a `__len__` method."""
    pipe = Pipeline(
        [("trs", RandomTreesEmbedding()), ("estimator", RandomForestClassifier())]
    )
    pipe.fit([[1]], [1])
    pipe.predict([[1]])
|
Test that pipeline works with estimators that have a `__len__` method.
|
test_pipeline_with_estimator_with_len
|
python
|
scikit-learn/scikit-learn
|
sklearn/tests/test_pipeline.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tests/test_pipeline.py
|
BSD-3-Clause
|
def test_pipeline_with_no_last_step(last_step):
    """Test that the pipeline works when there is no last step.

    It should just ignore and pass through the data on transform.
    """
    pipe = Pipeline([("trs", FunctionTransformer()), ("estimator", last_step)])
    assert pipe.fit([[1]], [1]).transform([[1], [2], [3]]) == [[1], [2], [3]]
|
Test that the pipeline works when there is no last step.
It should just ignore and pass through the data on transform.
|
test_pipeline_with_no_last_step
|
python
|
scikit-learn/scikit-learn
|
sklearn/tests/test_pipeline.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tests/test_pipeline.py
|
BSD-3-Clause
|
def test_feature_union_metadata_routing_error():
    """Test that the right error is raised when metadata is not requested."""
    X = np.array([[0, 1], [2, 2], [4, 6]])
    y = [1, 2, 3]
    sample_weight, metadata = [1, 1, 1], "a"

    # test lacking set_fit_request
    feature_union = FeatureUnion([("sub_transformer", ConsumingTransformer())])

    error_message = (
        "[sample_weight, metadata] are passed but are not explicitly set as requested"
        f" or not requested for {ConsumingTransformer.__name__}.fit"
    )
    with pytest.raises(UnsetMetadataPassedError, match=re.escape(error_message)):
        feature_union.fit(X, y, sample_weight=sample_weight, metadata=metadata)

    # test lacking set_transform_request
    feature_union = FeatureUnion(
        [
            (
                "sub_transformer",
                ConsumingTransformer().set_fit_request(
                    sample_weight=True, metadata=True
                ),
            )
        ]
    )
    error_message = (
        "[sample_weight, metadata] are passed but are not explicitly set as requested "
        f"or not requested for {ConsumingTransformer.__name__}.transform"
    )
    with pytest.raises(UnsetMetadataPassedError, match=re.escape(error_message)):
        feature_union.fit(
            X, y, sample_weight=sample_weight, metadata=metadata
        ).transform(X, sample_weight=sample_weight, metadata=metadata)
|
Test that the right error is raised when metadata is not requested.
|
test_feature_union_metadata_routing_error
|
python
|
scikit-learn/scikit-learn
|
sklearn/tests/test_pipeline.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tests/test_pipeline.py
|
BSD-3-Clause
|
def test_feature_union_metadata_routing(transformer):
    """Test that metadata is routed correctly for FeatureUnion."""
    X = np.array([[0, 1], [2, 2], [4, 6]])
    y = [1, 2, 3]
    sample_weight, metadata = [1, 1, 1], "a"

    feature_union = FeatureUnion(
        [
            (
                "sub_trans1",
                transformer(registry=_Registry())
                .set_fit_request(sample_weight=True, metadata=True)
                .set_transform_request(sample_weight=True, metadata=True),
            ),
            (
                "sub_trans2",
                transformer(registry=_Registry())
                .set_fit_request(sample_weight=True, metadata=True)
                .set_transform_request(sample_weight=True, metadata=True),
            ),
        ]
    )

    kwargs = {"sample_weight": sample_weight, "metadata": metadata}
    feature_union.fit(X, y, **kwargs)
    feature_union.fit_transform(X, y, **kwargs)
    feature_union.fit(X, y, **kwargs).transform(X, **kwargs)

    for transformer in feature_union.transformer_list:
        # access sub-transformer in (name, trans) with transformer[1]
        registry = transformer[1].registry
        assert len(registry)
        for sub_trans in registry:
            check_recorded_metadata(
                obj=sub_trans,
                method="fit",
                parent="fit",
                **kwargs,
            )
|
Test that metadata is routed correctly for FeatureUnion.
|
test_feature_union_metadata_routing
|
python
|
scikit-learn/scikit-learn
|
sklearn/tests/test_pipeline.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tests/test_pipeline.py
|
BSD-3-Clause
|
def _check_function_param_validation(
    func, func_name, func_params, required_params, parameter_constraints
):
    """Check that an informative error is raised when the value of a parameter does not
    have an appropriate type or value.
    """
    # generate valid values for the required parameters
    valid_required_params = {}
    for param_name in required_params:
        if parameter_constraints[param_name] == "no_validation":
            valid_required_params[param_name] = 1
        else:
            valid_required_params[param_name] = generate_valid_param(
                make_constraint(parameter_constraints[param_name][0])
            )

    # check that there is a constraint for each parameter
    if func_params:
        validation_params = parameter_constraints.keys()
        unexpected_params = set(validation_params) - set(func_params)
        missing_params = set(func_params) - set(validation_params)
        err_msg = (
            "Mismatch between _parameter_constraints and the parameters of"
            f" {func_name}.\nConsider the unexpected parameters {unexpected_params} and"
            f" expected but missing parameters {missing_params}\n"
        )
        assert set(validation_params) == set(func_params), err_msg

    # this object does not have a valid type for sure for all params
    param_with_bad_type = type("BadType", (), {})()

    for param_name in func_params:
        constraints = parameter_constraints[param_name]

        if constraints == "no_validation":
            # This parameter is not validated
            continue

        # Mixing an interval of reals and an interval of integers must be avoided.
        if any(
            isinstance(constraint, Interval) and constraint.type == Integral
            for constraint in constraints
        ) and any(
            isinstance(constraint, Interval) and constraint.type == Real
            for constraint in constraints
        ):
            raise ValueError(
                f"The constraint for parameter {param_name} of {func_name} can't have a"
                " mix of intervals of Integral and Real types. Use the type"
                " RealNotInt instead of Real."
            )

        match = (
            rf"The '{param_name}' parameter of {func_name} must be .* Got .* instead."
        )
        err_msg = (
            f"{func_name} does not raise an informative error message when the "
            f"parameter {param_name} does not have a valid type. If any Python type "
            "is valid, the constraint should be 'no_validation'."
        )

        # First, check that the error is raised if param doesn't match any valid type.
        with pytest.raises(InvalidParameterError, match=match):
            func(**{**valid_required_params, param_name: param_with_bad_type})
            pytest.fail(err_msg)

        # Then, for constraints that are more than a type constraint, check that the
        # error is raised if param does match a valid type but does not match any valid
        # value for this type.
        constraints = [make_constraint(constraint) for constraint in constraints]

        for constraint in constraints:
            try:
                bad_value = generate_invalid_param_val(constraint)
            except NotImplementedError:
                continue

            err_msg = (
                f"{func_name} does not raise an informative error message when the "
                f"parameter {param_name} does not have a valid value.\n"
                "Constraints should be disjoint. For instance "
                "[StrOptions({'a_string'}), str] is not an acceptable set of "
                "constraints because generating an invalid string for the first "
                "constraint will always produce a valid string for the second "
                "constraint."
            )

            with pytest.raises(InvalidParameterError, match=match):
                func(**{**valid_required_params, param_name: bad_value})
                pytest.fail(err_msg)
|
Check that an informative error is raised when the value of a parameter does not
have an appropriate type or value.
|
_check_function_param_validation
|
python
|
scikit-learn/scikit-learn
|
sklearn/tests/test_public_functions.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tests/test_public_functions.py
|
BSD-3-Clause
|
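As a concrete illustration of the contract checked above: public functions decorated with scikit-learn's `validate_params` raise `InvalidParameterError` with the standardized "The '<param>' parameter of <func> must be ... Got ... instead." message. A small hedged example, assuming a recent scikit-learn where `sklearn.preprocessing.normalize` validates its `norm` option:

import pytest
from sklearn.preprocessing import normalize
from sklearn.utils._param_validation import InvalidParameterError

# An unsupported option triggers the standardized validation message.
with pytest.raises(InvalidParameterError, match="The 'norm' parameter of normalize"):
    normalize([[1.0, 2.0]], norm="bad")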
def test_function_param_validation(func_module):
    """Check param validation for public functions that are not wrappers around
    estimators.
    """
    func, func_name, func_params, required_params = _get_func_info(func_module)

    parameter_constraints = getattr(func, "_skl_parameter_constraints")

    _check_function_param_validation(
        func, func_name, func_params, required_params, parameter_constraints
    )
|
Check param validation for public functions that are not wrappers around
estimators.
|
test_function_param_validation
|
python
|
scikit-learn/scikit-learn
|
sklearn/tests/test_public_functions.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tests/test_public_functions.py
|
BSD-3-Clause
|
def test_class_wrapper_param_validation(func_module, class_module):
    """Check param validation for public functions that are wrappers around
    estimators.
    """
    func, func_name, func_params, required_params = _get_func_info(func_module)

    module_name, class_name = class_module.rsplit(".", 1)
    module = import_module(module_name)
    klass = getattr(module, class_name)

    parameter_constraints_func = getattr(func, "_skl_parameter_constraints")
    parameter_constraints_class = getattr(klass, "_parameter_constraints")
    parameter_constraints = {
        **parameter_constraints_class,
        **parameter_constraints_func,
    }
    parameter_constraints = {
        k: v for k, v in parameter_constraints.items() if k in func_params
    }

    _check_function_param_validation(
        func, func_name, func_params, required_params, parameter_constraints
    )
|
Check param validation for public functions that are wrappers around
estimators.
|
test_class_wrapper_param_validation
|
python
|
scikit-learn/scikit-learn
|
sklearn/tests/test_public_functions.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tests/test_public_functions.py
|
BSD-3-Clause
|
def make_sparse_random_data(
    coo_container,
    n_samples,
    n_features,
    n_nonzeros,
    random_state=None,
    sparse_format="csr",
):
    """Make some random data with uniformly located non-zero entries with
    Gaussian distributed values; `sparse_format` can be `"csr"` (default) or
    `None` (in which case a dense array is returned).
    """
    rng = np.random.RandomState(random_state)
    data_coo = coo_container(
        (
            rng.randn(n_nonzeros),
            (
                rng.randint(n_samples, size=n_nonzeros),
                rng.randint(n_features, size=n_nonzeros),
            ),
        ),
        shape=(n_samples, n_features),
    )
    if sparse_format is not None:
        return data_coo.asformat(sparse_format)
    else:
        return data_coo.toarray()
|
Make some random data with uniformly located non-zero entries with
Gaussian distributed values; `sparse_format` can be `"csr"` (default) or
`None` (in which case a dense array is returned).
|
make_sparse_random_data
|
python
|
scikit-learn/scikit-learn
|
sklearn/tests/test_random_projection.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tests/test_random_projection.py
|
BSD-3-Clause
|
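A quick usage sketch of the helper above. In the test suite the `coo_container` argument is normally injected by a fixture covering the available sparse containers; calling it directly with `scipy.sparse.coo_matrix` here is our assumption:

from scipy.sparse import coo_matrix

# 50 samples x 20 features with 100 Gaussian-valued entries placed
# uniformly at random, reproducible through random_state.
X_csr = make_sparse_random_data(
    coo_matrix, n_samples=50, n_features=20, n_nonzeros=100, random_state=0
)
print(X_csr.format, X_csr.nnz)  # "csr"; nnz <= 100 since duplicate coordinates are summed

# Dense variant of the same data.
X_dense = make_sparse_random_data(
    coo_matrix, 50, 20, 100, random_state=0, sparse_format=None
)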
def _compute_missing_values_in_feature_mask(self, X, estimator_name=None):
    """Return boolean mask denoting if there are missing values for each feature.

    This method also ensures that X is finite.

    Parameters
    ----------
    X : array-like of shape (n_samples, n_features), dtype=DOUBLE
        Input data.

    estimator_name : str or None, default=None
        Name to use when raising an error. Defaults to the class name.

    Returns
    -------
    missing_values_in_feature_mask : ndarray of shape (n_features,), or None
        Missing value mask. If missing values are not supported or there
        are no missing values, return None.
    """
    estimator_name = estimator_name or self.__class__.__name__
    common_kwargs = dict(estimator_name=estimator_name, input_name="X")

    if not self._support_missing_values(X):
        assert_all_finite(X, **common_kwargs)
        return None

    with np.errstate(over="ignore"):
        overall_sum = np.sum(X)

    if not np.isfinite(overall_sum):
        # Raise a ValueError in case of the presence of an infinite element.
        _assert_all_finite_element_wise(X, xp=np, allow_nan=True, **common_kwargs)

    # If the sum is not nan, then there are no missing values
    if not np.isnan(overall_sum):
        return None

    missing_values_in_feature_mask = _any_isnan_axis0(X)
    return missing_values_in_feature_mask
|
Return boolean mask denoting if there are missing values for each feature.
This method also ensures that X is finite.
Parameters
----------
X : array-like of shape (n_samples, n_features), dtype=DOUBLE
Input data.
estimator_name : str or None, default=None
Name to use when raising an error. Defaults to the class name.
Returns
-------
missing_values_in_feature_mask : ndarray of shape (n_features,), or None
Missing value mask. If missing values are not supported or there
are no missing values, return None.
|
_compute_missing_values_in_feature_mask
|
python
|
scikit-learn/scikit-learn
|
sklearn/tree/_classes.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tree/_classes.py
|
BSD-3-Clause
|
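The method above uses a cheap screening trick: with overflow silenced, `np.sum(X)` is NaN iff X contains a NaN, so the per-column scan is only paid for when a NaN is known to exist. A standalone sketch of the same idea (`nan_feature_mask` is our illustrative name, not scikit-learn API):

import numpy as np

def nan_feature_mask(X):
    with np.errstate(over="ignore"):  # let very large sums overflow to inf
        total = np.sum(X)
    if not np.isfinite(total):
        # total is inf or NaN: reject infinities explicitly (NaN is allowed)
        if np.isinf(X).any():
            raise ValueError("Input X contains infinity.")
    if not np.isnan(total):
        return None  # finite sum: no NaN anywhere, skip the column scan
    # Pay for the O(n_samples * n_features) check only when needed.
    return np.isnan(X).any(axis=0)

X = np.array([[1.0, np.nan], [3.0, 4.0]])
print(nan_feature_mask(X))  # [False  True]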
def _validate_X_predict(self, X, check_input):
    """Validate the training data on predict (probabilities)."""
    if check_input:
        if self._support_missing_values(X):
            ensure_all_finite = "allow-nan"
        else:
            ensure_all_finite = True
        X = validate_data(
            self,
            X,
            dtype=DTYPE,
            accept_sparse="csr",
            reset=False,
            ensure_all_finite=ensure_all_finite,
        )
        if issparse(X) and (X.indices.dtype != np.intc or X.indptr.dtype != np.intc):
            raise ValueError("No support for np.int64 index based sparse matrices")
    else:
        # The number of features is checked regardless of `check_input`
        _check_n_features(self, X, reset=False)

    return X
|
Validate the training data on predict (probabilities).
|
_validate_X_predict
|
python
|
scikit-learn/scikit-learn
|
sklearn/tree/_classes.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tree/_classes.py
|
BSD-3-Clause
|
def predict(self, X, check_input=True):
    """Predict class or regression value for X.

    For a classification model, the predicted class for each sample in X is
    returned. For a regression model, the predicted value based on X is
    returned.

    Parameters
    ----------
    X : {array-like, sparse matrix} of shape (n_samples, n_features)
        The input samples. Internally, it will be converted to
        ``dtype=np.float32`` and if a sparse matrix is provided
        to a sparse ``csr_matrix``.

    check_input : bool, default=True
        Allow to bypass several input checking.
        Don't use this parameter unless you know what you're doing.

    Returns
    -------
    y : array-like of shape (n_samples,) or (n_samples, n_outputs)
        The predicted classes, or the predict values.
    """
    check_is_fitted(self)
    X = self._validate_X_predict(X, check_input)
    proba = self.tree_.predict(X)
    n_samples = X.shape[0]

    # Classification
    if is_classifier(self):
        if self.n_outputs_ == 1:
            return self.classes_.take(np.argmax(proba, axis=1), axis=0)
        else:
            class_type = self.classes_[0].dtype
            predictions = np.zeros((n_samples, self.n_outputs_), dtype=class_type)
            for k in range(self.n_outputs_):
                predictions[:, k] = self.classes_[k].take(
                    np.argmax(proba[:, k], axis=1), axis=0
                )
            return predictions

    # Regression
    else:
        if self.n_outputs_ == 1:
            return proba[:, 0]
        else:
            return proba[:, :, 0]
|
Predict class or regression value for X.
For a classification model, the predicted class for each sample in X is
returned. For a regression model, the predicted value based on X is
returned.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : bool, default=True
Allow to bypass several input checking.
Don't use this parameter unless you know what you're doing.
Returns
-------
y : array-like of shape (n_samples,) or (n_samples, n_outputs)
The predicted classes, or the predict values.
|
predict
|
python
|
scikit-learn/scikit-learn
|
sklearn/tree/_classes.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tree/_classes.py
|
BSD-3-Clause
|
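A minimal end-to-end use of the method documented above, fitting a `DecisionTreeClassifier` on the iris dataset; this is our sketch, not code from the source file:

from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier

X, y = load_iris(return_X_y=True)
clf = DecisionTreeClassifier(max_depth=3, random_state=0).fit(X, y)

print(clf.predict(X[:3]))        # predicted class labels, shape (3,)
print(clf.predict_proba(X[:3]))  # per-class leaf fractions, shape (3, 3)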
def apply(self, X, check_input=True):
    """Return the index of the leaf that each sample is predicted as.

    .. versionadded:: 0.17

    Parameters
    ----------
    X : {array-like, sparse matrix} of shape (n_samples, n_features)
        The input samples. Internally, it will be converted to
        ``dtype=np.float32`` and if a sparse matrix is provided
        to a sparse ``csr_matrix``.

    check_input : bool, default=True
        Allow to bypass several input checking.
        Don't use this parameter unless you know what you're doing.

    Returns
    -------
    X_leaves : array-like of shape (n_samples,)
        For each datapoint x in X, return the index of the leaf x
        ends up in. Leaves are numbered within
        ``[0; self.tree_.node_count)``, possibly with gaps in the
        numbering.
    """
    check_is_fitted(self)
    X = self._validate_X_predict(X, check_input)
    return self.tree_.apply(X)
|
Return the index of the leaf that each sample is predicted as.
.. versionadded:: 0.17
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : bool, default=True
Allow to bypass several input checking.
Don't use this parameter unless you know what you're doing.
Returns
-------
X_leaves : array-like of shape (n_samples,)
For each datapoint x in X, return the index of the leaf x
ends up in. Leaves are numbered within
``[0; self.tree_.node_count)``, possibly with gaps in the
numbering.
|
apply
|
python
|
scikit-learn/scikit-learn
|
sklearn/tree/_classes.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tree/_classes.py
|
BSD-3-Clause
|
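`apply` is useful for tree-based embeddings or for grouping samples by the leaf they fall into. A short sketch reusing `clf` and `X` from the previous example (again our illustration, not from the source file):

import numpy as np

leaves = clf.apply(X)     # leaf node id per sample, shape (n_samples,)
print(np.unique(leaves))  # ids of the leaves actually reached, with gaps

# Samples that share a leaf id received identical probability estimates.
same_leaf = leaves == leaves[0]
probas = clf.predict_proba(X[same_leaf])
assert np.all(probas == probas[0])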
def _prune_tree(self):
    """Prune tree using Minimal Cost-Complexity Pruning."""
    check_is_fitted(self)

    if self.ccp_alpha == 0.0:
        return

    # build pruned tree
    if is_classifier(self):
        n_classes = np.atleast_1d(self.n_classes_)
        pruned_tree = Tree(self.n_features_in_, n_classes, self.n_outputs_)
    else:
        pruned_tree = Tree(
            self.n_features_in_,
            # TODO: the tree shouldn't need this param
            np.array([1] * self.n_outputs_, dtype=np.intp),
            self.n_outputs_,
        )
    _build_pruned_tree_ccp(pruned_tree, self.tree_, self.ccp_alpha)

    self.tree_ = pruned_tree
|
Prune tree using Minimal Cost-Complexity Pruning.
|
_prune_tree
|
python
|
scikit-learn/scikit-learn
|
sklearn/tree/_classes.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tree/_classes.py
|
BSD-3-Clause
|
def cost_complexity_pruning_path(self, X, y, sample_weight=None):
    """Compute the pruning path during Minimal Cost-Complexity Pruning.

    See :ref:`minimal_cost_complexity_pruning` for details on the pruning
    process.

    Parameters
    ----------
    X : {array-like, sparse matrix} of shape (n_samples, n_features)
        The training input samples. Internally, it will be converted to
        ``dtype=np.float32`` and if a sparse matrix is provided
        to a sparse ``csc_matrix``.

    y : array-like of shape (n_samples,) or (n_samples, n_outputs)
        The target values (class labels) as integers or strings.

    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights. If None, then samples are equally weighted. Splits
        that would create child nodes with net zero or negative weight are
        ignored while searching for a split in each node. Splits are also
        ignored if they would result in any single class carrying a
        negative weight in either child node.

    Returns
    -------
    ccp_path : :class:`~sklearn.utils.Bunch`
        Dictionary-like object, with the following attributes.

        ccp_alphas : ndarray
            Effective alphas of subtree during pruning.

        impurities : ndarray
            Sum of the impurities of the subtree leaves for the
            corresponding alpha value in ``ccp_alphas``.
    """
    est = clone(self).set_params(ccp_alpha=0.0)
    est.fit(X, y, sample_weight=sample_weight)
    return Bunch(**ccp_pruning_path(est.tree_))
|
Compute the pruning path during Minimal Cost-Complexity Pruning.
See :ref:`minimal_cost_complexity_pruning` for details on the pruning
process.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The training input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csc_matrix``.
y : array-like of shape (n_samples,) or (n_samples, n_outputs)
The target values (class labels) as integers or strings.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. Splits are also
ignored if they would result in any single class carrying a
negative weight in either child node.
Returns
-------
ccp_path : :class:`~sklearn.utils.Bunch`
Dictionary-like object, with the following attributes.
ccp_alphas : ndarray
Effective alphas of subtree during pruning.
impurities : ndarray
Sum of the impurities of the subtree leaves for the
corresponding alpha value in ``ccp_alphas``.
|
cost_complexity_pruning_path
|
python
|
scikit-learn/scikit-learn
|
sklearn/tree/_classes.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tree/_classes.py
|
BSD-3-Clause
|
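The usual workflow with this method: compute candidate alphas on the training set, refit one tree per alpha, and pick the alpha with the best held-out score. A condensed sketch, modeled on scikit-learn's pruning example (our code, not from the source file):

import numpy as np
from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier

X, y = load_breast_cancer(return_X_y=True)
X_tr, X_te, y_tr, y_te = train_test_split(X, y, random_state=0)

path = DecisionTreeClassifier(random_state=0).cost_complexity_pruning_path(X_tr, y_tr)
# One refit per effective alpha; larger alpha prunes more aggressively.
scores = [
    DecisionTreeClassifier(random_state=0, ccp_alpha=a).fit(X_tr, y_tr).score(X_te, y_te)
    for a in path.ccp_alphas
]
best_alpha = path.ccp_alphas[int(np.argmax(scores))]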
def fit(self, X, y, sample_weight=None, check_input=True):
    """Build a decision tree classifier from the training set (X, y).

    Parameters
    ----------
    X : {array-like, sparse matrix} of shape (n_samples, n_features)
        The training input samples. Internally, it will be converted to
        ``dtype=np.float32`` and if a sparse matrix is provided
        to a sparse ``csc_matrix``.

    y : array-like of shape (n_samples,) or (n_samples, n_outputs)
        The target values (class labels) as integers or strings.

    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights. If None, then samples are equally weighted. Splits
        that would create child nodes with net zero or negative weight are
        ignored while searching for a split in each node. Splits are also
        ignored if they would result in any single class carrying a
        negative weight in either child node.

    check_input : bool, default=True
        Allow to bypass several input checking.
        Don't use this parameter unless you know what you're doing.

    Returns
    -------
    self : DecisionTreeClassifier
        Fitted estimator.
    """
    super()._fit(
        X,
        y,
        sample_weight=sample_weight,
        check_input=check_input,
    )
    return self
|
Build a decision tree classifier from the training set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The training input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csc_matrix``.
y : array-like of shape (n_samples,) or (n_samples, n_outputs)
The target values (class labels) as integers or strings.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. Splits are also
ignored if they would result in any single class carrying a
negative weight in either child node.
check_input : bool, default=True
Allow bypassing several input checks.
Don't use this parameter unless you know what you're doing.
Returns
-------
self : DecisionTreeClassifier
Fitted estimator.
|
fit
|
python
|
scikit-learn/scikit-learn
|
sklearn/tree/_classes.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tree/_classes.py
|
BSD-3-Clause
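A minimal sketch of fit on dense and sparse input; per the docstring, sparse input is converted to CSC internally, and on this all-dense-valued dataset both fits should produce the same tree (dataset choice is illustrative):
import numpy as np
from scipy.sparse import csc_matrix
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier

X, y = load_iris(return_X_y=True)
clf = DecisionTreeClassifier(max_depth=3, random_state=0).fit(X, y)
clf_sparse = DecisionTreeClassifier(max_depth=3, random_state=0).fit(csc_matrix(X), y)
# Identical data and random_state should give identical predictions here.
assert np.array_equal(clf.predict(X), clf_sparse.predict(X))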
|
def predict_proba(self, X, check_input=True):
"""Predict class probabilities of the input samples X.
The predicted class probability is the fraction of samples of the same
class in a leaf.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : bool, default=True
Allow bypassing several input checks.
Don't use this parameter unless you know what you're doing.
Returns
-------
proba : ndarray of shape (n_samples, n_classes) or list of n_outputs \
such arrays if n_outputs > 1
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute :term:`classes_`.
"""
check_is_fitted(self)
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
if self.n_outputs_ == 1:
return proba[:, : self.n_classes_]
else:
all_proba = []
for k in range(self.n_outputs_):
proba_k = proba[:, k, : self.n_classes_[k]]
all_proba.append(proba_k)
return all_proba
|
Predict class probabilities of the input samples X.
The predicted class probability is the fraction of samples of the same
class in a leaf.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : bool, default=True
Allow bypassing several input checks.
Don't use this parameter unless you know what you're doing.
Returns
-------
proba : ndarray of shape (n_samples, n_classes) or list of n_outputs such arrays if n_outputs > 1
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute :term:`classes_`.
|
predict_proba
|
python
|
scikit-learn/scikit-learn
|
sklearn/tree/_classes.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tree/_classes.py
|
BSD-3-Clause
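For a single-output classifier, the returned rows sum to one and the columns follow classes_; a small sketch:
import numpy as np
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier

X, y = load_iris(return_X_y=True)
clf = DecisionTreeClassifier(max_depth=2, random_state=0).fit(X, y)
proba = clf.predict_proba(X[:3])
assert proba.shape == (3, len(clf.classes_))
assert np.allclose(proba.sum(axis=1), 1.0)  # leaf class fractions sum to 1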
|
def predict_log_proba(self, X):
"""Predict class log-probabilities of the input samples X.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
proba : ndarray of shape (n_samples, n_classes) or list of n_outputs \
such arrays if n_outputs > 1
The class log-probabilities of the input samples. The order of the
classes corresponds to that in the attribute :term:`classes_`.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return np.log(proba)
else:
for k in range(self.n_outputs_):
proba[k] = np.log(proba[k])
return proba
|
Predict class log-probabilities of the input samples X.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
proba : ndarray of shape (n_samples, n_classes) or list of n_outputs such arrays if n_outputs > 1
The class log-probabilities of the input samples. The order of the
classes corresponds to that in the attribute :term:`classes_`.
|
predict_log_proba
|
python
|
scikit-learn/scikit-learn
|
sklearn/tree/_classes.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tree/_classes.py
|
BSD-3-Clause
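Since this is np.log applied to predict_proba, classes absent from a leaf come out as -inf; a sketch that silences the expected divide-by-zero warning:
import numpy as np
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier

X, y = load_iris(return_X_y=True)
clf = DecisionTreeClassifier(random_state=0).fit(X, y)
with np.errstate(divide="ignore"):  # log(0) -> -inf for absent classes
    log_proba = clf.predict_log_proba(X[:2])
    expected = np.log(clf.predict_proba(X[:2]))
assert np.array_equal(log_proba, expected)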
|
def fit(self, X, y, sample_weight=None, check_input=True):
"""Build a decision tree regressor from the training set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The training input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csc_matrix``.
y : array-like of shape (n_samples,) or (n_samples, n_outputs)
The target values (real numbers). Use ``dtype=np.float64`` and
``order='C'`` for maximum efficiency.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node.
check_input : bool, default=True
Allow bypassing several input checks.
Don't use this parameter unless you know what you're doing.
Returns
-------
self : DecisionTreeRegressor
Fitted estimator.
"""
super()._fit(
X,
y,
sample_weight=sample_weight,
check_input=check_input,
)
return self
|
Build a decision tree regressor from the training set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The training input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csc_matrix``.
y : array-like of shape (n_samples,) or (n_samples, n_outputs)
The target values (real numbers). Use ``dtype=np.float64`` and
``order='C'`` for maximum efficiency.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node.
check_input : bool, default=True
Allow bypassing several input checks.
Don't use this parameter unless you know what you're doing.
Returns
-------
self : DecisionTreeRegressor
Fitted estimator.
|
fit
|
python
|
scikit-learn/scikit-learn
|
sklearn/tree/_classes.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tree/_classes.py
|
BSD-3-Clause
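A minimal regression sketch on noisy sine data (the data generation is illustrative):
import numpy as np
from sklearn.tree import DecisionTreeRegressor

rng = np.random.RandomState(0)
X = np.sort(5 * rng.rand(80, 1), axis=0)
y = np.sin(X).ravel() + 0.1 * rng.randn(80)
reg = DecisionTreeRegressor(max_depth=3).fit(X, y)
print(reg.predict([[2.5]]))  # piecewise-constant prediction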
|
def _compute_partial_dependence_recursion(self, grid, target_features):
"""Fast partial dependence computation.
Parameters
----------
grid : ndarray of shape (n_samples, n_target_features), dtype=np.float32
The grid points on which the partial dependence should be
evaluated.
target_features : ndarray of shape (n_target_features), dtype=np.intp
The set of target features for which the partial dependence
should be evaluated.
Returns
-------
averaged_predictions : ndarray of shape (n_samples,), dtype=np.float64
The value of the partial dependence function on each grid point.
"""
grid = np.asarray(grid, dtype=DTYPE, order="C")
averaged_predictions = np.zeros(
shape=grid.shape[0], dtype=np.float64, order="C"
)
target_features = np.asarray(target_features, dtype=np.intp, order="C")
self.tree_.compute_partial_dependence(
grid, target_features, averaged_predictions
)
return averaged_predictions
|
Fast partial dependence computation.
Parameters
----------
grid : ndarray of shape (n_samples, n_target_features), dtype=np.float32
The grid points on which the partial dependence should be
evaluated.
target_features : ndarray of shape (n_target_features), dtype=np.intp
The set of target features for which the partial dependence
should be evaluated.
Returns
-------
averaged_predictions : ndarray of shape (n_samples,), dtype=np.float64
The value of the partial dependence function on each grid point.
|
_compute_partial_dependence_recursion
|
python
|
scikit-learn/scikit-learn
|
sklearn/tree/_classes.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tree/_classes.py
|
BSD-3-Clause
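This private hook backs the public sklearn.inspection.partial_dependence with method="recursion" for fitted trees; a sketch of the public entry point (result key names as in recent releases):
from sklearn.datasets import load_diabetes
from sklearn.inspection import partial_dependence
from sklearn.tree import DecisionTreeRegressor

X, y = load_diabetes(return_X_y=True)
reg = DecisionTreeRegressor(max_depth=4, random_state=0).fit(X, y)
# 'recursion' walks the fitted tree instead of re-predicting on a grid.
result = partial_dependence(reg, X, features=[0], method="recursion")
print(result["average"].shape, len(result["grid_values"][0]))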
|
def _color_brew(n):
"""Generate n colors with equally spaced hues.
Parameters
----------
n : int
The number of colors required.
Returns
-------
color_list : list, length n
List of n tuples of form (R, G, B) being the components of each color.
"""
color_list = []
# Initialize saturation & value; calculate chroma & value shift
s, v = 0.75, 0.9
c = s * v
m = v - c
for h in np.arange(25, 385, 360.0 / n).astype(int):
# Calculate some intermediate values
h_bar = h / 60.0
x = c * (1 - abs((h_bar % 2) - 1))
# Initialize RGB with same hue & chroma as our color
rgb = [
(c, x, 0),
(x, c, 0),
(0, c, x),
(0, x, c),
(x, 0, c),
(c, 0, x),
(c, x, 0),
]
r, g, b = rgb[int(h_bar)]
# Shift the initial RGB values to match value and store
rgb = [(int(255 * (r + m))), (int(255 * (g + m))), (int(255 * (b + m)))]
color_list.append(rgb)
return color_list
|
Generate n colors with equally spaced hues.
Parameters
----------
n : int
The number of colors required.
Returns
-------
color_list : list, length n
List of n tuples of form (R, G, B) being the components of each color.
|
_color_brew
|
python
|
scikit-learn/scikit-learn
|
sklearn/tree/_export.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tree/_export.py
|
BSD-3-Clause
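The helper is private, so the import path below is an assumption that may change between releases; calling it directly shows the output format:
from sklearn.tree._export import _color_brew  # private helper, illustration only

palette = _color_brew(4)
print(palette)  # four [R, G, B] triplets with evenly spaced hues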
|
def _compute_depth(tree, node):
"""
Returns the depth of the subtree rooted in node.
"""
def compute_depth_(
current_node, current_depth, children_left, children_right, depths
):
depths += [current_depth]
left = children_left[current_node]
right = children_right[current_node]
if left != -1 and right != -1:
compute_depth_(
left, current_depth + 1, children_left, children_right, depths
)
compute_depth_(
right, current_depth + 1, children_left, children_right, depths
)
depths = []
compute_depth_(node, 1, tree.children_left, tree.children_right, depths)
return max(depths)
|
Returns the depth of the subtree rooted in node.
|
_compute_depth
|
python
|
scikit-learn/scikit-learn
|
sklearn/tree/_export.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tree/_export.py
|
BSD-3-Clause
|
def export_text(
decision_tree,
*,
feature_names=None,
class_names=None,
max_depth=10,
spacing=3,
decimals=2,
show_weights=False,
):
"""Build a text report showing the rules of a decision tree.
Note that backwards compatibility may not be supported.
Parameters
----------
decision_tree : object
The decision tree estimator to be exported.
It can be an instance of
DecisionTreeClassifier or DecisionTreeRegressor.
feature_names : array-like of shape (n_features,), default=None
An array containing the feature names.
If None generic names will be used ("feature_0", "feature_1", ...).
class_names : array-like of shape (n_classes,), default=None
Names of each of the target classes in ascending numerical order.
Only relevant for classification and not supported for multi-output.
- if `None`, the class names are delegated to `decision_tree.classes_`;
- otherwise, `class_names` will be used as class names instead of
`decision_tree.classes_`. The length of `class_names` must match
the length of `decision_tree.classes_`.
.. versionadded:: 1.3
max_depth : int, default=10
Only the first max_depth levels of the tree are exported.
Truncated branches are marked with "truncated branch of depth N".
spacing : int, default=3
Number of spaces between edges. The higher it is, the wider the result.
decimals : int, default=2
Number of decimal digits to display.
show_weights : bool, default=False
If true, the classification weights will be exported on each leaf.
The classification weights are the number of samples of each class.
Returns
-------
report : str
Text summary of all the rules in the decision tree.
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn.tree import DecisionTreeClassifier
>>> from sklearn.tree import export_text
>>> iris = load_iris()
>>> X = iris['data']
>>> y = iris['target']
>>> decision_tree = DecisionTreeClassifier(random_state=0, max_depth=2)
>>> decision_tree = decision_tree.fit(X, y)
>>> r = export_text(decision_tree, feature_names=iris['feature_names'])
>>> print(r)
|--- petal width (cm) <= 0.80
| |--- class: 0
|--- petal width (cm) > 0.80
| |--- petal width (cm) <= 1.75
| | |--- class: 1
| |--- petal width (cm) > 1.75
| | |--- class: 2
"""
if feature_names is not None:
feature_names = check_array(
feature_names, ensure_2d=False, dtype=None, ensure_min_samples=0
)
if class_names is not None:
class_names = check_array(
class_names, ensure_2d=False, dtype=None, ensure_min_samples=0
)
check_is_fitted(decision_tree)
tree_ = decision_tree.tree_
if is_classifier(decision_tree):
if class_names is None:
class_names = decision_tree.classes_
elif len(class_names) != len(decision_tree.classes_):
raise ValueError(
"When `class_names` is an array, it should contain as"
" many items as `decision_tree.classes_`. Got"
f" {len(class_names)} while the tree was fitted with"
f" {len(decision_tree.classes_)} classes."
)
right_child_fmt = "{} {} <= {}\n"
left_child_fmt = "{} {} > {}\n"
truncation_fmt = "{} {}\n"
if feature_names is not None and len(feature_names) != tree_.n_features:
raise ValueError(
"feature_names must contain %d elements, got %d"
% (tree_.n_features, len(feature_names))
)
if isinstance(decision_tree, DecisionTreeClassifier):
value_fmt = "{}{} weights: {}\n"
if not show_weights:
value_fmt = "{}{}{}\n"
else:
value_fmt = "{}{} value: {}\n"
if feature_names is not None:
feature_names_ = [
feature_names[i] if i != _tree.TREE_UNDEFINED else None
for i in tree_.feature
]
else:
feature_names_ = ["feature_{}".format(i) for i in tree_.feature]
export_text.report = ""
def _add_leaf(value, weighted_n_node_samples, class_name, indent):
val = ""
if isinstance(decision_tree, DecisionTreeClassifier):
if show_weights:
val = [
"{1:.{0}f}, ".format(decimals, v * weighted_n_node_samples)
for v in value
]
val = "[" + "".join(val)[:-2] + "]"
val += " class: " + str(class_name)
else:
val = ["{1:.{0}f}, ".format(decimals, v) for v in value]
val = "[" + "".join(val)[:-2] + "]"
export_text.report += value_fmt.format(indent, "", val)
def print_tree_recurse(node, depth):
indent = ("|" + (" " * spacing)) * depth
indent = indent[:-spacing] + "-" * spacing
value = None
if tree_.n_outputs == 1:
value = tree_.value[node][0]
else:
value = tree_.value[node].T[0]
class_name = np.argmax(value)
if tree_.n_classes[0] != 1 and tree_.n_outputs == 1:
class_name = class_names[class_name]
weighted_n_node_samples = tree_.weighted_n_node_samples[node]
if depth <= max_depth + 1:
info_fmt = ""
info_fmt_left = info_fmt
info_fmt_right = info_fmt
if tree_.feature[node] != _tree.TREE_UNDEFINED:
name = feature_names_[node]
threshold = tree_.threshold[node]
threshold = "{1:.{0}f}".format(decimals, threshold)
export_text.report += right_child_fmt.format(indent, name, threshold)
export_text.report += info_fmt_left
print_tree_recurse(tree_.children_left[node], depth + 1)
export_text.report += left_child_fmt.format(indent, name, threshold)
export_text.report += info_fmt_right
print_tree_recurse(tree_.children_right[node], depth + 1)
else: # leaf
_add_leaf(value, weighted_n_node_samples, class_name, indent)
else:
subtree_depth = _compute_depth(tree_, node)
if subtree_depth == 1:
_add_leaf(value, weighted_n_node_samples, class_name, indent)
else:
trunc_report = "truncated branch of depth %d" % subtree_depth
export_text.report += truncation_fmt.format(indent, trunc_report)
print_tree_recurse(0, 1)
return export_text.report
|
Build a text report showing the rules of a decision tree.
Note that backwards compatibility may not be supported.
Parameters
----------
decision_tree : object
The decision tree estimator to be exported.
It can be an instance of
DecisionTreeClassifier or DecisionTreeRegressor.
feature_names : array-like of shape (n_features,), default=None
An array containing the feature names.
If None generic names will be used ("feature_0", "feature_1", ...).
class_names : array-like of shape (n_classes,), default=None
Names of each of the target classes in ascending numerical order.
Only relevant for classification and not supported for multi-output.
- if `None`, the class names are delegated to `decision_tree.classes_`;
- otherwise, `class_names` will be used as class names instead of
`decision_tree.classes_`. The length of `class_names` must match
the length of `decision_tree.classes_`.
.. versionadded:: 1.3
max_depth : int, default=10
Only the first max_depth levels of the tree are exported.
Truncated branches are marked with "truncated branch of depth N".
spacing : int, default=3
Number of spaces between edges. The higher it is, the wider the result.
decimals : int, default=2
Number of decimal digits to display.
show_weights : bool, default=False
If true, the classification weights will be exported on each leaf.
The classification weights are the number of samples of each class.
Returns
-------
report : str
Text summary of all the rules in the decision tree.
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn.tree import DecisionTreeClassifier
>>> from sklearn.tree import export_text
>>> iris = load_iris()
>>> X = iris['data']
>>> y = iris['target']
>>> decision_tree = DecisionTreeClassifier(random_state=0, max_depth=2)
>>> decision_tree = decision_tree.fit(X, y)
>>> r = export_text(decision_tree, feature_names=iris['feature_names'])
>>> print(r)
|--- petal width (cm) <= 0.80
| |--- class: 0
|--- petal width (cm) > 0.80
| |--- petal width (cm) <= 1.75
| | |--- class: 1
| |--- petal width (cm) > 1.75
| | |--- class: 2
|
export_text
|
python
|
scikit-learn/scikit-learn
|
sklearn/tree/_export.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tree/_export.py
|
BSD-3-Clause
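Beyond the docstring example, show_weights=True prints per-class weighted sample counts at each leaf; a short sketch:
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier, export_text

X, y = load_iris(return_X_y=True)
clf = DecisionTreeClassifier(max_depth=2, random_state=0).fit(X, y)
print(export_text(clf, show_weights=True, decimals=1))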
|
def check_min_weight_fraction_leaf(name, datasets, sparse_container=None):
"""Test if leaves contain at least min_weight_fraction_leaf of the
training set"""
X = DATASETS[datasets]["X"].astype(np.float32)
if sparse_container is not None:
X = sparse_container(X)
y = DATASETS[datasets]["y"]
weights = rng.rand(X.shape[0])
total_weight = np.sum(weights)
TreeEstimator = ALL_TREES[name]
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes, frac in product((None, 1000), np.linspace(0, 0.5, 6)):
est = TreeEstimator(
min_weight_fraction_leaf=frac, max_leaf_nodes=max_leaf_nodes, random_state=0
)
est.fit(X, y, sample_weight=weights)
if sparse_container is not None:
out = est.tree_.apply(X.tocsr())
else:
out = est.tree_.apply(X)
node_weights = np.bincount(out, weights=weights)
# drop inner nodes
leaf_weights = node_weights[node_weights != 0]
assert np.min(leaf_weights) >= total_weight * est.min_weight_fraction_leaf, (
"Failed with {0} min_weight_fraction_leaf={1}".format(
name, est.min_weight_fraction_leaf
)
)
# test case with no weights passed in
total_weight = X.shape[0]
for max_leaf_nodes, frac in product((None, 1000), np.linspace(0, 0.5, 6)):
est = TreeEstimator(
min_weight_fraction_leaf=frac, max_leaf_nodes=max_leaf_nodes, random_state=0
)
est.fit(X, y)
if sparse_container is not None:
out = est.tree_.apply(X.tocsr())
else:
out = est.tree_.apply(X)
node_weights = np.bincount(out)
# drop inner nodes
leaf_weights = node_weights[node_weights != 0]
assert np.min(leaf_weights) >= total_weight * est.min_weight_fraction_leaf, (
"Failed with {0} min_weight_fraction_leaf={1}".format(
name, est.min_weight_fraction_leaf
)
)
|
Test if leaves contain at least min_weight_fraction_leaf of the
training set
|
check_min_weight_fraction_leaf
|
python
|
scikit-learn/scikit-learn
|
sklearn/tree/tests/test_tree.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tree/tests/test_tree.py
|
BSD-3-Clause
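The invariant this helper checks can be reproduced on any fitted tree with the public apply method; a minimal sketch (parameter values are illustrative):
import numpy as np
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier

X, y = load_iris(return_X_y=True)
weights = np.random.RandomState(0).rand(X.shape[0])
clf = DecisionTreeClassifier(min_weight_fraction_leaf=0.2, random_state=0)
clf.fit(X, y, sample_weight=weights)
# Total weight per leaf; inner nodes collect no samples from apply().
leaf_weights = np.bincount(clf.apply(X), weights=weights)
leaf_weights = leaf_weights[leaf_weights > 0]
assert leaf_weights.min() >= 0.2 * weights.sum()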
|
def check_min_weight_fraction_leaf_with_min_samples_leaf(
name, datasets, sparse_container=None
):
"""Test the interaction between min_weight_fraction_leaf and
min_samples_leaf when sample_weight is not provided in fit."""
X = DATASETS[datasets]["X"].astype(np.float32)
if sparse_container is not None:
X = sparse_container(X)
y = DATASETS[datasets]["y"]
total_weight = X.shape[0]
TreeEstimator = ALL_TREES[name]
for max_leaf_nodes, frac in product((None, 1000), np.linspace(0, 0.5, 3)):
# test integer min_samples_leaf
est = TreeEstimator(
min_weight_fraction_leaf=frac,
max_leaf_nodes=max_leaf_nodes,
min_samples_leaf=5,
random_state=0,
)
est.fit(X, y)
if sparse_container is not None:
out = est.tree_.apply(X.tocsr())
else:
out = est.tree_.apply(X)
node_weights = np.bincount(out)
# drop inner nodes
leaf_weights = node_weights[node_weights != 0]
assert np.min(leaf_weights) >= max(
(total_weight * est.min_weight_fraction_leaf), 5
), "Failed with {0} min_weight_fraction_leaf={1}, min_samples_leaf={2}".format(
name, est.min_weight_fraction_leaf, est.min_samples_leaf
)
for max_leaf_nodes, frac in product((None, 1000), np.linspace(0, 0.5, 3)):
# test float min_samples_leaf
est = TreeEstimator(
min_weight_fraction_leaf=frac,
max_leaf_nodes=max_leaf_nodes,
min_samples_leaf=0.1,
random_state=0,
)
est.fit(X, y)
if sparse_container is not None:
out = est.tree_.apply(X.tocsr())
else:
out = est.tree_.apply(X)
node_weights = np.bincount(out)
# drop inner nodes
leaf_weights = node_weights[node_weights != 0]
assert np.min(leaf_weights) >= max(
(total_weight * est.min_weight_fraction_leaf),
(total_weight * est.min_samples_leaf),
), "Failed with {0} min_weight_fraction_leaf={1}, min_samples_leaf={2}".format(
name, est.min_weight_fraction_leaf, est.min_samples_leaf
)
|
Test the interaction between min_weight_fraction_leaf and
min_samples_leaf when sample_weight is not provided in fit.
|
check_min_weight_fraction_leaf_with_min_samples_leaf
|
python
|
scikit-learn/scikit-learn
|
sklearn/tree/tests/test_tree.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tree/tests/test_tree.py
|
BSD-3-Clause
|
def test_pickle():
"""Test pickling preserves Tree properties and performance."""
for name, TreeEstimator in ALL_TREES.items():
if "Classifier" in name:
X, y = iris.data, iris.target
else:
X, y = diabetes.data, diabetes.target
est = TreeEstimator(random_state=0)
est.fit(X, y)
score = est.score(X, y)
# test that all class properties are maintained
attributes = [
"max_depth",
"node_count",
"capacity",
"n_classes",
"children_left",
"children_right",
"n_leaves",
"feature",
"threshold",
"impurity",
"n_node_samples",
"weighted_n_node_samples",
"value",
]
fitted_attribute = {
attribute: getattr(est.tree_, attribute) for attribute in attributes
}
serialized_object = pickle.dumps(est)
est2 = pickle.loads(serialized_object)
assert type(est2) == est.__class__
score2 = est2.score(X, y)
assert score == score2, (
"Failed to generate same score after pickling with {0}".format(name)
)
for attribute in fitted_attribute:
assert_array_equal(
getattr(est2.tree_, attribute),
fitted_attribute[attribute],
err_msg=(
f"Failed to generate same attribute {attribute} after pickling with"
f" {name}"
),
)
|
Test pickling preserves Tree properties and performance.
|
test_pickle
|
python
|
scikit-learn/scikit-learn
|
sklearn/tree/tests/test_tree.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tree/tests/test_tree.py
|
BSD-3-Clause
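The round-trip property the test verifies, in its smallest form (dataset choice is illustrative):
import pickle
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier

X, y = load_iris(return_X_y=True)
est = DecisionTreeClassifier(random_state=0).fit(X, y)
restored = pickle.loads(pickle.dumps(est))
assert restored.score(X, y) == est.score(X, y)  # identical tree after round-trip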
|
def test_mae():
"""Check MAE criterion produces correct results on small toy dataset:
------------------
| X | y | weight |
------------------
| 3 | 3 | 0.1 |
| 5 | 3 | 0.3 |
| 8 | 4 | 1.0 |
| 3 | 6 | 0.6 |
| 5 | 7 | 0.3 |
------------------
|sum wt:| 2.3 |
------------------
Because we are dealing with sample weights, we cannot find the median by
simply choosing/averaging the centre value(s); instead we consider the
median where 50% of the cumulative weight is reached (in a y-sorted data
set). Therefore, with regard to this test data, the cumulative weight is
>= 50% when y = 4. Therefore:
Median = 4
For all the samples, we can get the total error by summing:
Absolute(Median - y) * weight
I.e., total error = (Absolute(4 - 3) * 0.1)
+ (Absolute(4 - 3) * 0.3)
+ (Absolute(4 - 4) * 1.0)
+ (Absolute(4 - 6) * 0.6)
+ (Absolute(4 - 7) * 0.3)
= 2.5
Impurity = Total error / total weight
= 2.5 / 2.3
= 1.08695652173913
------------------
From this root node, the next best split is between X values of 3 and 5.
Thus, we have left and right child nodes:
LEFT RIGHT
------------------ ------------------
| X | y | weight | | X | y | weight |
------------------ ------------------
| 3 | 3 | 0.1 | | 5 | 3 | 0.3 |
| 3 | 6 | 0.6 | | 8 | 4 | 1.0 |
------------------ | 5 | 7 | 0.3 |
|sum wt:| 0.7 | ------------------
------------------ |sum wt:| 1.6 |
------------------
Impurity is found in the same way:
Left node Median = 6
Total error = (Absolute(6 - 3) * 0.1)
+ (Absolute(6 - 6) * 0.6)
= 0.3
Left Impurity = Total error / total weight
= 0.3 / 0.7
= 0.428571428571429
-------------------
Likewise for Right node:
Right node Median = 4
Total error = (Absolute(4 - 3) * 0.3)
+ (Absolute(4 - 4) * 1.0)
+ (Absolute(4 - 7) * 0.3)
= 1.2
Right Impurity = Total error / total weight
= 1.2 / 1.6
= 0.75
------
"""
dt_mae = DecisionTreeRegressor(
random_state=0, criterion="absolute_error", max_leaf_nodes=2
)
# Test MAE where sample weights are non-uniform (as illustrated above):
dt_mae.fit(
X=[[3], [5], [3], [8], [5]],
y=[6, 7, 3, 4, 3],
sample_weight=[0.6, 0.3, 0.1, 1.0, 0.3],
)
assert_allclose(dt_mae.tree_.impurity, [2.5 / 2.3, 0.3 / 0.7, 1.2 / 1.6])
assert_array_equal(dt_mae.tree_.value.flat, [4.0, 6.0, 4.0])
# Test MAE where all sample weights are uniform:
dt_mae.fit(X=[[3], [5], [3], [8], [5]], y=[6, 7, 3, 4, 3], sample_weight=np.ones(5))
assert_array_equal(dt_mae.tree_.impurity, [1.4, 1.5, 4.0 / 3.0])
assert_array_equal(dt_mae.tree_.value.flat, [4, 4.5, 4.0])
# Test MAE where a `sample_weight` is not explicitly provided.
# This is equivalent to providing uniform sample weights, though
# the internal logic is different:
dt_mae.fit(X=[[3], [5], [3], [8], [5]], y=[6, 7, 3, 4, 3])
assert_array_equal(dt_mae.tree_.impurity, [1.4, 1.5, 4.0 / 3.0])
assert_array_equal(dt_mae.tree_.value.flat, [4, 4.5, 4.0])
|
Check MAE criterion produces correct results on small toy dataset:
------------------
| X | y | weight |
------------------
| 3 | 3 | 0.1 |
| 5 | 3 | 0.3 |
| 8 | 4 | 1.0 |
| 3 | 6 | 0.6 |
| 5 | 7 | 0.3 |
------------------
|sum wt:| 2.3 |
------------------
Because we are dealing with sample weights, we cannot find the median by
simply choosing/averaging the centre value(s); instead we consider the
median where 50% of the cumulative weight is reached (in a y-sorted data
set). Therefore, with regard to this test data, the cumulative weight is
>= 50% when y = 4. Therefore:
Median = 4
For all the samples, we can get the total error by summing:
Absolute(Median - y) * weight
I.e., total error = (Absolute(4 - 3) * 0.1)
+ (Absolute(4 - 3) * 0.3)
+ (Absolute(4 - 4) * 1.0)
+ (Absolute(4 - 6) * 0.6)
+ (Absolute(4 - 7) * 0.3)
= 2.5
Impurity = Total error / total weight
= 2.5 / 2.3
= 1.08695652173913
------------------
From this root node, the next best split is between X values of 3 and 5.
Thus, we have left and right child nodes:
LEFT RIGHT
------------------ ------------------
| X | y | weight | | X | y | weight |
------------------ ------------------
| 3 | 3 | 0.1 | | 5 | 3 | 0.3 |
| 3 | 6 | 0.6 | | 8 | 4 | 1.0 |
------------------ | 5 | 7 | 0.3 |
|sum wt:| 0.7 | ------------------
------------------ |sum wt:| 1.6 |
------------------
Impurity is found in the same way:
Left node Median = 6
Total error = (Absolute(6 - 3) * 0.1)
+ (Absolute(6 - 6) * 0.6)
= 0.3
Left Impurity = Total error / total weight
= 0.3 / 0.7
= 0.428571428571429
-------------------
Likewise for Right node:
Right node Median = 4
Total error = (Absolute(4 - 3) * 0.3)
+ (Absolute(4 - 4) * 1.0)
+ (Absolute(4 - 7) * 0.3)
= 1.2
Right Impurity = Total error / total weight
= 1.2 / 1.6
= 0.75
------
|
test_mae
|
python
|
scikit-learn/scikit-learn
|
sklearn/tree/tests/test_tree.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tree/tests/test_tree.py
|
BSD-3-Clause
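The weighted-median arithmetic in the docstring can be verified directly with numpy; a minimal check of the root-node impurity:
import numpy as np

y = np.array([3.0, 3.0, 4.0, 6.0, 7.0])
w = np.array([0.1, 0.3, 1.0, 0.6, 0.3])
# Weighted median: first y value (in sorted order) at which the
# cumulative weight reaches half of the total weight.
order = np.argsort(y)
cum_w = np.cumsum(w[order])
median = y[order][np.searchsorted(cum_w, 0.5 * w.sum())]
impurity = np.sum(np.abs(median - y) * w) / w.sum()
print(median, impurity)  # 4.0 and 2.5 / 2.3 ~= 1.0870, as derived above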
|
def test_criterion_entropy_same_as_log_loss(Tree, n_classes):
"""Test that criterion=entropy gives same as log_loss."""
n_samples, n_features = 50, 5
X, y = datasets.make_classification(
n_classes=n_classes,
n_samples=n_samples,
n_features=n_features,
n_informative=n_features,
n_redundant=0,
random_state=42,
)
tree_log_loss = Tree(criterion="log_loss", random_state=43).fit(X, y)
tree_entropy = Tree(criterion="entropy", random_state=43).fit(X, y)
assert_tree_equal(
tree_log_loss.tree_,
tree_entropy.tree_,
f"{Tree!r} with criterion 'entropy' and 'log_loss' gave different trees.",
)
assert_allclose(tree_log_loss.predict(X), tree_entropy.predict(X))
|
Test that criterion=entropy gives same as log_loss.
|
test_criterion_entropy_same_as_log_loss
|
python
|
scikit-learn/scikit-learn
|
sklearn/tree/tests/test_tree.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tree/tests/test_tree.py
|
BSD-3-Clause
|
def test_tree_deserialization_from_read_only_buffer(tmpdir):
"""Check that Trees can be deserialized with read only buffers.
Non-regression test for gh-25584.
"""
pickle_path = str(tmpdir.join("clf.joblib"))
clf = DecisionTreeClassifier(random_state=0)
clf.fit(X_small, y_small)
joblib.dump(clf, pickle_path)
loaded_clf = joblib.load(pickle_path, mmap_mode="r")
assert_tree_equal(
loaded_clf.tree_,
clf.tree_,
"The trees of the original and loaded classifiers are not equal.",
)
|
Check that Trees can be deserialized with read only buffers.
Non-regression test for gh-25584.
|
test_tree_deserialization_from_read_only_buffer
|
python
|
scikit-learn/scikit-learn
|
sklearn/tree/tests/test_tree.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tree/tests/test_tree.py
|
BSD-3-Clause
|
def test_min_sample_split_1_error(Tree):
"""Check that an error is raised when min_sample_split=1.
non-regression test for issue gh-25481.
"""
X = np.array([[0, 0], [1, 1]])
y = np.array([0, 1])
# min_samples_split=1.0 is valid
Tree(min_samples_split=1.0).fit(X, y)
# min_samples_split=1 is invalid
tree = Tree(min_samples_split=1)
msg = (
r"'min_samples_split' .* must be an int in the range \[2, inf\) "
r"or a float in the range \(0.0, 1.0\]"
)
with pytest.raises(ValueError, match=msg):
tree.fit(X, y)
|
Check that an error is raised when min_sample_split=1.
non-regression test for issue gh-25481.
|
test_min_sample_split_1_error
|
python
|
scikit-learn/scikit-learn
|
sklearn/tree/tests/test_tree.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tree/tests/test_tree.py
|
BSD-3-Clause
|
def test_missing_values_best_splitter_on_equal_nodes_no_missing(criterion):
"""Check missing values goes to correct node during predictions."""
X = np.array([[0, 1, 2, 3, 8, 9, 11, 12, 15]]).T
y = np.array([0.1, 0.2, 0.3, 0.2, 1.4, 1.4, 1.5, 1.6, 2.6])
dtc = DecisionTreeRegressor(random_state=42, max_depth=1, criterion=criterion)
dtc.fit(X, y)
# Goes to right node because it has the most data points
y_pred = dtc.predict([[np.nan]])
assert_allclose(y_pred, [np.mean(y[-5:])])
# equal number of elements in both nodes
X_equal = X[:-1]
y_equal = y[:-1]
dtc = DecisionTreeRegressor(random_state=42, max_depth=1, criterion=criterion)
dtc.fit(X_equal, y_equal)
# Goes to right node because the implementation sets:
# missing_go_to_left = n_left > n_right, which is False
y_pred = dtc.predict([[np.nan]])
assert_allclose(y_pred, [np.mean(y_equal[-4:])])
|
Check missing values go to the correct node during predictions.
|
test_missing_values_best_splitter_on_equal_nodes_no_missing
|
python
|
scikit-learn/scikit-learn
|
sklearn/tree/tests/test_tree.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tree/tests/test_tree.py
|
BSD-3-Clause
|
def test_missing_values_random_splitter_on_equal_nodes_no_missing(criterion, seed):
"""Check missing values go to the correct node during predictions for ExtraTree.
Since extra trees use random splits, we use different seeds to verify that the
left/right node is chosen correctly when the splits occur.
"""
X = np.array([[0, 1, 2, 3, 8, 9, 11, 12, 15]]).T
y = np.array([0.1, 0.2, 0.3, 0.2, 1.4, 1.4, 1.5, 1.6, 2.6])
etr = ExtraTreeRegressor(random_state=seed, max_depth=1, criterion=criterion)
etr.fit(X, y)
# Get the left and right children of the root node
left_child = etr.tree_.children_left[0]
right_child = etr.tree_.children_right[0]
# Get the number of samples for the left and right children
left_samples = etr.tree_.weighted_n_node_samples[left_child]
right_samples = etr.tree_.weighted_n_node_samples[right_child]
went_left = left_samples > right_samples
# predictions
y_pred_left = etr.tree_.value[left_child][0]
y_pred_right = etr.tree_.value[right_child][0]
# Goes to node with the most data points
y_pred = etr.predict([[np.nan]])
if went_left:
assert_allclose(y_pred_left, y_pred)
else:
assert_allclose(y_pred_right, y_pred)
|
Check missing values go to the correct node during predictions for ExtraTree.
Since extra trees use random splits, we use different seeds to verify that the
left/right node is chosen correctly when the splits occur.
|
test_missing_values_random_splitter_on_equal_nodes_no_missing
|
python
|
scikit-learn/scikit-learn
|
sklearn/tree/tests/test_tree.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tree/tests/test_tree.py
|
BSD-3-Clause
|
def test_missing_values_best_splitter_three_classes(criterion):
"""Test when missing values are uniquely present in a class among 3 classes."""
missing_values_class = 0
X = np.array([[np.nan] * 4 + [0, 1, 2, 3, 8, 9, 11, 12]]).T
y = np.array([missing_values_class] * 4 + [1] * 4 + [2] * 4)
dtc = DecisionTreeClassifier(random_state=42, max_depth=2, criterion=criterion)
dtc.fit(X, y)
X_test = np.array([[np.nan, 3, 12]]).T
y_nan_pred = dtc.predict(X_test)
# Missing values are necessarily associated with the observed class.
assert_array_equal(y_nan_pred, [missing_values_class, 1, 2])
|
Test when missing values are uniquely present in a class among 3 classes.
|
test_missing_values_best_splitter_three_classes
|
python
|
scikit-learn/scikit-learn
|
sklearn/tree/tests/test_tree.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tree/tests/test_tree.py
|
BSD-3-Clause
|
def test_missing_values_best_splitter_to_left(criterion):
"""Missing values spanning only one class at fit-time must make missing
values at predict-time be classified as belonging to this class."""
X = np.array([[np.nan] * 4 + [0, 1, 2, 3, 4, 5]]).T
y = np.array([0] * 4 + [1] * 6)
dtc = DecisionTreeClassifier(random_state=42, max_depth=2, criterion=criterion)
dtc.fit(X, y)
X_test = np.array([[np.nan, 5, np.nan]]).T
y_pred = dtc.predict(X_test)
assert_array_equal(y_pred, [0, 1, 0])
|
Missing values spanning only one class at fit-time must make missing
values at predict-time be classified as belonging to this class.
|
test_missing_values_best_splitter_to_left
|
python
|
scikit-learn/scikit-learn
|
sklearn/tree/tests/test_tree.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tree/tests/test_tree.py
|
BSD-3-Clause
|
def test_missing_values_best_splitter_to_right(criterion):
"""Missing values and non-missing values sharing one class at fit-time
must make missing values at predict-time be classified as belonging
to this class."""
X = np.array([[np.nan] * 4 + [0, 1, 2, 3, 4, 5]]).T
y = np.array([1] * 4 + [0] * 4 + [1] * 2)
dtc = DecisionTreeClassifier(random_state=42, max_depth=2, criterion=criterion)
dtc.fit(X, y)
X_test = np.array([[np.nan, 1.2, 4.8]]).T
y_pred = dtc.predict(X_test)
assert_array_equal(y_pred, [1, 0, 1])
|
Missing values and non-missing values sharing one class at fit-time
must make missing values at predict-time be classified as belonging
to this class.
|
test_missing_values_best_splitter_to_right
|
python
|
scikit-learn/scikit-learn
|
sklearn/tree/tests/test_tree.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tree/tests/test_tree.py
|
BSD-3-Clause
|
def test_missing_values_best_splitter_missing_both_classes_has_nan(criterion):
"""Check behavior of missing value when there is one missing value in each class."""
X = np.array([[1, 2, 3, 5, np.nan, 10, 20, 30, 60, np.nan]]).T
y = np.array([0] * 5 + [1] * 5)
dtc = DecisionTreeClassifier(random_state=42, max_depth=1, criterion=criterion)
dtc.fit(X, y)
X_test = np.array([[np.nan, 2.3, 34.2]]).T
y_pred = dtc.predict(X_test)
# Missing value goes to the class at the right (here 1) because the implementation
# searches right first.
assert_array_equal(y_pred, [1, 0, 1])
|
Check behavior of missing value when there is one missing value in each class.
|
test_missing_values_best_splitter_missing_both_classes_has_nan
|
python
|
scikit-learn/scikit-learn
|
sklearn/tree/tests/test_tree.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tree/tests/test_tree.py
|
BSD-3-Clause
|
def test_missing_value_errors(sparse_container, tree):
"""Check unsupported configurations for missing values."""
X = np.array([[1, 2, 3, 5, np.nan, 10, 20, 30, 60, np.nan]]).T
y = np.array([0] * 5 + [1] * 5)
if sparse_container is not None:
X = sparse_container(X)
with pytest.raises(ValueError, match="Input X contains NaN"):
tree.fit(X, y)
|
Check unsupported configurations for missing values.
|
test_missing_value_errors
|
python
|
scikit-learn/scikit-learn
|
sklearn/tree/tests/test_tree.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tree/tests/test_tree.py
|
BSD-3-Clause
|
def test_missing_values_poisson(Tree):
"""Smoke test for poisson regression and missing values."""
X, y = diabetes.data.copy(), diabetes.target
# Set some values missing
X[::5, 0] = np.nan
X[::6, -1] = np.nan
reg = Tree(criterion="poisson", random_state=42)
reg.fit(X, y)
y_pred = reg.predict(X)
assert (y_pred >= 0.0).all()
|
Smoke test for poisson regression and missing values.
|
test_missing_values_poisson
|
python
|
scikit-learn/scikit-learn
|
sklearn/tree/tests/test_tree.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tree/tests/test_tree.py
|
BSD-3-Clause
|
def test_missing_values_is_resilience(
make_data, Tree, sample_weight_train, global_random_seed, tolerance
):
"""Check that trees can deal with missing values have decent performance."""
n_samples, n_features = 5_000, 10
X, y = make_data(
n_samples=n_samples,
n_features=n_features,
noise=1.0,
random_state=global_random_seed,
)
X_missing = X.copy()
rng = np.random.RandomState(global_random_seed)
X_missing[rng.choice([False, True], size=X.shape, p=[0.9, 0.1])] = np.nan
X_missing_train, X_missing_test, y_train, y_test = train_test_split(
X_missing, y, random_state=global_random_seed
)
if sample_weight_train == "ones":
sample_weight = np.ones(X_missing_train.shape[0])
else:
sample_weight = None
# max_depth is used to avoid overfitting and also improve the runtime
# of the test.
max_depth = 10
native_tree = Tree(max_depth=max_depth, random_state=global_random_seed)
native_tree.fit(X_missing_train, y_train, sample_weight=sample_weight)
score_native_tree = native_tree.score(X_missing_test, y_test)
tree_with_imputer = make_pipeline(
SimpleImputer(), Tree(max_depth=max_depth, random_state=global_random_seed)
)
tree_with_imputer.fit(X_missing_train, y_train)
score_tree_with_imputer = tree_with_imputer.score(X_missing_test, y_test)
assert score_native_tree + tolerance > score_tree_with_imputer, (
f"{score_native_tree=} + {tolerance} should be strictly greater than"
f" {score_tree_with_imputer}"
)
|
Check that trees that can deal with missing values have decent performance.
|
test_missing_values_is_resilience
|
python
|
scikit-learn/scikit-learn
|
sklearn/tree/tests/test_tree.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tree/tests/test_tree.py
|
BSD-3-Clause
|
def test_missing_value_is_predictive(Tree, expected_score, global_random_seed):
"""Check the tree learns when only the missing value is predictive."""
rng = np.random.RandomState(0)
n_samples = 500
X = rng.standard_normal(size=(n_samples, 20))
y = np.concatenate([np.zeros(n_samples // 2), np.ones(n_samples // 2)])
# y = rng.randint(0, high=2, size=n_samples)
# Create a predictive feature using `y` and with some noise
X_random_mask = rng.choice([False, True], size=n_samples, p=[0.95, 0.05])
y_mask = y.copy().astype(bool)
y_mask[X_random_mask] = ~y_mask[X_random_mask]
X_predictive = rng.standard_normal(size=n_samples)
X_predictive[y_mask] = np.nan
X[:, 5] = X_predictive
tree = Tree(random_state=global_random_seed)
# Check that the tree can learn the predictive feature
# over an average of cross-validation fits.
tree_cv_score = cross_val_score(tree, X, y, cv=5).mean()
assert tree_cv_score >= expected_score, (
f"Expected CV score: {expected_score} but got {tree_cv_score}"
)
|
Check the tree learns when only the missing value is predictive.
|
test_missing_value_is_predictive
|
python
|
scikit-learn/scikit-learn
|
sklearn/tree/tests/test_tree.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tree/tests/test_tree.py
|
BSD-3-Clause
|
def test_sample_weight_non_uniform(make_data, Tree):
"""Check sample weight is correctly handled with missing values."""
rng = np.random.RandomState(0)
n_samples, n_features = 1000, 10
X, y = make_data(n_samples=n_samples, n_features=n_features, random_state=rng)
# Create dataset with missing values
X[rng.choice([False, True], size=X.shape, p=[0.9, 0.1])] = np.nan
# Zero sample weight is the same as removing the sample
sample_weight = np.ones(X.shape[0])
sample_weight[::2] = 0.0
tree_with_sw = Tree(random_state=0)
tree_with_sw.fit(X, y, sample_weight=sample_weight)
tree_samples_removed = Tree(random_state=0)
tree_samples_removed.fit(X[1::2, :], y[1::2])
assert_allclose(tree_samples_removed.predict(X), tree_with_sw.predict(X))
|
Check sample weight is correctly handled with missing values.
|
test_sample_weight_non_uniform
|
python
|
scikit-learn/scikit-learn
|
sklearn/tree/tests/test_tree.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tree/tests/test_tree.py
|
BSD-3-Clause
|
def test_regression_tree_missing_values_toy(Tree, X, criterion):
"""Check that we properly handle missing values in regression trees using a toy
dataset.
The regression targeted by this test was that we were not reinitializing the
criterion with respect to the number of missing values. Therefore, the value
of the criterion (i.e. MSE) was completely wrong.
This test checks that the MSE is null when there is a single sample in the leaf.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/28254
https://github.com/scikit-learn/scikit-learn/issues/28316
"""
X = X.reshape(-1, 1)
y = np.arange(6)
tree = Tree(criterion=criterion, random_state=0).fit(X, y)
tree_ref = clone(tree).fit(y.reshape(-1, 1), y)
impurity = tree.tree_.impurity
assert all(impurity >= 0), impurity.min()  # MSE should always be non-negative
# Check the impurity match after the first split
assert_allclose(tree.tree_.impurity[:2], tree_ref.tree_.impurity[:2])
# Find the leaves with a single sample where the MSE should be 0
leaves_idx = np.flatnonzero(
(tree.tree_.children_left == -1) & (tree.tree_.n_node_samples == 1)
)
assert_allclose(tree.tree_.impurity[leaves_idx], 0.0)
|
Check that we properly handle missing values in regression trees using a toy
dataset.
The regression targeted by this test was that we were not reinitializing the
criterion with respect to the number of missing values. Therefore, the value
of the criterion (i.e. MSE) was completely wrong.
This test checks that the MSE is null when there is a single sample in the leaf.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/28254
https://github.com/scikit-learn/scikit-learn/issues/28316
|
test_regression_tree_missing_values_toy
|
python
|
scikit-learn/scikit-learn
|
sklearn/tree/tests/test_tree.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tree/tests/test_tree.py
|
BSD-3-Clause
|
def test_classification_tree_missing_values_toy():
"""Check that we properly handle missing values in classification trees using a toy
dataset.
The test is more involved because we use a case where we detected a regression
in a random forest. We therefore define the seed and bootstrap indices to detect
one of the infrequent regressions.
Here, we check that the impurity is null or positive in the leaves.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/28254
"""
X, y = datasets.load_iris(return_X_y=True)
rng = np.random.RandomState(42)
X_missing = X.copy()
mask = rng.binomial(
n=np.ones(shape=(1, 4), dtype=np.int32), p=X[:, [2]] / 8
).astype(bool)
X_missing[mask] = np.nan
X_train, _, y_train, _ = train_test_split(X_missing, y, random_state=13)
# fmt: off
# no black reformatting for this specific array
indices = np.array([
2, 81, 39, 97, 91, 38, 46, 31, 101, 13, 89, 82, 100, 42, 69, 27, 81, 16, 73, 74,
51, 47, 107, 17, 75, 110, 20, 15, 104, 57, 26, 15, 75, 79, 35, 77, 90, 51, 46,
13, 94, 91, 23, 8, 93, 93, 73, 77, 12, 13, 74, 109, 110, 24, 10, 23, 104, 27,
92, 52, 20, 109, 8, 8, 28, 27, 35, 12, 12, 7, 43, 0, 30, 31, 78, 12, 24, 105,
50, 0, 73, 12, 102, 105, 13, 31, 1, 69, 11, 32, 75, 90, 106, 94, 60, 56, 35, 17,
62, 85, 81, 39, 80, 16, 63, 6, 80, 84, 3, 3, 76, 78
], dtype=np.int32)
# fmt: on
tree = DecisionTreeClassifier(
max_depth=3, max_features="sqrt", random_state=1857819720
)
tree.fit(X_train[indices], y_train[indices])
assert all(tree.tree_.impurity >= 0)
leaves_idx = np.flatnonzero(
(tree.tree_.children_left == -1) & (tree.tree_.n_node_samples == 1)
)
assert_allclose(tree.tree_.impurity[leaves_idx], 0.0)
|
Check that we properly handle missing values in classification trees using a toy
dataset.
The test is more involved because we use a case where we detected a regression
in a random forest. We therefore define the seed and bootstrap indices to detect
one of the infrequent regressions.
Here, we check that the impurity is null or positive in the leaves.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/28254
|
test_classification_tree_missing_values_toy
|
python
|
scikit-learn/scikit-learn
|
sklearn/tree/tests/test_tree.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tree/tests/test_tree.py
|
BSD-3-Clause
|
def test_build_pruned_tree_py():
"""Test pruning a tree with the Python caller of the Cythonized prune tree."""
tree = DecisionTreeClassifier(random_state=0, max_depth=1)
tree.fit(iris.data, iris.target)
n_classes = np.atleast_1d(tree.n_classes_)
pruned_tree = CythonTree(tree.n_features_in_, n_classes, tree.n_outputs_)
# only keep the root node
leave_in_subtree = np.zeros(tree.tree_.node_count, dtype=np.uint8)
leave_in_subtree[0] = 1
_build_pruned_tree_py(pruned_tree, tree.tree_, leave_in_subtree)
assert tree.tree_.node_count == 3
assert pruned_tree.node_count == 1
with pytest.raises(AssertionError):
assert_array_equal(tree.tree_.value, pruned_tree.value)
assert_array_equal(tree.tree_.value[0], pruned_tree.value[0])
# now keep all the leaves
pruned_tree = CythonTree(tree.n_features_in_, n_classes, tree.n_outputs_)
leave_in_subtree = np.zeros(tree.tree_.node_count, dtype=np.uint8)
leave_in_subtree[1:] = 1
# Prune the tree
_build_pruned_tree_py(pruned_tree, tree.tree_, leave_in_subtree)
assert tree.tree_.node_count == 3
assert pruned_tree.node_count == 3, pruned_tree.node_count
assert_array_equal(tree.tree_.value, pruned_tree.value)
|
Test pruning a tree with the Python caller of the Cythonized prune tree.
|
test_build_pruned_tree_py
|
python
|
scikit-learn/scikit-learn
|
sklearn/tree/tests/test_tree.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tree/tests/test_tree.py
|
BSD-3-Clause
|
def test_build_pruned_tree_infinite_loop():
"""Test pruning a tree does not result in an infinite loop."""
# Create a tree with root and two children
tree = DecisionTreeClassifier(random_state=0, max_depth=1)
tree.fit(iris.data, iris.target)
n_classes = np.atleast_1d(tree.n_classes_)
pruned_tree = CythonTree(tree.n_features_in_, n_classes, tree.n_outputs_)
# only keeping one child as a leaf results in an improper tree
leave_in_subtree = np.zeros(tree.tree_.node_count, dtype=np.uint8)
leave_in_subtree[1] = 1
with pytest.raises(
ValueError, match="Node has reached a leaf in the original tree"
):
_build_pruned_tree_py(pruned_tree, tree.tree_, leave_in_subtree)
|
Test pruning a tree does not result in an infinite loop.
|
test_build_pruned_tree_infinite_loop
|
python
|
scikit-learn/scikit-learn
|
sklearn/tree/tests/test_tree.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tree/tests/test_tree.py
|
BSD-3-Clause
|
def test_sort_log2_build():
"""Non-regression test for gh-30554.
Using log2 and log in sort correctly sorts feature_values, but the tie breaking
is different, which can result in placing samples in a different order.
"""
rng = np.random.default_rng(75)
some = rng.normal(loc=0.0, scale=10.0, size=10).astype(np.float32)
feature_values = np.concatenate([some] * 5)
samples = np.arange(50, dtype=np.intp)
_py_sort(feature_values, samples, 50)
# fmt: off
# no black reformatting for this specific array
expected_samples = [
0, 40, 30, 20, 10, 29, 39, 19, 49, 9, 45, 15, 35, 5, 25, 11, 31,
41, 1, 21, 22, 12, 2, 42, 32, 23, 13, 43, 3, 33, 6, 36, 46, 16,
26, 4, 14, 24, 34, 44, 27, 47, 7, 37, 17, 8, 38, 48, 28, 18
]
# fmt: on
assert_array_equal(samples, expected_samples)
|
Non-regression test for gh-30554.
Using log2 and log in sort correctly sorts feature_values, but the tie breaking
is different, which can result in placing samples in a different order.
|
test_sort_log2_build
|
python
|
scikit-learn/scikit-learn
|
sklearn/tree/tests/test_tree.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tree/tests/test_tree.py
|
BSD-3-Clause
|
def compute_class_weight(class_weight, *, classes, y, sample_weight=None):
"""Estimate class weights for unbalanced datasets.
Parameters
----------
class_weight : dict, "balanced" or None
If "balanced", class weights will be given by
`n_samples / (n_classes * np.bincount(y))` or their weighted equivalent if
`sample_weight` is provided.
If a dictionary is given, keys are classes and values are corresponding class
weights.
If `None` is given, the class weights will be uniform.
classes : ndarray
Array of the classes occurring in the data, as given by
`np.unique(y_org)` with `y_org` the original class labels.
y : array-like of shape (n_samples,)
Array of original class labels per sample.
sample_weight : array-like of shape (n_samples,), default=None
Array of weights that are assigned to individual samples. Only used when
`class_weight='balanced'`.
Returns
-------
class_weight_vect : ndarray of shape (n_classes,)
Array with `class_weight_vect[i]` the weight for i-th class.
References
----------
The "balanced" heuristic is inspired by
Logistic Regression in Rare Events Data, King, Zeng, 2001.
Examples
--------
>>> import numpy as np
>>> from sklearn.utils.class_weight import compute_class_weight
>>> y = [1, 1, 1, 1, 0, 0]
>>> compute_class_weight(class_weight="balanced", classes=np.unique(y), y=y)
array([1.5 , 0.75])
"""
# Imported here to avoid an error caused by circular imports.
from ..preprocessing import LabelEncoder
if set(y) - set(classes):
raise ValueError("classes should include all valid labels that can be in y")
if class_weight is None or len(class_weight) == 0:
# uniform class weights
weight = np.ones(classes.shape[0], dtype=np.float64, order="C")
elif class_weight == "balanced":
# Find the weight of each class as present in y.
le = LabelEncoder()
y_ind = le.fit_transform(y)
if not all(np.isin(classes, le.classes_)):
raise ValueError("classes should have valid labels that are in y")
sample_weight = _check_sample_weight(sample_weight, y)
weighted_class_counts = np.bincount(y_ind, weights=sample_weight)
recip_freq = weighted_class_counts.sum() / (
len(le.classes_) * weighted_class_counts
)
weight = recip_freq[le.transform(classes)]
else:
# user-defined dictionary
weight = np.ones(classes.shape[0], dtype=np.float64, order="C")
unweighted_classes = []
for i, c in enumerate(classes):
if c in class_weight:
weight[i] = class_weight[c]
else:
unweighted_classes.append(c)
n_weighted_classes = len(classes) - len(unweighted_classes)
if unweighted_classes and n_weighted_classes != len(class_weight):
unweighted_classes_user_friendly_str = np.array(unweighted_classes).tolist()
raise ValueError(
f"The classes, {unweighted_classes_user_friendly_str}, are not in"
" class_weight"
)
return weight
|
Estimate class weights for unbalanced datasets.
Parameters
----------
class_weight : dict, "balanced" or None
If "balanced", class weights will be given by
`n_samples / (n_classes * np.bincount(y))` or their weighted equivalent if
`sample_weight` is provided.
If a dictionary is given, keys are classes and values are corresponding class
weights.
If `None` is given, the class weights will be uniform.
classes : ndarray
Array of the classes occurring in the data, as given by
`np.unique(y_org)` with `y_org` the original class labels.
y : array-like of shape (n_samples,)
Array of original class labels per sample.
sample_weight : array-like of shape (n_samples,), default=None
Array of weights that are assigned to individual samples. Only used when
`class_weight='balanced'`.
Returns
-------
class_weight_vect : ndarray of shape (n_classes,)
Array with `class_weight_vect[i]` the weight for i-th class.
References
----------
The "balanced" heuristic is inspired by
Logistic Regression in Rare Events Data, King, Zeng, 2001.
Examples
--------
>>> import numpy as np
>>> from sklearn.utils.class_weight import compute_class_weight
>>> y = [1, 1, 1, 1, 0, 0]
>>> compute_class_weight(class_weight="balanced", classes=np.unique(y), y=y)
array([1.5 , 0.75])
|
compute_class_weight
|
python
|
scikit-learn/scikit-learn
|
sklearn/utils/class_weight.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/class_weight.py
|
BSD-3-Clause
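Two usages beyond the docstring example: a partial dictionary (classes absent from the dict keep weight 1.0, per the code above) and "balanced" combined with sample_weight; a sketch:
import numpy as np
from sklearn.utils.class_weight import compute_class_weight

classes = np.array([0, 1])
y = np.array([0, 0, 1, 1, 1, 1])
# Partial dictionary: class 1 is absent from the dict and keeps weight 1.0.
print(compute_class_weight({0: 2.0}, classes=classes, y=y))  # [2. 1.]
# "balanced" with sample_weight uses weighted class counts:
# counts are [2, 8] -> 10 / (2 * [2, 8]) = [2.5, 0.625]
sw = np.array([1.0, 1.0, 1.0, 1.0, 1.0, 5.0])
print(compute_class_weight("balanced", classes=classes, y=y, sample_weight=sw))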
|
def compute_sample_weight(class_weight, y, *, indices=None):
"""Estimate sample weights by class for unbalanced datasets.
Parameters
----------
class_weight : dict, list of dicts, "balanced", or None
Weights associated with classes in the form `{class_label: weight}`.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
Note that for multioutput (including multilabel) weights should be
defined for each class of every column in its own dict. For example,
for four-class multilabel classification weights should be
`[{0: 1, 1: 1}, {0: 1, 1: 5}, {0: 1, 1: 1}, {0: 1, 1: 1}]` instead of
`[{1:1}, {2:5}, {3:1}, {4:1}]`.
The `"balanced"` mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data:
`n_samples / (n_classes * np.bincount(y))`.
For multi-output, the weights of each column of y will be multiplied.
y : {array-like, sparse matrix} of shape (n_samples,) or (n_samples, n_outputs)
Array of original class labels per sample.
indices : array-like of shape (n_subsample,), default=None
Array of indices to be used in a subsample. Can be of length less than
`n_samples` in the case of a subsample, or equal to `n_samples` in the
case of a bootstrap subsample with repeated indices. If `None`, the
sample weight will be calculated over the full sample. Only `"balanced"`
is supported for `class_weight` if this is provided.
Returns
-------
sample_weight_vect : ndarray of shape (n_samples,)
Array with sample weights as applied to the original `y`.
Examples
--------
>>> from sklearn.utils.class_weight import compute_sample_weight
>>> y = [1, 1, 1, 1, 0, 0]
>>> compute_sample_weight(class_weight="balanced", y=y)
array([0.75, 0.75, 0.75, 0.75, 1.5 , 1.5 ])
"""
# Ensure y is 2D. Sparse matrices are already 2D.
if not sparse.issparse(y):
y = np.atleast_1d(y)
if y.ndim == 1:
y = np.reshape(y, (-1, 1))
n_outputs = y.shape[1]
if indices is not None and class_weight != "balanced":
raise ValueError(
"The only valid class_weight for subsampling is 'balanced'. "
f"Given {class_weight}."
)
elif n_outputs > 1:
if class_weight is None or isinstance(class_weight, dict):
raise ValueError(
"For multi-output, class_weight should be a list of dicts, or the "
"string 'balanced'."
)
elif isinstance(class_weight, list) and len(class_weight) != n_outputs:
raise ValueError(
"For multi-output, number of elements in class_weight should match "
f"number of outputs. Got {len(class_weight)} element(s) while having "
f"{n_outputs} outputs."
)
expanded_class_weight = []
for k in range(n_outputs):
if sparse.issparse(y):
# Ok to densify a single column at a time
y_full = y[:, [k]].toarray().flatten()
else:
y_full = y[:, k]
classes_full = np.unique(y_full)
classes_missing = None
if class_weight == "balanced" or n_outputs == 1:
class_weight_k = class_weight
else:
class_weight_k = class_weight[k]
if indices is not None:
# Get class weights for the subsample, covering all classes in
# case some labels that were present in the original data are
# missing from the sample.
y_subsample = y_full[indices]
classes_subsample = np.unique(y_subsample)
weight_k = np.take(
compute_class_weight(
class_weight_k, classes=classes_subsample, y=y_subsample
),
np.searchsorted(classes_subsample, classes_full),
mode="clip",
)
classes_missing = set(classes_full) - set(classes_subsample)
else:
weight_k = compute_class_weight(
class_weight_k, classes=classes_full, y=y_full
)
weight_k = weight_k[np.searchsorted(classes_full, y_full)]
if classes_missing:
# Make missing classes' weight zero
weight_k[np.isin(y_full, list(classes_missing))] = 0.0
expanded_class_weight.append(weight_k)
expanded_class_weight = np.prod(expanded_class_weight, axis=0, dtype=np.float64)
return expanded_class_weight
|
Estimate sample weights by class for unbalanced datasets.
Parameters
----------
class_weight : dict, list of dicts, "balanced", or None
Weights associated with classes in the form `{class_label: weight}`.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
Note that for multioutput (including multilabel) weights should be
defined for each class of every column in its own dict. For example,
for four-class multilabel classification weights should be
`[{0: 1, 1: 1}, {0: 1, 1: 5}, {0: 1, 1: 1}, {0: 1, 1: 1}]` instead of
`[{1:1}, {2:5}, {3:1}, {4:1}]`.
The `"balanced"` mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data:
`n_samples / (n_classes * np.bincount(y))`.
For multi-output, the weights of each column of y will be multiplied.
y : {array-like, sparse matrix} of shape (n_samples,) or (n_samples, n_outputs)
Array of original class labels per sample.
indices : array-like of shape (n_subsample,), default=None
Array of indices to be used in a subsample. Can be of length less than
`n_samples` in the case of a subsample, or equal to `n_samples` in the
case of a bootstrap subsample with repeated indices. If `None`, the
sample weight will be calculated over the full sample. Only `"balanced"`
is supported for `class_weight` if this is provided.
Returns
-------
sample_weight_vect : ndarray of shape (n_samples,)
Array with sample weights as applied to the original `y`.
Examples
--------
>>> from sklearn.utils.class_weight import compute_sample_weight
>>> y = [1, 1, 1, 1, 0, 0]
>>> compute_sample_weight(class_weight="balanced", y=y)
array([0.75, 0.75, 0.75, 0.75, 1.5 , 1.5 ])
|
compute_sample_weight
|
python
|
scikit-learn/scikit-learn
|
sklearn/utils/class_weight.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/class_weight.py
|
BSD-3-Clause
|
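Two behaviors documented above are easy to miss, illustrated in the sketch below (values chosen for illustration): per-column weights are multiplied in the multi-output case, and with `indices` a class absent from the subsample gets weight zero.

import numpy as np
from sklearn.utils.class_weight import compute_sample_weight

# Multi-output: "balanced" weights are computed per column, then multiplied.
y = np.array([[0, 0], [0, 1], [1, 1], [1, 1]])
print(compute_sample_weight(class_weight="balanced", y=y))
# [2. 0.667 0.667 0.667] (approximately)

# Subsampling: class 1 never appears in indices [0, 1, 2] -> weight 0.
y_single = np.array([0, 0, 0, 1])
print(compute_sample_weight("balanced", y_single, indices=[0, 1, 2]))
# [1. 1. 1. 0.]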
def _is_deprecated(func):
"""Helper to check if func is wrapped by our deprecated decorator"""
closures = getattr(func, "__closure__", [])
if closures is None:
closures = []
is_deprecated = "deprecated" in "".join(
[c.cell_contents for c in closures if isinstance(c.cell_contents, str)]
)
return is_deprecated
|
Helper to check if func is wrapped by our deprecated decorator
|
_is_deprecated
|
python
|
scikit-learn/scikit-learn
|
sklearn/utils/deprecation.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/deprecation.py
|
BSD-3-Clause
|
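A quick sketch of the closure inspection at work, assuming the private helper is imported from the module path listed above:

from sklearn.utils import deprecated
from sklearn.utils.deprecation import _is_deprecated  # private helper

@deprecated("use new_f instead")
def old_f():
    pass

def new_f():
    pass

# The wrapper produced by @deprecated closes over a message containing the
# word "deprecated", which is exactly what the helper scans for.
print(_is_deprecated(old_f))  # True
print(_is_deprecated(new_f))  # False (plain functions have no such closure)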
def _deprecate_force_all_finite(force_all_finite, ensure_all_finite):
"""Helper to deprecate force_all_finite in favor of ensure_all_finite."""
if force_all_finite != "deprecated":
warnings.warn(
"'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be "
"removed in 1.8.",
FutureWarning,
)
if ensure_all_finite is not None:
raise ValueError(
"'force_all_finite' and 'ensure_all_finite' cannot be used together. "
"Pass `ensure_all_finite` only."
)
return force_all_finite
if ensure_all_finite is None:
return True
return ensure_all_finite
|
Helper to deprecate force_all_finite in favor of ensure_all_finite.
|
_deprecate_force_all_finite
|
python
|
scikit-learn/scikit-learn
|
sklearn/utils/deprecation.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/deprecation.py
|
BSD-3-Clause
|
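A minimal sketch of the helper's three code paths, assuming this private function is still present (it exists for the 1.6-1.8 deprecation cycle):

import warnings
from sklearn.utils.deprecation import _deprecate_force_all_finite  # private

# Neither parameter set: the effective default is True.
assert _deprecate_force_all_finite("deprecated", None) is True
# Only the new name set: passed through unchanged.
assert _deprecate_force_all_finite("deprecated", "allow-nan") == "allow-nan"
# Old name still used: a FutureWarning is emitted and the old value wins.
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    assert _deprecate_force_all_finite(False, None) is False
assert issubclass(caught[0].category, FutureWarning)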
def all_estimators(type_filter=None):
"""Get a list of all estimators from `sklearn`.
This function crawls the module and gets all classes that inherit
from BaseEstimator. Classes that are defined in test-modules are not
included.
Parameters
----------
type_filter : {"classifier", "regressor", "cluster", "transformer"} \
or list of such str, default=None
Which kind of estimators should be returned. If None, no filter is
applied and all estimators are returned. Possible values are
'classifier', 'regressor', 'cluster' and 'transformer' to get
estimators only of these specific types, or a list of these to
get the estimators that fit at least one of the types.
Returns
-------
estimators : list of tuples
List of (name, class), where ``name`` is the class name as string
and ``class`` is the actual type of the class.
Examples
--------
>>> from sklearn.utils.discovery import all_estimators
>>> estimators = all_estimators()
>>> type(estimators)
<class 'list'>
>>> type(estimators[0])
<class 'tuple'>
>>> estimators[:2]
[('ARDRegression', <class 'sklearn.linear_model._bayes.ARDRegression'>),
('AdaBoostClassifier',
<class 'sklearn.ensemble._weight_boosting.AdaBoostClassifier'>)]
>>> classifiers = all_estimators(type_filter="classifier")
>>> classifiers[:2]
[('AdaBoostClassifier',
<class 'sklearn.ensemble._weight_boosting.AdaBoostClassifier'>),
('BaggingClassifier', <class 'sklearn.ensemble._bagging.BaggingClassifier'>)]
>>> regressors = all_estimators(type_filter="regressor")
>>> regressors[:2]
[('ARDRegression', <class 'sklearn.linear_model._bayes.ARDRegression'>),
('AdaBoostRegressor',
<class 'sklearn.ensemble._weight_boosting.AdaBoostRegressor'>)]
>>> both = all_estimators(type_filter=["classifier", "regressor"])
>>> both[:2]
[('ARDRegression', <class 'sklearn.linear_model._bayes.ARDRegression'>),
('AdaBoostClassifier',
<class 'sklearn.ensemble._weight_boosting.AdaBoostClassifier'>)]
"""
# lazy import to avoid circular imports from sklearn.base
from ..base import (
BaseEstimator,
ClassifierMixin,
ClusterMixin,
RegressorMixin,
TransformerMixin,
)
from ._testing import ignore_warnings
def is_abstract(c):
if not (hasattr(c, "__abstractmethods__")):
return False
if not len(c.__abstractmethods__):
return False
return True
all_classes = []
root = str(Path(__file__).parent.parent) # sklearn package
# Ignore deprecation warnings triggered at import time and from walking
# packages
with ignore_warnings(category=FutureWarning):
for _, module_name, _ in pkgutil.walk_packages(path=[root], prefix="sklearn."):
module_parts = module_name.split(".")
if (
any(part in _MODULE_TO_IGNORE for part in module_parts)
or "._" in module_name
):
continue
module = import_module(module_name)
classes = inspect.getmembers(module, inspect.isclass)
classes = [
(name, est_cls) for name, est_cls in classes if not name.startswith("_")
]
all_classes.extend(classes)
all_classes = set(all_classes)
estimators = [
c
for c in all_classes
if (issubclass(c[1], BaseEstimator) and c[0] != "BaseEstimator")
]
# get rid of abstract base classes
estimators = [c for c in estimators if not is_abstract(c[1])]
if type_filter is not None:
if not isinstance(type_filter, list):
type_filter = [type_filter]
else:
type_filter = list(type_filter) # copy
filtered_estimators = []
filters = {
"classifier": ClassifierMixin,
"regressor": RegressorMixin,
"transformer": TransformerMixin,
"cluster": ClusterMixin,
}
for name, mixin in filters.items():
if name in type_filter:
type_filter.remove(name)
filtered_estimators.extend(
[est for est in estimators if issubclass(est[1], mixin)]
)
estimators = filtered_estimators
if type_filter:
raise ValueError(
"Parameter type_filter must be 'classifier', "
"'regressor', 'transformer', 'cluster' or "
"None, got"
f" {type_filter!r}."
)
# drop duplicates, sort for reproducibility
# itemgetter is used to ensure the sort does not extend to the 2nd item of
# the tuple
return sorted(set(estimators), key=itemgetter(0))
|
Get a list of all estimators from `sklearn`.
This function crawls the module and gets all classes that inherit
from BaseEstimator. Classes that are defined in test-modules are not
included.
Parameters
----------
type_filter : {"classifier", "regressor", "cluster", "transformer"} or list of such str, default=None
Which kind of estimators should be returned. If None, no filter is
applied and all estimators are returned. Possible values are
'classifier', 'regressor', 'cluster' and 'transformer' to get
estimators only of these specific types, or a list of these to
get the estimators that fit at least one of the types.
Returns
-------
estimators : list of tuples
List of (name, class), where ``name`` is the class name as string
and ``class`` is the actual type of the class.
Examples
--------
>>> from sklearn.utils.discovery import all_estimators
>>> estimators = all_estimators()
>>> type(estimators)
<class 'list'>
>>> type(estimators[0])
<class 'tuple'>
>>> estimators[:2]
[('ARDRegression', <class 'sklearn.linear_model._bayes.ARDRegression'>),
('AdaBoostClassifier',
<class 'sklearn.ensemble._weight_boosting.AdaBoostClassifier'>)]
>>> classifiers = all_estimators(type_filter="classifier")
>>> classifiers[:2]
[('AdaBoostClassifier',
<class 'sklearn.ensemble._weight_boosting.AdaBoostClassifier'>),
('BaggingClassifier', <class 'sklearn.ensemble._bagging.BaggingClassifier'>)]
>>> regressors = all_estimators(type_filter="regressor")
>>> regressors[:2]
[('ARDRegression', <class 'sklearn.linear_model._bayes.ARDRegression'>),
('AdaBoostRegressor',
<class 'sklearn.ensemble._weight_boosting.AdaBoostRegressor'>)]
>>> both = all_estimators(type_filter=["classifier", "regressor"])
>>> both[:2]
[('ARDRegression', <class 'sklearn.linear_model._bayes.ARDRegression'>),
('AdaBoostClassifier',
<class 'sklearn.ensemble._weight_boosting.AdaBoostClassifier'>)]
|
all_estimators
|
python
|
scikit-learn/scikit-learn
|
sklearn/utils/discovery.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/discovery.py
|
BSD-3-Clause
|
def all_displays():
"""Get a list of all displays from `sklearn`.
Returns
-------
displays : list of tuples
List of (name, class), where ``name`` is the display class name as
string and ``class`` is the actual type of the class.
Examples
--------
>>> from sklearn.utils.discovery import all_displays
>>> displays = all_displays()
>>> displays[0]
('CalibrationDisplay', <class 'sklearn.calibration.CalibrationDisplay'>)
"""
# lazy import to avoid circular imports from sklearn.base
from ._testing import ignore_warnings
all_classes = []
root = str(Path(__file__).parent.parent) # sklearn package
# Ignore deprecation warnings triggered at import time and from walking
# packages
with ignore_warnings(category=FutureWarning):
for _, module_name, _ in pkgutil.walk_packages(path=[root], prefix="sklearn."):
module_parts = module_name.split(".")
if (
any(part in _MODULE_TO_IGNORE for part in module_parts)
or "._" in module_name
):
continue
module = import_module(module_name)
classes = inspect.getmembers(module, inspect.isclass)
classes = [
(name, display_class)
for name, display_class in classes
if not name.startswith("_") and name.endswith("Display")
]
all_classes.extend(classes)
return sorted(set(all_classes), key=itemgetter(0))
|
Get a list of all displays from `sklearn`.
Returns
-------
displays : list of tuples
List of (name, class), where ``name`` is the display class name as
string and ``class`` is the actual type of the class.
Examples
--------
>>> from sklearn.utils.discovery import all_displays
>>> displays = all_displays()
>>> displays[0]
('CalibrationDisplay', <class 'sklearn.calibration.CalibrationDisplay'>)
|
all_displays
|
python
|
scikit-learn/scikit-learn
|
sklearn/utils/discovery.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/discovery.py
|
BSD-3-Clause
|
def all_functions():
"""Get a list of all functions from `sklearn`.
Returns
-------
functions : list of tuples
List of (name, function), where ``name`` is the function name as
string and ``function`` is the actual function.
Examples
--------
>>> from sklearn.utils.discovery import all_functions
>>> functions = all_functions()
>>> name, function = functions[0]
>>> name
'accuracy_score'
"""
# lazy import to avoid circular imports from sklearn.base
from ._testing import ignore_warnings
all_functions = []
root = str(Path(__file__).parent.parent) # sklearn package
# Ignore deprecation warnings triggered at import time and from walking
# packages
with ignore_warnings(category=FutureWarning):
for _, module_name, _ in pkgutil.walk_packages(path=[root], prefix="sklearn."):
module_parts = module_name.split(".")
if (
any(part in _MODULE_TO_IGNORE for part in module_parts)
or "._" in module_name
):
continue
module = import_module(module_name)
functions = inspect.getmembers(module, _is_checked_function)
functions = [
(func.__name__, func)
for name, func in functions
if not name.startswith("_")
]
all_functions.extend(functions)
# drop duplicates, sort for reproducibility
# itemgetter is used to ensure the sort does not extend to the 2nd item of
# the tuple
return sorted(set(all_functions), key=itemgetter(0))
|
Get a list of all functions from `sklearn`.
Returns
-------
functions : list of tuples
List of (name, function), where ``name`` is the function name as
string and ``function`` is the actual function.
Examples
--------
>>> from sklearn.utils.discovery import all_functions
>>> functions = all_functions()
>>> name, function = functions[0]
>>> name
'accuracy_score'
|
all_functions
|
python
|
scikit-learn/scikit-learn
|
sklearn/utils/discovery.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/discovery.py
|
BSD-3-Clause
|
def _maybe_mark(
estimator,
check,
expected_failed_checks: dict[str, str] | None = None,
mark: Literal["xfail", "skip", None] = None,
pytest=None,
):
"""Mark the test as xfail or skip if needed.
Parameters
----------
estimator : estimator object
Estimator instance for which to generate checks.
check : partial or callable
Check to be marked.
expected_failed_checks : dict[str, str], default=None
Dictionary of the form {check_name: reason} for checks that are expected to
fail.
mark : "xfail" or "skip" or None
Whether to mark the check as xfail or skip.
pytest : pytest module, default=None
Pytest module to use to mark the check. This is only needed if ``mark`` is
`"xfail"`. Note that one can run `check_estimator` without having `pytest`
installed. This is used in combination with `parametrize_with_checks` only.
"""
should_be_marked, reason = _should_be_skipped_or_marked(
estimator, check, expected_failed_checks
)
if not should_be_marked or mark is None:
return estimator, check
estimator_name = estimator.__class__.__name__
if mark == "xfail":
return pytest.param(estimator, check, marks=pytest.mark.xfail(reason=reason))
else:
@wraps(check)
def wrapped(*args, **kwargs):
raise SkipTest(
f"Skipping {_check_name(check)} for {estimator_name}: {reason}"
)
return estimator, wrapped
|
Mark the test as xfail or skip if needed.
Parameters
----------
estimator : estimator object
Estimator instance for which to generate checks.
check : partial or callable
Check to be marked.
expected_failed_checks : dict[str, str], default=None
Dictionary of the form {check_name: reason} for checks that are expected to
fail.
mark : "xfail" or "skip" or None
Whether to mark the check as xfail or skip.
pytest : pytest module, default=None
Pytest module to use to mark the check. This is only needed if ``mark`` is
`"xfail"`. Note that one can run `check_estimator` without having `pytest`
installed. This is used in combination with `parametrize_with_checks` only.
|
_maybe_mark
|
python
|
scikit-learn/scikit-learn
|
sklearn/utils/estimator_checks.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/estimator_checks.py
|
BSD-3-Clause
|
def _should_be_skipped_or_marked(
estimator, check, expected_failed_checks: dict[str, str] | None = None
) -> tuple[bool, str]:
"""Check whether a check should be skipped or marked as xfail.
Parameters
----------
estimator : estimator object
Estimator instance for which to generate checks.
check : partial or callable
Check to be marked.
expected_failed_checks : dict[str, str], default=None
Dictionary of the form {check_name: reason} for checks that are expected to
fail.
Returns
-------
should_be_marked : bool
Whether the check should be marked as xfail or skipped.
reason : str
Reason for skipping the check.
"""
expected_failed_checks = expected_failed_checks or {}
check_name = _check_name(check)
if check_name in expected_failed_checks:
return True, expected_failed_checks[check_name]
return False, "Check is not expected to fail"
|
Check whether a check should be skipped or marked as xfail.
Parameters
----------
estimator : estimator object
Estimator instance for which to generate checks.
check : partial or callable
Check to be marked.
expected_failed_checks : dict[str, str], default=None
Dictionary of the form {check_name: reason} for checks that are expected to
fail.
Returns
-------
should_be_marked : bool
Whether the check should be marked as xfail or skipped.
reason : str
Reason for skipping the check.
|
_should_be_skipped_or_marked
|
python
|
scikit-learn/scikit-learn
|
sklearn/utils/estimator_checks.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/estimator_checks.py
|
BSD-3-Clause
|
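A sketch of the lookup behavior; it assumes the helper also accepts a plain callable, since the `_check_name` utility it relies on resolves names for both partials and ordinary functions:

from sklearn.linear_model import LogisticRegression
from sklearn.utils.estimator_checks import _should_be_skipped_or_marked  # private

def check_estimators_dtypes(name, estimator):
    pass

est = LogisticRegression()
print(_should_be_skipped_or_marked(est, check_estimators_dtypes))
# (False, 'Check is not expected to fail')
print(_should_be_skipped_or_marked(
    est, check_estimators_dtypes, {"check_estimators_dtypes": "flaky on 32-bit"}
))
# (True, 'flaky on 32-bit')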
def estimator_checks_generator(
estimator,
*,
legacy: bool = True,
expected_failed_checks: dict[str, str] | None = None,
mark: Literal["xfail", "skip", None] = None,
):
"""Iteratively yield all check callables for an estimator.
.. versionadded:: 1.6
Parameters
----------
estimator : estimator object
Estimator instance for which to generate checks.
legacy : bool, default=True
Whether to include legacy checks. Over time we remove checks from this category
and move them into their specific category.
expected_failed_checks : dict[str, str], default=None
Dictionary of the form {check_name: reason} for checks that are expected to
fail.
mark : {"xfail", "skip"} or None, default=None
Whether to mark the checks that are expected to fail as
xfail (`pytest.mark.xfail`) or skip. Marking a test as "skip" is done via
wrapping the check in a function that raises a
:class:`~sklearn.exceptions.SkipTest` exception.
Returns
-------
estimator_checks_generator : generator
Generator that yields (estimator, check) tuples.
"""
if mark == "xfail":
import pytest
else:
pytest = None # type: ignore[assignment]
name = type(estimator).__name__
# First check that the estimator is cloneable which is needed for the rest
# of the checks to run
yield estimator, partial(check_estimator_cloneable, name)
for check in _yield_all_checks(estimator, legacy=legacy):
check_with_name = partial(check, name)
for check_instance in _yield_instances_for_check(check, estimator):
yield _maybe_mark(
check_instance,
check_with_name,
expected_failed_checks=expected_failed_checks,
mark=mark,
pytest=pytest,
)
|
Iteratively yield all check callables for an estimator.
.. versionadded:: 1.6
Parameters
----------
estimator : estimator object
Estimator instance for which to generate checks.
legacy : bool, default=True
Whether to include legacy checks. Over time we remove checks from this category
and move them into their specific category.
expected_failed_checks : dict[str, str], default=None
Dictionary of the form {check_name: reason} for checks that are expected to
fail.
mark : {"xfail", "skip"} or None, default=None
Whether to mark the checks that are expected to fail as
xfail (`pytest.mark.xfail`) or skip. Marking a test as "skip" is done via
wrapping the check in a function that raises a
:class:`~sklearn.exceptions.SkipTest` exception.
Returns
-------
estimator_checks_generator : generator
Generator that yields (estimator, check) tuples.
|
estimator_checks_generator
|
python
|
scikit-learn/scikit-learn
|
sklearn/utils/estimator_checks.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/estimator_checks.py
|
BSD-3-Clause
|
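The generator can drive a hand-rolled loop instead of pytest; a minimal sketch (scikit-learn >= 1.6, and running every check can take a while):

from sklearn.linear_model import LogisticRegression
from sklearn.utils.estimator_checks import estimator_checks_generator

# Each yielded pair is (estimator instance, check callable).
for est, check in estimator_checks_generator(LogisticRegression(), legacy=False):
    check(est)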
def parametrize_with_checks(
estimators,
*,
legacy: bool = True,
expected_failed_checks: Callable | None = None,
):
"""Pytest specific decorator for parametrizing estimator checks.
Checks are categorised into the following groups:
- API checks: a set of checks to ensure API compatibility with scikit-learn.
Refer to https://scikit-learn.org/dev/developers/develop.html for the
requirements of scikit-learn estimators.
- legacy: a set of checks which gradually will be grouped into other categories.
The `id` of each check is set to be a pprint version of the estimator
and the name of the check with its keyword arguments.
This allows to use `pytest -k` to specify which tests to run::
pytest test_check_estimators.py -k check_estimators_fit_returns_self
Parameters
----------
estimators : list of estimators instances
Estimators to generate checks for.
.. versionchanged:: 0.24
Passing a class was deprecated in version 0.23, and support for
classes was removed in 0.24. Pass an instance instead.
.. versionadded:: 0.24
legacy : bool, default=True
Whether to include legacy checks. Over time we remove checks from this category
and move them into their specific category.
.. versionadded:: 1.6
expected_failed_checks : callable, default=None
A callable that takes an estimator as input and returns a dictionary of the
form::
{
"check_name": "my reason",
}
Where `"check_name"` is the name of the check, and `"my reason"` is why
the check fails. These tests will be marked as xfail if the check fails.
.. versionadded:: 1.6
Returns
-------
decorator : `pytest.mark.parametrize`
See Also
--------
check_estimator : Check if estimator adheres to scikit-learn conventions.
Examples
--------
>>> from sklearn.utils.estimator_checks import parametrize_with_checks
>>> from sklearn.linear_model import LogisticRegression
>>> from sklearn.tree import DecisionTreeRegressor
>>> @parametrize_with_checks([LogisticRegression(),
... DecisionTreeRegressor()])
... def test_sklearn_compatible_estimator(estimator, check):
... check(estimator)
"""
import pytest
if any(isinstance(est, type) for est in estimators):
msg = (
"Passing a class was deprecated in version 0.23 "
"and isn't supported anymore from 0.24."
"Please pass an instance instead."
)
raise TypeError(msg)
def _checks_generator(estimators, legacy, expected_failed_checks):
for estimator in estimators:
args = {"estimator": estimator, "legacy": legacy, "mark": "xfail"}
if callable(expected_failed_checks):
args["expected_failed_checks"] = expected_failed_checks(estimator)
yield from estimator_checks_generator(**args)
return pytest.mark.parametrize(
"estimator, check",
_checks_generator(estimators, legacy, expected_failed_checks),
ids=_get_check_estimator_ids,
)
|
Pytest specific decorator for parametrizing estimator checks.
Checks are categorised into the following groups:
- API checks: a set of checks to ensure API compatibility with scikit-learn.
Refer to https://scikit-learn.org/dev/developers/develop.html for the
requirements of scikit-learn estimators.
- legacy: a set of checks which gradually will be grouped into other categories.
The `id` of each check is set to be a pprint version of the estimator
and the name of the check with its keyword arguments.
This allows to use `pytest -k` to specify which tests to run::
pytest test_check_estimators.py -k check_estimators_fit_returns_self
Parameters
----------
estimators : list of estimators instances
Estimators to generate checks for.
.. versionchanged:: 0.24
Passing a class was deprecated in version 0.23, and support for
classes was removed in 0.24. Pass an instance instead.
.. versionadded:: 0.24
legacy : bool, default=True
Whether to include legacy checks. Over time we remove checks from this category
and move them into their specific category.
.. versionadded:: 1.6
expected_failed_checks : callable, default=None
A callable that takes an estimator as input and returns a dictionary of the
form::
{
"check_name": "my reason",
}
Where `"check_name"` is the name of the check, and `"my reason"` is why
the check fails. These tests will be marked as xfail if the check fails.
.. versionadded:: 1.6
Returns
-------
decorator : `pytest.mark.parametrize`
See Also
--------
check_estimator : Check if estimator adheres to scikit-learn conventions.
Examples
--------
>>> from sklearn.utils.estimator_checks import parametrize_with_checks
>>> from sklearn.linear_model import LogisticRegression
>>> from sklearn.tree import DecisionTreeRegressor
>>> @parametrize_with_checks([LogisticRegression(),
... DecisionTreeRegressor()])
... def test_sklearn_compatible_estimator(estimator, check):
... check(estimator)
|
parametrize_with_checks
|
python
|
scikit-learn/scikit-learn
|
sklearn/utils/estimator_checks.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/estimator_checks.py
|
BSD-3-Clause
|
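A hedged sketch of the `expected_failed_checks` callable; the check name and reason below are illustrative only and should be adapted to the estimator under test:

from sklearn.cluster import KMeans
from sklearn.utils.estimator_checks import parametrize_with_checks

def expected_failed_checks(estimator):
    # Hypothetical mapping: use the real failing check names for your estimator.
    if isinstance(estimator, KMeans):
        return {"check_sample_weights_invariance": "known issue, see tracker"}
    return {}

@parametrize_with_checks([KMeans()], expected_failed_checks=expected_failed_checks)
def test_sklearn_compatible_estimator(estimator, check):
    check(estimator)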
def check_estimator(
estimator=None,
generate_only=False,
*,
legacy: bool = True,
expected_failed_checks: dict[str, str] | None = None,
on_skip: Literal["warn"] | None = "warn",
on_fail: Literal["raise", "warn"] | None = "raise",
callback: Callable | None = None,
):
"""Check if estimator adheres to scikit-learn conventions.
This function will run an extensive test-suite for input validation,
shapes, etc, making sure that the estimator complies with `scikit-learn`
conventions as detailed in :ref:`rolling_your_own_estimator`.
Additional tests for classifiers, regressors, clustering or transformers
will be run if the Estimator class inherits from the corresponding mixin
from sklearn.base.
scikit-learn also provides a pytest specific decorator,
:func:`~sklearn.utils.estimator_checks.parametrize_with_checks`, making it
easier to test multiple estimators.
Checks are categorised into the following groups:
- API checks: a set of checks to ensure API compatibility with scikit-learn.
Refer to https://scikit-learn.org/dev/developers/develop.html for the
requirements of scikit-learn estimators.
- legacy: a set of checks which gradually will be grouped into other categories.
Parameters
----------
estimator : estimator object
Estimator instance to check.
generate_only : bool, default=False
When `False`, checks are evaluated when `check_estimator` is called.
When `True`, `check_estimator` returns a generator that yields
(estimator, check) tuples. The check is run by calling
`check(estimator)`.
.. versionadded:: 0.22
.. deprecated:: 1.6
`generate_only` will be removed in 1.8. Use
:func:`~sklearn.utils.estimator_checks.estimator_checks_generator` instead.
legacy : bool, default=True
Whether to include legacy checks. Over time we remove checks from this category
and move them into their specific category.
.. versionadded:: 1.6
expected_failed_checks : dict, default=None
A dictionary of the form::
{
"check_name": "this check is expected to fail because ...",
}
Where `"check_name"` is the name of the check, and `"my reason"` is why
the check fails.
.. versionadded:: 1.6
on_skip : "warn", None, default="warn"
This parameter controls what happens when a check is skipped.
- "warn": A :class:`~sklearn.exceptions.SkipTestWarning` is logged
and running tests continue.
- None: No warning is logged and running tests continue.
.. versionadded:: 1.6
on_fail : {"raise", "warn"}, None, default="raise"
This parameter controls what happens when a check fails.
- "raise": The exception raised by the first failing check is raised and
running tests are aborted. This does not include tests that are expected
to fail.
- "warn": A :class:`~sklearn.exceptions.EstimatorCheckFailedWarning` is logged
and running tests continue.
- None: No exception is raised and no warning is logged.
Note that if ``on_fail != "raise"``, no exception is raised, even if the checks
fail. You'd need to inspect the return result of ``check_estimator`` to check
if any checks failed.
.. versionadded:: 1.6
callback : callable, or None, default=None
This callback will be called with the estimator and the check name,
the exception (if any), the status of the check (xfail, failed, skipped,
passed), and the reason for the expected failure if the check is
expected to fail. The callable's signature needs to be::
def callback(
estimator,
check_name: str,
exception: Exception,
status: Literal["xfail", "failed", "skipped", "passed"],
expected_to_fail: bool,
expected_to_fail_reason: str,
)
``callback`` cannot be provided together with ``on_fail="raise"``.
.. versionadded:: 1.6
Returns
-------
test_results : list
List of dictionaries with the results of the failing tests, of the form::
{
"estimator": estimator,
"check_name": check_name,
"exception": exception,
"status": status (one of "xfail", "failed", "skipped", "passed"),
"expected_to_fail": expected_to_fail,
"expected_to_fail_reason": expected_to_fail_reason,
}
estimator_checks_generator : generator
Generator that yields (estimator, check) tuples. Returned when
`generate_only=True`.
..
TODO(1.8): remove return value
.. deprecated:: 1.6
``generate_only`` will be removed in 1.8. Use
:func:`~sklearn.utils.estimator_checks.estimator_checks_generator` instead.
Raises
------
Exception
If ``on_fail="raise"``, the exception raised by the first failing check is
raised and running tests are aborted.
Note that if ``on_fail != "raise"``, no exception is raised, even if the checks
fail. You'd need to inspect the return result of ``check_estimator`` to check
if any checks failed.
See Also
--------
parametrize_with_checks : Pytest specific decorator for parametrizing estimator
checks.
estimator_checks_generator : Generator that yields (estimator, check) tuples.
Examples
--------
>>> from sklearn.utils.estimator_checks import check_estimator
>>> from sklearn.linear_model import LogisticRegression
>>> check_estimator(LogisticRegression())
[...]
"""
if isinstance(estimator, type):
msg = (
"Passing a class was deprecated in version 0.23 "
"and isn't supported anymore from 0.24."
"Please pass an instance instead."
)
raise TypeError(msg)
if on_fail == "raise" and callback is not None:
raise ValueError("callback cannot be provided together with on_fail='raise'")
name = type(estimator).__name__
# TODO(1.8): remove generate_only
if generate_only:
warnings.warn(
"`generate_only` is deprecated in 1.6 and will be removed in 1.8. "
"Use :func:`~sklearn.utils.estimator_checks.estimator_checks_generator` "
"instead.",
FutureWarning,
)
return estimator_checks_generator(
estimator, legacy=legacy, expected_failed_checks=None, mark="skip"
)
test_results = []
for estimator, check in estimator_checks_generator(
estimator,
legacy=legacy,
expected_failed_checks=expected_failed_checks,
# Not marking tests to be skipped here, we run and simulate an xfail behavior
mark=None,
):
test_can_fail, reason = _should_be_skipped_or_marked(
estimator, check, expected_failed_checks
)
try:
check(estimator)
except SkipTest as e:
# We get here if the test raises SkipTest, which is expected in cases where
# the check cannot run for instance if a required dependency is not
# installed.
check_result = {
"estimator": estimator,
"check_name": _check_name(check),
"exception": e,
"status": "skipped",
"expected_to_fail": test_can_fail,
"expected_to_fail_reason": reason,
}
if on_skip == "warn":
warnings.warn(
f"Skipping check {_check_name(check)} for {name} because it raised "
f"{type(e).__name__}: {e}",
SkipTestWarning,
)
except Exception as e:
if on_fail == "raise" and not test_can_fail:
raise
check_result = {
"estimator": estimator,
"check_name": _check_name(check),
"exception": e,
"expected_to_fail": test_can_fail,
"expected_to_fail_reason": reason,
}
if test_can_fail:
# This check failed, but could be expected to fail, therefore we mark it
# as xfail.
check_result["status"] = "xfail"
else:
check_result["status"] = "failed"
if on_fail == "warn":
warning = EstimatorCheckFailedWarning(**check_result)
warnings.warn(warning)
else:
check_result = {
"estimator": estimator,
"check_name": _check_name(check),
"exception": None,
"status": "passed",
"expected_to_fail": test_can_fail,
"expected_to_fail_reason": reason,
}
test_results.append(check_result)
if callback:
callback(**check_result)
return test_results
|
Check if estimator adheres to scikit-learn conventions.
This function will run an extensive test-suite for input validation,
shapes, etc, making sure that the estimator complies with `scikit-learn`
conventions as detailed in :ref:`rolling_your_own_estimator`.
Additional tests for classifiers, regressors, clustering or transformers
will be run if the Estimator class inherits from the corresponding mixin
from sklearn.base.
scikit-learn also provides a pytest specific decorator,
:func:`~sklearn.utils.estimator_checks.parametrize_with_checks`, making it
easier to test multiple estimators.
Checks are categorised into the following groups:
- API checks: a set of checks to ensure API compatibility with scikit-learn.
Refer to https://scikit-learn.org/dev/developers/develop.html for the
requirements of scikit-learn estimators.
- legacy: a set of checks which gradually will be grouped into other categories.
Parameters
----------
estimator : estimator object
Estimator instance to check.
generate_only : bool, default=False
When `False`, checks are evaluated when `check_estimator` is called.
When `True`, `check_estimator` returns a generator that yields
(estimator, check) tuples. The check is run by calling
`check(estimator)`.
.. versionadded:: 0.22
.. deprecated:: 1.6
`generate_only` will be removed in 1.8. Use
:func:`~sklearn.utils.estimator_checks.estimator_checks_generator` instead.
legacy : bool, default=True
Whether to include legacy checks. Over time we remove checks from this category
and move them into their specific category.
.. versionadded:: 1.6
expected_failed_checks : dict, default=None
A dictionary of the form::
{
"check_name": "this check is expected to fail because ...",
}
Where `"check_name"` is the name of the check, and `"my reason"` is why
the check fails.
.. versionadded:: 1.6
on_skip : "warn", None, default="warn"
This parameter controls what happens when a check is skipped.
- "warn": A :class:`~sklearn.exceptions.SkipTestWarning` is logged
and running tests continue.
- None: No warning is logged and running tests continue.
.. versionadded:: 1.6
on_fail : {"raise", "warn"}, None, default="raise"
This parameter controls what happens when a check fails.
- "raise": The exception raised by the first failing check is raised and
running tests are aborted. This does not include tests that are expected
to fail.
- "warn": A :class:`~sklearn.exceptions.EstimatorCheckFailedWarning` is logged
and running tests continue.
- None: No exception is raised and no warning is logged.
Note that if ``on_fail != "raise"``, no exception is raised, even if the checks
fail. You'd need to inspect the return result of ``check_estimator`` to check
if any checks failed.
.. versionadded:: 1.6
callback : callable, or None, default=None
This callback will be called with the estimator and the check name,
the exception (if any), the status of the check (xfail, failed, skipped,
passed), and the reason for the expected failure if the check is
expected to fail. The callable's signature needs to be::
def callback(
estimator,
check_name: str,
exception: Exception,
status: Literal["xfail", "failed", "skipped", "passed"],
expected_to_fail: bool,
expected_to_fail_reason: str,
)
``callback`` cannot be provided together with ``on_fail="raise"``.
.. versionadded:: 1.6
Returns
-------
test_results : list
List of dictionaries with the results of the failing tests, of the form::
{
"estimator": estimator,
"check_name": check_name,
"exception": exception,
"status": status (one of "xfail", "failed", "skipped", "passed"),
"expected_to_fail": expected_to_fail,
"expected_to_fail_reason": expected_to_fail_reason,
}
estimator_checks_generator : generator
Generator that yields (estimator, check) tuples. Returned when
`generate_only=True`.
..
TODO(1.8): remove return value
.. deprecated:: 1.6
``generate_only`` will be removed in 1.8. Use
:func:`~sklearn.utils.estimator_checks.estimator_checks_generator` instead.
Raises
------
Exception
If ``on_fail="raise"``, the exception raised by the first failing check is
raised and running tests are aborted.
Note that if ``on_fail != "raise"``, no exception is raised, even if the checks
fail. You'd need to inspect the return result of ``check_estimator`` to check
if any checks failed.
See Also
--------
parametrize_with_checks : Pytest specific decorator for parametrizing estimator
checks.
estimator_checks_generator : Generator that yields (estimator, check) tuples.
Examples
--------
>>> from sklearn.utils.estimator_checks import check_estimator
>>> from sklearn.linear_model import LogisticRegression
>>> check_estimator(LogisticRegression())
[...]
|
check_estimator
|
python
|
scikit-learn/scikit-learn
|
sklearn/utils/estimator_checks.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/estimator_checks.py
|
BSD-3-Clause
|
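A short sketch of the non-raising mode added in 1.6: since no exception is raised when `on_fail` is not `"raise"`, the returned list has to be inspected explicitly:

from sklearn.linear_model import LogisticRegression
from sklearn.utils.estimator_checks import check_estimator

results = check_estimator(LogisticRegression(), on_fail=None)
failed = [r["check_name"] for r in results if r["status"] == "failed"]
print(f"{len(results)} checks run, {len(failed)} failed")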
def _is_pairwise_metric(estimator):
"""Returns True if estimator accepts pairwise metric.
Parameters
----------
estimator : object
Estimator object to test.
Returns
-------
out : bool
True if the estimator uses a precomputed (pairwise) metric, False otherwise.
"""
metric = getattr(estimator, "metric", None)
return bool(metric == "precomputed")
|
Returns True if estimator accepts pairwise metric.
Parameters
----------
estimator : object
Estimator object to test.
Returns
-------
out : bool
True if the estimator uses a precomputed (pairwise) metric, False otherwise.
|
_is_pairwise_metric
|
python
|
scikit-learn/scikit-learn
|
sklearn/utils/estimator_checks.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/estimator_checks.py
|
BSD-3-Clause
|
def _generate_sparse_data(X_csr):
"""Generate sparse matrices or arrays with {32,64}bit indices of diverse format.
Parameters
----------
X_csr: scipy.sparse.csr_matrix or scipy.sparse.csr_array
Input in CSR format.
Returns
-------
out: iterator of (str, matrix) or (str, array) pairs
Yields (format_name, X) for the formats ['dok', 'lil', 'dia', 'bsr',
'csr', 'csc', 'coo', 'coo_64', 'csc_64', 'csr_64'].
"""
assert X_csr.format == "csr"
yield "csr", X_csr.copy()
for sparse_format in ["dok", "lil", "dia", "bsr", "csc", "coo"]:
yield sparse_format, X_csr.asformat(sparse_format)
# Also generate variants with 64-bit indices
X_coo = X_csr.asformat("coo")
X_coo.row = X_coo.row.astype("int64")
X_coo.col = X_coo.col.astype("int64")
yield "coo_64", X_coo
for sparse_format in ["csc", "csr"]:
X = X_csr.asformat(sparse_format)
X.indices = X.indices.astype("int64")
X.indptr = X.indptr.astype("int64")
yield sparse_format + "_64", X
|
Generate sparse matrices or arrays with {32,64}bit indices of diverse format.
Parameters
----------
X_csr: scipy.sparse.csr_matrix or scipy.sparse.csr_array
Input in CSR format.
Returns
-------
out: iterator of (str, matrix) or (str, array) pairs
Yields (format_name, X) for the formats ['dok', 'lil', 'dia', 'bsr',
'csr', 'csc', 'coo', 'coo_64', 'csc_64', 'csr_64'].
|
_generate_sparse_data
|
python
|
scikit-learn/scikit-learn
|
sklearn/utils/estimator_checks.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/estimator_checks.py
|
BSD-3-Clause
|
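Usage sketch for the private generator above; each yielded item is a `(format_name, matrix)` pair:

import numpy as np
from scipy import sparse
from sklearn.utils.estimator_checks import _generate_sparse_data  # private

X_csr = sparse.csr_matrix(np.eye(4))
for fmt, X in _generate_sparse_data(X_csr):
    print(fmt, X.format)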
def check_array_api_input(
name,
estimator_orig,
array_namespace,
device=None,
dtype_name="float64",
check_values=False,
):
"""Check that the estimator can work consistently with the Array API
By default, this just checks that the types and shapes of the arrays are
consistent with calling the same estimator with numpy arrays.
When check_values is True, it also checks that calling the estimator on the
array_api Array gives the same results as ndarrays.
"""
xp = _array_api_for_tests(array_namespace, device)
X, y = make_classification(random_state=42)
X = X.astype(dtype_name, copy=False)
X = _enforce_estimator_tags_X(estimator_orig, X)
y = _enforce_estimator_tags_y(estimator_orig, y)
est = clone(estimator_orig)
X_xp = xp.asarray(X, device=device)
y_xp = xp.asarray(y, device=device)
est.fit(X, y)
array_attributes = {
key: value for key, value in vars(est).items() if isinstance(value, np.ndarray)
}
est_xp = clone(est)
with config_context(array_api_dispatch=True):
est_xp.fit(X_xp, y_xp)
input_ns = get_namespace(X_xp)[0].__name__
# Fitted attributes which are arrays must have the same
# namespace as the one of the training data.
for key, attribute in array_attributes.items():
est_xp_param = getattr(est_xp, key)
with config_context(array_api_dispatch=True):
attribute_ns = get_namespace(est_xp_param)[0].__name__
assert attribute_ns == input_ns, (
f"'{key}' attribute is in wrong namespace, expected {input_ns} "
f"got {attribute_ns}"
)
assert array_device(est_xp_param) == array_device(X_xp)
est_xp_param_np = _convert_to_numpy(est_xp_param, xp=xp)
if check_values:
assert_allclose(
attribute,
est_xp_param_np,
err_msg=f"{key} not the same",
atol=_atol_for_type(X.dtype),
)
else:
assert attribute.shape == est_xp_param_np.shape
assert attribute.dtype == est_xp_param_np.dtype
# Check estimator methods, if supported, give the same results
methods = (
"score",
"score_samples",
"decision_function",
"predict",
"predict_log_proba",
"predict_proba",
"transform",
)
try:
np.asarray(X_xp)
np.asarray(y_xp)
# TODO There are a few errors in SearchCV with array-api-strict because
# we end up doing X[train_indices] where X is an array-api-strict array
# and train_indices is a numpy array. array-api-strict insists
# train_indices should be an array-api-strict array. On the other hand,
# all the array API libraries (PyTorch, jax, CuPy) accept indexing with a
# numpy array. This is probably not worth doing anything about for
# now since array-api-strict seems a bit too strict ...
numpy_asarray_works = xp.__name__ != "array_api_strict"
except (TypeError, RuntimeError):
# PyTorch with CUDA device and CuPy raise TypeError consistently.
# array-api-strict chose to raise RuntimeError instead. Exception type
# may need to be updated in the future for other libraries.
numpy_asarray_works = False
if numpy_asarray_works:
# In this case, array_api_dispatch is disabled and we rely on np.asarray
# being called to convert the non-NumPy inputs to NumPy arrays when needed.
est_fitted_with_as_array = clone(est).fit(X_xp, y_xp)
# We only do a smoke test for now, in order to avoid complicating the
# test function even further.
for method_name in methods:
method = getattr(est_fitted_with_as_array, method_name, None)
if method is None:
continue
if method_name == "score":
method(X_xp, y_xp)
else:
method(X_xp)
for method_name in methods:
method = getattr(est, method_name, None)
if method is None:
continue
if method_name == "score":
result = method(X, y)
with config_context(array_api_dispatch=True):
result_xp = getattr(est_xp, method_name)(X_xp, y_xp)
# score typically returns a Python float
assert isinstance(result, float)
assert isinstance(result_xp, float)
if check_values:
assert abs(result - result_xp) < _atol_for_type(X.dtype)
continue
else:
result = method(X)
with config_context(array_api_dispatch=True):
result_xp = getattr(est_xp, method_name)(X_xp)
with config_context(array_api_dispatch=True):
result_ns = get_namespace(result_xp)[0].__name__
assert result_ns == input_ns, (
f"'{method}' output is in wrong namespace, expected {input_ns}, "
f"got {result_ns}."
)
assert array_device(result_xp) == array_device(X_xp)
result_xp_np = _convert_to_numpy(result_xp, xp=xp)
if check_values:
assert_allclose(
result,
result_xp_np,
err_msg=f"{method} did not the return the same result",
atol=_atol_for_type(X.dtype),
)
else:
if hasattr(result, "shape"):
assert result.shape == result_xp_np.shape
assert result.dtype == result_xp_np.dtype
if method_name == "transform" and hasattr(est, "inverse_transform"):
inverse_result = est.inverse_transform(result)
with config_context(array_api_dispatch=True):
inverse_result_xp = est_xp.inverse_transform(result_xp)
inverse_result_ns = get_namespace(inverse_result_xp)[0].__name__
assert inverse_result_ns == input_ns, (
"'inverse_transform' output is in wrong namespace, expected"
f" {input_ns}, got {inverse_result_ns}."
)
assert array_device(inverse_result_xp) == array_device(X_xp)
inverse_result_xp_np = _convert_to_numpy(inverse_result_xp, xp=xp)
if check_values:
assert_allclose(
inverse_result,
inverse_result_xp_np,
err_msg="inverse_transform did not return the same result",
atol=_atol_for_type(X.dtype),
)
else:
assert inverse_result.shape == inverse_result_xp_np.shape
assert inverse_result.dtype == inverse_result_xp_np.dtype
|
Check that the estimator can work consistently with the Array API.
By default, this just checks that the types and shapes of the arrays are
consistent with calling the same estimator with numpy arrays.
When check_values is True, it also checks that calling the estimator on the
array_api Array gives the same results as ndarrays.
|
check_array_api_input
|
python
|
scikit-learn/scikit-learn
|
sklearn/utils/estimator_checks.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/estimator_checks.py
|
BSD-3-Clause
|
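A smoke-test sketch for the check above; it assumes the optional `array-api-strict` package is installed (otherwise a `SkipTest` is raised) and uses `PCA` with the `"full"` solver, which supports array API inputs:

from sklearn.decomposition import PCA
from sklearn.utils.estimator_checks import check_array_api_input

# Compares fitted attributes and method outputs between numpy input and
# array-api-strict input, value by value.
check_array_api_input(
    "PCA", PCA(n_components=2, svd_solver="full"), "array_api_strict",
    check_values=True,
)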
def check_estimator_sparse_tag(name, estimator_orig):
"""Check that estimator tag related with accepting sparse data is properly set."""
estimator = clone(estimator_orig)
rng = np.random.RandomState(0)
n_samples = 15 if name == "SpectralCoclustering" else 40
X = rng.uniform(size=(n_samples, 3))
X[X < 0.6] = 0
y = rng.randint(0, 3, size=n_samples)
X = _enforce_estimator_tags_X(estimator, X)
y = _enforce_estimator_tags_y(estimator, y)
X = sparse.csr_array(X)
tags = get_tags(estimator)
if tags.input_tags.sparse:
try:
estimator.fit(X, y) # should pass
except Exception as e:
err_msg = (
f"Estimator {name} raised an exception. "
f"The tag self.input_tags.sparse={tags.input_tags.sparse} "
"might not be consistent with the estimator's ability to "
"handle sparse data (i.e. controlled by the parameter `accept_sparse`"
" in `validate_data` or `check_array` functions)."
)
raise AssertionError(err_msg) from e
else:
err_msg = (
f"Estimator {name} raised an exception. "
"The estimator failed when fitted on sparse data in accordance "
f"with its tag self.input_tags.sparse={tags.input_tags.sparse} "
"but didn't raise the appropriate error: error message should "
"state explicitly that sparse input is not supported if this is "
"not the case, e.g. by using check_array(X, accept_sparse=False)."
)
try:
estimator.fit(X, y) # should fail with appropriate error
except (ValueError, TypeError) as e:
if re.search("[Ss]parse", str(e)):
# Got the right error type and mentioning sparse issue
return
raise AssertionError(err_msg) from e
except Exception as e:
raise AssertionError(err_msg) from e
raise AssertionError(
f"Estimator {name} didn't fail when fitted on sparse data "
"but should have according to its tag "
f"self.input_tags.sparse={tags.input_tags.sparse}. "
f"The tag is inconsistent and must be fixed."
)
|
Check that estimator tag related with accepting sparse data is properly set.
|
check_estimator_sparse_tag
|
python
|
scikit-learn/scikit-learn
|
sklearn/utils/estimator_checks.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/estimator_checks.py
|
BSD-3-Clause
|
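The tag this check verifies can be read directly; a sketch assuming the public `get_tags` from the 1.6 tags API:

from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.utils import get_tags

print(get_tags(LogisticRegression()).input_tags.sparse)  # True: accepts CSR
print(get_tags(GaussianNB()).input_tags.sparse)          # False: dense only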
def check_transformers_unfitted_stateless(name, transformer):
"""Check that using transform without prior fitting
doesn't raise a NotFittedError for stateless transformers.
"""
rng = np.random.RandomState(0)
X = rng.uniform(size=(20, 5))
X = _enforce_estimator_tags_X(transformer, X)
transformer = clone(transformer)
X_trans = transformer.transform(X)
assert X_trans.shape[0] == X.shape[0]
|
Check that using transform without prior fitting
doesn't raise a NotFittedError for stateless transformers.
|
check_transformers_unfitted_stateless
|
python
|
scikit-learn/scikit-learn
|
sklearn/utils/estimator_checks.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/estimator_checks.py
|
BSD-3-Clause
|
def check_mixin_order(name, estimator_orig):
"""Check that mixins are inherited in the correct order."""
# We define a list of edges, which in effect define a DAG of mixins and their
# required order of inheritance.
# This is of the form (mixin_a_should_be_before, mixin_b_should_be_after)
dag = [
(ClassifierMixin, BaseEstimator),
(RegressorMixin, BaseEstimator),
(ClusterMixin, BaseEstimator),
(TransformerMixin, BaseEstimator),
(BiclusterMixin, BaseEstimator),
(OneToOneFeatureMixin, BaseEstimator),
(ClassNamePrefixFeaturesOutMixin, BaseEstimator),
(DensityMixin, BaseEstimator),
(OutlierMixin, BaseEstimator),
(MetaEstimatorMixin, BaseEstimator),
(MultiOutputMixin, BaseEstimator),
]
violations = []
mro = type(estimator_orig).mro()
for mixin_a, mixin_b in dag:
if (
mixin_a in mro
and mixin_b in mro
and mro.index(mixin_a) > mro.index(mixin_b)
):
violations.append((mixin_a, mixin_b))
violation_str = "\n".join(
f"{mixin_a.__name__} comes before/left side of {mixin_b.__name__}"
for mixin_a, mixin_b in violations
)
assert not violations, (
f"{name} is inheriting from mixins in the wrong order. In general, in mixin "
"inheritance, more specialized mixins must come before more general ones. "
"This means, for instance, `BaseEstimator` should be on the right side of most "
"other mixins. You need to change the order so that:\n"
f"{violation_str}"
)
|
Check that mixins are inherited in the correct order.
|
check_mixin_order
|
python
|
scikit-learn/scikit-learn
|
sklearn/utils/estimator_checks.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/estimator_checks.py
|
BSD-3-Clause
|
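In practice the rule means the specialized mixin goes first in the bases list, so it precedes `BaseEstimator` in the MRO; a minimal sketch:

from sklearn.base import BaseEstimator, ClassifierMixin

class GoodClassifier(ClassifierMixin, BaseEstimator):
    """Passes check_mixin_order: ClassifierMixin precedes BaseEstimator."""

class BadClassifier(BaseEstimator, ClassifierMixin):
    """Would fail the check: BaseEstimator comes before the specialized mixin."""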
def check_nonsquare_error(name, estimator_orig):
"""Test that error is thrown when non-square data provided."""
X, y = make_blobs(n_samples=20, n_features=10)
estimator = clone(estimator_orig)
with raises(
ValueError,
err_msg=(
f"The pairwise estimator {name} does not raise an error on non-square data"
),
):
estimator.fit(X, y)
|
Test that error is thrown when non-square data provided.
|
check_nonsquare_error
|
python
|
scikit-learn/scikit-learn
|
sklearn/utils/estimator_checks.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/estimator_checks.py
|
BSD-3-Clause
|
def check_estimators_pickle(name, estimator_orig, readonly_memmap=False):
"""Test that we can pickle all estimators."""
check_methods = ["predict", "transform", "decision_function", "predict_proba"]
X, y = make_blobs(
n_samples=30,
centers=[[0, 0, 0], [1, 1, 1]],
random_state=0,
n_features=2,
cluster_std=0.1,
)
X = _enforce_estimator_tags_X(estimator_orig, X, kernel=rbf_kernel)
tags = get_tags(estimator_orig)
# include NaN values when the estimator should deal with them
if tags.input_tags.allow_nan:
# set randomly 10 elements to np.nan
rng = np.random.RandomState(42)
mask = rng.choice(X.size, 10, replace=False)
X.reshape(-1)[mask] = np.nan
estimator = clone(estimator_orig)
y = _enforce_estimator_tags_y(estimator, y)
set_random_state(estimator)
estimator.fit(X, y)
if readonly_memmap:
unpickled_estimator = create_memmap_backed_data(estimator)
else:
# No need to touch the file system in that case.
pickled_estimator = pickle.dumps(estimator)
module_name = estimator.__module__
if module_name.startswith("sklearn.") and not (
"test_" in module_name or module_name.endswith("_testing")
):
# strict check for sklearn estimators that are not implemented in test
# modules.
assert b"_sklearn_version" in pickled_estimator
unpickled_estimator = pickle.loads(pickled_estimator)
result = dict()
for method in check_methods:
if hasattr(estimator, method):
result[method] = getattr(estimator, method)(X)
for method in result:
unpickled_result = getattr(unpickled_estimator, method)(X)
assert_allclose_dense_sparse(result[method], unpickled_result)
|
Test that we can pickle all estimators.
|
check_estimators_pickle
|
python
|
scikit-learn/scikit-learn
|
sklearn/utils/estimator_checks.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/estimator_checks.py
|
BSD-3-Clause
|
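The same roundtrip the check performs, in miniature; scikit-learn embeds a `_sklearn_version` marker when its estimators are pickled:

import pickle
import numpy as np
from sklearn.datasets import make_blobs
from sklearn.linear_model import LogisticRegression

X, y = make_blobs(n_samples=30, centers=2, random_state=0)
est = LogisticRegression().fit(X, y)
blob = pickle.dumps(est)
assert b"_sklearn_version" in blob  # version marker added on pickling
unpickled = pickle.loads(blob)
np.testing.assert_allclose(est.predict_proba(X), unpickled.predict_proba(X))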