code stringlengths 66-870k | docstring stringlengths 19-26.7k | func_name stringlengths 1-138 | language stringclasses 1 value | repo stringlengths 7-68 | path stringlengths 5-324 | url stringlengths 46-389 | license stringclasses 7 values |
---|---|---|---|---|---|---|---|
def test_ohe_infrequent_two_levels_user_cats_one_frequent(kwargs):
    """'a' is the only frequent category, all other categories are infrequent."""
    X_train = np.array([["a"] * 5 + ["e"] * 30], dtype=object).T
    ohe = OneHotEncoder(
        categories=[["c", "d", "a", "b"]],
        sparse_output=False,
        handle_unknown="infrequent_if_exist",
        **kwargs,
    ).fit(X_train)
    X_test = [["a"], ["b"], ["c"], ["d"], ["e"]]
    expected = np.array([[1, 0], [0, 1], [0, 1], [0, 1], [0, 1]])
    X_trans = ohe.transform(X_test)
    assert_allclose(expected, X_trans)
    # 'a' is dropped
    drops = ["first", "if_binary", ["a"]]
    X_test = [["a"], ["c"]]
    for drop in drops:
        ohe.set_params(drop=drop).fit(X_train)
        assert_allclose([[0], [1]], ohe.transform(X_test))
|
'a' is the only frequent category, all other categories are infrequent.
|
test_ohe_infrequent_two_levels_user_cats_one_frequent
|
python
|
scikit-learn/scikit-learn
|
sklearn/preprocessing/tests/test_encoders.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/tests/test_encoders.py
|
BSD-3-Clause
|
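The test above exercises infrequent-category grouping in OneHotEncoder. As a minimal standalone sketch (assuming scikit-learn >= 1.2 for the `sparse_output` parameter), a `max_categories` budget keeps the most frequent categories and pools the rest into a single output column:
import numpy as np
from sklearn.preprocessing import OneHotEncoder

X = np.array([["a"] * 5 + ["b"] * 20 + ["c"] * 10 + ["d"] * 3], dtype=object).T
enc = OneHotEncoder(
    max_categories=2, sparse_output=False, handle_unknown="infrequent_if_exist"
).fit(X)
print(enc.infrequent_categories_)  # [array(['a', 'c', 'd'], dtype=object)]
print(enc.transform([["b"], ["d"]]))  # [[1. 0.] [0. 1.]]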
def test_ohe_infrequent_two_levels_user_cats():
    """Test that the order of the categories provided by a user is respected."""
    X_train = np.array(
        [["a"] * 5 + ["b"] * 20 + ["c"] * 10 + ["d"] * 3], dtype=object
    ).T
    ohe = OneHotEncoder(
        categories=[["c", "d", "a", "b"]],
        sparse_output=False,
        handle_unknown="infrequent_if_exist",
        max_categories=2,
    ).fit(X_train)
    assert_array_equal(ohe.infrequent_categories_, [["c", "d", "a"]])
    X_test = [["b"], ["a"], ["c"], ["d"], ["e"]]
    expected = np.array([[1, 0], [0, 1], [0, 1], [0, 1], [0, 1]])
    X_trans = ohe.transform(X_test)
    assert_allclose(expected, X_trans)
    # 'infrequent' is used to denote the infrequent categories for
    # `inverse_transform`
    expected_inv = [[col] for col in ["b"] + ["infrequent_sklearn"] * 4]
    X_inv = ohe.inverse_transform(X_trans)
    assert_array_equal(expected_inv, X_inv)
|
Test that the order of the categories provided by a user is respected.
|
test_ohe_infrequent_two_levels_user_cats
|
python
|
scikit-learn/scikit-learn
|
sklearn/preprocessing/tests/test_encoders.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/tests/test_encoders.py
|
BSD-3-Clause
|
def test_ohe_infrequent_three_levels_user_cats():
    """Test that the order of the categories provided by a user is respected.
    In this case 'c' is encoded as the first category and 'b' is encoded
    as the second one."""
    X_train = np.array(
        [["a"] * 5 + ["b"] * 20 + ["c"] * 10 + ["d"] * 3], dtype=object
    ).T
    ohe = OneHotEncoder(
        categories=[["c", "d", "b", "a"]],
        sparse_output=False,
        handle_unknown="infrequent_if_exist",
        max_categories=3,
    ).fit(X_train)
    assert_array_equal(ohe.infrequent_categories_, [["d", "a"]])
    X_test = [["b"], ["a"], ["c"], ["d"], ["e"]]
    expected = np.array([[0, 1, 0], [0, 0, 1], [1, 0, 0], [0, 0, 1], [0, 0, 1]])
    X_trans = ohe.transform(X_test)
    assert_allclose(expected, X_trans)
    # 'infrequent' is used to denote the infrequent categories for
    # `inverse_transform`
    expected_inv = [
        ["b"],
        ["infrequent_sklearn"],
        ["c"],
        ["infrequent_sklearn"],
        ["infrequent_sklearn"],
    ]
    X_inv = ohe.inverse_transform(X_trans)
    assert_array_equal(expected_inv, X_inv)
|
Test that the order of the categories provided by a user is respected.
In this case 'c' is encoded as the first category and 'b' is encoded
as the second one.
|
test_ohe_infrequent_three_levels_user_cats
|
python
|
scikit-learn/scikit-learn
|
sklearn/preprocessing/tests/test_encoders.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/tests/test_encoders.py
|
BSD-3-Clause
|
def test_ohe_infrequent_mixed():
    """Test infrequent categories where feature 0 has infrequent categories,
    and feature 1 does not."""
    # X[:, 0] 1 and 2 are infrequent
    # X[:, 1] nothing is infrequent
    X = np.c_[[0, 1, 3, 3, 3, 3, 2, 0, 3], [0, 0, 0, 0, 1, 1, 1, 1, 1]]
    ohe = OneHotEncoder(max_categories=3, drop="if_binary", sparse_output=False)
    ohe.fit(X)
    X_test = [[3, 0], [1, 1]]
    X_trans = ohe.transform(X_test)
    # feature 1 is binary so it drops category 0
    assert_allclose(X_trans, [[0, 1, 0, 0], [0, 0, 1, 1]])
|
Test infrequent categories where feature 0 has infrequent categories,
and feature 1 does not.
|
test_ohe_infrequent_mixed
|
python
|
scikit-learn/scikit-learn
|
sklearn/preprocessing/tests/test_encoders.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/tests/test_encoders.py
|
BSD-3-Clause
|
def test_ohe_infrequent_multiple_categories():
    """Test infrequent categories with feature matrix with 3 features."""
    X = np.c_[
        [0, 1, 3, 3, 3, 3, 2, 0, 3],
        [0, 0, 5, 1, 1, 10, 5, 5, 0],
        [1, 0, 1, 0, 1, 0, 1, 0, 1],
    ]
    ohe = OneHotEncoder(
        categories="auto", max_categories=3, handle_unknown="infrequent_if_exist"
    )
    # X[:, 0] 1 and 2 are infrequent
    # X[:, 1] 1 and 10 are infrequent
    # X[:, 2] nothing is infrequent
    X_trans = ohe.fit_transform(X).toarray()
    assert_array_equal(ohe.infrequent_categories_[0], [1, 2])
    assert_array_equal(ohe.infrequent_categories_[1], [1, 10])
    assert_array_equal(ohe.infrequent_categories_[2], None)
    # 'infrequent' is used to denote the infrequent categories
    # For the first column, 1 and 2 have the same frequency. In this case,
    # 1 will be chosen to be the feature name because it is lexicographically
    # smaller
    feature_names = ohe.get_feature_names_out()
    assert_array_equal(
        [
            "x0_0",
            "x0_3",
            "x0_infrequent_sklearn",
            "x1_0",
            "x1_5",
            "x1_infrequent_sklearn",
            "x2_0",
            "x2_1",
        ],
        feature_names,
    )
    expected = [
        [1, 0, 0, 1, 0, 0, 0, 1],
        [0, 0, 1, 1, 0, 0, 1, 0],
        [0, 1, 0, 0, 1, 0, 0, 1],
        [0, 1, 0, 0, 0, 1, 1, 0],
        [0, 1, 0, 0, 0, 1, 0, 1],
        [0, 1, 0, 0, 0, 1, 1, 0],
        [0, 0, 1, 0, 1, 0, 0, 1],
        [1, 0, 0, 0, 1, 0, 1, 0],
        [0, 1, 0, 1, 0, 0, 0, 1],
    ]
    assert_allclose(expected, X_trans)
    X_test = [[3, 1, 2], [4, 0, 3]]
    X_test_trans = ohe.transform(X_test)
    # X[:, 2] does not have an infrequent category, thus it is encoded as all
    # zeros
    expected = [[0, 1, 0, 0, 0, 1, 0, 0], [0, 0, 1, 1, 0, 0, 0, 0]]
    assert_allclose(expected, X_test_trans.toarray())
    X_inv = ohe.inverse_transform(X_test_trans)
    expected_inv = np.array(
        [[3, "infrequent_sklearn", None], ["infrequent_sklearn", 0, None]],
        dtype=object,
    )
    assert_array_equal(expected_inv, X_inv)
    # error for unknown categories
    ohe = OneHotEncoder(
        categories="auto", max_categories=3, handle_unknown="error"
    ).fit(X)
    with pytest.raises(ValueError, match="Found unknown categories"):
        ohe.transform(X_test)
    # only infrequent or known categories
    X_test = [[1, 1, 1], [3, 10, 0]]
    X_test_trans = ohe.transform(X_test)
    expected = [[0, 0, 1, 0, 0, 1, 0, 1], [0, 1, 0, 0, 0, 1, 1, 0]]
    assert_allclose(expected, X_test_trans.toarray())
    X_inv = ohe.inverse_transform(X_test_trans)
    expected_inv = np.array(
        [["infrequent_sklearn", "infrequent_sklearn", 1], [3, "infrequent_sklearn", 0]],
        dtype=object,
    )
    assert_array_equal(expected_inv, X_inv)
|
Test infrequent categories with feature matrix with 3 features.
|
test_ohe_infrequent_multiple_categories
|
python
|
scikit-learn/scikit-learn
|
sklearn/preprocessing/tests/test_encoders.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/tests/test_encoders.py
|
BSD-3-Clause
|
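A short sketch of the naming and round-trip conventions the test above relies on (same scikit-learn >= 1.2 assumption): the pooled column is reported as `<feature>_infrequent_sklearn` by `get_feature_names_out`, and `inverse_transform` can only return the placeholder string for samples that fell into the pool:
import numpy as np
from sklearn.preprocessing import OneHotEncoder

X = np.array([["a"] * 5 + ["b"] * 20 + ["c"] * 10 + ["d"] * 3], dtype=object).T
enc = OneHotEncoder(max_categories=3, sparse_output=False).fit(X)
print(enc.get_feature_names_out())  # ['x0_b' 'x0_c' 'x0_infrequent_sklearn']
X_trans = enc.transform([["a"]])  # 'a' is known but infrequent
print(enc.inverse_transform(X_trans))  # [['infrequent_sklearn']]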
def test_ohe_infrequent_multiple_categories_dtypes():
    """Test infrequent categories with a pandas dataframe with multiple dtypes."""
    pd = pytest.importorskip("pandas")
    X = pd.DataFrame(
        {
            "str": ["a", "f", "c", "f", "f", "a", "c", "b", "b"],
            "int": [5, 3, 0, 10, 10, 12, 0, 3, 5],
        },
        columns=["str", "int"],
    )
    ohe = OneHotEncoder(
        categories="auto", max_categories=3, handle_unknown="infrequent_if_exist"
    )
    # X[:, 0] 'a', 'b', 'c' have the same frequency. 'a' and 'b' will be
    # considered infrequent because they appear first when sorted
    # X[:, 1] 0, 3, 5, 10 have frequency 2 and 12 has frequency 1.
    # 0, 3, 12 will be considered infrequent
    X_trans = ohe.fit_transform(X).toarray()
    assert_array_equal(ohe.infrequent_categories_[0], ["a", "b"])
    assert_array_equal(ohe.infrequent_categories_[1], [0, 3, 12])
    expected = [
        [0, 0, 1, 1, 0, 0],
        [0, 1, 0, 0, 0, 1],
        [1, 0, 0, 0, 0, 1],
        [0, 1, 0, 0, 1, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 1, 0, 0, 1],
        [1, 0, 0, 0, 0, 1],
        [0, 0, 1, 0, 0, 1],
        [0, 0, 1, 1, 0, 0],
    ]
    assert_allclose(expected, X_trans)
    X_test = pd.DataFrame({"str": ["b", "f"], "int": [14, 12]}, columns=["str", "int"])
    expected = [[0, 0, 1, 0, 0, 1], [0, 1, 0, 0, 0, 1]]
    X_test_trans = ohe.transform(X_test)
    assert_allclose(expected, X_test_trans.toarray())
    X_inv = ohe.inverse_transform(X_test_trans)
    expected_inv = np.array(
        [["infrequent_sklearn", "infrequent_sklearn"], ["f", "infrequent_sklearn"]],
        dtype=object,
    )
    assert_array_equal(expected_inv, X_inv)
    # only infrequent or known categories
    X_test = pd.DataFrame({"str": ["c", "b"], "int": [12, 5]}, columns=["str", "int"])
    X_test_trans = ohe.transform(X_test).toarray()
    expected = [[1, 0, 0, 0, 0, 1], [0, 0, 1, 1, 0, 0]]
    assert_allclose(expected, X_test_trans)
    X_inv = ohe.inverse_transform(X_test_trans)
    expected_inv = np.array(
        [["c", "infrequent_sklearn"], ["infrequent_sklearn", 5]], dtype=object
    )
    assert_array_equal(expected_inv, X_inv)
|
Test infrequent categories with a pandas dataframe with multiple dtypes.
|
test_ohe_infrequent_multiple_categories_dtypes
|
python
|
scikit-learn/scikit-learn
|
sklearn/preprocessing/tests/test_encoders.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/tests/test_encoders.py
|
BSD-3-Clause
|
def test_ohe_infrequent_one_level_errors(kwargs):
    """All user provided categories are infrequent."""
    X_train = np.array([["a"] * 5 + ["b"] * 20 + ["c"] * 10 + ["d"] * 2]).T
    ohe = OneHotEncoder(
        handle_unknown="infrequent_if_exist", sparse_output=False, **kwargs
    )
    ohe.fit(X_train)
    X_trans = ohe.transform([["a"]])
    assert_allclose(X_trans, [[1]])
|
All user provided categories are infrequent.
|
test_ohe_infrequent_one_level_errors
|
python
|
scikit-learn/scikit-learn
|
sklearn/preprocessing/tests/test_encoders.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/tests/test_encoders.py
|
BSD-3-Clause
|
def test_ohe_infrequent_user_cats_unknown_training_errors(kwargs):
    """All user provided categories are infrequent."""
    X_train = np.array([["e"] * 3], dtype=object).T
    ohe = OneHotEncoder(
        categories=[["c", "d", "a", "b"]],
        sparse_output=False,
        handle_unknown="infrequent_if_exist",
        **kwargs,
    ).fit(X_train)
    X_trans = ohe.transform([["a"], ["e"]])
    assert_allclose(X_trans, [[1], [1]])
|
All user provided categories are infrequent.
|
test_ohe_infrequent_user_cats_unknown_training_errors
|
python
|
scikit-learn/scikit-learn
|
sklearn/preprocessing/tests/test_encoders.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/tests/test_encoders.py
|
BSD-3-Clause
|
def test_encoders_string_categories(input_dtype, category_dtype, array_type):
    """Check that encoding works with object, unicode, and byte string dtypes.
    Non-regression test for:
    https://github.com/scikit-learn/scikit-learn/issues/15616
    https://github.com/scikit-learn/scikit-learn/issues/15726
    https://github.com/scikit-learn/scikit-learn/issues/19677
    """
    X = np.array([["b"], ["a"]], dtype=input_dtype)
    categories = [np.array(["b", "a"], dtype=category_dtype)]
    ohe = OneHotEncoder(categories=categories, sparse_output=False).fit(X)
    X_test = _convert_container(
        [["a"], ["a"], ["b"], ["a"]], array_type, dtype=input_dtype
    )
    X_trans = ohe.transform(X_test)
    expected = np.array([[0, 1], [0, 1], [1, 0], [0, 1]])
    assert_allclose(X_trans, expected)
    oe = OrdinalEncoder(categories=categories).fit(X)
    X_trans = oe.transform(X_test)
    expected = np.array([[1], [1], [0], [1]])
    assert_array_equal(X_trans, expected)
|
Check that encoding works with object, unicode, and byte string dtypes.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/15616
https://github.com/scikit-learn/scikit-learn/issues/15726
https://github.com/scikit-learn/scikit-learn/issues/19677
|
test_encoders_string_categories
|
python
|
scikit-learn/scikit-learn
|
sklearn/preprocessing/tests/test_encoders.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/tests/test_encoders.py
|
BSD-3-Clause
|
def test_mixed_string_bytes_categoricals():
    """Check that this mixture of predefined categories and X raises an error.
    Categories defined as bytes cannot easily be compared to data that is
    a string.
    """
    # data as unicode
    X = np.array([["b"], ["a"]], dtype="U")
    # predefined categories as bytes
    categories = [np.array(["b", "a"], dtype="S")]
    ohe = OneHotEncoder(categories=categories, sparse_output=False)
    msg = re.escape(
        "In column 0, the predefined categories have type 'bytes' which is incompatible"
        " with values of type 'str_'."
    )
    with pytest.raises(ValueError, match=msg):
        ohe.fit(X)
|
Check that this mixture of predefined categories and X raises an error.
Categories defined as bytes cannot easily be compared to data that is
a string.
|
test_mixed_string_bytes_categoricals
|
python
|
scikit-learn/scikit-learn
|
sklearn/preprocessing/tests/test_encoders.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/tests/test_encoders.py
|
BSD-3-Clause
|
def test_ohe_drop_first_handle_unknown_ignore_warns(handle_unknown):
    """Check drop='first' and handle_unknown='ignore'/'infrequent_if_exist'
    during transform."""
    X = [["a", 0], ["b", 2], ["b", 1]]
    ohe = OneHotEncoder(
        drop="first", sparse_output=False, handle_unknown=handle_unknown
    )
    X_trans = ohe.fit_transform(X)
    X_expected = np.array(
        [
            [0, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
        ]
    )
    assert_allclose(X_trans, X_expected)
    # Both categories are unknown
    X_test = [["c", 3]]
    X_expected = np.array([[0, 0, 0]])
    warn_msg = (
        r"Found unknown categories in columns \[0, 1\] during "
        "transform. These unknown categories will be encoded as all "
        "zeros"
    )
    with pytest.warns(UserWarning, match=warn_msg):
        X_trans = ohe.transform(X_test)
    assert_allclose(X_trans, X_expected)
    # with drop='first', the all-zero rows inverse_transform to the dropped
    # categories rather than to None
    X_inv = ohe.inverse_transform(X_expected)
    assert_array_equal(X_inv, np.array([["a", 0]], dtype=object))
|
Check drop='first' and handle_unknown='ignore'/'infrequent_if_exist'
during transform.
|
test_ohe_drop_first_handle_unknown_ignore_warns
|
python
|
scikit-learn/scikit-learn
|
sklearn/preprocessing/tests/test_encoders.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/tests/test_encoders.py
|
BSD-3-Clause
|
def test_ohe_drop_if_binary_handle_unknown_ignore_warns(handle_unknown):
    """Check drop='if_binary' and handle_unknown='ignore' during transform."""
    X = [["a", 0], ["b", 2], ["b", 1]]
    ohe = OneHotEncoder(
        drop="if_binary", sparse_output=False, handle_unknown=handle_unknown
    )
    X_trans = ohe.fit_transform(X)
    X_expected = np.array(
        [
            [0, 1, 0, 0],
            [1, 0, 0, 1],
            [1, 0, 1, 0],
        ]
    )
    assert_allclose(X_trans, X_expected)
    # Both categories are unknown
    X_test = [["c", 3]]
    X_expected = np.array([[0, 0, 0, 0]])
    warn_msg = (
        r"Found unknown categories in columns \[0, 1\] during "
        "transform. These unknown categories will be encoded as all "
        "zeros"
    )
    with pytest.warns(UserWarning, match=warn_msg):
        X_trans = ohe.transform(X_test)
    assert_allclose(X_trans, X_expected)
    # the dropped binary feature maps back to 'a'; the all-zero non-binary
    # feature inverse_transforms to None
    X_inv = ohe.inverse_transform(X_expected)
    assert_array_equal(X_inv, np.array([["a", None]], dtype=object))
|
Check drop='if_binary' and handle_unknown='ignore' during transform.
|
test_ohe_drop_if_binary_handle_unknown_ignore_warns
|
python
|
scikit-learn/scikit-learn
|
sklearn/preprocessing/tests/test_encoders.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/tests/test_encoders.py
|
BSD-3-Clause
|
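The two tests above hinge on an ambiguity: with drop='first' and handle_unknown='ignore', an unknown value is encoded as all zeros, which is exactly the code of the dropped category. A hedged sketch of that behavior (the category values here are illustrative):
import warnings
import numpy as np
from sklearn.preprocessing import OneHotEncoder

X = [["a"], ["b"], ["c"]]
enc = OneHotEncoder(drop="first", handle_unknown="ignore", sparse_output=False).fit(X)
with warnings.catch_warnings():
    warnings.simplefilter("ignore", UserWarning)  # transform warns on unknowns
    row = enc.transform([["z"]])
print(row)  # [[0. 0.]] -- indistinguishable from the dropped category
print(enc.inverse_transform(row))  # [['a']] -- resolved to the dropped category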
def test_ohe_drop_first_explicit_categories(handle_unknown):
    """Check drop='first' and handle_unknown='ignore'/'infrequent_if_exist'
    during fit with categories passed in."""
    X = [["a", 0], ["b", 2], ["b", 1]]
    ohe = OneHotEncoder(
        drop="first",
        sparse_output=False,
        handle_unknown=handle_unknown,
        categories=[["b", "a"], [1, 2]],
    )
    ohe.fit(X)
    X_test = [["c", 1]]
    X_expected = np.array([[0, 0]])
    warn_msg = (
        r"Found unknown categories in columns \[0\] during transform. "
        r"These unknown categories will be encoded as all zeros"
    )
    with pytest.warns(UserWarning, match=warn_msg):
        X_trans = ohe.transform(X_test)
    assert_allclose(X_trans, X_expected)
|
Check drop='first' and handle_unknown='ignore'/'infrequent_if_exist'
during fit with categories passed in.
|
test_ohe_drop_first_explicit_categories
|
python
|
scikit-learn/scikit-learn
|
sklearn/preprocessing/tests/test_encoders.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/tests/test_encoders.py
|
BSD-3-Clause
|
def test_ohe_more_informative_error_message():
    """Raise informative error message when pandas output and sparse_output=True."""
    pd = pytest.importorskip("pandas")
    df = pd.DataFrame({"a": [1, 2, 3], "b": ["z", "b", "b"]}, columns=["a", "b"])
    ohe = OneHotEncoder(sparse_output=True)
    ohe.set_output(transform="pandas")
    msg = (
        "Pandas output does not support sparse data. Set "
        "sparse_output=False to output pandas dataframes or disable Pandas output"
    )
    with pytest.raises(ValueError, match=msg):
        ohe.fit_transform(df)
    ohe.fit(df)
    with pytest.raises(ValueError, match=msg):
        ohe.transform(df)
|
Raise informative error message when pandas output and sparse_output=True.
|
test_ohe_more_informative_error_message
|
python
|
scikit-learn/scikit-learn
|
sklearn/preprocessing/tests/test_encoders.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/tests/test_encoders.py
|
BSD-3-Clause
|
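The error above comes from requesting pandas output while keeping the default sparse matrices. A minimal sketch of the documented workaround (assuming scikit-learn >= 1.2 for `set_output` and pandas installed):
import pandas as pd
from sklearn.preprocessing import OneHotEncoder

df = pd.DataFrame({"b": ["z", "b", "b"]})
enc = OneHotEncoder(sparse_output=False).set_output(transform="pandas")
print(enc.fit_transform(df))  # DataFrame with columns 'b_b' and 'b_z'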
def test_ordinal_encoder_passthrough_missing_values_float_errors_dtype():
    """Test ordinal encoder with nan passthrough fails when dtype=np.int32."""
    X = np.array([[np.nan, 3.0, 1.0, 3.0]]).T
    oe = OrdinalEncoder(dtype=np.int32)
    msg = (
        r"There are missing values in features \[0\]. For OrdinalEncoder "
        f"to encode missing values with dtype: {np.int32}"
    )
    with pytest.raises(ValueError, match=msg):
        oe.fit(X)
|
Test ordinal encoder with nan passthrough fails when dtype=np.int32.
|
test_ordinal_encoder_passthrough_missing_values_float_errors_dtype
|
python
|
scikit-learn/scikit-learn
|
sklearn/preprocessing/tests/test_encoders.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/tests/test_encoders.py
|
BSD-3-Clause
|
def test_ordinal_encoder_passthrough_missing_values_float(encoded_missing_value):
    """Test ordinal encoder with nan on float dtypes."""
    X = np.array([[np.nan, 3.0, 1.0, 3.0]], dtype=np.float64).T
    oe = OrdinalEncoder(encoded_missing_value=encoded_missing_value).fit(X)
    assert len(oe.categories_) == 1
    assert_allclose(oe.categories_[0], [1.0, 3.0, np.nan])
    X_trans = oe.transform(X)
    assert_allclose(X_trans, [[encoded_missing_value], [1.0], [0.0], [1.0]])
    X_inverse = oe.inverse_transform(X_trans)
    assert_allclose(X_inverse, X)
|
Test ordinal encoder with nan on float dtypes.
|
test_ordinal_encoder_passthrough_missing_values_float
|
python
|
scikit-learn/scikit-learn
|
sklearn/preprocessing/tests/test_encoders.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/tests/test_encoders.py
|
BSD-3-Clause
|
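A compact sketch of the `encoded_missing_value` behavior the tests above check (assuming scikit-learn >= 1.1, where the parameter was introduced): np.nan stays a category of its own and receives the requested code:
import numpy as np
from sklearn.preprocessing import OrdinalEncoder

X = np.array([[np.nan, 3.0, 1.0, 3.0]]).T
oe = OrdinalEncoder(encoded_missing_value=-1).fit(X)
print(oe.categories_)  # [array([ 1.,  3., nan])]
print(oe.transform(X).ravel())  # [-1.  1.  0.  1.]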
def test_ordinal_encoder_missing_value_support_pandas_categorical(
    pd_nan_type, encoded_missing_value
):
    """Check ordinal encoder is compatible with pandas."""
    # checks pandas dataframe with categorical features
    pd = pytest.importorskip("pandas")
    pd_missing_value = pd.NA if pd_nan_type == "pd.NA" else np.nan
    df = pd.DataFrame(
        {
            "col1": pd.Series(["c", "a", pd_missing_value, "b", "a"], dtype="category"),
        }
    )
    oe = OrdinalEncoder(encoded_missing_value=encoded_missing_value).fit(df)
    assert len(oe.categories_) == 1
    assert_array_equal(oe.categories_[0][:3], ["a", "b", "c"])
    assert np.isnan(oe.categories_[0][-1])
    df_trans = oe.transform(df)
    assert_allclose(df_trans, [[2.0], [0.0], [encoded_missing_value], [1.0], [0.0]])
    X_inverse = oe.inverse_transform(df_trans)
    assert X_inverse.shape == (5, 1)
    assert_array_equal(X_inverse[:2, 0], ["c", "a"])
    assert_array_equal(X_inverse[3:, 0], ["b", "a"])
    assert np.isnan(X_inverse[2, 0])
|
Check ordinal encoder is compatible with pandas.
|
test_ordinal_encoder_missing_value_support_pandas_categorical
|
python
|
scikit-learn/scikit-learn
|
sklearn/preprocessing/tests/test_encoders.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/tests/test_encoders.py
|
BSD-3-Clause
|
def test_ordinal_encoder_specified_categories_missing_passthrough(
    X, X2, cats, cat_dtype
):
    """Test ordinal encoder for specified categories."""
    oe = OrdinalEncoder(categories=cats)
    exp = np.array([[0.0], [np.nan]])
    assert_array_equal(oe.fit_transform(X), exp)
    # manually specified categories should have same dtype as
    # the data when coerced from lists
    assert oe.categories_[0].dtype == cat_dtype
    # when specifying categories manually, unknown categories should already
    # raise when fitting
    oe = OrdinalEncoder(categories=cats)
    with pytest.raises(ValueError, match="Found unknown categories"):
        oe.fit(X2)
|
Test ordinal encoder for specified categories.
|
test_ordinal_encoder_specified_categories_missing_passthrough
|
python
|
scikit-learn/scikit-learn
|
sklearn/preprocessing/tests/test_encoders.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/tests/test_encoders.py
|
BSD-3-Clause
|
def test_encoder_duplicate_specified_categories(Encoder):
    """Test encoder when specified categories have duplicate values.
    Non-regression test for:
    https://github.com/scikit-learn/scikit-learn/issues/27088
    """
    cats = [np.array(["a", "b", "a"], dtype=object)]
    enc = Encoder(categories=cats)
    X = np.array([["a", "b"]], dtype=object).T
    with pytest.raises(
        ValueError, match="the predefined categories contain duplicate elements."
    ):
        enc.fit(X)
|
Test encoder when specified categories have duplicate values.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/27088
|
test_encoder_duplicate_specified_categories
|
python
|
scikit-learn/scikit-learn
|
sklearn/preprocessing/tests/test_encoders.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/tests/test_encoders.py
|
BSD-3-Clause
|
def test_ordinal_encoder_handle_missing_and_unknown(X, expected_X_trans, X_test):
    """Test the interaction between missing values and handle_unknown."""
    oe = OrdinalEncoder(handle_unknown="use_encoded_value", unknown_value=-1)
    X_trans = oe.fit_transform(X)
    assert_allclose(X_trans, expected_X_trans)
    assert_allclose(oe.transform(X_test), [[-1.0]])
|
Test the interaction between missing values and handle_unknown
|
test_ordinal_encoder_handle_missing_and_unknown
|
python
|
scikit-learn/scikit-learn
|
sklearn/preprocessing/tests/test_encoders.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/tests/test_encoders.py
|
BSD-3-Clause
|
def test_ordinal_encoder_sparse(csr_container):
    """Check that we raise proper error with sparse input in OrdinalEncoder.
    Non-regression test for:
    https://github.com/scikit-learn/scikit-learn/issues/19878
    """
    X = np.array([[3, 2, 1], [0, 1, 1]])
    X_sparse = csr_container(X)
    encoder = OrdinalEncoder()
    err_msg = "Sparse data was passed, but dense data is required"
    with pytest.raises(TypeError, match=err_msg):
        encoder.fit(X_sparse)
    with pytest.raises(TypeError, match=err_msg):
        encoder.fit_transform(X_sparse)
    X_trans = encoder.fit_transform(X)
    X_trans_sparse = csr_container(X_trans)
    with pytest.raises(TypeError, match=err_msg):
        encoder.inverse_transform(X_trans_sparse)
|
Check that we raise proper error with sparse input in OrdinalEncoder.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/19878
|
test_ordinal_encoder_sparse
|
python
|
scikit-learn/scikit-learn
|
sklearn/preprocessing/tests/test_encoders.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/tests/test_encoders.py
|
BSD-3-Clause
|
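Several of the surrounding tests use `handle_unknown="use_encoded_value"`; as a minimal sketch, it maps any unseen category to the fixed `unknown_value` instead of raising:
import numpy as np
from sklearn.preprocessing import OrdinalEncoder

oe = OrdinalEncoder(handle_unknown="use_encoded_value", unknown_value=-1)
oe.fit(np.array([["cat"], ["dog"]], dtype=object))
print(oe.transform(np.array([["dog"], ["snake"]], dtype=object)).ravel())
# [ 1. -1.] -- 'snake' was never seen during fit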
def test_ordinal_encoder_fit_with_unseen_category():
    """Check OrdinalEncoder.fit works with unseen category when
    `handle_unknown="use_encoded_value"`.
    Non-regression test for:
    https://github.com/scikit-learn/scikit-learn/issues/19872
    """
    X = np.array([0, 0, 1, 0, 2, 5])[:, np.newaxis]
    oe = OrdinalEncoder(
        categories=[[-1, 0, 1]], handle_unknown="use_encoded_value", unknown_value=-999
    )
    oe.fit(X)
    oe = OrdinalEncoder(categories=[[-1, 0, 1]], handle_unknown="error")
    with pytest.raises(ValueError, match="Found unknown categories"):
        oe.fit(X)
|
Check OrdinalEncoder.fit works with unseen category when
`handle_unknown="use_encoded_value"`.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/19872
|
test_ordinal_encoder_fit_with_unseen_category
|
python
|
scikit-learn/scikit-learn
|
sklearn/preprocessing/tests/test_encoders.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/tests/test_encoders.py
|
BSD-3-Clause
|
def test_ordinal_encoder_handle_unknown_string_dtypes(X_train, X_test):
    """Checks that `OrdinalEncoder` transforms string dtypes.
    Non-regression test for:
    https://github.com/scikit-learn/scikit-learn/issues/19872
    """
    enc = OrdinalEncoder(handle_unknown="use_encoded_value", unknown_value=-9)
    enc.fit(X_train)
    X_trans = enc.transform(X_test)
    assert_allclose(X_trans, [[-9, 0]])
|
Checks that `OrdinalEncoder` transforms string dtypes.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/19872
|
test_ordinal_encoder_handle_unknown_string_dtypes
|
python
|
scikit-learn/scikit-learn
|
sklearn/preprocessing/tests/test_encoders.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/tests/test_encoders.py
|
BSD-3-Clause
|
def test_ordinal_encoder_python_integer():
    """Check that `OrdinalEncoder` accepts Python integers that are potentially
    larger than 64 bits.
    Non-regression test for:
    https://github.com/scikit-learn/scikit-learn/issues/20721
    """
    X = np.array(
        [
            44253463435747313673,
            9867966753463435747313673,
            44253462342215747313673,
            442534634357764313673,
        ]
    ).reshape(-1, 1)
    encoder = OrdinalEncoder().fit(X)
    assert_array_equal(encoder.categories_, np.sort(X, axis=0).T)
    X_trans = encoder.transform(X)
    assert_array_equal(X_trans, [[0], [3], [2], [1]])
|
Check that `OrdinalEncoder` accepts Python integers that are potentially
larger than 64 bits.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/20721
|
test_ordinal_encoder_python_integer
|
python
|
scikit-learn/scikit-learn
|
sklearn/preprocessing/tests/test_encoders.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/tests/test_encoders.py
|
BSD-3-Clause
|
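A sketch of the big-integer behavior the test above guards (the explicit `dtype=object` is my addition; the test relies on NumPy inferring it for ints that overflow int64):
import numpy as np
from sklearn.preprocessing import OrdinalEncoder

X = np.array([[2**70], [2**65], [2**70]], dtype=object)
oe = OrdinalEncoder().fit(X)
print(oe.transform(X).ravel())  # [1. 0. 1.] -- sorted like any other categories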
def test_ordinal_encoder_features_names_out_pandas():
    """Check that the feature names out are the same as the input names."""
    pd = pytest.importorskip("pandas")
    names = ["b", "c", "a"]
    X = pd.DataFrame([[1, 2, 3]], columns=names)
    enc = OrdinalEncoder().fit(X)
    feature_names_out = enc.get_feature_names_out()
    assert_array_equal(names, feature_names_out)
|
Check that the feature names out are the same as the input names.
|
test_ordinal_encoder_features_names_out_pandas
|
python
|
scikit-learn/scikit-learn
|
sklearn/preprocessing/tests/test_encoders.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/tests/test_encoders.py
|
BSD-3-Clause
|
def test_ordinal_encoder_unknown_missing_interaction():
    """Check interactions between handle_unknown and missing value encoding."""
    X = np.array([["a"], ["b"], [np.nan]], dtype=object)
    oe = OrdinalEncoder(
        handle_unknown="use_encoded_value",
        unknown_value=np.nan,
        encoded_missing_value=-3,
    ).fit(X)
    X_trans = oe.transform(X)
    assert_allclose(X_trans, [[0], [1], [-3]])
    # "c" is unknown and is mapped to np.nan
    # np.nan is a missing value and is set to -3
    X_test = np.array([["c"], [np.nan]], dtype=object)
    X_test_trans = oe.transform(X_test)
    assert_allclose(X_test_trans, [[np.nan], [-3]])
    # Non-regression test for #24082
    X_roundtrip = oe.inverse_transform(X_test_trans)
    # np.nan is unknown so it maps to None
    assert X_roundtrip[0][0] is None
    # -3 is the encoded missing value so it maps back to nan
    assert np.isnan(X_roundtrip[1][0])
|
Check interactions between handle_unknown and missing value encoding.
|
test_ordinal_encoder_unknown_missing_interaction
|
python
|
scikit-learn/scikit-learn
|
sklearn/preprocessing/tests/test_encoders.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/tests/test_encoders.py
|
BSD-3-Clause
|
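A sketch of the round-trip convention exercised above, with an integer `unknown_value` for readability: unknowns inverse-transform to None, while the encoded missing value maps back to np.nan:
import numpy as np
from sklearn.preprocessing import OrdinalEncoder

X = np.array([["a"], ["b"], [np.nan]], dtype=object)
oe = OrdinalEncoder(
    handle_unknown="use_encoded_value", unknown_value=-1, encoded_missing_value=-3
).fit(X)
X_trans = oe.transform(np.array([["c"], [np.nan]], dtype=object))
print(X_trans.ravel())  # [-1. -3.]
print(oe.inverse_transform(X_trans))  # [[None] [nan]]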
def test_ordinal_encoder_encoded_missing_value_error(with_pandas):
    """Check OrdinalEncoder errors when encoded_missing_value is used by
    a known category."""
    X = np.array([["a", "dog"], ["b", "cat"], ["c", np.nan]], dtype=object)
    # The 0-th feature has no missing values so it is not included in the list of
    # features
    error_msg = (
        r"encoded_missing_value \(1\) is already used to encode a known category "
        r"in features: "
    )
    if with_pandas:
        pd = pytest.importorskip("pandas")
        X = pd.DataFrame(X, columns=["letter", "pet"])
        error_msg = error_msg + r"\['pet'\]"
    else:
        error_msg = error_msg + r"\[1\]"
    oe = OrdinalEncoder(encoded_missing_value=1)
    with pytest.raises(ValueError, match=error_msg):
        oe.fit(X)
|
Check OrdinalEncoder errors when encoded_missing_value is used by
a known category.
|
test_ordinal_encoder_encoded_missing_value_error
|
python
|
scikit-learn/scikit-learn
|
sklearn/preprocessing/tests/test_encoders.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/tests/test_encoders.py
|
BSD-3-Clause
|
def test_ordinal_encoder_unknown_missing_interaction_both_nan(
    X_train, X_test_trans_expected, X_roundtrip_expected
):
    """Check transform when unknown_value and encoded_missing_value are nan.
    Non-regression test for #24082.
    """
    oe = OrdinalEncoder(
        handle_unknown="use_encoded_value",
        unknown_value=np.nan,
        encoded_missing_value=np.nan,
    ).fit(X_train)
    X_test = np.array([["1"], [np.nan], ["b"]])
    X_test_trans = oe.transform(X_test)
    # both nan and unknown are encoded as nan
    assert_allclose(X_test_trans, X_test_trans_expected)
    X_roundtrip = oe.inverse_transform(X_test_trans)
    n_samples = X_roundtrip_expected.shape[0]
    for i in range(n_samples):
        expected_val = X_roundtrip_expected[i, 0]
        val = X_roundtrip[i, 0]
        if expected_val is None:
            assert val is None
        elif is_scalar_nan(expected_val):
            assert np.isnan(val)
        else:
            assert val == expected_val
|
Check transform when unknown_value and encoded_missing_value are nan.
Non-regression test for #24082.
|
test_ordinal_encoder_unknown_missing_interaction_both_nan
|
python
|
scikit-learn/scikit-learn
|
sklearn/preprocessing/tests/test_encoders.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/tests/test_encoders.py
|
BSD-3-Clause
|
def test_predefined_categories_dtype():
    """Check that the categories_ dtype is `object` for string categories.
    Regression test for gh-25171.
    """
    categories = [["as", "mmas", "eas", "ras", "acs"], ["1", "2"]]
    enc = OneHotEncoder(categories=categories)
    enc.fit([["as", "1"]])
    assert len(categories) == len(enc.categories_)
    for n, cat in enumerate(enc.categories_):
        assert cat.dtype == object
        assert_array_equal(categories[n], cat)
|
Check that the categories_ dtype is `object` for string categories.
Regression test for gh-25171.
|
test_predefined_categories_dtype
|
python
|
scikit-learn/scikit-learn
|
sklearn/preprocessing/tests/test_encoders.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/tests/test_encoders.py
|
BSD-3-Clause
|
def test_ordinal_encoder_missing_unknown_encoding_max():
    """Check missing value or unknown encoding can equal the cardinality."""
    X = np.array([["dog"], ["cat"], [np.nan]], dtype=object)
    X_trans = OrdinalEncoder(encoded_missing_value=2).fit_transform(X)
    assert_allclose(X_trans, [[1], [0], [2]])
    enc = OrdinalEncoder(handle_unknown="use_encoded_value", unknown_value=2).fit(X)
    X_test = np.array([["snake"]])
    X_trans = enc.transform(X_test)
    assert_allclose(X_trans, [[2]])
|
Check missing value or unknown encoding can equal the cardinality.
|
test_ordinal_encoder_missing_unknown_encoding_max
|
python
|
scikit-learn/scikit-learn
|
sklearn/preprocessing/tests/test_encoders.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/tests/test_encoders.py
|
BSD-3-Clause
|
def test_drop_idx_infrequent_categories():
    """Check drop_idx is defined correctly with infrequent categories.
    Non-regression test for gh-25550.
    """
    X = np.array(
        [["a"] * 2 + ["b"] * 4 + ["c"] * 4 + ["d"] * 4 + ["e"] * 4], dtype=object
    ).T
    ohe = OneHotEncoder(min_frequency=4, sparse_output=False, drop="first").fit(X)
    assert_array_equal(
        ohe.get_feature_names_out(), ["x0_c", "x0_d", "x0_e", "x0_infrequent_sklearn"]
    )
    assert ohe.categories_[0][ohe.drop_idx_[0]] == "b"
    X = np.array([["a"] * 2 + ["b"] * 2 + ["c"] * 10], dtype=object).T
    ohe = OneHotEncoder(min_frequency=4, sparse_output=False, drop="if_binary").fit(X)
    assert_array_equal(ohe.get_feature_names_out(), ["x0_infrequent_sklearn"])
    assert ohe.categories_[0][ohe.drop_idx_[0]] == "c"
    X = np.array(
        [["a"] * 2 + ["b"] * 4 + ["c"] * 4 + ["d"] * 4 + ["e"] * 4], dtype=object
    ).T
    ohe = OneHotEncoder(min_frequency=4, sparse_output=False, drop=["d"]).fit(X)
    assert_array_equal(
        ohe.get_feature_names_out(), ["x0_b", "x0_c", "x0_e", "x0_infrequent_sklearn"]
    )
    assert ohe.categories_[0][ohe.drop_idx_[0]] == "d"
    ohe = OneHotEncoder(min_frequency=4, sparse_output=False, drop=None).fit(X)
    assert_array_equal(
        ohe.get_feature_names_out(),
        ["x0_b", "x0_c", "x0_d", "x0_e", "x0_infrequent_sklearn"],
    )
    assert ohe.drop_idx_ is None
|
Check drop_idx is defined correctly with infrequent categories.
Non-regression test for gh-25550.
|
test_drop_idx_infrequent_categories
|
python
|
scikit-learn/scikit-learn
|
sklearn/preprocessing/tests/test_encoders.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/tests/test_encoders.py
|
BSD-3-Clause
|
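A pared-down sketch of the gh-25550 behavior checked above: `drop_idx_` indexes into `categories_`, and with infrequent grouping the dropped 'first' category is the first frequent one:
import numpy as np
from sklearn.preprocessing import OneHotEncoder

X = np.array([["a"] * 2 + ["b"] * 4 + ["c"] * 4], dtype=object).T
enc = OneHotEncoder(min_frequency=4, sparse_output=False, drop="first").fit(X)
print(enc.get_feature_names_out())  # ['x0_c' 'x0_infrequent_sklearn']
print(enc.categories_[0][enc.drop_idx_[0]])  # 'b', the first frequent category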
def test_ordinal_encoder_infrequent_three_levels(kwargs):
    """Test parameters for grouping 'a' and 'd' into the infrequent category."""
    X_train = np.array([["a"] * 5 + ["b"] * 20 + ["c"] * 10 + ["d"] * 3]).T
    ordinal = OrdinalEncoder(
        handle_unknown="use_encoded_value", unknown_value=-1, **kwargs
    ).fit(X_train)
    assert_array_equal(ordinal.categories_, [["a", "b", "c", "d"]])
    assert_array_equal(ordinal.infrequent_categories_, [["a", "d"]])
    X_test = [["a"], ["b"], ["c"], ["d"], ["z"]]
    expected_trans = [[2], [0], [1], [2], [-1]]
    X_trans = ordinal.transform(X_test)
    assert_allclose(X_trans, expected_trans)
    X_inverse = ordinal.inverse_transform(X_trans)
    expected_inverse = [
        ["infrequent_sklearn"],
        ["b"],
        ["c"],
        ["infrequent_sklearn"],
        [None],
    ]
    assert_array_equal(X_inverse, expected_inverse)
|
Test parameters for grouping 'a' and 'd' into the infrequent category.
|
test_ordinal_encoder_infrequent_three_levels
|
python
|
scikit-learn/scikit-learn
|
sklearn/preprocessing/tests/test_encoders.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/tests/test_encoders.py
|
BSD-3-Clause
|
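For OrdinalEncoder, the layout the test above asserts can be summarized in a short sketch: frequent categories get codes 0..k-1 in `categories_` order, the pooled infrequent group shares the single code k, and unknowns take `unknown_value`:
import numpy as np
from sklearn.preprocessing import OrdinalEncoder

X = np.array([["a"] * 5 + ["b"] * 20 + ["c"] * 10 + ["d"] * 3]).T
oe = OrdinalEncoder(
    max_categories=3, handle_unknown="use_encoded_value", unknown_value=-1
).fit(X)
print(oe.infrequent_categories_)  # [array(['a', 'd'], dtype='<U1')]
print(oe.transform([["b"], ["a"], ["z"]]).ravel())  # [ 0.  2. -1.]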
def test_ordinal_encoder_infrequent_three_levels_user_cats():
    """Test that the order of the categories provided by a user is respected.
    In this case 'c' is encoded as the first category and 'b' is encoded
    as the second one.
    """
    X_train = np.array(
        [["a"] * 5 + ["b"] * 20 + ["c"] * 10 + ["d"] * 3], dtype=object
    ).T
    ordinal = OrdinalEncoder(
        categories=[["c", "d", "b", "a"]],
        max_categories=3,
        handle_unknown="use_encoded_value",
        unknown_value=-1,
    ).fit(X_train)
    assert_array_equal(ordinal.categories_, [["c", "d", "b", "a"]])
    assert_array_equal(ordinal.infrequent_categories_, [["d", "a"]])
    X_test = [["a"], ["b"], ["c"], ["d"], ["z"]]
    expected_trans = [[2], [1], [0], [2], [-1]]
    X_trans = ordinal.transform(X_test)
    assert_allclose(X_trans, expected_trans)
    X_inverse = ordinal.inverse_transform(X_trans)
    expected_inverse = [
        ["infrequent_sklearn"],
        ["b"],
        ["c"],
        ["infrequent_sklearn"],
        [None],
    ]
    assert_array_equal(X_inverse, expected_inverse)
|
Test that the order of the categories provided by a user is respected.
In this case 'c' is encoded as the first category and 'b' is encoded
as the second one.
|
test_ordinal_encoder_infrequent_three_levels_user_cats
|
python
|
scikit-learn/scikit-learn
|
sklearn/preprocessing/tests/test_encoders.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/tests/test_encoders.py
|
BSD-3-Clause
|
def test_ordinal_encoder_infrequent_mixed():
    """Test when feature 0 has infrequent categories and feature 1 does not."""
    X = np.column_stack(([0, 1, 3, 3, 3, 3, 2, 0, 3], [0, 0, 0, 0, 1, 1, 1, 1, 1]))
    ordinal = OrdinalEncoder(max_categories=3).fit(X)
    assert_array_equal(ordinal.infrequent_categories_[0], [1, 2])
    assert ordinal.infrequent_categories_[1] is None
    X_test = [[3, 0], [1, 1]]
    expected_trans = [[1, 0], [2, 1]]
    X_trans = ordinal.transform(X_test)
    assert_allclose(X_trans, expected_trans)
    X_inverse = ordinal.inverse_transform(X_trans)
    expected_inverse = np.array([[3, 0], ["infrequent_sklearn", 1]], dtype=object)
    assert_array_equal(X_inverse, expected_inverse)
|
Test when feature 0 has infrequent categories and feature 1 does not.
|
test_ordinal_encoder_infrequent_mixed
|
python
|
scikit-learn/scikit-learn
|
sklearn/preprocessing/tests/test_encoders.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/tests/test_encoders.py
|
BSD-3-Clause
|
def test_ordinal_encoder_infrequent_multiple_categories_dtypes():
    """Test infrequent categories with a pandas DataFrame with multiple dtypes."""
    pd = pytest.importorskip("pandas")
    categorical_dtype = pd.CategoricalDtype(["bird", "cat", "dog", "snake"])
    X = pd.DataFrame(
        {
            "str": ["a", "f", "c", "f", "f", "a", "c", "b", "b"],
            "int": [5, 3, 0, 10, 10, 12, 0, 3, 5],
            "categorical": pd.Series(
                ["dog"] * 4 + ["cat"] * 3 + ["snake"] + ["bird"],
                dtype=categorical_dtype,
            ),
        },
        columns=["str", "int", "categorical"],
    )
    ordinal = OrdinalEncoder(max_categories=3).fit(X)
    # X[:, 0] 'a', 'b', 'c' have the same frequency. 'a' and 'b' will be
    # considered infrequent because they appear first when sorted
    # X[:, 1] 0, 3, 5, 10 have frequency 2 and 12 has frequency 1.
    # 0, 3, 12 will be considered infrequent because they appear first when
    # sorted.
    # X[:, 2] "snake" and "bird" are infrequent
    assert_array_equal(ordinal.infrequent_categories_[0], ["a", "b"])
    assert_array_equal(ordinal.infrequent_categories_[1], [0, 3, 12])
    assert_array_equal(ordinal.infrequent_categories_[2], ["bird", "snake"])
    X_test = pd.DataFrame(
        {
            "str": ["a", "b", "f", "c"],
            "int": [12, 0, 10, 5],
            "categorical": pd.Series(
                ["cat"] + ["snake"] + ["bird"] + ["dog"],
                dtype=categorical_dtype,
            ),
        },
        columns=["str", "int", "categorical"],
    )
    expected_trans = [[2, 2, 0], [2, 2, 2], [1, 1, 2], [0, 0, 1]]
    X_trans = ordinal.transform(X_test)
    assert_allclose(X_trans, expected_trans)
|
Test infrequent categories with a pandas DataFrame with multiple dtypes.
|
test_ordinal_encoder_infrequent_multiple_categories_dtypes
|
python
|
scikit-learn/scikit-learn
|
sklearn/preprocessing/tests/test_encoders.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/tests/test_encoders.py
|
BSD-3-Clause
|
def test_ordinal_encoder_infrequent_custom_mapping():
    """Check behavior of unknown_value and encoded_missing_value with infrequent."""
    X_train = np.array(
        [["a"] * 5 + ["b"] * 20 + ["c"] * 10 + ["d"] * 3 + [np.nan]], dtype=object
    ).T
    ordinal = OrdinalEncoder(
        handle_unknown="use_encoded_value",
        unknown_value=2,
        max_categories=2,
        encoded_missing_value=3,
    ).fit(X_train)
    assert_array_equal(ordinal.infrequent_categories_, [["a", "c", "d"]])
    X_test = np.array([["a"], ["b"], ["c"], ["d"], ["e"], [np.nan]], dtype=object)
    expected_trans = [[1], [0], [1], [1], [2], [3]]
    X_trans = ordinal.transform(X_test)
    assert_allclose(X_trans, expected_trans)
|
Check behavior of unknown_value and encoded_missing_value with infrequent.
|
test_ordinal_encoder_infrequent_custom_mapping
|
python
|
scikit-learn/scikit-learn
|
sklearn/preprocessing/tests/test_encoders.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/tests/test_encoders.py
|
BSD-3-Clause
|
def test_ordinal_encoder_all_frequent(kwargs):
    """All categories are considered frequent and have the same encoding as the
    default encoder."""
    X_train = np.array(
        [["a"] * 5 + ["b"] * 20 + ["c"] * 10 + ["d"] * 3], dtype=object
    ).T
    adjusted_encoder = OrdinalEncoder(
        **kwargs, handle_unknown="use_encoded_value", unknown_value=-1
    ).fit(X_train)
    default_encoder = OrdinalEncoder(
        handle_unknown="use_encoded_value", unknown_value=-1
    ).fit(X_train)
    X_test = [["a"], ["b"], ["c"], ["d"], ["e"]]
    assert_allclose(
        adjusted_encoder.transform(X_test), default_encoder.transform(X_test)
    )
|
All categories are considered frequent and have the same encoding as the default encoder.
|
test_ordinal_encoder_all_frequent
|
python
|
scikit-learn/scikit-learn
|
sklearn/preprocessing/tests/test_encoders.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/tests/test_encoders.py
|
BSD-3-Clause
|
def test_ordinal_encoder_all_infrequent(kwargs):
    """When all categories are infrequent, they are all encoded as zero."""
    X_train = np.array(
        [["a"] * 5 + ["b"] * 20 + ["c"] * 10 + ["d"] * 3], dtype=object
    ).T
    encoder = OrdinalEncoder(
        **kwargs, handle_unknown="use_encoded_value", unknown_value=-1
    ).fit(X_train)
    X_test = [["a"], ["b"], ["c"], ["d"], ["e"]]
    assert_allclose(encoder.transform(X_test), [[0], [0], [0], [0], [-1]])
|
When all categories are infrequent, they are all encoded as zero.
|
test_ordinal_encoder_all_infrequent
|
python
|
scikit-learn/scikit-learn
|
sklearn/preprocessing/tests/test_encoders.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/tests/test_encoders.py
|
BSD-3-Clause
|
def test_ordinal_encoder_missing_appears_frequent():
    """Check behavior when missing value appears frequently."""
    X = np.array(
        [[np.nan] * 20 + ["dog"] * 10 + ["cat"] * 5 + ["snake"] + ["deer"]],
        dtype=object,
    ).T
    ordinal = OrdinalEncoder(max_categories=3).fit(X)
    X_test = np.array([["snake", "cat", "dog", np.nan]], dtype=object).T
    X_trans = ordinal.transform(X_test)
    assert_allclose(X_trans, [[2], [0], [1], [np.nan]])
|
Check behavior when missing value appears frequently.
|
test_ordinal_encoder_missing_appears_frequent
|
python
|
scikit-learn/scikit-learn
|
sklearn/preprocessing/tests/test_encoders.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/tests/test_encoders.py
|
BSD-3-Clause
|
def test_ordinal_encoder_missing_appears_infrequent():
    """Check behavior when missing value appears infrequently."""
    # feature 0 has infrequent categories
    # feature 1 has no infrequent categories
    X = np.array(
        [
            [np.nan] + ["dog"] * 10 + ["cat"] * 5 + ["snake"] + ["deer"],
            ["red"] * 9 + ["green"] * 9,
        ],
        dtype=object,
    ).T
    ordinal = OrdinalEncoder(min_frequency=4).fit(X)
    X_test = np.array(
        [
            ["snake", "red"],
            ["deer", "green"],
            [np.nan, "green"],
            ["dog", "green"],
            ["cat", "red"],
        ],
        dtype=object,
    )
    X_trans = ordinal.transform(X_test)
    assert_allclose(X_trans, [[2, 1], [2, 0], [np.nan, 0], [1, 0], [0, 1]])
|
Check behavior when missing value appears infrequently.
|
test_ordinal_encoder_missing_appears_infrequent
|
python
|
scikit-learn/scikit-learn
|
sklearn/preprocessing/tests/test_encoders.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/tests/test_encoders.py
|
BSD-3-Clause
|
def test_encoder_not_fitted(Encoder):
    """Check that we raise a `NotFittedError` when calling transform before fit
    with the encoders.
    One could expect that passing the `categories` argument to the encoder
    would make it stateless. However, `fit` performs a couple of checks, such
    as the position of `np.nan`.
    """
    X = np.array([["A"], ["B"], ["C"]], dtype=object)
    encoder = Encoder(categories=[["A", "B", "C"]])
    with pytest.raises(NotFittedError):
        encoder.transform(X)
|
Check that we raise a `NotFittedError` when calling transform before fit with
the encoders.
One could expect that passing the `categories` argument to the encoder
would make it stateless. However, `fit` performs a couple of checks, such as
the position of `np.nan`.
|
test_encoder_not_fitted
|
python
|
scikit-learn/scikit-learn
|
sklearn/preprocessing/tests/test_encoders.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/tests/test_encoders.py
|
BSD-3-Clause
|
def test_function_transformer_raise_error_with_mixed_dtype(X_type):
    """Check that `FunctionTransformer.check_inverse` raises error on mixed dtype."""
    mapping = {"one": 1, "two": 2, "three": 3, 5: "five", 6: "six"}
    inverse_mapping = {value: key for key, value in mapping.items()}
    dtype = "object"
    data = ["one", "two", "three", "one", "one", 5, 6]
    data = _convert_container(data, X_type, columns_name=["value"], dtype=dtype)

    def func(X):
        return np.array([mapping[X[i]] for i in range(X.size)], dtype=object)

    def inverse_func(X):
        return _convert_container(
            [inverse_mapping[x] for x in X],
            X_type,
            columns_name=["value"],
            dtype=dtype,
        )

    transformer = FunctionTransformer(
        func=func, inverse_func=inverse_func, validate=False, check_inverse=True
    )
    msg = "'check_inverse' is only supported when all the elements in `X` is numerical."
    with pytest.raises(ValueError, match=msg):
        transformer.fit(data)
|
Check that `FunctionTransformer.check_inverse` raises error on mixed dtype.
|
test_function_transformer_raise_error_with_mixed_dtype
|
python
|
scikit-learn/scikit-learn
|
sklearn/preprocessing/tests/test_function_transformer.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/tests/test_function_transformer.py
|
BSD-3-Clause
|
def test_function_transformer_support_all_nummerical_dataframes_check_inverse_True():
    """Check support for dataframes with only numerical values."""
    pd = pytest.importorskip("pandas")
    df = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
    transformer = FunctionTransformer(
        func=lambda x: x + 2, inverse_func=lambda x: x - 2, check_inverse=True
    )
    # Does not raise an error
    df_out = transformer.fit_transform(df)
    assert_allclose_dense_sparse(df_out, df + 2)
|
Check support for dataframes with only numerical values.
|
test_function_transformer_support_all_nummerical_dataframes_check_inverse_True
|
python
|
scikit-learn/scikit-learn
|
sklearn/preprocessing/tests/test_function_transformer.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/tests/test_function_transformer.py
|
BSD-3-Clause
|
def test_function_transformer_with_dataframe_and_check_inverse_True():
    """Check error is raised when check_inverse=True.
    Non-regression test for gh-25261.
    """
    pd = pytest.importorskip("pandas")
    transformer = FunctionTransformer(
        func=lambda x: x, inverse_func=lambda x: x, check_inverse=True
    )
    df_mixed = pd.DataFrame({"a": [1, 2, 3], "b": ["a", "b", "c"]})
    msg = "'check_inverse' is only supported when all the elements in `X` is numerical."
    with pytest.raises(ValueError, match=msg):
        transformer.fit(df_mixed)
|
Check error is raised when check_inverse=True.
Non-regression test for gh-25261.
|
test_function_transformer_with_dataframe_and_check_inverse_True
|
python
|
scikit-learn/scikit-learn
|
sklearn/preprocessing/tests/test_function_transformer.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/tests/test_function_transformer.py
|
BSD-3-Clause
|
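A sketch of what `check_inverse=True` does when it is supported (all-numerical input): at fit time a subsample is round-tripped through `inverse_func(func(X))` and compared to the original:
import numpy as np
from sklearn.preprocessing import FunctionTransformer

ft = FunctionTransformer(func=np.expm1, inverse_func=np.log1p, check_inverse=True)
X = np.array([[0.1, 1.0], [2.0, 3.0]])
print(ft.fit_transform(X))  # fits cleanly: expm1 and log1p are exact inverses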
def test_function_transformer_validate_inverse():
    """Test that function transformer does not reset estimator in
    `inverse_transform`."""

    def add_constant_feature(X):
        X_one = np.ones((X.shape[0], 1))
        return np.concatenate((X, X_one), axis=1)

    def inverse_add_constant(X):
        return X[:, :-1]

    X = np.array([[1, 2], [3, 4], [3, 4]])
    trans = FunctionTransformer(
        func=add_constant_feature,
        inverse_func=inverse_add_constant,
        validate=True,
    )
    X_trans = trans.fit_transform(X)
    assert trans.n_features_in_ == X.shape[1]
    trans.inverse_transform(X_trans)
    assert trans.n_features_in_ == X.shape[1]
|
Test that function transformer does not reset estimator in
`inverse_transform`.
|
test_function_transformer_validate_inverse
|
python
|
scikit-learn/scikit-learn
|
sklearn/preprocessing/tests/test_function_transformer.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/tests/test_function_transformer.py
|
BSD-3-Clause
|
def test_get_feature_names_out_dataframe_with_string_data(
    feature_names_out, expected, in_pipeline
):
    """Check that get_feature_names_out works with DataFrames with string data."""
    pd = pytest.importorskip("pandas")
    X = pd.DataFrame({"pet": ["dog", "cat"], "color": ["red", "green"]})

    def func(X):
        if feature_names_out == "one-to-one":
            return X
        else:
            name = feature_names_out(None, X.columns)
            return X.rename(columns=dict(zip(X.columns, name)))

    transformer = FunctionTransformer(func=func, feature_names_out=feature_names_out)
    if in_pipeline:
        transformer = make_pipeline(transformer)
    X_trans = transformer.fit_transform(X)
    assert isinstance(X_trans, pd.DataFrame)
    names = transformer.get_feature_names_out()
    assert isinstance(names, np.ndarray)
    assert names.dtype == object
    assert_array_equal(names, expected)
|
Check that get_feature_names_out works with DataFrames with string data.
|
test_get_feature_names_out_dataframe_with_string_data
|
python
|
scikit-learn/scikit-learn
|
sklearn/preprocessing/tests/test_function_transformer.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/tests/test_function_transformer.py
|
BSD-3-Clause
|
def test_set_output_func():
    """Check behavior of set_output with different settings."""
    pd = pytest.importorskip("pandas")
    X = pd.DataFrame({"a": [1, 2, 3], "b": [10, 20, 100]})
    ft = FunctionTransformer(np.log, feature_names_out="one-to-one")
    # no warning is raised when feature_names_out is defined
    with warnings.catch_warnings():
        warnings.simplefilter("error", UserWarning)
        ft.set_output(transform="pandas")
        X_trans = ft.fit_transform(X)
    assert isinstance(X_trans, pd.DataFrame)
    assert_array_equal(X_trans.columns, ["a", "b"])
    ft = FunctionTransformer(lambda x: 2 * x)
    ft.set_output(transform="pandas")
    # no warning is raised when func returns a pandas dataframe
    with warnings.catch_warnings():
        warnings.simplefilter("error", UserWarning)
        X_trans = ft.fit_transform(X)
    assert isinstance(X_trans, pd.DataFrame)
    assert_array_equal(X_trans.columns, ["a", "b"])
    # Warning is raised when func returns an ndarray
    ft_np = FunctionTransformer(lambda x: np.asarray(x))
    for transform in ("pandas", "polars"):
        ft_np.set_output(transform=transform)
        msg = (
            f"When `set_output` is configured to be '{transform}'.*{transform} "
            "DataFrame.*"
        )
        with pytest.warns(UserWarning, match=msg):
            ft_np.fit_transform(X)
    # default transform does not warn
    ft_np.set_output(transform="default")
    with warnings.catch_warnings():
        warnings.simplefilter("error", UserWarning)
        ft_np.fit_transform(X)
|
Check behavior of set_output with different settings.
|
test_set_output_func
|
python
|
scikit-learn/scikit-learn
|
sklearn/preprocessing/tests/test_function_transformer.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/tests/test_function_transformer.py
|
BSD-3-Clause
|
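Several tests above pass a callable as `feature_names_out`; as a sketch of its contract, it receives the transformer and the input feature names and returns the output names, which then flow to downstream steps:
import numpy as np
import pandas as pd  # assumed available
from sklearn.preprocessing import FunctionTransformer

def with_suffix(transformer, names):
    return [name + "__log" for name in names]

ft = FunctionTransformer(np.log1p, feature_names_out=with_suffix)
ft.fit(pd.DataFrame({"a": [1.0, 2.0], "b": [3.0, 4.0]}))
print(ft.get_feature_names_out())  # ['a__log' 'b__log']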
def test_consistence_column_name_between_steps():
    """Check that the feature names out of `FunctionTransformer` are consistent
    with the input feature names of the next step in the pipeline.
    Non-regression test for:
    https://github.com/scikit-learn/scikit-learn/issues/27695
    """
    pd = pytest.importorskip("pandas")

    def with_suffix(_, names):
        return [name + "__log" for name in names]

    pipeline = make_pipeline(
        FunctionTransformer(np.log1p, feature_names_out=with_suffix), StandardScaler()
    )
    df = pd.DataFrame([[1, 2], [3, 4], [5, 6]], columns=["a", "b"])
    X_trans = pipeline.fit_transform(df)
    assert pipeline.get_feature_names_out().tolist() == ["a__log", "b__log"]
    # StandardScaler will convert to a numpy array
    assert isinstance(X_trans, np.ndarray)
|
Check that the feature names out of `FunctionTransformer` are consistent with
the input feature names of the next step in the pipeline.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/27695
|
test_consistence_column_name_between_steps
|
python
|
scikit-learn/scikit-learn
|
sklearn/preprocessing/tests/test_function_transformer.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/tests/test_function_transformer.py
|
BSD-3-Clause
|
def test_function_transformer_overwrite_column_names(dataframe_lib, transform_output):
    """Check that we overwrite the column names when we should."""
    lib = pytest.importorskip(dataframe_lib)
    if transform_output != "numpy":
        pytest.importorskip(transform_output)
    df = lib.DataFrame({"a": [1, 2, 3], "b": [10, 20, 100]})

    def with_suffix(_, names):
        return [name + "__log" for name in names]

    transformer = FunctionTransformer(feature_names_out=with_suffix).set_output(
        transform=transform_output
    )
    X_trans = transformer.fit_transform(df)
    assert_array_equal(np.asarray(X_trans), np.asarray(df))
    feature_names = transformer.get_feature_names_out()
    assert list(X_trans.columns) == with_suffix(None, df.columns)
    assert feature_names.tolist() == with_suffix(None, df.columns)
|
Check that we overwrite the column names when we should.
|
test_function_transformer_overwrite_column_names
|
python
|
scikit-learn/scikit-learn
|
sklearn/preprocessing/tests/test_function_transformer.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/tests/test_function_transformer.py
|
BSD-3-Clause
|
def test_function_transformer_overwrite_column_names_numerical(feature_names_out):
    """Check the same as `test_function_transformer_overwrite_column_names`
    but for the specific case of pandas where column names can be numerical."""
    pd = pytest.importorskip("pandas")
    df = pd.DataFrame({0: [1, 2, 3], 1: [10, 20, 100]})
    transformer = FunctionTransformer(feature_names_out=feature_names_out)
    X_trans = transformer.fit_transform(df)
    assert_array_equal(np.asarray(X_trans), np.asarray(df))
    feature_names = transformer.get_feature_names_out()
    assert list(X_trans.columns) == list(feature_names)
|
Check the same as `test_function_transformer_overwrite_column_names`
but for the specific case of pandas where column names can be numerical.
|
test_function_transformer_overwrite_column_names_numerical
|
python
|
scikit-learn/scikit-learn
|
sklearn/preprocessing/tests/test_function_transformer.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/tests/test_function_transformer.py
|
BSD-3-Clause
|
def test_function_transformer_error_column_inconsistent(
    dataframe_lib, feature_names_out
):
    """Check that we raise an error when `func` returns a dataframe with new
    column names that become inconsistent with `get_feature_names_out`."""
    lib = pytest.importorskip(dataframe_lib)
    df = lib.DataFrame({"a": [1, 2, 3], "b": [10, 20, 100]})

    def func(df):
        if dataframe_lib == "pandas":
            return df.rename(columns={"a": "c"})
        else:
            return df.rename({"a": "c"})

    transformer = FunctionTransformer(func=func, feature_names_out=feature_names_out)
    err_msg = "The output generated by `func` have different column names"
    with pytest.raises(ValueError, match=err_msg):
        transformer.fit_transform(df).columns
|
Check that we raise an error when `func` returns a dataframe with new
column names that become inconsistent with `get_feature_names_out`.
|
test_function_transformer_error_column_inconsistent
|
python
|
scikit-learn/scikit-learn
|
sklearn/preprocessing/tests/test_function_transformer.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/tests/test_function_transformer.py
|
BSD-3-Clause
|
def test_label_binarizer_pandas_nullable(dtype, unique_first):
    """Checks that LabelBinarizer works with pandas nullable dtypes.
    Non-regression test for gh-25637.
    """
    pd = pytest.importorskip("pandas")
    y_true = pd.Series([1, 0, 0, 1, 0, 1, 1, 0, 1], dtype=dtype)
    if unique_first:
        # Calling unique creates a pandas array which has a different interface
        # compared to a pandas Series. Specifically, pandas arrays do not have "iloc".
        y_true = y_true.unique()
    lb = LabelBinarizer().fit(y_true)
    y_out = lb.transform([1, 0])
    assert_array_equal(y_out, [[1], [0]])
|
Checks that LabelBinarizer works with pandas nullable dtypes.
Non-regression test for gh-25637.
|
test_label_binarizer_pandas_nullable
|
python
|
scikit-learn/scikit-learn
|
sklearn/preprocessing/tests/test_label.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/tests/test_label.py
|
BSD-3-Clause
|
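The test above leans on LabelBinarizer's binary convention; as a sketch, with exactly two classes it emits a single indicator column for the positive class:
from sklearn.preprocessing import LabelBinarizer

lb = LabelBinarizer().fit([1, 0, 0, 1])
print(lb.classes_)  # [0 1]
print(lb.transform([1, 0]))  # [[1] [0]] -- one column, 1 marks the positive class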
def test_nan_label_encoder():
    """Check that label encoder encodes nans in transform.
    Non-regression test for #22628.
    """
    le = LabelEncoder()
    le.fit(["a", "a", "b", np.nan])
    y_trans = le.transform([np.nan])
    assert_array_equal(y_trans, [2])
|
Check that label encoder encodes nans in transform.
Non-regression test for #22628.
|
test_nan_label_encoder
|
python
|
scikit-learn/scikit-learn
|
sklearn/preprocessing/tests/test_label.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/tests/test_label.py
|
BSD-3-Clause
|
def test_label_encoders_do_not_have_set_output(encoder):
"""Check that label encoders do not define set_output and work with y as a kwarg.
Non-regression test for #26854.
"""
assert not hasattr(encoder, "set_output")
y_encoded_with_kwarg = encoder.fit_transform(y=["a", "b", "c"])
y_encoded_positional = encoder.fit_transform(["a", "b", "c"])
assert_array_equal(y_encoded_with_kwarg, y_encoded_positional)
|
Check that label encoders do not define set_output and work with y as a kwarg.
Non-regression test for #26854.
|
test_label_encoders_do_not_have_set_output
|
python
|
scikit-learn/scikit-learn
|
sklearn/preprocessing/tests/test_label.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/tests/test_label.py
|
BSD-3-Clause
|
def test_polynomial_and_spline_array_order(est):
"""Test that output array has the given order."""
    X = np.arange(10).reshape(5, 2)

    def is_c_contiguous(a):
        return np.isfortran(a.T)

    assert is_c_contiguous(est().fit_transform(X))
assert is_c_contiguous(est(order="C").fit_transform(X))
assert np.isfortran(est(order="F").fit_transform(X))
|
Test that output array has the given order.
|
test_polynomial_and_spline_array_order
|
python
|
scikit-learn/scikit-learn
|
sklearn/preprocessing/tests/test_polynomial.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/tests/test_polynomial.py
|
BSD-3-Clause
|
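As an aside, the `is_c_contiguous` helper above leans on a small numpy fact worth spelling out: `np.isfortran` is True only for Fortran-ordered (and not also C-ordered) arrays, so applying it to the transpose is a compact C-contiguity check. A minimal standalone illustration:

import numpy as np

a = np.zeros((2, 3), order="C")
# The transpose of a C-contiguous 2-d array is Fortran-contiguous, so
# np.isfortran(a.T) is a compact way to assert C-contiguity of `a`.
assert np.isfortran(a.T) and not np.isfortran(a)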
def test_spline_transformer_input_validation(params, err_msg):
"""Test that we raise errors for invalid input in SplineTransformer."""
X = [[1], [2]]
with pytest.raises(ValueError, match=err_msg):
SplineTransformer(**params).fit(X)
|
Test that we raise errors for invalid input in SplineTransformer.
|
test_spline_transformer_input_validation
|
python
|
scikit-learn/scikit-learn
|
sklearn/preprocessing/tests/test_polynomial.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/tests/test_polynomial.py
|
BSD-3-Clause
|
def test_spline_transformer_integer_knots(extrapolation):
"""Test that SplineTransformer accepts integer value knot positions."""
X = np.arange(20).reshape(10, 2)
knots = [[0, 1], [1, 2], [5, 5], [11, 10], [12, 11]]
_ = SplineTransformer(
degree=3, knots=knots, extrapolation=extrapolation
).fit_transform(X)
|
Test that SplineTransformer accepts integer-valued knot positions.
|
test_spline_transformer_integer_knots
|
python
|
scikit-learn/scikit-learn
|
sklearn/preprocessing/tests/test_polynomial.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/tests/test_polynomial.py
|
BSD-3-Clause
|
def test_spline_transformer_feature_names():
"""Test that SplineTransformer generates correct features name."""
X = np.arange(20).reshape(10, 2)
splt = SplineTransformer(n_knots=3, degree=3, include_bias=True).fit(X)
feature_names = splt.get_feature_names_out()
assert_array_equal(
feature_names,
[
"x0_sp_0",
"x0_sp_1",
"x0_sp_2",
"x0_sp_3",
"x0_sp_4",
"x1_sp_0",
"x1_sp_1",
"x1_sp_2",
"x1_sp_3",
"x1_sp_4",
],
)
splt = SplineTransformer(n_knots=3, degree=3, include_bias=False).fit(X)
feature_names = splt.get_feature_names_out(["a", "b"])
assert_array_equal(
feature_names,
[
"a_sp_0",
"a_sp_1",
"a_sp_2",
"a_sp_3",
"b_sp_0",
"b_sp_1",
"b_sp_2",
"b_sp_3",
],
)
|
Test that SplineTransformer generates correct feature names.
|
test_spline_transformer_feature_names
|
python
|
scikit-learn/scikit-learn
|
sklearn/preprocessing/tests/test_polynomial.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/tests/test_polynomial.py
|
BSD-3-Clause
|
def test_split_transform_feature_names_extrapolation_degree(extrapolation, degree):
"""Test feature names are correct for different extrapolations and degree.
Non-regression test for gh-25292.
"""
X = np.arange(20).reshape(10, 2)
splt = SplineTransformer(degree=degree, extrapolation=extrapolation).fit(X)
feature_names = splt.get_feature_names_out(["a", "b"])
assert len(feature_names) == splt.n_features_out_
X_trans = splt.transform(X)
assert X_trans.shape[1] == len(feature_names)
|
Test feature names are correct for different extrapolations and degrees.
Non-regression test for gh-25292.
|
test_split_transform_feature_names_extrapolation_degree
|
python
|
scikit-learn/scikit-learn
|
sklearn/preprocessing/tests/test_polynomial.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/tests/test_polynomial.py
|
BSD-3-Clause
|
def test_spline_transformer_unity_decomposition(degree, n_knots, knots, extrapolation):
"""Test that B-splines are indeed a decomposition of unity.
    Spline basis functions must sum to 1 per row as long as we stay within the boundaries.
"""
X = np.linspace(0, 1, 100)[:, None]
    # make sure the boundaries 0 and 1 are part of X_train.
X_train = np.r_[[[0]], X[::2, :], [[1]]]
X_test = X[1::2, :]
if extrapolation == "periodic":
n_knots = n_knots + degree # periodic splines require degree < n_knots
splt = SplineTransformer(
n_knots=n_knots,
degree=degree,
knots=knots,
include_bias=True,
extrapolation=extrapolation,
)
splt.fit(X_train)
for X in [X_train, X_test]:
assert_allclose(np.sum(splt.transform(X), axis=1), 1)
|
Test that B-splines are indeed a decomposition of unity.
Spline basis functions must sum to 1 per row as long as we stay within the boundaries.
|
test_spline_transformer_unity_decomposition
|
python
|
scikit-learn/scikit-learn
|
sklearn/preprocessing/tests/test_polynomial.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/tests/test_polynomial.py
|
BSD-3-Clause
|
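The partition-of-unity property asserted above is easy to reproduce in isolation; a minimal sketch using only the public scikit-learn API (parameter values chosen arbitrarily):

import numpy as np
from sklearn.preprocessing import SplineTransformer

X = np.linspace(0, 1, 5)[:, None]
splt = SplineTransformer(n_knots=3, degree=2, include_bias=True).fit(X)
# Inside the boundaries, the B-spline basis columns of each row sum to 1.
assert np.allclose(splt.transform(X).sum(axis=1), 1.0)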
def test_spline_transformer_linear_regression(bias, intercept):
"""Test that B-splines fit a sinusodial curve pretty well."""
X = np.linspace(0, 10, 100)[:, None]
y = np.sin(X[:, 0]) + 2 # +2 to avoid the value 0 in assert_allclose
pipe = Pipeline(
steps=[
(
"spline",
SplineTransformer(
n_knots=15,
degree=3,
include_bias=bias,
extrapolation="constant",
),
),
("ols", LinearRegression(fit_intercept=intercept)),
]
)
pipe.fit(X, y)
assert_allclose(pipe.predict(X), y, rtol=1e-3)
|
Test that B-splines fit a sinusoidal curve pretty well.
|
test_spline_transformer_linear_regression
|
python
|
scikit-learn/scikit-learn
|
sklearn/preprocessing/tests/test_polynomial.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/tests/test_polynomial.py
|
BSD-3-Clause
|
def test_spline_transformer_get_base_knot_positions(
knots, n_knots, sample_weight, expected_knots
):
"""Check the behaviour to find knot positions with and without sample_weight."""
X = np.array([[0, 2], [0, 2], [2, 2], [3, 3], [4, 6], [5, 8], [6, 14]])
base_knots = SplineTransformer._get_base_knot_positions(
X=X, knots=knots, n_knots=n_knots, sample_weight=sample_weight
)
assert_allclose(base_knots, expected_knots)
|
Check the behaviour of finding knot positions with and without sample_weight.
|
test_spline_transformer_get_base_knot_positions
|
python
|
scikit-learn/scikit-learn
|
sklearn/preprocessing/tests/test_polynomial.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/tests/test_polynomial.py
|
BSD-3-Clause
|
def test_spline_transformer_periodic_linear_regression(bias, intercept):
"""Test that B-splines fit a periodic curve pretty well."""
# "+ 3" to avoid the value 0 in assert_allclose
def f(x):
return np.sin(2 * np.pi * x) - np.sin(8 * np.pi * x) + 3
X = np.linspace(0, 1, 101)[:, None]
pipe = Pipeline(
steps=[
(
"spline",
SplineTransformer(
n_knots=20,
degree=3,
include_bias=bias,
extrapolation="periodic",
),
),
("ols", LinearRegression(fit_intercept=intercept)),
]
)
pipe.fit(X, f(X[:, 0]))
# Generate larger array to check periodic extrapolation
X_ = np.linspace(-1, 2, 301)[:, None]
predictions = pipe.predict(X_)
assert_allclose(predictions, f(X_[:, 0]), atol=0.01, rtol=0.01)
assert_allclose(predictions[0:100], predictions[100:200], rtol=1e-3)
|
Test that B-splines fit a periodic curve pretty well.
|
test_spline_transformer_periodic_linear_regression
|
python
|
scikit-learn/scikit-learn
|
sklearn/preprocessing/tests/test_polynomial.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/tests/test_polynomial.py
|
BSD-3-Clause
|
def test_spline_transformer_periodic_spline_backport():
"""Test that the backport of extrapolate="periodic" works correctly"""
X = np.linspace(-2, 3.5, 10)[:, None]
degree = 2
# Use periodic extrapolation backport in SplineTransformer
transformer = SplineTransformer(
degree=degree, extrapolation="periodic", knots=[[-1.0], [0.0], [1.0]]
)
Xt = transformer.fit_transform(X)
# Use periodic extrapolation in BSpline
coef = np.array([[1.0, 0.0], [0.0, 1.0], [1.0, 0.0], [0.0, 1.0]])
spl = BSpline(np.arange(-3, 4), coef, degree, "periodic")
Xspl = spl(X[:, 0])
assert_allclose(Xt, Xspl)
|
Test that the backport of extrapolate="periodic" works correctly.
|
test_spline_transformer_periodic_spline_backport
|
python
|
scikit-learn/scikit-learn
|
sklearn/preprocessing/tests/test_polynomial.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/tests/test_polynomial.py
|
BSD-3-Clause
|
def test_spline_transformer_periodic_splines_periodicity():
"""Test if shifted knots result in the same transformation up to permutation."""
X = np.linspace(0, 10, 101)[:, None]
transformer_1 = SplineTransformer(
degree=3,
extrapolation="periodic",
knots=[[0.0], [1.0], [3.0], [4.0], [5.0], [8.0]],
)
transformer_2 = SplineTransformer(
degree=3,
extrapolation="periodic",
knots=[[1.0], [3.0], [4.0], [5.0], [8.0], [9.0]],
)
Xt_1 = transformer_1.fit_transform(X)
Xt_2 = transformer_2.fit_transform(X)
assert_allclose(Xt_1, Xt_2[:, [4, 0, 1, 2, 3]])
|
Test if shifted knots result in the same transformation up to permutation.
|
test_spline_transformer_periodic_splines_periodicity
|
python
|
scikit-learn/scikit-learn
|
sklearn/preprocessing/tests/test_polynomial.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/tests/test_polynomial.py
|
BSD-3-Clause
|
def test_spline_transformer_periodic_splines_smoothness(degree):
"""Test that spline transformation is smooth at first / last knot."""
X = np.linspace(-2, 10, 10_000)[:, None]
transformer = SplineTransformer(
degree=degree,
extrapolation="periodic",
knots=[[0.0], [1.0], [3.0], [4.0], [5.0], [8.0]],
)
Xt = transformer.fit_transform(X)
delta = (X.max() - X.min()) / len(X)
tol = 10 * delta
dXt = Xt
# We expect splines of degree `degree` to be (`degree`-1) times
# continuously differentiable. I.e. for d = 0, ..., `degree` - 1 the d-th
# derivative should be continuous. This is the case if the (d+1)-th
# numerical derivative is reasonably small (smaller than `tol` in absolute
# value). We thus compute d-th numeric derivatives for d = 1, ..., `degree`
# and compare them to `tol`.
#
# Note that the 0-th derivative is the function itself, such that we are
# also checking its continuity.
for d in range(1, degree + 1):
# Check continuity of the (d-1)-th derivative
diff = np.diff(dXt, axis=0)
assert np.abs(diff).max() < tol
# Compute d-th numeric derivative
dXt = diff / delta
# As degree `degree` splines are not `degree` times continuously
# differentiable at the knots, the `degree + 1`-th numeric derivative
# should have spikes at the knots.
diff = np.diff(dXt, axis=0)
assert np.abs(diff).max() > 1
|
Test that spline transformation is smooth at first / last knot.
|
test_spline_transformer_periodic_splines_smoothness
|
python
|
scikit-learn/scikit-learn
|
sklearn/preprocessing/tests/test_polynomial.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/tests/test_polynomial.py
|
BSD-3-Clause
|
def test_spline_transformer_extrapolation(bias, intercept, degree):
"""Test that B-spline extrapolation works correctly."""
# we use a straight line for that
X = np.linspace(-1, 1, 100)[:, None]
y = X.squeeze()
# 'constant'
pipe = Pipeline(
[
[
"spline",
SplineTransformer(
n_knots=4,
degree=degree,
include_bias=bias,
extrapolation="constant",
),
],
["ols", LinearRegression(fit_intercept=intercept)],
]
)
pipe.fit(X, y)
assert_allclose(pipe.predict([[-10], [5]]), [-1, 1])
# 'linear'
pipe = Pipeline(
[
[
"spline",
SplineTransformer(
n_knots=4,
degree=degree,
include_bias=bias,
extrapolation="linear",
),
],
["ols", LinearRegression(fit_intercept=intercept)],
]
)
pipe.fit(X, y)
assert_allclose(pipe.predict([[-10], [5]]), [-10, 5])
# 'error'
splt = SplineTransformer(
n_knots=4, degree=degree, include_bias=bias, extrapolation="error"
)
splt.fit(X)
msg = "X contains values beyond the limits of the knots"
with pytest.raises(ValueError, match=msg):
splt.transform([[-10]])
with pytest.raises(ValueError, match=msg):
splt.transform([[5]])
|
Test that B-spline extrapolation works correctly.
|
test_spline_transformer_extrapolation
|
python
|
scikit-learn/scikit-learn
|
sklearn/preprocessing/tests/test_polynomial.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/tests/test_polynomial.py
|
BSD-3-Clause
|
def test_spline_transformer_kbindiscretizer(global_random_seed):
"""Test that a B-spline of degree=0 is equivalent to KBinsDiscretizer."""
rng = np.random.RandomState(global_random_seed)
X = rng.randn(200).reshape(200, 1)
n_bins = 5
n_knots = n_bins + 1
splt = SplineTransformer(
n_knots=n_knots, degree=0, knots="quantile", include_bias=True
)
splines = splt.fit_transform(X)
kbd = KBinsDiscretizer(
n_bins=n_bins,
encode="onehot-dense",
strategy="quantile",
quantile_method="averaged_inverted_cdf",
)
kbins = kbd.fit_transform(X)
# Though they should be exactly equal, we test approximately with high
# accuracy.
assert_allclose(splines, kbins, rtol=1e-13)
|
Test that a B-spline of degree=0 is equivalent to KBinsDiscretizer.
|
test_spline_transformer_kbindiscretizer
|
python
|
scikit-learn/scikit-learn
|
sklearn/preprocessing/tests/test_polynomial.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/tests/test_polynomial.py
|
BSD-3-Clause
|
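The equivalence checked above can also be read off a tiny example: with degree=0 each basis function is the indicator of one inter-knot bin, so the transform is a one-hot binning. A minimal sketch (knot placement and data chosen arbitrarily):

import numpy as np
from sklearn.preprocessing import SplineTransformer

X_fit = np.linspace(0, 1, 11)[:, None]
splt = SplineTransformer(n_knots=4, degree=0, knots="uniform", include_bias=True).fit(X_fit)
# Uniform knots at 0, 1/3, 2/3 and 1 define three bins; interior points map
# to one-hot rows indicating the bin they fall into.
print(splt.transform([[0.1], [0.5], [0.9]]))
# -> [[1. 0. 0.]
#     [0. 1. 0.]
#     [0. 0. 1.]]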
def test_spline_transformer_n_features_out(
n_knots, include_bias, degree, extrapolation, sparse_output
):
"""Test that transform results in n_features_out_ features."""
splt = SplineTransformer(
n_knots=n_knots,
degree=degree,
include_bias=include_bias,
extrapolation=extrapolation,
sparse_output=sparse_output,
)
X = np.linspace(0, 1, 10)[:, None]
splt.fit(X)
assert splt.transform(X).shape[1] == splt.n_features_out_
|
Test that transform results in n_features_out_ features.
|
test_spline_transformer_n_features_out
|
python
|
scikit-learn/scikit-learn
|
sklearn/preprocessing/tests/test_polynomial.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/tests/test_polynomial.py
|
BSD-3-Clause
|
def test_polynomial_features_input_validation(params, err_msg):
"""Test that we raise errors for invalid input in PolynomialFeatures."""
X = [[1], [2]]
with pytest.raises(ValueError, match=err_msg):
PolynomialFeatures(**params).fit(X)
|
Test that we raise errors for invalid input in PolynomialFeatures.
|
test_polynomial_features_input_validation
|
python
|
scikit-learn/scikit-learn
|
sklearn/preprocessing/tests/test_polynomial.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/tests/test_polynomial.py
|
BSD-3-Clause
|
def test_polynomial_features_one_feature(
single_feature_degree3,
degree,
include_bias,
interaction_only,
indices,
X_container,
):
"""Test PolynomialFeatures on single feature up to degree 3."""
X, P = single_feature_degree3
if X_container is not None:
X = X_container(X)
tf = PolynomialFeatures(
degree=degree, include_bias=include_bias, interaction_only=interaction_only
).fit(X)
out = tf.transform(X)
if X_container is not None:
out = out.toarray()
assert_allclose(out, P[:, indices])
if tf.n_output_features_ > 0:
assert tf.powers_.shape == (tf.n_output_features_, tf.n_features_in_)
|
Test PolynomialFeatures on single feature up to degree 3.
|
test_polynomial_features_one_feature
|
python
|
scikit-learn/scikit-learn
|
sklearn/preprocessing/tests/test_polynomial.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/tests/test_polynomial.py
|
BSD-3-Clause
|
def test_polynomial_features_two_features(
two_features_degree3,
degree,
include_bias,
interaction_only,
indices,
X_container,
):
"""Test PolynomialFeatures on 2 features up to degree 3."""
X, P = two_features_degree3
if X_container is not None:
X = X_container(X)
tf = PolynomialFeatures(
degree=degree, include_bias=include_bias, interaction_only=interaction_only
).fit(X)
out = tf.transform(X)
if X_container is not None:
out = out.toarray()
assert_allclose(out, P[:, indices])
if tf.n_output_features_ > 0:
assert tf.powers_.shape == (tf.n_output_features_, tf.n_features_in_)
|
Test PolynomialFeatures on 2 features up to degree 3.
|
test_polynomial_features_two_features
|
python
|
scikit-learn/scikit-learn
|
sklearn/preprocessing/tests/test_polynomial.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/tests/test_polynomial.py
|
BSD-3-Clause
|
def test_csr_polynomial_expansion_index_overflow_non_regression(
interaction_only, include_bias, csr_container
):
"""Check the automatic index dtype promotion to `np.int64` when needed.
This ensures that sufficiently large input configurations get
properly promoted to use `np.int64` for index and indptr representation
while preserving data integrity. Non-regression test for gh-16803.
Note that this is only possible for Python runtimes with a 64 bit address
space. On 32 bit platforms, a `ValueError` is raised instead.
"""
    def degree_2_calc(d, i, j):
        if interaction_only:
            return d * i - (i**2 + 3 * i) // 2 - 1 + j
        else:
            return d * i - (i**2 + i) // 2 + j

    n_samples = 13
n_features = 120001
data_dtype = np.float32
data = np.arange(1, 5, dtype=np.int64)
row = np.array([n_samples - 2, n_samples - 2, n_samples - 1, n_samples - 1])
# An int64 dtype is required to avoid overflow error on Windows within the
# `degree_2_calc` function.
col = np.array(
[n_features - 2, n_features - 1, n_features - 2, n_features - 1], dtype=np.int64
)
X = csr_container(
(data, (row, col)),
shape=(n_samples, n_features),
dtype=data_dtype,
)
pf = PolynomialFeatures(
interaction_only=interaction_only, include_bias=include_bias, degree=2
)
# Calculate the number of combinations a-priori, and if needed check for
# the correct ValueError and terminate the test early.
num_combinations = pf._num_combinations(
n_features=n_features,
min_degree=0,
max_degree=2,
interaction_only=pf.interaction_only,
include_bias=pf.include_bias,
)
if num_combinations > np.iinfo(np.intp).max:
msg = (
r"The output that would result from the current configuration would have"
r" \d* features which is too large to be indexed"
)
with pytest.raises(ValueError, match=msg):
pf.fit(X)
return
X_trans = pf.fit_transform(X)
row_nonzero, col_nonzero = X_trans.nonzero()
n_degree_1_features_out = n_features + include_bias
max_degree_2_idx = (
degree_2_calc(n_features, col[int(not interaction_only)], col[1])
+ n_degree_1_features_out
)
    # Account for the bias term of all samples except the last two, which are
    # handled separately below since they hold distinct data values
data_target = [1] * (n_samples - 2) if include_bias else []
col_nonzero_target = [0] * (n_samples - 2) if include_bias else []
for i in range(2):
x = data[2 * i]
y = data[2 * i + 1]
x_idx = col[2 * i]
y_idx = col[2 * i + 1]
if include_bias:
data_target.append(1)
col_nonzero_target.append(0)
data_target.extend([x, y])
col_nonzero_target.extend(
[x_idx + int(include_bias), y_idx + int(include_bias)]
)
if not interaction_only:
data_target.extend([x * x, x * y, y * y])
col_nonzero_target.extend(
[
degree_2_calc(n_features, x_idx, x_idx) + n_degree_1_features_out,
degree_2_calc(n_features, x_idx, y_idx) + n_degree_1_features_out,
degree_2_calc(n_features, y_idx, y_idx) + n_degree_1_features_out,
]
)
else:
data_target.extend([x * y])
col_nonzero_target.append(
degree_2_calc(n_features, x_idx, y_idx) + n_degree_1_features_out
)
nnz_per_row = int(include_bias) + 3 + 2 * int(not interaction_only)
assert pf.n_output_features_ == max_degree_2_idx + 1
assert X_trans.dtype == data_dtype
assert X_trans.shape == (n_samples, max_degree_2_idx + 1)
assert X_trans.indptr.dtype == X_trans.indices.dtype == np.int64
# Ensure that dtype promotion was actually required:
assert X_trans.indices.max() > np.iinfo(np.int32).max
row_nonzero_target = list(range(n_samples - 2)) if include_bias else []
row_nonzero_target.extend(
[n_samples - 2] * nnz_per_row + [n_samples - 1] * nnz_per_row
)
assert_allclose(X_trans.data, data_target)
assert_array_equal(row_nonzero, row_nonzero_target)
assert_array_equal(col_nonzero, col_nonzero_target)
|
Check the automatic index dtype promotion to `np.int64` when needed.
This ensures that sufficiently large input configurations get
properly promoted to use `np.int64` for index and indptr representation
while preserving data integrity. Non-regression test for gh-16803.
Note that this is only possible for Python runtimes with a 64 bit address
space. On 32 bit platforms, a `ValueError` is raised instead.
|
test_csr_polynomial_expansion_index_overflow_non_regression
|
python
|
scikit-learn/scikit-learn
|
sklearn/preprocessing/tests/test_polynomial.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/tests/test_polynomial.py
|
BSD-3-Clause
|
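The `degree_2_calc` helper encodes a closed form for the column offset of the monomial x_i * x_j inside the degree-2 block of PolynomialFeatures. A small self-contained check of the interaction_only=False branch against the lexicographic term ordering (a sketch, independent of the test above; the function name is hypothetical):

from itertools import combinations_with_replacement

def degree_2_offset(d, i, j):
    # Offset of x_i * x_j (with i <= j) among the degree-2 terms of d features.
    return d * i - (i**2 + i) // 2 + j

d = 4
for offset, (i, j) in enumerate(combinations_with_replacement(range(d), 2)):
    assert degree_2_offset(d, i, j) == offset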
def test_csr_polynomial_expansion_index_overflow(
degree, n_features, interaction_only, include_bias, csr_container
):
"""Tests known edge-cases to the dtype promotion strategy and custom
Cython code, including a current bug in the upstream
`scipy.sparse.hstack`.
"""
data = [1.0]
# Use int32 indices as much as we can
indices_dtype = np.int32 if n_features - 1 <= np.iinfo(np.int32).max else np.int64
row = np.array([0], dtype=indices_dtype)
col = np.array([n_features - 1], dtype=indices_dtype)
# First degree index
expected_indices = [
n_features - 1 + int(include_bias),
]
# Second degree index
expected_indices.append(n_features * (n_features + 1) // 2 + expected_indices[0])
# Third degree index
expected_indices.append(
n_features * (n_features + 1) * (n_features + 2) // 6 + expected_indices[1]
)
X = csr_container((data, (row, col)))
pf = PolynomialFeatures(
interaction_only=interaction_only, include_bias=include_bias, degree=degree
)
# Calculate the number of combinations a-priori, and if needed check for
# the correct ValueError and terminate the test early.
num_combinations = pf._num_combinations(
n_features=n_features,
min_degree=0,
max_degree=degree,
interaction_only=pf.interaction_only,
include_bias=pf.include_bias,
)
if num_combinations > np.iinfo(np.intp).max:
msg = (
r"The output that would result from the current configuration would have"
r" \d* features which is too large to be indexed"
)
with pytest.raises(ValueError, match=msg):
pf.fit(X)
return
# When `n_features>=65535`, `scipy.sparse.hstack` may not use the right
# dtype for representing indices and indptr if `n_features` is still
# small enough so that each block matrix's indices and indptr arrays
# can be represented with `np.int32`. We test `n_features==65535`
# since it is guaranteed to run into this bug.
if (
sp_version < parse_version("1.9.2")
and n_features == 65535
and degree == 2
and not interaction_only
): # pragma: no cover
msg = r"In scipy versions `<1.9.2`, the function `scipy.sparse.hstack`"
with pytest.raises(ValueError, match=msg):
X_trans = pf.fit_transform(X)
return
X_trans = pf.fit_transform(X)
expected_dtype = np.int64 if num_combinations > np.iinfo(np.int32).max else np.int32
# Terms higher than first degree
non_bias_terms = 1 + (degree - 1) * int(not interaction_only)
expected_nnz = int(include_bias) + non_bias_terms
assert X_trans.dtype == X.dtype
assert X_trans.shape == (1, pf.n_output_features_)
assert X_trans.indptr.dtype == X_trans.indices.dtype == expected_dtype
assert X_trans.nnz == expected_nnz
if include_bias:
assert X_trans[0, 0] == pytest.approx(1.0)
for idx in range(non_bias_terms):
assert X_trans[0, expected_indices[idx]] == pytest.approx(1.0)
offset = interaction_only * n_features
if degree == 3:
offset *= 1 + n_features
assert pf.n_output_features_ == expected_indices[degree - 1] + 1 - offset
|
Tests known edge cases of the dtype promotion strategy and custom
Cython code, including a bug in upstream `scipy.sparse.hstack` that was
fixed in scipy 1.9.2.
|
test_csr_polynomial_expansion_index_overflow
|
python
|
scikit-learn/scikit-learn
|
sklearn/preprocessing/tests/test_polynomial.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/tests/test_polynomial.py
|
BSD-3-Clause
|
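The `expected_indices` arithmetic above uses the closed-form counts of monomials per degree: d features yield d*(d+1)/2 degree-2 terms and d*(d+1)*(d+2)/6 degree-3 terms, so the pure powers of the last feature land at fixed offsets. A small cross-check against a dense PolynomialFeatures expansion (d chosen arbitrarily):

import numpy as np
from sklearn.preprocessing import PolynomialFeatures

d = 5
X = np.zeros((1, d))
X[0, -1] = 1.0  # single nonzero entry in the last feature
Xt = PolynomialFeatures(degree=3, include_bias=True).fit_transform(X)
# Nonzero columns: bias, then x_{d-1}, x_{d-1}**2 and x_{d-1}**3 at the
# offsets predicted by the closed-form counts used in the test above.
expected = [d, d + d * (d + 1) // 2, d + d * (d + 1) // 2 + d * (d + 1) * (d + 2) // 6]
assert list(np.flatnonzero(Xt[0])[1:]) == expected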
def test_polynomial_features_behaviour_on_zero_degree(sparse_container):
"""Check that PolynomialFeatures raises error when degree=0 and include_bias=False,
and output a single constant column when include_bias=True
"""
X = np.ones((10, 2))
poly = PolynomialFeatures(degree=0, include_bias=False)
err_msg = (
"Setting degree to zero and include_bias to False would result in"
" an empty output array."
)
with pytest.raises(ValueError, match=err_msg):
poly.fit_transform(X)
poly = PolynomialFeatures(degree=(0, 0), include_bias=False)
err_msg = (
"Setting both min_degree and max_degree to zero and include_bias to"
" False would result in an empty output array."
)
with pytest.raises(ValueError, match=err_msg):
poly.fit_transform(X)
for _X in [X, sparse_container(X)]:
poly = PolynomialFeatures(degree=0, include_bias=True)
output = poly.fit_transform(_X)
# convert to dense array if needed
if sparse.issparse(output):
output = output.toarray()
assert_array_equal(output, np.ones((X.shape[0], 1)))
|
Check that PolynomialFeatures raises an error when degree=0 and include_bias=False,
and outputs a single constant column when include_bias=True.
|
test_polynomial_features_behaviour_on_zero_degree
|
python
|
scikit-learn/scikit-learn
|
sklearn/preprocessing/tests/test_polynomial.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/tests/test_polynomial.py
|
BSD-3-Clause
|
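For completeness, the non-error path is a one-liner; a minimal sketch:

import numpy as np
from sklearn.preprocessing import PolynomialFeatures

X = np.arange(6).reshape(3, 2)
# degree=0 with include_bias=True keeps only the constant bias column.
print(PolynomialFeatures(degree=0, include_bias=True).fit_transform(X))
# -> [[1.]
#     [1.]
#     [1.]]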
def _encode_target(X_ordinal, y_numeric, n_categories, smooth):
"""Simple Python implementation of target encoding."""
cur_encodings = np.zeros(n_categories, dtype=np.float64)
y_mean = np.mean(y_numeric)
if smooth == "auto":
y_variance = np.var(y_numeric)
for c in range(n_categories):
y_subset = y_numeric[X_ordinal == c]
n_i = y_subset.shape[0]
if n_i == 0:
cur_encodings[c] = y_mean
continue
y_subset_variance = np.var(y_subset)
m = y_subset_variance / y_variance
lambda_ = n_i / (n_i + m)
cur_encodings[c] = lambda_ * np.mean(y_subset) + (1 - lambda_) * y_mean
return cur_encodings
else: # float
for c in range(n_categories):
y_subset = y_numeric[X_ordinal == c]
current_sum = np.sum(y_subset) + y_mean * smooth
current_cnt = y_subset.shape[0] + smooth
cur_encodings[c] = current_sum / current_cnt
return cur_encodings
|
Simple Python implementation of target encoding.
|
_encode_target
|
python
|
scikit-learn/scikit-learn
|
sklearn/preprocessing/tests/test_target_encoder.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/tests/test_target_encoder.py
|
BSD-3-Clause
|
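The smooth="auto" branch above is an empirical-Bayes shrinkage: each category mean is pulled towards the global mean with weight lambda_ = n_i / (n_i + m), where m is the ratio of within-category variance to total variance. A hand-checked call of the reference helper (assuming `_encode_target` from above is in scope):

import numpy as np

X_ordinal = np.array([0, 0, 0, 1])
y_numeric = np.array([1.0, 1.0, 4.0, 8.0])
encodings = _encode_target(X_ordinal, y_numeric, n_categories=2, smooth="auto")
# Category 1 has a single sample with zero within-category variance, so
# m = 0, lambda_ = 1, and its encoding is exactly its own mean of 8.0.
assert encodings[1] == 8.0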
def test_encoding(categories, unknown_value, global_random_seed, smooth, target_type):
"""Check encoding for binary and continuous targets.
Compare the values returned by `TargetEncoder.fit_transform` against the
expected encodings for cv splits from a naive reference Python
implementation in _encode_target.
"""
n_categories = 3
X_train_int_array = np.array([[0] * 20 + [1] * 30 + [2] * 40], dtype=np.int64).T
X_test_int_array = np.array([[0, 1, 2]], dtype=np.int64).T
n_samples = X_train_int_array.shape[0]
if categories == "auto":
X_train = X_train_int_array
X_test = X_test_int_array
else:
X_train = categories[0][X_train_int_array]
X_test = categories[0][X_test_int_array]
X_test = np.concatenate((X_test, [[unknown_value]]))
data_rng = np.random.RandomState(global_random_seed)
n_splits = 3
if target_type == "binary":
y_numeric = data_rng.randint(low=0, high=2, size=n_samples)
target_names = np.array(["cat", "dog"], dtype=object)
y_train = target_names[y_numeric]
else:
assert target_type == "continuous"
y_numeric = data_rng.uniform(low=-10, high=20, size=n_samples)
y_train = y_numeric
shuffled_idx = data_rng.permutation(n_samples)
X_train_int_array = X_train_int_array[shuffled_idx]
X_train = X_train[shuffled_idx]
y_train = y_train[shuffled_idx]
y_numeric = y_numeric[shuffled_idx]
# Define our CV splitting strategy
if target_type == "binary":
cv = StratifiedKFold(
n_splits=n_splits, random_state=global_random_seed, shuffle=True
)
else:
cv = KFold(n_splits=n_splits, random_state=global_random_seed, shuffle=True)
# Compute the expected values using our reference Python implementation of
# target encoding:
expected_X_fit_transform = np.empty_like(X_train_int_array, dtype=np.float64)
for train_idx, test_idx in cv.split(X_train_int_array, y_train):
X_, y_ = X_train_int_array[train_idx, 0], y_numeric[train_idx]
cur_encodings = _encode_target(X_, y_, n_categories, smooth)
expected_X_fit_transform[test_idx, 0] = cur_encodings[
X_train_int_array[test_idx, 0]
]
# Check that we can obtain the same encodings by calling `fit_transform` on
# the estimator with the same CV parameters:
target_encoder = TargetEncoder(
smooth=smooth,
categories=categories,
cv=n_splits,
random_state=global_random_seed,
)
X_fit_transform = target_encoder.fit_transform(X_train, y_train)
assert target_encoder.target_type_ == target_type
assert_allclose(X_fit_transform, expected_X_fit_transform)
assert len(target_encoder.encodings_) == 1
if target_type == "binary":
assert_array_equal(target_encoder.classes_, target_names)
else:
assert target_encoder.classes_ is None
# compute encodings for all data to validate `transform`
y_mean = np.mean(y_numeric)
expected_encodings = _encode_target(
X_train_int_array[:, 0], y_numeric, n_categories, smooth
)
assert_allclose(target_encoder.encodings_[0], expected_encodings)
assert target_encoder.target_mean_ == pytest.approx(y_mean)
# Transform on test data, the last value is unknown so it is encoded as the target
# mean
expected_X_test_transform = np.concatenate(
(expected_encodings, np.array([y_mean]))
).reshape(-1, 1)
X_test_transform = target_encoder.transform(X_test)
assert_allclose(X_test_transform, expected_X_test_transform)
|
Check encoding for binary and continuous targets.
Compare the values returned by `TargetEncoder.fit_transform` against the
expected encodings for cv splits from a naive reference Python
implementation in _encode_target.
|
test_encoding
|
python
|
scikit-learn/scikit-learn
|
sklearn/preprocessing/tests/test_target_encoder.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/tests/test_target_encoder.py
|
BSD-3-Clause
|
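The expected values in the test mirror the cross-fitting TargetEncoder performs during fit_transform: each row is encoded with statistics estimated on the other folds only. A compact sketch of that scheme, reusing the reference `_encode_target` helper defined above (the wrapper function name is hypothetical):

import numpy as np
from sklearn.model_selection import KFold

def out_of_fold_encode(X_int, y, n_categories, smooth, n_splits=3):
    # Encode every row with category statistics fitted on the other folds.
    encoded = np.empty(X_int.shape[0], dtype=np.float64)
    for train_idx, test_idx in KFold(n_splits=n_splits).split(X_int):
        encodings = _encode_target(X_int[train_idx], y[train_idx], n_categories, smooth)
        encoded[test_idx] = encodings[X_int[test_idx]]
    return encoded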
def test_custom_categories(X, categories, smooth):
"""Custom categories with unknown categories that are not in training data."""
rng = np.random.RandomState(0)
y = rng.uniform(low=-10, high=20, size=X.shape[0])
enc = TargetEncoder(categories=categories, smooth=smooth, random_state=0).fit(X, y)
# The last element is unknown and encoded as the mean
y_mean = y.mean()
X_trans = enc.transform(X[-1:])
assert X_trans[0, 0] == pytest.approx(y_mean)
assert len(enc.encodings_) == 1
# custom category that is not in training data
assert enc.encodings_[0][-1] == pytest.approx(y_mean)
|
Check custom categories containing a category that is absent from the training data.
|
test_custom_categories
|
python
|
scikit-learn/scikit-learn
|
sklearn/preprocessing/tests/test_target_encoder.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/tests/test_target_encoder.py
|
BSD-3-Clause
|
def test_use_regression_target():
"""Check inferred and specified `target_type` on regression target."""
X = np.array([[0, 1, 0, 1, 0, 1]]).T
y = np.array([1.0, 2.0, 3.0, 2.0, 3.0, 4.0])
enc = TargetEncoder(cv=2)
with pytest.warns(
UserWarning,
match=re.escape(
"The least populated class in y has only 1 members, which is less than"
" n_splits=2."
),
):
enc.fit_transform(X, y)
assert enc.target_type_ == "multiclass"
enc = TargetEncoder(cv=2, target_type="continuous")
enc.fit_transform(X, y)
assert enc.target_type_ == "continuous"
|
Check inferred and specified `target_type` on regression target.
|
test_use_regression_target
|
python
|
scikit-learn/scikit-learn
|
sklearn/preprocessing/tests/test_target_encoder.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/tests/test_target_encoder.py
|
BSD-3-Clause
|
def test_multiple_features_quick(to_pandas, smooth, target_type):
"""Check target encoder with multiple features."""
X_ordinal = np.array(
[[1, 1], [0, 1], [1, 1], [2, 1], [1, 0], [0, 1], [1, 0], [0, 0]], dtype=np.int64
)
if target_type == "binary-str":
y_train = np.array(["a", "b", "a", "a", "b", "b", "a", "b"])
y_integer = LabelEncoder().fit_transform(y_train)
cv = StratifiedKFold(2, random_state=0, shuffle=True)
elif target_type == "binary-ints":
y_train = np.array([3, 4, 3, 3, 3, 4, 4, 4])
y_integer = LabelEncoder().fit_transform(y_train)
cv = StratifiedKFold(2, random_state=0, shuffle=True)
else:
y_train = np.array([3.0, 5.1, 2.4, 3.5, 4.1, 5.5, 10.3, 7.3], dtype=np.float32)
y_integer = y_train
cv = KFold(2, random_state=0, shuffle=True)
y_mean = np.mean(y_integer)
categories = [[0, 1, 2], [0, 1]]
X_test = np.array(
[
[0, 1],
[3, 0], # 3 is unknown
[1, 10], # 10 is unknown
],
dtype=np.int64,
)
if to_pandas:
pd = pytest.importorskip("pandas")
# convert second feature to an object
X_train = pd.DataFrame(
{
"feat0": X_ordinal[:, 0],
"feat1": np.array(["cat", "dog"], dtype=object)[X_ordinal[:, 1]],
}
)
# "snake" is unknown
X_test = pd.DataFrame({"feat0": X_test[:, 0], "feat1": ["dog", "cat", "snake"]})
else:
X_train = X_ordinal
# manually compute encoding for fit_transform
expected_X_fit_transform = np.empty_like(X_ordinal, dtype=np.float64)
for f_idx, cats in enumerate(categories):
for train_idx, test_idx in cv.split(X_ordinal, y_integer):
X_, y_ = X_ordinal[train_idx, f_idx], y_integer[train_idx]
current_encoding = _encode_target(X_, y_, len(cats), smooth)
expected_X_fit_transform[test_idx, f_idx] = current_encoding[
X_ordinal[test_idx, f_idx]
]
# manually compute encoding for transform
expected_encodings = []
for f_idx, cats in enumerate(categories):
current_encoding = _encode_target(
X_ordinal[:, f_idx], y_integer, len(cats), smooth
)
expected_encodings.append(current_encoding)
expected_X_test_transform = np.array(
[
[expected_encodings[0][0], expected_encodings[1][1]],
[y_mean, expected_encodings[1][0]],
[expected_encodings[0][1], y_mean],
],
dtype=np.float64,
)
enc = TargetEncoder(smooth=smooth, cv=2, random_state=0)
X_fit_transform = enc.fit_transform(X_train, y_train)
assert_allclose(X_fit_transform, expected_X_fit_transform)
assert len(enc.encodings_) == 2
for i in range(2):
assert_allclose(enc.encodings_[i], expected_encodings[i])
X_test_transform = enc.transform(X_test)
assert_allclose(X_test_transform, expected_X_test_transform)
|
Check target encoder with multiple features.
|
test_multiple_features_quick
|
python
|
scikit-learn/scikit-learn
|
sklearn/preprocessing/tests/test_target_encoder.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/tests/test_target_encoder.py
|
BSD-3-Clause
|
def test_constant_target_and_feature(y, y_mean, smooth):
"""Check edge case where feature and target is constant."""
X = np.array([[1] * 20]).T
n_samples = X.shape[0]
enc = TargetEncoder(cv=2, smooth=smooth, random_state=0)
X_trans = enc.fit_transform(X, y)
assert_allclose(X_trans, np.repeat([[y_mean]], n_samples, axis=0))
assert enc.encodings_[0][0] == pytest.approx(y_mean)
assert enc.target_mean_ == pytest.approx(y_mean)
X_test = np.array([[1], [0]])
X_test_trans = enc.transform(X_test)
assert_allclose(X_test_trans, np.repeat([[y_mean]], 2, axis=0))
|
Check the edge case where both the feature and the target are constant.
|
test_constant_target_and_feature
|
python
|
scikit-learn/scikit-learn
|
sklearn/preprocessing/tests/test_target_encoder.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/tests/test_target_encoder.py
|
BSD-3-Clause
|
def test_smooth_zero():
"""Check edge case with zero smoothing and cv does not contain category."""
X = np.array([[0, 0, 0, 0, 0, 1, 1, 1, 1, 1]]).T
y = np.array([2.1, 4.3, 1.2, 3.1, 1.0, 9.0, 10.3, 14.2, 13.3, 15.0])
enc = TargetEncoder(smooth=0.0, shuffle=False, cv=2)
X_trans = enc.fit_transform(X, y)
# With cv = 2, category 0 does not exist in the second half, thus
# it will be encoded as the mean of the second half
assert_allclose(X_trans[0], np.mean(y[5:]))
# category 1 does not exist in the first half, thus it will be encoded as
# the mean of the first half
assert_allclose(X_trans[-1], np.mean(y[:5]))
|
Check the edge case with zero smoothing where a CV split does not contain a category.
|
test_smooth_zero
|
python
|
scikit-learn/scikit-learn
|
sklearn/preprocessing/tests/test_target_encoder.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/tests/test_target_encoder.py
|
BSD-3-Clause
|
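The two assertions are easy to verify by hand: with shuffle=False and cv=2 the folds are exactly the two halves of the data, so each half is encoded with statistics from the other half. A quick check of the numbers:

import numpy as np

y = np.array([2.1, 4.3, 1.2, 3.1, 1.0, 9.0, 10.3, 14.2, 13.3, 15.0])
print(np.mean(y[5:]))  # 12.36 -> encoding of the category-0 rows (first half)
print(np.mean(y[:5]))  # 2.34  -> encoding of the category-1 rows (second half)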
def test_pandas_copy_on_write():
"""
    Test the target-encoder Cython code when y is read-only.
The numpy array underlying df["y"] is read-only when copy-on-write is enabled.
Non-regression test for gh-27879.
"""
pd = pytest.importorskip("pandas", minversion="2.0")
with pd.option_context("mode.copy_on_write", True):
df = pd.DataFrame({"x": ["a", "b", "b"], "y": [4.0, 5.0, 6.0]})
TargetEncoder(target_type="continuous").fit(df[["x"]], df["y"])
|
Test the target-encoder Cython code when y is read-only.
The numpy array underlying df["y"] is read-only when copy-on-write is enabled.
Non-regression test for gh-27879.
|
test_pandas_copy_on_write
|
python
|
scikit-learn/scikit-learn
|
sklearn/preprocessing/tests/test_target_encoder.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/tests/test_target_encoder.py
|
BSD-3-Clause
|
def predict(self, X):
"""Perform inductive inference across the model.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The data matrix.
Returns
-------
y : ndarray of shape (n_samples,)
Predictions for input data.
"""
# Note: since `predict` does not accept semi-supervised labels as input,
# `fit(X, y).predict(X) != fit(X, y).transduction_`.
# Hence, `fit_predict` is not implemented.
# See https://github.com/scikit-learn/scikit-learn/pull/24898
probas = self.predict_proba(X)
return self.classes_[np.argmax(probas, axis=1)].ravel()
|
Perform inductive inference across the model.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The data matrix.
Returns
-------
y : ndarray of shape (n_samples,)
Predictions for input data.
|
predict
|
python
|
scikit-learn/scikit-learn
|
sklearn/semi_supervised/_label_propagation.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/semi_supervised/_label_propagation.py
|
BSD-3-Clause
|
def predict_proba(self, X):
"""Predict probability for each possible outcome.
Compute the probability estimates for each single sample in X
and each possible outcome seen during training (categorical
distribution).
Parameters
----------
X : array-like of shape (n_samples, n_features)
The data matrix.
Returns
-------
probabilities : ndarray of shape (n_samples, n_classes)
Normalized probability distributions across
class labels.
"""
check_is_fitted(self)
X_2d = validate_data(
self,
X,
accept_sparse=["csc", "csr", "coo", "dok", "bsr", "lil", "dia"],
reset=False,
)
weight_matrices = self._get_kernel(self.X_, X_2d)
if self.kernel == "knn":
probabilities = np.array(
[
np.sum(self.label_distributions_[weight_matrix], axis=0)
for weight_matrix in weight_matrices
]
)
else:
weight_matrices = weight_matrices.T
probabilities = safe_sparse_dot(weight_matrices, self.label_distributions_)
normalizer = np.atleast_2d(np.sum(probabilities, axis=1)).T
probabilities /= normalizer
return probabilities
|
Predict probability for each possible outcome.
Compute the probability estimates for each single sample in X
and each possible outcome seen during training (categorical
distribution).
Parameters
----------
X : array-like of shape (n_samples, n_features)
The data matrix.
Returns
-------
probabilities : ndarray of shape (n_samples, n_classes)
Normalized probability distributions across
class labels.
|
predict_proba
|
python
|
scikit-learn/scikit-learn
|
sklearn/semi_supervised/_label_propagation.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/semi_supervised/_label_propagation.py
|
BSD-3-Clause
|
def fit(self, X, y):
"""Fit a semi-supervised label propagation model to X.
The input samples (labeled and unlabeled) are provided by matrix X,
and target labels are provided by matrix y. We conventionally apply the
label -1 to unlabeled samples in matrix y in a semi-supervised
classification.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : array-like of shape (n_samples,)
Target class values with unlabeled points marked as -1.
All unlabeled samples will be transductively assigned labels
internally, which are stored in `transduction_`.
Returns
-------
self : object
Returns the instance itself.
"""
X, y = validate_data(
self,
X,
y,
accept_sparse=["csr", "csc"],
reset=True,
)
self.X_ = X
check_classification_targets(y)
# actual graph construction (implementations should override this)
graph_matrix = self._build_graph()
# label construction
# construct a categorical distribution for classification only
classes = np.unique(y)
classes = classes[classes != -1]
self.classes_ = classes
n_samples, n_classes = len(y), len(classes)
y = np.asarray(y)
unlabeled = y == -1
# initialize distributions
self.label_distributions_ = np.zeros((n_samples, n_classes))
for label in classes:
self.label_distributions_[y == label, classes == label] = 1
y_static = np.copy(self.label_distributions_)
if self._variant == "propagation":
# LabelPropagation
y_static[unlabeled] = 0
else:
# LabelSpreading
y_static *= 1 - self.alpha
l_previous = np.zeros((self.X_.shape[0], n_classes))
unlabeled = unlabeled[:, np.newaxis]
if sparse.issparse(graph_matrix):
graph_matrix = graph_matrix.tocsr()
for self.n_iter_ in range(self.max_iter):
if np.abs(self.label_distributions_ - l_previous).sum() < self.tol:
break
l_previous = self.label_distributions_
self.label_distributions_ = safe_sparse_dot(
graph_matrix, self.label_distributions_
)
if self._variant == "propagation":
normalizer = np.sum(self.label_distributions_, axis=1)[:, np.newaxis]
normalizer[normalizer == 0] = 1
self.label_distributions_ /= normalizer
self.label_distributions_ = np.where(
unlabeled, self.label_distributions_, y_static
)
else:
# clamp
self.label_distributions_ = (
np.multiply(self.alpha, self.label_distributions_) + y_static
)
else:
warnings.warn(
"max_iter=%d was reached without convergence." % self.max_iter,
category=ConvergenceWarning,
)
self.n_iter_ += 1
normalizer = np.sum(self.label_distributions_, axis=1)[:, np.newaxis]
normalizer[normalizer == 0] = 1
self.label_distributions_ /= normalizer
# set the transduction item
transduction = self.classes_[np.argmax(self.label_distributions_, axis=1)]
self.transduction_ = transduction.ravel()
return self
|
Fit a semi-supervised label propagation model to X.
The input samples (labeled and unlabeled) are provided by matrix X,
and target labels are provided by matrix y. We conventionally apply the
label -1 to unlabeled samples in matrix y in a semi-supervised
classification.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : array-like of shape (n_samples,)
Target class values with unlabeled points marked as -1.
All unlabeled samples will be transductively assigned labels
internally, which are stored in `transduction_`.
Returns
-------
self : object
Returns the instance itself.
|
fit
|
python
|
scikit-learn/scikit-learn
|
sklearn/semi_supervised/_label_propagation.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/semi_supervised/_label_propagation.py
|
BSD-3-Clause
|
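The core of the loop above is a fixed-point iteration on the label distribution matrix. For the LabelSpreading variant it reduces to one clamped affine update per iteration; a stripped-down dense-only sketch with hypothetical variable names:

import numpy as np

def label_spreading_step(graph_matrix, label_distributions, y_static, alpha):
    # One iteration of the clamped update: F <- alpha * S @ F + (1 - alpha) * Y,
    # where y_static = (1 - alpha) * Y has been pre-multiplied as in `fit` above.
    return alpha * (graph_matrix @ label_distributions) + y_static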
def _build_graph(self):
"""Matrix representing a fully connected graph between each sample
This basic implementation creates a non-stochastic affinity matrix, so
class distributions will exceed 1 (normalization may be desired).
"""
if self.kernel == "knn":
self.nn_fit = None
affinity_matrix = self._get_kernel(self.X_)
normalizer = affinity_matrix.sum(axis=0)
if sparse.issparse(affinity_matrix):
affinity_matrix.data /= np.diag(np.array(normalizer))
else:
affinity_matrix /= normalizer[:, np.newaxis]
return affinity_matrix
|
Matrix representing a fully connected graph between each pair of samples.
This basic implementation creates a non-stochastic affinity matrix, so
class distributions will exceed 1 (normalization may be desired).
|
_build_graph
|
python
|
scikit-learn/scikit-learn
|
sklearn/semi_supervised/_label_propagation.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/semi_supervised/_label_propagation.py
|
BSD-3-Clause
|
def _build_graph(self):
"""Graph matrix for Label Spreading computes the graph laplacian"""
# compute affinity matrix (or gram matrix)
if self.kernel == "knn":
self.nn_fit = None
n_samples = self.X_.shape[0]
affinity_matrix = self._get_kernel(self.X_)
laplacian = csgraph_laplacian(affinity_matrix, normed=True)
laplacian = -laplacian
if sparse.issparse(laplacian):
diag_mask = laplacian.row == laplacian.col
laplacian.data[diag_mask] = 0.0
else:
laplacian.flat[:: n_samples + 1] = 0.0 # set diag to 0.0
return laplacian
|
Graph matrix for LabelSpreading: computes the normalized graph Laplacian.
|
_build_graph
|
python
|
scikit-learn/scikit-learn
|
sklearn/semi_supervised/_label_propagation.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/semi_supervised/_label_propagation.py
|
BSD-3-Clause
|
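For a dense affinity matrix W with zero diagonal, the graph built above is the negated symmetrically normalized Laplacian with its diagonal zeroed, i.e. entries W_ij / sqrt(d_i * d_j) off the diagonal. A minimal dense-only sanity check (W chosen arbitrarily):

import numpy as np
from scipy.sparse.csgraph import laplacian as csgraph_laplacian

W = np.array([[0.0, 1.0, 0.5], [1.0, 0.0, 0.2], [0.5, 0.2, 0.0]])
L = -csgraph_laplacian(W, normed=True)
np.fill_diagonal(L, 0.0)
# Off-diagonal entries equal W_ij / sqrt(d_i * d_j), the propagation weights.
d = W.sum(axis=0)
expected = W / np.sqrt(np.outer(d, d))
np.fill_diagonal(expected, 0.0)
assert np.allclose(L, expected)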
def _get_estimator(self):
"""Get the estimator.
Returns
-------
estimator_ : estimator object
The cloned estimator object.
"""
# TODO(1.8): remove and only keep clone(self.estimator)
if self.estimator is None and self.base_estimator != "deprecated":
estimator_ = clone(self.base_estimator)
warn(
(
"`base_estimator` has been deprecated in 1.6 and will be removed"
" in 1.8. Please use `estimator` instead."
),
FutureWarning,
)
# TODO(1.8) remove
elif self.estimator is None and self.base_estimator == "deprecated":
raise ValueError(
"You must pass an estimator to SelfTrainingClassifier. Use `estimator`."
)
elif self.estimator is not None and self.base_estimator != "deprecated":
raise ValueError(
"You must pass only one estimator to SelfTrainingClassifier."
" Use `estimator`."
)
else:
estimator_ = clone(self.estimator)
return estimator_
|
Get the estimator.
Returns
-------
estimator_ : estimator object
The cloned estimator object.
|
_get_estimator
|
python
|
scikit-learn/scikit-learn
|
sklearn/semi_supervised/_self_training.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/semi_supervised/_self_training.py
|
BSD-3-Clause
|
def fit(self, X, y, **params):
"""
Fit self-training classifier using `X`, `y` as training data.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Array representing the data.
y : {array-like, sparse matrix} of shape (n_samples,)
Array representing the labels. Unlabeled samples should have the
label -1.
**params : dict
Parameters to pass to the underlying estimators.
.. versionadded:: 1.6
Only available if `enable_metadata_routing=True`,
which can be set by using
``sklearn.set_config(enable_metadata_routing=True)``.
See :ref:`Metadata Routing User Guide <metadata_routing>` for
more details.
Returns
-------
self : object
Fitted estimator.
"""
_raise_for_params(params, self, "fit")
self.estimator_ = self._get_estimator()
# we need row slicing support for sparse matrices, but costly finiteness check
# can be delegated to the base estimator.
X, y = validate_data(
self,
X,
y,
accept_sparse=["csr", "csc", "lil", "dok"],
ensure_all_finite=False,
)
if y.dtype.kind in ["U", "S"]:
raise ValueError(
"y has dtype string. If you wish to predict on "
"string targets, use dtype object, and use -1"
" as the label for unlabeled samples."
)
has_label = y != -1
if np.all(has_label):
warnings.warn("y contains no unlabeled samples", UserWarning)
if self.criterion == "k_best" and (
self.k_best > X.shape[0] - np.sum(has_label)
):
warnings.warn(
(
"k_best is larger than the amount of unlabeled "
"samples. All unlabeled samples will be labeled in "
"the first iteration"
),
UserWarning,
)
if _routing_enabled():
routed_params = process_routing(self, "fit", **params)
else:
routed_params = Bunch(estimator=Bunch(fit={}))
self.transduction_ = np.copy(y)
self.labeled_iter_ = np.full_like(y, -1)
self.labeled_iter_[has_label] = 0
self.n_iter_ = 0
while not np.all(has_label) and (
self.max_iter is None or self.n_iter_ < self.max_iter
):
self.n_iter_ += 1
self.estimator_.fit(
X[safe_mask(X, has_label)],
self.transduction_[has_label],
**routed_params.estimator.fit,
)
# Predict on the unlabeled samples
prob = self.estimator_.predict_proba(X[safe_mask(X, ~has_label)])
pred = self.estimator_.classes_[np.argmax(prob, axis=1)]
max_proba = np.max(prob, axis=1)
# Select new labeled samples
if self.criterion == "threshold":
selected = max_proba > self.threshold
else:
n_to_select = min(self.k_best, max_proba.shape[0])
if n_to_select == max_proba.shape[0]:
selected = np.ones_like(max_proba, dtype=bool)
else:
# NB these are indices, not a mask
selected = np.argpartition(-max_proba, n_to_select)[:n_to_select]
# Map selected indices into original array
selected_full = np.nonzero(~has_label)[0][selected]
# Add newly labeled confident predictions to the dataset
self.transduction_[selected_full] = pred[selected]
has_label[selected_full] = True
self.labeled_iter_[selected_full] = self.n_iter_
if selected_full.shape[0] == 0:
# no changed labels
self.termination_condition_ = "no_change"
break
if self.verbose:
print(
f"End of iteration {self.n_iter_},"
f" added {selected_full.shape[0]} new labels."
)
if self.n_iter_ == self.max_iter:
self.termination_condition_ = "max_iter"
if np.all(has_label):
self.termination_condition_ = "all_labeled"
self.estimator_.fit(
X[safe_mask(X, has_label)],
self.transduction_[has_label],
**routed_params.estimator.fit,
)
self.classes_ = self.estimator_.classes_
return self
|
Fit self-training classifier using `X`, `y` as training data.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Array representing the data.
y : {array-like, sparse matrix} of shape (n_samples,)
Array representing the labels. Unlabeled samples should have the
label -1.
**params : dict
Parameters to pass to the underlying estimators.
.. versionadded:: 1.6
Only available if `enable_metadata_routing=True`,
which can be set by using
``sklearn.set_config(enable_metadata_routing=True)``.
See :ref:`Metadata Routing User Guide <metadata_routing>` for
more details.
Returns
-------
self : object
Fitted estimator.
|
fit
|
python
|
scikit-learn/scikit-learn
|
sklearn/semi_supervised/_self_training.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/semi_supervised/_self_training.py
|
BSD-3-Clause
|
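Putting the pieces above together, a minimal end-to-end usage sketch of SelfTrainingClassifier (dataset and hyperparameters chosen arbitrarily):

import numpy as np
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn.semi_supervised import SelfTrainingClassifier

X, y = load_iris(return_X_y=True)
y_semi = y.copy()
rng = np.random.RandomState(0)
y_semi[rng.rand(len(y)) < 0.7] = -1  # mark roughly 70% of the samples as unlabeled
clf = SelfTrainingClassifier(LogisticRegression(max_iter=1000), threshold=0.9)
clf.fit(X, y_semi)
print(clf.termination_condition_, clf.n_iter_)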
def predict(self, X, **params):
"""Predict the classes of `X`.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Array representing the data.
**params : dict of str -> object
Parameters to pass to the underlying estimator's ``predict`` method.
.. versionadded:: 1.6
Only available if `enable_metadata_routing=True`,
which can be set by using
``sklearn.set_config(enable_metadata_routing=True)``.
See :ref:`Metadata Routing User Guide <metadata_routing>` for
more details.
Returns
-------
y : ndarray of shape (n_samples,)
Array with predicted labels.
"""
check_is_fitted(self)
_raise_for_params(params, self, "predict")
if _routing_enabled():
# metadata routing is enabled.
routed_params = process_routing(self, "predict", **params)
else:
routed_params = Bunch(estimator=Bunch(predict={}))
X = validate_data(
self,
X,
accept_sparse=True,
ensure_all_finite=False,
reset=False,
)
return self.estimator_.predict(X, **routed_params.estimator.predict)
|
Predict the classes of `X`.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Array representing the data.
**params : dict of str -> object
Parameters to pass to the underlying estimator's ``predict`` method.
.. versionadded:: 1.6
Only available if `enable_metadata_routing=True`,
which can be set by using
``sklearn.set_config(enable_metadata_routing=True)``.
See :ref:`Metadata Routing User Guide <metadata_routing>` for
more details.
Returns
-------
y : ndarray of shape (n_samples,)
Array with predicted labels.
|
predict
|
python
|
scikit-learn/scikit-learn
|
sklearn/semi_supervised/_self_training.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/semi_supervised/_self_training.py
|
BSD-3-Clause
|
def predict_proba(self, X, **params):
"""Predict probability for each possible outcome.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Array representing the data.
**params : dict of str -> object
Parameters to pass to the underlying estimator's
``predict_proba`` method.
.. versionadded:: 1.6
Only available if `enable_metadata_routing=True`,
which can be set by using
``sklearn.set_config(enable_metadata_routing=True)``.
See :ref:`Metadata Routing User Guide <metadata_routing>` for
more details.
Returns
-------
y : ndarray of shape (n_samples, n_features)
Array with prediction probabilities.
"""
check_is_fitted(self)
_raise_for_params(params, self, "predict_proba")
if _routing_enabled():
# metadata routing is enabled.
routed_params = process_routing(self, "predict_proba", **params)
else:
routed_params = Bunch(estimator=Bunch(predict_proba={}))
X = validate_data(
self,
X,
accept_sparse=True,
ensure_all_finite=False,
reset=False,
)
return self.estimator_.predict_proba(X, **routed_params.estimator.predict_proba)
|
Predict probability for each possible outcome.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Array representing the data.
**params : dict of str -> object
Parameters to pass to the underlying estimator's
``predict_proba`` method.
.. versionadded:: 1.6
Only available if `enable_metadata_routing=True`,
which can be set by using
``sklearn.set_config(enable_metadata_routing=True)``.
See :ref:`Metadata Routing User Guide <metadata_routing>` for
more details.
Returns
-------
y : ndarray of shape (n_samples, n_features)
Array with prediction probabilities.
|
predict_proba
|
python
|
scikit-learn/scikit-learn
|
sklearn/semi_supervised/_self_training.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/semi_supervised/_self_training.py
|
BSD-3-Clause
|
def decision_function(self, X, **params):
"""Call decision function of the `estimator`.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Array representing the data.
**params : dict of str -> object
Parameters to pass to the underlying estimator's
``decision_function`` method.
.. versionadded:: 1.6
Only available if `enable_metadata_routing=True`,
which can be set by using
``sklearn.set_config(enable_metadata_routing=True)``.
See :ref:`Metadata Routing User Guide <metadata_routing>` for
more details.
Returns
-------
y : ndarray of shape (n_samples, n_features)
Result of the decision function of the `estimator`.
"""
check_is_fitted(self)
_raise_for_params(params, self, "decision_function")
if _routing_enabled():
# metadata routing is enabled.
routed_params = process_routing(self, "decision_function", **params)
else:
routed_params = Bunch(estimator=Bunch(decision_function={}))
X = validate_data(
self,
X,
accept_sparse=True,
ensure_all_finite=False,
reset=False,
)
return self.estimator_.decision_function(
X, **routed_params.estimator.decision_function
)
|
Call decision function of the `estimator`.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Array representing the data.
**params : dict of str -> object
Parameters to pass to the underlying estimator's
``decision_function`` method.
.. versionadded:: 1.6
Only available if `enable_metadata_routing=True`,
which can be set by using
``sklearn.set_config(enable_metadata_routing=True)``.
See :ref:`Metadata Routing User Guide <metadata_routing>` for
more details.
Returns
-------
y : ndarray of shape (n_samples, n_features)
Result of the decision function of the `estimator`.
|
decision_function
|
python
|
scikit-learn/scikit-learn
|
sklearn/semi_supervised/_self_training.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/semi_supervised/_self_training.py
|
BSD-3-Clause
|
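Because the method is guarded by `available_if`, it only appears when the wrapped estimator exposes it. A hedged sketch assuming an inner SVC (`probability=True` is needed since `fit` itself calls `predict_proba`):
import numpy as np
from sklearn.svm import SVC
from sklearn.semi_supervised import SelfTrainingClassifier
X = np.array([[0.0], [0.1], [0.9], [1.0], [0.4]])
y = np.array([0, 0, 1, 1, -1])
clf = SelfTrainingClassifier(SVC(probability=True)).fit(X, y)
margins = clf.decision_function(X)  # forwarded to the inner SVC
assert margins.shape == (5,)  # one signed margin per sample in the binary case
|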
def predict_log_proba(self, X, **params):
"""Predict log probability for each possible outcome.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Array representing the data.
**params : dict of str -> object
Parameters to pass to the underlying estimator's
``predict_log_proba`` method.
.. versionadded:: 1.6
Only available if `enable_metadata_routing=True`,
which can be set by using
``sklearn.set_config(enable_metadata_routing=True)``.
See :ref:`Metadata Routing User Guide <metadata_routing>` for
more details.
Returns
-------
y : ndarray of shape (n_samples, n_features)
Array with log prediction probabilities.
"""
check_is_fitted(self)
_raise_for_params(params, self, "predict_log_proba")
if _routing_enabled():
# metadata routing is enabled.
routed_params = process_routing(self, "predict_log_proba", **params)
else:
routed_params = Bunch(estimator=Bunch(predict_log_proba={}))
X = validate_data(
self,
X,
accept_sparse=True,
ensure_all_finite=False,
reset=False,
)
return self.estimator_.predict_log_proba(
X, **routed_params.estimator.predict_log_proba
)
|
Predict log probability for each possible outcome.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Array representing the data.
**params : dict of str -> object
Parameters to pass to the underlying estimator's
``predict_log_proba`` method.
.. versionadded:: 1.6
Only available if `enable_metadata_routing=True`,
which can be set by using
``sklearn.set_config(enable_metadata_routing=True)``.
See :ref:`Metadata Routing User Guide <metadata_routing>` for
more details.
Returns
-------
y : ndarray of shape (n_samples, n_features)
Array with log prediction probabilities.
|
predict_log_proba
|
python
|
scikit-learn/scikit-learn
|
sklearn/semi_supervised/_self_training.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/semi_supervised/_self_training.py
|
BSD-3-Clause
|
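A quick consistency check, assuming an inner estimator whose predicted probabilities are strictly positive so the logarithm is well defined:
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.semi_supervised import SelfTrainingClassifier
X = np.array([[0.0], [0.2], [0.8], [1.0], [0.5]])
y = np.array([0, 0, 1, 1, -1])
clf = SelfTrainingClassifier(LogisticRegression()).fit(X, y)
# predict_log_proba should agree with the log of predict_proba
np.testing.assert_allclose(clf.predict_log_proba(X), np.log(clf.predict_proba(X)))
|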
def score(self, X, y, **params):
"""Call score on the `estimator`.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Array representing the data.
y : array-like of shape (n_samples,)
Array representing the labels.
**params : dict of str -> object
Parameters to pass to the underlying estimator's ``score`` method.
.. versionadded:: 1.6
Only available if `enable_metadata_routing=True`,
which can be set by using
``sklearn.set_config(enable_metadata_routing=True)``.
See :ref:`Metadata Routing User Guide <metadata_routing>` for
more details.
Returns
-------
score : float
Result of calling score on the `estimator`.
"""
check_is_fitted(self)
_raise_for_params(params, self, "score")
if _routing_enabled():
# metadata routing is enabled.
routed_params = process_routing(self, "score", **params)
else:
routed_params = Bunch(estimator=Bunch(score={}))
X = validate_data(
self,
X,
accept_sparse=True,
ensure_all_finite=False,
reset=False,
)
return self.estimator_.score(X, y, **routed_params.estimator.score)
|
Call score on the `estimator`.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Array representing the data.
y : array-like of shape (n_samples,)
Array representing the labels.
**params : dict of str -> object
Parameters to pass to the underlying estimator's ``score`` method.
.. versionadded:: 1.6
Only available if `enable_metadata_routing=True`,
which can be set by using
``sklearn.set_config(enable_metadata_routing=True)``.
See :ref:`Metadata Routing User Guide <metadata_routing>` for
more details.
Returns
-------
score : float
Result of calling score on the `estimator`.
|
score
|
python
|
scikit-learn/scikit-learn
|
sklearn/semi_supervised/_self_training.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/semi_supervised/_self_training.py
|
BSD-3-Clause
|
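A hedged routing sketch: with the global flag enabled and `sample_weight` requested on the inner estimator's `score`, the weights flow through the routed params built above. The data are illustrative assumptions:
import numpy as np
import sklearn
from sklearn.linear_model import LogisticRegression
from sklearn.semi_supervised import SelfTrainingClassifier
with sklearn.config_context(enable_metadata_routing=True):
    inner = LogisticRegression().set_score_request(sample_weight=True)
    clf = SelfTrainingClassifier(inner)
    X = np.array([[0.0], [0.2], [0.8], [1.0], [0.5]])
    y = np.array([0, 0, 1, 1, -1])
    clf.fit(X, y)
    # sample_weight is routed to the inner estimator's score method
    acc = clf.score(X[:4], y[:4], sample_weight=np.ones(4))
|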
def get_metadata_routing(self):
"""Get metadata routing of this object.
Please check :ref:`User Guide <metadata_routing>` on how the routing
mechanism works.
.. versionadded:: 1.6
Returns
-------
routing : MetadataRouter
A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating
routing information.
"""
router = MetadataRouter(owner=self.__class__.__name__)
router.add(
estimator=self.estimator,
method_mapping=(
MethodMapping()
.add(callee="fit", caller="fit")
.add(callee="score", caller="fit")
.add(callee="predict", caller="predict")
.add(callee="predict_proba", caller="predict_proba")
.add(callee="decision_function", caller="decision_function")
.add(callee="predict_log_proba", caller="predict_log_proba")
.add(callee="score", caller="score")
),
)
return router
|
Get metadata routing of this object.
Please check :ref:`User Guide <metadata_routing>` on how the routing
mechanism works.
.. versionadded:: 1.6
Returns
-------
routing : MetadataRouter
A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating
routing information.
|
get_metadata_routing
|
python
|
scikit-learn/scikit-learn
|
sklearn/semi_supervised/_self_training.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/semi_supervised/_self_training.py
|
BSD-3-Clause
|
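The router can also be inspected directly. A sketch, assuming `MetadataRouter.consumes` keeps its current signature (a caller method name plus candidate parameter names):
import sklearn
from sklearn.linear_model import LogisticRegression
from sklearn.semi_supervised import SelfTrainingClassifier
with sklearn.config_context(enable_metadata_routing=True):
    inner = LogisticRegression().set_fit_request(sample_weight=True)
    router = SelfTrainingClassifier(inner).get_metadata_routing()
    # reports which of the given keys the "fit" call chain would consume
    print(router.consumes("fit", ["sample_weight"]))  # expected: {'sample_weight'}
|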
def test_self_training_estimator_attribute_error():
"""Check that we raise the proper AttributeErrors when the `estimator`
does not implement the `predict_proba` method, which is called from within
`fit`, or `decision_function`, which is decorated with `available_if`.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/28108
"""
# `SVC` with `probability=False` does not implement 'predict_proba' that
# is required internally in `fit` of `SelfTrainingClassifier`. We expect
# an AttributeError to be raised.
estimator = SVC(probability=False, gamma="scale")
self_training = SelfTrainingClassifier(estimator)
with pytest.raises(AttributeError, match="has no attribute 'predict_proba'"):
self_training.fit(X_train, y_train_missing_labels)
# `DecisionTreeClassifier` does not implement 'decision_function' and
# should raise an AttributeError
self_training = SelfTrainingClassifier(estimator=DecisionTreeClassifier())
outer_msg = "This 'SelfTrainingClassifier' has no attribute 'decision_function'"
inner_msg = "'DecisionTreeClassifier' object has no attribute 'decision_function'"
with pytest.raises(AttributeError, match=outer_msg) as exec_info:
self_training.fit(X_train, y_train_missing_labels).decision_function(X_train)
assert isinstance(exec_info.value.__cause__, AttributeError)
assert inner_msg in str(exec_info.value.__cause__)
|
Check that we raise the proper AttributeErrors when the `estimator`
does not implement the `predict_proba` method, which is called from within
`fit`, or `decision_function`, which is decorated with `available_if`.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/28108
|
test_self_training_estimator_attribute_error
|
python
|
scikit-learn/scikit-learn
|
sklearn/semi_supervised/tests/test_self_training.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/semi_supervised/tests/test_self_training.py
|
BSD-3-Clause
|
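The guard can be reproduced without pytest; a minimal sketch of the same behaviour (the tree genuinely lacks `decision_function`, so the `available_if` check fails even before fitting):
from sklearn.semi_supervised import SelfTrainingClassifier
from sklearn.tree import DecisionTreeClassifier
clf = SelfTrainingClassifier(DecisionTreeClassifier())
assert not hasattr(clf, "decision_function")  # guarded attribute is absent
|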
def test_routing_passed_metadata_not_supported(method):
"""Test that the right error message is raised when metadata is passed while
not supported when `enable_metadata_routing=False`."""
est = SelfTrainingClassifier(estimator=SimpleEstimator())
with pytest.raises(
ValueError, match="is only supported if enable_metadata_routing=True"
):
est.fit([[1], [1]], [1, 1], sample_weight=[1], prop="a")
est = SelfTrainingClassifier(estimator=SimpleEstimator())
with pytest.raises(
ValueError, match="is only supported if enable_metadata_routing=True"
):
# make sure that the estimator thinks it is already fitted
est.fitted_params_ = True
getattr(est, method)([[1]], sample_weight=[1], prop="a")
|
Test that the right error message is raised when metadata is passed while
not supported when `enable_metadata_routing=False`.
|
test_routing_passed_metadata_not_supported
|
python
|
scikit-learn/scikit-learn
|
sklearn/semi_supervised/tests/test_self_training.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/semi_supervised/tests/test_self_training.py
|
BSD-3-Clause
|
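The same ValueError can be triggered with any real estimator while routing is off; a hedged sketch with an illustrative inner LogisticRegression:
import sklearn
from sklearn.linear_model import LogisticRegression
from sklearn.semi_supervised import SelfTrainingClassifier
with sklearn.config_context(enable_metadata_routing=False):
    clf = SelfTrainingClassifier(LogisticRegression())
    try:
        # metadata passed while routing is disabled is rejected up front
        clf.fit([[0.0], [1.0]], [0, 1], sample_weight=[1.0, 1.0])
        raise AssertionError("expected a ValueError")
    except ValueError as exc:
        assert "enable_metadata_routing=True" in str(exc)
|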
def _one_vs_one_coef(dual_coef, n_support, support_vectors):
"""Generate primal coefficients from dual coefficients
for the one-vs-one multi class LibSVM in the case
of a linear kernel."""
# get 1vs1 weights for all n*(n-1) classifiers.
# this is somewhat messy.
# shape of dual_coef_ is nSV * (n_classes -1)
# see docs for details
n_class = dual_coef.shape[0] + 1
# XXX we could do preallocation of coef but
# would have to take care in the sparse case
coef = []
sv_locs = np.cumsum(np.hstack([[0], n_support]))
for class1 in range(n_class):
# SVs for class1:
sv1 = support_vectors[sv_locs[class1] : sv_locs[class1 + 1], :]
for class2 in range(class1 + 1, n_class):
            # SVs for class2:
sv2 = support_vectors[sv_locs[class2] : sv_locs[class2 + 1], :]
# dual coef for class1 SVs:
alpha1 = dual_coef[class2 - 1, sv_locs[class1] : sv_locs[class1 + 1]]
# dual coef for class2 SVs:
alpha2 = dual_coef[class1, sv_locs[class2] : sv_locs[class2 + 1]]
# build weight for class1 vs class2
coef.append(safe_sparse_dot(alpha1, sv1) + safe_sparse_dot(alpha2, sv2))
return coef
|
Generate primal coefficients from dual coefficients
for the one-vs-one multi class LibSVM in the case
of a linear kernel.
|
_one_vs_one_coef
|
python
|
scikit-learn/scikit-learn
|
sklearn/svm/_base.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/svm/_base.py
|
BSD-3-Clause
|
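A worked check of the layout the helper relies on, using iris (3 classes, hence n*(n-1)/2 = 3 pairwise models); the fitted SVC exposes the reconstructed primal coefficients as `coef_`:
import numpy as np
from sklearn.datasets import load_iris
from sklearn.svm import SVC
X, y = load_iris(return_X_y=True)
svc = SVC(kernel="linear").fit(X, y)
assert svc.coef_.shape == (3, 4)  # 3 one-vs-one models, 4 features
sv_locs = np.cumsum(np.hstack([[0], svc.n_support_]))  # same boundaries as above
print(sv_locs)  # start/end offsets of each class's support vectors
|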
def fit(self, X, y, sample_weight=None):
"""Fit the SVM model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features) \
or (n_samples, n_samples)
Training vectors, where `n_samples` is the number of samples
and `n_features` is the number of features.
For kernel="precomputed", the expected shape of X is
(n_samples, n_samples).
y : array-like of shape (n_samples,)
Target values (class labels in classification, real numbers in
regression).
sample_weight : array-like of shape (n_samples,), default=None
Per-sample weights. Rescale C per sample. Higher weights
force the classifier to put more emphasis on these points.
Returns
-------
self : object
Fitted estimator.
Notes
-----
If X and y are not C-ordered and contiguous arrays of np.float64 and
X is not a scipy.sparse.csr_matrix, X and/or y may be copied.
If X is a dense array, then the other methods will not support sparse
matrices as input.
"""
rnd = check_random_state(self.random_state)
sparse = sp.issparse(X)
if sparse and self.kernel == "precomputed":
raise TypeError("Sparse precomputed kernels are not supported.")
self._sparse = sparse and not callable(self.kernel)
if callable(self.kernel):
check_consistent_length(X, y)
else:
X, y = validate_data(
self,
X,
y,
dtype=np.float64,
order="C",
accept_sparse="csr",
accept_large_sparse=False,
)
y = self._validate_targets(y)
sample_weight = np.asarray(
[] if sample_weight is None else sample_weight, dtype=np.float64
)
solver_type = LIBSVM_IMPL.index(self._impl)
# input validation
n_samples = _num_samples(X)
if solver_type != 2 and n_samples != y.shape[0]:
raise ValueError(
"X and y have incompatible shapes.\n"
+ "X has %s samples, but y has %s." % (n_samples, y.shape[0])
)
if self.kernel == "precomputed" and n_samples != X.shape[1]:
raise ValueError(
"Precomputed matrix must be a square matrix."
" Input is a {}x{} matrix.".format(X.shape[0], X.shape[1])
)
if sample_weight.shape[0] > 0 and sample_weight.shape[0] != n_samples:
raise ValueError(
"sample_weight and X have incompatible shapes: "
"%r vs %r\n"
"Note: Sparse matrices cannot be indexed w/"
"boolean masks (use `indices=True` in CV)."
% (sample_weight.shape, X.shape)
)
kernel = "precomputed" if callable(self.kernel) else self.kernel
if kernel == "precomputed":
# unused but needs to be a float for cython code that ignores
# it anyway
self._gamma = 0.0
elif isinstance(self.gamma, str):
if self.gamma == "scale":
# var = E[X^2] - E[X]^2 if sparse
X_var = (X.multiply(X)).mean() - (X.mean()) ** 2 if sparse else X.var()
self._gamma = 1.0 / (X.shape[1] * X_var) if X_var != 0 else 1.0
elif self.gamma == "auto":
self._gamma = 1.0 / X.shape[1]
elif isinstance(self.gamma, Real):
self._gamma = self.gamma
fit = self._sparse_fit if self._sparse else self._dense_fit
if self.verbose:
print("[LibSVM]", end="")
seed = rnd.randint(np.iinfo("i").max)
fit(X, y, sample_weight, solver_type, kernel, random_seed=seed)
# see comment on the other call to np.iinfo in this file
self.shape_fit_ = X.shape if hasattr(X, "shape") else (n_samples,)
# In binary case, we need to flip the sign of coef, intercept and
# decision function. Use self._intercept_ and self._dual_coef_
# internally.
self._intercept_ = self.intercept_.copy()
self._dual_coef_ = self.dual_coef_
if self._impl in ["c_svc", "nu_svc"] and len(self.classes_) == 2:
self.intercept_ *= -1
self.dual_coef_ = -self.dual_coef_
dual_coef = self._dual_coef_.data if self._sparse else self._dual_coef_
intercept_finiteness = np.isfinite(self._intercept_).all()
dual_coef_finiteness = np.isfinite(dual_coef).all()
if not (intercept_finiteness and dual_coef_finiteness):
raise ValueError(
"The dual coefficients or intercepts are not finite."
" The input data may contain large values and need to be"
" preprocessed."
)
# Since, in the case of SVC and NuSVC, the number of models optimized by
# libSVM could be greater than one (depending on the input), `n_iter_`
# stores an ndarray.
# For the other sub-classes (SVR, NuSVR, and OneClassSVM), the number of
# models optimized by libSVM is always one, so `n_iter_` stores an
# integer.
if self._impl in ["c_svc", "nu_svc"]:
self.n_iter_ = self._num_iter
else:
self.n_iter_ = self._num_iter.item()
return self
|
Fit the SVM model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features) or (n_samples, n_samples)
Training vectors, where `n_samples` is the number of samples
and `n_features` is the number of features.
For kernel="precomputed", the expected shape of X is
(n_samples, n_samples).
y : array-like of shape (n_samples,)
Target values (class labels in classification, real numbers in
regression).
sample_weight : array-like of shape (n_samples,), default=None
Per-sample weights. Rescale C per sample. Higher weights
force the classifier to put more emphasis on these points.
Returns
-------
self : object
Fitted estimator.
Notes
-----
If X and y are not C-ordered and contiguous arrays of np.float64 and
X is not a scipy.sparse.csr_matrix, X and/or y may be copied.
If X is a dense array, then the other methods will not support sparse
matrices as input.
|
fit
|
python
|
scikit-learn/scikit-learn
|
sklearn/svm/_base.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/svm/_base.py
|
BSD-3-Clause
|
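A worked check of the `gamma="scale"` branch above on dense data; note `_gamma` is a private attribute, so this mirrors the source rather than public API:
import numpy as np
from sklearn.svm import SVC
rng = np.random.RandomState(0)
X = rng.normal(size=(50, 3))
y = (X[:, 0] > 0).astype(int)
svc = SVC(kernel="rbf", gamma="scale").fit(X, y)
# on dense input, _gamma = 1 / (n_features * X.var()) unless the variance is zero
assert np.isclose(svc._gamma, 1.0 / (X.shape[1] * X.var()))
|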