code | docstring | func_name | language | repo | path | url | license
---|---|---|---|---|---|---|---|
def check_clusterer_compute_labels_predict(name, clusterer_orig):
"""Check that predict is invariant of compute_labels."""
X, y = make_blobs(n_samples=20, random_state=0)
clusterer = clone(clusterer_orig)
set_random_state(clusterer)
if hasattr(clusterer, "compute_labels"):
# MiniBatchKMeans
X_pred1 = clusterer.fit(X).predict(X)
clusterer.set_params(compute_labels=False)
X_pred2 = clusterer.fit(X).predict(X)
assert_array_equal(X_pred1, X_pred2)
|
Check that predict is invariant of compute_labels.
|
check_clusterer_compute_labels_predict
|
python
|
scikit-learn/scikit-learn
|
sklearn/utils/estimator_checks.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/estimator_checks.py
|
BSD-3-Clause
|
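As a hedged illustration (not part of the dataset row above), the invariance this check asserts can be reproduced directly with MiniBatchKMeans, the estimator named in the inline comment: fitting with and without compute_labels should yield identical predictions.
import numpy as np
from sklearn.cluster import MiniBatchKMeans
from sklearn.datasets import make_blobs

X, _ = make_blobs(n_samples=20, random_state=0)
# compute_labels only controls whether labels_ is stored after fit;
# the fitted centers, and hence predict, should be unchanged.
km_a = MiniBatchKMeans(n_clusters=3, compute_labels=True, n_init=3, random_state=0).fit(X)
km_b = MiniBatchKMeans(n_clusters=3, compute_labels=False, n_init=3, random_state=0).fit(X)
assert np.array_equal(km_a.predict(X), km_b.predict(X))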
def check_classifiers_one_label_sample_weights(name, classifier_orig):
"""Check that classifiers accepting sample_weight fit or throws a ValueError with
an explicit message if the problem is reduced to one class.
"""
error_fit = (
f"{name} failed when fitted on one label after sample_weight trimming. Error "
"message is not explicit, it should have 'class'."
)
error_predict = f"{name} prediction results should only output the remaining class."
rnd = np.random.RandomState(0)
# X should be square for test on SVC with precomputed kernel
X_train = rnd.uniform(size=(10, 10))
X_test = rnd.uniform(size=(10, 10))
y = np.arange(10) % 2
sample_weight = y.copy() # select a single class
classifier = clone(classifier_orig)
if has_fit_parameter(classifier, "sample_weight"):
match = [r"\bclass(es)?\b", error_predict]
err_type, err_msg = (AssertionError, ValueError), error_fit
else:
match = r"\bsample_weight\b"
err_type, err_msg = (TypeError, ValueError), None
with raises(err_type, match=match, may_pass=True, err_msg=err_msg) as cm:
classifier.fit(X_train, y, sample_weight=sample_weight)
if cm.raised_and_matched:
# raise the proper error type with the proper error message
return
# for estimators that do not fail, they should be able to predict the only
# class remaining during fit
assert_array_equal(
classifier.predict(X_test), np.ones(10), err_msg=error_predict
)
|
Check that classifiers accepting sample_weight fit or throw a ValueError with
an explicit message if the problem is reduced to one class.
|
check_classifiers_one_label_sample_weights
|
python
|
scikit-learn/scikit-learn
|
sklearn/utils/estimator_checks.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/estimator_checks.py
|
BSD-3-Clause
|
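A small sketch, assuming the reader wants to see the helper this check branches on: has_fit_parameter (from sklearn.utils.validation) simply reports whether an estimator's fit signature accepts sample_weight.
from sklearn.utils.validation import has_fit_parameter
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier

# LogisticRegression.fit accepts sample_weight, KNeighborsClassifier.fit does not,
# so the check above would take the first and second branch respectively.
print(has_fit_parameter(LogisticRegression(), "sample_weight"))    # True
print(has_fit_parameter(KNeighborsClassifier(), "sample_weight"))  # False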
def check_classifiers_multilabel_output_format_predict(name, classifier_orig):
"""Check the output of the `predict` method for classifiers supporting
multilabel-indicator targets."""
classifier = clone(classifier_orig)
set_random_state(classifier)
n_samples, test_size, n_outputs = 100, 25, 5
X, y = make_multilabel_classification(
n_samples=n_samples,
n_features=2,
n_classes=n_outputs,
n_labels=3,
length=50,
allow_unlabeled=True,
random_state=0,
)
X = scale(X)
X_train, X_test = X[:-test_size], X[-test_size:]
y_train, y_test = y[:-test_size], y[-test_size:]
X_train, X_test = _enforce_estimator_tags_X(classifier_orig, X_train, X_test=X_test)
classifier.fit(X_train, y_train)
response_method_name = "predict"
predict_method = getattr(classifier, response_method_name, None)
if predict_method is None:
raise SkipTest(f"{name} does not have a {response_method_name} method.")
y_pred = predict_method(X_test)
# y_pred.shape -> y_test.shape with the same dtype
assert isinstance(y_pred, np.ndarray), (
f"{name}.predict is expected to output a NumPy array. Got "
f"{type(y_pred)} instead."
)
assert y_pred.shape == y_test.shape, (
f"{name}.predict outputs a NumPy array of shape {y_pred.shape} "
f"instead of {y_test.shape}."
)
assert y_pred.dtype == y_test.dtype, (
f"{name}.predict does not output the same dtype than the targets. "
f"Got {y_pred.dtype} instead of {y_test.dtype}."
)
|
Check the output of the `predict` method for classifiers supporting
multilabel-indicator targets.
|
check_classifiers_multilabel_output_format_predict
|
python
|
scikit-learn/scikit-learn
|
sklearn/utils/estimator_checks.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/estimator_checks.py
|
BSD-3-Clause
|
def check_classifiers_multilabel_output_format_predict_proba(name, classifier_orig):
"""Check the output of the `predict_proba` method for classifiers supporting
multilabel-indicator targets."""
classifier = clone(classifier_orig)
set_random_state(classifier)
n_samples, test_size, n_outputs = 100, 25, 5
X, y = make_multilabel_classification(
n_samples=n_samples,
n_features=2,
n_classes=n_outputs,
n_labels=3,
length=50,
allow_unlabeled=True,
random_state=0,
)
X = scale(X)
X_train, X_test = X[:-test_size], X[-test_size:]
y_train = y[:-test_size]
X_train, X_test = _enforce_estimator_tags_X(classifier_orig, X_train, X_test=X_test)
classifier.fit(X_train, y_train)
response_method_name = "predict_proba"
predict_proba_method = getattr(classifier, response_method_name, None)
if predict_proba_method is None:
raise SkipTest(f"{name} does not have a {response_method_name} method.")
y_pred = predict_proba_method(X_test)
# y_pred.shape -> 2 possibilities:
# - list of length n_outputs of shape (n_samples, 2);
# - ndarray of shape (n_samples, n_outputs).
# dtype should be floating
if isinstance(y_pred, list):
assert len(y_pred) == n_outputs, (
f"When {name}.predict_proba returns a list, the list should "
"be of length n_outputs and contain NumPy arrays. Got length "
f"of {len(y_pred)} instead of {n_outputs}."
)
for pred in y_pred:
assert pred.shape == (test_size, 2), (
f"When {name}.predict_proba returns a list, this list "
"should contain NumPy arrays of shape (n_samples, 2). Got "
f"NumPy arrays of shape {pred.shape} instead of "
f"{(test_size, 2)}."
)
assert pred.dtype.kind == "f", (
f"When {name}.predict_proba returns a list, it should "
"contain NumPy arrays with floating dtype. Got "
f"{pred.dtype} instead."
)
# check that we have the correct probabilities
err_msg = (
f"When {name}.predict_proba returns a list, each NumPy "
"array should contain probabilities for each class and "
"thus each row should sum to 1 (or close to 1 due to "
"numerical errors)."
)
assert_allclose(pred.sum(axis=1), 1, err_msg=err_msg)
elif isinstance(y_pred, np.ndarray):
assert y_pred.shape == (test_size, n_outputs), (
f"When {name}.predict_proba returns a NumPy array, the "
f"expected shape is (n_samples, n_outputs). Got {y_pred.shape}"
f" instead of {(test_size, n_outputs)}."
)
assert y_pred.dtype.kind == "f", (
f"When {name}.predict_proba returns a NumPy array, the "
f"expected data type is floating. Got {y_pred.dtype} instead."
)
err_msg = (
f"When {name}.predict_proba returns a NumPy array, this array "
"is expected to provide probabilities of the positive class "
"and should therefore contain values between 0 and 1."
)
assert_array_less(0, y_pred, err_msg=err_msg)
assert_array_less(y_pred, 1, err_msg=err_msg)
else:
raise ValueError(
f"Unknown returned type {type(y_pred)} by {name}."
"predict_proba. A list or a Numpy array is expected."
)
|
Check the output of the `predict_proba` method for classifiers supporting
multilabel-indicator targets.
|
check_classifiers_multilabel_output_format_predict_proba
|
python
|
scikit-learn/scikit-learn
|
sklearn/utils/estimator_checks.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/estimator_checks.py
|
BSD-3-Clause
|
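To make the two accepted output formats concrete, here is a hedged sketch (the estimator choice is an illustrative assumption, not from the file): a forest classifier returns a list with one array per output (typically of shape (n_samples, 2)), while other multilabel classifiers may return a single (n_samples, n_outputs) array.
from sklearn.datasets import make_multilabel_classification
from sklearn.ensemble import RandomForestClassifier

X, Y = make_multilabel_classification(n_samples=50, n_classes=3, random_state=0)
proba = RandomForestClassifier(random_state=0).fit(X, Y).predict_proba(X)
# list-of-arrays format: one probability array per output/label
print(type(proba), len(proba), proba[0].shape)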
def check_classifiers_multilabel_output_format_decision_function(name, classifier_orig):
"""Check the output of the `decision_function` method for classifiers supporting
multilabel-indicator targets."""
classifier = clone(classifier_orig)
set_random_state(classifier)
n_samples, test_size, n_outputs = 100, 25, 5
X, y = make_multilabel_classification(
n_samples=n_samples,
n_features=2,
n_classes=n_outputs,
n_labels=3,
length=50,
allow_unlabeled=True,
random_state=0,
)
X = scale(X)
X_train, X_test = X[:-test_size], X[-test_size:]
y_train = y[:-test_size]
X_train, X_test = _enforce_estimator_tags_X(classifier_orig, X_train, X_test=X_test)
classifier.fit(X_train, y_train)
response_method_name = "decision_function"
decision_function_method = getattr(classifier, response_method_name, None)
if decision_function_method is None:
raise SkipTest(f"{name} does not have a {response_method_name} method.")
y_pred = decision_function_method(X_test)
# y_pred.shape -> y_test.shape with floating dtype
assert isinstance(y_pred, np.ndarray), (
f"{name}.decision_function is expected to output a NumPy array."
f" Got {type(y_pred)} instead."
)
assert y_pred.shape == (test_size, n_outputs), (
f"{name}.decision_function is expected to provide a NumPy array "
f"of shape (n_samples, n_outputs). Got {y_pred.shape} instead of "
f"{(test_size, n_outputs)}."
)
assert y_pred.dtype.kind == "f", (
f"{name}.decision_function is expected to output a floating dtype."
f" Got {y_pred.dtype} instead."
)
|
Check the output of the `decision_function` method for classifiers supporting
multilabel-indicator targets.
|
check_classifiers_multilabel_output_format_decision_function
|
python
|
scikit-learn/scikit-learn
|
sklearn/utils/estimator_checks.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/estimator_checks.py
|
BSD-3-Clause
|
def check_get_feature_names_out_error(name, estimator_orig):
"""Check the error raised by get_feature_names_out when called before fit.
Unfitted estimators with get_feature_names_out should raise a NotFittedError.
"""
estimator = clone(estimator_orig)
err_msg = (
f"Estimator {name} should have raised a NotFitted error when fit is called"
" before get_feature_names_out"
)
with raises(NotFittedError, err_msg=err_msg):
estimator.get_feature_names_out()
|
Check the error raised by get_feature_names_out when called before fit.
Unfitted estimators with get_feature_names_out should raise a NotFittedError.
|
check_get_feature_names_out_error
|
python
|
scikit-learn/scikit-learn
|
sklearn/utils/estimator_checks.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/estimator_checks.py
|
BSD-3-Clause
|
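A brief, hedged example of the contract being enforced, using StandardScaler as an assumed stand-in estimator: calling get_feature_names_out before fit should raise NotFittedError.
from sklearn.exceptions import NotFittedError
from sklearn.preprocessing import StandardScaler

try:
    StandardScaler().get_feature_names_out()
except NotFittedError:
    print("NotFittedError raised before fit, as this check requires")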
def check_estimators_fit_returns_self(name, estimator_orig):
"""Check if self is returned when calling fit."""
X, y = make_blobs(random_state=0, n_samples=21)
X = _enforce_estimator_tags_X(estimator_orig, X)
estimator = clone(estimator_orig)
y = _enforce_estimator_tags_y(estimator, y)
set_random_state(estimator)
assert estimator.fit(X, y) is estimator
|
Check if self is returned when calling fit.
|
check_estimators_fit_returns_self
|
python
|
scikit-learn/scikit-learn
|
sklearn/utils/estimator_checks.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/estimator_checks.py
|
BSD-3-Clause
|
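The convention being tested enables method chaining; a minimal sketch (the estimator choice is an assumption) follows.
from sklearn.datasets import make_blobs
from sklearn.linear_model import LogisticRegression

X, y = make_blobs(n_samples=21, random_state=0)
clf = LogisticRegression()
assert clf.fit(X, y) is clf            # the contract enforced by the check
labels = clf.fit(X, y).predict(X)      # chaining made possible by returning self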
def check_readonly_memmap_input(name, estimator_orig):
"""Check that the estimator can handle readonly memmap backed data.
This is particularly needed to support joblib parallelisation.
"""
X, y = make_blobs(random_state=0, n_samples=21)
X = _enforce_estimator_tags_X(estimator_orig, X)
estimator = clone(estimator_orig)
y = _enforce_estimator_tags_y(estimator, y)
X, y = create_memmap_backed_data([X, y])
set_random_state(estimator)
# This should not raise an error and should return self
assert estimator.fit(X, y) is estimator
|
Check that the estimator can handle readonly memmap backed data.
This is particularly needed to support joblib parallelisation.
|
check_readonly_memmap_input
|
python
|
scikit-learn/scikit-learn
|
sklearn/utils/estimator_checks.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/estimator_checks.py
|
BSD-3-Clause
|
def check_estimators_unfitted(name, estimator_orig):
"""Check that predict raises an exception in an unfitted estimator.
Unfitted estimators should raise a NotFittedError.
"""
err_msg = (
"Estimator should raise a NotFittedError when calling `{method}` before fit. "
"Either call `check_is_fitted(self)` at the beginning of `{method}` or "
"set `tags.requires_fit=False` on estimator tags to disable this check.\n"
"- `check_is_fitted`: https://scikit-learn.org/dev/modules/generated/sklearn."
"utils.validation.check_is_fitted.html\n"
"- Estimator Tags: https://scikit-learn.org/dev/developers/develop."
"html#estimator-tags"
)
# Common test for Regressors, Classifiers and Outlier detection estimators
X, y = _regression_dataset()
estimator = clone(estimator_orig)
for method in (
"decision_function",
"predict",
"predict_proba",
"predict_log_proba",
):
if hasattr(estimator, method):
with raises(NotFittedError, err_msg=err_msg.format(method=method)):
getattr(estimator, method)(X)
|
Check that predict raises an exception in an unfitted estimator.
Unfitted estimators should raise a NotFittedError.
|
check_estimators_unfitted
|
python
|
scikit-learn/scikit-learn
|
sklearn/utils/estimator_checks.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/estimator_checks.py
|
BSD-3-Clause
|
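For context, a hedged sketch of the behaviour this check requires from any estimator (LogisticRegression is used purely as an example):
import numpy as np
from sklearn.exceptions import NotFittedError
from sklearn.linear_model import LogisticRegression

try:
    LogisticRegression().predict(np.zeros((3, 2)))
except NotFittedError as exc:
    print(f"unfitted predict raised {type(exc).__name__}, as required")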
def check_class_weight_balanced_linear_classifier(name, estimator_orig):
"""Test class weights with non-contiguous class labels."""
# this is run on classes, not instances, though this should be changed
X = np.array([[-1.0, -1.0], [-1.0, 0], [-0.8, -1.0], [1.0, 1.0], [1.0, 0.0]])
y = np.array([1, 1, 1, -1, -1])
classifier = clone(estimator_orig)
if hasattr(classifier, "n_iter"):
# This is a very small dataset, default n_iter are likely to prevent
# convergence
classifier.set_params(n_iter=1000)
if hasattr(classifier, "max_iter"):
classifier.set_params(max_iter=1000)
if hasattr(classifier, "cv"):
classifier.set_params(cv=3)
set_random_state(classifier)
# Let the model compute the class frequencies
classifier.set_params(class_weight="balanced")
coef_balanced = classifier.fit(X, y).coef_.copy()
# Count each label occurrence to reweight manually
n_samples = len(y)
n_classes = float(len(np.unique(y)))
class_weight = {
1: n_samples / (np.sum(y == 1) * n_classes),
-1: n_samples / (np.sum(y == -1) * n_classes),
}
classifier.set_params(class_weight=class_weight)
coef_manual = classifier.fit(X, y).coef_.copy()
assert_allclose(
coef_balanced,
coef_manual,
err_msg="Classifier %s is not computing class_weight=balanced properly." % name,
)
|
Test class weights with non-contiguous class labels.
|
check_class_weight_balanced_linear_classifier
|
python
|
scikit-learn/scikit-learn
|
sklearn/utils/estimator_checks.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/estimator_checks.py
|
BSD-3-Clause
|
def check_estimator_cloneable(name, estimator_orig):
"""Checks whether the estimator can be cloned."""
try:
clone(estimator_orig)
except Exception as e:
raise AssertionError(f"Cloning of {name} failed with error: {e}.") from e
|
Checks whether the estimator can be cloned.
|
check_estimator_cloneable
|
python
|
scikit-learn/scikit-learn
|
sklearn/utils/estimator_checks.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/estimator_checks.py
|
BSD-3-Clause
|
def check_estimator_repr(name, estimator_orig):
"""Check that the estimator has a functioning repr."""
estimator = clone(estimator_orig)
try:
repr(estimator)
except Exception as e:
raise AssertionError(f"Repr of {name} failed with error: {e}.") from e
|
Check that the estimator has a functioning repr.
|
check_estimator_repr
|
python
|
scikit-learn/scikit-learn
|
sklearn/utils/estimator_checks.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/estimator_checks.py
|
BSD-3-Clause
|
def param_default_value(p):
"""Identify hyper parameters of an estimator."""
return (
p.name != "self"
and p.kind != p.VAR_KEYWORD
and p.kind != p.VAR_POSITIONAL
# and it should have a default value for this test
and p.default != p.empty
)
|
Identify hyper parameters of an estimator.
|
param_default_value
|
python
|
scikit-learn/scikit-learn
|
sklearn/utils/estimator_checks.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/estimator_checks.py
|
BSD-3-Clause
|
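As an illustrative sketch (assuming param_default_value is in scope, as it is in this module), the predicate pairs naturally with inspect.signature to enumerate an estimator's keyword hyper-parameters; the complementary param_required predicate below selects the parameters without defaults.
from inspect import signature
from sklearn.linear_model import Ridge

init_params = signature(Ridge.__init__).parameters.values()
# parameters with a default value, i.e. the tunable hyper-parameters
defaults = [p.name for p in init_params if param_default_value(p)]
print(defaults)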
def param_required(p):
"""Identify hyper parameters of an estimator."""
return (
p.name != "self"
and p.kind != p.VAR_KEYWORD
# technically VAR_POSITIONAL is also required, but we don't have a
# nice way to check for it. We assume there's no VAR_POSITIONAL in
# the constructor parameters.
#
# TODO(devtools): separately check that the constructor doesn't
# have *args.
and p.kind != p.VAR_POSITIONAL
# these are parameters that don't have a default value and are
# required to construct the estimator.
and p.default == p.empty
)
|
Identify hyper parameters of an estimator.
|
param_required
|
python
|
scikit-learn/scikit-learn
|
sklearn/utils/estimator_checks.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/estimator_checks.py
|
BSD-3-Clause
|
def check_positive_only_tag_during_fit(name, estimator_orig):
"""Test that the estimator correctly sets the tags.input_tags.positive_only
If the tag is False, the estimator should accept negative input regardless of the
tags.input_tags.pairwise flag.
"""
estimator = clone(estimator_orig)
tags = get_tags(estimator)
X, y = load_iris(return_X_y=True)
y = _enforce_estimator_tags_y(estimator, y)
set_random_state(estimator, 0)
X = _enforce_estimator_tags_X(estimator, X)
X -= X.mean()
if tags.input_tags.positive_only:
with raises(ValueError, match="Negative values in data"):
estimator.fit(X, y)
else:
# This should pass
try:
estimator.fit(X, y)
except Exception as e:
err_msg = (
f"Estimator {name!r} raised {e.__class__.__name__} unexpectedly."
" This happens when passing negative input values as X."
" If negative values are not supported for this estimator instance,"
" then the tags.input_tags.positive_only tag needs to be set to True."
)
raise AssertionError(err_msg) from e
|
Test that the estimator correctly sets the tags.input_tags.positive_only tag.
If the tag is False, the estimator should accept negative input regardless of the
tags.input_tags.pairwise flag.
|
check_positive_only_tag_during_fit
|
python
|
scikit-learn/scikit-learn
|
sklearn/utils/estimator_checks.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/estimator_checks.py
|
BSD-3-Clause
|
def check_valid_tag_types(name, estimator):
"""Check that estimator tags are valid."""
assert hasattr(estimator, "__sklearn_tags__"), (
f"Estimator {name} does not have `__sklearn_tags__` method. This method is"
" implemented in BaseEstimator and returns a sklearn.utils.Tags instance."
)
err_msg = (
"Tag values need to be of a certain type. "
"Please refer to the documentation of `sklearn.utils.Tags` for more details."
)
tags = get_tags(estimator)
assert isinstance(tags.estimator_type, (str, type(None))), err_msg
assert isinstance(tags.target_tags, TargetTags), err_msg
assert isinstance(tags.classifier_tags, (ClassifierTags, type(None))), err_msg
assert isinstance(tags.regressor_tags, (RegressorTags, type(None))), err_msg
assert isinstance(tags.transformer_tags, (TransformerTags, type(None))), err_msg
assert isinstance(tags.input_tags, InputTags), err_msg
assert isinstance(tags.array_api_support, bool), err_msg
assert isinstance(tags.no_validation, bool), err_msg
assert isinstance(tags.non_deterministic, bool), err_msg
assert isinstance(tags.requires_fit, bool), err_msg
assert isinstance(tags._skip_test, bool), err_msg
assert isinstance(tags.target_tags.required, bool), err_msg
assert isinstance(tags.target_tags.one_d_labels, bool), err_msg
assert isinstance(tags.target_tags.two_d_labels, bool), err_msg
assert isinstance(tags.target_tags.positive_only, bool), err_msg
assert isinstance(tags.target_tags.multi_output, bool), err_msg
assert isinstance(tags.target_tags.single_output, bool), err_msg
assert isinstance(tags.input_tags.pairwise, bool), err_msg
assert isinstance(tags.input_tags.allow_nan, bool), err_msg
assert isinstance(tags.input_tags.sparse, bool), err_msg
assert isinstance(tags.input_tags.categorical, bool), err_msg
assert isinstance(tags.input_tags.string, bool), err_msg
assert isinstance(tags.input_tags.dict, bool), err_msg
assert isinstance(tags.input_tags.one_d_array, bool), err_msg
assert isinstance(tags.input_tags.two_d_array, bool), err_msg
assert isinstance(tags.input_tags.three_d_array, bool), err_msg
assert isinstance(tags.input_tags.positive_only, bool), err_msg
if tags.classifier_tags is not None:
assert isinstance(tags.classifier_tags.poor_score, bool), err_msg
assert isinstance(tags.classifier_tags.multi_class, bool), err_msg
assert isinstance(tags.classifier_tags.multi_label, bool), err_msg
if tags.regressor_tags is not None:
assert isinstance(tags.regressor_tags.poor_score, bool), err_msg
if tags.transformer_tags is not None:
assert isinstance(tags.transformer_tags.preserves_dtype, list), err_msg
|
Check that estimator tags are valid.
|
check_valid_tag_types
|
python
|
scikit-learn/scikit-learn
|
sklearn/utils/estimator_checks.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/estimator_checks.py
|
BSD-3-Clause
|
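A short hedged sketch of the introspection this check performs: in recent scikit-learn versions every estimator deriving from BaseEstimator exposes __sklearn_tags__, returning the sklearn.utils.Tags instance whose fields are asserted above.
from sklearn.linear_model import LogisticRegression

tags = LogisticRegression().__sklearn_tags__()
print(tags.estimator_type)                # 'classifier'
print(tags.target_tags.required)          # True for supervised estimators
print(tags.classifier_tags.multi_class)   # True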
def _output_from_fit_transform(transformer, name, X, df, y):
"""Generate output to test `set_output` for different configuration:
- calling either `fit.transform` or `fit_transform`;
- passing either a dataframe or a numpy array to fit;
- passing either a dataframe or a numpy array to transform.
"""
outputs = {}
# fit then transform case:
cases = [
("fit.transform/df/df", df, df),
("fit.transform/df/array", df, X),
("fit.transform/array/df", X, df),
("fit.transform/array/array", X, X),
]
if all(hasattr(transformer, meth) for meth in ["fit", "transform"]):
for (
case,
data_fit,
data_transform,
) in cases:
transformer.fit(data_fit, y)
if name in CROSS_DECOMPOSITION:
X_trans, _ = transformer.transform(data_transform, y)
else:
X_trans = transformer.transform(data_transform)
outputs[case] = (X_trans, transformer.get_feature_names_out())
# fit_transform case:
cases = [
("fit_transform/df", df),
("fit_transform/array", X),
]
if hasattr(transformer, "fit_transform"):
for case, data in cases:
if name in CROSS_DECOMPOSITION:
X_trans, _ = transformer.fit_transform(data, y)
else:
X_trans = transformer.fit_transform(data, y)
outputs[case] = (X_trans, transformer.get_feature_names_out())
return outputs
|
Generate output to test `set_output` for different configuration:
- calling either `fit.transform` or `fit_transform`;
- passing either a dataframe or a numpy array to fit;
- passing either a dataframe or a numpy array to transform.
|
_output_from_fit_transform
|
python
|
scikit-learn/scikit-learn
|
sklearn/utils/estimator_checks.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/estimator_checks.py
|
BSD-3-Clause
|
def _check_generated_dataframe(
name,
case,
index,
outputs_default,
outputs_dataframe_lib,
is_supported_dataframe,
create_dataframe,
assert_frame_equal,
):
"""Check if the generated DataFrame by the transformer is valid.
The DataFrame implementation is specified through the parameters of this function.
Parameters
----------
name : str
The name of the transformer.
case : str
A single case from the cases generated by `_output_from_fit_transform`.
index : index or None
The index of the DataFrame. `None` if the library does not implement a DataFrame
with an index.
outputs_default : tuple
A tuple containing the output data and feature names for the default output.
outputs_dataframe_lib : tuple
A tuple containing the output data and feature names for the pandas case.
is_supported_dataframe : callable
A callable that takes a DataFrame instance as input and returns whether or
not it is supported by the dataframe library.
E.g. `lambda X: isinstance(X, pd.DataFrame)`.
create_dataframe : callable
A callable taking as parameters `data`, `columns`, and `index` and returns
a dataframe. Be aware that `index` can be ignored. For example, polars dataframes
would ignore the index.
assert_frame_equal : callable
A callable taking 2 dataframes to compare if they are equal.
"""
X_trans, feature_names_default = outputs_default
df_trans, feature_names_dataframe_lib = outputs_dataframe_lib
assert is_supported_dataframe(df_trans)
# We always rely on the output of `get_feature_names_out` of the
# transformer used to generate the dataframe as a ground-truth of the
# columns.
# If a dataframe is passed into transform, then the output should have the same
# index
expected_index = index if case.endswith("df") else None
expected_dataframe = create_dataframe(
X_trans, columns=feature_names_dataframe_lib, index=expected_index
)
try:
assert_frame_equal(df_trans, expected_dataframe)
except AssertionError as e:
raise AssertionError(
f"{name} does not generate a valid dataframe in the {case} "
"case. The generated dataframe is not equal to the expected "
f"dataframe. The error message is: {e}"
) from e
|
Check if the generated DataFrame by the transformer is valid.
The DataFrame implementation is specified through the parameters of this function.
Parameters
----------
name : str
The name of the transformer.
case : str
A single case from the cases generated by `_output_from_fit_transform`.
index : index or None
The index of the DataFrame. `None` if the library does not implement a DataFrame
with an index.
outputs_default : tuple
A tuple containing the output data and feature names for the default output.
outputs_dataframe_lib : tuple
A tuple containing the output data and feature names for the pandas case.
is_supported_dataframe : callable
A callable that takes a DataFrame instance as input and returns whether or
not it is supported by the dataframe library.
E.g. `lambda X: isinstance(X, pd.DataFrame)`.
create_dataframe : callable
A callable taking as parameters `data`, `columns`, and `index` and returns
a dataframe. Be aware that `index` can be ignored. For example, polars dataframes
would ignore the index.
assert_frame_equal : callable
A callable taking 2 dataframes to compare if they are equal.
|
_check_generated_dataframe
|
python
|
scikit-learn/scikit-learn
|
sklearn/utils/estimator_checks.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/estimator_checks.py
|
BSD-3-Clause
|
def _check_set_output_transform_dataframe(
name,
transformer_orig,
*,
dataframe_lib,
is_supported_dataframe,
create_dataframe,
assert_frame_equal,
context,
):
"""Check that a transformer can output a DataFrame when requested.
The DataFrame implementation is specified through the parameters of this function.
Parameters
----------
name : str
The name of the transformer.
transformer_orig : estimator
The original transformer instance.
dataframe_lib : str
The name of the library implementing the DataFrame.
is_supported_dataframe : callable
A callable that takes a DataFrame instance as input and returns whether or
not it is supported by the dataframe library.
E.g. `lambda X: isinstance(X, pd.DataFrame)`.
create_dataframe : callable
A callable taking as parameters `data`, `columns`, and `index` and returns
a dataframe. Be aware that `index` can be ignored. For example, polars dataframes
will ignore the index.
assert_frame_equal : callable
A callable taking 2 dataframes to compare if they are equal.
context : {"local", "global"}
Whether to use a local context by setting `set_output(...)` on the transformer
or a global context by using the `with config_context(...)`
"""
# Check transformer.set_output configures the output of transform="pandas".
tags = get_tags(transformer_orig)
if not tags.input_tags.two_d_array or tags.no_validation:
return
rng = np.random.RandomState(0)
transformer = clone(transformer_orig)
X = rng.uniform(size=(20, 5))
X = _enforce_estimator_tags_X(transformer_orig, X)
y = rng.randint(0, 2, size=20)
y = _enforce_estimator_tags_y(transformer_orig, y)
set_random_state(transformer)
feature_names_in = [f"col{i}" for i in range(X.shape[1])]
index = [f"index{i}" for i in range(X.shape[0])]
df = create_dataframe(X, columns=feature_names_in, index=index)
transformer_default = clone(transformer).set_output(transform="default")
outputs_default = _output_from_fit_transform(transformer_default, name, X, df, y)
if context == "local":
transformer_df = clone(transformer).set_output(transform=dataframe_lib)
context_to_use = nullcontext()
else: # global
transformer_df = clone(transformer)
context_to_use = config_context(transform_output=dataframe_lib)
try:
with context_to_use:
outputs_df = _output_from_fit_transform(transformer_df, name, X, df, y)
except ValueError as e:
# transformer does not support sparse data
capitalized_lib = dataframe_lib.capitalize()
error_message = str(e)
assert (
f"{capitalized_lib} output does not support sparse data." in error_message
or "The transformer outputs a scipy sparse matrix." in error_message
), e
return
for case in outputs_default:
_check_generated_dataframe(
name,
case,
index,
outputs_default[case],
outputs_df[case],
is_supported_dataframe,
create_dataframe,
assert_frame_equal,
)
|
Check that a transformer can output a DataFrame when requested.
The DataFrame implementation is specified through the parameters of this function.
Parameters
----------
name : str
The name of the transformer.
transformer_orig : estimator
The original transformer instance.
dataframe_lib : str
The name of the library implementing the DataFrame.
is_supported_dataframe : callable
A callable that takes a DataFrame instance as input and returns whether or
not it is supported by the dataframe library.
E.g. `lambda X: isinstance(X, pd.DataFrame)`.
create_dataframe : callable
A callable taking as parameters `data`, `columns`, and `index` and returns
a dataframe. Be aware that `index` can be ignored. For example, polars dataframes
will ignore the index.
assert_frame_equal : callable
A callable taking 2 dataframes to compare if they are equal.
context : {"local", "global"}
Whether to use a local context by setting `set_output(...)` on the transformer
or a global context by using the `with config_context(...)`
|
_check_set_output_transform_dataframe
|
python
|
scikit-learn/scikit-learn
|
sklearn/utils/estimator_checks.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/estimator_checks.py
|
BSD-3-Clause
|
def check_inplace_ensure_writeable(name, estimator_orig):
"""Check that estimators able to do inplace operations can work on read-only
input data even if a copy is not explicitly requested by the user.
Make sure that a copy is made and consequently that the input array and its
writeability are not modified by the estimator.
"""
rng = np.random.RandomState(0)
estimator = clone(estimator_orig)
set_random_state(estimator)
n_samples = 100
X, _ = make_blobs(n_samples=n_samples, n_features=3, random_state=rng)
X = _enforce_estimator_tags_X(estimator, X)
# These estimators can only work inplace with fortran ordered input
if name in ("Lasso", "ElasticNet", "MultiTaskElasticNet", "MultiTaskLasso"):
X = np.asfortranarray(X)
# Add a missing value for imputers so that transform has to do something
if hasattr(estimator, "missing_values"):
X[0, 0] = np.nan
if is_regressor(estimator):
y = rng.normal(size=n_samples)
else:
y = rng.randint(low=0, high=2, size=n_samples)
y = _enforce_estimator_tags_y(estimator, y)
X_copy = X.copy()
# Make X read-only
X.setflags(write=False)
estimator.fit(X, y)
if hasattr(estimator, "transform"):
estimator.transform(X)
assert not X.flags.writeable
assert_allclose(X, X_copy)
|
Check that estimators able to do inplace operations can work on read-only
input data even if a copy is not explicitly requested by the user.
Make sure that a copy is made and consequently that the input array and its
writeability are not modified by the estimator.
|
check_inplace_ensure_writeable
|
python
|
scikit-learn/scikit-learn
|
sklearn/utils/estimator_checks.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/estimator_checks.py
|
BSD-3-Clause
|
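To make the scenario concrete, a minimal sketch (StandardScaler is an assumed example) of fitting on data flagged read-only, as this check does via memmap-like inputs:
import numpy as np
from sklearn.preprocessing import StandardScaler

X = np.random.RandomState(0).uniform(size=(10, 3))
X.setflags(write=False)              # simulate read-only, memmap-backed input
StandardScaler().fit_transform(X)    # must copy internally instead of writing in place
assert not X.flags.writeable         # the input's writeability is left untouched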
def check_do_not_raise_errors_in_init_or_set_params(name, estimator_orig):
"""Check that init or set_param does not raise errors."""
Estimator = type(estimator_orig)
params = signature(Estimator).parameters
smoke_test_values = [-1, 3.0, "helloworld", np.array([1.0, 4.0]), [1], {}, []]
for value in smoke_test_values:
new_params = {key: value for key in params}
# Does not raise
est = Estimator(**new_params)
# Also does not raise
est.set_params(**new_params)
|
Check that init or set_param does not raise errors.
|
check_do_not_raise_errors_in_init_or_set_params
|
python
|
scikit-learn/scikit-learn
|
sklearn/utils/estimator_checks.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/estimator_checks.py
|
BSD-3-Clause
|
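The convention behind this check is that __init__ and set_params only store values without validating them; a hedged sketch (Ridge chosen arbitrarily):
from sklearn.linear_model import Ridge

est = Ridge(alpha="helloworld")   # nonsensical, but stored without error
est.set_params(alpha=-1)          # still no error; validation is deferred to fit
print(est.get_params()["alpha"])  # -1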
def check_classifier_not_supporting_multiclass(name, estimator_orig):
"""Check that if the classifier has tags.classifier_tags.multi_class=False,
then it should raise a ValueError when calling fit with a multiclass dataset.
This test is not yielded if the tag is not False.
"""
estimator = clone(estimator_orig)
set_random_state(estimator)
X, y = make_classification(
n_samples=100,
n_classes=3,
n_informative=3,
n_clusters_per_class=1,
random_state=0,
)
err_msg = """\
The estimator tag `tags.classifier_tags.multi_class` is False for {name}
which means it does not support multiclass classification. However, it does
not raise the right `ValueError` when calling fit with a multiclass dataset,
including the error message 'Only binary classification is supported.' This
can be achieved by the following pattern:
y_type = type_of_target(y, input_name='y', raise_unknown=True)
if y_type != 'binary':
raise ValueError(
'Only binary classification is supported. The type of the target '
f'is {{y_type}}.'
)
""".format(name=name)
err_msg = textwrap.dedent(err_msg)
with raises(
ValueError, match="Only binary classification is supported.", err_msg=err_msg
):
estimator.fit(X, y)
|
Check that if the classifier has tags.classifier_tags.multi_class=False,
then it should raise a ValueError when calling fit with a multiclass dataset.
This test is not yielded if the tag is not False.
|
check_classifier_not_supporting_multiclass
|
python
|
scikit-learn/scikit-learn
|
sklearn/utils/estimator_checks.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/estimator_checks.py
|
BSD-3-Clause
|
def squared_norm(x):
"""Squared Euclidean or Frobenius norm of x.
Faster than norm(x) ** 2.
Parameters
----------
x : array-like
The input array, which could be either a vector or a 2-dimensional array.
Returns
-------
float
The Euclidean norm when x is a vector, the Frobenius norm when x
is a matrix (2-d array).
"""
x = np.ravel(x, order="K")
if np.issubdtype(x.dtype, np.integer):
warnings.warn(
(
"Array type is integer, np.dot may overflow. "
"Data should be float type to avoid this issue"
),
UserWarning,
)
return np.dot(x, x)
|
Squared Euclidean or Frobenius norm of x.
Faster than norm(x) ** 2.
Parameters
----------
x : array-like
The input array, which could be either a vector or a 2-dimensional array.
Returns
-------
float
The Euclidean norm when x is a vector, the Frobenius norm when x
is a matrix (2-d array).
|
squared_norm
|
python
|
scikit-learn/scikit-learn
|
sklearn/utils/extmath.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/extmath.py
|
BSD-3-Clause
|
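Since the docstring above gives no example, here is a brief, hedged usage sketch: the function returns ||x||^2 for a vector and the squared Frobenius norm for a 2-d array.
import numpy as np
from sklearn.utils.extmath import squared_norm

v = np.array([3.0, 4.0])
print(squared_norm(v))                          # 25.0
M = np.array([[1.0, 2.0], [3.0, 4.0]])
print(squared_norm(M), np.linalg.norm(M) ** 2)  # both ~30.0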
def row_norms(X, squared=False):
"""Row-wise (squared) Euclidean norm of X.
Equivalent to np.sqrt((X * X).sum(axis=1)), but also supports sparse
matrices and does not create an X.shape-sized temporary.
Performs no input validation.
Parameters
----------
X : array-like
The input array.
squared : bool, default=False
If True, return squared norms.
Returns
-------
array-like
The row-wise (squared) Euclidean norm of X.
"""
if sparse.issparse(X):
X = X.tocsr()
norms = csr_row_norms(X)
if not squared:
norms = np.sqrt(norms)
else:
xp, _ = get_namespace(X)
if _is_numpy_namespace(xp):
X = np.asarray(X)
norms = np.einsum("ij,ij->i", X, X)
norms = xp.asarray(norms)
else:
norms = xp.sum(xp.multiply(X, X), axis=1)
if not squared:
norms = xp.sqrt(norms)
return norms
|
Row-wise (squared) Euclidean norm of X.
Equivalent to np.sqrt((X * X).sum(axis=1)), but also supports sparse
matrices and does not create an X.shape-sized temporary.
Performs no input validation.
Parameters
----------
X : array-like
The input array.
squared : bool, default=False
If True, return squared norms.
Returns
-------
array-like
The row-wise (squared) Euclidean norm of X.
|
row_norms
|
python
|
scikit-learn/scikit-learn
|
sklearn/utils/extmath.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/extmath.py
|
BSD-3-Clause
|
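Likewise, a short hedged sketch for row_norms, matching the np.sqrt((X * X).sum(axis=1)) equivalence stated in the docstring:
import numpy as np
from sklearn.utils.extmath import row_norms

X = np.array([[3.0, 4.0], [6.0, 8.0]])
print(row_norms(X))                 # [ 5. 10.]
print(row_norms(X, squared=True))   # [ 25. 100.]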
def fast_logdet(A):
"""Compute logarithm of determinant of a square matrix.
The (natural) logarithm of the determinant of a square matrix
is returned if det(A) is non-negative and well defined.
If the determinant is zero or negative returns -Inf.
Equivalent to np.log(np.linalg.det(A)) but more robust.
Parameters
----------
A : array_like of shape (n, n)
The square matrix.
Returns
-------
logdet : float
When det(A) is strictly positive, log(det(A)) is returned.
When det(A) is non-positive or not defined, then -inf is returned.
See Also
--------
numpy.linalg.slogdet : Compute the sign and (natural) logarithm of the determinant
of an array.
Examples
--------
>>> import numpy as np
>>> from sklearn.utils.extmath import fast_logdet
>>> a = np.array([[5, 1], [2, 8]])
>>> fast_logdet(a)
np.float64(3.6375861597263857)
"""
xp, _ = get_namespace(A)
sign, ld = xp.linalg.slogdet(A)
if not sign > 0:
return -xp.inf
return ld
|
Compute logarithm of determinant of a square matrix.
The (natural) logarithm of the determinant of a square matrix
is returned if det(A) is non-negative and well defined.
If the determinant is zero or negative returns -Inf.
Equivalent to np.log(np.linalg.det(A)) but more robust.
Parameters
----------
A : array_like of shape (n, n)
The square matrix.
Returns
-------
logdet : float
When det(A) is strictly positive, log(det(A)) is returned.
When det(A) is non-positive or not defined, then -inf is returned.
See Also
--------
numpy.linalg.slogdet : Compute the sign and (natural) logarithm of the determinant
of an array.
Examples
--------
>>> import numpy as np
>>> from sklearn.utils.extmath import fast_logdet
>>> a = np.array([[5, 1], [2, 8]])
>>> fast_logdet(a)
np.float64(3.6375861597263857)
|
fast_logdet
|
python
|
scikit-learn/scikit-learn
|
sklearn/utils/extmath.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/extmath.py
|
BSD-3-Clause
|
def density(w):
"""Compute density of a sparse vector.
Parameters
----------
w : {ndarray, sparse matrix}
The input data can be numpy ndarray or a sparse matrix.
Returns
-------
float
The density of w, between 0 and 1.
Examples
--------
>>> from scipy import sparse
>>> from sklearn.utils.extmath import density
>>> X = sparse.random(10, 10, density=0.25, random_state=0)
>>> density(X)
0.25
"""
if hasattr(w, "toarray"):
d = float(w.nnz) / (w.shape[0] * w.shape[1])
else:
d = 0 if w is None else float((w != 0).sum()) / w.size
return d
|
Compute density of a sparse vector.
Parameters
----------
w : {ndarray, sparse matrix}
The input data can be numpy ndarray or a sparse matrix.
Returns
-------
float
The density of w, between 0 and 1.
Examples
--------
>>> from scipy import sparse
>>> from sklearn.utils.extmath import density
>>> X = sparse.random(10, 10, density=0.25, random_state=0)
>>> density(X)
0.25
|
density
|
python
|
scikit-learn/scikit-learn
|
sklearn/utils/extmath.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/extmath.py
|
BSD-3-Clause
|
def safe_sparse_dot(a, b, *, dense_output=False):
"""Dot product that handle the sparse matrix case correctly.
Parameters
----------
a : {ndarray, sparse matrix}
b : {ndarray, sparse matrix}
dense_output : bool, default=False
When False, ``a`` and ``b`` both being sparse will yield sparse output.
When True, output will always be a dense array.
Returns
-------
dot_product : {ndarray, sparse matrix}
Sparse if ``a`` and ``b`` are sparse and ``dense_output=False``.
Examples
--------
>>> from scipy.sparse import csr_matrix
>>> from sklearn.utils.extmath import safe_sparse_dot
>>> X = csr_matrix([[1, 2], [3, 4], [5, 6]])
>>> dot_product = safe_sparse_dot(X, X.T)
>>> dot_product.toarray()
array([[ 5, 11, 17],
[11, 25, 39],
[17, 39, 61]])
"""
xp, _ = get_namespace(a, b)
if a.ndim > 2 or b.ndim > 2:
if sparse.issparse(a):
# sparse is always 2D. Implies b is 3D+
# [i, j] @ [k, ..., l, m, n] -> [i, k, ..., l, n]
b_ = np.rollaxis(b, -2)
b_2d = b_.reshape((b.shape[-2], -1))
ret = a @ b_2d
ret = ret.reshape(a.shape[0], *b_.shape[1:])
elif sparse.issparse(b):
# sparse is always 2D. Implies a is 3D+
# [k, ..., l, m] @ [i, j] -> [k, ..., l, j]
a_2d = a.reshape(-1, a.shape[-1])
ret = a_2d @ b
ret = ret.reshape(*a.shape[:-1], b.shape[1])
else:
# Alternative for `np.dot` when dealing with a or b having
# more than 2 dimensions, that works with the array api.
# If b is 1-dim then the last axis for b is taken otherwise
# if b is >= 2-dim then the second to last axis is taken.
b_axis = -1 if b.ndim == 1 else -2
ret = xp.tensordot(a, b, axes=[-1, b_axis])
else:
ret = a @ b
if (
sparse.issparse(a)
and sparse.issparse(b)
and dense_output
and hasattr(ret, "toarray")
):
return ret.toarray()
return ret
|
Dot product that handles the sparse matrix case correctly.
Parameters
----------
a : {ndarray, sparse matrix}
b : {ndarray, sparse matrix}
dense_output : bool, default=False
When False, ``a`` and ``b`` both being sparse will yield sparse output.
When True, output will always be a dense array.
Returns
-------
dot_product : {ndarray, sparse matrix}
Sparse if ``a`` and ``b`` are sparse and ``dense_output=False``.
Examples
--------
>>> from scipy.sparse import csr_matrix
>>> from sklearn.utils.extmath import safe_sparse_dot
>>> X = csr_matrix([[1, 2], [3, 4], [5, 6]])
>>> dot_product = safe_sparse_dot(X, X.T)
>>> dot_product.toarray()
array([[ 5, 11, 17],
[11, 25, 39],
[17, 39, 61]])
|
safe_sparse_dot
|
python
|
scikit-learn/scikit-learn
|
sklearn/utils/extmath.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/extmath.py
|
BSD-3-Clause
|
def randomized_range_finder(
A, *, size, n_iter, power_iteration_normalizer="auto", random_state=None
):
"""Compute an orthonormal matrix whose range approximates the range of A.
Parameters
----------
A : {array-like, sparse matrix} of shape (n_samples, n_features)
The input data matrix.
size : int
Size of the return array.
n_iter : int
Number of power iterations used to stabilize the result.
power_iteration_normalizer : {'auto', 'QR', 'LU', 'none'}, default='auto'
Whether the power iterations are normalized with step-by-step
QR factorization (the slowest but most accurate), 'none'
(the fastest but numerically unstable when `n_iter` is large, e.g.
typically 5 or larger), or 'LU' factorization (numerically stable
but can lose slightly in accuracy). The 'auto' mode applies no
normalization if `n_iter` <= 2 and switches to LU otherwise.
.. versionadded:: 0.18
random_state : int, RandomState instance or None, default=None
The seed of the pseudo random number generator to use when shuffling
the data, i.e. getting the random vectors to initialize the algorithm.
Pass an int for reproducible results across multiple function calls.
See :term:`Glossary <random_state>`.
Returns
-------
Q : ndarray of shape (A.shape[0], size)
A projection matrix, the range of which approximates well the range of the
input matrix A.
Notes
-----
Follows Algorithm 4.3 of
:arxiv:`"Finding structure with randomness:
Stochastic algorithms for constructing approximate matrix decompositions"
<0909.4061>`
Halko, et al. (2009)
An implementation of a randomized algorithm for principal component
analysis
A. Szlam et al. 2014
Examples
--------
>>> import numpy as np
>>> from sklearn.utils.extmath import randomized_range_finder
>>> A = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
>>> randomized_range_finder(A, size=2, n_iter=2, random_state=42)
array([[-0.214, 0.887],
[-0.521, 0.249],
[-0.826, -0.388]])
"""
A = check_array(A, accept_sparse=True)
return _randomized_range_finder(
A,
size=size,
n_iter=n_iter,
power_iteration_normalizer=power_iteration_normalizer,
random_state=random_state,
)
|
Compute an orthonormal matrix whose range approximates the range of A.
Parameters
----------
A : {array-like, sparse matrix} of shape (n_samples, n_features)
The input data matrix.
size : int
Size of the return array.
n_iter : int
Number of power iterations used to stabilize the result.
power_iteration_normalizer : {'auto', 'QR', 'LU', 'none'}, default='auto'
Whether the power iterations are normalized with step-by-step
QR factorization (the slowest but most accurate), 'none'
(the fastest but numerically unstable when `n_iter` is large, e.g.
typically 5 or larger), or 'LU' factorization (numerically stable
but can lose slightly in accuracy). The 'auto' mode applies no
normalization if `n_iter` <= 2 and switches to LU otherwise.
.. versionadded:: 0.18
random_state : int, RandomState instance or None, default=None
The seed of the pseudo random number generator to use when shuffling
the data, i.e. getting the random vectors to initialize the algorithm.
Pass an int for reproducible results across multiple function calls.
See :term:`Glossary <random_state>`.
Returns
-------
Q : ndarray of shape (A.shape[0], size)
A projection matrix, the range of which approximates well the range of the
input matrix A.
Notes
-----
Follows Algorithm 4.3 of
:arxiv:`"Finding structure with randomness:
Stochastic algorithms for constructing approximate matrix decompositions"
<0909.4061>`
Halko, et al. (2009)
An implementation of a randomized algorithm for principal component
analysis
A. Szlam et al. 2014
Examples
--------
>>> import numpy as np
>>> from sklearn.utils.extmath import randomized_range_finder
>>> A = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
>>> randomized_range_finder(A, size=2, n_iter=2, random_state=42)
array([[-0.214, 0.887],
[-0.521, 0.249],
[-0.826, -0.388]])
|
randomized_range_finder
|
python
|
scikit-learn/scikit-learn
|
sklearn/utils/extmath.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/extmath.py
|
BSD-3-Clause
|
def randomized_svd(
M,
n_components,
*,
n_oversamples=10,
n_iter="auto",
power_iteration_normalizer="auto",
transpose="auto",
flip_sign=True,
random_state=None,
svd_lapack_driver="gesdd",
):
"""Compute a truncated randomized SVD.
This method solves the fixed-rank approximation problem described in [1]_
(problem (1.5), p5).
Refer to
:ref:`sphx_glr_auto_examples_applications_wikipedia_principal_eigenvector.py`
for a typical example where the power iteration algorithm is used to rank web pages.
This algorithm is also known to be used as a building block in Google's PageRank
algorithm.
Parameters
----------
M : {array-like, sparse matrix} of shape (n_samples, n_features)
Matrix to decompose.
n_components : int
Number of singular values and vectors to extract.
n_oversamples : int, default=10
Additional number of random vectors to sample the range of `M` so as
to ensure proper conditioning. The total number of random vectors
used to find the range of `M` is `n_components + n_oversamples`. Smaller
number can improve speed but can negatively impact the quality of
approximation of singular vectors and singular values. Users might wish
to increase this parameter up to `2*k - n_components` where k is the
effective rank, for large matrices, noisy problems, matrices with
slowly decaying spectrums, or to increase precision accuracy. See [1]_
(pages 5, 23 and 26).
n_iter : int or 'auto', default='auto'
Number of power iterations. It can be used to deal with very noisy
problems. When 'auto', it is set to 4, unless `n_components` is small
(< .1 * min(X.shape)) in which case `n_iter` is set to 7.
This improves precision with few components. Note that in general
users should rather increase `n_oversamples` before increasing `n_iter`
as the principle of the randomized method is to avoid usage of these
more costly power iteration steps. When `n_components` is equal to
or greater than the effective matrix rank and the spectrum does not
present a slow decay, `n_iter=0` or `1` should even work fine in theory
(see [1]_ page 9).
.. versionchanged:: 0.18
power_iteration_normalizer : {'auto', 'QR', 'LU', 'none'}, default='auto'
Whether the power iterations are normalized with step-by-step
QR factorization (the slowest but most accurate), 'none'
(the fastest but numerically unstable when `n_iter` is large, e.g.
typically 5 or larger), or 'LU' factorization (numerically stable
but can lose slightly in accuracy). The 'auto' mode applies no
normalization if `n_iter` <= 2 and switches to LU otherwise.
.. versionadded:: 0.18
transpose : bool or 'auto', default='auto'
Whether the algorithm should be applied to M.T instead of M. The
result should approximately be the same. The 'auto' mode will
trigger the transposition if M.shape[1] > M.shape[0] since this
implementation of randomized SVD tends to be a little faster in that
case.
.. versionchanged:: 0.18
flip_sign : bool, default=True
The output of a singular value decomposition is only unique up to a
permutation of the signs of the singular vectors. If `flip_sign` is
set to `True`, the sign ambiguity is resolved by making the largest
loadings for each component in the left singular vectors positive.
random_state : int, RandomState instance or None, default=None
The seed of the pseudo random number generator to use when
shuffling the data, i.e. getting the random vectors to initialize
the algorithm. Pass an int for reproducible results across multiple
function calls. See :term:`Glossary <random_state>`.
.. versionchanged:: 1.2
The default value changed from 0 to None.
svd_lapack_driver : {"gesdd", "gesvd"}, default="gesdd"
Whether to use the more efficient divide-and-conquer approach
(`"gesdd"`) or more general rectangular approach (`"gesvd"`) to compute
the SVD of the matrix B, which is the projection of M into a low
dimensional subspace, as described in [1]_.
.. versionadded:: 1.2
Returns
-------
u : ndarray of shape (n_samples, n_components)
Unitary matrix having left singular vectors with signs flipped as columns.
s : ndarray of shape (n_components,)
The singular values, sorted in non-increasing order.
vh : ndarray of shape (n_components, n_features)
Unitary matrix having right singular vectors with signs flipped as rows.
Notes
-----
This algorithm finds a (usually very good) approximate truncated
singular value decomposition using randomization to speed up the
computations. It is particularly fast on large matrices on which
you wish to extract only a small number of components. In order to
obtain further speed up, `n_iter` can be set <=2 (at the cost of
loss of precision). To increase the precision it is recommended to
increase `n_oversamples`, up to `2*k-n_components` where k is the
effective rank. Usually, `n_components` is chosen to be greater than k
so increasing `n_oversamples` up to `n_components` should be enough.
References
----------
.. [1] :arxiv:`"Finding structure with randomness:
Stochastic algorithms for constructing approximate matrix decompositions"
<0909.4061>`
Halko, et al. (2009)
.. [2] A randomized algorithm for the decomposition of matrices
Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert
.. [3] An implementation of a randomized algorithm for principal component
analysis A. Szlam et al. 2014
Examples
--------
>>> import numpy as np
>>> from sklearn.utils.extmath import randomized_svd
>>> a = np.array([[1, 2, 3, 5],
... [3, 4, 5, 6],
... [7, 8, 9, 10]])
>>> U, s, Vh = randomized_svd(a, n_components=2, random_state=0)
>>> U.shape, s.shape, Vh.shape
((3, 2), (2,), (2, 4))
"""
M = check_array(M, accept_sparse=True)
return _randomized_svd(
M,
n_components=n_components,
n_oversamples=n_oversamples,
n_iter=n_iter,
power_iteration_normalizer=power_iteration_normalizer,
transpose=transpose,
flip_sign=flip_sign,
random_state=random_state,
svd_lapack_driver=svd_lapack_driver,
)
|
Compute a truncated randomized SVD.
This method solves the fixed-rank approximation problem described in [1]_
(problem (1.5), p5).
Refer to
:ref:`sphx_glr_auto_examples_applications_wikipedia_principal_eigenvector.py`
for a typical example where the power iteration algorithm is used to rank web pages.
This algorithm is also known to be used as a building block in Google's PageRank
algorithm.
Parameters
----------
M : {array-like, sparse matrix} of shape (n_samples, n_features)
Matrix to decompose.
n_components : int
Number of singular values and vectors to extract.
n_oversamples : int, default=10
Additional number of random vectors to sample the range of `M` so as
to ensure proper conditioning. The total number of random vectors
used to find the range of `M` is `n_components + n_oversamples`. Smaller
number can improve speed but can negatively impact the quality of
approximation of singular vectors and singular values. Users might wish
to increase this parameter up to `2*k - n_components` where k is the
effective rank, for large matrices, noisy problems, matrices with
slowly decaying spectrums, or to increase precision accuracy. See [1]_
(pages 5, 23 and 26).
n_iter : int or 'auto', default='auto'
Number of power iterations. It can be used to deal with very noisy
problems. When 'auto', it is set to 4, unless `n_components` is small
(< .1 * min(X.shape)) in which case `n_iter` is set to 7.
This improves precision with few components. Note that in general
users should rather increase `n_oversamples` before increasing `n_iter`
as the principle of the randomized method is to avoid usage of these
more costly power iteration steps. When `n_components` is equal to
or greater than the effective matrix rank and the spectrum does not
present a slow decay, `n_iter=0` or `1` should even work fine in theory
(see [1]_ page 9).
.. versionchanged:: 0.18
power_iteration_normalizer : {'auto', 'QR', 'LU', 'none'}, default='auto'
Whether the power iterations are normalized with step-by-step
QR factorization (the slowest but most accurate), 'none'
(the fastest but numerically unstable when `n_iter` is large, e.g.
typically 5 or larger), or 'LU' factorization (numerically stable
but can lose slightly in accuracy). The 'auto' mode applies no
normalization if `n_iter` <= 2 and switches to LU otherwise.
.. versionadded:: 0.18
transpose : bool or 'auto', default='auto'
Whether the algorithm should be applied to M.T instead of M. The
result should approximately be the same. The 'auto' mode will
trigger the transposition if M.shape[1] > M.shape[0] since this
implementation of randomized SVD tends to be a little faster in that
case.
.. versionchanged:: 0.18
flip_sign : bool, default=True
The output of a singular value decomposition is only unique up to a
permutation of the signs of the singular vectors. If `flip_sign` is
set to `True`, the sign ambiguity is resolved by making the largest
loadings for each component in the left singular vectors positive.
random_state : int, RandomState instance or None, default=None
The seed of the pseudo random number generator to use when
shuffling the data, i.e. getting the random vectors to initialize
the algorithm. Pass an int for reproducible results across multiple
function calls. See :term:`Glossary <random_state>`.
.. versionchanged:: 1.2
The default value changed from 0 to None.
svd_lapack_driver : {"gesdd", "gesvd"}, default="gesdd"
Whether to use the more efficient divide-and-conquer approach
(`"gesdd"`) or more general rectangular approach (`"gesvd"`) to compute
the SVD of the matrix B, which is the projection of M into a low
dimensional subspace, as described in [1]_.
.. versionadded:: 1.2
Returns
-------
u : ndarray of shape (n_samples, n_components)
Unitary matrix having left singular vectors with signs flipped as columns.
s : ndarray of shape (n_components,)
The singular values, sorted in non-increasing order.
vh : ndarray of shape (n_components, n_features)
Unitary matrix having right singular vectors with signs flipped as rows.
Notes
-----
This algorithm finds a (usually very good) approximate truncated
singular value decomposition using randomization to speed up the
computations. It is particularly fast on large matrices on which
you wish to extract only a small number of components. In order to
obtain further speed up, `n_iter` can be set <=2 (at the cost of
loss of precision). To increase the precision it is recommended to
increase `n_oversamples`, up to `2*k-n_components` where k is the
effective rank. Usually, `n_components` is chosen to be greater than k
so increasing `n_oversamples` up to `n_components` should be enough.
References
----------
.. [1] :arxiv:`"Finding structure with randomness:
Stochastic algorithms for constructing approximate matrix decompositions"
<0909.4061>`
Halko, et al. (2009)
.. [2] A randomized algorithm for the decomposition of matrices
Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert
.. [3] An implementation of a randomized algorithm for principal component
analysis A. Szlam et al. 2014
Examples
--------
>>> import numpy as np
>>> from sklearn.utils.extmath import randomized_svd
>>> a = np.array([[1, 2, 3, 5],
... [3, 4, 5, 6],
... [7, 8, 9, 10]])
>>> U, s, Vh = randomized_svd(a, n_components=2, random_state=0)
>>> U.shape, s.shape, Vh.shape
((3, 2), (2,), (2, 4))
|
randomized_svd
|
python
|
scikit-learn/scikit-learn
|
sklearn/utils/extmath.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/extmath.py
|
BSD-3-Clause
|
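A hedged sketch of the `n_oversamples` / `n_iter` trade-off discussed in the `randomized_svd` docstring above; this is an editorial addition, and the matrix, noise level, and parameter values are illustrative assumptions, not recommendations from the source.
import numpy as np
from sklearn.utils.extmath import randomized_svd

rng = np.random.RandomState(0)
# Rank-10 signal plus noise: the spectrum has a slowly decaying tail, which is
# the regime where extra power iterations (or oversampling) help.
A = rng.randn(200, 10) @ rng.randn(10, 150) + 0.1 * rng.randn(200, 150)
exact_top5 = np.linalg.svd(A, compute_uv=False)[:5]

for n_iter in (0, 4):
    _, s, _ = randomized_svd(A, n_components=5, n_iter=n_iter,
                             n_oversamples=10, random_state=0)
    # The largest deviation from the exact leading singular values typically
    # shrinks as n_iter (or n_oversamples) grows.
    print(f"n_iter={n_iter}: max |s - s_exact| = {np.max(np.abs(s - exact_top5)):.2e}")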
def weighted_mode(a, w, *, axis=0):
"""Return an array of the weighted modal (most common) value in the passed array.
If there is more than one such value, only the first is returned.
The bin-count for the modal bins is also returned.
This is an extension of the algorithm in scipy.stats.mode.
Parameters
----------
a : array-like of shape (n_samples,)
Array of which values to find mode(s).
w : array-like of shape (n_samples,)
Array of weights for each value.
axis : int, default=0
Axis along which to operate. Default is 0, i.e. the first axis.
Returns
-------
vals : ndarray
Array of modal values.
score : ndarray
Array of weighted counts for each mode.
See Also
--------
scipy.stats.mode: Calculates the Modal (most common) value of array elements
along specified axis.
Examples
--------
>>> from sklearn.utils.extmath import weighted_mode
>>> x = [4, 1, 4, 2, 4, 2]
>>> weights = [1, 1, 1, 1, 1, 1]
>>> weighted_mode(x, weights)
(array([4.]), array([3.]))
The value 4 appears three times: with uniform weights, the result is
simply the mode of the distribution.
>>> weights = [1, 3, 0.5, 1.5, 1, 2] # deweight the 4's
>>> weighted_mode(x, weights)
(array([2.]), array([3.5]))
The value 2 has the highest score: it appears twice with weights of
1.5 and 2: the sum of these is 3.5.
"""
if axis is None:
a = np.ravel(a)
w = np.ravel(w)
axis = 0
else:
a = np.asarray(a)
w = np.asarray(w)
if a.shape != w.shape:
w = np.full(a.shape, w, dtype=w.dtype)
scores = np.unique(np.ravel(a)) # get ALL unique values
testshape = list(a.shape)
testshape[axis] = 1
oldmostfreq = np.zeros(testshape)
oldcounts = np.zeros(testshape)
for score in scores:
template = np.zeros(a.shape)
ind = a == score
template[ind] = w[ind]
counts = np.expand_dims(np.sum(template, axis), axis)
mostfrequent = np.where(counts > oldcounts, score, oldmostfreq)
oldcounts = np.maximum(counts, oldcounts)
oldmostfreq = mostfrequent
return mostfrequent, oldcounts
|
Return an array of the weighted modal (most common) value in the passed array.
If there is more than one such value, only the first is returned.
The bin-count for the modal bins is also returned.
This is an extension of the algorithm in scipy.stats.mode.
Parameters
----------
a : array-like of shape (n_samples,)
Array of which values to find mode(s).
w : array-like of shape (n_samples,)
Array of weights for each value.
axis : int, default=0
Axis along which to operate. Default is 0, i.e. the first axis.
Returns
-------
vals : ndarray
Array of modal values.
score : ndarray
Array of weighted counts for each mode.
See Also
--------
scipy.stats.mode: Calculates the Modal (most common) value of array elements
along specified axis.
Examples
--------
>>> from sklearn.utils.extmath import weighted_mode
>>> x = [4, 1, 4, 2, 4, 2]
>>> weights = [1, 1, 1, 1, 1, 1]
>>> weighted_mode(x, weights)
(array([4.]), array([3.]))
The value 4 appears three times: with uniform weights, the result is
simply the mode of the distribution.
>>> weights = [1, 3, 0.5, 1.5, 1, 2] # deweight the 4's
>>> weighted_mode(x, weights)
(array([2.]), array([3.5]))
The value 2 has the highest score: it appears twice with weights of
1.5 and 2: the sum of these is 3.5.
|
weighted_mode
|
python
|
scikit-learn/scikit-learn
|
sklearn/utils/extmath.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/extmath.py
|
BSD-3-Clause
|
def cartesian(arrays, out=None):
"""Generate a cartesian product of input arrays.
Parameters
----------
arrays : list of array-like
1-D arrays to form the cartesian product of.
out : ndarray of shape (M, len(arrays)), default=None
Array to place the cartesian product in.
Returns
-------
out : ndarray of shape (M, len(arrays))
Array containing the cartesian products formed of input arrays.
If not provided, the `dtype` of the output array is set to the most
permissive `dtype` of the input arrays, according to NumPy type
promotion.
.. versionadded:: 1.2
Add support for arrays of different types.
Notes
-----
This function may not be used on more than 32 arrays
because the underlying numpy functions do not support it.
Examples
--------
>>> from sklearn.utils.extmath import cartesian
>>> cartesian(([1, 2, 3], [4, 5], [6, 7]))
array([[1, 4, 6],
[1, 4, 7],
[1, 5, 6],
[1, 5, 7],
[2, 4, 6],
[2, 4, 7],
[2, 5, 6],
[2, 5, 7],
[3, 4, 6],
[3, 4, 7],
[3, 5, 6],
[3, 5, 7]])
"""
arrays = [np.asarray(x) for x in arrays]
shape = (len(x) for x in arrays)
ix = np.indices(shape)
ix = ix.reshape(len(arrays), -1).T
if out is None:
dtype = np.result_type(*arrays) # find the most permissive dtype
out = np.empty_like(ix, dtype=dtype)
for n, arr in enumerate(arrays):
out[:, n] = arrays[n][ix[:, n]]
return out
|
Generate a cartesian product of input arrays.
Parameters
----------
arrays : list of array-like
1-D arrays to form the cartesian product of.
out : ndarray of shape (M, len(arrays)), default=None
Array to place the cartesian product in.
Returns
-------
out : ndarray of shape (M, len(arrays))
Array containing the cartesian products formed of input arrays.
If not provided, the `dtype` of the output array is set to the most
permissive `dtype` of the input arrays, according to NumPy type
promotion.
.. versionadded:: 1.2
Add support for arrays of different types.
Notes
-----
This function may not be used on more than 32 arrays
because the underlying numpy functions do not support it.
Examples
--------
>>> from sklearn.utils.extmath import cartesian
>>> cartesian(([1, 2, 3], [4, 5], [6, 7]))
array([[1, 4, 6],
[1, 4, 7],
[1, 5, 6],
[1, 5, 7],
[2, 4, 6],
[2, 4, 7],
[2, 5, 6],
[2, 5, 7],
[3, 4, 6],
[3, 4, 7],
[3, 5, 6],
[3, 5, 7]])
|
cartesian
|
python
|
scikit-learn/scikit-learn
|
sklearn/utils/extmath.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/extmath.py
|
BSD-3-Clause
|
def svd_flip(u, v, u_based_decision=True):
"""Sign correction to ensure deterministic output from SVD.
Adjusts the columns of u and the rows of v such that the loadings in the
columns in u that are largest in absolute value are always positive.
If u_based_decision is False, then the same sign correction is applied to
v so that the loadings in the rows of v that are largest in absolute value
are always positive.
Parameters
----------
u : ndarray
Parameters u and v are the output of `linalg.svd` or
:func:`~sklearn.utils.extmath.randomized_svd`, with matching inner
dimensions so one can compute `np.dot(u * s, v)`.
u can be None if `u_based_decision` is False.
v : ndarray
Parameters u and v are the output of `linalg.svd` or
:func:`~sklearn.utils.extmath.randomized_svd`, with matching inner
dimensions so one can compute `np.dot(u * s, v)`. The input v should
really be called vt to be consistent with scipy's output.
v can be None if `u_based_decision` is True.
u_based_decision : bool, default=True
If True, use the columns of u as the basis for sign flipping.
Otherwise, use the rows of v. The choice of which variable to base the
decision on is generally algorithm dependent.
Returns
-------
u_adjusted : ndarray
Array u with adjusted columns and the same dimensions as u.
v_adjusted : ndarray
Array v with adjusted rows and the same dimensions as v.
"""
xp, _ = get_namespace(*[a for a in [u, v] if a is not None])
if u_based_decision:
# columns of u, rows of v, or equivalently rows of u.T and v
max_abs_u_cols = xp.argmax(xp.abs(u.T), axis=1)
shift = xp.arange(u.T.shape[0], device=device(u))
indices = max_abs_u_cols + shift * u.T.shape[1]
signs = xp.sign(xp.take(xp.reshape(u.T, (-1,)), indices, axis=0))
u *= signs[np.newaxis, :]
if v is not None:
v *= signs[:, np.newaxis]
else:
# rows of v, columns of u
max_abs_v_rows = xp.argmax(xp.abs(v), axis=1)
shift = xp.arange(v.shape[0], device=device(v))
indices = max_abs_v_rows + shift * v.shape[1]
signs = xp.sign(xp.take(xp.reshape(v, (-1,)), indices, axis=0))
if u is not None:
u *= signs[np.newaxis, :]
v *= signs[:, np.newaxis]
return u, v
|
Sign correction to ensure deterministic output from SVD.
Adjusts the columns of u and the rows of v such that the loadings in the
columns in u that are largest in absolute value are always positive.
If u_based_decision is False, then the same sign correction is applied to
v so that the loadings in the rows of v that are largest in absolute value
are always positive.
Parameters
----------
u : ndarray
Parameters u and v are the output of `linalg.svd` or
:func:`~sklearn.utils.extmath.randomized_svd`, with matching inner
dimensions so one can compute `np.dot(u * s, v)`.
u can be None if `u_based_decision` is False.
v : ndarray
Parameters u and v are the output of `linalg.svd` or
:func:`~sklearn.utils.extmath.randomized_svd`, with matching inner
dimensions so one can compute `np.dot(u * s, v)`. The input v should
really be called vt to be consistent with scipy's output.
v can be None if `u_based_decision` is True.
u_based_decision : bool, default=True
If True, use the columns of u as the basis for sign flipping.
Otherwise, use the rows of v. The choice of which variable to base the
decision on is generally algorithm dependent.
Returns
-------
u_adjusted : ndarray
Array u with adjusted columns and the same dimensions as u.
v_adjusted : ndarray
Array v with adjusted rows and the same dimensions as v.
|
svd_flip
|
python
|
scikit-learn/scikit-learn
|
sklearn/utils/extmath.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/extmath.py
|
BSD-3-Clause
|
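Since `svd_flip` ships no Examples section, here is a minimal sketch (an editorial addition, assuming the `scipy.linalg.svd` conventions mentioned in the docstring) showing that the flip is deterministic and leaves the reconstruction unchanged.
import numpy as np
from scipy import linalg
from sklearn.utils.extmath import svd_flip

rng = np.random.RandomState(0)
X = rng.randn(6, 4)
u, s, vt = linalg.svd(X, full_matrices=False)
u, vt = svd_flip(u, vt)  # u_based_decision=True by default

# The largest-magnitude loading of every left singular vector is now positive ...
cols = np.arange(u.shape[1])
print(np.sign(u[np.argmax(np.abs(u), axis=0), cols]))  # all ones
# ... and the product u @ diag(s) @ vt still reconstructs X.
print(np.allclose(u @ np.diag(s) @ vt, X))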
def softmax(X, copy=True):
"""
Calculate the softmax function.
The softmax function is calculated by
np.exp(X) / np.sum(np.exp(X), axis=1)
This will cause overflow when large values are exponentiated.
Hence the largest value in each row is subtracted from each data
point to prevent this.
Parameters
----------
X : array-like of float of shape (M, N)
Argument to the logistic function.
copy : bool, default=True
Copy X or not.
Returns
-------
out : ndarray of shape (M, N)
Softmax function evaluated at every point in X.
"""
xp, is_array_api_compliant = get_namespace(X)
if copy:
X = xp.asarray(X, copy=True)
max_prob = xp.reshape(xp.max(X, axis=1), (-1, 1))
X -= max_prob
if _is_numpy_namespace(xp):
# optimization for NumPy arrays
np.exp(X, out=np.asarray(X))
else:
# array_api does not have `out=`
X = xp.exp(X)
sum_prob = xp.reshape(xp.sum(X, axis=1), (-1, 1))
X /= sum_prob
return X
|
Calculate the softmax function.
The softmax function is calculated by
np.exp(X) / np.sum(np.exp(X), axis=1)
This will cause overflow when large values are exponentiated.
Hence the largest value in each row is subtracted from each data
point to prevent this.
Parameters
----------
X : array-like of float of shape (M, N)
Argument to the logistic function.
copy : bool, default=True
Copy X or not.
Returns
-------
out : ndarray of shape (M, N)
Softmax function evaluated at every point in X.
|
softmax
|
python
|
scikit-learn/scikit-learn
|
sklearn/utils/extmath.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/extmath.py
|
BSD-3-Clause
|
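A short, hedged usage sketch for `softmax` (editorial addition): the second row holds large values to illustrate that the row-wise max subtraction described above prevents overflow.
import numpy as np
from sklearn.utils.extmath import softmax

X = np.array([[1.0, 2.0, 3.0],
              [1000.0, 1001.0, 1002.0]])  # naive exp() would overflow here
P = softmax(X, copy=True)
print(P)
print(P.sum(axis=1))  # every row sums to 1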
def make_nonnegative(X, min_value=0):
"""Ensure `X.min()` >= `min_value`.
Parameters
----------
X : array-like
The matrix to make non-negative.
min_value : float, default=0
The threshold value.
Returns
-------
array-like
The thresholded array.
Raises
------
ValueError
When X is sparse.
"""
min_ = X.min()
if min_ < min_value:
if sparse.issparse(X):
raise ValueError(
"Cannot make the data matrix"
" nonnegative because it is sparse."
" Adding a value to every entry would"
" make it no longer sparse."
)
X = X + (min_value - min_)
return X
|
Ensure `X.min()` >= `min_value`.
Parameters
----------
X : array-like
The matrix to make non-negative.
min_value : float, default=0
The threshold value.
Returns
-------
array-like
The thresholded array.
Raises
------
ValueError
When X is sparse.
|
make_nonnegative
|
python
|
scikit-learn/scikit-learn
|
sklearn/utils/extmath.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/extmath.py
|
BSD-3-Clause
|
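A tiny illustrative call for `make_nonnegative` (editorial addition, dense input only, since the docstring notes that sparse input raises a ValueError).
import numpy as np
from sklearn.utils.extmath import make_nonnegative

X = np.array([[-2.0, 1.0],
              [0.5, 3.0]])
print(make_nonnegative(X))               # shifted so that the minimum becomes 0
print(make_nonnegative(X, min_value=1))  # shifted so that the minimum becomes 1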
def _safe_accumulator_op(op, x, *args, **kwargs):
"""
This function provides numpy accumulator functions with a float64 dtype
when used on a floating point input. This prevents accumulator overflow on
smaller floating point dtypes.
Parameters
----------
op : function
A numpy accumulator function such as np.mean or np.sum.
x : ndarray
A numpy array to apply the accumulator function.
*args : positional arguments
Positional arguments passed to the accumulator function after the
input x.
**kwargs : keyword arguments
Keyword arguments passed to the accumulator function.
Returns
-------
result
The output of the accumulator function passed to this function.
"""
if np.issubdtype(x.dtype, np.floating) and x.dtype.itemsize < 8:
result = op(x, *args, **kwargs, dtype=np.float64)
else:
result = op(x, *args, **kwargs)
return result
|
This function provides numpy accumulator functions with a float64 dtype
when used on a floating point input. This prevents accumulator overflow on
smaller floating point dtypes.
Parameters
----------
op : function
A numpy accumulator function such as np.mean or np.sum.
x : ndarray
A numpy array to apply the accumulator function.
*args : positional arguments
Positional arguments passed to the accumulator function after the
input x.
**kwargs : keyword arguments
Keyword arguments passed to the accumulator function.
Returns
-------
result
The output of the accumulator function passed to this function.
|
_safe_accumulator_op
|
python
|
scikit-learn/scikit-learn
|
sklearn/utils/extmath.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/extmath.py
|
BSD-3-Clause
|
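A hedged sketch of what `_safe_accumulator_op` protects against (editorial addition; note this is a private helper, so the import below is an assumption based on the path column of this row).
import numpy as np
from sklearn.utils.extmath import _safe_accumulator_op

x = np.full(10_000, 100.0, dtype=np.float16)
# float16 accumulation typically overflows long before the true sum of 1e6 ...
print(np.sum(x))                        # overflows to inf with a float16 accumulator
# ... while the helper switches the accumulator to float64 for small float dtypes.
print(_safe_accumulator_op(np.sum, x))  # 1000000.0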
def _incremental_mean_and_var(
X, last_mean, last_variance, last_sample_count, sample_weight=None
):
"""Calculate mean update and a Youngs and Cramer variance update.
If sample_weight is given, the weighted mean and variance is computed.
Update a given mean and (possibly) variance according to new data given
in X. last_mean is always required to compute the new mean.
If last_variance is None, no variance is computed and None is returned for
updated_variance.
From the paper "Algorithms for computing the sample variance: analysis and
recommendations", by Chan, Golub, and LeVeque.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Data to use for variance update.
last_mean : array-like of shape (n_features,)
last_variance : array-like of shape (n_features,)
last_sample_count : array-like of shape (n_features,)
The number of samples encountered until now if sample_weight is None.
If sample_weight is not None, this is the sum of sample_weight
encountered.
sample_weight : array-like of shape (n_samples,) or None
Sample weights. If None, compute the unweighted mean/variance.
Returns
-------
updated_mean : ndarray of shape (n_features,)
updated_variance : ndarray of shape (n_features,)
None if last_variance was None.
updated_sample_count : ndarray of shape (n_features,)
Notes
-----
NaNs are ignored during the algorithm.
References
----------
T. Chan, G. Golub, R. LeVeque. Algorithms for computing the sample
variance: analysis and recommendations, The American Statistician, Vol. 37, No. 3,
pp. 242-247
Also, see the sparse implementation of this in
`utils.sparsefuncs.incr_mean_variance_axis` and
`utils.sparsefuncs_fast.incr_mean_variance_axis0`
"""
# old = stats until now
# new = the current increment
# updated = the aggregated stats
last_sum = last_mean * last_sample_count
X_nan_mask = np.isnan(X)
if np.any(X_nan_mask):
sum_op = np.nansum
else:
sum_op = np.sum
if sample_weight is not None:
# equivalent to np.nansum(X * sample_weight, axis=0)
# safer because np.float64(X*W) != np.float64(X)*np.float64(W)
new_sum = _safe_accumulator_op(
np.matmul, sample_weight, np.where(X_nan_mask, 0, X)
)
new_sample_count = _safe_accumulator_op(
np.sum, sample_weight[:, None] * (~X_nan_mask), axis=0
)
else:
new_sum = _safe_accumulator_op(sum_op, X, axis=0)
n_samples = X.shape[0]
new_sample_count = n_samples - np.sum(X_nan_mask, axis=0)
updated_sample_count = last_sample_count + new_sample_count
updated_mean = (last_sum + new_sum) / updated_sample_count
if last_variance is None:
updated_variance = None
else:
T = new_sum / new_sample_count
temp = X - T
if sample_weight is not None:
# equivalent to np.nansum((X-T)**2 * sample_weight, axis=0)
# safer because np.float64(X*W) != np.float64(X)*np.float64(W)
correction = _safe_accumulator_op(
np.matmul, sample_weight, np.where(X_nan_mask, 0, temp)
)
temp **= 2
new_unnormalized_variance = _safe_accumulator_op(
np.matmul, sample_weight, np.where(X_nan_mask, 0, temp)
)
else:
correction = _safe_accumulator_op(sum_op, temp, axis=0)
temp **= 2
new_unnormalized_variance = _safe_accumulator_op(sum_op, temp, axis=0)
# correction term of the corrected 2 pass algorithm.
# See "Algorithms for computing the sample variance: analysis
# and recommendations", by Chan, Golub, and LeVeque.
new_unnormalized_variance -= correction**2 / new_sample_count
last_unnormalized_variance = last_variance * last_sample_count
with np.errstate(divide="ignore", invalid="ignore"):
last_over_new_count = last_sample_count / new_sample_count
updated_unnormalized_variance = (
last_unnormalized_variance
+ new_unnormalized_variance
+ last_over_new_count
/ updated_sample_count
* (last_sum / last_over_new_count - new_sum) ** 2
)
zeros = last_sample_count == 0
updated_unnormalized_variance[zeros] = new_unnormalized_variance[zeros]
updated_variance = updated_unnormalized_variance / updated_sample_count
return updated_mean, updated_variance, updated_sample_count
|
Calculate mean update and a Youngs and Cramer variance update.
If sample_weight is given, the weighted mean and variance is computed.
Update a given mean and (possibly) variance according to new data given
in X. last_mean is always required to compute the new mean.
If last_variance is None, no variance is computed and None is returned for
updated_variance.
From the paper "Algorithms for computing the sample variance: analysis and
recommendations", by Chan, Golub, and LeVeque.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Data to use for variance update.
last_mean : array-like of shape (n_features,)
last_variance : array-like of shape (n_features,)
last_sample_count : array-like of shape (n_features,)
The number of samples encountered until now if sample_weight is None.
If sample_weight is not None, this is the sum of sample_weight
encountered.
sample_weight : array-like of shape (n_samples,) or None
Sample weights. If None, compute the unweighted mean/variance.
Returns
-------
updated_mean : ndarray of shape (n_features,)
updated_variance : ndarray of shape (n_features,)
None if last_variance was None.
updated_sample_count : ndarray of shape (n_features,)
Notes
-----
NaNs are ignored during the algorithm.
References
----------
T. Chan, G. Golub, R. LeVeque. Algorithms for computing the sample
variance: analysis and recommendations, The American Statistician, Vol. 37, No. 3,
pp. 242-247
Also, see the sparse implementation of this in
`utils.sparsefuncs.incr_mean_variance_axis` and
`utils.sparsefuncs_fast.incr_mean_variance_axis0`
|
_incremental_mean_and_var
|
python
|
scikit-learn/scikit-learn
|
sklearn/utils/extmath.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/extmath.py
|
BSD-3-Clause
|
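A hedged sketch of the incremental update (editorial addition; `_incremental_mean_and_var` is a private helper, and initialising the running statistics with zero arrays is an assumption consistent with an empty history).
import numpy as np
from sklearn.utils.extmath import _incremental_mean_and_var

rng = np.random.RandomState(0)
X = rng.randn(100, 3)

# Empty history: zero mean, zero variance, zero samples seen so far.
mean, var, count = np.zeros(3), np.zeros(3), np.zeros(3)
for batch in (X[:60], X[60:]):
    mean, var, count = _incremental_mean_and_var(batch, mean, var, count)

# The streamed statistics agree with the one-shot NumPy results.
print(np.allclose(mean, X.mean(axis=0)), np.allclose(var, X.var(axis=0)))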
def _deterministic_vector_sign_flip(u):
"""Modify the sign of vectors for reproducibility.
Flips the sign of elements of all the vectors (rows of u) such that
the absolute maximum element of each vector is positive.
Parameters
----------
u : ndarray
Array with vectors as its rows.
Returns
-------
u_flipped : ndarray with same shape as u
Array with the sign flipped vectors as its rows.
"""
max_abs_rows = np.argmax(np.abs(u), axis=1)
signs = np.sign(u[range(u.shape[0]), max_abs_rows])
u *= signs[:, np.newaxis]
return u
|
Modify the sign of vectors for reproducibility.
Flips the sign of elements of all the vectors (rows of u) such that
the absolute maximum element of each vector is positive.
Parameters
----------
u : ndarray
Array with vectors as its rows.
Returns
-------
u_flipped : ndarray with same shape as u
Array with the sign flipped vectors as its rows.
|
_deterministic_vector_sign_flip
|
python
|
scikit-learn/scikit-learn
|
sklearn/utils/extmath.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/extmath.py
|
BSD-3-Clause
|
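A minimal sketch for `_deterministic_vector_sign_flip` (editorial addition; note the function modifies its input in place and also returns it, hence the copy below).
import numpy as np
from sklearn.utils.extmath import _deterministic_vector_sign_flip

u = np.array([[1.0, -3.0, 2.0],
              [-1.0, 0.5, 4.0]])
flipped = _deterministic_vector_sign_flip(u.copy())
# Row 0 is negated so that its largest-magnitude entry (-3) becomes positive;
# row 1 already has a positive largest-magnitude entry (4) and is unchanged.
print(flipped)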
def stable_cumsum(arr, axis=None, rtol=1e-05, atol=1e-08):
"""Use high precision for cumsum and check that final value matches sum.
Warns if the final cumulative sum does not match the sum (up to the chosen
tolerance).
Parameters
----------
arr : array-like
To be cumulatively summed as flat.
axis : int, default=None
Axis along which the cumulative sum is computed.
The default (None) is to compute the cumsum over the flattened array.
rtol : float, default=1e-05
Relative tolerance, see ``np.allclose``.
atol : float, default=1e-08
Absolute tolerance, see ``np.allclose``.
Returns
-------
out : ndarray
Array with the cumulative sums along the chosen axis.
"""
out = np.cumsum(arr, axis=axis, dtype=np.float64)
expected = np.sum(arr, axis=axis, dtype=np.float64)
if not np.allclose(
out.take(-1, axis=axis), expected, rtol=rtol, atol=atol, equal_nan=True
):
warnings.warn(
(
"cumsum was found to be unstable: "
"its last element does not correspond to sum"
),
RuntimeWarning,
)
return out
|
Use high precision for cumsum and check that final value matches sum.
Warns if the final cumulative sum does not match the sum (up to the chosen
tolerance).
Parameters
----------
arr : array-like
To be cumulatively summed as flat.
axis : int, default=None
Axis along which the cumulative sum is computed.
The default (None) is to compute the cumsum over the flattened array.
rtol : float, default=1e-05
Relative tolerance, see ``np.allclose``.
atol : float, default=1e-08
Absolute tolerance, see ``np.allclose``.
Returns
-------
out : ndarray
Array with the cumulative sums along the chosen axis.
|
stable_cumsum
|
python
|
scikit-learn/scikit-learn
|
sklearn/utils/extmath.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/extmath.py
|
BSD-3-Clause
|
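A small usage sketch for `stable_cumsum` (editorial addition): the input is float32 but the cumulative sum is accumulated in float64, and a RuntimeWarning would be emitted if the last element drifted away from the plain sum.
import numpy as np
from sklearn.utils.extmath import stable_cumsum

x = np.array([0.1, 0.2, 0.3, 0.4], dtype=np.float32)
out = stable_cumsum(x)
print(out, out.dtype)  # cumulative sums, accumulated as float64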
def _nanaverage(a, weights=None):
"""Compute the weighted average, ignoring NaNs.
Parameters
----------
a : ndarray
Array containing data to be averaged.
weights : array-like, default=None
An array of weights associated with the values in a. Each value in a
contributes to the average according to its associated weight. The
weights array should be 1-D, with the same shape as a. If `weights=None`,
then all data in a are assumed to have a weight equal to one.
Returns
-------
weighted_average : float
The weighted average.
Notes
-----
This is a wrapper combining :func:`numpy.average` and :func:`numpy.nanmean`, so
that `numpy.nan` values are ignored in the average and weights can
be passed. Note that when possible, we delegate to the underlying NumPy functions.
"""
xp, _ = get_namespace(a)
if a.shape[0] == 0:
return xp.nan
mask = xp.isnan(a)
if xp.all(mask):
return xp.nan
if weights is None:
return _nanmean(a, xp=xp)
weights = xp.asarray(weights)
a, weights = a[~mask], weights[~mask]
try:
return _average(a, weights=weights)
except ZeroDivisionError:
# this is when all weights are zero, then ignore them
return _average(a)
|
Compute the weighted average, ignoring NaNs.
Parameters
----------
a : ndarray
Array containing data to be averaged.
weights : array-like, default=None
An array of weights associated with the values in a. Each value in a
contributes to the average according to its associated weight. The
weights array should be 1-D, with the same shape as a. If `weights=None`,
then all data in a are assumed to have a weight equal to one.
Returns
-------
weighted_average : float
The weighted average.
Notes
-----
This is a wrapper combining :func:`numpy.average` and :func:`numpy.nanmean`, so
that `numpy.nan` values are ignored in the average and weights can
be passed. Note that when possible, we delegate to the underlying NumPy functions.
|
_nanaverage
|
python
|
scikit-learn/scikit-learn
|
sklearn/utils/extmath.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/extmath.py
|
BSD-3-Clause
|
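A hedged sketch for the private `_nanaverage` helper (editorial addition), showing that NaN entries and their weights are dropped before averaging.
import numpy as np
from sklearn.utils.extmath import _nanaverage

a = np.array([1.0, np.nan, 3.0])
w = np.array([1.0, 10.0, 3.0])
print(_nanaverage(a))             # unweighted NaN-ignoring mean: 2.0
print(_nanaverage(a, weights=w))  # (1*1 + 3*3) / (1 + 3) = 2.5; the NaN's weight is dropped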
def safe_sqr(X, *, copy=True):
"""Element wise squaring of array-likes and sparse matrices.
Parameters
----------
X : {array-like, ndarray, sparse matrix}
copy : bool, default=True
Whether to create a copy of X and operate on it or to perform
inplace computation (default behaviour).
Returns
-------
X ** 2 : element wise square
Return the element-wise square of the input.
Examples
--------
>>> from sklearn.utils import safe_sqr
>>> safe_sqr([1, 2, 3])
array([1, 4, 9])
"""
X = check_array(X, accept_sparse=["csr", "csc", "coo"], ensure_2d=False)
if sparse.issparse(X):
if copy:
X = X.copy()
X.data **= 2
else:
if copy:
X = X**2
else:
X **= 2
return X
|
Element wise squaring of array-likes and sparse matrices.
Parameters
----------
X : {array-like, ndarray, sparse matrix}
copy : bool, default=True
Whether to create a copy of X and operate on it or to perform
inplace computation (default behaviour).
Returns
-------
X ** 2 : element wise square
Return the element-wise square of the input.
Examples
--------
>>> from sklearn.utils import safe_sqr
>>> safe_sqr([1, 2, 3])
array([1, 4, 9])
|
safe_sqr
|
python
|
scikit-learn/scikit-learn
|
sklearn/utils/extmath.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/extmath.py
|
BSD-3-Clause
|
def _approximate_mode(class_counts, n_draws, rng):
"""Computes approximate mode of multivariate hypergeometric.
This is an approximation to the mode of the multivariate
hypergeometric given by class_counts and n_draws.
It shouldn't be off by more than one.
It is the most likely outcome of drawing n_draws many
samples from the population given by class_counts.
Parameters
----------
class_counts : ndarray of int
Population per class.
n_draws : int
Number of draws (samples to draw) from the overall population.
rng : random state
Used to break ties.
Returns
-------
sampled_classes : ndarray of int
Number of samples drawn from each class.
np.sum(sampled_classes) == n_draws
Examples
--------
>>> import numpy as np
>>> from sklearn.utils.extmath import _approximate_mode
>>> _approximate_mode(class_counts=np.array([4, 2]), n_draws=3, rng=0)
array([2, 1])
>>> _approximate_mode(class_counts=np.array([5, 2]), n_draws=4, rng=0)
array([3, 1])
>>> _approximate_mode(class_counts=np.array([2, 2, 2, 1]),
... n_draws=2, rng=0)
array([0, 1, 1, 0])
>>> _approximate_mode(class_counts=np.array([2, 2, 2, 1]),
... n_draws=2, rng=42)
array([1, 1, 0, 0])
"""
rng = check_random_state(rng)
# this computes a bad approximation to the mode of the
# multivariate hypergeometric given by class_counts and n_draws
continuous = class_counts / class_counts.sum() * n_draws
# floored means we don't overshoot n_samples, but probably undershoot
floored = np.floor(continuous)
# we add samples according to how much "left over" probability
# they had, until we arrive at n_samples
need_to_add = int(n_draws - floored.sum())
if need_to_add > 0:
remainder = continuous - floored
values = np.sort(np.unique(remainder))[::-1]
# add according to remainder, but break ties
# randomly to avoid biases
for value in values:
(inds,) = np.where(remainder == value)
# if we need_to_add less than what's in inds
# we draw randomly from them.
# if we need to add more, we add them all and
# go to the next value
add_now = min(len(inds), need_to_add)
inds = rng.choice(inds, size=add_now, replace=False)
floored[inds] += 1
need_to_add -= add_now
if need_to_add == 0:
break
return floored.astype(int)
|
Computes approximate mode of multivariate hypergeometric.
This is an approximation to the mode of the multivariate
hypergeometric given by class_counts and n_draws.
It shouldn't be off by more than one.
It is the most likely outcome of drawing n_draws many
samples from the population given by class_counts.
Parameters
----------
class_counts : ndarray of int
Population per class.
n_draws : int
Number of draws (samples to draw) from the overall population.
rng : random state
Used to break ties.
Returns
-------
sampled_classes : ndarray of int
Number of samples drawn from each class.
np.sum(sampled_classes) == n_draws
Examples
--------
>>> import numpy as np
>>> from sklearn.utils.extmath import _approximate_mode
>>> _approximate_mode(class_counts=np.array([4, 2]), n_draws=3, rng=0)
array([2, 1])
>>> _approximate_mode(class_counts=np.array([5, 2]), n_draws=4, rng=0)
array([3, 1])
>>> _approximate_mode(class_counts=np.array([2, 2, 2, 1]),
... n_draws=2, rng=0)
array([0, 1, 1, 0])
>>> _approximate_mode(class_counts=np.array([2, 2, 2, 1]),
... n_draws=2, rng=42)
array([1, 1, 0, 0])
|
_approximate_mode
|
python
|
scikit-learn/scikit-learn
|
sklearn/utils/extmath.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/extmath.py
|
BSD-3-Clause
|
def _yeojohnson_lambda(_neg_log_likelihood, x):
"""Estimate the optimal Yeo-Johnson transformation parameter (lambda).
This function provides a compatibility workaround for versions of SciPy
older than 1.9.0, where `scipy.stats.yeojohnson` did not return
the estimated lambda directly.
Parameters
----------
_neg_log_likelihood : callable
A function that computes the negative log-likelihood of the Yeo-Johnson
transformation for a given lambda. Used only for SciPy versions < 1.9.0.
x : array-like
Input data to estimate the Yeo-Johnson transformation parameter.
Returns
-------
lmbda : float
The estimated lambda parameter for the Yeo-Johnson transformation.
"""
min_scipy_version = "1.9.0"
if sp_version < parse_version(min_scipy_version):
# choosing bracket -2, 2 like for boxcox
return optimize.brent(_neg_log_likelihood, brack=(-2, 2))
_, lmbda = scipy.stats.yeojohnson(x, lmbda=None)
return lmbda
|
Estimate the optimal Yeo-Johnson transformation parameter (lambda).
This function provides a compatibility workaround for versions of SciPy
older than 1.9.0, where `scipy.stats.yeojohnson` did not return
the estimated lambda directly.
Parameters
----------
_neg_log_likelihood : callable
A function that computes the negative log-likelihood of the Yeo-Johnson
transformation for a given lambda. Used only for SciPy versions < 1.9.0.
x : array-like
Input data to estimate the Yeo-Johnson transformation parameter.
Returns
-------
lmbda : float
The estimated lambda parameter for the Yeo-Johnson transformation.
|
_yeojohnson_lambda
|
python
|
scikit-learn/scikit-learn
|
sklearn/utils/fixes.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/fixes.py
|
BSD-3-Clause
|
def _preserve_dia_indices_dtype(
sparse_container, original_container_format, requested_sparse_format
):
Preserve indices dtype for SciPy < 1.12 when converting from DIA to CSR/COO.
For SciPy < 1.12, DIA array indices are upcast to `np.int64`, which is
inconsistent with DIA matrices. We downcast the indices dtype to `np.int32` to
be consistent with DIA matrices.
The converted indices arrays are assigned back in place to the sparse container.
Parameters
----------
sparse_container : sparse container
Sparse container to be checked.
requested_sparse_format : str or bool
The sparse format requested for the conversion of `sparse_container`.
Notes
-----
See https://github.com/scipy/scipy/issues/19245 for more details.
"""
if original_container_format == "dia_array" and requested_sparse_format in (
"csr",
"coo",
):
if requested_sparse_format == "csr":
index_dtype = _smallest_admissible_index_dtype(
arrays=(sparse_container.indptr, sparse_container.indices),
maxval=max(sparse_container.nnz, sparse_container.shape[1]),
check_contents=True,
)
sparse_container.indices = sparse_container.indices.astype(
index_dtype, copy=False
)
sparse_container.indptr = sparse_container.indptr.astype(
index_dtype, copy=False
)
else: # requested_sparse_format == "coo"
index_dtype = _smallest_admissible_index_dtype(
maxval=max(sparse_container.shape)
)
sparse_container.row = sparse_container.row.astype(index_dtype, copy=False)
sparse_container.col = sparse_container.col.astype(index_dtype, copy=False)
|
Preserve indices dtype for SciPy < 1.12 when converting from DIA to CSR/COO.
For SciPy < 1.12, DIA array indices are upcast to `np.int64`, which is
inconsistent with DIA matrices. We downcast the indices dtype to `np.int32` to
be consistent with DIA matrices.
The converted indices arrays are assigned back in place to the sparse container.
Parameters
----------
sparse_container : sparse container
Sparse container to be checked.
requested_sparse_format : str or bool
The sparse format requested for the conversion of `sparse_container`.
Notes
-----
See https://github.com/scipy/scipy/issues/19245 for more details.
|
_preserve_dia_indices_dtype
|
python
|
scikit-learn/scikit-learn
|
sklearn/utils/fixes.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/fixes.py
|
BSD-3-Clause
|
def _smallest_admissible_index_dtype(arrays=(), maxval=None, check_contents=False):
Based on the input (integer) arrays in `arrays`, determine a suitable index data
type that can hold the data in the arrays.
This function returns `np.int64` if it is either required by `maxval`, by the
largest precision of the dtype of the arrays passed as argument, or by their
contents (when `check_contents` is True). If none of these conditions requires
`np.int64`, then this function returns `np.int32`.
Parameters
----------
arrays : ndarray or tuple of ndarrays, default=()
Input arrays whose types/contents to check.
maxval : float, default=None
Maximum value needed.
check_contents : bool, default=False
Whether to check the values in the arrays and not just their types.
By default, check only the types.
Returns
-------
dtype : {np.int32, np.int64}
Suitable index data type (int32 or int64).
"""
int32min = np.int32(np.iinfo(np.int32).min)
int32max = np.int32(np.iinfo(np.int32).max)
if maxval is not None:
if maxval > np.iinfo(np.int64).max:
raise ValueError(
f"maxval={maxval} is to large to be represented as np.int64."
)
if maxval > int32max:
return np.int64
if isinstance(arrays, np.ndarray):
arrays = (arrays,)
for arr in arrays:
if not isinstance(arr, np.ndarray):
raise TypeError(
f"Arrays should be of type np.ndarray, got {type(arr)} instead."
)
if not np.issubdtype(arr.dtype, np.integer):
raise ValueError(
f"Array dtype {arr.dtype} is not supported for index dtype. We expect "
"integral values."
)
if not np.can_cast(arr.dtype, np.int32):
if not check_contents:
# when `check_contents` is False, we stay on the safe side and return
# np.int64.
return np.int64
if arr.size == 0:
# a bigger type not needed yet, let's look at the next array
continue
else:
maxval = arr.max()
minval = arr.min()
if minval < int32min or maxval > int32max:
# a big index type is actually needed
return np.int64
return np.int32
|
Based on the input (integer) arrays in `arrays`, determine a suitable index data
type that can hold the data in the arrays.
This function returns `np.int64` if it is either required by `maxval`, by the
largest precision of the dtype of the arrays passed as argument, or by their
contents (when `check_contents` is True). If none of these conditions requires
`np.int64`, then this function returns `np.int32`.
Parameters
----------
arrays : ndarray or tuple of ndarrays, default=()
Input arrays whose types/contents to check.
maxval : float, default=None
Maximum value needed.
check_contents : bool, default=False
Whether to check the values in the arrays and not just their types.
By default, check only the types.
Returns
-------
dtype : {np.int32, np.int64}
Suitable index data type (int32 or int64).
|
_smallest_admissible_index_dtype
|
python
|
scikit-learn/scikit-learn
|
sklearn/utils/fixes.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/fixes.py
|
BSD-3-Clause
|
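A short sketch of the decision logic in `_smallest_admissible_index_dtype` (editorial addition; the helper is private to `sklearn.utils.fixes`, so treat the import as an assumption based on this row's path).
import numpy as np
from sklearn.utils.fixes import _smallest_admissible_index_dtype

indices = np.array([0, 5, 7], dtype=np.int64)
# Based on dtype alone, int64 input cannot safely be narrowed ...
print(_smallest_admissible_index_dtype(indices))                       # np.int64
# ... but inspecting the contents shows the values fit in int32.
print(_smallest_admissible_index_dtype(indices, check_contents=True))  # np.int32
# A maxval beyond the int32 range always forces int64.
print(_smallest_admissible_index_dtype(maxval=2**40))                  # np.int64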
def _in_unstable_openblas_configuration():
"""Return True if in an unstable configuration for OpenBLAS"""
# Import libraries which might load OpenBLAS.
import numpy # noqa: F401
import scipy # noqa: F401
modules_info = _get_threadpool_controller().info()
open_blas_used = any(info["internal_api"] == "openblas" for info in modules_info)
if not open_blas_used:
return False
# OpenBLAS 0.3.16 fixed instability for arm64, see:
# https://github.com/xianyi/OpenBLAS/blob/1b6db3dbba672b4f8af935bd43a1ff6cff4d20b7/Changelog.txt#L56-L58
openblas_arm64_stable_version = parse_version("0.3.16")
for info in modules_info:
if info["internal_api"] != "openblas":
continue
openblas_version = info.get("version")
openblas_architecture = info.get("architecture")
if openblas_version is None or openblas_architecture is None:
# Cannot be sure that OpenBLAS is good enough. Assume unstable:
return True # pragma: no cover
if (
openblas_architecture == "neoversen1"
and parse_version(openblas_version) < openblas_arm64_stable_version
):
# See discussions in https://github.com/numpy/numpy/issues/19411
return True # pragma: no cover
return False
|
Return True if in an unstable configuration for OpenBLAS
|
_in_unstable_openblas_configuration
|
python
|
scikit-learn/scikit-learn
|
sklearn/utils/fixes.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/fixes.py
|
BSD-3-Clause
|
def single_source_shortest_path_length(graph, source, *, cutoff=None):
"""Return the length of the shortest path from source to all reachable nodes.
Parameters
----------
graph : {array-like, sparse matrix} of shape (n_nodes, n_nodes)
Adjacency matrix of the graph. Sparse matrix of format LIL is
preferred.
source : int
Start node for path.
cutoff : int, default=None
Depth to stop the search - only paths of length <= cutoff are returned.
Returns
-------
paths : dict
Reachable end nodes mapped to length of path from source,
i.e. `{end: path_length}`.
Examples
--------
>>> from sklearn.utils.graph import single_source_shortest_path_length
>>> import numpy as np
>>> graph = np.array([[ 0, 1, 0, 0],
... [ 1, 0, 1, 0],
... [ 0, 1, 0, 0],
... [ 0, 0, 0, 0]])
>>> single_source_shortest_path_length(graph, 0)
{0: 0, 1: 1, 2: 2}
>>> graph = np.ones((6, 6))
>>> sorted(single_source_shortest_path_length(graph, 2).items())
[(0, 1), (1, 1), (2, 0), (3, 1), (4, 1), (5, 1)]
"""
if sparse.issparse(graph):
graph = graph.tolil()
else:
graph = sparse.lil_matrix(graph)
seen = {} # level (number of hops) when seen in BFS
level = 0 # the current level
next_level = [source] # list of nodes to check at next level
while next_level:
this_level = next_level # advance to next level
next_level = set() # and start a new set (fringe)
for v in this_level:
if v not in seen:
seen[v] = level # set the level of vertex v
next_level.update(graph.rows[v])
if cutoff is not None and cutoff <= level:
break
level += 1
return seen # return all path lengths as dictionary
|
Return the length of the shortest path from source to all reachable nodes.
Parameters
----------
graph : {array-like, sparse matrix} of shape (n_nodes, n_nodes)
Adjacency matrix of the graph. Sparse matrix of format LIL is
preferred.
source : int
Start node for path.
cutoff : int, default=None
Depth to stop the search - only paths of length <= cutoff are returned.
Returns
-------
paths : dict
Reachable end nodes mapped to length of path from source,
i.e. `{end: path_length}`.
Examples
--------
>>> from sklearn.utils.graph import single_source_shortest_path_length
>>> import numpy as np
>>> graph = np.array([[ 0, 1, 0, 0],
... [ 1, 0, 1, 0],
... [ 0, 1, 0, 0],
... [ 0, 0, 0, 0]])
>>> single_source_shortest_path_length(graph, 0)
{0: 0, 1: 1, 2: 2}
>>> graph = np.ones((6, 6))
>>> sorted(single_source_shortest_path_length(graph, 2).items())
[(0, 1), (1, 1), (2, 0), (3, 1), (4, 1), (5, 1)]
|
single_source_shortest_path_length
|
python
|
scikit-learn/scikit-learn
|
sklearn/utils/graph.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/graph.py
|
BSD-3-Clause
|
def _fix_connected_components(
X,
graph,
n_connected_components,
component_labels,
mode="distance",
metric="euclidean",
**kwargs,
):
"""Add connections to sparse graph to connect unconnected components.
For each pair of unconnected components, compute all pairwise distances
from one component to the other, and add a connection on the closest pair
of samples. This is a hacky way to get a graph with a single connected
component, which is necessary for example to compute a shortest path
between all pairs of samples in the graph.
Parameters
----------
X : array of shape (n_samples, n_features) or (n_samples, n_samples)
Features to compute the pairwise distances. If `metric =
"precomputed"`, X is the matrix of pairwise distances.
graph : sparse matrix of shape (n_samples, n_samples)
Graph of connection between samples.
n_connected_components : int
Number of connected components, as computed by
`scipy.sparse.csgraph.connected_components`.
component_labels : array of shape (n_samples)
Labels of connected components, as computed by
`scipy.sparse.csgraph.connected_components`.
mode : {'connectivity', 'distance'}, default='distance'
Type of graph matrix: 'connectivity' corresponds to the connectivity
matrix with ones and zeros, and 'distance' corresponds to the distances
between neighbors according to the given metric.
metric : str
Metric used in `sklearn.metrics.pairwise.pairwise_distances`.
kwargs : kwargs
Keyword arguments passed to
`sklearn.metrics.pairwise.pairwise_distances`.
Returns
-------
graph : sparse matrix of shape (n_samples, n_samples)
Graph of connection between samples, with a single connected component.
"""
if metric == "precomputed" and sparse.issparse(X):
raise RuntimeError(
"_fix_connected_components with metric='precomputed' requires the "
"full distance matrix in X, and does not work with a sparse "
"neighbors graph."
)
for i in range(n_connected_components):
idx_i = np.flatnonzero(component_labels == i)
Xi = X[idx_i]
for j in range(i):
idx_j = np.flatnonzero(component_labels == j)
Xj = X[idx_j]
if metric == "precomputed":
D = X[np.ix_(idx_i, idx_j)]
else:
D = pairwise_distances(Xi, Xj, metric=metric, **kwargs)
ii, jj = np.unravel_index(D.argmin(axis=None), D.shape)
if mode == "connectivity":
graph[idx_i[ii], idx_j[jj]] = 1
graph[idx_j[jj], idx_i[ii]] = 1
elif mode == "distance":
graph[idx_i[ii], idx_j[jj]] = D[ii, jj]
graph[idx_j[jj], idx_i[ii]] = D[ii, jj]
else:
raise ValueError(
"Unknown mode=%r, should be one of ['connectivity', 'distance']."
% mode
)
return graph
|
Add connections to sparse graph to connect unconnected components.
For each pair of unconnected components, compute all pairwise distances
from one component to the other, and add a connection on the closest pair
of samples. This is a hacky way to get a graph with a single connected
component, which is necessary for example to compute a shortest path
between all pairs of samples in the graph.
Parameters
----------
X : array of shape (n_samples, n_features) or (n_samples, n_samples)
Features to compute the pairwise distances. If `metric =
"precomputed"`, X is the matrix of pairwise distances.
graph : sparse matrix of shape (n_samples, n_samples)
Graph of connection between samples.
n_connected_components : int
Number of connected components, as computed by
`scipy.sparse.csgraph.connected_components`.
component_labels : array of shape (n_samples)
Labels of connected components, as computed by
`scipy.sparse.csgraph.connected_components`.
mode : {'connectivity', 'distance'}, default='distance'
Type of graph matrix: 'connectivity' corresponds to the connectivity
matrix with ones and zeros, and 'distance' corresponds to the distances
between neighbors according to the given metric.
metric : str
Metric used in `sklearn.metrics.pairwise.pairwise_distances`.
kwargs : kwargs
Keyword arguments passed to
`sklearn.metrics.pairwise.pairwise_distances`.
Returns
-------
graph : sparse matrix of shape (n_samples, n_samples)
Graph of connection between samples, with a single connected component.
|
_fix_connected_components
|
python
|
scikit-learn/scikit-learn
|
sklearn/utils/graph.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/graph.py
|
BSD-3-Clause
|
def _safe_split(estimator, X, y, indices, train_indices=None):
"""Create subset of dataset and properly handle kernels.
Slice X, y according to indices for cross-validation, but take care of
precomputed kernel-matrices or pairwise affinities / distances.
If ``estimator._pairwise is True``, X needs to be square and
we slice rows and columns. If ``train_indices`` is not None,
we slice rows using ``indices`` (assumed the test set) and columns
using ``train_indices``, indicating the training set.
Labels y will always be indexed only along the first axis.
Parameters
----------
estimator : object
Estimator to determine whether we should slice only rows or rows and
columns.
X : array-like, sparse matrix or iterable
Data to be indexed. If ``estimator._pairwise is True``,
this needs to be a square array-like or sparse matrix.
y : array-like, sparse matrix or iterable
Targets to be indexed.
indices : array of int
Rows to select from X and y.
If ``estimator._pairwise is True`` and ``train_indices is None``
then ``indices`` will also be used to slice columns.
train_indices : array of int or None, default=None
If ``estimator._pairwise is True`` and ``train_indices is not None``,
then ``train_indices`` will be used to slice the columns of X.
Returns
-------
X_subset : array-like, sparse matrix or list
Indexed data.
y_subset : array-like, sparse matrix or list
Indexed targets.
"""
if get_tags(estimator).input_tags.pairwise:
if not hasattr(X, "shape"):
raise ValueError(
"Precomputed kernels or affinity matrices have "
"to be passed as arrays or sparse matrices."
)
# X is a precomputed square kernel matrix
if X.shape[0] != X.shape[1]:
raise ValueError("X should be a square kernel matrix")
if train_indices is None:
X_subset = X[np.ix_(indices, indices)]
else:
X_subset = X[np.ix_(indices, train_indices)]
else:
X_subset = _safe_indexing(X, indices)
if y is not None:
y_subset = _safe_indexing(y, indices)
else:
y_subset = None
return X_subset, y_subset
|
Create subset of dataset and properly handle kernels.
Slice X, y according to indices for cross-validation, but take care of
precomputed kernel-matrices or pairwise affinities / distances.
If ``estimator._pairwise is True``, X needs to be square and
we slice rows and columns. If ``train_indices`` is not None,
we slice rows using ``indices`` (assumed the test set) and columns
using ``train_indices``, indicating the training set.
Labels y will always be indexed only along the first axis.
Parameters
----------
estimator : object
Estimator to determine whether we should slice only rows or rows and
columns.
X : array-like, sparse matrix or iterable
Data to be indexed. If ``estimator._pairwise is True``,
this needs to be a square array-like or sparse matrix.
y : array-like, sparse matrix or iterable
Targets to be indexed.
indices : array of int
Rows to select from X and y.
If ``estimator._pairwise is True`` and ``train_indices is None``
then ``indices`` will also be used to slice columns.
train_indices : array of int or None, default=None
If ``estimator._pairwise is True`` and ``train_indices is not None``,
then ``train_indices`` will be used to slice the columns of X.
Returns
-------
X_subset : array-like, sparse matrix or list
Indexed data.
y_subset : array-like, sparse matrix or list
Indexed targets.
|
_safe_split
|
python
|
scikit-learn/scikit-learn
|
sklearn/utils/metaestimators.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/metaestimators.py
|
BSD-3-Clause
|
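A hedged sketch of `_safe_split` with a precomputed kernel (editorial addition): `SVC(kernel="precomputed")` is used here only because it carries the pairwise input tag, so both rows and columns of the square kernel are sliced as described above.
import numpy as np
from sklearn.svm import SVC
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.utils.metaestimators import _safe_split

rng = np.random.RandomState(0)
X = rng.randn(8, 3)
y = rng.randint(0, 2, size=8)
K = rbf_kernel(X)  # square (8, 8) precomputed kernel

est = SVC(kernel="precomputed")  # pairwise estimator: columns get sliced too
train, test = np.arange(6), np.arange(6, 8)
K_train, y_train = _safe_split(est, K, y, train)
K_test, y_test = _safe_split(est, K, y, test, train_indices=train)
print(K_train.shape, K_test.shape)  # (6, 6) (2, 6)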
def unique_labels(*ys):
"""Extract an ordered array of unique labels.
We don't allow:
- mix of multilabel and multiclass (single label) targets
- mix of label indicator matrix and anything else,
because there are no explicit labels
- mix of label indicator matrices of different sizes
- mix of string and integer labels
At the moment, we also don't allow "multiclass-multioutput" input type.
Parameters
----------
*ys : array-likes
Label values.
Returns
-------
out : ndarray of shape (n_unique_labels,)
An ordered array of unique labels.
Examples
--------
>>> from sklearn.utils.multiclass import unique_labels
>>> unique_labels([3, 5, 5, 5, 7, 7])
array([3, 5, 7])
>>> unique_labels([1, 2, 3, 4], [2, 2, 3, 4])
array([1, 2, 3, 4])
>>> unique_labels([1, 2, 10], [5, 11])
array([ 1, 2, 5, 10, 11])
"""
ys = attach_unique(*ys, return_tuple=True)
xp, is_array_api_compliant = get_namespace(*ys)
if len(ys) == 0:
raise ValueError("No argument has been passed.")
# Check that we don't mix label format
ys_types = set(type_of_target(x) for x in ys)
if ys_types == {"binary", "multiclass"}:
ys_types = {"multiclass"}
if len(ys_types) > 1:
raise ValueError("Mix type of y not allowed, got types %s" % ys_types)
label_type = ys_types.pop()
# Check consistency for the indicator format
if (
label_type == "multilabel-indicator"
and len(
set(
check_array(y, accept_sparse=["csr", "csc", "coo"]).shape[1] for y in ys
)
)
> 1
):
raise ValueError(
"Multi-label binary indicator input with different numbers of labels"
)
# Get the unique set of labels
_unique_labels = _FN_UNIQUE_LABELS.get(label_type, None)
if not _unique_labels:
raise ValueError("Unknown label type: %s" % repr(ys))
if is_array_api_compliant:
# array_api does not allow for mixed dtypes
unique_ys = xp.concat([_unique_labels(y, xp=xp) for y in ys])
return xp.unique_values(unique_ys)
ys_labels = set(
chain.from_iterable((i for i in _unique_labels(y, xp=xp)) for y in ys)
)
# Check that we don't mix string type with number type
if len(set(isinstance(label, str) for label in ys_labels)) > 1:
raise ValueError("Mix of label input types (string and number)")
return xp.asarray(sorted(ys_labels))
|
Extract an ordered array of unique labels.
We don't allow:
- mix of multilabel and multiclass (single label) targets
- mix of label indicator matrix and anything else,
because there are no explicit labels
- mix of label indicator matrices of different sizes
- mix of string and integer labels
At the moment, we also don't allow "multiclass-multioutput" input type.
Parameters
----------
*ys : array-likes
Label values.
Returns
-------
out : ndarray of shape (n_unique_labels,)
An ordered array of unique labels.
Examples
--------
>>> from sklearn.utils.multiclass import unique_labels
>>> unique_labels([3, 5, 5, 5, 7, 7])
array([3, 5, 7])
>>> unique_labels([1, 2, 3, 4], [2, 2, 3, 4])
array([1, 2, 3, 4])
>>> unique_labels([1, 2, 10], [5, 11])
array([ 1, 2, 5, 10, 11])
|
unique_labels
|
python
|
scikit-learn/scikit-learn
|
sklearn/utils/multiclass.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/multiclass.py
|
BSD-3-Clause
|
def is_multilabel(y):
"""Check if ``y`` is in a multilabel format.
Parameters
----------
y : ndarray of shape (n_samples,)
Target values.
Returns
-------
out : bool
Return ``True``, if ``y`` is in a multilabel format, else ``False``.
Examples
--------
>>> import numpy as np
>>> from sklearn.utils.multiclass import is_multilabel
>>> is_multilabel([0, 1, 0, 1])
False
>>> is_multilabel([[1], [0, 2], []])
False
>>> is_multilabel(np.array([[1, 0], [0, 0]]))
True
>>> is_multilabel(np.array([[1], [0], [0]]))
False
>>> is_multilabel(np.array([[1, 0, 0]]))
True
"""
xp, is_array_api_compliant = get_namespace(y)
if hasattr(y, "__array__") or isinstance(y, Sequence) or is_array_api_compliant:
# DeprecationWarning will be replaced by ValueError, see NEP 34
# https://numpy.org/neps/nep-0034-infer-dtype-is-object.html
check_y_kwargs = dict(
accept_sparse=True,
allow_nd=True,
ensure_all_finite=False,
ensure_2d=False,
ensure_min_samples=0,
ensure_min_features=0,
)
with warnings.catch_warnings():
warnings.simplefilter("error", VisibleDeprecationWarning)
try:
y = check_array(y, dtype=None, **check_y_kwargs)
except (VisibleDeprecationWarning, ValueError) as e:
if str(e).startswith("Complex data not supported"):
raise
# dtype=object should be provided explicitly for ragged arrays,
# see NEP 34
y = check_array(y, dtype=object, **check_y_kwargs)
if not (hasattr(y, "shape") and y.ndim == 2 and y.shape[1] > 1):
return False
if issparse(y):
if y.format in ("dok", "lil"):
y = y.tocsr()
labels = xp.unique_values(y.data)
return len(y.data) == 0 or (
(labels.size == 1 or ((labels.size == 2) and (0 in labels)))
and (y.dtype.kind in "biu" or _is_integral_float(labels)) # bool, int, uint
)
else:
labels = cached_unique(y, xp=xp)
return labels.shape[0] < 3 and (
xp.isdtype(y.dtype, ("bool", "signed integer", "unsigned integer"))
or _is_integral_float(labels)
)
|
Check if ``y`` is in a multilabel format.
Parameters
----------
y : ndarray of shape (n_samples,)
Target values.
Returns
-------
out : bool
Return ``True``, if ``y`` is in a multilabel format, else ``False``.
Examples
--------
>>> import numpy as np
>>> from sklearn.utils.multiclass import is_multilabel
>>> is_multilabel([0, 1, 0, 1])
False
>>> is_multilabel([[1], [0, 2], []])
False
>>> is_multilabel(np.array([[1, 0], [0, 0]]))
True
>>> is_multilabel(np.array([[1], [0], [0]]))
False
>>> is_multilabel(np.array([[1, 0, 0]]))
True
|
is_multilabel
|
python
|
scikit-learn/scikit-learn
|
sklearn/utils/multiclass.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/multiclass.py
|
BSD-3-Clause
|
def check_classification_targets(y):
"""Ensure that target y is of a non-regression type.
Only the following target types (as defined in type_of_target) are allowed:
'binary', 'multiclass', 'multiclass-multioutput',
'multilabel-indicator', 'multilabel-sequences'
Parameters
----------
y : array-like
Target values.
"""
y_type = type_of_target(y, input_name="y")
if y_type not in [
"binary",
"multiclass",
"multiclass-multioutput",
"multilabel-indicator",
"multilabel-sequences",
]:
raise ValueError(
f"Unknown label type: {y_type}. Maybe you are trying to fit a "
"classifier, which expects discrete classes on a "
"regression target with continuous values."
)
|
Ensure that target y is of a non-regression type.
Only the following target types (as defined in type_of_target) are allowed:
'binary', 'multiclass', 'multiclass-multioutput',
'multilabel-indicator', 'multilabel-sequences'
Parameters
----------
y : array-like
Target values.
|
check_classification_targets
|
python
|
scikit-learn/scikit-learn
|
sklearn/utils/multiclass.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/multiclass.py
|
BSD-3-Clause
|
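A minimal usage sketch for `check_classification_targets` (editorial addition): discrete targets pass silently, while a continuous target triggers the ValueError quoted above.
import numpy as np
from sklearn.utils.multiclass import check_classification_targets

check_classification_targets(np.array([0, 1, 1, 0]))  # 'binary' target: no error
try:
    check_classification_targets(np.array([0.1, 0.6, 0.3]))  # 'continuous' target
except ValueError as exc:
    print(exc)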
def type_of_target(y, input_name="", raise_unknown=False):
"""Determine the type of data indicated by the target.
Note that this type is the most specific type that can be inferred.
For example:
* ``binary`` is more specific but compatible with ``multiclass``.
* ``multiclass`` of integers is more specific but compatible with ``continuous``.
* ``multilabel-indicator`` is more specific but compatible with
``multiclass-multioutput``.
Parameters
----------
y : {array-like, sparse matrix}
Target values. If a sparse matrix, `y` is expected to be a
CSR/CSC matrix.
input_name : str, default=""
The data name used to construct the error message.
.. versionadded:: 1.1.0
raise_unknown : bool, default=False
If `True`, raise an error when the type of target returned by
:func:`~sklearn.utils.multiclass.type_of_target` is `"unknown"`.
.. versionadded:: 1.6
Returns
-------
target_type : str
One of:
* 'continuous': `y` is an array-like of floats that are not all
integers, and is 1d or a column vector.
* 'continuous-multioutput': `y` is a 2d array of floats that are
not all integers, and both dimensions are of size > 1.
* 'binary': `y` contains <= 2 discrete values and is 1d or a column
vector.
* 'multiclass': `y` contains more than two discrete values, is not a
sequence of sequences, and is 1d or a column vector.
* 'multiclass-multioutput': `y` is a 2d array that contains more
than two discrete values, is not a sequence of sequences, and both
dimensions are of size > 1.
* 'multilabel-indicator': `y` is a label indicator matrix, an array
of two dimensions with at least two columns, and at most 2 unique
values.
* 'unknown': `y` is array-like but none of the above, such as a 3d
array, sequence of sequences, or an array of non-sequence objects.
Examples
--------
>>> from sklearn.utils.multiclass import type_of_target
>>> import numpy as np
>>> type_of_target([0.1, 0.6])
'continuous'
>>> type_of_target([1, -1, -1, 1])
'binary'
>>> type_of_target(['a', 'b', 'a'])
'binary'
>>> type_of_target([1.0, 2.0])
'binary'
>>> type_of_target([1, 0, 2])
'multiclass'
>>> type_of_target([1.0, 0.0, 3.0])
'multiclass'
>>> type_of_target(['a', 'b', 'c'])
'multiclass'
>>> type_of_target(np.array([[1, 2], [3, 1]]))
'multiclass-multioutput'
>>> type_of_target([[1, 2]])
'multilabel-indicator'
>>> type_of_target(np.array([[1.5, 2.0], [3.0, 1.6]]))
'continuous-multioutput'
>>> type_of_target(np.array([[0, 1], [1, 1]]))
'multilabel-indicator'
"""
xp, is_array_api_compliant = get_namespace(y)
def _raise_or_return():
"""Depending on the value of raise_unknown, either raise an error or return
'unknown'.
"""
if raise_unknown:
input = input_name if input_name else "data"
raise ValueError(f"Unknown label type for {input}: {y!r}")
else:
return "unknown"
valid = (
(isinstance(y, Sequence) or issparse(y) or hasattr(y, "__array__"))
and not isinstance(y, str)
) or is_array_api_compliant
if not valid:
raise ValueError(
"Expected array-like (array or non-string sequence), got %r" % y
)
sparse_pandas = y.__class__.__name__ in ["SparseSeries", "SparseArray"]
if sparse_pandas:
raise ValueError("y cannot be class 'SparseSeries' or 'SparseArray'")
if is_multilabel(y):
return "multilabel-indicator"
# DeprecationWarning will be replaced by ValueError, see NEP 34
# https://numpy.org/neps/nep-0034-infer-dtype-is-object.html
# We therefore catch both deprecation (NumPy < 1.24) warning and
# value error (NumPy >= 1.24).
check_y_kwargs = dict(
accept_sparse=True,
allow_nd=True,
ensure_all_finite=False,
ensure_2d=False,
ensure_min_samples=0,
ensure_min_features=0,
)
with warnings.catch_warnings():
warnings.simplefilter("error", VisibleDeprecationWarning)
if not issparse(y):
try:
y = check_array(y, dtype=None, **check_y_kwargs)
except (VisibleDeprecationWarning, ValueError) as e:
if str(e).startswith("Complex data not supported"):
raise
# dtype=object should be provided explicitly for ragged arrays,
# see NEP 34
y = check_array(y, dtype=object, **check_y_kwargs)
try:
first_row_or_val = y[[0], :] if issparse(y) else y[0]
# labels in bytes format
if isinstance(first_row_or_val, bytes):
            raise TypeError(
                "Labels represented as bytes are not supported. Convert "
                "the labels to a string or integer format."
            )
# The old sequence of sequences format
if (
not hasattr(first_row_or_val, "__array__")
and isinstance(first_row_or_val, Sequence)
and not isinstance(first_row_or_val, str)
):
raise ValueError(
"You appear to be using a legacy multi-label data"
" representation. Sequence of sequences are no"
" longer supported; use a binary array or sparse"
" matrix instead - the MultiLabelBinarizer"
" transformer can convert to this format."
)
except IndexError:
pass
# Invalid inputs
if y.ndim not in (1, 2):
# Number of dimension greater than 2: [[[1, 2]]]
return _raise_or_return()
if not min(y.shape):
# Empty ndarray: []/[[]]
if y.ndim == 1:
# 1-D empty array: []
return "binary" # []
# 2-D empty array: [[]]
return _raise_or_return()
if not issparse(y) and y.dtype == object and not isinstance(y.flat[0], str):
# [obj_1] and not ["label_1"]
return _raise_or_return()
# Check if multioutput
if y.ndim == 2 and y.shape[1] > 1:
suffix = "-multioutput" # [[1, 2], [1, 2]]
else:
suffix = "" # [1, 2, 3] or [[1], [2], [3]]
# Check float and contains non-integer float values
if xp.isdtype(y.dtype, "real floating"):
# [.1, .2, 3] or [[.1, .2, 3]] or [[1., .2]] and not [1., 2., 3.]
data = y.data if issparse(y) else y
if xp.any(data != xp.astype(data, int)):
_assert_all_finite(data, input_name=input_name)
return "continuous" + suffix
# Check multiclass
if issparse(first_row_or_val):
first_row_or_val = first_row_or_val.data
classes = cached_unique(y)
if y.shape[0] > 20 and classes.shape[0] > round(0.5 * y.shape[0]):
# Only raise the warning when we have at least 20 samples.
warnings.warn(
"The number of unique classes is greater than 50% of the number "
"of samples.",
UserWarning,
stacklevel=2,
)
if classes.shape[0] > 2 or (y.ndim == 2 and len(first_row_or_val) > 1):
# [1, 2, 3] or [[1., 2., 3]] or [[1, 2]]
return "multiclass" + suffix
else:
return "binary" # [1, 2] or [["a"], ["b"]]
|
Determine the type of data indicated by the target.
Note that this type is the most specific type that can be inferred.
For example:
* ``binary`` is more specific but compatible with ``multiclass``.
* ``multiclass`` of integers is more specific but compatible with ``continuous``.
* ``multilabel-indicator`` is more specific but compatible with
``multiclass-multioutput``.
Parameters
----------
y : {array-like, sparse matrix}
Target values. If a sparse matrix, `y` is expected to be a
CSR/CSC matrix.
input_name : str, default=""
The data name used to construct the error message.
.. versionadded:: 1.1.0
raise_unknown : bool, default=False
If `True`, raise an error when the type of target returned by
:func:`~sklearn.utils.multiclass.type_of_target` is `"unknown"`.
.. versionadded:: 1.6
Returns
-------
target_type : str
One of:
* 'continuous': `y` is an array-like of floats that are not all
integers, and is 1d or a column vector.
* 'continuous-multioutput': `y` is a 2d array of floats that are
not all integers, and both dimensions are of size > 1.
* 'binary': `y` contains <= 2 discrete values and is 1d or a column
vector.
* 'multiclass': `y` contains more than two discrete values, is not a
sequence of sequences, and is 1d or a column vector.
* 'multiclass-multioutput': `y` is a 2d array that contains more
than two discrete values, is not a sequence of sequences, and both
dimensions are of size > 1.
* 'multilabel-indicator': `y` is a label indicator matrix, an array
of two dimensions with at least two columns, and at most 2 unique
values.
* 'unknown': `y` is array-like but none of the above, such as a 3d
array, sequence of sequences, or an array of non-sequence objects.
Examples
--------
>>> from sklearn.utils.multiclass import type_of_target
>>> import numpy as np
>>> type_of_target([0.1, 0.6])
'continuous'
>>> type_of_target([1, -1, -1, 1])
'binary'
>>> type_of_target(['a', 'b', 'a'])
'binary'
>>> type_of_target([1.0, 2.0])
'binary'
>>> type_of_target([1, 0, 2])
'multiclass'
>>> type_of_target([1.0, 0.0, 3.0])
'multiclass'
>>> type_of_target(['a', 'b', 'c'])
'multiclass'
>>> type_of_target(np.array([[1, 2], [3, 1]]))
'multiclass-multioutput'
>>> type_of_target([[1, 2]])
'multilabel-indicator'
>>> type_of_target(np.array([[1.5, 2.0], [3.0, 1.6]]))
'continuous-multioutput'
>>> type_of_target(np.array([[0, 1], [1, 1]]))
'multilabel-indicator'
|
type_of_target
|
python
|
scikit-learn/scikit-learn
|
sklearn/utils/multiclass.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/multiclass.py
|
BSD-3-Clause
|
def _raise_or_return():
"""Depending on the value of raise_unknown, either raise an error or return
'unknown'.
"""
if raise_unknown:
input = input_name if input_name else "data"
raise ValueError(f"Unknown label type for {input}: {y!r}")
else:
return "unknown"
|
Depending on the value of raise_unknown, either raise an error or return
'unknown'.
|
_raise_or_return
|
python
|
scikit-learn/scikit-learn
|
sklearn/utils/multiclass.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/multiclass.py
|
BSD-3-Clause
|
def _check_partial_fit_first_call(clf, classes=None):
"""Private helper function for factorizing common classes param logic.
Estimators that implement the ``partial_fit`` API need to be provided with
the list of possible classes at the first call to partial_fit.
Subsequent calls to partial_fit should check that ``classes`` is still
consistent with a previous value of ``clf.classes_`` when provided.
This function returns True if it detects that this was the first call to
``partial_fit`` on ``clf``. In that case the ``classes_`` attribute is also
set on ``clf``.
"""
if getattr(clf, "classes_", None) is None and classes is None:
raise ValueError("classes must be passed on the first call to partial_fit.")
elif classes is not None:
if getattr(clf, "classes_", None) is not None:
if not np.array_equal(clf.classes_, unique_labels(classes)):
raise ValueError(
"`classes=%r` is not the same as on last call "
"to partial_fit, was: %r" % (classes, clf.classes_)
)
else:
# This is the first call to partial_fit
clf.classes_ = unique_labels(classes)
return True
# classes is None and clf.classes_ has already previously been set:
# nothing to do
return False
|
Private helper function for factorizing common classes param logic.
Estimators that implement the ``partial_fit`` API need to be provided with
the list of possible classes at the first call to partial_fit.
Subsequent calls to partial_fit should check that ``classes`` is still
consistent with a previous value of ``clf.classes_`` when provided.
This function returns True if it detects that this was the first call to
``partial_fit`` on ``clf``. In that case the ``classes_`` attribute is also
set on ``clf``.
|
_check_partial_fit_first_call
|
python
|
scikit-learn/scikit-learn
|
sklearn/utils/multiclass.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/multiclass.py
|
BSD-3-Clause
|
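A minimal sketch of how an estimator's `partial_fit` might use this private helper (it assumes the helper is importable from `sklearn.utils.multiclass`; internal APIs can change between releases):

from sklearn.utils.multiclass import _check_partial_fit_first_call

class _ToyEstimator:
    pass

est = _ToyEstimator()
print(_check_partial_fit_first_call(est, classes=[0, 1, 2]))  # True, sets est.classes_
print(est.classes_)                                           # array([0, 1, 2])
print(_check_partial_fit_first_call(est))                     # False, classes_ already known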
def class_distribution(y, sample_weight=None):
"""Compute class priors from multioutput-multiclass target data.
Parameters
----------
y : {array-like, sparse matrix} of size (n_samples, n_outputs)
The labels for each example.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
Returns
-------
classes : list of size n_outputs of ndarray of size (n_classes,)
List of classes for each column.
n_classes : list of int of size n_outputs
Number of classes in each column.
class_prior : list of size n_outputs of ndarray of size (n_classes,)
Class distribution of each column.
"""
classes = []
n_classes = []
class_prior = []
n_samples, n_outputs = y.shape
if sample_weight is not None:
sample_weight = np.asarray(sample_weight)
if issparse(y):
y = y.tocsc()
y_nnz = np.diff(y.indptr)
for k in range(n_outputs):
col_nonzero = y.indices[y.indptr[k] : y.indptr[k + 1]]
# separate sample weights for zero and non-zero elements
if sample_weight is not None:
nz_samp_weight = sample_weight[col_nonzero]
zeros_samp_weight_sum = np.sum(sample_weight) - np.sum(nz_samp_weight)
else:
nz_samp_weight = None
zeros_samp_weight_sum = y.shape[0] - y_nnz[k]
classes_k, y_k = np.unique(
y.data[y.indptr[k] : y.indptr[k + 1]], return_inverse=True
)
class_prior_k = np.bincount(y_k, weights=nz_samp_weight)
# An explicit zero was found, combine its weight with the weight
# of the implicit zeros
if 0 in classes_k:
class_prior_k[classes_k == 0] += zeros_samp_weight_sum
            # If there is an implicit zero and it is not in classes and
# class_prior, make an entry for it
if 0 not in classes_k and y_nnz[k] < y.shape[0]:
classes_k = np.insert(classes_k, 0, 0)
class_prior_k = np.insert(class_prior_k, 0, zeros_samp_weight_sum)
classes.append(classes_k)
n_classes.append(classes_k.shape[0])
class_prior.append(class_prior_k / class_prior_k.sum())
else:
for k in range(n_outputs):
classes_k, y_k = np.unique(y[:, k], return_inverse=True)
classes.append(classes_k)
n_classes.append(classes_k.shape[0])
class_prior_k = np.bincount(y_k, weights=sample_weight)
class_prior.append(class_prior_k / class_prior_k.sum())
return (classes, n_classes, class_prior)
|
Compute class priors from multioutput-multiclass target data.
Parameters
----------
y : {array-like, sparse matrix} of size (n_samples, n_outputs)
The labels for each example.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
Returns
-------
classes : list of size n_outputs of ndarray of size (n_classes,)
List of classes for each column.
n_classes : list of int of size n_outputs
Number of classes in each column.
class_prior : list of size n_outputs of ndarray of size (n_classes,)
Class distribution of each column.
|
class_distribution
|
python
|
scikit-learn/scikit-learn
|
sklearn/utils/multiclass.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/multiclass.py
|
BSD-3-Clause
|
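A short, hedged example of `class_distribution` on a dense multioutput target (the sparse branch behaves analogously):

import numpy as np
from sklearn.utils.multiclass import class_distribution

y = np.array([[1, 0],
              [2, 0],
              [1, 3]])  # 3 samples, 2 outputs
classes, n_classes, class_prior = class_distribution(y)
print(classes)      # [array([1, 2]), array([0, 3])]
print(n_classes)    # [2, 2]
print(class_prior)  # [array([0.667, 0.333]), array([0.667, 0.333])] (approximately)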
def _ovr_decision_function(predictions, confidences, n_classes):
"""Compute a continuous, tie-breaking OvR decision function from OvO.
It is important to include a continuous value, not only votes,
to make computing AUC or calibration meaningful.
Parameters
----------
predictions : array-like of shape (n_samples, n_classifiers)
Predicted classes for each binary classifier.
confidences : array-like of shape (n_samples, n_classifiers)
Decision functions or predicted probabilities for positive class
for each binary classifier.
n_classes : int
Number of classes. n_classifiers must be
        ``n_classes * (n_classes - 1) / 2``.
"""
n_samples = predictions.shape[0]
votes = np.zeros((n_samples, n_classes))
sum_of_confidences = np.zeros((n_samples, n_classes))
k = 0
for i in range(n_classes):
for j in range(i + 1, n_classes):
sum_of_confidences[:, i] -= confidences[:, k]
sum_of_confidences[:, j] += confidences[:, k]
votes[predictions[:, k] == 0, i] += 1
votes[predictions[:, k] == 1, j] += 1
k += 1
# Monotonically transform the sum_of_confidences to (-1/3, 1/3)
# and add it with votes. The monotonic transformation is
# f: x -> x / (3 * (|x| + 1)), it uses 1/3 instead of 1/2
# to ensure that we won't reach the limits and change vote order.
# The motivation is to use confidence levels as a way to break ties in
# the votes without switching any decision made based on a difference
# of 1 vote.
transformed_confidences = sum_of_confidences / (
3 * (np.abs(sum_of_confidences) + 1)
)
return votes + transformed_confidences
|
Compute a continuous, tie-breaking OvR decision function from OvO.
It is important to include a continuous value, not only votes,
to make computing AUC or calibration meaningful.
Parameters
----------
predictions : array-like of shape (n_samples, n_classifiers)
Predicted classes for each binary classifier.
confidences : array-like of shape (n_samples, n_classifiers)
Decision functions or predicted probabilities for positive class
for each binary classifier.
n_classes : int
Number of classes. n_classifiers must be
    ``n_classes * (n_classes - 1) / 2``.
|
_ovr_decision_function
|
python
|
scikit-learn/scikit-learn
|
sklearn/utils/multiclass.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/multiclass.py
|
BSD-3-Clause
|
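An illustrative call with three classes, hence 3 pairwise classifiers ordered (0 vs 1), (0 vs 2), (1 vs 2); `predictions[:, k] == 1` votes for the second class of pair k and positive confidences favour that class as well (the numbers below are made up):

import numpy as np
from sklearn.utils.multiclass import _ovr_decision_function

predictions = np.array([[1, 0, 0]])         # votes: class 1, class 0, class 1
confidences = np.array([[0.4, -0.3, -0.8]])
scores = _ovr_decision_function(predictions, confidences, n_classes=3)
print(scores.argmax(axis=1))  # [1] -> class 1 collects the most pairwise wins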
def _line_search_wolfe12(
f, fprime, xk, pk, gfk, old_fval, old_old_fval, verbose=0, **kwargs
):
"""
    Same as line_search_wolfe1, but fall back to line_search_wolfe2 if no
    suitable step length is found, and raise an exception if neither line
    search finds one.
Raises
------
_LineSearchError
If no suitable step size is found.
"""
is_verbose = verbose >= 2
eps = 16 * np.finfo(np.asarray(old_fval).dtype).eps
if is_verbose:
print(" Line Search")
print(f" eps=16 * finfo.eps={eps}")
print(" try line search wolfe1")
ret = line_search_wolfe1(f, fprime, xk, pk, gfk, old_fval, old_old_fval, **kwargs)
if is_verbose:
_not_ = "not " if ret[0] is None else ""
print(" wolfe1 line search was " + _not_ + "successful")
if ret[0] is None:
        # Borrow the logic from the line_search method of our NewtonSolver class
        # to deal with relative loss differences around machine precision.
args = kwargs.get("args", tuple())
fval = f(xk + pk, *args)
tiny_loss = np.abs(old_fval * eps)
loss_improvement = fval - old_fval
check = np.abs(loss_improvement) <= tiny_loss
if is_verbose:
print(
" check loss |improvement| <= eps * |loss_old|:"
f" {np.abs(loss_improvement)} <= {tiny_loss} {check}"
)
if check:
# 2.1 Check sum of absolute gradients as alternative condition.
sum_abs_grad_old = scipy.linalg.norm(gfk, ord=1)
grad = fprime(xk + pk, *args)
sum_abs_grad = scipy.linalg.norm(grad, ord=1)
check = sum_abs_grad < sum_abs_grad_old
if is_verbose:
print(
" check sum(|gradient|) < sum(|gradient_old|): "
f"{sum_abs_grad} < {sum_abs_grad_old} {check}"
)
if check:
ret = (
1.0, # step size
ret[1] + 1, # number of function evaluations
ret[2] + 1, # number of gradient evaluations
fval,
old_fval,
grad,
)
if ret[0] is None:
        # line search failed: try a different one.
# TODO: It seems that the new check for the sum of absolute gradients above
# catches all cases that, earlier, ended up here. In fact, our tests never
# trigger this "if branch" here and we can consider to remove it.
if is_verbose:
print(" last resort: try line search wolfe2")
ret = line_search_wolfe2(
f, fprime, xk, pk, gfk, old_fval, old_old_fval, **kwargs
)
if is_verbose:
_not_ = "not " if ret[0] is None else ""
print(" wolfe2 line search was " + _not_ + "successful")
if ret[0] is None:
raise _LineSearchError()
return ret
|
Same as line_search_wolfe1, but fall back to line_search_wolfe2 if no
suitable step length is found, and raise an exception if neither line
search finds one.
Raises
------
_LineSearchError
If no suitable step size is found.
|
_line_search_wolfe12
|
python
|
scikit-learn/scikit-learn
|
sklearn/utils/optimize.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/optimize.py
|
BSD-3-Clause
|
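A small, hedged demonstration on a quadratic objective (assumes the private helper is importable from `sklearn.utils.optimize`; private signatures may change between versions):

import numpy as np
from sklearn.utils.optimize import _line_search_wolfe12

f = lambda x: 0.5 * np.dot(x, x)   # quadratic objective
fprime = lambda x: x               # its gradient
xk = np.array([2.0, -1.0])
gfk = fprime(xk)
pk = -gfk                          # steepest-descent direction
step, fc, gc, new_fval, old_fval, new_grad = _line_search_wolfe12(
    f, fprime, xk, pk, gfk, f(xk), None
)
print(step)  # step length satisfying the Wolfe conditions (1.0 for this quadratic)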
def _cg(fhess_p, fgrad, maxiter, tol, verbose=0):
"""
Solve iteratively the linear system 'fhess_p . xsupi = fgrad'
with a conjugate gradient descent.
Parameters
----------
fhess_p : callable
Function that takes the gradient as a parameter and returns the
matrix product of the Hessian and gradient.
fgrad : ndarray of shape (n_features,) or (n_features + 1,)
Gradient vector.
maxiter : int
Number of CG iterations.
tol : float
Stopping criterion.
Returns
-------
xsupi : ndarray of shape (n_features,) or (n_features + 1,)
Estimated solution.
"""
eps = 16 * np.finfo(np.float64).eps
xsupi = np.zeros(len(fgrad), dtype=fgrad.dtype)
ri = np.copy(fgrad) # residual = fgrad - fhess_p @ xsupi
psupi = -ri
i = 0
dri0 = np.dot(ri, ri)
# We also keep track of |p_i|^2.
psupi_norm2 = dri0
is_verbose = verbose >= 2
while i <= maxiter:
if np.sum(np.abs(ri)) <= tol:
if is_verbose:
print(
f" Inner CG solver iteration {i} stopped with\n"
f" sum(|residuals|) <= tol: {np.sum(np.abs(ri))} <= {tol}"
)
break
Ap = fhess_p(psupi)
# check curvature
curv = np.dot(psupi, Ap)
if 0 <= curv <= eps * psupi_norm2:
# See https://arxiv.org/abs/1803.02924, Algo 1 Capped Conjugate Gradient.
if is_verbose:
print(
f" Inner CG solver iteration {i} stopped with\n"
f" tiny_|p| = eps * ||p||^2, eps = {eps}, "
f"squared L2 norm ||p||^2 = {psupi_norm2}\n"
f" curvature <= tiny_|p|: {curv} <= {eps * psupi_norm2}"
)
break
elif curv < 0:
if i > 0:
if is_verbose:
print(
f" Inner CG solver iteration {i} stopped with negative "
f"curvature, curvature = {curv}"
)
break
else:
# fall back to steepest descent direction
xsupi += dri0 / curv * psupi
if is_verbose:
print(" Inner CG solver iteration 0 fell back to steepest descent")
break
alphai = dri0 / curv
xsupi += alphai * psupi
ri += alphai * Ap
dri1 = np.dot(ri, ri)
betai = dri1 / dri0
psupi = -ri + betai * psupi
# We use |p_i|^2 = |r_i|^2 + beta_i^2 |p_{i-1}|^2
psupi_norm2 = dri1 + betai**2 * psupi_norm2
i = i + 1
dri0 = dri1 # update np.dot(ri,ri) for next time.
if is_verbose and i > maxiter:
print(
f" Inner CG solver stopped reaching maxiter={i - 1} with "
f"sum(|residuals|) = {np.sum(np.abs(ri))}"
)
return xsupi
|
Solve iteratively the linear system 'fhess_p . xsupi = fgrad'
with a conjugate gradient descent.
Parameters
----------
fhess_p : callable
Function that takes the gradient as a parameter and returns the
matrix product of the Hessian and gradient.
fgrad : ndarray of shape (n_features,) or (n_features + 1,)
Gradient vector.
maxiter : int
Number of CG iterations.
tol : float
Stopping criterion.
Returns
-------
xsupi : ndarray of shape (n_features,) or (n_features + 1,)
Estimated solution.
|
_cg
|
python
|
scikit-learn/scikit-learn
|
sklearn/utils/optimize.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/optimize.py
|
BSD-3-Clause
|
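A minimal sketch with an explicit SPD matrix standing in for the Hessian-vector product; with the sign convention used by `_newton_cg`, the returned vector acts as the Newton search direction, so `H @ step` ends up close to `-fgrad` here:

import numpy as np
from sklearn.utils.optimize import _cg

H = np.array([[3.0, 1.0],
              [1.0, 2.0]])          # small SPD "Hessian"
fgrad = np.array([1.0, -4.0])       # current gradient
step = _cg(lambda p: H @ p, fgrad, maxiter=10, tol=1e-10)
print(step)                          # search direction fed to the line search
print(H @ step + fgrad)              # close to zero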
def _newton_cg(
grad_hess,
func,
grad,
x0,
args=(),
tol=1e-4,
maxiter=100,
maxinner=200,
line_search=True,
warn=True,
verbose=0,
):
"""
Minimization of scalar function of one or more variables using the
Newton-CG algorithm.
Parameters
----------
grad_hess : callable
Should return the gradient and a callable returning the matvec product
of the Hessian.
func : callable
Should return the value of the function.
grad : callable
        Should return the gradient of the function. This is used
        by the linesearch functions.
x0 : array of float
Initial guess.
args : tuple, default=()
Arguments passed to func_grad_hess, func and grad.
tol : float, default=1e-4
Stopping criterion. The iteration will stop when
``max{|g_i | i = 1, ..., n} <= tol``
where ``g_i`` is the i-th component of the gradient.
maxiter : int, default=100
Number of Newton iterations.
maxinner : int, default=200
Number of CG iterations.
line_search : bool, default=True
Whether to use a line search or not.
warn : bool, default=True
        Whether to warn when the algorithm did not converge.
Returns
-------
xk : ndarray of float
Estimated minimum.
"""
x0 = np.asarray(x0).flatten()
xk = np.copy(x0)
k = 0
if line_search:
old_fval = func(x0, *args)
old_old_fval = None
else:
old_fval = 0
is_verbose = verbose > 0
# Outer loop: our Newton iteration
while k < maxiter:
# Compute a search direction pk by applying the CG method to
# del2 f(xk) p = - fgrad f(xk) starting from 0.
fgrad, fhess_p = grad_hess(xk, *args)
absgrad = np.abs(fgrad)
max_absgrad = np.max(absgrad)
check = max_absgrad <= tol
if is_verbose:
print(f"Newton-CG iter = {k}")
print(" Check Convergence")
print(f" max |gradient| <= tol: {max_absgrad} <= {tol} {check}")
if check:
break
maggrad = np.sum(absgrad)
eta = min([0.5, np.sqrt(maggrad)])
termcond = eta * maggrad
# Inner loop: solve the Newton update by conjugate gradient, to
# avoid inverting the Hessian
xsupi = _cg(fhess_p, fgrad, maxiter=maxinner, tol=termcond, verbose=verbose)
alphak = 1.0
if line_search:
try:
alphak, fc, gc, old_fval, old_old_fval, gfkp1 = _line_search_wolfe12(
func,
grad,
xk,
xsupi,
fgrad,
old_fval,
old_old_fval,
verbose=verbose,
args=args,
)
except _LineSearchError:
warnings.warn("Line Search failed")
break
xk += alphak * xsupi # upcast if necessary
k += 1
if warn and k >= maxiter:
warnings.warn(
(
f"newton-cg failed to converge at loss = {old_fval}. Increase the"
" number of iterations."
),
ConvergenceWarning,
)
elif is_verbose:
print(f" Solver did converge at loss = {old_fval}.")
return xk, k
|
Minimization of scalar function of one or more variables using the
Newton-CG algorithm.
Parameters
----------
grad_hess : callable
Should return the gradient and a callable returning the matvec product
of the Hessian.
func : callable
Should return the value of the function.
grad : callable
    Should return the gradient of the function. This is used
    by the linesearch functions.
x0 : array of float
Initial guess.
args : tuple, default=()
Arguments passed to func_grad_hess, func and grad.
tol : float, default=1e-4
Stopping criterion. The iteration will stop when
``max{|g_i | i = 1, ..., n} <= tol``
where ``g_i`` is the i-th component of the gradient.
maxiter : int, default=100
Number of Newton iterations.
maxinner : int, default=200
Number of CG iterations.
line_search : bool, default=True
Whether to use a line search or not.
warn : bool, default=True
    Whether to warn when the algorithm did not converge.
Returns
-------
xk : ndarray of float
Estimated minimum.
|
_newton_cg
|
python
|
scikit-learn/scikit-learn
|
sklearn/utils/optimize.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/optimize.py
|
BSD-3-Clause
|
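A hedged end-to-end sketch minimizing a strictly convex quadratic, wiring up the three callables the solver expects (private API; signatures may change between versions):

import numpy as np
from sklearn.utils.optimize import _newton_cg

A = np.array([[3.0, 1.0],
              [1.0, 2.0]])
b = np.array([1.0, -1.0])

def func(x):
    return 0.5 * x @ A @ x - b @ x          # objective value

def grad(x):
    return A @ x - b                        # gradient, used by the line search

def grad_hess(x):
    return grad(x), lambda p: A @ p         # gradient + Hessian-vector product

x_opt, n_iter = _newton_cg(grad_hess, func, grad, x0=np.zeros(2), tol=1e-8)
print(x_opt, np.linalg.solve(A, b))         # both close to the unique minimizer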
def _check_optimize_result(solver, result, max_iter=None, extra_warning_msg=None):
"""Check the OptimizeResult for successful convergence
Parameters
----------
solver : str
Solver name. Currently only `lbfgs` is supported.
result : OptimizeResult
Result of the scipy.optimize.minimize function.
max_iter : int, default=None
Expected maximum number of iterations.
extra_warning_msg : str, default=None
Extra warning message.
Returns
-------
n_iter : int
Number of iterations.
"""
# handle both scipy and scikit-learn solver names
if solver == "lbfgs":
if max_iter is not None:
# In scipy <= 1.0.0, nit may exceed maxiter for lbfgs.
# See https://github.com/scipy/scipy/issues/7854
n_iter_i = min(result.nit, max_iter)
else:
n_iter_i = result.nit
if result.status != 0:
warning_msg = (
f"{solver} failed to converge after {n_iter_i} iteration(s) "
f"(status={result.status}):\n"
f"{result.message}\n"
)
# Append a recommendation to increase iterations only when the
# number of iterations reaches the maximum allowed (max_iter),
# as this suggests the optimization may have been prematurely
# terminated due to the iteration limit.
if max_iter is not None and n_iter_i == max_iter:
warning_msg += (
f"\nIncrease the number of iterations to improve the "
f"convergence (max_iter={max_iter})."
)
warning_msg += (
"\nYou might also want to scale the data as shown in:\n"
" https://scikit-learn.org/stable/modules/"
"preprocessing.html"
)
if extra_warning_msg is not None:
warning_msg += "\n" + extra_warning_msg
warnings.warn(warning_msg, ConvergenceWarning, stacklevel=2)
else:
raise NotImplementedError
return n_iter_i
|
Check the OptimizeResult for successful convergence
Parameters
----------
solver : str
Solver name. Currently only `lbfgs` is supported.
result : OptimizeResult
Result of the scipy.optimize.minimize function.
max_iter : int, default=None
Expected maximum number of iterations.
extra_warning_msg : str, default=None
Extra warning message.
Returns
-------
n_iter : int
Number of iterations.
|
_check_optimize_result
|
python
|
scikit-learn/scikit-learn
|
sklearn/utils/optimize.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/optimize.py
|
BSD-3-Clause
|
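A short sketch of the intended call pattern: run scipy's L-BFGS-B and hand the `OptimizeResult` to the checker (a well-conditioned problem like this converges, so no `ConvergenceWarning` is expected):

import numpy as np
from scipy.optimize import minimize
from sklearn.utils.optimize import _check_optimize_result

result = minimize(
    lambda w: np.sum((w - 3.0) ** 2),
    x0=np.zeros(2),
    method="L-BFGS-B",
    options={"maxiter": 100},
)
n_iter = _check_optimize_result("lbfgs", result, max_iter=100)
print(n_iter, result.status)  # small iteration count, status 0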
def _with_config_and_warning_filters(delayed_func, config, warning_filters):
"""Helper function that intends to attach a config to a delayed function."""
if hasattr(delayed_func, "with_config_and_warning_filters"):
return delayed_func.with_config_and_warning_filters(config, warning_filters)
else:
warnings.warn(
(
"`sklearn.utils.parallel.Parallel` needs to be used in "
"conjunction with `sklearn.utils.parallel.delayed` instead of "
"`joblib.delayed` to correctly propagate the scikit-learn "
"configuration to the joblib workers."
),
UserWarning,
)
return delayed_func
|
Helper to attach a config and warning filters to a delayed function.
|
_with_config_and_warning_filters
|
python
|
scikit-learn/scikit-learn
|
sklearn/utils/parallel.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/parallel.py
|
BSD-3-Clause
|
def __call__(self, iterable):
"""Dispatch the tasks and return the results.
Parameters
----------
iterable : iterable
Iterable containing tuples of (delayed_function, args, kwargs) that should
be consumed.
Returns
-------
results : list
List of results of the tasks.
"""
# Capture the thread-local scikit-learn configuration at the time
# Parallel.__call__ is issued since the tasks can be dispatched
# in a different thread depending on the backend and on the value of
# pre_dispatch and n_jobs.
config = get_config()
warning_filters = warnings.filters
iterable_with_config_and_warning_filters = (
(
_with_config_and_warning_filters(delayed_func, config, warning_filters),
args,
kwargs,
)
for delayed_func, args, kwargs in iterable
)
return super().__call__(iterable_with_config_and_warning_filters)
|
Dispatch the tasks and return the results.
Parameters
----------
iterable : iterable
Iterable containing tuples of (delayed_function, args, kwargs) that should
be consumed.
Returns
-------
results : list
List of results of the tasks.
|
__call__
|
python
|
scikit-learn/scikit-learn
|
sklearn/utils/parallel.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/parallel.py
|
BSD-3-Clause
|
def delayed(function):
"""Decorator used to capture the arguments of a function.
This alternative to `joblib.delayed` is meant to be used in conjunction
with `sklearn.utils.parallel.Parallel`. The latter captures the scikit-
learn configuration by calling `sklearn.get_config()` in the current
thread, prior to dispatching the first task. The captured configuration is
then propagated and enabled for the duration of the execution of the
delayed function in the joblib workers.
.. versionchanged:: 1.3
`delayed` was moved from `sklearn.utils.fixes` to `sklearn.utils.parallel`
in scikit-learn 1.3.
Parameters
----------
function : callable
The function to be delayed.
Returns
-------
output: tuple
Tuple containing the delayed function, the positional arguments, and the
keyword arguments.
"""
@functools.wraps(function)
def delayed_function(*args, **kwargs):
return _FuncWrapper(function), args, kwargs
return delayed_function
|
Decorator used to capture the arguments of a function.
This alternative to `joblib.delayed` is meant to be used in conjunction
with `sklearn.utils.parallel.Parallel`. The latter captures the scikit-
learn configuration by calling `sklearn.get_config()` in the current
thread, prior to dispatching the first task. The captured configuration is
then propagated and enabled for the duration of the execution of the
delayed function in the joblib workers.
.. versionchanged:: 1.3
`delayed` was moved from `sklearn.utils.fixes` to `sklearn.utils.parallel`
in scikit-learn 1.3.
Parameters
----------
function : callable
The function to be delayed.
Returns
-------
output: tuple
Tuple containing the delayed function, the positional arguments, and the
keyword arguments.
|
delayed
|
python
|
scikit-learn/scikit-learn
|
sklearn/utils/parallel.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/parallel.py
|
BSD-3-Clause
|
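The intended pairing of `Parallel` and `delayed` from `sklearn.utils.parallel`, shown on a trivial task (a sketch; the point is only that the scikit-learn config and warning filters propagate to the workers):

from math import sqrt
from sklearn.utils.parallel import Parallel, delayed

results = Parallel(n_jobs=2)(delayed(sqrt)(i) for i in range(5))
print(results)  # [0.0, 1.0, 1.414..., 1.732..., 2.0]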
def _get_threadpool_controller():
"""Return the global threadpool controller instance."""
global _threadpool_controller
if _threadpool_controller is None:
_threadpool_controller = ThreadpoolController()
return _threadpool_controller
|
Return the global threadpool controller instance.
|
_get_threadpool_controller
|
python
|
scikit-learn/scikit-learn
|
sklearn/utils/parallel.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/parallel.py
|
BSD-3-Clause
|
def _random_choice_csc(n_samples, classes, class_probability=None, random_state=None):
"""Generate a sparse random matrix given column class distributions
Parameters
----------
    n_samples : int
Number of samples to draw in each column.
classes : list of size n_outputs of arrays of size (n_classes,)
List of classes for each column.
class_probability : list of size n_outputs of arrays of \
shape (n_classes,), default=None
Class distribution of each column. If None, uniform distribution is
assumed.
random_state : int, RandomState instance or None, default=None
Controls the randomness of the sampled classes.
See :term:`Glossary <random_state>`.
Returns
-------
random_matrix : sparse csc matrix of size (n_samples, n_outputs)
"""
data = array.array("i")
indices = array.array("i")
indptr = array.array("i", [0])
for j in range(len(classes)):
classes[j] = np.asarray(classes[j])
if classes[j].dtype.kind != "i":
raise ValueError("class dtype %s is not supported" % classes[j].dtype)
classes[j] = classes[j].astype(np.int64, copy=False)
# use uniform distribution if no class_probability is given
if class_probability is None:
class_prob_j = np.empty(shape=classes[j].shape[0])
class_prob_j.fill(1 / classes[j].shape[0])
else:
class_prob_j = np.asarray(class_probability[j])
if not np.isclose(np.sum(class_prob_j), 1.0):
raise ValueError(
"Probability array at index {0} does not sum to one".format(j)
)
if class_prob_j.shape[0] != classes[j].shape[0]:
raise ValueError(
"classes[{0}] (length {1}) and "
"class_probability[{0}] (length {2}) have "
"different length.".format(
j, classes[j].shape[0], class_prob_j.shape[0]
)
)
# If 0 is not present in the classes insert it with a probability 0.0
if 0 not in classes[j]:
classes[j] = np.insert(classes[j], 0, 0)
class_prob_j = np.insert(class_prob_j, 0, 0.0)
# If there are nonzero classes choose randomly using class_probability
rng = check_random_state(random_state)
if classes[j].shape[0] > 1:
index_class_0 = np.flatnonzero(classes[j] == 0).item()
p_nonzero = 1 - class_prob_j[index_class_0]
nnz = int(n_samples * p_nonzero)
ind_sample = sample_without_replacement(
n_population=n_samples, n_samples=nnz, random_state=random_state
)
indices.extend(ind_sample)
# Normalize probabilities for the nonzero elements
classes_j_nonzero = classes[j] != 0
class_probability_nz = class_prob_j[classes_j_nonzero]
class_probability_nz_norm = class_probability_nz / np.sum(
class_probability_nz
)
classes_ind = np.searchsorted(
class_probability_nz_norm.cumsum(), rng.uniform(size=nnz)
)
data.extend(classes[j][classes_j_nonzero][classes_ind])
indptr.append(len(indices))
return sp.csc_matrix((data, indices, indptr), (n_samples, len(classes)), dtype=int)
|
Generate a sparse random matrix given column class distributions
Parameters
----------
n_samples : int
Number of samples to draw in each column.
classes : list of size n_outputs of arrays of size (n_classes,)
List of classes for each column.
class_probability : list of size n_outputs of arrays of shape (n_classes,), default=None
Class distribution of each column. If None, uniform distribution is
assumed.
random_state : int, RandomState instance or None, default=None
Controls the randomness of the sampled classes.
See :term:`Glossary <random_state>`.
Returns
-------
random_matrix : sparse csc matrix of size (n_samples, n_outputs)
|
_random_choice_csc
|
python
|
scikit-learn/scikit-learn
|
sklearn/utils/random.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/random.py
|
BSD-3-Clause
|
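A hedged example drawing a small sparse label matrix with two outputs (integer classes and per-column probabilities summing to one, as the helper requires; private API):

import numpy as np
from sklearn.utils.random import _random_choice_csc

classes = [np.array([0, 1, 2]), np.array([0, 5])]
probs = [np.array([0.5, 0.3, 0.2]), np.array([0.9, 0.1])]
mat = _random_choice_csc(10, classes, class_probability=probs, random_state=0)
print(mat.shape)      # (10, 2)
print(mat.toarray())  # mostly zeros, with draws from {1, 2} and {5}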
def _raise_typeerror(X):
"""Raises a TypeError if X is not a CSR or CSC matrix"""
input_type = X.format if sp.issparse(X) else type(X)
err = "Expected a CSR or CSC sparse matrix, got %s." % input_type
raise TypeError(err)
|
Raises a TypeError if X is not a CSR or CSC matrix
|
_raise_typeerror
|
python
|
scikit-learn/scikit-learn
|
sklearn/utils/sparsefuncs.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/sparsefuncs.py
|
BSD-3-Clause
|
def inplace_csr_column_scale(X, scale):
"""Inplace column scaling of a CSR matrix.
Scale each feature of the data matrix by multiplying with specific scale
provided by the caller assuming a (n_samples, n_features) shape.
Parameters
----------
X : sparse matrix of shape (n_samples, n_features)
Matrix to normalize using the variance of the features.
It should be of CSR format.
scale : ndarray of shape (n_features,), dtype={np.float32, np.float64}
Array of precomputed feature-wise values to use for scaling.
Examples
--------
>>> from sklearn.utils import sparsefuncs
>>> from scipy import sparse
>>> import numpy as np
>>> indptr = np.array([0, 3, 4, 4, 4])
>>> indices = np.array([0, 1, 2, 2])
>>> data = np.array([8, 1, 2, 5])
>>> scale = np.array([2, 3, 2])
>>> csr = sparse.csr_matrix((data, indices, indptr))
>>> csr.todense()
matrix([[8, 1, 2],
[0, 0, 5],
[0, 0, 0],
[0, 0, 0]])
>>> sparsefuncs.inplace_csr_column_scale(csr, scale)
>>> csr.todense()
matrix([[16, 3, 4],
[ 0, 0, 10],
[ 0, 0, 0],
[ 0, 0, 0]])
"""
assert scale.shape[0] == X.shape[1]
X.data *= scale.take(X.indices, mode="clip")
|
Inplace column scaling of a CSR matrix.
Scale each feature of the data matrix by multiplying with specific scale
provided by the caller assuming a (n_samples, n_features) shape.
Parameters
----------
X : sparse matrix of shape (n_samples, n_features)
Matrix to normalize using the variance of the features.
It should be of CSR format.
scale : ndarray of shape (n_features,), dtype={np.float32, np.float64}
Array of precomputed feature-wise values to use for scaling.
Examples
--------
>>> from sklearn.utils import sparsefuncs
>>> from scipy import sparse
>>> import numpy as np
>>> indptr = np.array([0, 3, 4, 4, 4])
>>> indices = np.array([0, 1, 2, 2])
>>> data = np.array([8, 1, 2, 5])
>>> scale = np.array([2, 3, 2])
>>> csr = sparse.csr_matrix((data, indices, indptr))
>>> csr.todense()
matrix([[8, 1, 2],
[0, 0, 5],
[0, 0, 0],
[0, 0, 0]])
>>> sparsefuncs.inplace_csr_column_scale(csr, scale)
>>> csr.todense()
matrix([[16, 3, 4],
[ 0, 0, 10],
[ 0, 0, 0],
[ 0, 0, 0]])
|
inplace_csr_column_scale
|
python
|
scikit-learn/scikit-learn
|
sklearn/utils/sparsefuncs.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/sparsefuncs.py
|
BSD-3-Clause
|
def inplace_csr_row_scale(X, scale):
"""Inplace row scaling of a CSR matrix.
Scale each sample of the data matrix by multiplying with specific scale
provided by the caller assuming a (n_samples, n_features) shape.
Parameters
----------
X : sparse matrix of shape (n_samples, n_features)
Matrix to be scaled. It should be of CSR format.
scale : ndarray of float of shape (n_samples,)
Array of precomputed sample-wise values to use for scaling.
"""
assert scale.shape[0] == X.shape[0]
X.data *= np.repeat(scale, np.diff(X.indptr))
|
Inplace row scaling of a CSR matrix.
Scale each sample of the data matrix by multiplying with specific scale
provided by the caller assuming a (n_samples, n_features) shape.
Parameters
----------
X : sparse matrix of shape (n_samples, n_features)
Matrix to be scaled. It should be of CSR format.
scale : ndarray of float of shape (n_samples,)
Array of precomputed sample-wise values to use for scaling.
|
inplace_csr_row_scale
|
python
|
scikit-learn/scikit-learn
|
sklearn/utils/sparsefuncs.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/sparsefuncs.py
|
BSD-3-Clause
|
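A brief sketch mirroring the column-scaling example above, but per row (the scale vector has one entry per sample):

import numpy as np
from scipy import sparse
from sklearn.utils.sparsefuncs import inplace_csr_row_scale

X = sparse.csr_matrix(np.array([[1.0, 0.0, 2.0],
                                [0.0, 3.0, 0.0]]))
inplace_csr_row_scale(X, np.array([10.0, 0.5]))
print(X.toarray())  # [[10.   0.  20. ], [ 0.   1.5  0. ]]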
def mean_variance_axis(X, axis, weights=None, return_sum_weights=False):
"""Compute mean and variance along an axis on a CSR or CSC matrix.
Parameters
----------
X : sparse matrix of shape (n_samples, n_features)
Input data. It can be of CSR or CSC format.
axis : {0, 1}
        Axis along which the mean and variance are computed.
weights : ndarray of shape (n_samples,) or (n_features,), default=None
If axis is set to 0 shape is (n_samples,) or
if axis is set to 1 shape is (n_features,).
If it is set to None, then samples are equally weighted.
.. versionadded:: 0.24
return_sum_weights : bool, default=False
If True, returns the sum of weights seen for each feature
if `axis=0` or each sample if `axis=1`.
.. versionadded:: 0.24
Returns
-------
means : ndarray of shape (n_features,), dtype=floating
Feature-wise means.
variances : ndarray of shape (n_features,), dtype=floating
Feature-wise variances.
sum_weights : ndarray of shape (n_features,), dtype=floating
Returned if `return_sum_weights` is `True`.
Examples
--------
>>> from sklearn.utils import sparsefuncs
>>> from scipy import sparse
>>> import numpy as np
>>> indptr = np.array([0, 3, 4, 4, 4])
>>> indices = np.array([0, 1, 2, 2])
>>> data = np.array([8, 1, 2, 5])
>>> scale = np.array([2, 3, 2])
>>> csr = sparse.csr_matrix((data, indices, indptr))
>>> csr.todense()
matrix([[8, 1, 2],
[0, 0, 5],
[0, 0, 0],
[0, 0, 0]])
>>> sparsefuncs.mean_variance_axis(csr, axis=0)
(array([2. , 0.25, 1.75]), array([12. , 0.1875, 4.1875]))
"""
_raise_error_wrong_axis(axis)
if sp.issparse(X) and X.format == "csr":
if axis == 0:
return _csr_mean_var_axis0(
X, weights=weights, return_sum_weights=return_sum_weights
)
else:
return _csc_mean_var_axis0(
X.T, weights=weights, return_sum_weights=return_sum_weights
)
elif sp.issparse(X) and X.format == "csc":
if axis == 0:
return _csc_mean_var_axis0(
X, weights=weights, return_sum_weights=return_sum_weights
)
else:
return _csr_mean_var_axis0(
X.T, weights=weights, return_sum_weights=return_sum_weights
)
else:
_raise_typeerror(X)
|
Compute mean and variance along an axis on a CSR or CSC matrix.
Parameters
----------
X : sparse matrix of shape (n_samples, n_features)
Input data. It can be of CSR or CSC format.
axis : {0, 1}
    Axis along which the mean and variance are computed.
weights : ndarray of shape (n_samples,) or (n_features,), default=None
If axis is set to 0 shape is (n_samples,) or
if axis is set to 1 shape is (n_features,).
If it is set to None, then samples are equally weighted.
.. versionadded:: 0.24
return_sum_weights : bool, default=False
If True, returns the sum of weights seen for each feature
if `axis=0` or each sample if `axis=1`.
.. versionadded:: 0.24
Returns
-------
means : ndarray of shape (n_features,), dtype=floating
Feature-wise means.
variances : ndarray of shape (n_features,), dtype=floating
Feature-wise variances.
sum_weights : ndarray of shape (n_features,), dtype=floating
Returned if `return_sum_weights` is `True`.
Examples
--------
>>> from sklearn.utils import sparsefuncs
>>> from scipy import sparse
>>> import numpy as np
>>> indptr = np.array([0, 3, 4, 4, 4])
>>> indices = np.array([0, 1, 2, 2])
>>> data = np.array([8, 1, 2, 5])
>>> scale = np.array([2, 3, 2])
>>> csr = sparse.csr_matrix((data, indices, indptr))
>>> csr.todense()
matrix([[8, 1, 2],
[0, 0, 5],
[0, 0, 0],
[0, 0, 0]])
>>> sparsefuncs.mean_variance_axis(csr, axis=0)
(array([2. , 0.25, 1.75]), array([12. , 0.1875, 4.1875]))
|
mean_variance_axis
|
python
|
scikit-learn/scikit-learn
|
sklearn/utils/sparsefuncs.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/sparsefuncs.py
|
BSD-3-Clause
|
def incr_mean_variance_axis(X, *, axis, last_mean, last_var, last_n, weights=None):
"""Compute incremental mean and variance along an axis on a CSR or CSC matrix.
last_mean, last_var are the statistics computed at the last step by this
function. Both must be initialized to 0-arrays of the proper size, i.e.
the number of features in X. last_n is the number of samples encountered
until now.
Parameters
----------
X : CSR or CSC sparse matrix of shape (n_samples, n_features)
Input data.
axis : {0, 1}
        Axis along which the mean and variance are computed.
last_mean : ndarray of shape (n_features,) or (n_samples,), dtype=floating
Array of means to update with the new data X.
Should be of shape (n_features,) if axis=0 or (n_samples,) if axis=1.
last_var : ndarray of shape (n_features,) or (n_samples,), dtype=floating
Array of variances to update with the new data X.
Should be of shape (n_features,) if axis=0 or (n_samples,) if axis=1.
last_n : float or ndarray of shape (n_features,) or (n_samples,), \
dtype=floating
Sum of the weights seen so far, excluding the current weights
If not float, it should be of shape (n_features,) if
axis=0 or (n_samples,) if axis=1. If float it corresponds to
having same weights for all samples (or features).
weights : ndarray of shape (n_samples,) or (n_features,), default=None
If axis is set to 0 shape is (n_samples,) or
if axis is set to 1 shape is (n_features,).
If it is set to None, then samples are equally weighted.
.. versionadded:: 0.24
Returns
-------
means : ndarray of shape (n_features,) or (n_samples,), dtype=floating
Updated feature-wise means if axis = 0 or
sample-wise means if axis = 1.
variances : ndarray of shape (n_features,) or (n_samples,), dtype=floating
Updated feature-wise variances if axis = 0 or
sample-wise variances if axis = 1.
n : ndarray of shape (n_features,) or (n_samples,), dtype=integral
Updated number of seen samples per feature if axis=0
or number of seen features per sample if axis=1.
If weights is not None, n is a sum of the weights of the seen
samples or features instead of the actual number of seen
samples or features.
Notes
-----
NaNs are ignored in the algorithm.
Examples
--------
>>> from sklearn.utils import sparsefuncs
>>> from scipy import sparse
>>> import numpy as np
>>> indptr = np.array([0, 3, 4, 4, 4])
>>> indices = np.array([0, 1, 2, 2])
>>> data = np.array([8, 1, 2, 5])
>>> scale = np.array([2, 3, 2])
>>> csr = sparse.csr_matrix((data, indices, indptr))
>>> csr.todense()
matrix([[8, 1, 2],
[0, 0, 5],
[0, 0, 0],
[0, 0, 0]])
>>> sparsefuncs.incr_mean_variance_axis(
... csr, axis=0, last_mean=np.zeros(3), last_var=np.zeros(3), last_n=2
... )
(array([1.33, 0.167, 1.17]), array([8.88, 0.139, 3.47]),
array([6., 6., 6.]))
"""
_raise_error_wrong_axis(axis)
if not (sp.issparse(X) and X.format in ("csc", "csr")):
_raise_typeerror(X)
if np.size(last_n) == 1:
last_n = np.full(last_mean.shape, last_n, dtype=last_mean.dtype)
if not (np.size(last_mean) == np.size(last_var) == np.size(last_n)):
raise ValueError("last_mean, last_var, last_n do not have the same shapes.")
if axis == 1:
if np.size(last_mean) != X.shape[0]:
raise ValueError(
"If axis=1, then last_mean, last_n, last_var should be of "
f"size n_samples {X.shape[0]} (Got {np.size(last_mean)})."
)
else: # axis == 0
if np.size(last_mean) != X.shape[1]:
raise ValueError(
"If axis=0, then last_mean, last_n, last_var should be of "
f"size n_features {X.shape[1]} (Got {np.size(last_mean)})."
)
X = X.T if axis == 1 else X
if weights is not None:
weights = _check_sample_weight(weights, X, dtype=X.dtype)
return _incr_mean_var_axis0(
X, last_mean=last_mean, last_var=last_var, last_n=last_n, weights=weights
)
|
Compute incremental mean and variance along an axis on a CSR or CSC matrix.
last_mean, last_var are the statistics computed at the last step by this
function. Both must be initialized to 0-arrays of the proper size, i.e.
the number of features in X. last_n is the number of samples encountered
until now.
Parameters
----------
X : CSR or CSC sparse matrix of shape (n_samples, n_features)
Input data.
axis : {0, 1}
    Axis along which the mean and variance are computed.
last_mean : ndarray of shape (n_features,) or (n_samples,), dtype=floating
Array of means to update with the new data X.
Should be of shape (n_features,) if axis=0 or (n_samples,) if axis=1.
last_var : ndarray of shape (n_features,) or (n_samples,), dtype=floating
Array of variances to update with the new data X.
Should be of shape (n_features,) if axis=0 or (n_samples,) if axis=1.
last_n : float or ndarray of shape (n_features,) or (n_samples,), dtype=floating
Sum of the weights seen so far, excluding the current weights
If not float, it should be of shape (n_features,) if
axis=0 or (n_samples,) if axis=1. If float it corresponds to
having same weights for all samples (or features).
weights : ndarray of shape (n_samples,) or (n_features,), default=None
If axis is set to 0 shape is (n_samples,) or
if axis is set to 1 shape is (n_features,).
If it is set to None, then samples are equally weighted.
.. versionadded:: 0.24
Returns
-------
means : ndarray of shape (n_features,) or (n_samples,), dtype=floating
Updated feature-wise means if axis = 0 or
sample-wise means if axis = 1.
variances : ndarray of shape (n_features,) or (n_samples,), dtype=floating
Updated feature-wise variances if axis = 0 or
sample-wise variances if axis = 1.
n : ndarray of shape (n_features,) or (n_samples,), dtype=integral
Updated number of seen samples per feature if axis=0
or number of seen features per sample if axis=1.
If weights is not None, n is a sum of the weights of the seen
samples or features instead of the actual number of seen
samples or features.
Notes
-----
NaNs are ignored in the algorithm.
Examples
--------
>>> from sklearn.utils import sparsefuncs
>>> from scipy import sparse
>>> import numpy as np
>>> indptr = np.array([0, 3, 4, 4, 4])
>>> indices = np.array([0, 1, 2, 2])
>>> data = np.array([8, 1, 2, 5])
>>> scale = np.array([2, 3, 2])
>>> csr = sparse.csr_matrix((data, indices, indptr))
>>> csr.todense()
matrix([[8, 1, 2],
[0, 0, 5],
[0, 0, 0],
[0, 0, 0]])
>>> sparsefuncs.incr_mean_variance_axis(
... csr, axis=0, last_mean=np.zeros(3), last_var=np.zeros(3), last_n=2
... )
(array([1.33, 0.167, 1.17]), array([8.88, 0.139, 3.47]),
array([6., 6., 6.]))
|
incr_mean_variance_axis
|
python
|
scikit-learn/scikit-learn
|
sklearn/utils/sparsefuncs.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/sparsefuncs.py
|
BSD-3-Clause
|
def inplace_column_scale(X, scale):
"""Inplace column scaling of a CSC/CSR matrix.
Scale each feature of the data matrix by multiplying with specific scale
provided by the caller assuming a (n_samples, n_features) shape.
Parameters
----------
X : sparse matrix of shape (n_samples, n_features)
Matrix to normalize using the variance of the features. It should be
of CSC or CSR format.
scale : ndarray of shape (n_features,), dtype={np.float32, np.float64}
Array of precomputed feature-wise values to use for scaling.
Examples
--------
>>> from sklearn.utils import sparsefuncs
>>> from scipy import sparse
>>> import numpy as np
>>> indptr = np.array([0, 3, 4, 4, 4])
>>> indices = np.array([0, 1, 2, 2])
>>> data = np.array([8, 1, 2, 5])
>>> scale = np.array([2, 3, 2])
>>> csr = sparse.csr_matrix((data, indices, indptr))
>>> csr.todense()
matrix([[8, 1, 2],
[0, 0, 5],
[0, 0, 0],
[0, 0, 0]])
>>> sparsefuncs.inplace_column_scale(csr, scale)
>>> csr.todense()
matrix([[16, 3, 4],
[ 0, 0, 10],
[ 0, 0, 0],
[ 0, 0, 0]])
"""
if sp.issparse(X) and X.format == "csc":
inplace_csr_row_scale(X.T, scale)
elif sp.issparse(X) and X.format == "csr":
inplace_csr_column_scale(X, scale)
else:
_raise_typeerror(X)
|
Inplace column scaling of a CSC/CSR matrix.
Scale each feature of the data matrix by multiplying with specific scale
provided by the caller assuming a (n_samples, n_features) shape.
Parameters
----------
X : sparse matrix of shape (n_samples, n_features)
Matrix to normalize using the variance of the features. It should be
of CSC or CSR format.
scale : ndarray of shape (n_features,), dtype={np.float32, np.float64}
Array of precomputed feature-wise values to use for scaling.
Examples
--------
>>> from sklearn.utils import sparsefuncs
>>> from scipy import sparse
>>> import numpy as np
>>> indptr = np.array([0, 3, 4, 4, 4])
>>> indices = np.array([0, 1, 2, 2])
>>> data = np.array([8, 1, 2, 5])
>>> scale = np.array([2, 3, 2])
>>> csr = sparse.csr_matrix((data, indices, indptr))
>>> csr.todense()
matrix([[8, 1, 2],
[0, 0, 5],
[0, 0, 0],
[0, 0, 0]])
>>> sparsefuncs.inplace_column_scale(csr, scale)
>>> csr.todense()
matrix([[16, 3, 4],
[ 0, 0, 10],
[ 0, 0, 0],
[ 0, 0, 0]])
|
inplace_column_scale
|
python
|
scikit-learn/scikit-learn
|
sklearn/utils/sparsefuncs.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/sparsefuncs.py
|
BSD-3-Clause
|
def inplace_row_scale(X, scale):
"""Inplace row scaling of a CSR or CSC matrix.
Scale each row of the data matrix by multiplying with specific scale
provided by the caller assuming a (n_samples, n_features) shape.
Parameters
----------
X : sparse matrix of shape (n_samples, n_features)
Matrix to be scaled. It should be of CSR or CSC format.
    scale : ndarray of shape (n_samples,), dtype={np.float32, np.float64}
Array of precomputed sample-wise values to use for scaling.
Examples
--------
>>> from sklearn.utils import sparsefuncs
>>> from scipy import sparse
>>> import numpy as np
>>> indptr = np.array([0, 2, 3, 4, 5])
>>> indices = np.array([0, 1, 2, 3, 3])
>>> data = np.array([8, 1, 2, 5, 6])
>>> scale = np.array([2, 3, 4, 5])
>>> csr = sparse.csr_matrix((data, indices, indptr))
>>> csr.todense()
matrix([[8, 1, 0, 0],
[0, 0, 2, 0],
[0, 0, 0, 5],
[0, 0, 0, 6]])
>>> sparsefuncs.inplace_row_scale(csr, scale)
>>> csr.todense()
matrix([[16, 2, 0, 0],
[ 0, 0, 6, 0],
[ 0, 0, 0, 20],
[ 0, 0, 0, 30]])
"""
if sp.issparse(X) and X.format == "csc":
inplace_csr_column_scale(X.T, scale)
elif sp.issparse(X) and X.format == "csr":
inplace_csr_row_scale(X, scale)
else:
_raise_typeerror(X)
|
Inplace row scaling of a CSR or CSC matrix.
Scale each row of the data matrix by multiplying with specific scale
provided by the caller assuming a (n_samples, n_features) shape.
Parameters
----------
X : sparse matrix of shape (n_samples, n_features)
Matrix to be scaled. It should be of CSR or CSC format.
scale : ndarray of shape (n_samples,), dtype={np.float32, np.float64}
Array of precomputed sample-wise values to use for scaling.
Examples
--------
>>> from sklearn.utils import sparsefuncs
>>> from scipy import sparse
>>> import numpy as np
>>> indptr = np.array([0, 2, 3, 4, 5])
>>> indices = np.array([0, 1, 2, 3, 3])
>>> data = np.array([8, 1, 2, 5, 6])
>>> scale = np.array([2, 3, 4, 5])
>>> csr = sparse.csr_matrix((data, indices, indptr))
>>> csr.todense()
matrix([[8, 1, 0, 0],
[0, 0, 2, 0],
[0, 0, 0, 5],
[0, 0, 0, 6]])
>>> sparsefuncs.inplace_row_scale(csr, scale)
>>> csr.todense()
matrix([[16, 2, 0, 0],
[ 0, 0, 6, 0],
[ 0, 0, 0, 20],
[ 0, 0, 0, 30]])
|
inplace_row_scale
|
python
|
scikit-learn/scikit-learn
|
sklearn/utils/sparsefuncs.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/sparsefuncs.py
|
BSD-3-Clause
|
def inplace_swap_row_csc(X, m, n):
"""Swap two rows of a CSC matrix in-place.
Parameters
----------
X : sparse matrix of shape (n_samples, n_features)
Matrix whose two rows are to be swapped. It should be of
CSC format.
m : int
Index of the row of X to be swapped.
n : int
Index of the row of X to be swapped.
"""
for t in [m, n]:
if isinstance(t, np.ndarray):
raise TypeError("m and n should be valid integers")
if m < 0:
m += X.shape[0]
if n < 0:
n += X.shape[0]
m_mask = X.indices == m
X.indices[X.indices == n] = m
X.indices[m_mask] = n
|
Swap two rows of a CSC matrix in-place.
Parameters
----------
X : sparse matrix of shape (n_samples, n_features)
Matrix whose two rows are to be swapped. It should be of
CSC format.
m : int
Index of the row of X to be swapped.
n : int
Index of the row of X to be swapped.
|
inplace_swap_row_csc
|
python
|
scikit-learn/scikit-learn
|
sklearn/utils/sparsefuncs.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/sparsefuncs.py
|
BSD-3-Clause
|
def inplace_swap_row_csr(X, m, n):
"""Swap two rows of a CSR matrix in-place.
Parameters
----------
X : sparse matrix of shape (n_samples, n_features)
Matrix whose two rows are to be swapped. It should be of
CSR format.
m : int
Index of the row of X to be swapped.
n : int
Index of the row of X to be swapped.
"""
for t in [m, n]:
if isinstance(t, np.ndarray):
raise TypeError("m and n should be valid integers")
if m < 0:
m += X.shape[0]
if n < 0:
n += X.shape[0]
# The following swapping makes life easier since m is assumed to be the
# smaller integer below.
if m > n:
m, n = n, m
indptr = X.indptr
m_start = indptr[m]
m_stop = indptr[m + 1]
n_start = indptr[n]
n_stop = indptr[n + 1]
nz_m = m_stop - m_start
nz_n = n_stop - n_start
if nz_m != nz_n:
# Modify indptr first
X.indptr[m + 2 : n] += nz_n - nz_m
X.indptr[m + 1] = m_start + nz_n
X.indptr[n] = n_stop - nz_m
X.indices = np.concatenate(
[
X.indices[:m_start],
X.indices[n_start:n_stop],
X.indices[m_stop:n_start],
X.indices[m_start:m_stop],
X.indices[n_stop:],
]
)
X.data = np.concatenate(
[
X.data[:m_start],
X.data[n_start:n_stop],
X.data[m_stop:n_start],
X.data[m_start:m_stop],
X.data[n_stop:],
]
)
|
Swap two rows of a CSR matrix in-place.
Parameters
----------
X : sparse matrix of shape (n_samples, n_features)
Matrix whose two rows are to be swapped. It should be of
CSR format.
m : int
Index of the row of X to be swapped.
n : int
Index of the row of X to be swapped.
|
inplace_swap_row_csr
|
python
|
scikit-learn/scikit-learn
|
sklearn/utils/sparsefuncs.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/sparsefuncs.py
|
BSD-3-Clause
|
def inplace_swap_row(X, m, n):
"""
Swap two rows of a CSC/CSR matrix in-place.
Parameters
----------
X : sparse matrix of shape (n_samples, n_features)
Matrix whose two rows are to be swapped. It should be of CSR or
CSC format.
m : int
Index of the row of X to be swapped.
n : int
Index of the row of X to be swapped.
Examples
--------
>>> from sklearn.utils import sparsefuncs
>>> from scipy import sparse
>>> import numpy as np
>>> indptr = np.array([0, 2, 3, 3, 3])
>>> indices = np.array([0, 2, 2])
>>> data = np.array([8, 2, 5])
>>> csr = sparse.csr_matrix((data, indices, indptr))
>>> csr.todense()
matrix([[8, 0, 2],
[0, 0, 5],
[0, 0, 0],
[0, 0, 0]])
>>> sparsefuncs.inplace_swap_row(csr, 0, 1)
>>> csr.todense()
matrix([[0, 0, 5],
[8, 0, 2],
[0, 0, 0],
[0, 0, 0]])
"""
if sp.issparse(X) and X.format == "csc":
inplace_swap_row_csc(X, m, n)
elif sp.issparse(X) and X.format == "csr":
inplace_swap_row_csr(X, m, n)
else:
_raise_typeerror(X)
|
Swap two rows of a CSC/CSR matrix in-place.
Parameters
----------
X : sparse matrix of shape (n_samples, n_features)
Matrix whose two rows are to be swapped. It should be of CSR or
CSC format.
m : int
Index of the row of X to be swapped.
n : int
Index of the row of X to be swapped.
Examples
--------
>>> from sklearn.utils import sparsefuncs
>>> from scipy import sparse
>>> import numpy as np
>>> indptr = np.array([0, 2, 3, 3, 3])
>>> indices = np.array([0, 2, 2])
>>> data = np.array([8, 2, 5])
>>> csr = sparse.csr_matrix((data, indices, indptr))
>>> csr.todense()
matrix([[8, 0, 2],
[0, 0, 5],
[0, 0, 0],
[0, 0, 0]])
>>> sparsefuncs.inplace_swap_row(csr, 0, 1)
>>> csr.todense()
matrix([[0, 0, 5],
[8, 0, 2],
[0, 0, 0],
[0, 0, 0]])
|
inplace_swap_row
|
python
|
scikit-learn/scikit-learn
|
sklearn/utils/sparsefuncs.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/sparsefuncs.py
|
BSD-3-Clause
|
def inplace_swap_column(X, m, n):
"""
Swap two columns of a CSC/CSR matrix in-place.
Parameters
----------
X : sparse matrix of shape (n_samples, n_features)
Matrix whose two columns are to be swapped. It should be of
CSR or CSC format.
m : int
Index of the column of X to be swapped.
n : int
Index of the column of X to be swapped.
Examples
--------
>>> from sklearn.utils import sparsefuncs
>>> from scipy import sparse
>>> import numpy as np
>>> indptr = np.array([0, 2, 3, 3, 3])
>>> indices = np.array([0, 2, 2])
>>> data = np.array([8, 2, 5])
>>> csr = sparse.csr_matrix((data, indices, indptr))
>>> csr.todense()
matrix([[8, 0, 2],
[0, 0, 5],
[0, 0, 0],
[0, 0, 0]])
>>> sparsefuncs.inplace_swap_column(csr, 0, 1)
>>> csr.todense()
matrix([[0, 8, 2],
[0, 0, 5],
[0, 0, 0],
[0, 0, 0]])
"""
if m < 0:
m += X.shape[1]
if n < 0:
n += X.shape[1]
if sp.issparse(X) and X.format == "csc":
inplace_swap_row_csr(X, m, n)
elif sp.issparse(X) and X.format == "csr":
inplace_swap_row_csc(X, m, n)
else:
_raise_typeerror(X)
|
Swap two columns of a CSC/CSR matrix in-place.
Parameters
----------
X : sparse matrix of shape (n_samples, n_features)
Matrix whose two columns are to be swapped. It should be of
CSR or CSC format.
m : int
Index of the column of X to be swapped.
n : int
Index of the column of X to be swapped.
Examples
--------
>>> from sklearn.utils import sparsefuncs
>>> from scipy import sparse
>>> import numpy as np
>>> indptr = np.array([0, 2, 3, 3, 3])
>>> indices = np.array([0, 2, 2])
>>> data = np.array([8, 2, 5])
>>> csr = sparse.csr_matrix((data, indices, indptr))
>>> csr.todense()
matrix([[8, 0, 2],
[0, 0, 5],
[0, 0, 0],
[0, 0, 0]])
>>> sparsefuncs.inplace_swap_column(csr, 0, 1)
>>> csr.todense()
matrix([[0, 8, 2],
[0, 0, 5],
[0, 0, 0],
[0, 0, 0]])
|
inplace_swap_column
|
python
|
scikit-learn/scikit-learn
|
sklearn/utils/sparsefuncs.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/sparsefuncs.py
|
BSD-3-Clause
|
def min_max_axis(X, axis, ignore_nan=False):
"""Compute minimum and maximum along an axis on a CSR or CSC matrix.
Optionally ignore NaN values.
Parameters
----------
X : sparse matrix of shape (n_samples, n_features)
Input data. It should be of CSR or CSC format.
axis : {0, 1}
Axis along which the minima and maxima are computed.
ignore_nan : bool, default=False
Whether to ignore NaN values (True) or pass them through (False).
.. versionadded:: 0.20
Returns
-------
mins : ndarray of shape (n_features,), dtype={np.float32, np.float64}
Feature-wise minima.
maxs : ndarray of shape (n_features,), dtype={np.float32, np.float64}
Feature-wise maxima.
"""
if sp.issparse(X) and X.format in ("csr", "csc"):
if ignore_nan:
return _sparse_nan_min_max(X, axis=axis)
else:
return _sparse_min_max(X, axis=axis)
else:
_raise_typeerror(X)
|
Compute minimum and maximum along an axis on a CSR or CSC matrix.
Optionally ignore NaN values.
Parameters
----------
X : sparse matrix of shape (n_samples, n_features)
Input data. It should be of CSR or CSC format.
axis : {0, 1}
Axis along which the minima and maxima are computed.
ignore_nan : bool, default=False
Whether to ignore NaN values (True) or pass them through (False).
.. versionadded:: 0.20
Returns
-------
mins : ndarray of shape (n_features,), dtype={np.float32, np.float64}
Feature-wise minima.
maxs : ndarray of shape (n_features,), dtype={np.float32, np.float64}
Feature-wise maxima.
|
min_max_axis
|
python
|
scikit-learn/scikit-learn
|
sklearn/utils/sparsefuncs.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/sparsefuncs.py
|
BSD-3-Clause
|
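A short usage sketch for `min_max_axis`; the expected values in the comments are illustrative and assume the usual sparse semantics where implicit zeros take part in the minima and maxima.
import numpy as np
from scipy import sparse
from sklearn.utils.sparsefuncs import min_max_axis

X = sparse.csr_matrix(np.array([[1.0, 0.0, 3.0], [np.nan, 2.0, 0.0]]))
mins, maxs = min_max_axis(X, axis=0, ignore_nan=True)
print(mins)  # [1. 0. 0.] -- implicit zeros in columns 1 and 2 count, the NaN is ignored
print(maxs)  # [1. 2. 3.]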
def count_nonzero(X, axis=None, sample_weight=None):
"""A variant of X.getnnz() with extension to weighting on axis 0.
Useful in efficiently calculating multilabel metrics.
Parameters
----------
X : sparse matrix of shape (n_samples, n_labels)
Input data. It should be of CSR format.
axis : {0, 1}, default=None
The axis on which the data is aggregated.
sample_weight : array-like of shape (n_samples,), default=None
Weight for each row of X.
Returns
-------
nnz : int, float, ndarray of shape (n_samples,) or ndarray of shape (n_features,)
Number of non-zero values along the given axis; when `axis` is None,
the total number of non-zero values in the array is returned.
"""
if axis == -1:
axis = 1
elif axis == -2:
axis = 0
elif X.format != "csr":
raise TypeError("Expected CSR sparse format, got {0}".format(X.format))
# We rely here on the fact that np.diff(Y.indptr) for a CSR
# will return the number of nonzero entries in each row.
# A bincount over Y.indices will return the number of nonzeros
# in each column. See ``csr_matrix.getnnz`` in scipy >= 0.14.
if axis is None:
if sample_weight is None:
return X.nnz
else:
return np.dot(np.diff(X.indptr), sample_weight)
elif axis == 1:
out = np.diff(X.indptr)
if sample_weight is None:
# astype here is for consistency with axis=0 dtype
return out.astype("intp")
return out * sample_weight
elif axis == 0:
if sample_weight is None:
return np.bincount(X.indices, minlength=X.shape[1])
else:
weights = np.repeat(sample_weight, np.diff(X.indptr))
return np.bincount(X.indices, minlength=X.shape[1], weights=weights)
else:
raise ValueError("Unsupported axis: {0}".format(axis))
|
A variant of X.getnnz() with extension to weighting on axis 0.
Useful in efficiently calculating multilabel metrics.
Parameters
----------
X : sparse matrix of shape (n_samples, n_labels)
Input data. It should be of CSR format.
axis : {0, 1}, default=None
The axis on which the data is aggregated.
sample_weight : array-like of shape (n_samples,), default=None
Weight for each row of X.
Returns
-------
nnz : int, float, ndarray of shape (n_samples,) or ndarray of shape (n_features,)
Number of non-zero values along the given axis; when `axis` is None,
the total number of non-zero values in the array is returned.
|
count_nonzero
|
python
|
scikit-learn/scikit-learn
|
sklearn/utils/sparsefuncs.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/sparsefuncs.py
|
BSD-3-Clause
|
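A quick sketch of `count_nonzero` on a small CSR matrix, showing the unweighted totals and how `sample_weight` scales each row's contribution; the expected values in the comments are illustrative.
import numpy as np
from scipy import sparse
from sklearn.utils.sparsefuncs import count_nonzero

Y = sparse.csr_matrix(np.array([[1, 0, 1], [0, 1, 0]]))
print(count_nonzero(Y))           # 3 stored non-zeros in total
print(count_nonzero(Y, axis=0))   # [1 1 1] non-zeros per column
print(count_nonzero(Y, axis=1))   # [2 1] non-zeros per row
print(count_nonzero(Y, axis=0, sample_weight=np.array([0.5, 2.0])))
# [0.5 2.  0.5] -- each row's non-zeros are scaled by its weight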
def _get_median(data, n_zeros):
"""Compute the median of data with n_zeros additional zeros.
This function is used to support sparse matrices; it modifies data
in-place.
"""
n_elems = len(data) + n_zeros
if not n_elems:
return np.nan
n_negative = np.count_nonzero(data < 0)
middle, is_odd = divmod(n_elems, 2)
data.sort()
if is_odd:
return _get_elem_at_rank(middle, data, n_negative, n_zeros)
return (
_get_elem_at_rank(middle - 1, data, n_negative, n_zeros)
+ _get_elem_at_rank(middle, data, n_negative, n_zeros)
) / 2.0
|
Compute the median of data with n_zeros additional zeros.
This function is used to support sparse matrices; it modifies data
in-place.
|
_get_median
|
python
|
scikit-learn/scikit-learn
|
sklearn/utils/sparsefuncs.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/sparsefuncs.py
|
BSD-3-Clause
|
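`_get_median` is a private helper, so importing it as below is only for illustration; the sketch checks it against the equivalent dense median, keeping in mind that it sorts `data` in place.
import numpy as np
from sklearn.utils.sparsefuncs import _get_median

data = np.array([-2.0, 5.0, 1.0])  # stored non-zeros of one column
n_zeros = 2                        # implicit zeros of that column
print(_get_median(data, n_zeros))                        # 0.0 (note: data is sorted in place)
print(np.median(np.array([-2.0, 5.0, 1.0, 0.0, 0.0])))   # 0.0, the dense equivalent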
def _get_elem_at_rank(rank, data, n_negative, n_zeros):
"""Find the value in data augmented with n_zeros for the given rank"""
if rank < n_negative:
return data[rank]
if rank - n_negative < n_zeros:
return 0
return data[rank - n_zeros]
|
Find the value in data augmented with n_zeros for the given rank
|
_get_elem_at_rank
|
python
|
scikit-learn/scikit-learn
|
sklearn/utils/sparsefuncs.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/sparsefuncs.py
|
BSD-3-Clause
|
def csc_median_axis_0(X):
"""Find the median across axis 0 of a CSC matrix.
It is equivalent to doing np.median(X, axis=0).
Parameters
----------
X : sparse matrix of shape (n_samples, n_features)
Input data. It should be of CSC format.
Returns
-------
median : ndarray of shape (n_features,)
Median.
"""
if not (sp.issparse(X) and X.format == "csc"):
raise TypeError("Expected matrix of CSC format, got %s" % X.format)
indptr = X.indptr
n_samples, n_features = X.shape
median = np.zeros(n_features)
for f_ind, (start, end) in enumerate(itertools.pairwise(indptr)):
# Prevent modifying X in place
data = np.copy(X.data[start:end])
nz = n_samples - data.size
median[f_ind] = _get_median(data, nz)
return median
|
Find the median across axis 0 of a CSC matrix.
It is equivalent to doing np.median(X, axis=0).
Parameters
----------
X : sparse matrix of shape (n_samples, n_features)
Input data. It should be of CSC format.
Returns
-------
median : ndarray of shape (n_features,)
Median.
|
csc_median_axis_0
|
python
|
scikit-learn/scikit-learn
|
sklearn/utils/sparsefuncs.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/sparsefuncs.py
|
BSD-3-Clause
|
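A short check that `csc_median_axis_0` agrees with `np.median` applied to the densified data.
import numpy as np
from scipy import sparse
from sklearn.utils.sparsefuncs import csc_median_axis_0

X_dense = np.array([[0.0, 1.0], [4.0, 0.0], [0.0, 3.0], [2.0, 0.0]])
X = sparse.csc_matrix(X_dense)
print(csc_median_axis_0(X))         # [1.  0.5]
print(np.median(X_dense, axis=0))   # [1.  0.5] -- same result, computed densely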
def _implicit_column_offset(X, offset):
"""Create an implicitly offset linear operator.
This is used by PCA on sparse data to avoid densifying the whole data
matrix.
Params
------
X : sparse matrix of shape (n_samples, n_features)
offset : ndarray of shape (n_features,)
Returns
-------
centered : LinearOperator
"""
offset = offset[None, :]
XT = X.T
return LinearOperator(
matvec=lambda x: X @ x - offset @ x,
matmat=lambda x: X @ x - offset @ x,
rmatvec=lambda x: XT @ x - (offset * x.sum()),
rmatmat=lambda x: XT @ x - offset.T @ x.sum(axis=0)[None, :],
dtype=X.dtype,
shape=X.shape,
)
|
Create an implicitly offset linear operator.
This is used by PCA on sparse data to avoid densifying the whole data
matrix.
Params
------
X : sparse matrix of shape (n_samples, n_features)
offset : ndarray of shape (n_features,)
Returns
-------
centered : LinearOperator
|
_implicit_column_offset
|
python
|
scikit-learn/scikit-learn
|
sklearn/utils/sparsefuncs.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/sparsefuncs.py
|
BSD-3-Clause
|
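`_implicit_column_offset` is a private helper used for PCA on sparse data; the sketch below (direct import for illustration only, and only available in recent scikit-learn versions) checks that the returned `LinearOperator` behaves like the dense, column-centered matrix without ever materializing it.
import numpy as np
from scipy import sparse
from sklearn.utils.sparsefuncs import _implicit_column_offset

rng = np.random.RandomState(0)
X = sparse.random(6, 4, density=0.5, format="csr", random_state=rng)
offset = np.asarray(X.mean(axis=0)).ravel()

centered = _implicit_column_offset(X, offset)
v = rng.rand(4)
# Same result as centering explicitly, but X - offset is never formed.
print(np.allclose(centered.matvec(v), (X.toarray() - offset) @ v))  # True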
def _weighted_percentile(array, sample_weight, percentile_rank=50, xp=None):
"""Compute the weighted percentile with method 'inverted_cdf'.
When the percentile lies between two data points of `array`, the function returns
the lower value.
If `array` is a 2D array, the `values` are selected along axis 0.
`NaN` values are ignored by setting their weights to 0. If `array` is 2D, this
is done in a column-isolated manner: a `NaN` in the second column does not impact
the percentile computed for the first column even if `sample_weight` is 1D.
.. versionchanged:: 0.24
Accepts 2D `array`.
.. versionchanged:: 1.7
Supports handling of `NaN` values.
Parameters
----------
array : 1D or 2D array
Values to take the weighted percentile of.
sample_weight: 1D or 2D array
Weights for each value in `array`. Must be same shape as `array` or of shape
`(array.shape[0],)`.
percentile_rank: int or float, default=50
The probability level of the percentile to compute, in percent. Must be between
0 and 100.
xp : array_namespace, default=None
The standard-compatible namespace for `array`. Default: infer.
Returns
-------
percentile : scalar or 0D array if `array` 1D (or 0D), array if `array` 2D
Weighted percentile at the requested probability level.
"""
xp, _, device = get_namespace_and_device(array)
# `sample_weight` should follow `array` for dtypes
floating_dtype = _find_matching_floating_dtype(array, xp=xp)
array = xp.asarray(array, dtype=floating_dtype, device=device)
sample_weight = xp.asarray(sample_weight, dtype=floating_dtype, device=device)
n_dim = array.ndim
if n_dim == 0:
return array
if array.ndim == 1:
array = xp.reshape(array, (-1, 1))
# When sample_weight 1D, repeat for each array.shape[1]
if array.shape != sample_weight.shape and array.shape[0] == sample_weight.shape[0]:
sample_weight = xp.tile(sample_weight, (array.shape[1], 1)).T
# Sort `array` and `sample_weight` along axis=0:
sorted_idx = xp.argsort(array, axis=0)
sorted_weights = xp.take_along_axis(sample_weight, sorted_idx, axis=0)
# Set NaN values in `sample_weight` to 0. Only perform this operation if NaN
# values present to avoid temporary allocations of size `(n_samples, n_features)`.
n_features = array.shape[1]
largest_value_per_column = array[
sorted_idx[-1, ...], xp.arange(n_features, device=device)
]
# NaN values get sorted to end (largest value)
if xp.any(xp.isnan(largest_value_per_column)):
sorted_nan_mask = xp.take_along_axis(xp.isnan(array), sorted_idx, axis=0)
sorted_weights[sorted_nan_mask] = 0
# Compute the weighted cumulative distribution function (CDF) based on
# `sample_weight` and scale `percentile_rank` along it.
#
# Note: we call `xp.cumulative_sum` on the transposed `sorted_weights` to
# ensure that the result is of shape `(n_features, n_samples)` so
# `xp.searchsorted` calls take contiguous inputs as a result (for
# performance reasons).
weight_cdf = xp.cumulative_sum(sorted_weights.T, axis=1)
adjusted_percentile_rank = percentile_rank / 100 * weight_cdf[..., -1]
# Ignore leading `sample_weight=0` observations when `percentile_rank=0` (#20528)
mask = adjusted_percentile_rank == 0
adjusted_percentile_rank[mask] = xp.nextafter(
adjusted_percentile_rank[mask], adjusted_percentile_rank[mask] + 1
)
# For each feature with index j, find sample index i of the scalar value
# `adjusted_percentile_rank[j]` in 1D array `weight_cdf[j]`, such that:
# weight_cdf[j, i-1] < adjusted_percentile_rank[j] <= weight_cdf[j, i].
percentile_indices = xp.stack(
[
xp.searchsorted(
weight_cdf[feature_idx, ...], adjusted_percentile_rank[feature_idx]
)
for feature_idx in range(weight_cdf.shape[0])
],
)
# In rare cases, `percentile_indices` equals to `sorted_idx.shape[0]`
max_idx = sorted_idx.shape[0] - 1
percentile_indices = xp.clip(percentile_indices, 0, max_idx)
col_indices = xp.arange(array.shape[1], device=device)
percentile_in_sorted = sorted_idx[percentile_indices, col_indices]
result = array[percentile_in_sorted, col_indices]
return result[0] if n_dim == 1 else result
|
Compute the weighted percentile with method 'inverted_cdf'.
When the percentile lies between two data points of `array`, the function returns
the lower value.
If `array` is a 2D array, the `values` are selected along axis 0.
`NaN` values are ignored by setting their weights to 0. If `array` is 2D, this
is done in a column-isolated manner: a `NaN` in the second column does not impact
the percentile computed for the first column even if `sample_weight` is 1D.
.. versionchanged:: 0.24
Accepts 2D `array`.
.. versionchanged:: 1.7
Supports handling of `NaN` values.
Parameters
----------
array : 1D or 2D array
Values to take the weighted percentile of.
sample_weight: 1D or 2D array
Weights for each value in `array`. Must be same shape as `array` or of shape
`(array.shape[0],)`.
percentile_rank: int or float, default=50
The probability level of the percentile to compute, in percent. Must be between
0 and 100.
xp : array_namespace, default=None
The standard-compatible namespace for `array`. Default: infer.
Returns
-------
percentile : scalar or 0D array if `array` 1D (or 0D), array if `array` 2D
Weighted percentile at the requested probability level.
|
_weighted_percentile
|
python
|
scikit-learn/scikit-learn
|
sklearn/utils/stats.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/stats.py
|
BSD-3-Clause
|
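`_weighted_percentile` is private, so the direct import below is illustrative only; the sketch contrasts the unweighted and weighted medians under the 'inverted_cdf' definition.
import numpy as np
from sklearn.utils.stats import _weighted_percentile

values = np.array([1.0, 2.0, 3.0, 4.0])
weights = np.array([1.0, 1.0, 1.0, 5.0])

print(_weighted_percentile(values, np.ones_like(values)))  # 2.0, the plain median
print(_weighted_percentile(values, weights))               # 4.0, the heavy weight pulls it up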
def _deprecate_positional_args(func=None, *, version="1.3"):
"""Decorator for methods that issues warnings for positional arguments.
Using the keyword-only argument syntax in pep 3102, arguments after the
* will issue a warning when passed as a positional argument.
Parameters
----------
func : callable, default=None
Function to check arguments on.
version : str, default="1.3"
The version when positional arguments will result in an error.
"""
def _inner_deprecate_positional_args(f):
sig = signature(f)
kwonly_args = []
all_args = []
for name, param in sig.parameters.items():
if param.kind == Parameter.POSITIONAL_OR_KEYWORD:
all_args.append(name)
elif param.kind == Parameter.KEYWORD_ONLY:
kwonly_args.append(name)
@wraps(f)
def inner_f(*args, **kwargs):
extra_args = len(args) - len(all_args)
if extra_args <= 0:
return f(*args, **kwargs)
# extra_args > 0
args_msg = [
"{}={}".format(name, arg)
for name, arg in zip(kwonly_args[:extra_args], args[-extra_args:])
]
args_msg = ", ".join(args_msg)
warnings.warn(
(
f"Pass {args_msg} as keyword args. From version "
f"{version} passing these as positional arguments "
"will result in an error"
),
FutureWarning,
)
kwargs.update(zip(sig.parameters, args))
return f(**kwargs)
return inner_f
if func is not None:
return _inner_deprecate_positional_args(func)
return _inner_deprecate_positional_args
|
Decorator for methods that issues warnings for positional arguments.
Using the keyword-only argument syntax in pep 3102, arguments after the
* will issue a warning when passed as a positional argument.
Parameters
----------
func : callable, default=None
Function to check arguments on.
version : str, default="1.3"
The version when positional arguments will result in an error.
|
_deprecate_positional_args
|
python
|
scikit-learn/scikit-learn
|
sklearn/utils/validation.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/validation.py
|
BSD-3-Clause
|
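A toy sketch of applying this private decorator; `toy_resample` and the deadline version "1.9" are made up for illustration and are not part of scikit-learn.
import warnings
from sklearn.utils.validation import _deprecate_positional_args

@_deprecate_positional_args(version="1.9")  # hypothetical deadline version
def toy_resample(data, *, replace=True, n_samples=None):
    return data

toy_resample([1, 2, 3], replace=False)   # keyword use: no warning
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    toy_resample([1, 2, 3], False)       # positional use of `replace`
print(caught[0].category.__name__)       # FutureWarning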
def assert_all_finite(
X,
*,
allow_nan=False,
estimator_name=None,
input_name="",
):
"""Throw a ValueError if X contains NaN or infinity.
Parameters
----------
X : {ndarray, sparse matrix}
The input data.
allow_nan : bool, default=False
If True, do not throw error when `X` contains NaN.
estimator_name : str, default=None
The estimator name, used to construct the error message.
input_name : str, default=""
The data name used to construct the error message. In particular
if `input_name` is "X" and the data has NaN values and
allow_nan is False, the error message will link to the imputer
documentation.
Examples
--------
>>> from sklearn.utils import assert_all_finite
>>> import numpy as np
>>> array = np.array([1, np.inf, np.nan, 4])
>>> try:
... assert_all_finite(array)
... print("Test passed: Array contains only finite values.")
... except ValueError:
... print("Test failed: Array contains non-finite values.")
Test failed: Array contains non-finite values.
"""
_assert_all_finite(
X.data if sp.issparse(X) else X,
allow_nan=allow_nan,
estimator_name=estimator_name,
input_name=input_name,
)
|
Throw a ValueError if X contains NaN or infinity.
Parameters
----------
X : {ndarray, sparse matrix}
The input data.
allow_nan : bool, default=False
If True, do not throw error when `X` contains NaN.
estimator_name : str, default=None
The estimator name, used to construct the error message.
input_name : str, default=""
The data name used to construct the error message. In particular
if `input_name` is "X" and the data has NaN values and
allow_nan is False, the error message will link to the imputer
documentation.
Examples
--------
>>> from sklearn.utils import assert_all_finite
>>> import numpy as np
>>> array = np.array([1, np.inf, np.nan, 4])
>>> try:
... assert_all_finite(array)
... print("Test passed: Array contains only finite values.")
... except ValueError:
... print("Test failed: Array contains non-finite values.")
Test failed: Array contains non-finite values.
|
assert_all_finite
|
python
|
scikit-learn/scikit-learn
|
sklearn/utils/validation.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/validation.py
|
BSD-3-Clause
|
def as_float_array(
X, *, copy=True, force_all_finite="deprecated", ensure_all_finite=None
):
"""Convert an array-like to an array of floats.
The new dtype will be np.float32 or np.float64, depending on the original
type. The function can create a copy or modify the argument depending
on the argument copy.
Parameters
----------
X : {array-like, sparse matrix}
The input data.
copy : bool, default=True
If True, a copy of X will be created. If False, a copy may still be
returned if X's dtype is not a floating point type.
force_all_finite : bool or 'allow-nan', default=True
Whether to raise an error on np.inf, np.nan, pd.NA in X. The
possibilities are:
- True: Force all values of X to be finite.
- False: accepts np.inf, np.nan, pd.NA in X.
- 'allow-nan': accepts only np.nan and pd.NA values in X. Values cannot
be infinite.
.. versionadded:: 0.20
``force_all_finite`` accepts the string ``'allow-nan'``.
.. versionchanged:: 0.23
Accepts `pd.NA` and converts it into `np.nan`
.. deprecated:: 1.6
`force_all_finite` was renamed to `ensure_all_finite` and will be removed
in 1.8.
ensure_all_finite : bool or 'allow-nan', default=True
Whether to raise an error on np.inf, np.nan, pd.NA in X. The
possibilities are:
- True: Force all values of X to be finite.
- False: accepts np.inf, np.nan, pd.NA in X.
- 'allow-nan': accepts only np.nan and pd.NA values in X. Values cannot
be infinite.
.. versionadded:: 1.6
`force_all_finite` was renamed to `ensure_all_finite`.
Returns
-------
XT : {ndarray, sparse matrix}
An array of type float.
Examples
--------
>>> from sklearn.utils import as_float_array
>>> import numpy as np
>>> array = np.array([0, 0, 1, 2, 2], dtype=np.int64)
>>> as_float_array(array)
array([0., 0., 1., 2., 2.])
"""
ensure_all_finite = _deprecate_force_all_finite(force_all_finite, ensure_all_finite)
if isinstance(X, np.matrix) or (
not isinstance(X, np.ndarray) and not sp.issparse(X)
):
return check_array(
X,
accept_sparse=["csr", "csc", "coo"],
dtype=np.float64,
copy=copy,
ensure_all_finite=ensure_all_finite,
ensure_2d=False,
)
elif sp.issparse(X) and X.dtype in [np.float32, np.float64]:
return X.copy() if copy else X
elif X.dtype in [np.float32, np.float64]: # is numpy array
return X.copy("F" if X.flags["F_CONTIGUOUS"] else "C") if copy else X
else:
if X.dtype.kind in "uib" and X.dtype.itemsize <= 4:
return_dtype = np.float32
else:
return_dtype = np.float64
return X.astype(return_dtype)
|
Convert an array-like to an array of floats.
The new dtype will be np.float32 or np.float64, depending on the original
type. The function can create a copy or modify the argument depending
on the argument copy.
Parameters
----------
X : {array-like, sparse matrix}
The input data.
copy : bool, default=True
If True, a copy of X will be created. If False, a copy may still be
returned if X's dtype is not a floating point type.
force_all_finite : bool or 'allow-nan', default=True
Whether to raise an error on np.inf, np.nan, pd.NA in X. The
possibilities are:
- True: Force all values of X to be finite.
- False: accepts np.inf, np.nan, pd.NA in X.
- 'allow-nan': accepts only np.nan and pd.NA values in X. Values cannot
be infinite.
.. versionadded:: 0.20
``force_all_finite`` accepts the string ``'allow-nan'``.
.. versionchanged:: 0.23
Accepts `pd.NA` and converts it into `np.nan`
.. deprecated:: 1.6
`force_all_finite` was renamed to `ensure_all_finite` and will be removed
in 1.8.
ensure_all_finite : bool or 'allow-nan', default=True
Whether to raise an error on np.inf, np.nan, pd.NA in X. The
possibilities are:
- True: Force all values of X to be finite.
- False: accepts np.inf, np.nan, pd.NA in X.
- 'allow-nan': accepts only np.nan and pd.NA values in X. Values cannot
be infinite.
.. versionadded:: 1.6
`force_all_finite` was renamed to `ensure_all_finite`.
Returns
-------
XT : {ndarray, sparse matrix}
An array of type float.
Examples
--------
>>> from sklearn.utils import as_float_array
>>> import numpy as np
>>> array = np.array([0, 0, 1, 2, 2], dtype=np.int64)
>>> as_float_array(array)
array([0., 0., 1., 2., 2.])
|
as_float_array
|
python
|
scikit-learn/scikit-learn
|
sklearn/utils/validation.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/validation.py
|
BSD-3-Clause
|
def _is_arraylike(x):
"""Returns whether the input is array-like."""
if sp.issparse(x):
return False
return hasattr(x, "__len__") or hasattr(x, "shape") or hasattr(x, "__array__")
|
Returns whether the input is array-like.
|
_is_arraylike
|
python
|
scikit-learn/scikit-learn
|
sklearn/utils/validation.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/validation.py
|
BSD-3-Clause
|
def _num_features(X):
"""Return the number of features in an array-like X.
This helper function tries hard to avoid to materialize an array version
of X unless necessary. For instance, if X is a list of lists,
this function will return the length of the first element, assuming
that subsequent elements are all lists of the same length without
checking.
Parameters
----------
X : array-like
array-like to get the number of features.
Returns
-------
features : int
Number of features
"""
type_ = type(X)
if type_.__module__ == "builtins":
type_name = type_.__qualname__
else:
type_name = f"{type_.__module__}.{type_.__qualname__}"
message = f"Unable to find the number of features from X of type {type_name}"
if not hasattr(X, "__len__") and not hasattr(X, "shape"):
if not hasattr(X, "__array__"):
raise TypeError(message)
# Only convert X to a numpy array if there is no cheaper, heuristic
# option.
X = np.asarray(X)
if hasattr(X, "shape"):
if not hasattr(X.shape, "__len__") or len(X.shape) <= 1:
message += f" with shape {X.shape}"
raise TypeError(message)
return X.shape[1]
first_sample = X[0]
# Do not consider an array-like of strings or dicts to be a 2D array
if isinstance(first_sample, (str, bytes, dict)):
message += f" where the samples are of type {type(first_sample).__qualname__}"
raise TypeError(message)
try:
# If X is a list of lists, for instance, we assume that all nested
# lists have the same length without checking or converting to
# a numpy array to keep this function call as cheap as possible.
return len(first_sample)
except Exception as err:
raise TypeError(message) from err
|
Return the number of features in an array-like X.
This helper function tries hard to avoid to materialize an array version
of X unless necessary. For instance, if X is a list of lists,
this function will return the length of the first element, assuming
that subsequent elements are all lists of the same length without
checking.
Parameters
----------
X : array-like
array-like to get the number of features.
Returns
-------
features : int
Number of features
|
_num_features
|
python
|
scikit-learn/scikit-learn
|
sklearn/utils/validation.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/validation.py
|
BSD-3-Clause
|
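A quick sketch of `_num_features` (a private helper, imported here only for illustration) on a 2D array, a list of lists, and a 1D array that has no feature axis.
import numpy as np
from sklearn.utils.validation import _num_features

print(_num_features(np.zeros((10, 3))))          # 3, from shape[1]
print(_num_features([[1, 2], [3, 4], [5, 6]]))   # 2, length of the first row
try:
    _num_features(np.zeros(5))
except TypeError as exc:
    print(exc)  # 1D input has no feature axis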
def _num_samples(x):
"""Return number of samples in array-like x."""
message = "Expected sequence or array-like, got %s" % type(x)
if hasattr(x, "fit") and callable(x.fit):
# Don't get num_samples from an ensembles length!
raise TypeError(message)
if _use_interchange_protocol(x):
return x.__dataframe__().num_rows()
if not hasattr(x, "__len__") and not hasattr(x, "shape"):
if hasattr(x, "__array__"):
xp, _ = get_namespace(x)
x = xp.asarray(x)
else:
raise TypeError(message)
if hasattr(x, "shape") and x.shape is not None:
if len(x.shape) == 0:
raise TypeError(
"Input should have at least 1 dimension i.e. satisfy "
f"`len(x.shape) > 0`, got scalar `{x!r}` instead."
)
# Check that shape is returning an integer or default to len
# Dask dataframes may not return numeric shape[0] value
if isinstance(x.shape[0], numbers.Integral):
return x.shape[0]
try:
return len(x)
except TypeError as type_error:
raise TypeError(message) from type_error
|
Return number of samples in array-like x.
|
_num_samples
|
python
|
scikit-learn/scikit-learn
|
sklearn/utils/validation.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/validation.py
|
BSD-3-Clause
|
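A companion sketch for `_num_samples` (also private), showing the `shape[0]` path and the `len` fallback.
import numpy as np
from scipy import sparse
from sklearn.utils.validation import _num_samples

print(_num_samples(np.zeros((7, 2))))             # 7, from shape[0]
print(_num_samples([[1, 2], [3, 4], [5, 6]]))     # 3, falls back to len()
print(_num_samples(sparse.eye(4, format="csr")))  # 4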
def check_memory(memory):
"""Check that ``memory`` is joblib.Memory-like.
joblib.Memory-like means that ``memory`` can be converted into a
joblib.Memory instance (typically a str denoting the ``location``)
or has the same interface (has a ``cache`` method).
Parameters
----------
memory : None, str or object with the joblib.Memory interface
- If string, the location where to create the `joblib.Memory` interface.
- If None, no caching is done and the Memory object is completely transparent.
Returns
-------
memory : object with the joblib.Memory interface
A correct joblib.Memory object.
Raises
------
ValueError
If ``memory`` is not joblib.Memory-like.
Examples
--------
>>> from sklearn.utils.validation import check_memory
>>> check_memory("caching_dir")
Memory(location=caching_dir/joblib)
"""
if memory is None or isinstance(memory, str):
memory = joblib.Memory(location=memory, verbose=0)
elif not hasattr(memory, "cache"):
raise ValueError(
"'memory' should be None, a string or have the same"
" interface as joblib.Memory."
" Got memory='{}' instead.".format(memory)
)
return memory
|
Check that ``memory`` is joblib.Memory-like.
joblib.Memory-like means that ``memory`` can be converted into a
joblib.Memory instance (typically a str denoting the ``location``)
or has the same interface (has a ``cache`` method).
Parameters
----------
memory : None, str or object with the joblib.Memory interface
- If string, the location where to create the `joblib.Memory` interface.
- If None, no caching is done and the Memory object is completely transparent.
Returns
-------
memory : object with the joblib.Memory interface
A correct joblib.Memory object.
Raises
------
ValueError
If ``memory`` is not joblib.Memory-like.
Examples
--------
>>> from sklearn.utils.validation import check_memory
>>> check_memory("caching_dir")
Memory(location=caching_dir/joblib)
|
check_memory
|
python
|
scikit-learn/scikit-learn
|
sklearn/utils/validation.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/validation.py
|
BSD-3-Clause
|
def check_consistent_length(*arrays):
"""Check that all arrays have consistent first dimensions.
Checks whether all objects in arrays have the same shape or length.
Parameters
----------
*arrays : list or tuple of input objects.
Objects that will be checked for consistent length.
Examples
--------
>>> from sklearn.utils.validation import check_consistent_length
>>> a = [1, 2, 3]
>>> b = [2, 3, 4]
>>> check_consistent_length(a, b)
"""
lengths = [_num_samples(X) for X in arrays if X is not None]
if len(set(lengths)) > 1:
raise ValueError(
"Found input variables with inconsistent numbers of samples: %r"
% [int(l) for l in lengths]
)
|
Check that all arrays have consistent first dimensions.
Checks whether all objects in arrays have the same shape or length.
Parameters
----------
*arrays : list or tuple of input objects.
Objects that will be checked for consistent length.
Examples
--------
>>> from sklearn.utils.validation import check_consistent_length
>>> a = [1, 2, 3]
>>> b = [2, 3, 4]
>>> check_consistent_length(a, b)
|
check_consistent_length
|
python
|
scikit-learn/scikit-learn
|
sklearn/utils/validation.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/validation.py
|
BSD-3-Clause
|
def _make_indexable(iterable):
"""Ensure iterable supports indexing or convert to an indexable variant.
Convert sparse matrices to csr and other non-indexable iterable to arrays.
Let `None` and indexable objects (e.g. pandas dataframes) pass unchanged.
Parameters
----------
iterable : {list, dataframe, ndarray, sparse matrix} or None
Object to be converted to an indexable iterable.
"""
if sp.issparse(iterable):
return iterable.tocsr()
elif hasattr(iterable, "__getitem__") or hasattr(iterable, "iloc"):
return iterable
elif iterable is None:
return iterable
return np.array(iterable)
|
Ensure iterable supports indexing or convert to an indexable variant.
Convert sparse matrices to csr and other non-indexable iterable to arrays.
Let `None` and indexable objects (e.g. pandas dataframes) pass unchanged.
Parameters
----------
iterable : {list, dataframe, ndarray, sparse matrix} or None
Object to be converted to an indexable iterable.
|
_make_indexable
|
python
|
scikit-learn/scikit-learn
|
sklearn/utils/validation.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/validation.py
|
BSD-3-Clause
|
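A brief sketch of `_make_indexable` (private helper) covering its three branches: sparse conversion to CSR, already-indexable input, and None.
import numpy as np
from scipy import sparse
from sklearn.utils.validation import _make_indexable

X_coo = sparse.coo_matrix(np.eye(3))
print(_make_indexable(X_coo).format)   # 'csr' -- COO does not support row indexing
print(_make_indexable([1, 2, 3]))      # [1, 2, 3] -- lists pass through unchanged
print(_make_indexable(None))           # None passes through as well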
def indexable(*iterables):
"""Make arrays indexable for cross-validation.
Checks consistent length, passes through None, and ensures that everything
can be indexed by converting sparse matrices to csr and converting
non-iterable objects to arrays.
Parameters
----------
*iterables : {lists, dataframes, ndarrays, sparse matrices}
List of objects to ensure sliceability.
Returns
-------
result : list of {ndarray, sparse matrix, dataframe} or None
Returns a list containing indexable arrays (i.e. NumPy array,
sparse matrix, or dataframe) or `None`.
Examples
--------
>>> from sklearn.utils import indexable
>>> from scipy.sparse import csr_matrix
>>> import numpy as np
>>> iterables = [
... [1, 2, 3], np.array([2, 3, 4]), None, csr_matrix([[5], [6], [7]])
... ]
>>> indexable(*iterables)
[[1, 2, 3], array([2, 3, 4]), None, <...Sparse...dtype 'int64'...shape (3, 1)>]
"""
result = [_make_indexable(X) for X in iterables]
check_consistent_length(*result)
return result
|
Make arrays indexable for cross-validation.
Checks consistent length, passes through None, and ensures that everything
can be indexed by converting sparse matrices to csr and converting
non-iterable objects to arrays.
Parameters
----------
*iterables : {lists, dataframes, ndarrays, sparse matrices}
List of objects to ensure sliceability.
Returns
-------
result : list of {ndarray, sparse matrix, dataframe} or None
Returns a list containing indexable arrays (i.e. NumPy array,
sparse matrix, or dataframe) or `None`.
Examples
--------
>>> from sklearn.utils import indexable
>>> from scipy.sparse import csr_matrix
>>> import numpy as np
>>> iterables = [
... [1, 2, 3], np.array([2, 3, 4]), None, csr_matrix([[5], [6], [7]])
... ]
>>> indexable(*iterables)
[[1, 2, 3], array([2, 3, 4]), None, <...Sparse...dtype 'int64'...shape (3, 1)>]
|
indexable
|
python
|
scikit-learn/scikit-learn
|
sklearn/utils/validation.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/validation.py
|
BSD-3-Clause
|
def _ensure_sparse_format(
sparse_container,
accept_sparse,
dtype,
copy,
ensure_all_finite,
accept_large_sparse,
estimator_name=None,
input_name="",
):
"""Convert a sparse container to a given format.
Checks the sparse format of `sparse_container` and converts if necessary.
Parameters
----------
sparse_container : sparse matrix or array
Input to validate and convert.
accept_sparse : str, bool or list/tuple of str
String[s] representing allowed sparse matrix formats ('csc',
'csr', 'coo', 'dok', 'bsr', 'lil', 'dia'). If the input is sparse but
not in the allowed format, it will be converted to the first listed
format. True allows the input to be any format. False means
that a sparse matrix input will raise an error.
dtype : str, type or None
Data type of result. If None, the dtype of the input is preserved.
copy : bool
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
ensure_all_finite : bool or 'allow-nan'
Whether to raise an error on np.inf, np.nan, pd.NA in X. The
possibilities are:
- True: Force all values of X to be finite.
- False: accepts np.inf, np.nan, pd.NA in X.
- 'allow-nan': accepts only np.nan and pd.NA values in X. Values cannot
be infinite.
.. versionadded:: 0.20
``ensure_all_finite`` accepts the string ``'allow-nan'``.
.. versionchanged:: 0.23
Accepts `pd.NA` and converts it into `np.nan`
estimator_name : str, default=None
The estimator name, used to construct the error message.
input_name : str, default=""
The data name used to construct the error message. In particular
if `input_name` is "X" and the data has NaN values and
allow_nan is False, the error message will link to the imputer
documentation.
Returns
-------
sparse_container_converted : sparse matrix or array
Sparse container (matrix/array) that is ensured to have an allowed type.
"""
if dtype is None:
dtype = sparse_container.dtype
changed_format = False
sparse_container_type_name = type(sparse_container).__name__
if isinstance(accept_sparse, str):
accept_sparse = [accept_sparse]
# Indices dtype validation
_check_large_sparse(sparse_container, accept_large_sparse)
if accept_sparse is False:
padded_input = " for " + input_name if input_name else ""
raise TypeError(
f"Sparse data was passed{padded_input}, but dense data is required. "
"Use '.toarray()' to convert to a dense numpy array."
)
elif isinstance(accept_sparse, (list, tuple)):
if len(accept_sparse) == 0:
raise ValueError(
"When providing 'accept_sparse' as a tuple or list, it must contain at "
"least one string value."
)
# ensure correct sparse format
if sparse_container.format not in accept_sparse:
# create new with correct sparse
sparse_container = sparse_container.asformat(accept_sparse[0])
changed_format = True
elif accept_sparse is not True:
# any other type
raise ValueError(
"Parameter 'accept_sparse' should be a string, boolean or list of strings."
f" You provided 'accept_sparse={accept_sparse}'."
)
if dtype != sparse_container.dtype:
# convert dtype
sparse_container = sparse_container.astype(dtype)
elif copy and not changed_format:
# force copy
sparse_container = sparse_container.copy()
if ensure_all_finite:
if not hasattr(sparse_container, "data"):
warnings.warn(
f"Can't check {sparse_container.format} sparse matrix for nan or inf.",
stacklevel=2,
)
else:
_assert_all_finite(
sparse_container.data,
allow_nan=ensure_all_finite == "allow-nan",
estimator_name=estimator_name,
input_name=input_name,
)
# TODO: Remove when the minimum version of SciPy supported is 1.12
# With SciPy sparse arrays, conversion from DIA format to COO, CSR, or BSR
# triggers the use of `np.int64` indices even if the data is such that it could
# be more efficiently represented with `np.int32` indices.
# https://github.com/scipy/scipy/issues/19245 Since not all scikit-learn
# algorithms support large indices, the following code downcasts to `np.int32`
# indices when it's safe to do so.
if changed_format:
# accept_sparse is specified to a specific format and a conversion occurred
requested_sparse_format = accept_sparse[0]
_preserve_dia_indices_dtype(
sparse_container, sparse_container_type_name, requested_sparse_format
)
return sparse_container
|
Convert a sparse container to a given format.
Checks the sparse format of `sparse_container` and converts if necessary.
Parameters
----------
sparse_container : sparse matrix or array
Input to validate and convert.
accept_sparse : str, bool or list/tuple of str
String[s] representing allowed sparse matrix formats ('csc',
'csr', 'coo', 'dok', 'bsr', 'lil', 'dia'). If the input is sparse but
not in the allowed format, it will be converted to the first listed
format. True allows the input to be any format. False means
that a sparse matrix input will raise an error.
dtype : str, type or None
Data type of result. If None, the dtype of the input is preserved.
copy : bool
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
ensure_all_finite : bool or 'allow-nan'
Whether to raise an error on np.inf, np.nan, pd.NA in X. The
possibilities are:
- True: Force all values of X to be finite.
- False: accepts np.inf, np.nan, pd.NA in X.
- 'allow-nan': accepts only np.nan and pd.NA values in X. Values cannot
be infinite.
.. versionadded:: 0.20
``ensure_all_finite`` accepts the string ``'allow-nan'``.
.. versionchanged:: 0.23
Accepts `pd.NA` and converts it into `np.nan`
estimator_name : str, default=None
The estimator name, used to construct the error message.
input_name : str, default=""
The data name used to construct the error message. In particular
if `input_name` is "X" and the data has NaN values and
allow_nan is False, the error message will link to the imputer
documentation.
Returns
-------
sparse_container_converted : sparse matrix or array
Sparse container (matrix/array) that is ensured to have an allowed type.
|
_ensure_sparse_format
|
python
|
scikit-learn/scikit-learn
|
sklearn/utils/validation.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/validation.py
|
BSD-3-Clause
|
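`_ensure_sparse_format` is an internal building block of `check_array`; calling it directly as below is only a sketch, with the positional parameters spelled out as keywords for readability.
import numpy as np
from scipy import sparse
from sklearn.utils.validation import _ensure_sparse_format

X = sparse.coo_matrix(np.eye(3))
X_checked = _ensure_sparse_format(
    X,
    accept_sparse=["csr", "csc"],  # COO is not accepted, so it is converted
    dtype=np.float64,
    copy=False,
    ensure_all_finite=True,
    accept_large_sparse=True,
)
print(X_checked.format)  # 'csr', the first accepted format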
def _pandas_dtype_needs_early_conversion(pd_dtype):
"""Return True if pandas extension pd_dtype need to be converted early."""
# Check these early for pandas versions without extension dtypes
from pandas import SparseDtype
from pandas.api.types import (
is_bool_dtype,
is_float_dtype,
is_integer_dtype,
)
if is_bool_dtype(pd_dtype):
# bool and extension booleans need early conversion because __array__
# converts mixed dtype dataframes into object dtypes
return True
if isinstance(pd_dtype, SparseDtype):
# Sparse arrays will be converted later in `check_array`
return False
try:
from pandas.api.types import is_extension_array_dtype
except ImportError:
return False
if isinstance(pd_dtype, SparseDtype) or not is_extension_array_dtype(pd_dtype):
# Sparse arrays will be converted later in `check_array`
# Only handle extension arrays for integer and floats
return False
elif is_float_dtype(pd_dtype):
# Float ndarrays can normally support nans. They need to be converted
# first to map pd.NA to np.nan
return True
elif is_integer_dtype(pd_dtype):
# XXX: Warn when converting from a high integer to a float
return True
return False
|
Return True if a pandas extension pd_dtype needs to be converted early.
|
_pandas_dtype_needs_early_conversion
|
python
|
scikit-learn/scikit-learn
|
sklearn/utils/validation.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/validation.py
|
BSD-3-Clause
|
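A sketch of the dtypes this predicate flags, assuming pandas is installed; the helper is private and only used inside `check_array`, so the direct import is for illustration.
import numpy as np
import pandas as pd
from sklearn.utils.validation import _pandas_dtype_needs_early_conversion

print(_pandas_dtype_needs_early_conversion(pd.BooleanDtype()))        # True
print(_pandas_dtype_needs_early_conversion(pd.Int64Dtype()))          # True
print(_pandas_dtype_needs_early_conversion(pd.SparseDtype("float")))  # False
print(_pandas_dtype_needs_early_conversion(np.dtype("float64")))      # False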
def check_array(
array,
accept_sparse=False,
*,
accept_large_sparse=True,
dtype="numeric",
order=None,
copy=False,
force_writeable=False,
force_all_finite="deprecated",
ensure_all_finite=None,
ensure_non_negative=False,
ensure_2d=True,
allow_nd=False,
ensure_min_samples=1,
ensure_min_features=1,
estimator=None,
input_name="",
):
"""Input validation on an array, list, sparse matrix or similar.
By default, the input is checked to be a non-empty 2D array containing
only finite values. If the dtype of the array is object, attempt
converting to float, raising on failure.
Parameters
----------
array : object
Input object to check / convert.
accept_sparse : str, bool or list/tuple of str, default=False
String[s] representing allowed sparse matrix formats, such as 'csc',
'csr', etc. If the input is sparse but not in the allowed format,
it will be converted to the first listed format. True allows the input
to be any format. False means that a sparse matrix input will
raise an error.
accept_large_sparse : bool, default=True
If a CSR, CSC, COO or BSR sparse matrix is supplied and accepted by
accept_sparse, accept_large_sparse=False will cause it to be accepted
only if its indices are stored with a 32-bit dtype.
.. versionadded:: 0.20
dtype : 'numeric', type, list of type or None, default='numeric'
Data type of result. If None, the dtype of the input is preserved.
If "numeric", dtype is preserved unless array.dtype is object.
If dtype is a list of types, conversion on the first type is only
performed if the dtype of the input is not in the list.
order : {'F', 'C'} or None, default=None
Whether an array will be forced to be fortran or c-style.
When order is None (default), then if copy=False, nothing is ensured
about the memory layout of the output array; otherwise (copy=True)
the memory layout of the returned array is kept as close as possible
to the original array.
copy : bool, default=False
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_writeable : bool, default=False
Whether to force the output array to be writeable. If True, the returned array
is guaranteed to be writeable, which may require a copy. Otherwise the
writeability of the input array is preserved.
.. versionadded:: 1.6
force_all_finite : bool or 'allow-nan', default=True
Whether to raise an error on np.inf, np.nan, pd.NA in array. The
possibilities are:
- True: Force all values of array to be finite.
- False: accepts np.inf, np.nan, pd.NA in array.
- 'allow-nan': accepts only np.nan and pd.NA values in array. Values
cannot be infinite.
.. versionadded:: 0.20
``force_all_finite`` accepts the string ``'allow-nan'``.
.. versionchanged:: 0.23
Accepts `pd.NA` and converts it into `np.nan`
.. deprecated:: 1.6
`force_all_finite` was renamed to `ensure_all_finite` and will be removed
in 1.8.
ensure_all_finite : bool or 'allow-nan', default=True
Whether to raise an error on np.inf, np.nan, pd.NA in array. The
possibilities are:
- True: Force all values of array to be finite.
- False: accepts np.inf, np.nan, pd.NA in array.
- 'allow-nan': accepts only np.nan and pd.NA values in array. Values
cannot be infinite.
.. versionadded:: 1.6
`force_all_finite` was renamed to `ensure_all_finite`.
ensure_non_negative : bool, default=False
Make sure the array has only non-negative values. If True, an array that
contains negative values will raise a ValueError.
.. versionadded:: 1.6
ensure_2d : bool, default=True
Whether to raise a value error if array is not 2D.
allow_nd : bool, default=False
Whether to allow array.ndim > 2.
ensure_min_samples : int, default=1
Make sure that the array has a minimum number of samples in its first
axis (rows for a 2D array). Setting to 0 disables this check.
ensure_min_features : int, default=1
Make sure that the 2D array has some minimum number of features
(columns). The default value of 1 rejects empty datasets.
This check is only enforced when the input data has effectively 2
dimensions or is originally 1D and ``ensure_2d`` is True. Setting to 0
disables this check.
estimator : str or estimator instance, default=None
If passed, include the name of the estimator in warning messages.
input_name : str, default=""
The data name used to construct the error message. In particular
if `input_name` is "X" and the data has NaN values and
allow_nan is False, the error message will link to the imputer
documentation.
.. versionadded:: 1.1.0
Returns
-------
array_converted : object
The converted and validated array.
Examples
--------
>>> from sklearn.utils.validation import check_array
>>> X = [[1, 2, 3], [4, 5, 6]]
>>> X_checked = check_array(X)
>>> X_checked
array([[1, 2, 3], [4, 5, 6]])
"""
ensure_all_finite = _deprecate_force_all_finite(force_all_finite, ensure_all_finite)
if isinstance(array, np.matrix):
raise TypeError(
"np.matrix is not supported. Please convert to a numpy array with "
"np.asarray. For more information see: "
"https://numpy.org/doc/stable/reference/generated/numpy.matrix.html"
)
xp, is_array_api_compliant = get_namespace(array)
# store reference to original array to check if copy is needed when
# function returns
array_orig = array
# store whether originally we wanted numeric dtype
dtype_numeric = isinstance(dtype, str) and dtype == "numeric"
dtype_orig = getattr(array, "dtype", None)
if not is_array_api_compliant and not hasattr(dtype_orig, "kind"):
# not a data type (e.g. a column named dtype in a pandas DataFrame)
dtype_orig = None
# check if the object contains several dtypes (typically a pandas
# DataFrame), and store them. If not, store None.
dtypes_orig = None
pandas_requires_conversion = False
# track if we have a Series-like object to raise a better error message
type_if_series = None
if hasattr(array, "dtypes") and hasattr(array.dtypes, "__array__"):
# throw warning if columns are sparse. If all columns are sparse, then
# array.sparse exists and sparsity will be preserved (later).
with suppress(ImportError):
from pandas import SparseDtype
def is_sparse(dtype):
return isinstance(dtype, SparseDtype)
if not hasattr(array, "sparse") and array.dtypes.apply(is_sparse).any():
warnings.warn(
"pandas.DataFrame with sparse columns found."
"It will be converted to a dense numpy array."
)
dtypes_orig = list(array.dtypes)
pandas_requires_conversion = any(
_pandas_dtype_needs_early_conversion(i) for i in dtypes_orig
)
if all(isinstance(dtype_iter, np.dtype) for dtype_iter in dtypes_orig):
dtype_orig = np.result_type(*dtypes_orig)
elif pandas_requires_conversion and any(d == object for d in dtypes_orig):
# Force object if any of the dtypes is an object
dtype_orig = object
elif (_is_extension_array_dtype(array) or hasattr(array, "iloc")) and hasattr(
array, "dtype"
):
# array is a pandas series
type_if_series = type(array)
pandas_requires_conversion = _pandas_dtype_needs_early_conversion(array.dtype)
if isinstance(array.dtype, np.dtype):
dtype_orig = array.dtype
else:
# Set to None to let array.astype work out the best dtype
dtype_orig = None
if dtype_numeric:
if (
dtype_orig is not None
and hasattr(dtype_orig, "kind")
and dtype_orig.kind == "O"
):
# if input is object, convert to float.
dtype = xp.float64
else:
dtype = None
if isinstance(dtype, (list, tuple)):
if dtype_orig is not None and dtype_orig in dtype:
# no dtype conversion required
dtype = None
else:
# dtype conversion required. Let's select the first element of the
# list of accepted types.
dtype = dtype[0]
if pandas_requires_conversion:
# pandas dataframe requires conversion earlier to handle extension dtypes with
# nans
# Use the original dtype for conversion if dtype is None
new_dtype = dtype_orig if dtype is None else dtype
array = array.astype(new_dtype)
# Since we converted here, we do not need to convert again later
dtype = None
if ensure_all_finite not in (True, False, "allow-nan"):
raise ValueError(
"ensure_all_finite should be a bool or 'allow-nan'. Got "
f"{ensure_all_finite!r} instead."
)
if dtype is not None and _is_numpy_namespace(xp):
# convert to dtype object to conform to Array API to be use `xp.isdtype` later
dtype = np.dtype(dtype)
estimator_name = _check_estimator_name(estimator)
context = " by %s" % estimator_name if estimator is not None else ""
# When all dataframe columns are sparse, convert to a sparse array
if hasattr(array, "sparse") and array.ndim > 1:
with suppress(ImportError):
from pandas import SparseDtype
def is_sparse(dtype):
return isinstance(dtype, SparseDtype)
if array.dtypes.apply(is_sparse).all():
# DataFrame.sparse only supports `to_coo`
array = array.sparse.to_coo()
if array.dtype == np.dtype("object"):
unique_dtypes = set([dt.subtype.name for dt in array_orig.dtypes])
if len(unique_dtypes) > 1:
raise ValueError(
"Pandas DataFrame with mixed sparse extension arrays "
"generated a sparse matrix with object dtype which "
"can not be converted to a scipy sparse matrix."
"Sparse extension arrays should all have the same "
"numeric type."
)
if sp.issparse(array):
_ensure_no_complex_data(array)
array = _ensure_sparse_format(
array,
accept_sparse=accept_sparse,
dtype=dtype,
copy=copy,
ensure_all_finite=ensure_all_finite,
accept_large_sparse=accept_large_sparse,
estimator_name=estimator_name,
input_name=input_name,
)
if ensure_2d and array.ndim < 2:
raise ValueError(
f"Expected 2D input, got input with shape {array.shape}.\n"
"Reshape your data either using array.reshape(-1, 1) if "
"your data has a single feature or array.reshape(1, -1) "
"if it contains a single sample."
)
else:
# If np.array(..) gives ComplexWarning, then we convert the warning
# to an error. This is needed because specifying a non complex
# dtype to the function converts complex to real dtype,
# thereby passing the test made in the lines following the scope
# of warnings context manager.
with warnings.catch_warnings():
try:
warnings.simplefilter("error", ComplexWarning)
if dtype is not None and xp.isdtype(dtype, "integral"):
# Conversion float -> int should not contain NaN or
# inf (numpy#14412). We cannot use casting='safe' because
# then conversion float -> int would be disallowed.
array = _asarray_with_order(array, order=order, xp=xp)
if xp.isdtype(array.dtype, ("real floating", "complex floating")):
_assert_all_finite(
array,
allow_nan=False,
msg_dtype=dtype,
estimator_name=estimator_name,
input_name=input_name,
)
array = xp.astype(array, dtype, copy=False)
else:
array = _asarray_with_order(array, order=order, dtype=dtype, xp=xp)
except ComplexWarning as complex_warning:
raise ValueError(
"Complex data not supported\n{}\n".format(array)
) from complex_warning
# It is possible that the np.array(..) gave no warning. This happens
# when no dtype conversion happened, for example dtype = None. The
# result is that np.array(..) produces an array of complex dtype
# and we need to catch and raise exception for such cases.
_ensure_no_complex_data(array)
if ensure_2d:
# If input is scalar raise error
if array.ndim == 0:
raise ValueError(
"Expected 2D array, got scalar array instead:\narray={}.\n"
"Reshape your data either using array.reshape(-1, 1) if "
"your data has a single feature or array.reshape(1, -1) "
"if it contains a single sample.".format(array)
)
# If input is 1D raise error
if array.ndim == 1:
# If input is a Series-like object (eg. pandas Series or polars Series)
if type_if_series is not None:
msg = (
f"Expected a 2-dimensional container but got {type_if_series} "
"instead. Pass a DataFrame containing a single row (i.e. "
"single sample) or a single column (i.e. single feature) "
"instead."
)
else:
msg = (
f"Expected 2D array, got 1D array instead:\narray={array}.\n"
"Reshape your data either using array.reshape(-1, 1) if "
"your data has a single feature or array.reshape(1, -1) "
"if it contains a single sample."
)
raise ValueError(msg)
if dtype_numeric and hasattr(array.dtype, "kind") and array.dtype.kind in "USV":
raise ValueError(
"dtype='numeric' is not compatible with arrays of bytes/strings."
"Convert your data to numeric values explicitly instead."
)
if not allow_nd and array.ndim >= 3:
raise ValueError(
f"Found array with dim {array.ndim},"
f" while dim <= 2 is required{context}."
)
if ensure_all_finite:
_assert_all_finite(
array,
input_name=input_name,
estimator_name=estimator_name,
allow_nan=ensure_all_finite == "allow-nan",
)
if copy:
if _is_numpy_namespace(xp):
# only make a copy if `array` and `array_orig` may share memory`
if np.may_share_memory(array, array_orig):
array = _asarray_with_order(
array, dtype=dtype, order=order, copy=True, xp=xp
)
else:
# always make a copy for non-numpy arrays
array = _asarray_with_order(
array, dtype=dtype, order=order, copy=True, xp=xp
)
if ensure_min_samples > 0:
n_samples = _num_samples(array)
if n_samples < ensure_min_samples:
raise ValueError(
"Found array with %d sample(s) (shape=%s) while a"
" minimum of %d is required%s."
% (n_samples, array.shape, ensure_min_samples, context)
)
if ensure_min_features > 0 and array.ndim == 2:
n_features = array.shape[1]
if n_features < ensure_min_features:
raise ValueError(
"Found array with %d feature(s) (shape=%s) while"
" a minimum of %d is required%s."
% (n_features, array.shape, ensure_min_features, context)
)
if ensure_non_negative:
whom = input_name
if estimator_name:
whom += f" in {estimator_name}"
check_non_negative(array, whom)
if force_writeable:
# By default, array.copy() creates a C-ordered copy. We set order=K to
# preserve the order of the array.
copy_params = {"order": "K"} if not sp.issparse(array) else {}
array_data = array.data if sp.issparse(array) else array
flags = getattr(array_data, "flags", None)
if not getattr(flags, "writeable", True):
# This situation can only happen when copy=False, the array is read-only and
# a writeable output is requested. This is an ambiguous setting so we chose
# to always (except for one specific setting, see below) make a copy to
# ensure that the output is writeable, even if avoidable, to not overwrite
# the user's data by surprise.
if _is_pandas_df_or_series(array_orig):
try:
# In pandas >= 3, np.asarray(df), called earlier in check_array,
# returns a read-only intermediate array. It can be made writeable
# safely without copy because if the original DataFrame was backed
# by a read-only array, trying to change the flag would raise an
# error, in which case we make a copy.
array_data.flags.writeable = True
except ValueError:
array = array.copy(**copy_params)
else:
array = array.copy(**copy_params)
return array
|
Input validation on an array, list, sparse matrix or similar.
By default, the input is checked to be a non-empty 2D array containing
only finite values. If the dtype of the array is object, attempt
converting to float, raising on failure.
Parameters
----------
array : object
Input object to check / convert.
accept_sparse : str, bool or list/tuple of str, default=False
String[s] representing allowed sparse matrix formats, such as 'csc',
'csr', etc. If the input is sparse but not in the allowed format,
it will be converted to the first listed format. True allows the input
to be any format. False means that a sparse matrix input will
raise an error.
accept_large_sparse : bool, default=True
If a CSR, CSC, COO or BSR sparse matrix is supplied and accepted by
accept_sparse, accept_large_sparse=False will cause it to be accepted
only if its indices are stored with a 32-bit dtype.
.. versionadded:: 0.20
dtype : 'numeric', type, list of type or None, default='numeric'
Data type of result. If None, the dtype of the input is preserved.
If "numeric", dtype is preserved unless array.dtype is object.
If dtype is a list of types, conversion on the first type is only
performed if the dtype of the input is not in the list.
order : {'F', 'C'} or None, default=None
Whether an array will be forced to be fortran or c-style.
When order is None (default), then if copy=False, nothing is ensured
about the memory layout of the output array; otherwise (copy=True)
the memory layout of the returned array is kept as close as possible
to the original array.
copy : bool, default=False
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_writeable : bool, default=False
Whether to force the output array to be writeable. If True, the returned array
is guaranteed to be writeable, which may require a copy. Otherwise the
writeability of the input array is preserved.
.. versionadded:: 1.6
force_all_finite : bool or 'allow-nan', default=True
Whether to raise an error on np.inf, np.nan, pd.NA in array. The
possibilities are:
- True: Force all values of array to be finite.
- False: accepts np.inf, np.nan, pd.NA in array.
- 'allow-nan': accepts only np.nan and pd.NA values in array. Values
cannot be infinite.
.. versionadded:: 0.20
``force_all_finite`` accepts the string ``'allow-nan'``.
.. versionchanged:: 0.23
Accepts `pd.NA` and converts it into `np.nan`
.. deprecated:: 1.6
`force_all_finite` was renamed to `ensure_all_finite` and will be removed
in 1.8.
ensure_all_finite : bool or 'allow-nan', default=True
Whether to raise an error on np.inf, np.nan, pd.NA in array. The
possibilities are:
- True: Force all values of array to be finite.
- False: accepts np.inf, np.nan, pd.NA in array.
- 'allow-nan': accepts only np.nan and pd.NA values in array. Values
cannot be infinite.
.. versionadded:: 1.6
`force_all_finite` was renamed to `ensure_all_finite`.
ensure_non_negative : bool, default=False
Make sure the array has only non-negative values. If True, an array that
contains negative values will raise a ValueError.
.. versionadded:: 1.6
ensure_2d : bool, default=True
Whether to raise a value error if array is not 2D.
allow_nd : bool, default=False
Whether to allow array.ndim > 2.
ensure_min_samples : int, default=1
Make sure that the array has a minimum number of samples in its first
axis (rows for a 2D array). Setting to 0 disables this check.
ensure_min_features : int, default=1
Make sure that the 2D array has some minimum number of features
(columns). The default value of 1 rejects empty datasets.
This check is only enforced when the input data has effectively 2
dimensions or is originally 1D and ``ensure_2d`` is True. Setting to 0
disables this check.
estimator : str or estimator instance, default=None
If passed, include the name of the estimator in warning messages.
input_name : str, default=""
The data name used to construct the error message. In particular
if `input_name` is "X" and the data has NaN values and
allow_nan is False, the error message will link to the imputer
documentation.
.. versionadded:: 1.1.0
Returns
-------
array_converted : object
The converted and validated array.
Examples
--------
>>> from sklearn.utils.validation import check_array
>>> X = [[1, 2, 3], [4, 5, 6]]
>>> X_checked = check_array(X)
>>> X_checked
array([[1, 2, 3], [4, 5, 6]])
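A brief illustrative extension of the example above (a sketch, assuming SciPy
is installed): a sparse input that is not in an allowed format is converted to
the first listed format, and an object-dtype numeric array is cast to float,
as described in the parameter list.
>>> import numpy as np
>>> from scipy import sparse
>>> X_sp = sparse.coo_matrix(np.eye(3))
>>> check_array(X_sp, accept_sparse=['csr']).format
'csr'
>>> check_array(np.array([[1, 2]], dtype=object))
array([[1., 2.]])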
|
check_array
|
python
|
scikit-learn/scikit-learn
|
sklearn/utils/validation.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/validation.py
|
BSD-3-Clause
|
def _check_large_sparse(X, accept_large_sparse=False):
"""Raise a ValueError if X has 64bit indices and accept_large_sparse=False"""
if not accept_large_sparse:
supported_indices = ["int32"]
if X.format == "coo":
index_keys = ["col", "row"]
elif X.format in ["csr", "csc", "bsr"]:
index_keys = ["indices", "indptr"]
else:
return
for key in index_keys:
indices_datatype = getattr(X, key).dtype
if indices_datatype not in supported_indices:
raise ValueError(
"Only sparse matrices with 32-bit integer indices are accepted."
f" Got {indices_datatype} indices. Please do report a minimal"
" reproducer on scikit-learn issue tracker so that support for"
" your use-case can be studied by maintainers. See:"
" https://scikit-learn.org/dev/developers/minimal_reproducer.html"
)
|
Raise a ValueError if X has 64bit indices and accept_large_sparse=False
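A hedged illustration of how this check surfaces through the public
``check_array`` API (a sketch, assuming SciPy is installed); casting the index
arrays to int64 by hand merely mimics a large sparse matrix for illustration,
without allocating one.
>>> import numpy as np
>>> from scipy import sparse
>>> from sklearn.utils.validation import check_array
>>> X = sparse.csr_matrix(np.eye(3))
>>> X.indices = X.indices.astype(np.int64)
>>> X.indptr = X.indptr.astype(np.int64)
>>> check_array(X, accept_sparse=True, accept_large_sparse=False)
Traceback (most recent call last):
...
ValueError: Only sparse matrices with 32-bit integer indices are accepted. ...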
|
_check_large_sparse
|
python
|
scikit-learn/scikit-learn
|
sklearn/utils/validation.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/validation.py
|
BSD-3-Clause
|
def check_X_y(
X,
y,
accept_sparse=False,
*,
accept_large_sparse=True,
dtype="numeric",
order=None,
copy=False,
force_writeable=False,
force_all_finite="deprecated",
ensure_all_finite=None,
ensure_2d=True,
allow_nd=False,
multi_output=False,
ensure_min_samples=1,
ensure_min_features=1,
y_numeric=False,
estimator=None,
):
"""Input validation for standard estimators.
Checks X and y for consistent length, enforces X to be 2D and y 1D. By
default, X is checked to be non-empty and containing only finite values.
Standard input checks are also applied to y, such as checking that y
does not have np.nan or np.inf targets. For multi-label y, set
multi_output=True to allow 2D and sparse y. If the dtype of X is
object, attempt converting to float, raising on failure.
Parameters
----------
X : {ndarray, list, sparse matrix}
Input data.
y : {ndarray, list, sparse matrix}
Labels.
accept_sparse : str, bool or list of str, default=False
String[s] representing allowed sparse matrix formats, such as 'csc',
'csr', etc. If the input is sparse but not in the allowed format,
it will be converted to the first listed format. True allows the input
to be any format. False means that a sparse matrix input will
raise an error.
accept_large_sparse : bool, default=True
If a CSR, CSC, COO or BSR sparse matrix is supplied and accepted by
        accept_sparse, accept_large_sparse=False will cause it to be accepted
        only if its indices are stored with a 32-bit dtype.
.. versionadded:: 0.20
dtype : 'numeric', type, list of type or None, default='numeric'
Data type of result. If None, the dtype of the input is preserved.
If "numeric", dtype is preserved unless array.dtype is object.
        If dtype is a list of types, conversion to the first type is only
        performed if the dtype of the input is not in the list.
order : {'F', 'C'}, default=None
Whether an array will be forced to be fortran or c-style. If
`None`, then the input data's order is preserved when possible.
copy : bool, default=False
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_writeable : bool, default=False
Whether to force the output array to be writeable. If True, the returned array
is guaranteed to be writeable, which may require a copy. Otherwise the
writeability of the input array is preserved.
.. versionadded:: 1.6
force_all_finite : bool or 'allow-nan', default=True
Whether to raise an error on np.inf, np.nan, pd.NA in array. This parameter
does not influence whether y can have np.inf, np.nan, pd.NA values.
The possibilities are:
- True: Force all values of X to be finite.
- False: accepts np.inf, np.nan, pd.NA in X.
- 'allow-nan': accepts only np.nan or pd.NA values in X. Values cannot
be infinite.
.. versionadded:: 0.20
``force_all_finite`` accepts the string ``'allow-nan'``.
.. versionchanged:: 0.23
Accepts `pd.NA` and converts it into `np.nan`
.. deprecated:: 1.6
`force_all_finite` was renamed to `ensure_all_finite` and will be removed
in 1.8.
ensure_all_finite : bool or 'allow-nan', default=True
Whether to raise an error on np.inf, np.nan, pd.NA in array. This parameter
does not influence whether y can have np.inf, np.nan, pd.NA values.
The possibilities are:
- True: Force all values of X to be finite.
- False: accepts np.inf, np.nan, pd.NA in X.
- 'allow-nan': accepts only np.nan or pd.NA values in X. Values cannot
be infinite.
.. versionadded:: 1.6
`force_all_finite` was renamed to `ensure_all_finite`.
ensure_2d : bool, default=True
Whether to raise a value error if X is not 2D.
allow_nd : bool, default=False
Whether to allow X.ndim > 2.
multi_output : bool, default=False
Whether to allow 2D y (array or sparse matrix). If false, y will be
validated as a vector. y cannot have np.nan or np.inf values if
multi_output=True.
ensure_min_samples : int, default=1
Make sure that X has a minimum number of samples in its first
axis (rows for a 2D array).
ensure_min_features : int, default=1
Make sure that the 2D array has some minimum number of features
(columns). The default value of 1 rejects empty datasets.
This check is only enforced when X has effectively 2 dimensions or
is originally 1D and ``ensure_2d`` is True. Setting to 0 disables
this check.
y_numeric : bool, default=False
Whether to ensure that y has a numeric type. If dtype of y is object,
it is converted to float64. Should only be used for regression
algorithms.
estimator : str or estimator instance, default=None
If passed, include the name of the estimator in warning messages.
Returns
-------
X_converted : object
The converted and validated X.
y_converted : object
The converted and validated y.
Examples
--------
>>> from sklearn.utils.validation import check_X_y
>>> X = [[1, 2], [3, 4], [5, 6]]
>>> y = [1, 2, 3]
>>> X, y = check_X_y(X, y)
>>> X
array([[1, 2],
[3, 4],
[5, 6]])
>>> y
array([1, 2, 3])
"""
if y is None:
if estimator is None:
estimator_name = "estimator"
else:
estimator_name = _check_estimator_name(estimator)
raise ValueError(
f"{estimator_name} requires y to be passed, but the target y is None"
)
ensure_all_finite = _deprecate_force_all_finite(force_all_finite, ensure_all_finite)
X = check_array(
X,
accept_sparse=accept_sparse,
accept_large_sparse=accept_large_sparse,
dtype=dtype,
order=order,
copy=copy,
force_writeable=force_writeable,
ensure_all_finite=ensure_all_finite,
ensure_2d=ensure_2d,
allow_nd=allow_nd,
ensure_min_samples=ensure_min_samples,
ensure_min_features=ensure_min_features,
estimator=estimator,
input_name="X",
)
y = _check_y(y, multi_output=multi_output, y_numeric=y_numeric, estimator=estimator)
check_consistent_length(X, y)
return X, y
|
Input validation for standard estimators.
Checks X and y for consistent length, enforces X to be 2D and y 1D. By
default, X is checked to be non-empty and containing only finite values.
Standard input checks are also applied to y, such as checking that y
does not have np.nan or np.inf targets. For multi-label y, set
multi_output=True to allow 2D and sparse y. If the dtype of X is
object, attempt converting to float, raising on failure.
Parameters
----------
X : {ndarray, list, sparse matrix}
Input data.
y : {ndarray, list, sparse matrix}
Labels.
accept_sparse : str, bool or list of str, default=False
String[s] representing allowed sparse matrix formats, such as 'csc',
'csr', etc. If the input is sparse but not in the allowed format,
it will be converted to the first listed format. True allows the input
to be any format. False means that a sparse matrix input will
raise an error.
accept_large_sparse : bool, default=True
If a CSR, CSC, COO or BSR sparse matrix is supplied and accepted by
    accept_sparse, accept_large_sparse=False will cause it to be accepted
    only if its indices are stored with a 32-bit dtype.
.. versionadded:: 0.20
dtype : 'numeric', type, list of type or None, default='numeric'
Data type of result. If None, the dtype of the input is preserved.
If "numeric", dtype is preserved unless array.dtype is object.
    If dtype is a list of types, conversion to the first type is only
    performed if the dtype of the input is not in the list.
order : {'F', 'C'}, default=None
Whether an array will be forced to be fortran or c-style. If
`None`, then the input data's order is preserved when possible.
copy : bool, default=False
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_writeable : bool, default=False
Whether to force the output array to be writeable. If True, the returned array
is guaranteed to be writeable, which may require a copy. Otherwise the
writeability of the input array is preserved.
.. versionadded:: 1.6
force_all_finite : bool or 'allow-nan', default=True
Whether to raise an error on np.inf, np.nan, pd.NA in array. This parameter
does not influence whether y can have np.inf, np.nan, pd.NA values.
The possibilities are:
- True: Force all values of X to be finite.
- False: accepts np.inf, np.nan, pd.NA in X.
- 'allow-nan': accepts only np.nan or pd.NA values in X. Values cannot
be infinite.
.. versionadded:: 0.20
``force_all_finite`` accepts the string ``'allow-nan'``.
.. versionchanged:: 0.23
Accepts `pd.NA` and converts it into `np.nan`
.. deprecated:: 1.6
`force_all_finite` was renamed to `ensure_all_finite` and will be removed
in 1.8.
ensure_all_finite : bool or 'allow-nan', default=True
Whether to raise an error on np.inf, np.nan, pd.NA in array. This parameter
does not influence whether y can have np.inf, np.nan, pd.NA values.
The possibilities are:
- True: Force all values of X to be finite.
- False: accepts np.inf, np.nan, pd.NA in X.
- 'allow-nan': accepts only np.nan or pd.NA values in X. Values cannot
be infinite.
.. versionadded:: 1.6
`force_all_finite` was renamed to `ensure_all_finite`.
ensure_2d : bool, default=True
Whether to raise a value error if X is not 2D.
allow_nd : bool, default=False
Whether to allow X.ndim > 2.
multi_output : bool, default=False
Whether to allow 2D y (array or sparse matrix). If false, y will be
validated as a vector. y cannot have np.nan or np.inf values if
multi_output=True.
ensure_min_samples : int, default=1
Make sure that X has a minimum number of samples in its first
axis (rows for a 2D array).
ensure_min_features : int, default=1
Make sure that the 2D array has some minimum number of features
(columns). The default value of 1 rejects empty datasets.
This check is only enforced when X has effectively 2 dimensions or
is originally 1D and ``ensure_2d`` is True. Setting to 0 disables
this check.
y_numeric : bool, default=False
Whether to ensure that y has a numeric type. If dtype of y is object,
it is converted to float64. Should only be used for regression
algorithms.
estimator : str or estimator instance, default=None
If passed, include the name of the estimator in warning messages.
Returns
-------
X_converted : object
The converted and validated X.
y_converted : object
The converted and validated y.
Examples
--------
>>> from sklearn.utils.validation import check_X_y
>>> X = [[1, 2], [3, 4], [5, 6]]
>>> y = [1, 2, 3]
>>> X, y = check_X_y(X, y)
>>> X
array([[1, 2],
[3, 4],
[5, 6]])
>>> y
array([1, 2, 3])
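A short, hedged follow-on to the example above: ``multi_output=True`` keeps a
2D target intact, and ``y_numeric=True`` casts an object-dtype target to
float64, as described in the parameter list.
>>> import numpy as np
>>> X = [[1, 2], [3, 4], [5, 6]]
>>> Y = [[0, 1], [1, 0], [1, 1]]
>>> _, Y_checked = check_X_y(X, Y, multi_output=True)
>>> Y_checked.shape
(3, 2)
>>> _, y_num = check_X_y(X, np.array([1, 2, 3], dtype=object), y_numeric=True)
>>> y_num.dtype
dtype('float64')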
|
check_X_y
|
python
|
scikit-learn/scikit-learn
|
sklearn/utils/validation.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/validation.py
|
BSD-3-Clause
|
def _check_y(y, multi_output=False, y_numeric=False, estimator=None):
"""Isolated part of check_X_y dedicated to y validation"""
if multi_output:
y = check_array(
y,
accept_sparse="csr",
ensure_all_finite=True,
ensure_2d=False,
dtype=None,
input_name="y",
estimator=estimator,
)
else:
estimator_name = _check_estimator_name(estimator)
y = column_or_1d(y, warn=True)
_assert_all_finite(y, input_name="y", estimator_name=estimator_name)
_ensure_no_complex_data(y)
if y_numeric and hasattr(y.dtype, "kind") and y.dtype.kind == "O":
y = y.astype(np.float64)
return y
|
Isolated part of check_X_y dedicated to y validation
|
_check_y
|
python
|
scikit-learn/scikit-learn
|
sklearn/utils/validation.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/validation.py
|
BSD-3-Clause
|
def column_or_1d(y, *, dtype=None, warn=False, device=None):
"""Ravel column or 1d numpy array, else raises an error.
Parameters
----------
y : array-like
Input data.
dtype : data-type, default=None
Data type for `y`.
.. versionadded:: 1.2
warn : bool, default=False
To control display of warnings.
device : device, default=None
`device` object.
See the :ref:`Array API User Guide <array_api>` for more details.
.. versionadded:: 1.6
Returns
-------
y : ndarray
Output data.
Raises
------
ValueError
If `y` is not a 1D array or a 2D array with a single column.
Examples
--------
>>> from sklearn.utils.validation import column_or_1d
>>> column_or_1d([1, 1])
array([1, 1])
"""
xp, _ = get_namespace(y)
y = check_array(
y,
ensure_2d=False,
dtype=dtype,
input_name="y",
ensure_all_finite=False,
ensure_min_samples=0,
)
shape = y.shape
if len(shape) == 1:
return _asarray_with_order(
xp.reshape(y, (-1,)), order="C", xp=xp, device=device
)
if len(shape) == 2 and shape[1] == 1:
if warn:
warnings.warn(
(
"A column-vector y was passed when a 1d array was"
" expected. Please change the shape of y to "
"(n_samples, ), for example using ravel()."
),
DataConversionWarning,
stacklevel=2,
)
return _asarray_with_order(
xp.reshape(y, (-1,)), order="C", xp=xp, device=device
)
raise ValueError(
"y should be a 1d array, got an array of shape {} instead.".format(shape)
)
|
Ravel column or 1d numpy array, else raises an error.
Parameters
----------
y : array-like
Input data.
dtype : data-type, default=None
Data type for `y`.
.. versionadded:: 1.2
warn : bool, default=False
To control display of warnings.
device : device, default=None
`device` object.
See the :ref:`Array API User Guide <array_api>` for more details.
.. versionadded:: 1.6
Returns
-------
y : ndarray
Output data.
Raises
------
ValueError
If `y` is not a 1D array or a 2D array with a single column.
Examples
--------
>>> from sklearn.utils.validation import column_or_1d
>>> column_or_1d([1, 1])
array([1, 1])
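A brief, hedged extension of the example above: a single-column 2D target is
ravelled to one dimension (emitting a DataConversionWarning when ``warn=True``),
while any wider 2D input raises the error documented above.
>>> import numpy as np
>>> column_or_1d(np.array([[1], [2], [3]]))
array([1, 2, 3])
>>> column_or_1d(np.ones((2, 2)))
Traceback (most recent call last):
...
ValueError: y should be a 1d array, got an array of shape (2, 2) instead.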
|
column_or_1d
|
python
|
scikit-learn/scikit-learn
|
sklearn/utils/validation.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/validation.py
|
BSD-3-Clause
|