code | docstring | func_name | language | repo | path | url | license |
---|---|---|---|---|---|---|---|
def set_score_request(self, **kwargs):
"""Set requested parameters by the scorer.
Please see :ref:`User Guide <metadata_routing>` on how the routing
mechanism works.
.. versionadded:: 1.5
Parameters
----------
kwargs : dict
Arguments should be of the form ``param_name=alias``, and `alias`
can be one of ``{True, False, None, str}``.
"""
if not _routing_enabled():
raise RuntimeError(
"This method is only available when metadata routing is enabled."
" You can enable it using"
" sklearn.set_config(enable_metadata_routing=True)."
)
for param, alias in kwargs.items():
self._metadata_request.score.add_request(param=param, alias=alias)
return self
|
Set requested parameters by the scorer.
Please see :ref:`User Guide <metadata_routing>` on how the routing
mechanism works.
.. versionadded:: 1.5
Parameters
----------
kwargs : dict
Arguments should be of the form ``param_name=alias``, and `alias`
can be one of ``{True, False, None, str}``.
|
set_score_request
|
python
|
scikit-learn/scikit-learn
|
sklearn/metrics/_scorer.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/metrics/_scorer.py
|
BSD-3-Clause
|
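For context, a minimal sketch of how this method is typically used, assuming the built-in "accuracy" scorer; metadata routing must be enabled first, otherwise the RuntimeError above is raised.
from sklearn import set_config
from sklearn.metrics import get_scorer

set_config(enable_metadata_routing=True)  # required, see the RuntimeError above
# Ask the scorer to receive `sample_weight` when a router such as
# cross_validate or GridSearchCV routes metadata to it.
scorer = get_scorer("accuracy").set_score_request(sample_weight=True)
print(scorer.get_metadata_routing())  # the recorded request for `score`
set_config(enable_metadata_routing=False)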
def _check_multimetric_scoring(estimator, scoring):
"""Check the scoring parameter in cases when multiple metrics are allowed.
In addition, multimetric scoring leverages a caching mechanism to not call the same
estimator response method multiple times. Hence, the scorer is modified to only use
a single response method given a list of response methods and the estimator.
Parameters
----------
estimator : sklearn estimator instance
The estimator for which the scoring will be applied.
scoring : list, tuple or dict
Strategy to evaluate the performance of the cross-validated model on
the test set.
The possibilities are:
- a list or tuple of unique strings;
- a callable returning a dictionary where the keys are the metric
names and the values are the metric scores;
- a dictionary with metric names as keys and callables as values.
See :ref:`multimetric_grid_search` for an example.
Returns
-------
scorers_dict : dict
A dict mapping each scorer name to its validated scorer.
"""
err_msg_generic = (
f"scoring is invalid (got {scoring!r}). Refer to the "
"scoring glossary for details: "
"https://scikit-learn.org/stable/glossary.html#term-scoring"
)
if isinstance(scoring, (list, tuple, set)):
err_msg = (
"The list/tuple elements must be unique strings of predefined scorers. "
)
try:
keys = set(scoring)
except TypeError as e:
raise ValueError(err_msg) from e
if len(keys) != len(scoring):
raise ValueError(
f"{err_msg} Duplicate elements were found in"
f" the given list. {scoring!r}"
)
elif len(keys) > 0:
if not all(isinstance(k, str) for k in keys):
if any(callable(k) for k in keys):
raise ValueError(
f"{err_msg} One or more of the elements "
"were callables. Use a dict of score "
"name mapped to the scorer callable. "
f"Got {scoring!r}"
)
else:
raise ValueError(
f"{err_msg} Non-string types were found "
f"in the given list. Got {scoring!r}"
)
scorers = {
scorer: check_scoring(estimator, scoring=scorer) for scorer in scoring
}
else:
raise ValueError(f"{err_msg} Empty list was given. {scoring!r}")
elif isinstance(scoring, dict):
keys = set(scoring)
if not all(isinstance(k, str) for k in keys):
raise ValueError(
"Non-string types were found in the keys of "
f"the given dict. scoring={scoring!r}"
)
if len(keys) == 0:
raise ValueError(f"An empty dict was passed. {scoring!r}")
scorers = {
key: check_scoring(estimator, scoring=scorer)
for key, scorer in scoring.items()
}
else:
raise ValueError(err_msg_generic)
return scorers
|
Check the scoring parameter in cases when multiple metrics are allowed.
In addition, multimetric scoring leverages a caching mechanism to not call the same
estimator response method multiple times. Hence, the scorer is modified to only use
a single response method given a list of response methods and the estimator.
Parameters
----------
estimator : sklearn estimator instance
The estimator for which the scoring will be applied.
scoring : list, tuple or dict
Strategy to evaluate the performance of the cross-validated model on
the test set.
The possibilities are:
- a list or tuple of unique strings;
- a callable returning a dictionary where the keys are the metric
names and the values are the metric scores;
- a dictionary with metric names as keys and callables as values.
See :ref:`multimetric_grid_search` for an example.
Returns
-------
scorers_dict : dict
A dict mapping each scorer name to its validated scorer.
|
_check_multimetric_scoring
|
python
|
scikit-learn/scikit-learn
|
sklearn/metrics/_scorer.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/metrics/_scorer.py
|
BSD-3-Clause
|
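Although this helper is private, the `scoring` shapes it validates are what callers pass to public APIs. A small illustrative sketch; the estimator and metric names are arbitrary choices.
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, make_scorer
from sklearn.model_selection import cross_validate

X, y = load_iris(return_X_y=True)
clf = LogisticRegression(max_iter=1000)

# a list/tuple of unique predefined scorer names
res_list = cross_validate(clf, X, y, scoring=["accuracy", "f1_macro"])
# a dict mapping metric names to scorer callables
res_dict = cross_validate(clf, X, y, scoring={"acc": make_scorer(accuracy_score)})
print(sorted(k for k in res_list if k.startswith("test_")))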
def make_scorer(
score_func, *, response_method="default", greater_is_better=True, **kwargs
):
"""Make a scorer from a performance metric or loss function.
A scorer is a wrapper around an arbitrary metric or loss function that is called
with the signature `scorer(estimator, X, y_true, **kwargs)`.
It is accepted in all scikit-learn estimators or functions allowing a `scoring`
parameter.
The parameter `response_method` allows specifying which method of the estimator
should be used to feed the scoring/loss function.
Read more in the :ref:`User Guide <scoring_callable>`.
Parameters
----------
score_func : callable
Score function (or loss function) with signature
``score_func(y, y_pred, **kwargs)``.
response_method : {"predict_proba", "decision_function", "predict"} or \
list/tuple of such str, default=None
Specifies the response method to use get prediction from an estimator
(i.e. :term:`predict_proba`, :term:`decision_function` or
:term:`predict`). Possible choices are:
- if `str`, it corresponds to the name to the method to return;
- if a list or tuple of `str`, it provides the method names in order of
preference. The method returned corresponds to the first method in
the list and which is implemented by `estimator`.
- if `None`, it is equivalent to `"predict"`.
.. versionadded:: 1.4
.. deprecated:: 1.6
None is equivalent to 'predict' and is deprecated. It will be removed in
version 1.8.
greater_is_better : bool, default=True
Whether `score_func` is a score function (default), meaning high is
good, or a loss function, meaning low is good. In the latter case, the
scorer object will sign-flip the outcome of the `score_func`.
**kwargs : additional arguments
Additional parameters to be passed to `score_func`.
Returns
-------
scorer : callable
Callable object that returns a scalar score; greater is better.
Examples
--------
>>> from sklearn.metrics import fbeta_score, make_scorer
>>> ftwo_scorer = make_scorer(fbeta_score, beta=2)
>>> ftwo_scorer
make_scorer(fbeta_score, response_method='predict', beta=2)
>>> from sklearn.model_selection import GridSearchCV
>>> from sklearn.svm import LinearSVC
>>> grid = GridSearchCV(LinearSVC(), param_grid={'C': [1, 10]},
... scoring=ftwo_scorer)
"""
sign = 1 if greater_is_better else -1
if response_method is None:
warnings.warn(
"response_method=None is deprecated in version 1.6 and will be removed "
"in version 1.8. Leave it to its default value to avoid this warning.",
FutureWarning,
)
response_method = "predict"
elif response_method == "default":
response_method = "predict"
return _Scorer(score_func, sign, kwargs, response_method)
|
Make a scorer from a performance metric or loss function.
A scorer is a wrapper around an arbitrary metric or loss function that is called
with the signature `scorer(estimator, X, y_true, **kwargs)`.
It is accepted in all scikit-learn estimators or functions allowing a `scoring`
parameter.
The parameter `response_method` allows specifying which method of the estimator
should be used to feed the scoring/loss function.
Read more in the :ref:`User Guide <scoring_callable>`.
Parameters
----------
score_func : callable
Score function (or loss function) with signature
``score_func(y, y_pred, **kwargs)``.
response_method : {"predict_proba", "decision_function", "predict"} or list/tuple of such str, default=None
Specifies the response method to use get prediction from an estimator
(i.e. :term:`predict_proba`, :term:`decision_function` or
:term:`predict`). Possible choices are:
- if `str`, it corresponds to the name to the method to return;
- if a list or tuple of `str`, it provides the method names in order of
preference. The method returned corresponds to the first method in
the list and which is implemented by `estimator`.
- if `None`, it is equivalent to `"predict"`.
.. versionadded:: 1.4
.. deprecated:: 1.6
None is equivalent to 'predict' and is deprecated. It will be removed in
version 1.8.
greater_is_better : bool, default=True
Whether `score_func` is a score function (default), meaning high is
good, or a loss function, meaning low is good. In the latter case, the
scorer object will sign-flip the outcome of the `score_func`.
**kwargs : additional arguments
Additional parameters to be passed to `score_func`.
Returns
-------
scorer : callable
Callable object that returns a scalar score; greater is better.
Examples
--------
>>> from sklearn.metrics import fbeta_score, make_scorer
>>> ftwo_scorer = make_scorer(fbeta_score, beta=2)
>>> ftwo_scorer
make_scorer(fbeta_score, response_method='predict', beta=2)
>>> from sklearn.model_selection import GridSearchCV
>>> from sklearn.svm import LinearSVC
>>> grid = GridSearchCV(LinearSVC(), param_grid={'C': [1, 10]},
... scoring=ftwo_scorer)
|
make_scorer
|
python
|
scikit-learn/scikit-learn
|
sklearn/metrics/_scorer.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/metrics/_scorer.py
|
BSD-3-Clause
|
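A short sketch of the two most common `make_scorer` patterns, using illustrative metric choices (`mean_squared_error` and `log_loss`): a loss that needs sign-flipping, and a metric that needs probabilities rather than hard predictions.
from sklearn.metrics import log_loss, make_scorer, mean_squared_error

# Loss function: lower is better, so the scorer sign-flips the outcome.
neg_mse_scorer = make_scorer(mean_squared_error, greater_is_better=False)

# Metric computed on probabilities instead of hard class predictions.
neg_log_loss_scorer = make_scorer(
    log_loss, greater_is_better=False, response_method="predict_proba"
)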
def check_scoring(estimator=None, scoring=None, *, allow_none=False, raise_exc=True):
"""Determine scorer from user options.
A TypeError will be thrown if the estimator cannot be scored.
Parameters
----------
estimator : estimator object implementing 'fit' or None, default=None
The object to use to fit the data. If `None`, then this function may error
depending on `allow_none`.
scoring : str, callable, list, tuple, set, or dict, default=None
Scorer to use. If `scoring` represents a single score, one can use:
- a single string (see :ref:`scoring_string_names`);
- a callable (see :ref:`scoring_callable`) that returns a single value;
- `None`, the `estimator`'s
:ref:`default evaluation criterion <scoring_api_overview>` is used.
If `scoring` represents multiple scores, one can use:
- a list, tuple or set of unique strings;
- a callable returning a dictionary where the keys are the metric names and the
values are the metric scorers;
- a dictionary with metric names as keys and callables as values. The callables
need to have the signature `callable(estimator, X, y)`.
allow_none : bool, default=False
Whether to return None or raise an error if no `scoring` is specified and the
estimator has no `score` method.
raise_exc : bool, default=True
Whether to raise an exception (if a subset of the scorers in multimetric scoring
fails) or to return an error code.
- If set to `True`, raises the failing scorer's exception.
- If set to `False`, a formatted string of the exception details is passed as
result of the failing scorer(s).
This applies if `scoring` is list, tuple, set, or dict. Ignored if `scoring` is
a str or a callable.
.. versionadded:: 1.6
Returns
-------
scoring : callable
A scorer callable object / function with signature ``scorer(estimator, X, y)``.
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn.metrics import check_scoring
>>> from sklearn.tree import DecisionTreeClassifier
>>> X, y = load_iris(return_X_y=True)
>>> classifier = DecisionTreeClassifier(max_depth=2).fit(X, y)
>>> scorer = check_scoring(classifier, scoring='accuracy')
>>> scorer(classifier, X, y)
0.96...
>>> from sklearn.metrics import make_scorer, accuracy_score, mean_squared_log_error
>>> X, y = load_iris(return_X_y=True)
>>> y *= -1
>>> clf = DecisionTreeClassifier().fit(X, y)
>>> scoring = {
... "accuracy": make_scorer(accuracy_score),
... "mean_squared_log_error": make_scorer(mean_squared_log_error),
... }
>>> scoring_call = check_scoring(estimator=clf, scoring=scoring, raise_exc=False)
>>> scores = scoring_call(clf, X, y)
>>> scores
{'accuracy': 1.0, 'mean_squared_log_error': 'Traceback ...'}
"""
if isinstance(scoring, str):
return get_scorer(scoring)
if callable(scoring):
# Heuristic to ensure user has not passed a metric
module = getattr(scoring, "__module__", None)
if (
hasattr(module, "startswith")
and module.startswith("sklearn.metrics.")
and not module.startswith("sklearn.metrics._scorer")
and not module.startswith("sklearn.metrics.tests.")
):
raise ValueError(
"scoring value %r looks like it is a metric "
"function rather than a scorer. A scorer should "
"require an estimator as its first parameter. "
"Please use `make_scorer` to convert a metric "
"to a scorer." % scoring
)
return get_scorer(scoring)
if isinstance(scoring, (list, tuple, set, dict)):
scorers = _check_multimetric_scoring(estimator, scoring=scoring)
return _MultimetricScorer(scorers=scorers, raise_exc=raise_exc)
if scoring is None:
if hasattr(estimator, "score"):
return _PassthroughScorer(estimator)
elif allow_none:
return None
else:
raise TypeError(
"If no scoring is specified, the estimator passed should "
"have a 'score' method. The estimator %r does not." % estimator
)
|
Determine scorer from user options.
A TypeError will be thrown if the estimator cannot be scored.
Parameters
----------
estimator : estimator object implementing 'fit' or None, default=None
The object to use to fit the data. If `None`, then this function may error
depending on `allow_none`.
scoring : str, callable, list, tuple, set, or dict, default=None
Scorer to use. If `scoring` represents a single score, one can use:
- a single string (see :ref:`scoring_string_names`);
- a callable (see :ref:`scoring_callable`) that returns a single value;
- `None`, the `estimator`'s
:ref:`default evaluation criterion <scoring_api_overview>` is used.
If `scoring` represents multiple scores, one can use:
- a list, tuple or set of unique strings;
- a callable returning a dictionary where the keys are the metric names and the
values are the metric scorers;
- a dictionary with metric names as keys and callables as values. The callables
need to have the signature `callable(estimator, X, y)`.
allow_none : bool, default=False
Whether to return None or raise an error if no `scoring` is specified and the
estimator has no `score` method.
raise_exc : bool, default=True
Whether to raise an exception (if a subset of the scorers in multimetric scoring
fails) or to return an error code.
- If set to `True`, raises the failing scorer's exception.
- If set to `False`, a formatted string of the exception details is passed as
result of the failing scorer(s).
This applies if `scoring` is list, tuple, set, or dict. Ignored if `scoring` is
a str or a callable.
.. versionadded:: 1.6
Returns
-------
scoring : callable
A scorer callable object / function with signature ``scorer(estimator, X, y)``.
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn.metrics import check_scoring
>>> from sklearn.tree import DecisionTreeClassifier
>>> X, y = load_iris(return_X_y=True)
>>> classifier = DecisionTreeClassifier(max_depth=2).fit(X, y)
>>> scorer = check_scoring(classifier, scoring='accuracy')
>>> scorer(classifier, X, y)
0.96...
>>> from sklearn.metrics import make_scorer, accuracy_score, mean_squared_log_error
>>> X, y = load_iris(return_X_y=True)
>>> y *= -1
>>> clf = DecisionTreeClassifier().fit(X, y)
>>> scoring = {
... "accuracy": make_scorer(accuracy_score),
... "mean_squared_log_error": make_scorer(mean_squared_log_error),
... }
>>> scoring_call = check_scoring(estimator=clf, scoring=scoring, raise_exc=False)
>>> scores = scoring_call(clf, X, y)
>>> scores
{'accuracy': 1.0, 'mean_squared_log_error': 'Traceback ...'}
|
check_scoring
|
python
|
scikit-learn/scikit-learn
|
sklearn/metrics/_scorer.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/metrics/_scorer.py
|
BSD-3-Clause
|
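A minimal sketch of the single-metric branches handled above (a string, a scorer callable, and `None` falling back to the estimator's own `score`); the estimator choice is arbitrary.
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, check_scoring, make_scorer

X, y = load_iris(return_X_y=True)
clf = LogisticRegression(max_iter=1000).fit(X, y)

by_name = check_scoring(clf, scoring="accuracy")             # predefined scorer
by_callable = check_scoring(clf, scoring=make_scorer(accuracy_score))
by_default = check_scoring(clf, scoring=None)                # uses clf.score
print(by_name(clf, X, y), by_callable(clf, X, y), by_default(clf, X, y))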
def _threshold_scores_to_class_labels(y_score, threshold, classes, pos_label):
"""Threshold `y_score` and return the associated class labels."""
if pos_label is None:
map_thresholded_score_to_label = np.array([0, 1])
else:
pos_label_idx = np.flatnonzero(classes == pos_label)[0]
neg_label_idx = np.flatnonzero(classes != pos_label)[0]
map_thresholded_score_to_label = np.array([neg_label_idx, pos_label_idx])
return classes[map_thresholded_score_to_label[(y_score >= threshold).astype(int)]]
|
Threshold `y_score` and return the associated class labels.
|
_threshold_scores_to_class_labels
|
python
|
scikit-learn/scikit-learn
|
sklearn/metrics/_scorer.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/metrics/_scorer.py
|
BSD-3-Clause
|
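A standalone sketch of the same thresholding logic on a toy binary problem; `classes`, `pos_label`, and the scores below are made-up values.
import numpy as np

classes = np.array(["neg", "pos"])
pos_label = "pos"
y_score = np.array([0.1, 0.4, 0.6, 0.9])
threshold = 0.5

pos_idx = np.flatnonzero(classes == pos_label)[0]
neg_idx = np.flatnonzero(classes != pos_label)[0]
mapping = np.array([neg_idx, pos_idx])
print(classes[mapping[(y_score >= threshold).astype(int)]])  # ['neg' 'neg' 'pos' 'pos']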
def from_scorer(cls, scorer, response_method, thresholds):
"""Create a continuous scorer from a normal scorer."""
instance = cls(
score_func=scorer._score_func,
sign=scorer._sign,
response_method=response_method,
thresholds=thresholds,
kwargs=scorer._kwargs,
)
# transfer the metadata request
instance._metadata_request = scorer._get_metadata_request()
return instance
|
Create a continuous scorer from a normal scorer.
|
from_scorer
|
python
|
scikit-learn/scikit-learn
|
sklearn/metrics/_scorer.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/metrics/_scorer.py
|
BSD-3-Clause
|
def _score(self, method_caller, estimator, X, y_true, **kwargs):
"""Evaluate predicted target values for X relative to y_true.
Parameters
----------
method_caller : callable
Returns predictions given an estimator, method name, and other
arguments, potentially caching results.
estimator : object
Trained estimator to use for scoring.
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Test data that will be fed to estimator.predict.
y_true : array-like of shape (n_samples,)
Gold standard target values for X.
**kwargs : dict
Other parameters passed to the scorer. Refer to
:func:`set_score_request` for more details.
Returns
-------
scores : ndarray of shape (thresholds,)
The scores associated to each threshold.
potential_thresholds : ndarray of shape (thresholds,)
The potential thresholds used to compute the scores.
"""
pos_label = self._get_pos_label()
y_score = method_caller(
estimator, self._response_method, X, pos_label=pos_label
)
scoring_kwargs = {**self._kwargs, **kwargs}
if isinstance(self._thresholds, Integral):
potential_thresholds = np.linspace(
np.min(y_score), np.max(y_score), self._thresholds
)
else:
potential_thresholds = np.asarray(self._thresholds)
score_thresholds = [
self._sign
* self._score_func(
y_true,
_threshold_scores_to_class_labels(
y_score, th, estimator.classes_, pos_label
),
**scoring_kwargs,
)
for th in potential_thresholds
]
return np.array(score_thresholds), potential_thresholds
|
Evaluate predicted target values for X relative to y_true.
Parameters
----------
method_caller : callable
Returns predictions given an estimator, method name, and other
arguments, potentially caching results.
estimator : object
Trained estimator to use for scoring.
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Test data that will be fed to estimator.predict.
y_true : array-like of shape (n_samples,)
Gold standard target values for X.
**kwargs : dict
Other parameters passed to the scorer. Refer to
:func:`set_score_request` for more details.
Returns
-------
scores : ndarray of shape (thresholds,)
The scores associated to each threshold.
potential_thresholds : ndarray of shape (thresholds,)
The potential thresholds used to compute the scores.
|
_score
|
python
|
scikit-learn/scikit-learn
|
sklearn/metrics/_scorer.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/metrics/_scorer.py
|
BSD-3-Clause
|
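A rough standalone sketch of the threshold sweep performed above, with `balanced_accuracy_score` standing in for the wrapped metric; the dataset and classifier are arbitrary choices.
import numpy as np
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import balanced_accuracy_score

X, y = make_classification(random_state=0)
clf = LogisticRegression().fit(X, y)
y_score = clf.predict_proba(X)[:, 1]

# Evenly spaced candidate thresholds over the observed score range.
thresholds = np.linspace(y_score.min(), y_score.max(), 11)
scores = [balanced_accuracy_score(y, (y_score >= th).astype(int)) for th in thresholds]
print(thresholds[int(np.argmax(scores))])  # threshold with the best score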
def _check_rows_and_columns(a, b):
"""Unpacks the row and column arrays and checks their shape."""
check_consistent_length(*a)
check_consistent_length(*b)
checks = lambda x: check_array(x, ensure_2d=False)
a_rows, a_cols = map(checks, a)
b_rows, b_cols = map(checks, b)
return a_rows, a_cols, b_rows, b_cols
|
Unpacks the row and column arrays and checks their shape.
|
_check_rows_and_columns
|
python
|
scikit-learn/scikit-learn
|
sklearn/metrics/cluster/_bicluster.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/metrics/cluster/_bicluster.py
|
BSD-3-Clause
|
def _jaccard(a_rows, a_cols, b_rows, b_cols):
"""Jaccard coefficient on the elements of the two biclusters."""
intersection = (a_rows * b_rows).sum() * (a_cols * b_cols).sum()
a_size = a_rows.sum() * a_cols.sum()
b_size = b_rows.sum() * b_cols.sum()
return intersection / (a_size + b_size - intersection)
|
Jaccard coefficient on the elements of the two biclusters.
|
_jaccard
|
python
|
scikit-learn/scikit-learn
|
sklearn/metrics/cluster/_bicluster.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/metrics/cluster/_bicluster.py
|
BSD-3-Clause
|
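A standalone sketch of the bicluster Jaccard coefficient above, on toy row/column indicator vectors.
import numpy as np

a_rows, a_cols = np.array([1, 1, 0, 0]), np.array([1, 0, 1])
b_rows, b_cols = np.array([1, 0, 0, 0]), np.array([1, 1, 1])

intersection = (a_rows * b_rows).sum() * (a_cols * b_cols).sum()
a_size = a_rows.sum() * a_cols.sum()
b_size = b_rows.sum() * b_cols.sum()
print(intersection / (a_size + b_size - intersection))  # 2 / (4 + 3 - 2) = 0.4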
def _pairwise_similarity(a, b, similarity):
"""Computes pairwise similarity matrix.
result[i, j] is the Jaccard coefficient of a's bicluster i and b's
bicluster j.
"""
a_rows, a_cols, b_rows, b_cols = _check_rows_and_columns(a, b)
n_a = a_rows.shape[0]
n_b = b_rows.shape[0]
result = np.array(
[
[similarity(a_rows[i], a_cols[i], b_rows[j], b_cols[j]) for j in range(n_b)]
for i in range(n_a)
]
)
return result
|
Computes pairwise similarity matrix.
result[i, j] is the Jaccard coefficient of a's bicluster i and b's
bicluster j.
|
_pairwise_similarity
|
python
|
scikit-learn/scikit-learn
|
sklearn/metrics/cluster/_bicluster.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/metrics/cluster/_bicluster.py
|
BSD-3-Clause
|
def consensus_score(a, b, *, similarity="jaccard"):
"""The similarity of two sets of biclusters.
Similarity between individual biclusters is computed. Then the best
matching between sets is found by solving a linear sum assignment problem,
using a modified Jonker-Volgenant algorithm.
The final score is the sum of similarities divided by the size of
the larger set.
Read more in the :ref:`User Guide <biclustering>`.
Parameters
----------
a : tuple (rows, columns)
Tuple of row and column indicators for a set of biclusters.
b : tuple (rows, columns)
Another set of biclusters like ``a``.
similarity : 'jaccard' or callable, default='jaccard'
May be the string "jaccard" to use the Jaccard coefficient, or
any function that takes four arguments, each of which is a 1d
indicator vector: (a_rows, a_columns, b_rows, b_columns).
Returns
-------
consensus_score : float
Consensus score, a non-negative value, sum of similarities
divided by size of larger set.
See Also
--------
scipy.optimize.linear_sum_assignment : Solve the linear sum assignment problem.
References
----------
* Hochreiter, Bodenhofer, et al., 2010. `FABIA: factor analysis
for bicluster acquisition
<https://www.ncbi.nlm.nih.gov/pmc/articles/PMC2881408/>`__.
Examples
--------
>>> from sklearn.metrics import consensus_score
>>> a = ([[True, False], [False, True]], [[False, True], [True, False]])
>>> b = ([[False, True], [True, False]], [[True, False], [False, True]])
>>> consensus_score(a, b, similarity='jaccard')
1.0
"""
if similarity == "jaccard":
similarity = _jaccard
matrix = _pairwise_similarity(a, b, similarity)
row_indices, col_indices = linear_sum_assignment(1.0 - matrix)
n_a = len(a[0])
n_b = len(b[0])
return float(matrix[row_indices, col_indices].sum() / max(n_a, n_b))
|
The similarity of two sets of biclusters.
Similarity between individual biclusters is computed. Then the best
matching between sets is found by solving a linear sum assignment problem,
using a modified Jonker-Volgenant algorithm.
The final score is the sum of similarities divided by the size of
the larger set.
Read more in the :ref:`User Guide <biclustering>`.
Parameters
----------
a : tuple (rows, columns)
Tuple of row and column indicators for a set of biclusters.
b : tuple (rows, columns)
Another set of biclusters like ``a``.
similarity : 'jaccard' or callable, default='jaccard'
May be the string "jaccard" to use the Jaccard coefficient, or
any function that takes four arguments, each of which is a 1d
indicator vector: (a_rows, a_columns, b_rows, b_columns).
Returns
-------
consensus_score : float
Consensus score, a non-negative value, sum of similarities
divided by size of larger set.
See Also
--------
scipy.optimize.linear_sum_assignment : Solve the linear sum assignment problem.
References
----------
* Hochreiter, Bodenhofer, et al., 2010. `FABIA: factor analysis
for bicluster acquisition
<https://www.ncbi.nlm.nih.gov/pmc/articles/PMC2881408/>`__.
Examples
--------
>>> from sklearn.metrics import consensus_score
>>> a = ([[True, False], [False, True]], [[False, True], [True, False]])
>>> b = ([[False, True], [True, False]], [[True, False], [False, True]])
>>> consensus_score(a, b, similarity='jaccard')
1.0
|
consensus_score
|
python
|
scikit-learn/scikit-learn
|
sklearn/metrics/cluster/_bicluster.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/metrics/cluster/_bicluster.py
|
BSD-3-Clause
|
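A sketch of the matching step described above: given a pairwise similarity matrix (the values below are made up), solve the linear sum assignment problem and normalize by the size of the larger set.
import numpy as np
from scipy.optimize import linear_sum_assignment

similarity = np.array([[0.9, 0.1],
                       [0.2, 0.8],
                       [0.3, 0.3]])  # 3 biclusters in `a`, 2 in `b`
row_ind, col_ind = linear_sum_assignment(1.0 - similarity)  # maximize similarity
print(similarity[row_ind, col_ind].sum() / max(*similarity.shape))  # (0.9 + 0.8) / 3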
def check_clusterings(labels_true, labels_pred):
"""Check that the labels arrays are 1D and of same dimension.
Parameters
----------
labels_true : array-like of shape (n_samples,)
The true labels.
labels_pred : array-like of shape (n_samples,)
The predicted labels.
"""
labels_true = check_array(
labels_true,
ensure_2d=False,
ensure_min_samples=0,
dtype=None,
)
labels_pred = check_array(
labels_pred,
ensure_2d=False,
ensure_min_samples=0,
dtype=None,
)
type_label = type_of_target(labels_true)
type_pred = type_of_target(labels_pred)
if "continuous" in (type_pred, type_label):
msg = (
"Clustering metrics expects discrete values but received"
f" {type_label} values for label, and {type_pred} values "
"for target"
)
warnings.warn(msg, UserWarning)
# input checks
if labels_true.ndim != 1:
raise ValueError("labels_true must be 1D: shape is %r" % (labels_true.shape,))
if labels_pred.ndim != 1:
raise ValueError("labels_pred must be 1D: shape is %r" % (labels_pred.shape,))
check_consistent_length(labels_true, labels_pred)
return labels_true, labels_pred
|
Check that the label arrays are 1D and have consistent length.
Parameters
----------
labels_true : array-like of shape (n_samples,)
The true labels.
labels_pred : array-like of shape (n_samples,)
The predicted labels.
|
check_clusterings
|
python
|
scikit-learn/scikit-learn
|
sklearn/metrics/cluster/_supervised.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/metrics/cluster/_supervised.py
|
BSD-3-Clause
|
def _generalized_average(U, V, average_method):
"""Return a particular mean of two numbers."""
if average_method == "min":
return min(U, V)
elif average_method == "geometric":
return np.sqrt(U * V)
elif average_method == "arithmetic":
return np.mean([U, V])
elif average_method == "max":
return max(U, V)
else:
raise ValueError(
"'average_method' must be 'min', 'geometric', 'arithmetic', or 'max'"
)
|
Return a particular mean of two numbers.
|
_generalized_average
|
python
|
scikit-learn/scikit-learn
|
sklearn/metrics/cluster/_supervised.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/metrics/cluster/_supervised.py
|
BSD-3-Clause
|
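A quick sketch of the four normalizers on two toy entropy values.
import numpy as np

U, V = 0.5, 2.0
print(min(U, V), np.sqrt(U * V), np.mean([U, V]), max(U, V))
# 'min' -> 0.5, 'geometric' -> 1.0, 'arithmetic' -> 1.25, 'max' -> 2.0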
def contingency_matrix(
labels_true, labels_pred, *, eps=None, sparse=False, dtype=np.int64
):
"""Build a contingency matrix describing the relationship between labels.
Read more in the :ref:`User Guide <contingency_matrix>`.
Parameters
----------
labels_true : array-like of shape (n_samples,)
Ground truth class labels to be used as a reference.
labels_pred : array-like of shape (n_samples,)
Cluster labels to evaluate.
eps : float, default=None
If a float, that value is added to all values in the contingency
matrix. This helps to stop NaN propagation.
If ``None``, nothing is adjusted.
sparse : bool, default=False
If `True`, return a sparse CSR contingency matrix. If `eps` is not
`None` and `sparse` is `True`, a ValueError is raised.
.. versionadded:: 0.18
dtype : numeric type, default=np.int64
Output dtype. Ignored if `eps` is not `None`.
.. versionadded:: 0.24
Returns
-------
contingency : {array-like, sparse}, shape=[n_classes_true, n_classes_pred]
Matrix :math:`C` such that :math:`C_{i, j}` is the number of samples in
true class :math:`i` and in predicted class :math:`j`. If
``eps is None``, the dtype of this array will be integer unless set
otherwise with the ``dtype`` argument. If ``eps`` is given, the dtype
will be float.
Will be a ``scipy.sparse.csr_matrix`` if ``sparse=True``.
Examples
--------
>>> from sklearn.metrics.cluster import contingency_matrix
>>> labels_true = [0, 0, 1, 1, 2, 2]
>>> labels_pred = [1, 0, 2, 1, 0, 2]
>>> contingency_matrix(labels_true, labels_pred)
array([[1, 1, 0],
[0, 1, 1],
[1, 0, 1]])
"""
if eps is not None and sparse:
raise ValueError("Cannot set 'eps' when sparse=True")
classes, class_idx = np.unique(labels_true, return_inverse=True)
clusters, cluster_idx = np.unique(labels_pred, return_inverse=True)
n_classes = classes.shape[0]
n_clusters = clusters.shape[0]
# Using coo_matrix to accelerate simple histogram calculation,
# i.e. bins are consecutive integers
# Currently, coo_matrix is faster than histogram2d for simple cases
contingency = sp.coo_matrix(
(np.ones(class_idx.shape[0]), (class_idx, cluster_idx)),
shape=(n_classes, n_clusters),
dtype=dtype,
)
if sparse:
contingency = contingency.tocsr()
contingency.sum_duplicates()
else:
contingency = contingency.toarray()
if eps is not None:
# don't use += as contingency is integer
contingency = contingency + eps
return contingency
|
Build a contingency matrix describing the relationship between labels.
Read more in the :ref:`User Guide <contingency_matrix>`.
Parameters
----------
labels_true : array-like of shape (n_samples,)
Ground truth class labels to be used as a reference.
labels_pred : array-like of shape (n_samples,)
Cluster labels to evaluate.
eps : float, default=None
If a float, that value is added to all values in the contingency
matrix. This helps to stop NaN propagation.
If ``None``, nothing is adjusted.
sparse : bool, default=False
If `True`, return a sparse CSR contingency matrix. If `eps` is not
`None` and `sparse` is `True`, a ValueError is raised.
.. versionadded:: 0.18
dtype : numeric type, default=np.int64
Output dtype. Ignored if `eps` is not `None`.
.. versionadded:: 0.24
Returns
-------
contingency : {array-like, sparse}, shape=[n_classes_true, n_classes_pred]
Matrix :math:`C` such that :math:`C_{i, j}` is the number of samples in
true class :math:`i` and in predicted class :math:`j`. If
``eps is None``, the dtype of this array will be integer unless set
otherwise with the ``dtype`` argument. If ``eps`` is given, the dtype
will be float.
Will be a ``scipy.sparse.csr_matrix`` if ``sparse=True``.
Examples
--------
>>> from sklearn.metrics.cluster import contingency_matrix
>>> labels_true = [0, 0, 1, 1, 2, 2]
>>> labels_pred = [1, 0, 2, 1, 0, 2]
>>> contingency_matrix(labels_true, labels_pred)
array([[1, 1, 0],
[0, 1, 1],
[1, 0, 1]])
|
contingency_matrix
|
python
|
scikit-learn/scikit-learn
|
sklearn/metrics/cluster/_supervised.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/metrics/cluster/_supervised.py
|
BSD-3-Clause
|
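A short sketch of the `sparse` and `eps` variants, using the same toy labels as in the docstring example; note the two options cannot be combined.
from sklearn.metrics.cluster import contingency_matrix

labels_true = [0, 0, 1, 1, 2, 2]
labels_pred = [1, 0, 2, 1, 0, 2]

C_sparse = contingency_matrix(labels_true, labels_pred, sparse=True)  # CSR matrix
C_smooth = contingency_matrix(labels_true, labels_pred, eps=1e-6)     # float array
# contingency_matrix(labels_true, labels_pred, sparse=True, eps=1e-6) -> ValueError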
def homogeneity_completeness_v_measure(labels_true, labels_pred, *, beta=1.0):
"""Compute the homogeneity and completeness and V-Measure scores at once.
Those metrics are based on normalized conditional entropy measures of
the clustering labeling to evaluate given the knowledge of the ground
truth class labels of the same samples.
A clustering result satisfies homogeneity if all of its clusters
contain only data points which are members of a single class.
A clustering result satisfies completeness if all the data points
that are members of a given class are elements of the same cluster.
Both scores have positive values between 0.0 and 1.0, larger values
being desirable.
Those 3 metrics are independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score values in any way.
V-Measure is furthermore symmetric: swapping ``labels_true`` and
``label_pred`` will give the same score. This does not hold for
homogeneity and completeness. V-Measure is identical to
:func:`normalized_mutual_info_score` with the arithmetic averaging
method.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : array-like of shape (n_samples,)
Ground truth class labels to be used as a reference.
labels_pred : array-like of shape (n_samples,)
Cluster labels to evaluate.
beta : float, default=1.0
Ratio of weight attributed to ``homogeneity`` vs ``completeness``.
If ``beta`` is greater than 1, ``completeness`` is weighted more
strongly in the calculation. If ``beta`` is less than 1,
``homogeneity`` is weighted more strongly.
Returns
-------
homogeneity : float
Score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling.
completeness : float
Score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling.
v_measure : float
Harmonic mean of the first two.
See Also
--------
homogeneity_score : Homogeneity metric of cluster labeling.
completeness_score : Completeness metric of cluster labeling.
v_measure_score : V-Measure (NMI with arithmetic mean option).
Examples
--------
>>> from sklearn.metrics import homogeneity_completeness_v_measure
>>> y_true, y_pred = [0, 0, 1, 1, 2, 2], [0, 0, 1, 2, 2, 2]
>>> homogeneity_completeness_v_measure(y_true, y_pred)
(0.71, 0.771, 0.74)
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
if len(labels_true) == 0:
return 1.0, 1.0, 1.0
entropy_C = entropy(labels_true)
entropy_K = entropy(labels_pred)
contingency = contingency_matrix(labels_true, labels_pred, sparse=True)
MI = mutual_info_score(None, None, contingency=contingency)
homogeneity = MI / (entropy_C) if entropy_C else 1.0
completeness = MI / (entropy_K) if entropy_K else 1.0
if homogeneity + completeness == 0.0:
v_measure_score = 0.0
else:
v_measure_score = (
(1 + beta)
* homogeneity
* completeness
/ (beta * homogeneity + completeness)
)
return float(homogeneity), float(completeness), float(v_measure_score)
|
Compute the homogeneity and completeness and V-Measure scores at once.
Those metrics are based on normalized conditional entropy measures of
the clustering labeling to evaluate given the knowledge of the ground
truth class labels of the same samples.
A clustering result satisfies homogeneity if all of its clusters
contain only data points which are members of a single class.
A clustering result satisfies completeness if all the data points
that are members of a given class are elements of the same cluster.
Both scores have positive values between 0.0 and 1.0, larger values
being desirable.
Those 3 metrics are independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score values in any way.
V-Measure is furthermore symmetric: swapping ``labels_true`` and
``label_pred`` will give the same score. This does not hold for
homogeneity and completeness. V-Measure is identical to
:func:`normalized_mutual_info_score` with the arithmetic averaging
method.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : array-like of shape (n_samples,)
Ground truth class labels to be used as a reference.
labels_pred : array-like of shape (n_samples,)
Cluster labels to evaluate.
beta : float, default=1.0
Ratio of weight attributed to ``homogeneity`` vs ``completeness``.
If ``beta`` is greater than 1, ``completeness`` is weighted more
strongly in the calculation. If ``beta`` is less than 1,
``homogeneity`` is weighted more strongly.
Returns
-------
homogeneity : float
Score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling.
completeness : float
Score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling.
v_measure : float
Harmonic mean of the first two.
See Also
--------
homogeneity_score : Homogeneity metric of cluster labeling.
completeness_score : Completeness metric of cluster labeling.
v_measure_score : V-Measure (NMI with arithmetic mean option).
Examples
--------
>>> from sklearn.metrics import homogeneity_completeness_v_measure
>>> y_true, y_pred = [0, 0, 1, 1, 2, 2], [0, 0, 1, 2, 2, 2]
>>> homogeneity_completeness_v_measure(y_true, y_pred)
(0.71, 0.771, 0.74)
|
homogeneity_completeness_v_measure
|
python
|
scikit-learn/scikit-learn
|
sklearn/metrics/cluster/_supervised.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/metrics/cluster/_supervised.py
|
BSD-3-Clause
|
def mutual_info_score(labels_true, labels_pred, *, contingency=None):
"""Mutual Information between two clusterings.
The Mutual Information is a measure of the similarity between two labels
of the same data. Where :math:`|U_i|` is the number of the samples
in cluster :math:`U_i` and :math:`|V_j|` is the number of the
samples in cluster :math:`V_j`, the Mutual Information
between clusterings :math:`U` and :math:`V` is given as:
.. math::
MI(U,V)=\\sum_{i=1}^{|U|} \\sum_{j=1}^{|V|} \\frac{|U_i\\cap V_j|}{N}
\\log\\frac{N|U_i \\cap V_j|}{|U_i||V_j|}
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching :math:`U` (i.e.
``labels_true``) with :math:`V` (i.e. ``labels_pred``) will return the
same score value. This can be useful to measure the agreement of two
independent label assignments strategies on the same dataset when the
real ground truth is not known.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : array-like of shape (n_samples,), dtype=integral
A clustering of the data into disjoint subsets, called :math:`U` in
the above formula.
labels_pred : array-like of shape (n_samples,), dtype=integral
A clustering of the data into disjoint subsets, called :math:`V` in
the above formula.
contingency : {array-like, sparse matrix} of shape \
(n_classes_true, n_classes_pred), default=None
A contingency matrix given by the
:func:`~sklearn.metrics.cluster.contingency_matrix` function. If value
is ``None``, it will be computed, otherwise the given value is used,
with ``labels_true`` and ``labels_pred`` ignored.
Returns
-------
mi : float
Mutual information, a non-negative value, measured in nats using the
natural logarithm.
See Also
--------
adjusted_mutual_info_score : Adjusted against chance Mutual Information.
normalized_mutual_info_score : Normalized Mutual Information.
Notes
-----
The logarithm used is the natural logarithm (base-e).
Examples
--------
>>> from sklearn.metrics import mutual_info_score
>>> labels_true = [0, 1, 1, 0, 1, 0]
>>> labels_pred = [0, 1, 0, 0, 1, 1]
>>> mutual_info_score(labels_true, labels_pred)
0.0566
"""
if contingency is None:
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
contingency = contingency_matrix(labels_true, labels_pred, sparse=True)
else:
contingency = check_array(
contingency,
accept_sparse=["csr", "csc", "coo"],
dtype=[int, np.int32, np.int64],
)
if isinstance(contingency, np.ndarray):
# For an array
nzx, nzy = np.nonzero(contingency)
nz_val = contingency[nzx, nzy]
else:
# For a sparse matrix
nzx, nzy, nz_val = sp.find(contingency)
contingency_sum = contingency.sum()
pi = np.ravel(contingency.sum(axis=1))
pj = np.ravel(contingency.sum(axis=0))
# Since MI <= min(H(X), H(Y)), any labelling with zero entropy, i.e. containing a
# single cluster, implies MI = 0
if pi.size == 1 or pj.size == 1:
return 0.0
log_contingency_nm = np.log(nz_val)
contingency_nm = nz_val / contingency_sum
# Don't need to calculate the full outer product, just for non-zeroes
outer = pi.take(nzx).astype(np.int64, copy=False) * pj.take(nzy).astype(
np.int64, copy=False
)
log_outer = -np.log(outer) + log(pi.sum()) + log(pj.sum())
mi = (
contingency_nm * (log_contingency_nm - log(contingency_sum))
+ contingency_nm * log_outer
)
mi = np.where(np.abs(mi) < np.finfo(mi.dtype).eps, 0.0, mi)
return float(np.clip(mi.sum(), 0.0, None))
|
Mutual Information between two clusterings.
The Mutual Information is a measure of the similarity between two labels
of the same data. Where :math:`|U_i|` is the number of the samples
in cluster :math:`U_i` and :math:`|V_j|` is the number of the
samples in cluster :math:`V_j`, the Mutual Information
between clusterings :math:`U` and :math:`V` is given as:
.. math::
MI(U,V)=\sum_{i=1}^{|U|} \sum_{j=1}^{|V|} \frac{|U_i\cap V_j|}{N}
\log\frac{N|U_i \cap V_j|}{|U_i||V_j|}
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching :math:`U` (i.e.
``labels_true``) with :math:`V` (i.e. ``labels_pred``) will return the
same score value. This can be useful to measure the agreement of two
independent label assignments strategies on the same dataset when the
real ground truth is not known.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : array-like of shape (n_samples,), dtype=integral
A clustering of the data into disjoint subsets, called :math:`U` in
the above formula.
labels_pred : array-like of shape (n_samples,), dtype=integral
A clustering of the data into disjoint subsets, called :math:`V` in
the above formula.
contingency : {array-like, sparse matrix} of shape (n_classes_true, n_classes_pred), default=None
A contingency matrix given by the
:func:`~sklearn.metrics.cluster.contingency_matrix` function. If value
is ``None``, it will be computed, otherwise the given value is used,
with ``labels_true`` and ``labels_pred`` ignored.
Returns
-------
mi : float
Mutual information, a non-negative value, measured in nats using the
natural logarithm.
See Also
--------
adjusted_mutual_info_score : Adjusted against chance Mutual Information.
normalized_mutual_info_score : Normalized Mutual Information.
Notes
-----
The logarithm used is the natural logarithm (base-e).
Examples
--------
>>> from sklearn.metrics import mutual_info_score
>>> labels_true = [0, 1, 1, 0, 1, 0]
>>> labels_pred = [0, 1, 0, 0, 1, 1]
>>> mutual_info_score(labels_true, labels_pred)
0.0566
|
mutual_info_score
|
python
|
scikit-learn/scikit-learn
|
sklearn/metrics/cluster/_supervised.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/metrics/cluster/_supervised.py
|
BSD-3-Clause
|
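A sketch that evaluates the MI formula above directly from the contingency matrix and compares it with `mutual_info_score`, using the docstring's toy labels.
import numpy as np
from sklearn.metrics import mutual_info_score
from sklearn.metrics.cluster import contingency_matrix

labels_true = [0, 1, 1, 0, 1, 0]
labels_pred = [0, 1, 0, 0, 1, 1]

C = contingency_matrix(labels_true, labels_pred).astype(float)
N = C.sum()
pi = C.sum(axis=1, keepdims=True)  # |U_i|
pj = C.sum(axis=0, keepdims=True)  # |V_j|
nz = C > 0
mi = np.sum((C[nz] / N) * np.log(N * C[nz] / (pi @ pj)[nz]))
print(mi, mutual_info_score(labels_true, labels_pred))  # both ~0.0566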
def normalized_mutual_info_score(
labels_true, labels_pred, *, average_method="arithmetic"
):
"""Normalized Mutual Information between two clusterings.
Normalized Mutual Information (NMI) is a normalization of the Mutual
Information (MI) score to scale the results between 0 (no mutual
information) and 1 (perfect correlation). In this function, mutual
information is normalized by some generalized mean of ``H(labels_true)``
and ``H(labels_pred)``, defined by the `average_method`.
This measure is not adjusted for chance. Therefore
:func:`adjusted_mutual_info_score` might be preferred.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``labels_true`` with
``labels_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array-like of shape (n_samples,)
A clustering of the data into disjoint subsets.
labels_pred : int array-like of shape (n_samples,)
A clustering of the data into disjoint subsets.
average_method : {'min', 'geometric', 'arithmetic', 'max'}, default='arithmetic'
How to compute the normalizer in the denominator.
.. versionadded:: 0.20
.. versionchanged:: 0.22
The default value of ``average_method`` changed from 'geometric' to
'arithmetic'.
Returns
-------
nmi : float
Score between 0.0 and 1.0 in normalized nats (based on the natural
logarithm). 1.0 stands for perfectly complete labeling.
See Also
--------
v_measure_score : V-Measure (NMI with arithmetic mean option).
adjusted_rand_score : Adjusted Rand Index.
adjusted_mutual_info_score : Adjusted Mutual Information (adjusted
against chance).
Examples
--------
Perfect labelings are both homogeneous and complete, hence have
score 1.0::
>>> from sklearn.metrics.cluster import normalized_mutual_info_score
>>> normalized_mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> normalized_mutual_info_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
If class members are completely split across different clusters,
the assignment is totally incomplete, hence the NMI is null::
>>> normalized_mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split.
# It corresponds to both labellings having zero entropy.
# This is a perfect match hence return 1.0.
if (
classes.shape[0] == clusters.shape[0] == 1
or classes.shape[0] == clusters.shape[0] == 0
):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred, sparse=True)
contingency = contingency.astype(np.float64, copy=False)
# Calculate the MI for the two clusterings
mi = mutual_info_score(labels_true, labels_pred, contingency=contingency)
# At this point mi = 0 can't be a perfect match (the special case of a single
# cluster has been dealt with before). Hence, if mi = 0, the nmi must be 0 whatever
# the normalization.
if mi == 0:
return 0.0
# Calculate entropy for each labeling
h_true, h_pred = entropy(labels_true), entropy(labels_pred)
normalizer = _generalized_average(h_true, h_pred, average_method)
return float(mi / normalizer)
|
Normalized Mutual Information between two clusterings.
Normalized Mutual Information (NMI) is a normalization of the Mutual
Information (MI) score to scale the results between 0 (no mutual
information) and 1 (perfect correlation). In this function, mutual
information is normalized by some generalized mean of ``H(labels_true)``
and ``H(labels_pred)``, defined by the `average_method`.
This measure is not adjusted for chance. Therefore
:func:`adjusted_mutual_info_score` might be preferred.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``labels_true`` with
``labels_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array-like of shape (n_samples,)
A clustering of the data into disjoint subsets.
labels_pred : int array-like of shape (n_samples,)
A clustering of the data into disjoint subsets.
average_method : {'min', 'geometric', 'arithmetic', 'max'}, default='arithmetic'
How to compute the normalizer in the denominator.
.. versionadded:: 0.20
.. versionchanged:: 0.22
The default value of ``average_method`` changed from 'geometric' to
'arithmetic'.
Returns
-------
nmi : float
Score between 0.0 and 1.0 in normalized nats (based on the natural
logarithm). 1.0 stands for perfectly complete labeling.
See Also
--------
v_measure_score : V-Measure (NMI with arithmetic mean option).
adjusted_rand_score : Adjusted Rand Index.
adjusted_mutual_info_score : Adjusted Mutual Information (adjusted
against chance).
Examples
--------
Perfect labelings are both homogeneous and complete, hence have
score 1.0::
>>> from sklearn.metrics.cluster import normalized_mutual_info_score
>>> normalized_mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> normalized_mutual_info_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
If class members are completely split across different clusters,
the assignment is totally incomplete, hence the NMI is null::
>>> normalized_mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
|
normalized_mutual_info_score
|
python
|
scikit-learn/scikit-learn
|
sklearn/metrics/cluster/_supervised.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/metrics/cluster/_supervised.py
|
BSD-3-Clause
|
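A sketch of the arithmetic-mean normalization: MI divided by the mean of the two label entropies. The labels below are toy values, and `label_entropy` is a local stand-in for the `entropy` helper shown later in this file.
import numpy as np
from sklearn.metrics import mutual_info_score, normalized_mutual_info_score

labels_true = [0, 0, 1, 1, 2, 2]
labels_pred = [0, 0, 1, 1, 1, 2]

def label_entropy(labels):
    counts = np.unique(labels, return_counts=True)[1]
    p = counts / counts.sum()
    return float(-np.sum(p * np.log(p)))

mi = mutual_info_score(labels_true, labels_pred)
nmi = mi / np.mean([label_entropy(labels_true), label_entropy(labels_pred)])
print(nmi, normalized_mutual_info_score(labels_true, labels_pred))  # same value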
def fowlkes_mallows_score(labels_true, labels_pred, *, sparse="deprecated"):
"""Measure the similarity of two clusterings of a set of points.
.. versionadded:: 0.18
The Fowlkes-Mallows index (FMI) is defined as the geometric mean of
the precision and recall::
FMI = TP / sqrt((TP + FP) * (TP + FN))
Where ``TP`` is the number of **True Positive** (i.e. the number of pairs of
points that belong to the same cluster in both ``labels_true`` and
``labels_pred``), ``FP`` is the number of **False Positive** (i.e. the
number of pairs of points that belong to the same cluster in
``labels_pred`` but not in ``labels_true``) and ``FN`` is the number of
**False Negative** (i.e. the number of pairs of points that belong to the
same cluster in ``labels_true`` but not in ``labels_pred``).
The score ranges from 0 to 1. A high value indicates a good similarity
between the two clusterings.
Read more in the :ref:`User Guide <fowlkes_mallows_scores>`.
Parameters
----------
labels_true : array-like of shape (n_samples,), dtype=int
A clustering of the data into disjoint subsets.
labels_pred : array-like of shape (n_samples,), dtype=int
A clustering of the data into disjoint subsets.
sparse : bool, default=False
Compute contingency matrix internally with sparse matrix.
.. deprecated:: 1.7
The ``sparse`` parameter is deprecated and will be removed in 1.9. It has
no effect.
Returns
-------
score : float
The resulting Fowlkes-Mallows score.
References
----------
.. [1] `E. B. Fowlkes and C. L. Mallows, 1983. "A method for comparing two
hierarchical clusterings". Journal of the American Statistical
Association
<https://www.tandfonline.com/doi/abs/10.1080/01621459.1983.10478008>`_
.. [2] `Wikipedia entry for the Fowlkes-Mallows Index
<https://en.wikipedia.org/wiki/Fowlkes-Mallows_index>`_
Examples
--------
Perfect labelings are both homogeneous and complete, hence have
score 1.0::
>>> from sklearn.metrics.cluster import fowlkes_mallows_score
>>> fowlkes_mallows_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> fowlkes_mallows_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
If class members are completely split across different clusters,
the assignment is totally random, hence the FMI is null::
>>> fowlkes_mallows_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
"""
# TODO(1.9): remove the sparse parameter
if sparse != "deprecated":
warnings.warn(
"The 'sparse' parameter was deprecated in 1.7 and will be removed in 1.9. "
"It has no effect. Leave it to its default value to silence this warning.",
FutureWarning,
)
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
(n_samples,) = labels_true.shape
c = contingency_matrix(labels_true, labels_pred, sparse=True)
c = c.astype(np.int64, copy=False)
tk = np.dot(c.data, c.data) - n_samples
pk = np.sum(np.asarray(c.sum(axis=0)).ravel() ** 2) - n_samples
qk = np.sum(np.asarray(c.sum(axis=1)).ravel() ** 2) - n_samples
return float(np.sqrt(tk / pk) * np.sqrt(tk / qk)) if tk != 0.0 else 0.0
|
Measure the similarity of two clusterings of a set of points.
.. versionadded:: 0.18
The Fowlkes-Mallows index (FMI) is defined as the geometric mean of
the precision and recall::
FMI = TP / sqrt((TP + FP) * (TP + FN))
Where ``TP`` is the number of **True Positive** (i.e. the number of pairs of
points that belong to the same cluster in both ``labels_true`` and
``labels_pred``), ``FP`` is the number of **False Positive** (i.e. the
number of pairs of points that belong to the same cluster in
``labels_pred`` but not in ``labels_true``) and ``FN`` is the number of
**False Negative** (i.e. the number of pairs of points that belong to the
same cluster in ``labels_true`` but not in ``labels_pred``).
The score ranges from 0 to 1. A high value indicates a good similarity
between the two clusterings.
Read more in the :ref:`User Guide <fowlkes_mallows_scores>`.
Parameters
----------
labels_true : array-like of shape (n_samples,), dtype=int
A clustering of the data into disjoint subsets.
labels_pred : array-like of shape (n_samples,), dtype=int
A clustering of the data into disjoint subsets.
sparse : bool, default=False
Compute contingency matrix internally with sparse matrix.
.. deprecated:: 1.7
The ``sparse`` parameter is deprecated and will be removed in 1.9. It has
no effect.
Returns
-------
score : float
The resulting Fowlkes-Mallows score.
References
----------
.. [1] `E. B. Fowlkes and C. L. Mallows, 1983. "A method for comparing two
hierarchical clusterings". Journal of the American Statistical
Association
<https://www.tandfonline.com/doi/abs/10.1080/01621459.1983.10478008>`_
.. [2] `Wikipedia entry for the Fowlkes-Mallows Index
<https://en.wikipedia.org/wiki/Fowlkes-Mallows_index>`_
Examples
--------
Perfect labelings are both homogeneous and complete, hence have
score 1.0::
>>> from sklearn.metrics.cluster import fowlkes_mallows_score
>>> fowlkes_mallows_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> fowlkes_mallows_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
If class members are completely split across different clusters,
the assignment is totally random, hence the FMI is null::
>>> fowlkes_mallows_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
|
fowlkes_mallows_score
|
python
|
scikit-learn/scikit-learn
|
sklearn/metrics/cluster/_supervised.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/metrics/cluster/_supervised.py
|
BSD-3-Clause
|
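A pair-counting sketch of the TP/FP/FN definition above, compared against `fowlkes_mallows_score` on toy labels.
from itertools import combinations
from math import sqrt
from sklearn.metrics import fowlkes_mallows_score

labels_true = [0, 0, 1, 1, 1, 2]
labels_pred = [0, 0, 1, 2, 2, 2]

pairs = list(combinations(range(len(labels_true)), 2))
same_true = {(i, j) for i, j in pairs if labels_true[i] == labels_true[j]}
same_pred = {(i, j) for i, j in pairs if labels_pred[i] == labels_pred[j]}
tp = len(same_true & same_pred)  # pairs together in both clusterings
fp = len(same_pred - same_true)  # together only in labels_pred
fn = len(same_true - same_pred)  # together only in labels_true
print(tp / sqrt((tp + fp) * (tp + fn)), fowlkes_mallows_score(labels_true, labels_pred))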
def entropy(labels):
"""Calculate the entropy for a labeling.
Parameters
----------
labels : array-like of shape (n_samples,), dtype=int
The labels.
Returns
-------
entropy : float
The entropy for a labeling.
Notes
-----
The logarithm used is the natural logarithm (base-e).
"""
xp, is_array_api_compliant, device_ = get_namespace_and_device(labels)
labels_len = labels.shape[0] if is_array_api_compliant else len(labels)
if labels_len == 0:
return 1.0
pi = xp.astype(xp.unique_counts(labels)[1], _max_precision_float_dtype(xp, device_))
# single cluster => zero entropy
if pi.size == 1:
return 0.0
pi_sum = xp.sum(pi)
# log(a / b) should be calculated as log(a) - log(b) for
# possible loss of precision
# Always convert the result as a Python scalar (on CPU) instead of a device
# specific scalar array.
return float(-xp.sum((pi / pi_sum) * (xp.log(pi) - log(pi_sum))))
|
Calculate the entropy for a labeling.
Parameters
----------
labels : array-like of shape (n_samples,), dtype=int
The labels.
Returns
-------
entropy : float
The entropy for a labeling.
Notes
-----
The logarithm used is the natural logarithm (base-e).
|
entropy
|
python
|
scikit-learn/scikit-learn
|
sklearn/metrics/cluster/_supervised.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/metrics/cluster/_supervised.py
|
BSD-3-Clause
|
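A numpy-only sketch of the same label entropy (in nats) on toy labels, mirroring the `log(a) - log(b)` precision note in the code above.
import numpy as np

labels = np.array([0, 0, 1, 1, 1, 2])
counts = np.unique(labels, return_counts=True)[1]
p = counts / counts.sum()
print(float(-np.sum(p * (np.log(counts) - np.log(counts.sum())))))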
def check_number_of_labels(n_labels, n_samples):
"""Check that number of labels are valid.
Parameters
----------
n_labels : int
Number of labels.
n_samples : int
Number of samples.
"""
if not 1 < n_labels < n_samples:
raise ValueError(
"Number of labels is %d. Valid values are 2 to n_samples - 1 (inclusive)"
% n_labels
)
|
Check that the number of labels is valid.
Parameters
----------
n_labels : int
Number of labels.
n_samples : int
Number of samples.
|
check_number_of_labels
|
python
|
scikit-learn/scikit-learn
|
sklearn/metrics/cluster/_unsupervised.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/metrics/cluster/_unsupervised.py
|
BSD-3-Clause
|
def silhouette_score(
X, labels, *, metric="euclidean", sample_size=None, random_state=None, **kwds
):
"""Compute the mean Silhouette Coefficient of all samples.
The Silhouette Coefficient is calculated using the mean intra-cluster
distance (``a``) and the mean nearest-cluster distance (``b``) for each
sample. The Silhouette Coefficient for a sample is ``(b - a) / max(a,
b)``. To clarify, ``b`` is the distance between a sample and the nearest
cluster that the sample is not a part of.
Note that Silhouette Coefficient is only defined if number of labels
is ``2 <= n_labels <= n_samples - 1``.
This function returns the mean Silhouette Coefficient over all samples.
To obtain the values for each sample, use :func:`silhouette_samples`.
The best value is 1 and the worst value is -1. Values near 0 indicate
overlapping clusters. Negative values generally indicate that a sample has
been assigned to the wrong cluster, as a different cluster is more similar.
Read more in the :ref:`User Guide <silhouette_coefficient>`.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples_a, n_samples_a) if metric == \
"precomputed" or (n_samples_a, n_features) otherwise
An array of pairwise distances between samples, or a feature array.
labels : array-like of shape (n_samples,)
Predicted labels for each sample.
metric : str or callable, default='euclidean'
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by :func:`~sklearn.metrics.pairwise_distances`. If ``X`` is
the distance array itself, use ``metric="precomputed"``.
sample_size : int, default=None
The size of the sample to use when computing the Silhouette Coefficient
on a random subset of the data.
If ``sample_size is None``, no sampling is used.
random_state : int, RandomState instance or None, default=None
Determines random number generation for selecting a subset of samples.
Used when ``sample_size is not None``.
Pass an int for reproducible results across multiple function calls.
See :term:`Glossary <random_state>`.
**kwds : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a scipy.spatial.distance metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
silhouette : float
Mean Silhouette Coefficient for all samples.
References
----------
.. [1] `Peter J. Rousseeuw (1987). "Silhouettes: a Graphical Aid to the
Interpretation and Validation of Cluster Analysis". Computational
and Applied Mathematics 20: 53-65.
<https://www.sciencedirect.com/science/article/pii/0377042787901257>`_
.. [2] `Wikipedia entry on the Silhouette Coefficient
<https://en.wikipedia.org/wiki/Silhouette_(clustering)>`_
Examples
--------
>>> from sklearn.datasets import make_blobs
>>> from sklearn.cluster import KMeans
>>> from sklearn.metrics import silhouette_score
>>> X, y = make_blobs(random_state=42)
>>> kmeans = KMeans(n_clusters=2, random_state=42)
>>> silhouette_score(X, kmeans.fit_predict(X))
0.49...
"""
if sample_size is not None:
X, labels = check_X_y(X, labels, accept_sparse=["csc", "csr"])
random_state = check_random_state(random_state)
indices = random_state.permutation(X.shape[0])[:sample_size]
if metric == "precomputed":
X, labels = X[indices].T[indices].T, labels[indices]
else:
X, labels = X[indices], labels[indices]
return float(np.mean(silhouette_samples(X, labels, metric=metric, **kwds)))
|
Compute the mean Silhouette Coefficient of all samples.
The Silhouette Coefficient is calculated using the mean intra-cluster
distance (``a``) and the mean nearest-cluster distance (``b``) for each
sample. The Silhouette Coefficient for a sample is ``(b - a) / max(a,
b)``. To clarify, ``b`` is the distance between a sample and the nearest
cluster that the sample is not a part of.
Note that Silhouette Coefficient is only defined if number of labels
is ``2 <= n_labels <= n_samples - 1``.
This function returns the mean Silhouette Coefficient over all samples.
To obtain the values for each sample, use :func:`silhouette_samples`.
The best value is 1 and the worst value is -1. Values near 0 indicate
overlapping clusters. Negative values generally indicate that a sample has
been assigned to the wrong cluster, as a different cluster is more similar.
Read more in the :ref:`User Guide <silhouette_coefficient>`.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples_a, n_samples_a) if metric == "precomputed" or (n_samples_a, n_features) otherwise
An array of pairwise distances between samples, or a feature array.
labels : array-like of shape (n_samples,)
Predicted labels for each sample.
metric : str or callable, default='euclidean'
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by :func:`~sklearn.metrics.pairwise_distances`. If ``X`` is
the distance array itself, use ``metric="precomputed"``.
sample_size : int, default=None
The size of the sample to use when computing the Silhouette Coefficient
on a random subset of the data.
If ``sample_size is None``, no sampling is used.
random_state : int, RandomState instance or None, default=None
Determines random number generation for selecting a subset of samples.
Used when ``sample_size is not None``.
Pass an int for reproducible results across multiple function calls.
See :term:`Glossary <random_state>`.
**kwds : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a scipy.spatial.distance metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
silhouette : float
Mean Silhouette Coefficient for all samples.
References
----------
.. [1] `Peter J. Rousseeuw (1987). "Silhouettes: a Graphical Aid to the
Interpretation and Validation of Cluster Analysis". Computational
and Applied Mathematics 20: 53-65.
<https://www.sciencedirect.com/science/article/pii/0377042787901257>`_
.. [2] `Wikipedia entry on the Silhouette Coefficient
<https://en.wikipedia.org/wiki/Silhouette_(clustering)>`_
Examples
--------
>>> from sklearn.datasets import make_blobs
>>> from sklearn.cluster import KMeans
>>> from sklearn.metrics import silhouette_score
>>> X, y = make_blobs(random_state=42)
>>> kmeans = KMeans(n_clusters=2, random_state=42)
>>> silhouette_score(X, kmeans.fit_predict(X))
0.49...
|
silhouette_score
|
python
|
scikit-learn/scikit-learn
|
sklearn/metrics/cluster/_unsupervised.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/metrics/cluster/_unsupervised.py
|
BSD-3-Clause
|
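A small sanity-check sketch (with hypothetical blob data) showing that the returned score is simply the mean of the per-sample coefficients from silhouette_samples.
import numpy as np
from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs
from sklearn.metrics import silhouette_samples, silhouette_score
X, _ = make_blobs(n_samples=60, centers=3, random_state=0)
labels = KMeans(n_clusters=3, n_init=10, random_state=0).fit_predict(X)
per_sample = silhouette_samples(X, labels)
# The score is defined as the mean of the per-sample coefficients.
assert np.isclose(silhouette_score(X, labels), per_sample.mean())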
def _silhouette_reduce(D_chunk, start, labels, label_freqs):
"""Accumulate silhouette statistics for vertical chunk of X.
Parameters
----------
D_chunk : {array-like, sparse matrix} of shape (n_chunk_samples, n_samples)
Precomputed distances for a chunk. If a sparse matrix is provided,
only CSR format is accepted.
start : int
First index in the chunk.
labels : array-like of shape (n_samples,)
Corresponding cluster labels, encoded as {0, ..., n_clusters-1}.
label_freqs : array-like
Distribution of cluster labels in ``labels``.
"""
n_chunk_samples = D_chunk.shape[0]
# accumulate distances from each sample to each cluster
cluster_distances = np.zeros(
(n_chunk_samples, len(label_freqs)), dtype=D_chunk.dtype
)
if issparse(D_chunk):
if D_chunk.format != "csr":
raise TypeError(
"Expected CSR matrix. Please pass sparse matrix in CSR format."
)
for i in range(n_chunk_samples):
indptr = D_chunk.indptr
indices = D_chunk.indices[indptr[i] : indptr[i + 1]]
sample_weights = D_chunk.data[indptr[i] : indptr[i + 1]]
sample_labels = np.take(labels, indices)
cluster_distances[i] += np.bincount(
sample_labels, weights=sample_weights, minlength=len(label_freqs)
)
else:
for i in range(n_chunk_samples):
sample_weights = D_chunk[i]
sample_labels = labels
cluster_distances[i] += np.bincount(
sample_labels, weights=sample_weights, minlength=len(label_freqs)
)
# intra_index selects intra-cluster distances within cluster_distances
end = start + n_chunk_samples
intra_index = (np.arange(n_chunk_samples), labels[start:end])
# intra_cluster_distances are averaged over cluster size outside this function
intra_cluster_distances = cluster_distances[intra_index]
# of the remaining distances we normalise and extract the minimum
cluster_distances[intra_index] = np.inf
cluster_distances /= label_freqs
inter_cluster_distances = cluster_distances.min(axis=1)
return intra_cluster_distances, inter_cluster_distances
|
Accumulate silhouette statistics for vertical chunk of X.
Parameters
----------
D_chunk : {array-like, sparse matrix} of shape (n_chunk_samples, n_samples)
Precomputed distances for a chunk. If a sparse matrix is provided,
only CSR format is accepted.
start : int
First index in the chunk.
labels : array-like of shape (n_samples,)
Corresponding cluster labels, encoded as {0, ..., n_clusters-1}.
label_freqs : array-like
Distribution of cluster labels in ``labels``.
|
_silhouette_reduce
|
python
|
scikit-learn/scikit-learn
|
sklearn/metrics/cluster/_unsupervised.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/metrics/cluster/_unsupervised.py
|
BSD-3-Clause
|
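To make the chunked bookkeeping concrete, here is a hedged sketch that feeds the full dense distance matrix as a single chunk and finishes the averaging that, as noted in the comments, happens in the caller; the private import is shown only for illustration.
import numpy as np
from sklearn.metrics import pairwise_distances, silhouette_samples
from sklearn.metrics.cluster._unsupervised import _silhouette_reduce
X = np.array([[0.0], [0.1], [1.0], [1.1]])
labels = np.array([0, 0, 1, 1])
label_freqs = np.bincount(labels)
D = pairwise_distances(X)
intra_sums, inter_means = _silhouette_reduce(
    D, start=0, labels=labels, label_freqs=label_freqs
)
a = intra_sums / (label_freqs[labels] - 1)  # averaging done by the caller
b = inter_means
np.testing.assert_allclose((b - a) / np.maximum(a, b), silhouette_samples(X, labels))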
def silhouette_samples(X, labels, *, metric="euclidean", **kwds):
"""Compute the Silhouette Coefficient for each sample.
The Silhouette Coefficient is a measure of how well samples are clustered
with samples that are similar to themselves. Clustering models with a high
Silhouette Coefficient are said to be dense, where samples in the same
cluster are similar to each other, and well separated, where samples in
different clusters are not very similar to each other.
The Silhouette Coefficient is calculated using the mean intra-cluster
distance (``a``) and the mean nearest-cluster distance (``b``) for each
sample. The Silhouette Coefficient for a sample is ``(b - a) / max(a,
b)``.
Note that Silhouette Coefficient is only defined if number of labels
is ``2 <= n_labels <= n_samples - 1``.
This function returns the Silhouette Coefficient for each sample.
The best value is 1 and the worst value is -1. Values near 0 indicate
overlapping clusters.
Read more in the :ref:`User Guide <silhouette_coefficient>`.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples_a, n_samples_a) if metric == \
"precomputed" or (n_samples_a, n_features) otherwise
An array of pairwise distances between samples, or a feature array. If
a sparse matrix is provided, CSR format should be favoured avoiding
an additional copy.
labels : array-like of shape (n_samples,)
Label values for each sample.
metric : str or callable, default='euclidean'
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by :func:`~sklearn.metrics.pairwise_distances`.
If ``X`` is the distance array itself, use "precomputed" as the metric.
Precomputed distance matrices must have 0 along the diagonal.
**kwds : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a ``scipy.spatial.distance`` metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
silhouette : array-like of shape (n_samples,)
Silhouette Coefficients for each sample.
References
----------
.. [1] `Peter J. Rousseeuw (1987). "Silhouettes: a Graphical Aid to the
Interpretation and Validation of Cluster Analysis". Computational
and Applied Mathematics 20: 53-65.
<https://www.sciencedirect.com/science/article/pii/0377042787901257>`_
.. [2] `Wikipedia entry on the Silhouette Coefficient
<https://en.wikipedia.org/wiki/Silhouette_(clustering)>`_
Examples
--------
>>> from sklearn.metrics import silhouette_samples
>>> from sklearn.datasets import make_blobs
>>> from sklearn.cluster import KMeans
>>> X, y = make_blobs(n_samples=50, random_state=42)
>>> kmeans = KMeans(n_clusters=3, random_state=42)
>>> labels = kmeans.fit_predict(X)
>>> silhouette_samples(X, labels)
array([...])
"""
X, labels = check_X_y(X, labels, accept_sparse=["csr"])
# Check for non-zero diagonal entries in precomputed distance matrix
if metric == "precomputed":
error_msg = ValueError(
"The precomputed distance matrix contains non-zero "
"elements on the diagonal. Use np.fill_diagonal(X, 0)."
)
if X.dtype.kind == "f":
atol = _atol_for_type(X.dtype)
if np.any(np.abs(X.diagonal()) > atol):
raise error_msg
elif np.any(X.diagonal() != 0): # integral dtype
raise error_msg
le = LabelEncoder()
labels = le.fit_transform(labels)
n_samples = len(labels)
label_freqs = np.bincount(labels)
check_number_of_labels(len(le.classes_), n_samples)
kwds["metric"] = metric
reduce_func = functools.partial(
_silhouette_reduce, labels=labels, label_freqs=label_freqs
)
results = zip(*pairwise_distances_chunked(X, reduce_func=reduce_func, **kwds))
intra_clust_dists, inter_clust_dists = results
intra_clust_dists = np.concatenate(intra_clust_dists)
inter_clust_dists = np.concatenate(inter_clust_dists)
denom = (label_freqs - 1).take(labels, mode="clip")
with np.errstate(divide="ignore", invalid="ignore"):
intra_clust_dists /= denom
sil_samples = inter_clust_dists - intra_clust_dists
with np.errstate(divide="ignore", invalid="ignore"):
sil_samples /= np.maximum(intra_clust_dists, inter_clust_dists)
# nan values are for clusters of size 1, and should be 0
return np.nan_to_num(sil_samples)
|
Compute the Silhouette Coefficient for each sample.
The Silhouette Coefficient is a measure of how well samples are clustered
with samples that are similar to themselves. Clustering models with a high
Silhouette Coefficient are said to be dense, where samples in the same
cluster are similar to each other, and well separated, where samples in
different clusters are not very similar to each other.
The Silhouette Coefficient is calculated using the mean intra-cluster
distance (``a``) and the mean nearest-cluster distance (``b``) for each
sample. The Silhouette Coefficient for a sample is ``(b - a) / max(a,
b)``.
Note that Silhouette Coefficient is only defined if number of labels
is ``2 <= n_labels <= n_samples - 1``.
This function returns the Silhouette Coefficient for each sample.
The best value is 1 and the worst value is -1. Values near 0 indicate
overlapping clusters.
Read more in the :ref:`User Guide <silhouette_coefficient>`.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples_a, n_samples_a) if metric == "precomputed" or (n_samples_a, n_features) otherwise
An array of pairwise distances between samples, or a feature array. If
a sparse matrix is provided, CSR format should be favoured avoiding
an additional copy.
labels : array-like of shape (n_samples,)
Label values for each sample.
metric : str or callable, default='euclidean'
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by :func:`~sklearn.metrics.pairwise_distances`.
If ``X`` is the distance array itself, use "precomputed" as the metric.
Precomputed distance matrices must have 0 along the diagonal.
**kwds : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a ``scipy.spatial.distance`` metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
silhouette : array-like of shape (n_samples,)
Silhouette Coefficients for each sample.
References
----------
.. [1] `Peter J. Rousseeuw (1987). "Silhouettes: a Graphical Aid to the
Interpretation and Validation of Cluster Analysis". Computational
and Applied Mathematics 20: 53-65.
<https://www.sciencedirect.com/science/article/pii/0377042787901257>`_
.. [2] `Wikipedia entry on the Silhouette Coefficient
<https://en.wikipedia.org/wiki/Silhouette_(clustering)>`_
Examples
--------
>>> from sklearn.metrics import silhouette_samples
>>> from sklearn.datasets import make_blobs
>>> from sklearn.cluster import KMeans
>>> X, y = make_blobs(n_samples=50, random_state=42)
>>> kmeans = KMeans(n_clusters=3, random_state=42)
>>> labels = kmeans.fit_predict(X)
>>> silhouette_samples(X, labels)
array([...])
|
silhouette_samples
|
python
|
scikit-learn/scikit-learn
|
sklearn/metrics/cluster/_unsupervised.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/metrics/cluster/_unsupervised.py
|
BSD-3-Clause
|
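A hedged consistency sketch for the precomputed path described above: a zero-diagonal Euclidean distance matrix should reproduce the feature-based result (the data and tolerance below are assumptions for illustration).
import numpy as np
from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs
from sklearn.metrics import pairwise_distances, silhouette_samples
X, _ = make_blobs(n_samples=40, centers=2, random_state=1)
labels = KMeans(n_clusters=2, n_init=10, random_state=1).fit_predict(X)
D = pairwise_distances(X)  # Euclidean by default, exact zeros on the diagonal
np.testing.assert_allclose(
    silhouette_samples(X, labels),
    silhouette_samples(D, labels, metric="precomputed"),
    rtol=1e-6,
)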
def calinski_harabasz_score(X, labels):
"""Compute the Calinski and Harabasz score.
It is also known as the Variance Ratio Criterion.
The score is defined as ratio of the sum of between-cluster dispersion and
of within-cluster dispersion.
Read more in the :ref:`User Guide <calinski_harabasz_index>`.
Parameters
----------
X : array-like of shape (n_samples, n_features)
A list of ``n_features``-dimensional data points. Each row corresponds
to a single data point.
labels : array-like of shape (n_samples,)
Predicted labels for each sample.
Returns
-------
score : float
The resulting Calinski-Harabasz score.
References
----------
.. [1] `T. Calinski and J. Harabasz, 1974. "A dendrite method for cluster
analysis". Communications in Statistics
<https://www.tandfonline.com/doi/abs/10.1080/03610927408827101>`_
Examples
--------
>>> from sklearn.datasets import make_blobs
>>> from sklearn.cluster import KMeans
>>> from sklearn.metrics import calinski_harabasz_score
>>> X, _ = make_blobs(random_state=0)
>>> kmeans = KMeans(n_clusters=3, random_state=0).fit(X)
>>> calinski_harabasz_score(X, kmeans.labels_)
114.8...
"""
X, labels = check_X_y(X, labels)
le = LabelEncoder()
labels = le.fit_transform(labels)
n_samples, _ = X.shape
n_labels = len(le.classes_)
check_number_of_labels(n_labels, n_samples)
extra_disp, intra_disp = 0.0, 0.0
mean = np.mean(X, axis=0)
for k in range(n_labels):
cluster_k = X[labels == k]
mean_k = np.mean(cluster_k, axis=0)
extra_disp += len(cluster_k) * np.sum((mean_k - mean) ** 2)
intra_disp += np.sum((cluster_k - mean_k) ** 2)
return float(
1.0
if intra_disp == 0.0
else extra_disp * (n_samples - n_labels) / (intra_disp * (n_labels - 1.0))
)
|
Compute the Calinski and Harabasz score.
It is also known as the Variance Ratio Criterion.
The score is defined as ratio of the sum of between-cluster dispersion and
of within-cluster dispersion.
Read more in the :ref:`User Guide <calinski_harabasz_index>`.
Parameters
----------
X : array-like of shape (n_samples, n_features)
A list of ``n_features``-dimensional data points. Each row corresponds
to a single data point.
labels : array-like of shape (n_samples,)
Predicted labels for each sample.
Returns
-------
score : float
The resulting Calinski-Harabasz score.
References
----------
.. [1] `T. Calinski and J. Harabasz, 1974. "A dendrite method for cluster
analysis". Communications in Statistics
<https://www.tandfonline.com/doi/abs/10.1080/03610927408827101>`_
Examples
--------
>>> from sklearn.datasets import make_blobs
>>> from sklearn.cluster import KMeans
>>> from sklearn.metrics import calinski_harabasz_score
>>> X, _ = make_blobs(random_state=0)
>>> kmeans = KMeans(n_clusters=3, random_state=0).fit(X)
>>> calinski_harabasz_score(X, kmeans.labels_)
114.8...
|
calinski_harabasz_score
|
python
|
scikit-learn/scikit-learn
|
sklearn/metrics/cluster/_unsupervised.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/metrics/cluster/_unsupervised.py
|
BSD-3-Clause
|
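The ratio in the return statement can be re-derived by hand; this hedged sketch (with hypothetical blob data) checks (between-group dispersion / (k - 1)) / (within-group dispersion / (n - k)) against the function.
import numpy as np
from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs
from sklearn.metrics import calinski_harabasz_score
X, _ = make_blobs(n_samples=90, centers=3, random_state=0)
labels = KMeans(n_clusters=3, n_init=10, random_state=0).fit_predict(X)
n, k = X.shape[0], len(np.unique(labels))
overall_mean = X.mean(axis=0)
# between-group dispersion: cluster sizes times squared centroid offsets
between = sum(
    (labels == c).sum() * np.sum((X[labels == c].mean(axis=0) - overall_mean) ** 2)
    for c in np.unique(labels)
)
# within-group dispersion: squared deviations from each cluster centroid
within = sum(
    np.sum((X[labels == c] - X[labels == c].mean(axis=0)) ** 2)
    for c in np.unique(labels)
)
manual = (between / (k - 1)) / (within / (n - k))
assert np.isclose(manual, calinski_harabasz_score(X, labels))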
def davies_bouldin_score(X, labels):
"""Compute the Davies-Bouldin score.
The score is defined as the average similarity measure of each cluster with
its most similar cluster, where similarity is the ratio of within-cluster
distances to between-cluster distances. Thus, clusters which are farther
apart and less dispersed will result in a better score.
The minimum score is zero, with lower values indicating better clustering.
Read more in the :ref:`User Guide <davies-bouldin_index>`.
.. versionadded:: 0.20
Parameters
----------
X : array-like of shape (n_samples, n_features)
A list of ``n_features``-dimensional data points. Each row corresponds
to a single data point.
labels : array-like of shape (n_samples,)
Predicted labels for each sample.
Returns
-------
score : float
The resulting Davies-Bouldin score.
References
----------
.. [1] Davies, David L.; Bouldin, Donald W. (1979).
`"A Cluster Separation Measure"
<https://ieeexplore.ieee.org/document/4766909>`__.
IEEE Transactions on Pattern Analysis and Machine Intelligence.
PAMI-1 (2): 224-227
Examples
--------
>>> from sklearn.metrics import davies_bouldin_score
>>> X = [[0, 1], [1, 1], [3, 4]]
>>> labels = [0, 0, 1]
>>> davies_bouldin_score(X, labels)
0.12...
"""
X, labels = check_X_y(X, labels)
le = LabelEncoder()
labels = le.fit_transform(labels)
n_samples, _ = X.shape
n_labels = len(le.classes_)
check_number_of_labels(n_labels, n_samples)
intra_dists = np.zeros(n_labels)
centroids = np.zeros((n_labels, len(X[0])), dtype=float)
for k in range(n_labels):
cluster_k = _safe_indexing(X, labels == k)
centroid = cluster_k.mean(axis=0)
centroids[k] = centroid
intra_dists[k] = np.average(pairwise_distances(cluster_k, [centroid]))
centroid_distances = pairwise_distances(centroids)
if np.allclose(intra_dists, 0) or np.allclose(centroid_distances, 0):
return 0.0
centroid_distances[centroid_distances == 0] = np.inf
combined_intra_dists = intra_dists[:, None] + intra_dists
scores = np.max(combined_intra_dists / centroid_distances, axis=1)
return float(np.mean(scores))
|
Compute the Davies-Bouldin score.
The score is defined as the average similarity measure of each cluster with
its most similar cluster, where similarity is the ratio of within-cluster
distances to between-cluster distances. Thus, clusters which are farther
apart and less dispersed will result in a better score.
The minimum score is zero, with lower values indicating better clustering.
Read more in the :ref:`User Guide <davies-bouldin_index>`.
.. versionadded:: 0.20
Parameters
----------
X : array-like of shape (n_samples, n_features)
A list of ``n_features``-dimensional data points. Each row corresponds
to a single data point.
labels : array-like of shape (n_samples,)
Predicted labels for each sample.
Returns
-------
score : float
The resulting Davies-Bouldin score.
References
----------
.. [1] Davies, David L.; Bouldin, Donald W. (1979).
`"A Cluster Separation Measure"
<https://ieeexplore.ieee.org/document/4766909>`__.
IEEE Transactions on Pattern Analysis and Machine Intelligence.
PAMI-1 (2): 224-227
Examples
--------
>>> from sklearn.metrics import davies_bouldin_score
>>> X = [[0, 1], [1, 1], [3, 4]]
>>> labels = [0, 0, 1]
>>> davies_bouldin_score(X, labels)
0.12...
|
davies_bouldin_score
|
python
|
scikit-learn/scikit-learn
|
sklearn/metrics/cluster/_unsupervised.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/metrics/cluster/_unsupervised.py
|
BSD-3-Clause
|
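For a concrete reading of the pairwise similarity R_ij = (s_i + s_j) / d_ij, here is a hedged re-computation on the same toy data as the docstring example above.
import numpy as np
from sklearn.metrics import davies_bouldin_score, pairwise_distances
X = np.array([[0, 1], [1, 1], [3, 4]], dtype=float)
labels = np.array([0, 0, 1])
centroids = np.array([X[labels == k].mean(axis=0) for k in (0, 1)])
# s_k: mean distance of cluster members to their centroid
s = np.array(
    [pairwise_distances(X[labels == k], centroids[k][None, :]).mean() for k in (0, 1)]
)
d = pairwise_distances(centroids)
d[d == 0] = np.inf  # ignore the i == j entries
R = (s[:, None] + s[None, :]) / d
manual = R.max(axis=1).mean()
assert np.isclose(manual, davies_bouldin_score(X, labels))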
def test_consensus_score_issue2445():
"""Different number of biclusters in A and B"""
a_rows = np.array(
[
[True, True, False, False],
[False, False, True, True],
[False, False, False, True],
]
)
a_cols = np.array(
[
[True, True, False, False],
[False, False, True, True],
[False, False, False, True],
]
)
idx = [0, 2]
s = consensus_score((a_rows, a_cols), (a_rows[idx], a_cols[idx]))
# B contains 2 of the 3 biclusters in A, so score should be 2/3
assert_almost_equal(s, 2.0 / 3.0)
|
Different number of biclusters in A and B
|
test_consensus_score_issue2445
|
python
|
scikit-learn/scikit-learn
|
sklearn/metrics/cluster/tests/test_bicluster.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/metrics/cluster/tests/test_bicluster.py
|
BSD-3-Clause
|
def test_returned_value_consistency(name):
"""Ensure that the returned values of all metrics are consistent.
It can only be a float. It should not be a numpy float64 or float32.
"""
rng = np.random.RandomState(0)
X = rng.randint(10, size=(20, 10))
labels_true = rng.randint(0, 3, size=(20,))
labels_pred = rng.randint(0, 3, size=(20,))
if name in SUPERVISED_METRICS:
metric = SUPERVISED_METRICS[name]
score = metric(labels_true, labels_pred)
else:
metric = UNSUPERVISED_METRICS[name]
score = metric(X, labels_pred)
assert isinstance(score, float)
assert not isinstance(score, (np.float64, np.float32))
|
Ensure that the returned values of all metrics are consistent.
It can only be a float. It should not be a numpy float64 or float32.
|
test_returned_value_consistency
|
python
|
scikit-learn/scikit-learn
|
sklearn/metrics/cluster/tests/test_common.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/metrics/cluster/tests/test_common.py
|
BSD-3-Clause
|
def test_adjusted_rand_score_overflow():
"""Check that large amount of data will not lead to overflow in
`adjusted_rand_score`.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/20305
"""
rng = np.random.RandomState(0)
y_true = rng.randint(0, 2, 100_000, dtype=np.int8)
y_pred = rng.randint(0, 2, 100_000, dtype=np.int8)
with warnings.catch_warnings():
warnings.simplefilter("error", RuntimeWarning)
adjusted_rand_score(y_true, y_pred)
|
Check that a large amount of data will not lead to overflow in
`adjusted_rand_score`.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/20305
|
test_adjusted_rand_score_overflow
|
python
|
scikit-learn/scikit-learn
|
sklearn/metrics/cluster/tests/test_supervised.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/metrics/cluster/tests/test_supervised.py
|
BSD-3-Clause
|
def test_normalized_mutual_info_score_bounded(average_method):
"""Check that nmi returns a score between 0 (included) and 1 (excluded
for a non-perfect match).
Non-regression test for issue #13836
"""
labels1 = [0] * 469
labels2 = [1] + labels1[1:]
labels3 = [0, 1] + labels1[2:]
# labels1 is constant. The mutual info between labels1 and any other labelling is 0.
nmi = normalized_mutual_info_score(labels1, labels2, average_method=average_method)
assert nmi == 0
# non constant, non perfect matching labels
nmi = normalized_mutual_info_score(labels2, labels3, average_method=average_method)
assert 0 <= nmi < 1
|
Check that nmi returns a score between 0 (included) and 1 (excluded
for a non-perfect match).
Non-regression test for issue #13836
|
test_normalized_mutual_info_score_bounded
|
python
|
scikit-learn/scikit-learn
|
sklearn/metrics/cluster/tests/test_supervised.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/metrics/cluster/tests/test_supervised.py
|
BSD-3-Clause
|
def test_fowlkes_mallows_sparse_deprecated(sparse):
"""Check deprecation warning for 'sparse' parameter of fowlkes_mallows_score."""
with pytest.warns(
FutureWarning, match="The 'sparse' parameter was deprecated in 1.7"
):
fowlkes_mallows_score([0, 1], [1, 1], sparse=sparse)
|
Check deprecation warning for 'sparse' parameter of fowlkes_mallows_score.
|
test_fowlkes_mallows_sparse_deprecated
|
python
|
scikit-learn/scikit-learn
|
sklearn/metrics/cluster/tests/test_supervised.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/metrics/cluster/tests/test_supervised.py
|
BSD-3-Clause
|
def test_silhouette_samples_precomputed_sparse(sparse_container):
"""Check that silhouette_samples works for sparse matrices correctly."""
X = np.array([[0.2, 0.1, 0.1, 0.2, 0.1, 1.6, 0.2, 0.1]], dtype=np.float32).T
y = [0, 0, 0, 0, 1, 1, 1, 1]
pdist_dense = pairwise_distances(X)
pdist_sparse = sparse_container(pdist_dense)
assert issparse(pdist_sparse)
output_with_sparse_input = silhouette_samples(pdist_sparse, y, metric="precomputed")
output_with_dense_input = silhouette_samples(pdist_dense, y, metric="precomputed")
assert_allclose(output_with_sparse_input, output_with_dense_input)
|
Check that silhouette_samples works for sparse matrices correctly.
|
test_silhouette_samples_precomputed_sparse
|
python
|
scikit-learn/scikit-learn
|
sklearn/metrics/cluster/tests/test_unsupervised.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/metrics/cluster/tests/test_unsupervised.py
|
BSD-3-Clause
|
def test_silhouette_samples_euclidean_sparse(sparse_container):
"""Check that silhouette_samples works for sparse matrices correctly."""
X = np.array([[0.2, 0.1, 0.1, 0.2, 0.1, 1.6, 0.2, 0.1]], dtype=np.float32).T
y = [0, 0, 0, 0, 1, 1, 1, 1]
pdist_dense = pairwise_distances(X)
pdist_sparse = sparse_container(pdist_dense)
assert issparse(pdist_sparse)
output_with_sparse_input = silhouette_samples(pdist_sparse, y)
output_with_dense_input = silhouette_samples(pdist_dense, y)
assert_allclose(output_with_sparse_input, output_with_dense_input)
|
Check that silhouette_samples works for sparse matrices correctly.
|
test_silhouette_samples_euclidean_sparse
|
python
|
scikit-learn/scikit-learn
|
sklearn/metrics/cluster/tests/test_unsupervised.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/metrics/cluster/tests/test_unsupervised.py
|
BSD-3-Clause
|
def test_silhouette_reduce(sparse_container):
"""Check for non-CSR input to private method `_silhouette_reduce`."""
X = np.array([[0.2, 0.1, 0.1, 0.2, 0.1, 1.6, 0.2, 0.1]], dtype=np.float32).T
pdist_dense = pairwise_distances(X)
pdist_sparse = sparse_container(pdist_dense)
y = [0, 0, 0, 0, 1, 1, 1, 1]
label_freqs = np.bincount(y)
with pytest.raises(
TypeError,
match="Expected CSR matrix. Please pass sparse matrix in CSR format.",
):
_silhouette_reduce(pdist_sparse, start=0, labels=y, label_freqs=label_freqs)
|
Check for non-CSR input to private method `_silhouette_reduce`.
|
test_silhouette_reduce
|
python
|
scikit-learn/scikit-learn
|
sklearn/metrics/cluster/tests/test_unsupervised.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/metrics/cluster/tests/test_unsupervised.py
|
BSD-3-Clause
|
def assert_raises_on_only_one_label(func):
"""Assert message when there is only one label"""
rng = np.random.RandomState(seed=0)
with pytest.raises(ValueError, match="Number of labels is"):
func(rng.rand(10, 2), np.zeros(10))
|
Assert message when there is only one label
|
assert_raises_on_only_one_label
|
python
|
scikit-learn/scikit-learn
|
sklearn/metrics/cluster/tests/test_unsupervised.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/metrics/cluster/tests/test_unsupervised.py
|
BSD-3-Clause
|
def assert_raises_on_all_points_same_cluster(func):
"""Assert message when all point are in different clusters"""
rng = np.random.RandomState(seed=0)
with pytest.raises(ValueError, match="Number of labels is"):
func(rng.rand(10, 2), np.arange(10))
|
Assert message when all points are in different clusters
|
assert_raises_on_all_points_same_cluster
|
python
|
scikit-learn/scikit-learn
|
sklearn/metrics/cluster/tests/test_unsupervised.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/metrics/cluster/tests/test_unsupervised.py
|
BSD-3-Clause
|
def test_silhouette_score_integer_precomputed():
"""Check that silhouette_score works for precomputed metrics that are integers.
Non-regression test for #22107.
"""
result = silhouette_score(
[[0, 1, 2], [1, 0, 1], [2, 1, 0]], [0, 0, 1], metric="precomputed"
)
assert result == pytest.approx(1 / 6)
# non-zero on diagonal for ints raises an error
with pytest.raises(ValueError, match="contains non-zero"):
silhouette_score(
[[1, 1, 2], [1, 0, 1], [2, 1, 0]], [0, 0, 1], metric="precomputed"
)
|
Check that silhouette_score works for precomputed metrics that are integers.
Non-regression test for #22107.
|
test_silhouette_score_integer_precomputed
|
python
|
scikit-learn/scikit-learn
|
sklearn/metrics/cluster/tests/test_unsupervised.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/metrics/cluster/tests/test_unsupervised.py
|
BSD-3-Clause
|
def make_prediction(dataset=None, binary=False):
"""Make some classification predictions on a toy dataset using a SVC
If binary is True restrict to a binary classification problem instead of a
multiclass classification problem
"""
if dataset is None:
# import some data to play with
dataset = datasets.load_iris()
X = dataset.data
y = dataset.target
if binary:
# restrict to a binary classification task
X, y = X[y < 2], y[y < 2]
n_samples, n_features = X.shape
p = np.arange(n_samples)
rng = check_random_state(37)
rng.shuffle(p)
X, y = X[p], y[p]
half = int(n_samples / 2)
# add noisy features to make the problem harder and avoid perfect results
rng = np.random.RandomState(0)
X = np.c_[X, rng.randn(n_samples, 200 * n_features)]
# run classifier, get class probabilities and label predictions
clf = svm.SVC(kernel="linear", probability=True, random_state=0)
y_pred_proba = clf.fit(X[:half], y[:half]).predict_proba(X[half:])
if binary:
# only interested in probabilities of the positive case
# XXX: do we really want a special API for the binary case?
y_pred_proba = y_pred_proba[:, 1]
y_pred = clf.predict(X[half:])
y_true = y[half:]
return y_true, y_pred, y_pred_proba
|
Make some classification predictions on a toy dataset using an SVC.
If binary is True, restrict to a binary classification problem instead of a
multiclass classification problem.
|
make_prediction
|
python
|
scikit-learn/scikit-learn
|
sklearn/metrics/tests/test_classification.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/metrics/tests/test_classification.py
|
BSD-3-Clause
|
def test_classification_report_labels_subset_superset(labels, show_micro_avg):
"""Check the behaviour of passing `labels` as a superset or subset of the labels.
When a superset, we expect to show the "accuracy" in the report, while it should be
the micro-averaging if this is a subset.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/27927
"""
y_true, y_pred = [0, 1], [0, 1]
report = classification_report(y_true, y_pred, labels=labels, output_dict=True)
if show_micro_avg:
assert "micro avg" in report
assert "accuracy" not in report
else: # accuracy should be shown
assert "accuracy" in report
assert "micro avg" not in report
|
Check the behaviour of passing `labels` as a superset or subset of the labels.
When a superset, we expect to show the "accuracy" in the report, while it should be
the micro-averaging if this is a subset.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/27927
|
test_classification_report_labels_subset_superset
|
python
|
scikit-learn/scikit-learn
|
sklearn/metrics/tests/test_classification.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/metrics/tests/test_classification.py
|
BSD-3-Clause
|
def test_confusion_matrix_single_label():
"""Test `confusion_matrix` warns when only one label found."""
y_test = [0, 0, 0, 0]
y_pred = [0, 0, 0, 0]
with pytest.warns(UserWarning, match="A single label was found in"):
confusion_matrix(y_pred, y_test)
|
Test `confusion_matrix` warns when only one label found.
|
test_confusion_matrix_single_label
|
python
|
scikit-learn/scikit-learn
|
sklearn/metrics/tests/test_classification.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/metrics/tests/test_classification.py
|
BSD-3-Clause
|
def test_likelihood_ratios_raise_warning_deprecation(raise_warning):
"""Test that class_likelihood_ratios raises a `FutureWarning` when `raise_warning`
param is set."""
y_true = np.array([1, 0])
y_pred = np.array([1, 0])
msg = "`raise_warning` was deprecated in version 1.7 and will be removed in 1.9."
with pytest.warns(FutureWarning, match=msg):
class_likelihood_ratios(y_true, y_pred, raise_warning=raise_warning)
|
Test that class_likelihood_ratios raises a `FutureWarning` when `raise_warning`
param is set.
|
test_likelihood_ratios_raise_warning_deprecation
|
python
|
scikit-learn/scikit-learn
|
sklearn/metrics/tests/test_classification.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/metrics/tests/test_classification.py
|
BSD-3-Clause
|
def test_likelihood_ratios_replace_undefined_by_worst():
"""Test that class_likelihood_ratios returns the worst scores `1.0` for both LR+ and
LR- when `replace_undefined_by=1` is set."""
# This data causes fp=0 (0 false positives) in the confusion_matrix and a division
# by zero that affects the positive_likelihood_ratio:
y_true = np.array([1, 1, 0])
y_pred = np.array([1, 0, 0])
positive_likelihood_ratio, _ = class_likelihood_ratios(
y_true, y_pred, replace_undefined_by=1
)
assert positive_likelihood_ratio == pytest.approx(1.0)
# This data causes tn=0 (0 true negatives) in the confusion_matrix and a division
# by zero that affects the negative_likelihood_ratio:
y_true = np.array([1, 0, 0])
y_pred = np.array([1, 1, 1])
_, negative_likelihood_ratio = class_likelihood_ratios(
y_true, y_pred, replace_undefined_by=1
)
assert negative_likelihood_ratio == pytest.approx(1.0)
|
Test that class_likelihood_ratios returns the worst scores `1.0` for both LR+ and
LR- when `replace_undefined_by=1` is set.
|
test_likelihood_ratios_replace_undefined_by_worst
|
python
|
scikit-learn/scikit-learn
|
sklearn/metrics/tests/test_classification.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/metrics/tests/test_classification.py
|
BSD-3-Clause
|
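Some context on the ratios being tested in this group of records: LR+ = sensitivity / (1 - specificity) and LR- = (1 - sensitivity) / specificity. A hedged sketch with hypothetical, well-defined data (no zero cells) recovers the returned pair from the confusion matrix.
import numpy as np
from sklearn.metrics import class_likelihood_ratios, confusion_matrix
y_true = np.array([1, 1, 0, 0, 1, 0, 1, 0])
y_pred = np.array([1, 0, 0, 1, 1, 0, 1, 0])
tn, fp, fn, tp = confusion_matrix(y_true, y_pred).ravel()
sensitivity = tp / (tp + fn)
specificity = tn / (tn + fp)
manual = (sensitivity / (1 - specificity), (1 - sensitivity) / specificity)
assert np.allclose(manual, class_likelihood_ratios(y_true, y_pred))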
def test_likelihood_ratios_wrong_dict_replace_undefined_by(replace_undefined_by):
"""Test that class_likelihood_ratios raises a `ValueError` if the input dict for
`replace_undefined_by` is in the wrong format or contains impossible values."""
y_true = np.array([1, 0])
y_pred = np.array([1, 0])
msg = "The dictionary passed as `replace_undefined_by` needs to be in the form"
with pytest.raises(ValueError, match=msg):
class_likelihood_ratios(
y_true, y_pred, replace_undefined_by=replace_undefined_by
)
|
Test that class_likelihood_ratios raises a `ValueError` if the input dict for
`replace_undefined_by` is in the wrong format or contains impossible values.
|
test_likelihood_ratios_wrong_dict_replace_undefined_by
|
python
|
scikit-learn/scikit-learn
|
sklearn/metrics/tests/test_classification.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/metrics/tests/test_classification.py
|
BSD-3-Clause
|
def test_likelihood_ratios_replace_undefined_by_0_fp(replace_undefined_by, expected):
"""Test that the `replace_undefined_by` param returns the right value for the
positive_likelihood_ratio as defined by the user."""
# This data causes fp=0 (0 false positives) in the confusion_matrix and a division
# by zero that affects the positive_likelihood_ratio:
y_true = np.array([1, 1, 0])
y_pred = np.array([1, 0, 0])
positive_likelihood_ratio, _ = class_likelihood_ratios(
y_true, y_pred, replace_undefined_by=replace_undefined_by
)
if np.isnan(expected):
assert np.isnan(positive_likelihood_ratio)
else:
assert positive_likelihood_ratio == pytest.approx(expected)
|
Test that the `replace_undefined_by` param returns the right value for the
positive_likelihood_ratio as defined by the user.
|
test_likelihood_ratios_replace_undefined_by_0_fp
|
python
|
scikit-learn/scikit-learn
|
sklearn/metrics/tests/test_classification.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/metrics/tests/test_classification.py
|
BSD-3-Clause
|
def test_likelihood_ratios_replace_undefined_by_0_tn(replace_undefined_by, expected):
"""Test that the `replace_undefined_by` param returns the right value for the
negative_likelihood_ratio as defined by the user."""
# This data causes tn=0 (0 true negatives) in the confusion_matrix and a division
# by zero that affects the negative_likelihood_ratio:
y_true = np.array([1, 0, 0])
y_pred = np.array([1, 1, 1])
_, negative_likelihood_ratio = class_likelihood_ratios(
y_true, y_pred, replace_undefined_by=replace_undefined_by
)
if np.isnan(expected):
assert np.isnan(negative_likelihood_ratio)
else:
assert negative_likelihood_ratio == pytest.approx(expected)
|
Test that the `replace_undefined_by` param returns the right value for the
negative_likelihood_ratio as defined by the user.
|
test_likelihood_ratios_replace_undefined_by_0_tn
|
python
|
scikit-learn/scikit-learn
|
sklearn/metrics/tests/test_classification.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/metrics/tests/test_classification.py
|
BSD-3-Clause
|
def test_cohen_kappa_score_error_wrong_label():
"""Test that correct error is raised when users pass labels that are not in y1."""
labels = [1, 2]
y1 = np.array(["a"] * 5 + ["b"] * 5)
y2 = np.array(["b"] * 10)
with pytest.raises(
ValueError, match="At least one label in `labels` must be present in `y1`"
):
cohen_kappa_score(y1, y2, labels=labels)
|
Test that correct error is raised when users pass labels that are not in y1.
|
test_cohen_kappa_score_error_wrong_label
|
python
|
scikit-learn/scikit-learn
|
sklearn/metrics/tests/test_classification.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/metrics/tests/test_classification.py
|
BSD-3-Clause
|
def test_zero_division_nan_no_warning(metric, y_true, y_pred, zero_division):
"""Check the behaviour of `zero_division` when setting to 0, 1 or np.nan.
No warnings should be raised.
"""
with warnings.catch_warnings():
warnings.simplefilter("error")
result = metric(y_true, y_pred, zero_division=zero_division)
if np.isnan(zero_division):
assert np.isnan(result)
else:
assert result == zero_division
|
Check the behaviour of `zero_division` when setting to 0, 1 or np.nan.
No warnings should be raised.
|
test_zero_division_nan_no_warning
|
python
|
scikit-learn/scikit-learn
|
sklearn/metrics/tests/test_classification.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/metrics/tests/test_classification.py
|
BSD-3-Clause
|
def test_zero_division_nan_warning(metric, y_true, y_pred):
"""Check the behaviour of `zero_division` when setting to "warn".
An `UndefinedMetricWarning` should be raised.
"""
with pytest.warns(UndefinedMetricWarning):
result = metric(y_true, y_pred, zero_division="warn")
assert result == 0.0
|
Check the behaviour of `zero_division` when setting to "warn".
An `UndefinedMetricWarning` should be raised.
|
test_zero_division_nan_warning
|
python
|
scikit-learn/scikit-learn
|
sklearn/metrics/tests/test_classification.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/metrics/tests/test_classification.py
|
BSD-3-Clause
|
def test_confusion_matrix_pandas_nullable(dtype):
"""Checks that confusion_matrix works with pandas nullable dtypes.
Non-regression test for gh-25635.
"""
pd = pytest.importorskip("pandas")
y_ndarray = np.array([1, 0, 0, 1, 0, 1, 1, 0, 1])
y_true = pd.Series(y_ndarray, dtype=dtype)
y_predicted = pd.Series([0, 0, 1, 1, 0, 1, 1, 1, 1], dtype="int64")
output = confusion_matrix(y_true, y_predicted)
expected_output = confusion_matrix(y_ndarray, y_predicted)
assert_array_equal(output, expected_output)
|
Checks that confusion_matrix works with pandas nullable dtypes.
Non-regression test for gh-25635.
|
test_confusion_matrix_pandas_nullable
|
python
|
scikit-learn/scikit-learn
|
sklearn/metrics/tests/test_classification.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/metrics/tests/test_classification.py
|
BSD-3-Clause
|
def test_log_loss_eps(dtype):
"""Check the behaviour internal eps that changes depending on the input dtype.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/24315
"""
y_true = np.array([0, 1], dtype=dtype)
y_pred = np.array([1, 0], dtype=dtype)
loss = log_loss(y_true, y_pred)
assert np.isfinite(loss)
|
Check the behaviour of the internal eps that changes depending on the input dtype.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/24315
|
test_log_loss_eps
|
python
|
scikit-learn/scikit-learn
|
sklearn/metrics/tests/test_classification.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/metrics/tests/test_classification.py
|
BSD-3-Clause
|
def test_log_loss_not_probabilities_warning(dtype):
"""Check that log_loss raises a warning when y_pred values don't sum to 1."""
y_true = np.array([0, 1, 1, 0])
y_pred = np.array([[0.2, 0.7], [0.6, 0.3], [0.4, 0.7], [0.8, 0.3]], dtype=dtype)
with pytest.warns(UserWarning, match="The y_prob values do not sum to one."):
log_loss(y_true, y_pred)
|
Check that log_loss raises a warning when y_pred values don't sum to 1.
|
test_log_loss_not_probabilities_warning
|
python
|
scikit-learn/scikit-learn
|
sklearn/metrics/tests/test_classification.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/metrics/tests/test_classification.py
|
BSD-3-Clause
|
def test_log_loss_perfect_predictions(y_true, y_pred):
"""Check that log_loss returns 0 for perfect predictions."""
# Because of the clipping, the result is not exactly 0
assert log_loss(y_true, y_pred) == pytest.approx(0)
|
Check that log_loss returns 0 for perfect predictions.
|
test_log_loss_perfect_predictions
|
python
|
scikit-learn/scikit-learn
|
sklearn/metrics/tests/test_classification.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/metrics/tests/test_classification.py
|
BSD-3-Clause
|
def test_classification_metric_pos_label_types(metric, classes):
"""Check that the metric works with different types of `pos_label`.
We can expect `pos_label` to be a bool, an integer, a float, or a string.
No error should be raised for those types.
"""
rng = np.random.RandomState(42)
n_samples, pos_label = 10, classes[-1]
y_true = rng.choice(classes, size=n_samples, replace=True)
if metric is brier_score_loss:
# brier score loss requires probabilities
y_pred = rng.uniform(size=n_samples)
else:
y_pred = y_true.copy()
result = metric(y_true, y_pred, pos_label=pos_label)
assert not np.any(np.isnan(result))
|
Check that the metric works with different types of `pos_label`.
We can expect `pos_label` to be a bool, an integer, a float, or a string.
No error should be raised for those types.
|
test_classification_metric_pos_label_types
|
python
|
scikit-learn/scikit-learn
|
sklearn/metrics/tests/test_classification.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/metrics/tests/test_classification.py
|
BSD-3-Clause
|
def test_classification_metric_division_by_zero_nan_validaton(scoring):
"""Check that we validate `np.nan` properly for classification metrics.
With `n_jobs=2` in cross-validation, the `np.nan` used for the singleton will be
different in the sub-process and we should not use the `is` operator but
`math.isnan`.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/27563
"""
X, y = datasets.make_classification(random_state=0)
classifier = DecisionTreeClassifier(max_depth=3, random_state=0).fit(X, y)
cross_val_score(classifier, X, y, scoring=scoring, n_jobs=2, error_score="raise")
|
Check that we validate `np.nan` properly for classification metrics.
With `n_jobs=2` in cross-validation, the `np.nan` used for the singleton will be
different in the sub-process and we should not use the `is` operator but
`math.isnan`.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/27563
|
test_classification_metric_division_by_zero_nan_validaton
|
python
|
scikit-learn/scikit-learn
|
sklearn/metrics/tests/test_classification.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/metrics/tests/test_classification.py
|
BSD-3-Clause
|
def test_d2_log_loss_score_missing_labels():
"""Check that d2_log_loss_score works when not all labels are present in y_true
non-regression test for https://github.com/scikit-learn/scikit-learn/issues/30713
"""
y_true = [2, 0, 2, 0]
labels = [0, 1, 2]
sample_weight = [1.4, 0.6, 0.7, 0.3]
y_pred = np.tile([1, 0, 0], (4, 1))
log_loss_obs = log_loss(y_true, y_pred, sample_weight=sample_weight, labels=labels)
# Null model consists of weighted average of the classes.
# Given that the sum of the weights is 3,
# - weighted average of 0s is (0.6 + 0.3) / 3 = 0.3
# - weighted average of 1s is 0
# - weighted average of 2s is (1.4 + 0.7) / 3 = 0.7
y_pred_null = np.tile([0.3, 0, 0.7], (4, 1))
log_loss_null = log_loss(
y_true, y_pred_null, sample_weight=sample_weight, labels=labels
)
expected_d2_score = 1 - log_loss_obs / log_loss_null
d2_score = d2_log_loss_score(
y_true, y_pred, sample_weight=sample_weight, labels=labels
)
assert_allclose(d2_score, expected_d2_score)
|
Check that d2_log_loss_score works when not all labels are present in y_true.
Non-regression test for https://github.com/scikit-learn/scikit-learn/issues/30713
|
test_d2_log_loss_score_missing_labels
|
python
|
scikit-learn/scikit-learn
|
sklearn/metrics/tests/test_classification.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/metrics/tests/test_classification.py
|
BSD-3-Clause
|
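The null-model probabilities asserted in the comments above can be recomputed directly; a small sketch using the same weighted data.
import numpy as np
labels = np.array([0, 1, 2])
y_true = np.array([2, 0, 2, 0])
sample_weight = np.array([1.4, 0.6, 0.7, 0.3])
# weighted frequency of each label in y_true, normalised by the total weight
weights_per_class = np.array(
    [sample_weight[y_true == label].sum() for label in labels]
)
null_probabilities = weights_per_class / sample_weight.sum()
np.testing.assert_allclose(null_probabilities, [0.3, 0.0, 0.7])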
def test_d2_log_loss_score_label_order():
"""Check that d2_log_loss_score doesn't depend on the order of the labels."""
y_true = [2, 0, 2, 0]
y_pred = np.tile([1, 0, 0], (4, 1))
d2_score = d2_log_loss_score(y_true, y_pred, labels=[0, 1, 2])
d2_score_other = d2_log_loss_score(y_true, y_pred, labels=[0, 2, 1])
assert_allclose(d2_score, d2_score_other)
|
Check that d2_log_loss_score doesn't depend on the order of the labels.
|
test_d2_log_loss_score_label_order
|
python
|
scikit-learn/scikit-learn
|
sklearn/metrics/tests/test_classification.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/metrics/tests/test_classification.py
|
BSD-3-Clause
|
def test_d2_log_loss_score_raises():
"""Test that d2_log_loss_score raises the appropriate errors on
invalid inputs."""
y_true = [0, 1, 2]
y_pred = [[0.2, 0.8], [0.5, 0.5], [0.4, 0.6]]
err = "contain different number of classes"
with pytest.raises(ValueError, match=err):
d2_log_loss_score(y_true, y_pred)
# check error if the number of classes in labels do not match the number
# of classes in y_pred.
y_true = [0, 1, 2]
y_pred = [[0.5, 0.5], [0.5, 0.5], [0.5, 0.5]]
labels = [0, 1, 2]
err = "number of classes in labels is different"
with pytest.raises(ValueError, match=err):
d2_log_loss_score(y_true, y_pred, labels=labels)
# check error if y_true and y_pred do not have equal lengths
y_true = [0, 1, 2]
y_pred = [[0.5, 0.5, 0.5], [0.6, 0.3, 0.1]]
err = "inconsistent numbers of samples"
with pytest.raises(ValueError, match=err):
d2_log_loss_score(y_true, y_pred)
# check warning for samples < 2
y_true = [1]
y_pred = [[0.5, 0.5]]
err = "score is not well-defined"
with pytest.warns(UndefinedMetricWarning, match=err):
d2_log_loss_score(y_true, y_pred)
# check error when y_true only has 1 label
y_true = [1, 1, 1]
y_pred = [[0.5, 0.5], [0.5, 0.5], [0.5, 0.5]]
err = "y_true contains only one label"
with pytest.raises(ValueError, match=err):
d2_log_loss_score(y_true, y_pred)
# check error when y_true only has 1 label and labels also has
# only 1 label
y_true = [1, 1, 1]
labels = [1]
y_pred = [[0.5, 0.5], [0.5, 0.5], [0.5, 0.5]]
err = "The labels array needs to contain at least two"
with pytest.raises(ValueError, match=err):
d2_log_loss_score(y_true, y_pred, labels=labels)
|
Test that d2_log_loss_score raises the appropriate errors on
invalid inputs.
|
test_d2_log_loss_score_raises
|
python
|
scikit-learn/scikit-learn
|
sklearn/metrics/tests/test_classification.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/metrics/tests/test_classification.py
|
BSD-3-Clause
|
def precision_recall_curve_padded_thresholds(*args, **kwargs):
"""
The dimensions of the precision-recall pairs and the threshold array as
returned by precision_recall_curve do not match; see
:func:`sklearn.metrics.precision_recall_curve`.
This mismatch prevents implicit conversion of the returned triple into a higher
dimensional np.array of dtype('float64') (it would be of dtype('object')
instead), and such a conversion is needed for assert_array_equal to work correctly.
As a workaround we pad the threshold array with NaN values to match
the dimensions of the precision and recall arrays respectively.
"""
precision, recall, thresholds = precision_recall_curve(*args, **kwargs)
pad_thresholds = len(precision) - len(thresholds)
return np.array(
[
precision,
recall,
np.pad(
thresholds.astype(np.float64),
pad_width=(0, pad_thresholds),
mode="constant",
constant_values=[np.nan],
),
]
)
|
The dimensions of the precision-recall pairs and the threshold array as
returned by precision_recall_curve do not match; see
:func:`sklearn.metrics.precision_recall_curve`.
This mismatch prevents implicit conversion of the returned triple into a higher
dimensional np.array of dtype('float64') (it would be of dtype('object')
instead), and such a conversion is needed for assert_array_equal to work correctly.
As a workaround we pad the threshold array with NaN values to match
the dimensions of the precision and recall arrays respectively.
|
precision_recall_curve_padded_thresholds
|
python
|
scikit-learn/scikit-learn
|
sklearn/metrics/tests/test_common.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/metrics/tests/test_common.py
|
BSD-3-Clause
|
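A hedged illustration (with hypothetical scores) of the length mismatch this workaround addresses, and of the padding trick itself.
import numpy as np
from sklearn.metrics import precision_recall_curve
y_true = np.array([0, 0, 1, 1])
y_score = np.array([0.1, 0.4, 0.35, 0.8])
precision, recall, thresholds = precision_recall_curve(y_true, y_score)
assert len(precision) == len(recall) == len(thresholds) + 1
# Padding thresholds with one NaN makes the three rows stackable as float64.
padded = np.pad(thresholds.astype(np.float64), (0, 1), constant_values=np.nan)
stacked = np.array([precision, recall, padded])
assert stacked.dtype == np.float64 and stacked.shape == (3, len(precision))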
def test_classification_inf_nan_input(metric, y_true, y_score):
"""check that classification metrics raise a message mentioning the
occurrence of non-finite values in the target vectors."""
if not np.isfinite(y_true).all():
input_name = "y_true"
if np.isnan(y_true).any():
unexpected_value = "NaN"
else:
unexpected_value = "infinity or a value too large"
else:
input_name = "y_pred"
if np.isnan(y_score).any():
unexpected_value = "NaN"
else:
unexpected_value = "infinity or a value too large"
err_msg = f"Input {input_name} contains {unexpected_value}"
with pytest.raises(ValueError, match=err_msg):
metric(y_true, y_score)
|
Check that classification metrics raise a message mentioning the
occurrence of non-finite values in the target vectors.
|
test_classification_inf_nan_input
|
python
|
scikit-learn/scikit-learn
|
sklearn/metrics/tests/test_common.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/metrics/tests/test_common.py
|
BSD-3-Clause
|
def test_classification_binary_continuous_input(metric):
"""check that classification metrics raise a message of mixed type data
with continuous/binary target vectors."""
y_true, y_score = ["a", "b", "a"], [0.1, 0.2, 0.3]
err_msg = (
"Classification metrics can't handle a mix of binary and continuous targets"
)
with pytest.raises(ValueError, match=err_msg):
metric(y_true, y_score)
|
Check that classification metrics raise a message about mixed-type data
with continuous/binary target vectors.
|
test_classification_binary_continuous_input
|
python
|
scikit-learn/scikit-learn
|
sklearn/metrics/tests/test_common.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/metrics/tests/test_common.py
|
BSD-3-Clause
|
def _get_metric_kwargs_for_array_api_testing(metric, params):
"""Helper function to enable specifying a variety of additional params and
their corresponding values, so that they can be passed to a metric function
when testing for array api compliance."""
metric_kwargs_combinations = [{}]
for param, values in params.items():
if param not in signature(metric).parameters:
continue
new_combinations = []
for kwargs in metric_kwargs_combinations:
for value in values:
new_kwargs = kwargs.copy()
new_kwargs[param] = value
new_combinations.append(new_kwargs)
metric_kwargs_combinations = new_combinations
return metric_kwargs_combinations
|
Helper function to enable specifying a variety of additional params and
their corresponding values, so that they can be passed to a metric function
when testing for array api compliance.
|
_get_metric_kwargs_for_array_api_testing
|
python
|
scikit-learn/scikit-learn
|
sklearn/metrics/tests/test_common.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/metrics/tests/test_common.py
|
BSD-3-Clause
|
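To illustrate the expansion logic without importing the private test helper, here is an equivalent hedged sketch with a hypothetical metric; parameters absent from the signature (`beta` below) are skipped and the rest are combined as a Cartesian product.
from inspect import signature
def fake_metric(y_true, y_pred, *, sample_weight=None, average="macro"):
    return 0.0
params = {"sample_weight": [None, [1, 2]], "average": ["micro", "macro"], "beta": [0.5]}
combos = [{}]
for param, values in params.items():
    if param not in signature(fake_metric).parameters:
        continue  # "beta" is not in the signature and is skipped
    combos = [{**kwargs, param: value} for kwargs in combos for value in values]
assert len(combos) == 4  # 2 sample_weight values x 2 average values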
def test_returned_value_consistency(name):
"""Ensure that the returned values of all metrics are consistent.
It can either be a float, a numpy array, or a tuple of floats or numpy arrays.
It should not be a numpy float64 or float32.
"""
rng = np.random.RandomState(0)
y_true = rng.randint(0, 2, size=(20,))
y_pred = rng.randint(0, 2, size=(20,))
if name in METRICS_REQUIRE_POSITIVE_Y:
y_true, y_pred = _require_positive_targets(y_true, y_pred)
if name in METRIC_UNDEFINED_BINARY:
y_true = rng.randint(0, 2, size=(20, 3))
y_pred = rng.randint(0, 2, size=(20, 3))
metric = ALL_METRICS[name]
score = metric(y_true, y_pred)
assert isinstance(score, (float, np.ndarray, tuple))
assert not isinstance(score, (np.float64, np.float32))
if isinstance(score, tuple):
assert all(isinstance(v, float) for v in score) or all(
isinstance(v, np.ndarray) for v in score
)
|
Ensure that the returned values of all metrics are consistent.
It can either be a float, a numpy array, or a tuple of floats or numpy arrays.
It should not be a numpy float64 or float32.
|
test_returned_value_consistency
|
python
|
scikit-learn/scikit-learn
|
sklearn/metrics/tests/test_common.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/metrics/tests/test_common.py
|
BSD-3-Clause
|
def test_nan_euclidean_support(pairwise_distances_func):
"""Check that `nan_euclidean` is lenient with `nan` values."""
X = [[0, 1], [1, np.nan], [2, 3], [3, 5]]
output = pairwise_distances_func(X, X, metric="nan_euclidean")
assert not np.isnan(output).any()
|
Check that `nan_euclidean` is lenient with `nan` values.
|
test_nan_euclidean_support
|
python
|
scikit-learn/scikit-learn
|
sklearn/metrics/tests/test_pairwise.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/metrics/tests/test_pairwise.py
|
BSD-3-Clause
|
def test_nan_euclidean_constant_input_argmin():
"""Check that the behavior of constant input is the same in the case of
full of nan vector and full of zero vector.
"""
X_nan = [[np.nan, np.nan], [np.nan, np.nan], [np.nan, np.nan]]
argmin_nan = pairwise_distances_argmin(X_nan, X_nan, metric="nan_euclidean")
X_const = [[0, 0], [0, 0], [0, 0]]
argmin_const = pairwise_distances_argmin(X_const, X_const, metric="nan_euclidean")
assert_allclose(argmin_nan, argmin_const)
|
Check that the behavior for constant input is the same for a vector full of
NaN values and a vector full of zeros.
|
test_nan_euclidean_constant_input_argmin
|
python
|
scikit-learn/scikit-learn
|
sklearn/metrics/tests/test_pairwise.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/metrics/tests/test_pairwise.py
|
BSD-3-Clause
|
def test_pairwise_dist_custom_metric_for_string(X, Y, expected_distance):
"""Check pairwise_distances with lists of strings as input."""
def dummy_string_similarity(x, y):
return np.abs(len(x) - len(y))
actual_distance = pairwise_distances(X=X, Y=Y, metric=dummy_string_similarity)
assert_allclose(actual_distance, expected_distance)
|
Check pairwise_distances with lists of strings as input.
|
test_pairwise_dist_custom_metric_for_string
|
python
|
scikit-learn/scikit-learn
|
sklearn/metrics/tests/test_pairwise.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/metrics/tests/test_pairwise.py
|
BSD-3-Clause
|
def test_pairwise_dist_custom_metric_for_bool():
"""Check that pairwise_distances does not convert boolean input to float
when using a custom metric.
"""
def dummy_bool_dist(v1, v2):
# dummy distance func using `&` and thus relying on the input data being boolean
return 1 - (v1 & v2).sum() / (v1 | v2).sum()
X = np.array([[1, 0, 0, 0], [1, 0, 1, 0], [1, 1, 1, 1]], dtype=bool)
expected_distance = np.array(
[
[0.0, 0.5, 0.75],
[0.5, 0.0, 0.5],
[0.75, 0.5, 0.0],
]
)
actual_distance = pairwise_distances(X=X, metric=dummy_bool_dist)
assert_allclose(actual_distance, expected_distance)
|
Check that pairwise_distances does not convert boolean input to float
when using a custom metric.
|
test_pairwise_dist_custom_metric_for_bool
|
python
|
scikit-learn/scikit-learn
|
sklearn/metrics/tests/test_pairwise.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/metrics/tests/test_pairwise.py
|
BSD-3-Clause
|
def _get_metric_params_list(metric: str, n_features: int, seed: int = 1):
"""Return list of dummy DistanceMetric kwargs for tests."""
    # Distinguish between cases to avoid computing unneeded data structures.
rng = np.random.RandomState(seed)
if metric == "minkowski":
minkowski_kwargs = [
dict(p=1.5),
dict(p=2),
dict(p=3),
dict(p=np.inf),
dict(p=3, w=rng.rand(n_features)),
]
return minkowski_kwargs
if metric == "seuclidean":
return [dict(V=rng.rand(n_features))]
# Case of: "euclidean", "manhattan", "chebyshev", "haversine" or any other metric.
# In those cases, no kwargs is needed.
return [{}]
|
Return list of dummy DistanceMetric kwargs for tests.
|
_get_metric_params_list
|
python
|
scikit-learn/scikit-learn
|
sklearn/metrics/tests/test_pairwise_distances_reduction.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/metrics/tests/test_pairwise_distances_reduction.py
|
BSD-3-Clause
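A hedged usage sketch (illustrative, assuming the helper above and its `import numpy as np` are in scope): "minkowski" yields several parameterisations, "seuclidean" needs a per-feature V vector, and plain metrics fall back to a single empty kwargs dict.

minkowski_kwargs = _get_metric_params_list("minkowski", n_features=5)
assert len(minkowski_kwargs) == 5 and minkowski_kwargs[0] == {"p": 1.5}

seuclidean_kwargs = _get_metric_params_list("seuclidean", n_features=5)
assert list(seuclidean_kwargs[0].keys()) == ["V"]

# "euclidean", "manhattan", etc. need no extra kwargs at all.
assert _get_metric_params_list("euclidean", n_features=5) == [{}]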
|
def assert_same_distances_for_common_neighbors(
query_idx,
dist_row_a,
dist_row_b,
indices_row_a,
indices_row_b,
rtol,
atol,
):
"""Check that the distances of common neighbors are equal up to tolerance.
This does not check if there are missing neighbors in either result set.
Missingness is handled by assert_no_missing_neighbors.
"""
# Compute a mapping from indices to distances for each result set and
# check that the computed neighbors with matching indices are within
# the expected distance tolerance.
indices_to_dist_a = dict(zip(indices_row_a, dist_row_a))
indices_to_dist_b = dict(zip(indices_row_b, dist_row_b))
common_indices = set(indices_row_a).intersection(set(indices_row_b))
for idx in common_indices:
dist_a = indices_to_dist_a[idx]
dist_b = indices_to_dist_b[idx]
try:
assert_allclose(dist_a, dist_b, rtol=rtol, atol=atol)
except AssertionError as e:
# Wrap exception to provide more context while also including
# the original exception with the computed absolute and
# relative differences.
raise AssertionError(
f"Query vector with index {query_idx} lead to different distances"
f" for common neighbor with index {idx}:"
f" dist_a={dist_a} vs dist_b={dist_b} (with atol={atol} and"
f" rtol={rtol})"
) from e
|
Check that the distances of common neighbors are equal up to tolerance.
This does not check if there are missing neighbors in either result set.
Missingness is handled by assert_no_missing_neighbors.
|
assert_same_distances_for_common_neighbors
|
python
|
scikit-learn/scikit-learn
|
sklearn/metrics/tests/test_pairwise_distances_reduction.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/metrics/tests/test_pairwise_distances_reduction.py
|
BSD-3-Clause
|
def assert_no_missing_neighbors(
query_idx,
dist_row_a,
dist_row_b,
indices_row_a,
indices_row_b,
threshold,
):
"""Compare the indices of neighbors in two results sets.
Any neighbor index with a distance below the precision threshold should
match one in the other result set. We ignore the last few neighbors beyond
the threshold as those can typically be missing due to rounding errors.
For radius queries, the threshold is just the radius minus the expected
precision level.
For k-NN queries, it is the maximum distance to the k-th neighbor minus the
expected precision level.
"""
mask_a = dist_row_a < threshold
mask_b = dist_row_b < threshold
missing_from_b = np.setdiff1d(indices_row_a[mask_a], indices_row_b)
missing_from_a = np.setdiff1d(indices_row_b[mask_b], indices_row_a)
if len(missing_from_a) > 0 or len(missing_from_b) > 0:
raise AssertionError(
f"Query vector with index {query_idx} lead to mismatched result indices:\n"
f"neighbors in b missing from a: {missing_from_a}\n"
f"neighbors in a missing from b: {missing_from_b}\n"
f"dist_row_a={dist_row_a}\n"
f"dist_row_b={dist_row_b}\n"
f"indices_row_a={indices_row_a}\n"
f"indices_row_b={indices_row_b}\n"
)
|
Compare the indices of neighbors in two result sets.
Any neighbor index with a distance below the precision threshold should
match one in the other result set. We ignore the last few neighbors beyond
the threshold as those can typically be missing due to rounding errors.
For radius queries, the threshold is just the radius minus the expected
precision level.
For k-NN queries, it is the maximum distance to the k-th neighbor minus the
expected precision level.
|
assert_no_missing_neighbors
|
python
|
scikit-learn/scikit-learn
|
sklearn/metrics/tests/test_pairwise_distances_reduction.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/metrics/tests/test_pairwise_distances_reduction.py
|
BSD-3-Clause
|
def assert_compatible_argkmin_results(
neighbors_dists_a,
neighbors_dists_b,
neighbors_indices_a,
neighbors_indices_b,
rtol=1e-5,
atol=1e-6,
):
"""Assert that argkmin results are valid up to rounding errors.
This function asserts that the results of argkmin queries are valid up to:
- rounding error tolerance on distance values;
    - permutations of indices for distance values that differ up to the
expected precision level.
Furthermore, the distances must be sorted.
To be used for testing neighbors queries on float32 datasets: we accept
neighbors rank swaps only if they are caused by small rounding errors on
the distance computations.
"""
is_sorted = lambda a: np.all(a[:-1] <= a[1:])
assert (
neighbors_dists_a.shape
== neighbors_dists_b.shape
== neighbors_indices_a.shape
== neighbors_indices_b.shape
), "Arrays of results have incompatible shapes."
n_queries, _ = neighbors_dists_a.shape
# Asserting equality results one row at a time
for query_idx in range(n_queries):
dist_row_a = neighbors_dists_a[query_idx]
dist_row_b = neighbors_dists_b[query_idx]
indices_row_a = neighbors_indices_a[query_idx]
indices_row_b = neighbors_indices_b[query_idx]
assert is_sorted(dist_row_a), f"Distances aren't sorted on row {query_idx}"
assert is_sorted(dist_row_b), f"Distances aren't sorted on row {query_idx}"
assert_same_distances_for_common_neighbors(
query_idx,
dist_row_a,
dist_row_b,
indices_row_a,
indices_row_b,
rtol,
atol,
)
        # Check that all neighbors with distances below the rounding error
# threshold have matching indices. The threshold is the distance to the
# k-th neighbors minus the expected precision level:
#
# (1 - rtol) * dist_k - atol
#
# Where dist_k is defined as the maximum distance to the kth-neighbor
# among the two result sets. This way of defining the threshold is
# stricter than taking the minimum of the two.
threshold = (1 - rtol) * np.maximum(
np.max(dist_row_a), np.max(dist_row_b)
) - atol
assert_no_missing_neighbors(
query_idx,
dist_row_a,
dist_row_b,
indices_row_a,
indices_row_b,
threshold,
)
|
Assert that argkmin results are valid up to rounding errors.
This function asserts that the results of argkmin queries are valid up to:
- rounding error tolerance on distance values;
- permutations of indices for distance values that differ up to the
expected precision level.
Furthermore, the distances must be sorted.
To be used for testing neighbors queries on float32 datasets: we accept
neighbors rank swaps only if they are caused by small rounding errors on
the distance computations.
|
assert_compatible_argkmin_results
|
python
|
scikit-learn/scikit-learn
|
sklearn/metrics/tests/test_pairwise_distances_reduction.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/metrics/tests/test_pairwise_distances_reduction.py
|
BSD-3-Clause
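A toy illustration (a sketch, assuming the three assertion helpers above and the test module's imports such as numpy and assert_allclose are in scope): two argkmin result sets whose second and third neighbors are swapped because their distances are nearly tied should still be accepted as compatible.

import numpy as np

dist_a = np.array([[1.0, 2.0, 2.0 + 1e-7]])
dist_b = np.array([[1.0, 2.0 + 1e-7, 2.0 + 2e-7]])
indices_a = np.array([[0, 1, 2]])
indices_b = np.array([[0, 2, 1]])  # last two neighbors swapped
# Passes: the swapped neighbors have distances within rtol/atol of each other
# and only the unambiguous first neighbor falls below the missingness threshold.
assert_compatible_argkmin_results(dist_a, dist_b, indices_a, indices_b)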
|
def assert_compatible_radius_results(
neighbors_dists_a,
neighbors_dists_b,
neighbors_indices_a,
neighbors_indices_b,
radius,
check_sorted=True,
rtol=1e-5,
atol=1e-6,
):
"""Assert that radius neighborhood results are valid up to:
- relative and absolute tolerance on computed distance values
    - permutations of indices for distance values that differ up to
a precision level
- missing or extra last elements if their distance is
close to the radius
To be used for testing neighbors queries on float32 datasets: we
accept neighbors rank swaps only if they are caused by small
rounding errors on the distance computations.
Input arrays must be sorted w.r.t distances.
"""
is_sorted = lambda a: np.all(a[:-1] <= a[1:])
assert (
len(neighbors_dists_a)
== len(neighbors_dists_b)
== len(neighbors_indices_a)
== len(neighbors_indices_b)
)
n_queries = len(neighbors_dists_a)
# Asserting equality of results one vector at a time
for query_idx in range(n_queries):
dist_row_a = neighbors_dists_a[query_idx]
dist_row_b = neighbors_dists_b[query_idx]
indices_row_a = neighbors_indices_a[query_idx]
indices_row_b = neighbors_indices_b[query_idx]
if check_sorted:
assert is_sorted(dist_row_a), f"Distances aren't sorted on row {query_idx}"
assert is_sorted(dist_row_b), f"Distances aren't sorted on row {query_idx}"
assert len(dist_row_a) == len(indices_row_a)
assert len(dist_row_b) == len(indices_row_b)
# Check that all distances are within the requested radius
if len(dist_row_a) > 0:
max_dist_a = np.max(dist_row_a)
assert max_dist_a <= radius, (
f"Largest returned distance {max_dist_a} not within requested"
f" radius {radius} on row {query_idx}"
)
if len(dist_row_b) > 0:
max_dist_b = np.max(dist_row_b)
assert max_dist_b <= radius, (
f"Largest returned distance {max_dist_b} not within requested"
f" radius {radius} on row {query_idx}"
)
assert_same_distances_for_common_neighbors(
query_idx,
dist_row_a,
dist_row_b,
indices_row_a,
indices_row_b,
rtol,
atol,
)
threshold = (1 - rtol) * radius - atol
assert_no_missing_neighbors(
query_idx,
dist_row_a,
dist_row_b,
indices_row_a,
indices_row_b,
threshold,
)
|
Assert that radius neighborhood results are valid up to:
- relative and absolute tolerance on computed distance values
- permutations of indices for distance values that differ up to
a precision level
- missing or extra last elements if their distance is
close to the radius
To be used for testing neighbors queries on float32 datasets: we
accept neighbors rank swaps only if they are caused by small
rounding errors on the distance computations.
Input arrays must be sorted w.r.t distances.
|
assert_compatible_radius_results
|
python
|
scikit-learn/scikit-learn
|
sklearn/metrics/tests/test_pairwise_distances_reduction.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/metrics/tests/test_pairwise_distances_reduction.py
|
BSD-3-Clause
|
def test_chunk_size_agnosticism(
global_random_seed,
Dispatcher,
dtype,
n_features=100,
):
"""Check that results do not depend on the chunk size."""
rng = np.random.RandomState(global_random_seed)
spread = 100
n_samples_X, n_samples_Y = rng.choice([97, 100, 101, 500], size=2, replace=False)
X = rng.rand(n_samples_X, n_features).astype(dtype) * spread
Y = rng.rand(n_samples_Y, n_features).astype(dtype) * spread
if Dispatcher is ArgKmin:
parameter = 10
check_parameters = {}
compute_parameters = {}
else:
radius = _non_trivial_radius(X=X, Y=Y, metric="euclidean")
parameter = radius
check_parameters = {"radius": radius}
compute_parameters = {"sort_results": True}
ref_dist, ref_indices = Dispatcher.compute(
X,
Y,
parameter,
chunk_size=256, # default
metric="manhattan",
return_distance=True,
**compute_parameters,
)
dist, indices = Dispatcher.compute(
X,
Y,
parameter,
chunk_size=41,
metric="manhattan",
return_distance=True,
**compute_parameters,
)
ASSERT_RESULT[(Dispatcher, dtype)](
ref_dist, dist, ref_indices, indices, **check_parameters
)
|
Check that results do not depend on the chunk size.
|
test_chunk_size_agnosticism
|
python
|
scikit-learn/scikit-learn
|
sklearn/metrics/tests/test_pairwise_distances_reduction.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/metrics/tests/test_pairwise_distances_reduction.py
|
BSD-3-Clause
|
def test_n_threads_agnosticism(
global_random_seed,
Dispatcher,
dtype,
n_features=100,
):
"""Check that results do not depend on the number of threads."""
rng = np.random.RandomState(global_random_seed)
n_samples_X, n_samples_Y = rng.choice([97, 100, 101, 500], size=2, replace=False)
spread = 100
X = rng.rand(n_samples_X, n_features).astype(dtype) * spread
Y = rng.rand(n_samples_Y, n_features).astype(dtype) * spread
if Dispatcher is ArgKmin:
parameter = 10
check_parameters = {}
compute_parameters = {}
else:
radius = _non_trivial_radius(X=X, Y=Y, metric="euclidean")
parameter = radius
check_parameters = {"radius": radius}
compute_parameters = {"sort_results": True}
ref_dist, ref_indices = Dispatcher.compute(
X,
Y,
parameter,
chunk_size=25, # make sure we use multiple threads
return_distance=True,
**compute_parameters,
)
with _get_threadpool_controller().limit(limits=1, user_api="openmp"):
dist, indices = Dispatcher.compute(
X,
Y,
parameter,
chunk_size=25,
return_distance=True,
**compute_parameters,
)
ASSERT_RESULT[(Dispatcher, dtype)](
ref_dist, dist, ref_indices, indices, **check_parameters
)
|
Check that results do not depend on the number of threads.
|
test_n_threads_agnosticism
|
python
|
scikit-learn/scikit-learn
|
sklearn/metrics/tests/test_pairwise_distances_reduction.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/metrics/tests/test_pairwise_distances_reduction.py
|
BSD-3-Clause
|
def test_format_agnosticism(
global_random_seed,
Dispatcher,
dtype,
csr_container,
):
"""Check that results do not depend on the format (dense, sparse) of the input."""
rng = np.random.RandomState(global_random_seed)
spread = 100
n_samples, n_features = 100, 100
X = rng.rand(n_samples, n_features).astype(dtype) * spread
Y = rng.rand(n_samples, n_features).astype(dtype) * spread
X_csr = csr_container(X)
Y_csr = csr_container(Y)
if Dispatcher is ArgKmin:
parameter = 10
check_parameters = {}
compute_parameters = {}
else:
        # Adjusting the radius to ensure that the expected result is neither
# trivially empty nor too large.
radius = _non_trivial_radius(X=X, Y=Y, metric="euclidean")
parameter = radius
check_parameters = {"radius": radius}
compute_parameters = {"sort_results": True}
dist_dense, indices_dense = Dispatcher.compute(
X,
Y,
parameter,
chunk_size=50,
return_distance=True,
**compute_parameters,
)
for _X, _Y in itertools.product((X, X_csr), (Y, Y_csr)):
if _X is X and _Y is Y:
continue
dist, indices = Dispatcher.compute(
_X,
_Y,
parameter,
chunk_size=50,
return_distance=True,
**compute_parameters,
)
ASSERT_RESULT[(Dispatcher, dtype)](
dist_dense,
dist,
indices_dense,
indices,
**check_parameters,
)
|
Check that results do not depend on the format (dense, sparse) of the input.
|
test_format_agnosticism
|
python
|
scikit-learn/scikit-learn
|
sklearn/metrics/tests/test_pairwise_distances_reduction.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/metrics/tests/test_pairwise_distances_reduction.py
|
BSD-3-Clause
|
def test_strategies_consistency(
global_random_seed,
global_dtype,
Dispatcher,
n_features=10,
):
"""Check that the results do not depend on the strategy used."""
rng = np.random.RandomState(global_random_seed)
metric = rng.choice(
np.array(
[
"euclidean",
"minkowski",
"manhattan",
"haversine",
],
dtype=object,
)
)
n_samples_X, n_samples_Y = rng.choice([97, 100, 101, 500], size=2, replace=False)
spread = 100
X = rng.rand(n_samples_X, n_features).astype(global_dtype) * spread
Y = rng.rand(n_samples_Y, n_features).astype(global_dtype) * spread
# Haversine distance only accepts 2D data
if metric == "haversine":
X = np.ascontiguousarray(X[:, :2])
Y = np.ascontiguousarray(Y[:, :2])
if Dispatcher is ArgKmin:
parameter = 10
check_parameters = {}
compute_parameters = {}
else:
radius = _non_trivial_radius(X=X, Y=Y, metric=metric)
parameter = radius
check_parameters = {"radius": radius}
compute_parameters = {"sort_results": True}
dist_par_X, indices_par_X = Dispatcher.compute(
X,
Y,
parameter,
metric=metric,
# Taking the first
metric_kwargs=_get_metric_params_list(
metric, n_features, seed=global_random_seed
)[0],
# To be sure to use parallelization
chunk_size=n_samples_X // 4,
strategy="parallel_on_X",
return_distance=True,
**compute_parameters,
)
dist_par_Y, indices_par_Y = Dispatcher.compute(
X,
Y,
parameter,
metric=metric,
# Taking the first
metric_kwargs=_get_metric_params_list(
metric, n_features, seed=global_random_seed
)[0],
# To be sure to use parallelization
chunk_size=n_samples_Y // 4,
strategy="parallel_on_Y",
return_distance=True,
**compute_parameters,
)
ASSERT_RESULT[(Dispatcher, global_dtype)](
dist_par_X, dist_par_Y, indices_par_X, indices_par_Y, **check_parameters
)
|
Check that the results do not depend on the strategy used.
|
test_strategies_consistency
|
python
|
scikit-learn/scikit-learn
|
sklearn/metrics/tests/test_pairwise_distances_reduction.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/metrics/tests/test_pairwise_distances_reduction.py
|
BSD-3-Clause
|
def test_memmap_backed_data(
metric,
Dispatcher,
dtype,
):
"""Check that the results do not depend on the datasets writability."""
rng = np.random.RandomState(0)
spread = 100
n_samples, n_features = 128, 10
X = rng.rand(n_samples, n_features).astype(dtype) * spread
Y = rng.rand(n_samples, n_features).astype(dtype) * spread
# Create read only datasets
X_mm, Y_mm = create_memmap_backed_data([X, Y])
if Dispatcher is ArgKmin:
parameter = 10
check_parameters = {}
compute_parameters = {}
else:
        # Scaling the radius slightly with the number of dimensions
radius = 10 ** np.log(n_features)
parameter = radius
check_parameters = {"radius": radius}
compute_parameters = {"sort_results": True}
ref_dist, ref_indices = Dispatcher.compute(
X,
Y,
parameter,
metric=metric,
return_distance=True,
**compute_parameters,
)
dist_mm, indices_mm = Dispatcher.compute(
X_mm,
Y_mm,
parameter,
metric=metric,
return_distance=True,
**compute_parameters,
)
ASSERT_RESULT[(Dispatcher, dtype)](
ref_dist, dist_mm, ref_indices, indices_mm, **check_parameters
)
|
Check that the results do not depend on the datasets' writability.
|
test_memmap_backed_data
|
python
|
scikit-learn/scikit-learn
|
sklearn/metrics/tests/test_pairwise_distances_reduction.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/metrics/tests/test_pairwise_distances_reduction.py
|
BSD-3-Clause
|
def make_prediction(dataset=None, binary=False):
"""Make some classification predictions on a toy dataset using a SVC
If binary is True restrict to a binary classification problem instead of a
multiclass classification problem
"""
if dataset is None:
# import some data to play with
dataset = datasets.load_iris()
X = dataset.data
y = dataset.target
if binary:
# restrict to a binary classification task
X, y = X[y < 2], y[y < 2]
n_samples, n_features = X.shape
p = np.arange(n_samples)
rng = check_random_state(37)
rng.shuffle(p)
X, y = X[p], y[p]
half = int(n_samples / 2)
# add noisy features to make the problem harder and avoid perfect results
rng = np.random.RandomState(0)
X = np.c_[X, rng.randn(n_samples, 200 * n_features)]
# run classifier, get class probabilities and label predictions
clf = svm.SVC(kernel="linear", probability=True, random_state=0)
y_score = clf.fit(X[:half], y[:half]).predict_proba(X[half:])
if binary:
# only interested in probabilities of the positive case
# XXX: do we really want a special API for the binary case?
y_score = y_score[:, 1]
y_pred = clf.predict(X[half:])
y_true = y[half:]
return y_true, y_pred, y_score
|
Make some classification predictions on a toy dataset using an SVC.
If binary is True, restrict to a binary classification problem instead of a
multiclass classification problem.
|
make_prediction
|
python
|
scikit-learn/scikit-learn
|
sklearn/metrics/tests/test_ranking.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/metrics/tests/test_ranking.py
|
BSD-3-Clause
|
def _auc(y_true, y_score):
"""Alternative implementation to check for correctness of
`roc_auc_score`."""
pos_label = np.unique(y_true)[1]
# Count the number of times positive samples are correctly ranked above
# negative samples.
pos = y_score[y_true == pos_label]
neg = y_score[y_true != pos_label]
diff_matrix = pos.reshape(1, -1) - neg.reshape(-1, 1)
n_correct = np.sum(diff_matrix > 0)
return n_correct / float(len(pos) * len(neg))
|
Alternative implementation to check for correctness of
`roc_auc_score`.
|
_auc
|
python
|
scikit-learn/scikit-learn
|
sklearn/metrics/tests/test_ranking.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/metrics/tests/test_ranking.py
|
BSD-3-Clause
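A minimal sanity check (not from the test file, assuming `_auc` is in scope) contrasting the pairwise-ranking implementation above with `sklearn.metrics.roc_auc_score`; both count the fraction of (positive, negative) pairs that are ranked correctly.

import numpy as np
from sklearn.metrics import roc_auc_score

y_true = np.array([0, 0, 1, 1])
y_score = np.array([0.1, 0.4, 0.35, 0.8])
# 3 of the 4 (positive, negative) pairs are ranked correctly -> 0.75
assert np.isclose(_auc(y_true, y_score), roc_auc_score(y_true, y_score))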
|
def _average_precision(y_true, y_score):
"""Alternative implementation to check for correctness of
`average_precision_score`.
Note that this implementation fails on some edge cases.
For example, for constant predictions e.g. [0.5, 0.5, 0.5],
y_true = [1, 0, 0] returns an average precision of 0.33...
but y_true = [0, 0, 1] returns 1.0.
"""
pos_label = np.unique(y_true)[1]
n_pos = np.sum(y_true == pos_label)
order = np.argsort(y_score)[::-1]
y_score = y_score[order]
y_true = y_true[order]
score = 0
for i in range(len(y_score)):
if y_true[i] == pos_label:
# Compute precision up to document i
            # i.e., percentage of relevant documents up to document i.
prec = 0
for j in range(0, i + 1):
if y_true[j] == pos_label:
prec += 1.0
prec /= i + 1.0
score += prec
return score / n_pos
|
Alternative implementation to check for correctness of
`average_precision_score`.
Note that this implementation fails on some edge cases.
For example, for constant predictions e.g. [0.5, 0.5, 0.5],
y_true = [1, 0, 0] returns an average precision of 0.33...
but y_true = [0, 0, 1] returns 1.0.
|
_average_precision
|
python
|
scikit-learn/scikit-learn
|
sklearn/metrics/tests/test_ranking.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/metrics/tests/test_ranking.py
|
BSD-3-Clause
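A short sketch of the edge case mentioned in the docstring (assuming `_average_precision` and numpy are in scope): with constant scores, the result depends on where the positive label sits in `y_true`.

import numpy as np

y_score = np.array([0.5, 0.5, 0.5])
# With all-tied scores, argsort(...)[::-1] ranks the last sample first, so the
# position of the positive label in y_true changes the computed precision.
assert np.isclose(_average_precision(np.array([1, 0, 0]), y_score), 1 / 3)
assert np.isclose(_average_precision(np.array([0, 0, 1]), y_score), 1.0)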
|
def _average_precision_slow(y_true, y_score):
"""A second alternative implementation of average precision that closely
follows the Wikipedia article's definition (see References). This should
give identical results as `average_precision_score` for all inputs.
References
----------
.. [1] `Wikipedia entry for the Average precision
<https://en.wikipedia.org/wiki/Average_precision>`_
"""
precision, recall, threshold = precision_recall_curve(y_true, y_score)
precision = list(reversed(precision))
recall = list(reversed(recall))
average_precision = 0
for i in range(1, len(precision)):
average_precision += precision[i] * (recall[i] - recall[i - 1])
return average_precision
|
A second alternative implementation of average precision that closely
follows the Wikipedia article's definition (see References). This should
give identical results as `average_precision_score` for all inputs.
References
----------
.. [1] `Wikipedia entry for the Average precision
<https://en.wikipedia.org/wiki/Average_precision>`_
|
_average_precision_slow
|
python
|
scikit-learn/scikit-learn
|
sklearn/metrics/tests/test_ranking.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/metrics/tests/test_ranking.py
|
BSD-3-Clause
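A quick cross-check (illustrative, assuming `_average_precision_slow` and the test module's import of `precision_recall_curve` are in scope): on a small example the step-wise sum matches `sklearn.metrics.average_precision_score`.

import numpy as np
from sklearn.metrics import average_precision_score

y_true = np.array([0, 0, 1, 1])
y_score = np.array([0.1, 0.4, 0.35, 0.8])
# Both compute sum_i P(i) * (R(i) - R(i-1)) over the precision-recall curve,
# which is 1.0 * 0.5 + (2 / 3) * 0.5 = 0.8333... here.
assert np.isclose(
    _average_precision_slow(y_true, y_score),
    average_precision_score(y_true, y_score),
)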
|
def _partial_roc_auc_score(y_true, y_predict, max_fpr):
"""Alternative implementation to check for correctness of `roc_auc_score`
with `max_fpr` set.
"""
def _partial_roc(y_true, y_predict, max_fpr):
fpr, tpr, _ = roc_curve(y_true, y_predict)
new_fpr = fpr[fpr <= max_fpr]
new_fpr = np.append(new_fpr, max_fpr)
new_tpr = tpr[fpr <= max_fpr]
idx_out = np.argmax(fpr > max_fpr)
idx_in = idx_out - 1
x_interp = [fpr[idx_in], fpr[idx_out]]
y_interp = [tpr[idx_in], tpr[idx_out]]
new_tpr = np.append(new_tpr, np.interp(max_fpr, x_interp, y_interp))
return (new_fpr, new_tpr)
new_fpr, new_tpr = _partial_roc(y_true, y_predict, max_fpr)
partial_auc = auc(new_fpr, new_tpr)
# Formula (5) from McClish 1989
fpr1 = 0
fpr2 = max_fpr
min_area = 0.5 * (fpr2 - fpr1) * (fpr2 + fpr1)
max_area = fpr2 - fpr1
return 0.5 * (1 + (partial_auc - min_area) / (max_area - min_area))
|
Alternative implementation to check for correctness of `roc_auc_score`
with `max_fpr` set.
|
_partial_roc_auc_score
|
python
|
scikit-learn/scikit-learn
|
sklearn/metrics/tests/test_ranking.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/metrics/tests/test_ranking.py
|
BSD-3-Clause
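A hedged sketch (assuming `_partial_roc_auc_score` and the test module's imports of `roc_curve` and `auc` are in scope): the McClish-standardised partial AUC above should agree with `roc_auc_score(..., max_fpr=...)`.

import numpy as np
from sklearn.metrics import roc_auc_score

y_true = np.array([0, 0, 1, 1])
y_score = np.array([0.1, 0.4, 0.35, 0.8])
expected = roc_auc_score(y_true, y_score, max_fpr=0.5)  # about 0.667 here
assert np.isclose(_partial_roc_auc_score(y_true, y_score, max_fpr=0.5), expected)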
|
def test_precision_recall_curve_drop_intermediate():
"""Check the behaviour of the `drop_intermediate` parameter."""
y_true = [0, 0, 0, 0, 1, 1]
y_score = [0.0, 0.2, 0.5, 0.6, 0.7, 1.0]
precision, recall, thresholds = precision_recall_curve(
y_true, y_score, drop_intermediate=True
)
assert_allclose(thresholds, [0.0, 0.7, 1.0])
# Test dropping thresholds with repeating scores
y_true = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
y_score = [0.0, 0.1, 0.6, 0.6, 0.7, 0.8, 0.9, 0.6, 0.7, 0.8, 0.9, 0.9, 1.0]
precision, recall, thresholds = precision_recall_curve(
y_true, y_score, drop_intermediate=True
)
assert_allclose(thresholds, [0.0, 0.6, 0.7, 0.8, 0.9, 1.0])
# Test all false keeps only endpoints
y_true = [0, 0, 0, 0]
y_score = [0.0, 0.1, 0.2, 0.3]
precision, recall, thresholds = precision_recall_curve(
y_true, y_score, drop_intermediate=True
)
assert_allclose(thresholds, [0.0, 0.3])
# Test all true keeps all thresholds
y_true = [1, 1, 1, 1]
y_score = [0.0, 0.1, 0.2, 0.3]
precision, recall, thresholds = precision_recall_curve(
y_true, y_score, drop_intermediate=True
)
assert_allclose(thresholds, [0.0, 0.1, 0.2, 0.3])
|
Check the behaviour of the `drop_intermediate` parameter.
|
test_precision_recall_curve_drop_intermediate
|
python
|
scikit-learn/scikit-learn
|
sklearn/metrics/tests/test_ranking.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/metrics/tests/test_ranking.py
|
BSD-3-Clause
|
def _my_lrap(y_true, y_score):
"""Simple implementation of label ranking average precision"""
check_consistent_length(y_true, y_score)
y_true = check_array(y_true)
y_score = check_array(y_score)
n_samples, n_labels = y_true.shape
score = np.empty((n_samples,))
for i in range(n_samples):
        # The best rank corresponds to 1. Ranks higher than 1 are worse.
        # The best inverse ranking corresponds to n_labels.
unique_rank, inv_rank = np.unique(y_score[i], return_inverse=True)
n_ranks = unique_rank.size
rank = n_ranks - inv_rank
        # Ranks need to be corrected to take ties into account,
        # e.g. two labels tied at rank 1 are both assigned rank 2.
corr_rank = np.bincount(rank, minlength=n_ranks + 1).cumsum()
rank = corr_rank[rank]
relevant = y_true[i].nonzero()[0]
if relevant.size == 0 or relevant.size == n_labels:
score[i] = 1
continue
score[i] = 0.0
for label in relevant:
            # Let's count the number of relevant labels with better rank
            # (smaller rank).
n_ranked_above = sum(rank[r] <= rank[label] for r in relevant)
# Weight by the rank of the actual label
score[i] += n_ranked_above / rank[label]
score[i] /= relevant.size
return score.mean()
|
Simple implementation of label ranking average precision
|
_my_lrap
|
python
|
scikit-learn/scikit-learn
|
sklearn/metrics/tests/test_ranking.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/metrics/tests/test_ranking.py
|
BSD-3-Clause
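An illustrative comparison (assuming `_my_lrap` and the test module's imports of `check_array` and `check_consistent_length` are in scope) against `sklearn.metrics.label_ranking_average_precision_score` on a small multilabel example.

import numpy as np
from sklearn.metrics import label_ranking_average_precision_score

y_true = np.array([[1, 0, 0], [0, 0, 1]])
y_score = np.array([[0.75, 0.5, 1.0], [1.0, 0.2, 0.1]])
# Both give (1/2 + 1/3) / 2 = 0.416... on this example.
assert np.isclose(
    _my_lrap(y_true, y_score),
    label_ranking_average_precision_score(y_true, y_score),
)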
|
def test_ndcg_negative_ndarray_error():
"""Check `ndcg_score` exception when `y_true` contains negative values."""
y_true = np.array([[-0.89, -0.53, -0.47, 0.39, 0.56]])
y_score = np.array([[0.07, 0.31, 0.75, 0.33, 0.27]])
expected_message = "ndcg_score should not be used on negative y_true values"
with pytest.raises(ValueError, match=expected_message):
ndcg_score(y_true, y_score)
|
Check `ndcg_score` exception when `y_true` contains negative values.
|
test_ndcg_negative_ndarray_error
|
python
|
scikit-learn/scikit-learn
|
sklearn/metrics/tests/test_ranking.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/metrics/tests/test_ranking.py
|
BSD-3-Clause
|
def test_ndcg_error_single_document():
"""Check that we raise an informative error message when trying to
compute NDCG with a single document."""
err_msg = (
"Computing NDCG is only meaningful when there is more than 1 document. "
"Got 1 instead."
)
with pytest.raises(ValueError, match=err_msg):
ndcg_score([[1]], [[1]])
|
Check that we raise an informative error message when trying to
compute NDCG with a single document.
|
test_ndcg_error_single_document
|
python
|
scikit-learn/scikit-learn
|
sklearn/metrics/tests/test_ranking.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/metrics/tests/test_ranking.py
|
BSD-3-Clause
|
def test_top_k_accuracy_score_multiclass_with_labels(
y_true, true_score, labels, labels_as_ndarray
):
"""Test when labels and y_score are multiclass."""
if labels_as_ndarray:
labels = np.asarray(labels)
y_score = np.array(
[
[0.4, 0.3, 0.2, 0.1],
[0.1, 0.3, 0.4, 0.2],
[0.4, 0.1, 0.2, 0.3],
[0.3, 0.2, 0.4, 0.1],
]
)
score = top_k_accuracy_score(y_true, y_score, k=2, labels=labels)
assert score == pytest.approx(true_score)
|
Test when labels and y_score are multiclass.
|
test_top_k_accuracy_score_multiclass_with_labels
|
python
|
scikit-learn/scikit-learn
|
sklearn/metrics/tests/test_ranking.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/metrics/tests/test_ranking.py
|
BSD-3-Clause
|
def test_ranking_metric_pos_label_types(metric, classes):
"""Check that the metric works with different types of `pos_label`.
We can expect `pos_label` to be a bool, an integer, a float, a string.
No error should be raised for those types.
"""
rng = np.random.RandomState(42)
n_samples, pos_label = 10, classes[-1]
y_true = rng.choice(classes, size=n_samples, replace=True)
y_proba = rng.rand(n_samples)
result = metric(y_true, y_proba, pos_label=pos_label)
if isinstance(result, float):
assert not np.isnan(result)
else:
metric_1, metric_2, thresholds = result
assert not np.isnan(metric_1).any()
assert not np.isnan(metric_2).any()
assert not np.isnan(thresholds).any()
|
Check that the metric works with different types of `pos_label`.
We can expect `pos_label` to be a bool, an integer, a float, a string.
No error should be raised for those types.
|
test_ranking_metric_pos_label_types
|
python
|
scikit-learn/scikit-learn
|
sklearn/metrics/tests/test_ranking.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/metrics/tests/test_ranking.py
|
BSD-3-Clause
|
def test_roc_curve_with_probablity_estimates(global_random_seed):
"""Check that thresholds do not exceed 1.0 when `y_score` is a probability
estimate.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/26193
"""
rng = np.random.RandomState(global_random_seed)
y_true = rng.randint(0, 2, size=10)
y_score = rng.rand(10)
_, _, thresholds = roc_curve(y_true, y_score)
assert np.isinf(thresholds[0])
|
Check that thresholds do not exceed 1.0 when `y_score` is a probability
estimate.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/26193
|
test_roc_curve_with_probablity_estimates
|
python
|
scikit-learn/scikit-learn
|
sklearn/metrics/tests/test_ranking.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/metrics/tests/test_ranking.py
|
BSD-3-Clause
|
def test_multimetric_scorer_exception_handling(raise_exc):
"""Check that the calling of the `_MultimetricScorer` returns
exception messages in the result dict for the failing scorers
in case of `raise_exc` is `False` and if `raise_exc` is `True`,
then the proper exception is raised.
"""
scorers = {
"failing_1": "neg_mean_squared_log_error",
"non_failing": "neg_median_absolute_error",
"failing_2": "neg_mean_squared_log_error",
}
X, y = make_classification(
n_samples=50, n_features=2, n_redundant=0, random_state=0
)
# neg_mean_squared_log_error fails if y contains values less than or equal to -1
y *= -1
clf = DecisionTreeClassifier().fit(X, y)
scorer_dict = _check_multimetric_scoring(clf, scorers)
multi_scorer = _MultimetricScorer(scorers=scorer_dict, raise_exc=raise_exc)
error_msg = (
"Mean Squared Logarithmic Error cannot be used when "
"targets contain values less than or equal to -1."
)
if raise_exc:
with pytest.raises(ValueError, match=error_msg):
multi_scorer(clf, X, y)
else:
result = multi_scorer(clf, X, y)
exception_message_1 = result["failing_1"]
score = result["non_failing"]
exception_message_2 = result["failing_2"]
assert isinstance(exception_message_1, str) and error_msg in exception_message_1
assert isinstance(score, float)
assert isinstance(exception_message_2, str) and error_msg in exception_message_2
|
Check that calling the `_MultimetricScorer` returns exception messages
in the result dict for the failing scorers when `raise_exc` is `False`,
and that the proper exception is raised when `raise_exc` is `True`.
|
test_multimetric_scorer_exception_handling
|
python
|
scikit-learn/scikit-learn
|
sklearn/metrics/tests/test_score_objects.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/metrics/tests/test_score_objects.py
|
BSD-3-Clause
|
def string_labeled_classification_problem():
"""Train a classifier on binary problem with string target.
The classifier is trained on a binary classification problem where the
minority class of interest has a string label that is intentionally not the
greatest class label using the lexicographic order. In this case, "cancer"
is the positive label, and `classifier.classes_` is
`["cancer", "not cancer"]`.
In addition, the dataset is imbalanced to better identify problems when
using non-symmetric performance metrics such as f1-score, average precision
and so on.
Returns
-------
classifier : estimator object
Trained classifier on the binary problem.
X_test : ndarray of shape (n_samples, n_features)
Data to be used as testing set in tests.
y_test : ndarray of shape (n_samples,), dtype=object
Binary target where labels are strings.
y_pred : ndarray of shape (n_samples,), dtype=object
Prediction of `classifier` when predicting for `X_test`.
y_pred_proba : ndarray of shape (n_samples, 2), dtype=np.float64
Probabilities of `classifier` when predicting for `X_test`.
y_pred_decision : ndarray of shape (n_samples,), dtype=np.float64
Decision function values of `classifier` when predicting on `X_test`.
"""
from sklearn.datasets import load_breast_cancer
from sklearn.utils import shuffle
X, y = load_breast_cancer(return_X_y=True)
    # create a highly imbalanced classification task
idx_positive = np.flatnonzero(y == 1)
idx_negative = np.flatnonzero(y == 0)
idx_selected = np.hstack([idx_negative, idx_positive[:25]])
X, y = X[idx_selected], y[idx_selected]
X, y = shuffle(X, y, random_state=42)
# only use 2 features to make the problem even harder
X = X[:, :2]
y = np.array(["cancer" if c == 1 else "not cancer" for c in y], dtype=object)
X_train, X_test, y_train, y_test = train_test_split(
X,
y,
stratify=y,
random_state=0,
)
classifier = LogisticRegression().fit(X_train, y_train)
y_pred = classifier.predict(X_test)
y_pred_proba = classifier.predict_proba(X_test)
y_pred_decision = classifier.decision_function(X_test)
return classifier, X_test, y_test, y_pred, y_pred_proba, y_pred_decision
|
Train a classifier on a binary problem with a string target.
The classifier is trained on a binary classification problem where the
minority class of interest has a string label that is intentionally not the
greatest class label in lexicographic order. In this case, "cancer"
is the positive label, and `classifier.classes_` is
`["cancer", "not cancer"]`.
In addition, the dataset is imbalanced to better identify problems when
using non-symmetric performance metrics such as f1-score, average precision
and so on.
Returns
-------
classifier : estimator object
Trained classifier on the binary problem.
X_test : ndarray of shape (n_samples, n_features)
Data to be used as testing set in tests.
y_test : ndarray of shape (n_samples,), dtype=object
Binary target where labels are strings.
y_pred : ndarray of shape (n_samples,), dtype=object
Prediction of `classifier` when predicting for `X_test`.
y_pred_proba : ndarray of shape (n_samples, 2), dtype=np.float64
Probabilities of `classifier` when predicting for `X_test`.
y_pred_decision : ndarray of shape (n_samples,), dtype=np.float64
Decision function values of `classifier` when predicting on `X_test`.
|
string_labeled_classification_problem
|
python
|
scikit-learn/scikit-learn
|
sklearn/metrics/tests/test_score_objects.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/metrics/tests/test_score_objects.py
|
BSD-3-Clause
|
def test_scorer_set_score_request_raises(name):
"""Test that set_score_request is only available when feature flag is on."""
# Make sure they expose the routing methods.
scorer = get_scorer(name)
with pytest.raises(RuntimeError, match="This method is only available"):
scorer.set_score_request()
|
Test that set_score_request is only available when feature flag is on.
|
test_scorer_set_score_request_raises
|
python
|
scikit-learn/scikit-learn
|
sklearn/metrics/tests/test_score_objects.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/metrics/tests/test_score_objects.py
|
BSD-3-Clause
|
def test_scorer_metadata_request(name):
"""Testing metadata requests for scorers.
This test checks many small things in a large test, to reduce the
boilerplate required for each section.
"""
# Make sure they expose the routing methods.
scorer = get_scorer(name)
assert hasattr(scorer, "set_score_request")
assert hasattr(scorer, "get_metadata_routing")
# Check that by default no metadata is requested.
assert_request_is_empty(scorer.get_metadata_routing())
weighted_scorer = scorer.set_score_request(sample_weight=True)
# set_score_request should mutate the instance, rather than returning a
# new instance
assert weighted_scorer is scorer
# make sure the scorer doesn't request anything on methods other than
# `score`, and that the requested value on `score` is correct.
assert_request_is_empty(weighted_scorer.get_metadata_routing(), exclude="score")
assert (
weighted_scorer.get_metadata_routing().score.requests["sample_weight"] is True
)
# make sure putting the scorer in a router doesn't request anything by
# default
router = MetadataRouter(owner="test").add(
scorer=get_scorer(name),
method_mapping=MethodMapping().add(caller="score", callee="score"),
)
# make sure `sample_weight` is refused if passed.
with pytest.raises(TypeError, match="got unexpected argument"):
router.validate_metadata(params={"sample_weight": 1}, method="score")
# make sure `sample_weight` is not routed even if passed.
routed_params = router.route_params(params={"sample_weight": 1}, caller="score")
assert not routed_params.scorer.score
# make sure putting weighted_scorer in a router requests sample_weight
router = MetadataRouter(owner="test").add(
scorer=weighted_scorer,
method_mapping=MethodMapping().add(caller="score", callee="score"),
)
router.validate_metadata(params={"sample_weight": 1}, method="score")
routed_params = router.route_params(params={"sample_weight": 1}, caller="score")
assert list(routed_params.scorer.score.keys()) == ["sample_weight"]
|
Testing metadata requests for scorers.
This test checks many small things in a large test, to reduce the
boilerplate required for each section.
|
test_scorer_metadata_request
|
python
|
scikit-learn/scikit-learn
|
sklearn/metrics/tests/test_score_objects.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/metrics/tests/test_score_objects.py
|
BSD-3-Clause
|
def test_metadata_kwarg_conflict():
"""This test makes sure the right warning is raised if the user passes
some metadata both as a constructor to make_scorer, and during __call__.
"""
X, y = make_classification(
n_classes=3, n_informative=3, n_samples=20, random_state=0
)
lr = LogisticRegression().fit(X, y)
scorer = make_scorer(
roc_auc_score,
response_method="predict_proba",
multi_class="ovo",
labels=lr.classes_,
)
with pytest.warns(UserWarning, match="already set as kwargs"):
scorer.set_score_request(labels=True)
with pytest.warns(UserWarning, match="There is an overlap"):
scorer(lr, X, y, labels=lr.classes_)
|
This test makes sure the right warning is raised if the user passes
some metadata both as a constructor argument to make_scorer and during __call__.
|
test_metadata_kwarg_conflict
|
python
|
scikit-learn/scikit-learn
|
sklearn/metrics/tests/test_score_objects.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/metrics/tests/test_score_objects.py
|
BSD-3-Clause
|
def test_PassthroughScorer_set_score_request():
"""Test that _PassthroughScorer.set_score_request adds the correct metadata request
on itself and doesn't change its estimator's routing."""
est = LogisticRegression().set_score_request(sample_weight="estimator_weights")
# make a `_PassthroughScorer` with `check_scoring`:
scorer = check_scoring(est, None)
assert (
scorer.get_metadata_routing().score.requests["sample_weight"]
== "estimator_weights"
)
scorer.set_score_request(sample_weight="scorer_weights")
assert (
scorer.get_metadata_routing().score.requests["sample_weight"]
== "scorer_weights"
)
# making sure changing the passthrough object doesn't affect the estimator.
assert (
est.get_metadata_routing().score.requests["sample_weight"]
== "estimator_weights"
)
|
Test that _PassthroughScorer.set_score_request adds the correct metadata request
on itself and doesn't change its estimator's routing.
|
test_PassthroughScorer_set_score_request
|
python
|
scikit-learn/scikit-learn
|
sklearn/metrics/tests/test_score_objects.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/metrics/tests/test_score_objects.py
|
BSD-3-Clause
|
def test_PassthroughScorer_set_score_request_raises_without_routing_enabled():
"""Test that _PassthroughScorer.set_score_request raises if metadata routing is
disabled."""
scorer = check_scoring(LogisticRegression(), None)
msg = "This method is only available when metadata routing is enabled."
with pytest.raises(RuntimeError, match=msg):
scorer.set_score_request(sample_weight="my_weights")
|
Test that _PassthroughScorer.set_score_request raises if metadata routing is
disabled.
|
test_PassthroughScorer_set_score_request_raises_without_routing_enabled
|
python
|
scikit-learn/scikit-learn
|
sklearn/metrics/tests/test_score_objects.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/metrics/tests/test_score_objects.py
|
BSD-3-Clause
|
def test_get_scorer_multilabel_indicator():
"""Check that our scorer deal with multi-label indicator matrices.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/26817
"""
X, Y = make_multilabel_classification(n_samples=72, n_classes=3, random_state=0)
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, random_state=0)
estimator = KNeighborsClassifier().fit(X_train, Y_train)
score = get_scorer("average_precision")(estimator, X_test, Y_test)
assert score > 0.8
|
Check that our scorer deals with multi-label indicator matrices.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/26817
|
test_get_scorer_multilabel_indicator
|
python
|
scikit-learn/scikit-learn
|
sklearn/metrics/tests/test_score_objects.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/metrics/tests/test_score_objects.py
|
BSD-3-Clause
|
def test_get_scorer_multimetric(pass_estimator):
"""Check that check_scoring is compatible with multi-metric configurations."""
X, y = make_classification(n_samples=150, n_features=10, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = LogisticRegression(random_state=0)
if pass_estimator:
check_scoring_ = check_scoring
else:
check_scoring_ = partial(check_scoring, clf)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
y_proba = clf.predict_proba(X_test)
expected_results = {
"r2": r2_score(y_test, y_pred),
"roc_auc": roc_auc_score(y_test, y_proba[:, 1]),
"accuracy": accuracy_score(y_test, y_pred),
}
for container in [set, list, tuple]:
scoring = check_scoring_(scoring=container(["r2", "roc_auc", "accuracy"]))
result = scoring(clf, X_test, y_test)
assert result.keys() == expected_results.keys()
for name in result:
assert result[name] == pytest.approx(expected_results[name])
def double_accuracy(y_true, y_pred):
return 2 * accuracy_score(y_true, y_pred)
custom_scorer = make_scorer(double_accuracy, response_method="predict")
# dict with different names
dict_scoring = check_scoring_(
scoring={
"my_r2": "r2",
"my_roc_auc": "roc_auc",
"double_accuracy": custom_scorer,
}
)
dict_result = dict_scoring(clf, X_test, y_test)
assert len(dict_result) == 3
assert dict_result["my_r2"] == pytest.approx(expected_results["r2"])
assert dict_result["my_roc_auc"] == pytest.approx(expected_results["roc_auc"])
assert dict_result["double_accuracy"] == pytest.approx(
2 * expected_results["accuracy"]
)
|
Check that check_scoring is compatible with multi-metric configurations.
|
test_get_scorer_multimetric
|
python
|
scikit-learn/scikit-learn
|
sklearn/metrics/tests/test_score_objects.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/metrics/tests/test_score_objects.py
|
BSD-3-Clause
|
def test_check_scoring_multimetric_raise_exc():
"""Test that check_scoring returns error code for a subset of scorers in
multimetric scoring if raise_exc=False and raises otherwise."""
def raising_scorer(estimator, X, y):
raise ValueError("That doesn't work.")
X, y = make_classification(n_samples=150, n_features=10, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = LogisticRegression().fit(X_train, y_train)
# "raising_scorer" is raising ValueError and should return an string representation
# of the error of the last scorer:
scoring = {
"accuracy": make_scorer(accuracy_score),
"raising_scorer": raising_scorer,
}
scoring_call = check_scoring(estimator=clf, scoring=scoring, raise_exc=False)
scores = scoring_call(clf, X_test, y_test)
assert "That doesn't work." in scores["raising_scorer"]
# should raise an error
scoring_call = check_scoring(estimator=clf, scoring=scoring, raise_exc=True)
err_msg = "That doesn't work."
with pytest.raises(ValueError, match=err_msg):
scores = scoring_call(clf, X_test, y_test)
|
Test that check_scoring returns an error string for the failing scorers in
multimetric scoring if raise_exc=False and raises otherwise.
|
test_check_scoring_multimetric_raise_exc
|
python
|
scikit-learn/scikit-learn
|
sklearn/metrics/tests/test_score_objects.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/metrics/tests/test_score_objects.py
|
BSD-3-Clause
|
def test_metadata_routing_multimetric_metadata_routing(enable_metadata_routing):
"""Test multimetric scorer works with and without metadata routing enabled when
there is no actual metadata to pass.
Non-regression test for https://github.com/scikit-learn/scikit-learn/issues/28256
"""
X, y = make_classification(n_samples=50, n_features=10, random_state=0)
estimator = EstimatorWithFitAndPredict().fit(X, y)
multimetric_scorer = _MultimetricScorer(scorers={"acc": get_scorer("accuracy")})
with config_context(enable_metadata_routing=enable_metadata_routing):
multimetric_scorer(estimator, X, y)
|
Test multimetric scorer works with and without metadata routing enabled when
there is no actual metadata to pass.
Non-regression test for https://github.com/scikit-learn/scikit-learn/issues/28256
|
test_metadata_routing_multimetric_metadata_routing
|
python
|
scikit-learn/scikit-learn
|
sklearn/metrics/tests/test_score_objects.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/metrics/tests/test_score_objects.py
|
BSD-3-Clause
|
def test_curve_scorer():
"""Check the behaviour of the `_CurveScorer` class."""
X, y = make_classification(random_state=0)
estimator = LogisticRegression().fit(X, y)
curve_scorer = _CurveScorer(
balanced_accuracy_score,
sign=1,
response_method="predict_proba",
thresholds=10,
kwargs={},
)
scores, thresholds = curve_scorer(estimator, X, y)
assert thresholds.shape == scores.shape
# check that the thresholds are probabilities with extreme values close to 0 and 1.
# they are not exactly 0 and 1 because they are the extremum of the
# `estimator.predict_proba(X)` values.
assert 0 <= thresholds.min() <= 0.01
assert 0.99 <= thresholds.max() <= 1
# balanced accuracy should be between 0.5 and 1 when it is not adjusted
assert 0.5 <= scores.min() <= 1
# check that passing kwargs to the scorer works
curve_scorer = _CurveScorer(
balanced_accuracy_score,
sign=1,
response_method="predict_proba",
thresholds=10,
kwargs={"adjusted": True},
)
scores, thresholds = curve_scorer(estimator, X, y)
    # balanced accuracy should be between 0 and 0.5 when it is adjusted
assert 0 <= scores.min() <= 0.5
# check that we can inverse the sign of the score when dealing with `neg_*` scorer
curve_scorer = _CurveScorer(
balanced_accuracy_score,
sign=-1,
response_method="predict_proba",
thresholds=10,
kwargs={"adjusted": True},
)
scores, thresholds = curve_scorer(estimator, X, y)
assert all(scores <= 0)
|
Check the behaviour of the `_CurveScorer` class.
|
test_curve_scorer
|
python
|
scikit-learn/scikit-learn
|
sklearn/metrics/tests/test_score_objects.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/metrics/tests/test_score_objects.py
|
BSD-3-Clause
|
def test_curve_scorer_pos_label(global_random_seed):
"""Check that we propagate properly the `pos_label` parameter to the scorer."""
n_samples = 30
X, y = make_classification(
n_samples=n_samples, weights=[0.9, 0.1], random_state=global_random_seed
)
estimator = LogisticRegression().fit(X, y)
curve_scorer = _CurveScorer(
recall_score,
sign=1,
response_method="predict_proba",
thresholds=10,
kwargs={"pos_label": 1},
)
scores_pos_label_1, thresholds_pos_label_1 = curve_scorer(estimator, X, y)
curve_scorer = _CurveScorer(
recall_score,
sign=1,
response_method="predict_proba",
thresholds=10,
kwargs={"pos_label": 0},
)
scores_pos_label_0, thresholds_pos_label_0 = curve_scorer(estimator, X, y)
# Since `pos_label` is forwarded to the curve_scorer, the thresholds are not equal.
assert not (thresholds_pos_label_1 == thresholds_pos_label_0).all()
# The min-max range for the thresholds is defined by the probabilities of the
# `pos_label` class (the column of `predict_proba`).
y_pred = estimator.predict_proba(X)
assert thresholds_pos_label_0.min() == pytest.approx(y_pred.min(axis=0)[0])
assert thresholds_pos_label_0.max() == pytest.approx(y_pred.max(axis=0)[0])
assert thresholds_pos_label_1.min() == pytest.approx(y_pred.min(axis=0)[1])
assert thresholds_pos_label_1.max() == pytest.approx(y_pred.max(axis=0)[1])
# The recall cannot be negative and `pos_label=1` should have a higher recall
    # since there are fewer samples to be considered.
assert 0.0 < scores_pos_label_0.min() < scores_pos_label_1.min()
assert scores_pos_label_0.max() == pytest.approx(1.0)
assert scores_pos_label_1.max() == pytest.approx(1.0)
|
Check that we propagate properly the `pos_label` parameter to the scorer.
|
test_curve_scorer_pos_label
|
python
|
scikit-learn/scikit-learn
|
sklearn/metrics/tests/test_score_objects.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/metrics/tests/test_score_objects.py
|
BSD-3-Clause
|