code | docstring | func_name | language | repo | path | url | license |
---|---|---|---|---|---|---|---|
def _compute_log_det_cholesky(matrix_chol, covariance_type, n_features):
"""Compute the log-det of the cholesky decomposition of matrices.
Parameters
----------
matrix_chol : array-like
Cholesky decompositions of the matrices.
'full' : shape of (n_components, n_features, n_features)
'tied' : shape of (n_features, n_features)
'diag' : shape of (n_components, n_features)
'spherical' : shape of (n_components,)
covariance_type : {'full', 'tied', 'diag', 'spherical'}
n_features : int
Number of features.
Returns
-------
log_det_precision_chol : array-like of shape (n_components,)
        The log-determinant of the precision Cholesky factor for each
        component.
"""
if covariance_type == "full":
n_components, _, _ = matrix_chol.shape
log_det_chol = np.sum(
np.log(matrix_chol.reshape(n_components, -1)[:, :: n_features + 1]), axis=1
)
elif covariance_type == "tied":
log_det_chol = np.sum(np.log(np.diag(matrix_chol)))
elif covariance_type == "diag":
log_det_chol = np.sum(np.log(matrix_chol), axis=1)
else:
log_det_chol = n_features * np.log(matrix_chol)
return log_det_chol
|
Compute the log-det of the Cholesky decomposition of matrices.
Parameters
----------
matrix_chol : array-like
Cholesky decompositions of the matrices.
'full' : shape of (n_components, n_features, n_features)
'tied' : shape of (n_features, n_features)
'diag' : shape of (n_components, n_features)
'spherical' : shape of (n_components,)
covariance_type : {'full', 'tied', 'diag', 'spherical'}
n_features : int
Number of features.
Returns
-------
log_det_precision_chol : array-like of shape (n_components,)
    The log-determinant of the precision Cholesky factor for each component.
|
_compute_log_det_cholesky
|
python
|
scikit-learn/scikit-learn
|
sklearn/mixture/_gaussian_mixture.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/mixture/_gaussian_mixture.py
|
BSD-3-Clause
|
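As a side note on the 'full' branch of `_compute_log_det_cholesky`: the slicing trick `reshape(n_components, -1)[:, :: n_features + 1]` walks the diagonal of each Cholesky factor. A minimal hedged sketch (all data below is made up for illustration):

import numpy as np

# Build a small batch of SPD matrices and their Cholesky factors.
rng = np.random.RandomState(0)
n_components, n_features = 3, 4
A = rng.randn(n_components, n_features, n_features)
spd = A @ A.transpose(0, 2, 1) + n_features * np.eye(n_features)
chol = np.linalg.cholesky(spd)  # shape (n_components, n_features, n_features)

# Every (n_features + 1)-th entry of a flattened square matrix lies on its
# diagonal, so the strided slice reproduces the per-matrix sum of log-diagonals.
log_det_trick = np.sum(
    np.log(chol.reshape(n_components, -1)[:, :: n_features + 1]), axis=1
)
log_det_direct = np.array([np.sum(np.log(np.diag(c))) for c in chol])
assert np.allclose(log_det_trick, log_det_direct)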
def _estimate_log_gaussian_prob(X, means, precisions_chol, covariance_type):
"""Estimate the log Gaussian probability.
Parameters
----------
X : array-like of shape (n_samples, n_features)
means : array-like of shape (n_components, n_features)
precisions_chol : array-like
Cholesky decompositions of the precision matrices.
'full' : shape of (n_components, n_features, n_features)
'tied' : shape of (n_features, n_features)
'diag' : shape of (n_components, n_features)
'spherical' : shape of (n_components,)
covariance_type : {'full', 'tied', 'diag', 'spherical'}
Returns
-------
log_prob : array, shape (n_samples, n_components)
"""
n_samples, n_features = X.shape
n_components, _ = means.shape
    # The log-determinant of the Cholesky factor of the precision matrix is
    # half of the log-determinant of the precision matrix itself.
    # In short: log(det(precision_chol)) = 0.5 * log(det(precision))
log_det = _compute_log_det_cholesky(precisions_chol, covariance_type, n_features)
if covariance_type == "full":
log_prob = np.empty((n_samples, n_components), dtype=X.dtype)
for k, (mu, prec_chol) in enumerate(zip(means, precisions_chol)):
y = np.dot(X, prec_chol) - np.dot(mu, prec_chol)
log_prob[:, k] = np.sum(np.square(y), axis=1)
elif covariance_type == "tied":
log_prob = np.empty((n_samples, n_components), dtype=X.dtype)
for k, mu in enumerate(means):
y = np.dot(X, precisions_chol) - np.dot(mu, precisions_chol)
log_prob[:, k] = np.sum(np.square(y), axis=1)
elif covariance_type == "diag":
precisions = precisions_chol**2
log_prob = (
np.sum((means**2 * precisions), 1)
- 2.0 * np.dot(X, (means * precisions).T)
+ np.dot(X**2, precisions.T)
)
elif covariance_type == "spherical":
precisions = precisions_chol**2
log_prob = (
np.sum(means**2, 1) * precisions
- 2 * np.dot(X, means.T * precisions)
+ np.outer(row_norms(X, squared=True), precisions)
)
    # Since we work with the Cholesky factor of the precision matrix,
    # `+ 0.5 * log_det_precision` becomes `+ log_det_precision_chol`
return -0.5 * (n_features * np.log(2 * np.pi).astype(X.dtype) + log_prob) + log_det
|
Estimate the log Gaussian probability.
Parameters
----------
X : array-like of shape (n_samples, n_features)
means : array-like of shape (n_components, n_features)
precisions_chol : array-like
Cholesky decompositions of the precision matrices.
'full' : shape of (n_components, n_features, n_features)
'tied' : shape of (n_features, n_features)
'diag' : shape of (n_components, n_features)
'spherical' : shape of (n_components,)
covariance_type : {'full', 'tied', 'diag', 'spherical'}
Returns
-------
log_prob : array, shape (n_samples, n_components)
|
_estimate_log_gaussian_prob
|
python
|
scikit-learn/scikit-learn
|
sklearn/mixture/_gaussian_mixture.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/mixture/_gaussian_mixture.py
|
BSD-3-Clause
|
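To relate `_estimate_log_gaussian_prob` to the usual Gaussian log-density, here is a hedged single-component sketch (data and factor construction are illustrative): with `precision = prec_chol @ prec_chol.T`, the quadratic form becomes the squared norm of `(x - mu) @ prec_chol` and the normalization contributes `log det(prec_chol)`, which SciPy's reference density confirms.

import numpy as np
from scipy import linalg
from scipy.stats import multivariate_normal

rng = np.random.RandomState(0)
d = 3
A = rng.randn(d, d)
cov = A @ A.T + d * np.eye(d)  # an SPD covariance matrix
mu = rng.randn(d)
X = rng.randn(5, d)

# One way to build a factor with prec_chol @ prec_chol.T == inv(cov).
cov_chol = linalg.cholesky(cov, lower=True)
prec_chol = linalg.solve_triangular(cov_chol, np.eye(d), lower=True).T

y = X @ prec_chol - mu @ prec_chol
log_prob = -0.5 * (d * np.log(2 * np.pi) + np.sum(y**2, axis=1)) + np.sum(
    np.log(np.diag(prec_chol))
)
assert np.allclose(log_prob, multivariate_normal(mu, cov).logpdf(X))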
def _check_parameters(self, X):
"""Check the Gaussian mixture parameters are well defined."""
_, n_features = X.shape
if self.weights_init is not None:
self.weights_init = _check_weights(self.weights_init, self.n_components)
if self.means_init is not None:
self.means_init = _check_means(
self.means_init, self.n_components, n_features
)
if self.precisions_init is not None:
self.precisions_init = _check_precisions(
self.precisions_init,
self.covariance_type,
self.n_components,
n_features,
)
|
Check the Gaussian mixture parameters are well defined.
|
_check_parameters
|
python
|
scikit-learn/scikit-learn
|
sklearn/mixture/_gaussian_mixture.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/mixture/_gaussian_mixture.py
|
BSD-3-Clause
|
def _initialize(self, X, resp):
"""Initialization of the Gaussian mixture parameters.
Parameters
----------
X : array-like of shape (n_samples, n_features)
resp : array-like of shape (n_samples, n_components)
"""
n_samples, _ = X.shape
weights, means, covariances = None, None, None
if resp is not None:
weights, means, covariances = _estimate_gaussian_parameters(
X, resp, self.reg_covar, self.covariance_type
)
if self.weights_init is None:
weights /= n_samples
self.weights_ = weights if self.weights_init is None else self.weights_init
self.means_ = means if self.means_init is None else self.means_init
if self.precisions_init is None:
self.covariances_ = covariances
self.precisions_cholesky_ = _compute_precision_cholesky(
covariances, self.covariance_type
)
else:
self.precisions_cholesky_ = _compute_precision_cholesky_from_precisions(
self.precisions_init, self.covariance_type
)
|
Initialization of the Gaussian mixture parameters.
Parameters
----------
X : array-like of shape (n_samples, n_features)
resp : array-like of shape (n_samples, n_components)
|
_initialize
|
python
|
scikit-learn/scikit-learn
|
sklearn/mixture/_gaussian_mixture.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/mixture/_gaussian_mixture.py
|
BSD-3-Clause
|
def _m_step(self, X, log_resp):
"""M step.
Parameters
----------
X : array-like of shape (n_samples, n_features)
log_resp : array-like of shape (n_samples, n_components)
Logarithm of the posterior probabilities (or responsibilities) of
the point of each sample in X.
"""
self.weights_, self.means_, self.covariances_ = _estimate_gaussian_parameters(
X, np.exp(log_resp), self.reg_covar, self.covariance_type
)
self.weights_ /= self.weights_.sum()
self.precisions_cholesky_ = _compute_precision_cholesky(
self.covariances_, self.covariance_type
)
|
M step.
Parameters
----------
X : array-like of shape (n_samples, n_features)
log_resp : array-like of shape (n_samples, n_components)
Logarithm of the posterior probabilities (or responsibilities) of
the point of each sample in X.
|
_m_step
|
python
|
scikit-learn/scikit-learn
|
sklearn/mixture/_gaussian_mixture.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/mixture/_gaussian_mixture.py
|
BSD-3-Clause
|
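For intuition, a tiny hedged sketch of the weight update performed in `_m_step` above (the responsibilities below are made up): per-component responsibility sums are normalized so the mixture weights sum to one.

import numpy as np

resp = np.array([[0.9, 0.1], [0.2, 0.8], [0.5, 0.5]])  # (n_samples, n_components)
weights = resp.sum(axis=0)  # un-normalized weights from the estimation step
weights /= weights.sum()    # the normalization done in `_m_step`
print(weights)              # [0.5333..., 0.4666...]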
def _n_parameters(self):
"""Return the number of free parameters in the model."""
_, n_features = self.means_.shape
if self.covariance_type == "full":
cov_params = self.n_components * n_features * (n_features + 1) / 2.0
elif self.covariance_type == "diag":
cov_params = self.n_components * n_features
elif self.covariance_type == "tied":
cov_params = n_features * (n_features + 1) / 2.0
elif self.covariance_type == "spherical":
cov_params = self.n_components
mean_params = n_features * self.n_components
return int(cov_params + mean_params + self.n_components - 1)
|
Return the number of free parameters in the model.
|
_n_parameters
|
python
|
scikit-learn/scikit-learn
|
sklearn/mixture/_gaussian_mixture.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/mixture/_gaussian_mixture.py
|
BSD-3-Clause
|
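A hedged illustration of the count in `_n_parameters` (the call to the private method is only for checking and is not public API): a 'full' model with 3 components in 2 dimensions has 9 covariance terms, 6 mean coordinates and 2 free weights, i.e. 17 free parameters, the number used by `bic`/`aic`.

import numpy as np
from sklearn.mixture import GaussianMixture

n_components, n_features = 3, 2
cov_params = n_components * n_features * (n_features + 1) / 2.0  # 9 covariance terms
mean_params = n_components * n_features                          # 6 mean coordinates
weight_params = n_components - 1                                 # weights sum to one
assert int(cov_params + mean_params + weight_params) == 17

X = np.random.RandomState(0).randn(100, n_features)
gm = GaussianMixture(
    n_components=n_components, covariance_type="full", random_state=0
).fit(X)
assert gm._n_parameters() == 17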
def test_gaussian_mixture_setting_best_params():
"""`GaussianMixture`'s best_parameters, `n_iter_` and `lower_bound_`
must be set appropriately in the case of divergence.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/18216
"""
rnd = np.random.RandomState(0)
n_samples = 30
X = rnd.uniform(size=(n_samples, 3))
# following initialization parameters were found to lead to divergence
means_init = np.array(
[
[0.670637869618158, 0.21038256107384043, 0.12892629765485303],
[0.09394051075844147, 0.5759464955561779, 0.929296197576212],
[0.5033230372781258, 0.9569852381759425, 0.08654043447295741],
[0.18578301420435747, 0.5531158970919143, 0.19388943970532435],
[0.4548589928173794, 0.35182513658825276, 0.568146063202464],
[0.609279894978321, 0.7929063819678847, 0.9620097270828052],
]
)
precisions_init = np.array(
[
999999.999604483,
999999.9990869573,
553.7603944542167,
204.78596008931834,
15.867423501783637,
85.4595728389735,
]
)
weights_init = [
0.03333333333333341,
0.03333333333333341,
0.06666666666666674,
0.06666666666666674,
0.7000000000000001,
0.10000000000000007,
]
gmm = GaussianMixture(
covariance_type="spherical",
reg_covar=0,
means_init=means_init,
weights_init=weights_init,
random_state=rnd,
n_components=len(weights_init),
precisions_init=precisions_init,
max_iter=1,
)
# ensure that no error is thrown during fit
gmm.fit(X)
# check that the fit did not converge
assert not gmm.converged_
# check that parameters are set for gmm
for attr in [
"weights_",
"means_",
"covariances_",
"precisions_cholesky_",
"n_iter_",
"lower_bound_",
"lower_bounds_",
]:
assert hasattr(gmm, attr)
|
`GaussianMixture`'s best_parameters, `n_iter_` and `lower_bound_`
must be set appropriately in the case of divergence.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/18216
|
test_gaussian_mixture_setting_best_params
|
python
|
scikit-learn/scikit-learn
|
sklearn/mixture/tests/test_gaussian_mixture.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/mixture/tests/test_gaussian_mixture.py
|
BSD-3-Clause
|
def test_gaussian_mixture_precisions_init_diag(global_dtype):
"""Check that we properly initialize `precision_cholesky_` when we manually
provide the precision matrix.
In this regard, we check the consistency between estimating the precision
matrix and providing the same precision matrix as initialization. It should
lead to the same results with the same number of iterations.
If the initialization is wrong then the number of iterations will increase.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/16944
"""
# generate a toy dataset
n_samples = 300
rng = np.random.RandomState(0)
shifted_gaussian = rng.randn(n_samples, 2) + np.array([20, 20])
C = np.array([[0.0, -0.7], [3.5, 0.7]])
stretched_gaussian = np.dot(rng.randn(n_samples, 2), C)
X = np.vstack([shifted_gaussian, stretched_gaussian]).astype(global_dtype)
# common parameters to check the consistency of precision initialization
n_components, covariance_type, reg_covar, random_state = 2, "diag", 1e-6, 0
# execute the manual initialization to compute the precision matrix:
# - run KMeans to have an initial guess
# - estimate the covariance
# - compute the precision matrix from the estimated covariance
resp = np.zeros((X.shape[0], n_components)).astype(global_dtype)
label = (
KMeans(n_clusters=n_components, n_init=1, random_state=random_state)
.fit(X)
.labels_
)
resp[np.arange(X.shape[0]), label] = 1
_, _, covariance = _estimate_gaussian_parameters(
X, resp, reg_covar=reg_covar, covariance_type=covariance_type
)
assert covariance.dtype == global_dtype
precisions_init = 1 / covariance
gm_with_init = GaussianMixture(
n_components=n_components,
covariance_type=covariance_type,
reg_covar=reg_covar,
precisions_init=precisions_init,
random_state=random_state,
).fit(X)
assert gm_with_init.means_.dtype == global_dtype
assert gm_with_init.covariances_.dtype == global_dtype
assert gm_with_init.precisions_cholesky_.dtype == global_dtype
gm_without_init = GaussianMixture(
n_components=n_components,
covariance_type=covariance_type,
reg_covar=reg_covar,
random_state=random_state,
).fit(X)
assert gm_without_init.means_.dtype == global_dtype
assert gm_without_init.covariances_.dtype == global_dtype
assert gm_without_init.precisions_cholesky_.dtype == global_dtype
assert gm_without_init.n_iter_ == gm_with_init.n_iter_
assert_allclose(
gm_with_init.precisions_cholesky_, gm_without_init.precisions_cholesky_
)
|
Check that we properly initialize `precisions_cholesky_` when we manually
provide the precision matrix.
In this regard, we check the consistency between estimating the precision
matrix and providing the same precision matrix as initialization. It should
lead to the same results with the same number of iterations.
If the initialization is wrong then the number of iterations will increase.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/16944
|
test_gaussian_mixture_precisions_init_diag
|
python
|
scikit-learn/scikit-learn
|
sklearn/mixture/tests/test_gaussian_mixture.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/mixture/tests/test_gaussian_mixture.py
|
BSD-3-Clause
|
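A hedged sketch of the pattern exercised by the test above (dataset and initial guess are made up): for covariance_type='diag', `precisions_init` has shape (n_components, n_features) and is simply the inverse of the per-feature variance guesses used to warm-start EM.

import numpy as np
from sklearn.mixture import GaussianMixture

X = np.random.RandomState(0).randn(200, 2) * np.array([1.0, 3.0])
variance_guess = np.array([[1.0, 9.0], [1.0, 9.0]])  # (n_components, n_features)
gm = GaussianMixture(
    n_components=2,
    covariance_type="diag",
    precisions_init=1.0 / variance_guess,
    random_state=0,
).fit(X)
print(gm.precisions_cholesky_.shape)  # (2, 2) for the 'diag' case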
def _calculate_precisions(X, resp, covariance_type):
"""Calculate precision matrix of X and its Cholesky decomposition
for the given covariance type.
"""
reg_covar = 1e-6
weights, means, covariances = _estimate_gaussian_parameters(
X, resp, reg_covar, covariance_type
)
precisions_cholesky = _compute_precision_cholesky(covariances, covariance_type)
_, n_components = resp.shape
# Instantiate a `GaussianMixture` model in order to use its
# `_set_parameters` method to return the `precisions_` and
# `precisions_cholesky_` from matching the `covariance_type`
# provided.
gmm = GaussianMixture(n_components=n_components, covariance_type=covariance_type)
params = (weights, means, covariances, precisions_cholesky)
gmm._set_parameters(params)
return gmm.precisions_, gmm.precisions_cholesky_
|
Calculate precision matrix of X and its Cholesky decomposition
for the given covariance type.
|
_calculate_precisions
|
python
|
scikit-learn/scikit-learn
|
sklearn/mixture/tests/test_gaussian_mixture.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/mixture/tests/test_gaussian_mixture.py
|
BSD-3-Clause
|
def test_gaussian_mixture_single_component_stable():
"""
Non-regression test for #23032 ensuring 1-component GM works on only a
few samples.
"""
rng = np.random.RandomState(0)
X = rng.multivariate_normal(np.zeros(2), np.identity(2), size=3)
gm = GaussianMixture(n_components=1)
gm.fit(X).sample()
|
Non-regression test for #23032 ensuring 1-component GM works on only a
few samples.
|
test_gaussian_mixture_single_component_stable
|
python
|
scikit-learn/scikit-learn
|
sklearn/mixture/tests/test_gaussian_mixture.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/mixture/tests/test_gaussian_mixture.py
|
BSD-3-Clause
|
def test_gaussian_mixture_all_init_does_not_estimate_gaussian_parameters(
monkeypatch,
global_random_seed,
):
"""When all init parameters are provided, the Gaussian parameters
are not estimated.
Non-regression test for gh-26015.
"""
mock = Mock(side_effect=_estimate_gaussian_parameters)
monkeypatch.setattr(
sklearn.mixture._gaussian_mixture, "_estimate_gaussian_parameters", mock
)
rng = np.random.RandomState(global_random_seed)
rand_data = RandomData(rng)
gm = GaussianMixture(
n_components=rand_data.n_components,
weights_init=rand_data.weights,
means_init=rand_data.means,
precisions_init=rand_data.precisions["full"],
random_state=rng,
)
gm.fit(rand_data.X["full"])
# The initial gaussian parameters are not estimated. They are estimated for every
# m_step.
assert mock.call_count == gm.n_iter_
|
When all init parameters are provided, the Gaussian parameters
are not estimated.
Non-regression test for gh-26015.
|
test_gaussian_mixture_all_init_does_not_estimate_gaussian_parameters
|
python
|
scikit-learn/scikit-learn
|
sklearn/mixture/tests/test_gaussian_mixture.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/mixture/tests/test_gaussian_mixture.py
|
BSD-3-Clause
|
def fit(self, X, y, **params):
"""Fit the classifier.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data.
y : array-like of shape (n_samples,)
Target values.
**params : dict
Parameters to pass to the `fit` method of the underlying
classifier.
Returns
-------
self : object
Returns an instance of self.
"""
_raise_for_params(params, self, None)
X, y = indexable(X, y)
y_type = type_of_target(y, input_name="y")
if y_type != "binary":
raise ValueError(
f"Only binary classification is supported. Unknown label type: {y_type}"
)
self._fit(X, y, **params)
if hasattr(self.estimator_, "n_features_in_"):
self.n_features_in_ = self.estimator_.n_features_in_
if hasattr(self.estimator_, "feature_names_in_"):
self.feature_names_in_ = self.estimator_.feature_names_in_
return self
|
Fit the classifier.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data.
y : array-like of shape (n_samples,)
Target values.
**params : dict
Parameters to pass to the `fit` method of the underlying
classifier.
Returns
-------
self : object
Returns an instance of self.
|
fit
|
python
|
scikit-learn/scikit-learn
|
sklearn/model_selection/_classification_threshold.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/_classification_threshold.py
|
BSD-3-Clause
|
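A hedged usage sketch for this `fit` (assuming, from the module path, that it belongs to `FixedThresholdClassifier`; the data and threshold are illustrative): the wrapper accepts only binary targets and delegates the actual training to the inner estimator.

from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import FixedThresholdClassifier

X, y = make_classification(n_samples=200, random_state=0)  # binary target, as required
clf = FixedThresholdClassifier(LogisticRegression(), threshold=0.8).fit(X, y)
print(clf.predict(X[:5]))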
def predict_proba(self, X):
"""Predict class probabilities for `X` using the fitted estimator.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vectors, where `n_samples` is the number of samples and
`n_features` is the number of features.
Returns
-------
probabilities : ndarray of shape (n_samples, n_classes)
The class probabilities of the input samples.
"""
_check_is_fitted(self)
estimator = getattr(self, "estimator_", self.estimator)
return estimator.predict_proba(X)
|
Predict class probabilities for `X` using the fitted estimator.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vectors, where `n_samples` is the number of samples and
`n_features` is the number of features.
Returns
-------
probabilities : ndarray of shape (n_samples, n_classes)
The class probabilities of the input samples.
|
predict_proba
|
python
|
scikit-learn/scikit-learn
|
sklearn/model_selection/_classification_threshold.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/_classification_threshold.py
|
BSD-3-Clause
|
def predict_log_proba(self, X):
"""Predict logarithm class probabilities for `X` using the fitted estimator.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vectors, where `n_samples` is the number of samples and
`n_features` is the number of features.
Returns
-------
log_probabilities : ndarray of shape (n_samples, n_classes)
The logarithm class probabilities of the input samples.
"""
_check_is_fitted(self)
estimator = getattr(self, "estimator_", self.estimator)
return estimator.predict_log_proba(X)
|
Predict logarithm class probabilities for `X` using the fitted estimator.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vectors, where `n_samples` is the number of samples and
`n_features` is the number of features.
Returns
-------
log_probabilities : ndarray of shape (n_samples, n_classes)
The logarithm class probabilities of the input samples.
|
predict_log_proba
|
python
|
scikit-learn/scikit-learn
|
sklearn/model_selection/_classification_threshold.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/_classification_threshold.py
|
BSD-3-Clause
|
def decision_function(self, X):
"""Decision function for samples in `X` using the fitted estimator.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vectors, where `n_samples` is the number of samples and
`n_features` is the number of features.
Returns
-------
decisions : ndarray of shape (n_samples,)
        The decision function computed with the fitted estimator.
"""
_check_is_fitted(self)
estimator = getattr(self, "estimator_", self.estimator)
return estimator.decision_function(X)
|
Decision function for samples in `X` using the fitted estimator.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vectors, where `n_samples` is the number of samples and
`n_features` is the number of features.
Returns
-------
decisions : ndarray of shape (n_samples,)
    The decision function computed with the fitted estimator.
|
decision_function
|
python
|
scikit-learn/scikit-learn
|
sklearn/model_selection/_classification_threshold.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/_classification_threshold.py
|
BSD-3-Clause
|
def _fit(self, X, y, **params):
"""Fit the classifier.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data.
y : array-like of shape (n_samples,)
Target values.
**params : dict
Parameters to pass to the `fit` method of the underlying
classifier.
Returns
-------
self : object
Returns an instance of self.
"""
routed_params = process_routing(self, "fit", **params)
self.estimator_ = clone(self.estimator).fit(X, y, **routed_params.estimator.fit)
return self
|
Fit the classifier.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data.
y : array-like of shape (n_samples,)
Target values.
**params : dict
Parameters to pass to the `fit` method of the underlying
classifier.
Returns
-------
self : object
Returns an instance of self.
|
_fit
|
python
|
scikit-learn/scikit-learn
|
sklearn/model_selection/_classification_threshold.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/_classification_threshold.py
|
BSD-3-Clause
|
def predict(self, X):
"""Predict the target of new samples.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The samples, as accepted by `estimator.predict`.
Returns
-------
class_labels : ndarray of shape (n_samples,)
The predicted class.
"""
_check_is_fitted(self)
estimator = getattr(self, "estimator_", self.estimator)
y_score, _, response_method_used = _get_response_values_binary(
estimator,
X,
self._get_response_method(),
pos_label=self.pos_label,
return_response_method_used=True,
)
if self.threshold == "auto":
decision_threshold = 0.5 if response_method_used == "predict_proba" else 0.0
else:
decision_threshold = self.threshold
return _threshold_scores_to_class_labels(
y_score, decision_threshold, self.classes_, self.pos_label
)
|
Predict the target of new samples.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The samples, as accepted by `estimator.predict`.
Returns
-------
class_labels : ndarray of shape (n_samples,)
The predicted class.
|
predict
|
python
|
scikit-learn/scikit-learn
|
sklearn/model_selection/_classification_threshold.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/_classification_threshold.py
|
BSD-3-Clause
|
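The final mapping from scores to labels in `predict` above can be pictured with a simplified, hedged sketch; it ignores the `pos_label` remapping done by the real helper, and the handling of scores exactly at the threshold is only illustrative.

import numpy as np

classes = np.array(["neg", "pos"])
y_score = np.array([0.2, 0.55, 0.9])  # e.g. positive-class probabilities
decision_threshold = 0.5              # the "auto" value when probabilities are used
y_pred = classes[(y_score >= decision_threshold).astype(int)]
print(y_pred)  # ['neg' 'pos' 'pos']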
def get_metadata_routing(self):
"""Get metadata routing of this object.
Please check :ref:`User Guide <metadata_routing>` on how the routing
mechanism works.
Returns
-------
routing : MetadataRouter
A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating
routing information.
"""
router = MetadataRouter(owner=self.__class__.__name__).add(
estimator=self.estimator,
method_mapping=MethodMapping().add(callee="fit", caller="fit"),
)
return router
|
Get metadata routing of this object.
Please check :ref:`User Guide <metadata_routing>` on how the routing
mechanism works.
Returns
-------
routing : MetadataRouter
A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating
routing information.
|
get_metadata_routing
|
python
|
scikit-learn/scikit-learn
|
sklearn/model_selection/_classification_threshold.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/_classification_threshold.py
|
BSD-3-Clause
|
def _fit_and_score_over_thresholds(
classifier,
X,
y,
*,
fit_params,
train_idx,
val_idx,
curve_scorer,
score_params,
):
"""Fit a classifier and compute the scores for different decision thresholds.
Parameters
----------
classifier : estimator instance
The classifier to fit and use for scoring. If `classifier` is already fitted,
it will be used as is.
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The entire dataset.
y : array-like of shape (n_samples,)
The entire target vector.
fit_params : dict
Parameters to pass to the `fit` method of the underlying classifier.
train_idx : ndarray of shape (n_train_samples,) or None
The indices of the training set. If `None`, `classifier` is expected to be
already fitted.
val_idx : ndarray of shape (n_val_samples,)
        The indices of the validation set used to score `classifier`. If
        `train_idx` is `None`, the entire set will be used.
curve_scorer : scorer instance
        The scorer taking `classifier` and the validation set as input and
        outputting decision thresholds and scores as a curve. Note that, unlike a
        usual scorer that outputs a single score value, `curve_scorer` outputs one
        score per threshold.
score_params : dict
Parameters to pass to the `score` method of the underlying scorer.
Returns
-------
scores : ndarray of shape (thresholds,) or tuple of such arrays
The scores computed for each decision threshold. When TPR/TNR or precision/
recall are computed, `scores` is a tuple of two arrays.
potential_thresholds : ndarray of shape (thresholds,)
The decision thresholds used to compute the scores. They are returned in
ascending order.
"""
if train_idx is not None:
X_train, X_val = _safe_indexing(X, train_idx), _safe_indexing(X, val_idx)
y_train, y_val = _safe_indexing(y, train_idx), _safe_indexing(y, val_idx)
fit_params_train = _check_method_params(X, fit_params, indices=train_idx)
score_params_val = _check_method_params(X, score_params, indices=val_idx)
classifier.fit(X_train, y_train, **fit_params_train)
else: # prefit estimator, only a validation set is provided
X_val, y_val, score_params_val = X, y, score_params
return curve_scorer(classifier, X_val, y_val, **score_params_val)
|
Fit a classifier and compute the scores for different decision thresholds.
Parameters
----------
classifier : estimator instance
The classifier to fit and use for scoring. If `classifier` is already fitted,
it will be used as is.
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The entire dataset.
y : array-like of shape (n_samples,)
The entire target vector.
fit_params : dict
Parameters to pass to the `fit` method of the underlying classifier.
train_idx : ndarray of shape (n_train_samples,) or None
The indices of the training set. If `None`, `classifier` is expected to be
already fitted.
val_idx : ndarray of shape (n_val_samples,)
    The indices of the validation set used to score `classifier`. If `train_idx`
    is `None`, the entire set will be used.
curve_scorer : scorer instance
    The scorer taking `classifier` and the validation set as input and outputting
    decision thresholds and scores as a curve. Note that, unlike a usual scorer
    that outputs a single score value, `curve_scorer` outputs one score per
    threshold.
score_params : dict
Parameters to pass to the `score` method of the underlying scorer.
Returns
-------
scores : ndarray of shape (thresholds,) or tuple of such arrays
The scores computed for each decision threshold. When TPR/TNR or precision/
recall are computed, `scores` is a tuple of two arrays.
potential_thresholds : ndarray of shape (thresholds,)
The decision thresholds used to compute the scores. They are returned in
ascending order.
|
_fit_and_score_over_thresholds
|
python
|
scikit-learn/scikit-learn
|
sklearn/model_selection/_classification_threshold.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/_classification_threshold.py
|
BSD-3-Clause
|
def _mean_interpolated_score(target_thresholds, cv_thresholds, cv_scores):
"""Compute the mean interpolated score across folds by defining common thresholds.
Parameters
----------
target_thresholds : ndarray of shape (thresholds,)
The thresholds to use to compute the mean score.
cv_thresholds : ndarray of shape (n_folds, thresholds_fold)
The thresholds used to compute the scores for each fold.
cv_scores : ndarray of shape (n_folds, thresholds_fold)
The scores computed for each threshold for each fold.
Returns
-------
mean_score : ndarray of shape (thresholds,)
The mean score across all folds for each target threshold.
"""
return np.mean(
[
np.interp(target_thresholds, split_thresholds, split_score)
for split_thresholds, split_score in zip(cv_thresholds, cv_scores)
],
axis=0,
)
|
Compute the mean interpolated score across folds by defining common thresholds.
Parameters
----------
target_thresholds : ndarray of shape (thresholds,)
The thresholds to use to compute the mean score.
cv_thresholds : ndarray of shape (n_folds, thresholds_fold)
The thresholds used to compute the scores for each fold.
cv_scores : ndarray of shape (n_folds, thresholds_fold)
The scores computed for each threshold for each fold.
Returns
-------
mean_score : ndarray of shape (thresholds,)
The mean score across all folds for each target threshold.
|
_mean_interpolated_score
|
python
|
scikit-learn/scikit-learn
|
sklearn/model_selection/_classification_threshold.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/_classification_threshold.py
|
BSD-3-Clause
|
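A hedged toy example of the interpolation in `_mean_interpolated_score` (numbers are made up): each fold scores on its own thresholds, so the scores are first interpolated onto a shared grid and only then averaged.

import numpy as np

target_thresholds = np.linspace(0.0, 1.0, 5)
cv_thresholds = [np.array([0.0, 0.5, 1.0]), np.array([0.1, 0.6, 0.9])]
cv_scores = [np.array([0.2, 0.8, 0.4]), np.array([0.3, 0.9, 0.5])]
mean_score = np.mean(
    [np.interp(target_thresholds, t, s) for t, s in zip(cv_thresholds, cv_scores)],
    axis=0,
)
print(mean_score)  # one averaged score per target threshold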
def _fit(self, X, y, **params):
"""Fit the classifier and post-tune the decision threshold.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data.
y : array-like of shape (n_samples,)
Target values.
**params : dict
Parameters to pass to the `fit` method of the underlying
classifier and to the `scoring` scorer.
Returns
-------
self : object
Returns an instance of self.
"""
if isinstance(self.cv, Real) and 0 < self.cv < 1:
cv = StratifiedShuffleSplit(
n_splits=1, test_size=self.cv, random_state=self.random_state
)
elif self.cv == "prefit":
if self.refit is True:
raise ValueError("When cv='prefit', refit cannot be True.")
try:
check_is_fitted(self.estimator, "classes_")
except NotFittedError as exc:
raise NotFittedError(
"""When cv='prefit', `estimator` must be fitted."""
) from exc
cv = self.cv
else:
cv = check_cv(self.cv, y=y, classifier=True)
if self.refit is False and cv.get_n_splits() > 1:
raise ValueError("When cv has several folds, refit cannot be False.")
routed_params = process_routing(self, "fit", **params)
self._curve_scorer = self._get_curve_scorer()
# in the following block, we:
# - define the final classifier `self.estimator_` and train it if necessary
# - define `classifier` to be used to post-tune the decision threshold
# - define `split` to be used to fit/score `classifier`
if cv == "prefit":
self.estimator_ = self.estimator
classifier = self.estimator_
splits = [(None, range(_num_samples(X)))]
else:
self.estimator_ = clone(self.estimator)
classifier = clone(self.estimator)
splits = cv.split(X, y, **routed_params.splitter.split)
if self.refit:
# train on the whole dataset
X_train, y_train, fit_params_train = X, y, routed_params.estimator.fit
else:
# single split cross-validation
train_idx, _ = next(cv.split(X, y, **routed_params.splitter.split))
X_train = _safe_indexing(X, train_idx)
y_train = _safe_indexing(y, train_idx)
fit_params_train = _check_method_params(
X, routed_params.estimator.fit, indices=train_idx
)
self.estimator_.fit(X_train, y_train, **fit_params_train)
cv_scores, cv_thresholds = zip(
*Parallel(n_jobs=self.n_jobs)(
delayed(_fit_and_score_over_thresholds)(
clone(classifier) if cv != "prefit" else classifier,
X,
y,
fit_params=routed_params.estimator.fit,
train_idx=train_idx,
val_idx=val_idx,
curve_scorer=self._curve_scorer,
score_params=routed_params.scorer.score,
)
for train_idx, val_idx in splits
)
)
if any(np.isclose(th[0], th[-1]) for th in cv_thresholds):
raise ValueError(
"The provided estimator makes constant predictions. Therefore, it is "
"impossible to optimize the decision threshold."
)
# find the global min and max thresholds across all folds
min_threshold = min(
split_thresholds.min() for split_thresholds in cv_thresholds
)
max_threshold = max(
split_thresholds.max() for split_thresholds in cv_thresholds
)
if isinstance(self.thresholds, Integral):
decision_thresholds = np.linspace(
min_threshold, max_threshold, num=self.thresholds
)
else:
decision_thresholds = np.asarray(self.thresholds)
objective_scores = _mean_interpolated_score(
decision_thresholds, cv_thresholds, cv_scores
)
best_idx = objective_scores.argmax()
self.best_score_ = objective_scores[best_idx]
self.best_threshold_ = decision_thresholds[best_idx]
if self.store_cv_results:
self.cv_results_ = {
"thresholds": decision_thresholds,
"scores": objective_scores,
}
return self
|
Fit the classifier and post-tune the decision threshold.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data.
y : array-like of shape (n_samples,)
Target values.
**params : dict
Parameters to pass to the `fit` method of the underlying
classifier and to the `scoring` scorer.
Returns
-------
self : object
Returns an instance of self.
|
_fit
|
python
|
scikit-learn/scikit-learn
|
sklearn/model_selection/_classification_threshold.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/_classification_threshold.py
|
BSD-3-Clause
|
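A hedged end-user sketch of what this `_fit` implements (assuming, from the module path, that it belongs to `TunedThresholdClassifierCV`; data and scorer are illustrative): cross-validated scores are computed over a grid of thresholds and the best threshold is stored.

from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import TunedThresholdClassifierCV

X, y = make_classification(n_samples=300, weights=[0.9, 0.1], random_state=0)
tuned = TunedThresholdClassifierCV(
    LogisticRegression(), scoring="balanced_accuracy", store_cv_results=True
).fit(X, y)
print(tuned.best_threshold_, tuned.best_score_)
print(tuned.cv_results_["thresholds"].shape)  # one entry per candidate threshold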
def predict(self, X):
"""Predict the target of new samples.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The samples, as accepted by `estimator.predict`.
Returns
-------
class_labels : ndarray of shape (n_samples,)
The predicted class.
"""
check_is_fitted(self, "estimator_")
pos_label = self._curve_scorer._get_pos_label()
y_score, _ = _get_response_values_binary(
self.estimator_,
X,
self._get_response_method(),
pos_label=pos_label,
)
return _threshold_scores_to_class_labels(
y_score, self.best_threshold_, self.classes_, pos_label
)
|
Predict the target of new samples.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The samples, as accepted by `estimator.predict`.
Returns
-------
class_labels : ndarray of shape (n_samples,)
The predicted class.
|
predict
|
python
|
scikit-learn/scikit-learn
|
sklearn/model_selection/_classification_threshold.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/_classification_threshold.py
|
BSD-3-Clause
|
def get_metadata_routing(self):
"""Get metadata routing of this object.
Please check :ref:`User Guide <metadata_routing>` on how the routing
mechanism works.
Returns
-------
routing : MetadataRouter
A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating
routing information.
"""
router = (
MetadataRouter(owner=self.__class__.__name__)
.add(
estimator=self.estimator,
method_mapping=MethodMapping().add(callee="fit", caller="fit"),
)
.add(
splitter=self.cv,
method_mapping=MethodMapping().add(callee="split", caller="fit"),
)
.add(
scorer=self._get_curve_scorer(),
method_mapping=MethodMapping().add(callee="score", caller="fit"),
)
)
return router
|
Get metadata routing of this object.
Please check :ref:`User Guide <metadata_routing>` on how the routing
mechanism works.
Returns
-------
routing : MetadataRouter
A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating
routing information.
|
get_metadata_routing
|
python
|
scikit-learn/scikit-learn
|
sklearn/model_selection/_classification_threshold.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/_classification_threshold.py
|
BSD-3-Clause
|
def _get_curve_scorer(self):
"""Get the curve scorer based on the objective metric used."""
scoring = check_scoring(self.estimator, scoring=self.scoring)
curve_scorer = _CurveScorer.from_scorer(
scoring, self._get_response_method(), self.thresholds
)
return curve_scorer
|
Get the curve scorer based on the objective metric used.
|
_get_curve_scorer
|
python
|
scikit-learn/scikit-learn
|
sklearn/model_selection/_classification_threshold.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/_classification_threshold.py
|
BSD-3-Clause
|
def plot(
self,
ax=None,
*,
negate_score=False,
score_name=None,
score_type="both",
std_display_style="fill_between",
line_kw=None,
fill_between_kw=None,
errorbar_kw=None,
):
"""Plot visualization.
Parameters
----------
ax : matplotlib Axes, default=None
Axes object to plot on. If `None`, a new figure and axes is
created.
negate_score : bool, default=False
Whether or not to negate the scores obtained through
:func:`~sklearn.model_selection.learning_curve`. This is
particularly useful when using the error denoted by `neg_*` in
`scikit-learn`.
score_name : str, default=None
The name of the score used to decorate the y-axis of the plot. It will
        override the name inferred from the `scoring` parameter. If `score_name` is
`None`, we use `"Score"` if `negate_score` is `False` and `"Negative score"`
otherwise. If `scoring` is a string or a callable, we infer the name. We
replace `_` by spaces and capitalize the first letter. We remove `neg_` and
replace it by `"Negative"` if `negate_score` is
`False` or just remove it otherwise.
score_type : {"test", "train", "both"}, default="both"
The type of score to plot. Can be one of `"test"`, `"train"`, or
`"both"`.
std_display_style : {"errorbar", "fill_between"} or None, default="fill_between"
The style used to display the score standard deviation around the
mean score. If None, no standard deviation representation is
displayed.
line_kw : dict, default=None
Additional keyword arguments passed to the `plt.plot` used to draw
the mean score.
fill_between_kw : dict, default=None
Additional keyword arguments passed to the `plt.fill_between` used
to draw the score standard deviation.
errorbar_kw : dict, default=None
Additional keyword arguments passed to the `plt.errorbar` used to
draw mean score and standard deviation score.
Returns
-------
display : :class:`~sklearn.model_selection.LearningCurveDisplay`
Object that stores computed values.
"""
self._plot_curve(
self.train_sizes,
ax=ax,
negate_score=negate_score,
score_name=score_name,
score_type=score_type,
std_display_style=std_display_style,
line_kw=line_kw,
fill_between_kw=fill_between_kw,
errorbar_kw=errorbar_kw,
)
self.ax_.set_xlabel("Number of samples in the training set")
return self
|
Plot visualization.
Parameters
----------
ax : matplotlib Axes, default=None
Axes object to plot on. If `None`, a new figure and axes is
created.
negate_score : bool, default=False
Whether or not to negate the scores obtained through
:func:`~sklearn.model_selection.learning_curve`. This is
particularly useful when using the error denoted by `neg_*` in
`scikit-learn`.
score_name : str, default=None
The name of the score used to decorate the y-axis of the plot. It will
    override the name inferred from the `scoring` parameter. If `score_name` is
`None`, we use `"Score"` if `negate_score` is `False` and `"Negative score"`
otherwise. If `scoring` is a string or a callable, we infer the name. We
replace `_` by spaces and capitalize the first letter. We remove `neg_` and
replace it by `"Negative"` if `negate_score` is
`False` or just remove it otherwise.
score_type : {"test", "train", "both"}, default="both"
The type of score to plot. Can be one of `"test"`, `"train"`, or
`"both"`.
std_display_style : {"errorbar", "fill_between"} or None, default="fill_between"
The style used to display the score standard deviation around the
mean score. If None, no standard deviation representation is
displayed.
line_kw : dict, default=None
Additional keyword arguments passed to the `plt.plot` used to draw
the mean score.
fill_between_kw : dict, default=None
Additional keyword arguments passed to the `plt.fill_between` used
to draw the score standard deviation.
errorbar_kw : dict, default=None
Additional keyword arguments passed to the `plt.errorbar` used to
draw mean score and standard deviation score.
Returns
-------
display : :class:`~sklearn.model_selection.LearningCurveDisplay`
Object that stores computed values.
|
plot
|
python
|
scikit-learn/scikit-learn
|
sklearn/model_selection/_plot.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/_plot.py
|
BSD-3-Clause
|
def from_estimator(
cls,
estimator,
X,
y,
*,
groups=None,
train_sizes=np.linspace(0.1, 1.0, 5),
cv=None,
scoring=None,
exploit_incremental_learning=False,
n_jobs=None,
pre_dispatch="all",
verbose=0,
shuffle=False,
random_state=None,
error_score=np.nan,
fit_params=None,
ax=None,
negate_score=False,
score_name=None,
score_type="both",
std_display_style="fill_between",
line_kw=None,
fill_between_kw=None,
errorbar_kw=None,
):
"""Create a learning curve display from an estimator.
Read more in the :ref:`User Guide <visualizations>` for general
information about the visualization API and :ref:`detailed
documentation <learning_curve>` regarding the learning curve
visualization.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
X : array-like of shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : array-like of shape (n_samples,) or (n_samples, n_outputs) or None
Target relative to X for classification or regression;
None for unsupervised learning.
groups : array-like of shape (n_samples,), default=None
Group labels for the samples used while splitting the dataset into
train/test set. Only used in conjunction with a "Group" :term:`cv`
instance (e.g., :class:`GroupKFold`).
train_sizes : array-like of shape (n_ticks,), \
default=np.linspace(0.1, 1.0, 5)
Relative or absolute numbers of training examples that will be used
to generate the learning curve. If the dtype is float, it is
regarded as a fraction of the maximum size of the training set
(that is determined by the selected validation method), i.e. it has
to be within (0, 1]. Otherwise it is interpreted as absolute sizes
of the training sets. Note that for classification the number of
        samples usually has to be big enough to contain at least one
sample from each class.
cv : int, cross-validation generator or an iterable, default=None
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 5-fold cross validation,
- int, to specify the number of folds in a `(Stratified)KFold`,
- :term:`CV splitter`,
- An iterable yielding (train, test) splits as arrays of indices.
For int/None inputs, if the estimator is a classifier and `y` is
either binary or multiclass,
:class:`~sklearn.model_selection.StratifiedKFold` is used. In all
other cases, :class:`~sklearn.model_selection.KFold` is used. These
splitters are instantiated with `shuffle=False` so the splits will
be the same across calls.
        Refer to the :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
scoring : str or callable, default=None
The scoring method to use when calculating the learning curve. Options:
- str: see :ref:`scoring_string_names` for options.
- callable: a scorer callable object (e.g., function) with signature
``scorer(estimator, X, y)``. See :ref:`scoring_callable` for details.
- `None`: the `estimator`'s
:ref:`default evaluation criterion <scoring_api_overview>` is used.
exploit_incremental_learning : bool, default=False
If the estimator supports incremental learning, this will be
used to speed up fitting for different training set sizes.
n_jobs : int, default=None
Number of jobs to run in parallel. Training the estimator and
computing the score are parallelized over the different training
and test sets. `None` means 1 unless in a
:obj:`joblib.parallel_backend` context. `-1` means using all
processors. See :term:`Glossary <n_jobs>` for more details.
pre_dispatch : int or str, default='all'
Number of predispatched jobs for parallel execution (default is
all). The option can reduce the allocated memory. The str can
be an expression like '2*n_jobs'.
verbose : int, default=0
Controls the verbosity: the higher, the more messages.
shuffle : bool, default=False
Whether to shuffle training data before taking prefixes of it
        based on `train_sizes`.
random_state : int, RandomState instance or None, default=None
Used when `shuffle` is True. Pass an int for reproducible
output across multiple function calls.
See :term:`Glossary <random_state>`.
error_score : 'raise' or numeric, default=np.nan
Value to assign to the score if an error occurs in estimator
fitting. If set to 'raise', the error is raised. If a numeric value
is given, FitFailedWarning is raised.
fit_params : dict, default=None
Parameters to pass to the fit method of the estimator.
ax : matplotlib Axes, default=None
Axes object to plot on. If `None`, a new figure and axes is
created.
negate_score : bool, default=False
Whether or not to negate the scores obtained through
:func:`~sklearn.model_selection.learning_curve`. This is
particularly useful when using the error denoted by `neg_*` in
`scikit-learn`.
score_name : str, default=None
The name of the score used to decorate the y-axis of the plot. It will
        override the name inferred from the `scoring` parameter. If `score_name` is
`None`, we use `"Score"` if `negate_score` is `False` and `"Negative score"`
otherwise. If `scoring` is a string or a callable, we infer the name. We
replace `_` by spaces and capitalize the first letter. We remove `neg_` and
replace it by `"Negative"` if `negate_score` is
`False` or just remove it otherwise.
score_type : {"test", "train", "both"}, default="both"
The type of score to plot. Can be one of `"test"`, `"train"`, or
`"both"`.
std_display_style : {"errorbar", "fill_between"} or None, default="fill_between"
The style used to display the score standard deviation around the
mean score. If `None`, no representation of the standard deviation
is displayed.
line_kw : dict, default=None
Additional keyword arguments passed to the `plt.plot` used to draw
the mean score.
fill_between_kw : dict, default=None
Additional keyword arguments passed to the `plt.fill_between` used
to draw the score standard deviation.
errorbar_kw : dict, default=None
Additional keyword arguments passed to the `plt.errorbar` used to
draw mean score and standard deviation score.
Returns
-------
display : :class:`~sklearn.model_selection.LearningCurveDisplay`
Object that stores computed values.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from sklearn.datasets import load_iris
>>> from sklearn.model_selection import LearningCurveDisplay
>>> from sklearn.tree import DecisionTreeClassifier
>>> X, y = load_iris(return_X_y=True)
>>> tree = DecisionTreeClassifier(random_state=0)
>>> LearningCurveDisplay.from_estimator(tree, X, y)
<...>
>>> plt.show()
"""
check_matplotlib_support(f"{cls.__name__}.from_estimator")
score_name = _validate_score_name(score_name, scoring, negate_score)
train_sizes, train_scores, test_scores = learning_curve(
estimator,
X,
y,
groups=groups,
train_sizes=train_sizes,
cv=cv,
scoring=scoring,
exploit_incremental_learning=exploit_incremental_learning,
n_jobs=n_jobs,
pre_dispatch=pre_dispatch,
verbose=verbose,
shuffle=shuffle,
random_state=random_state,
error_score=error_score,
return_times=False,
fit_params=fit_params,
)
viz = cls(
train_sizes=train_sizes,
train_scores=train_scores,
test_scores=test_scores,
score_name=score_name,
)
return viz.plot(
ax=ax,
negate_score=negate_score,
score_type=score_type,
std_display_style=std_display_style,
line_kw=line_kw,
fill_between_kw=fill_between_kw,
errorbar_kw=errorbar_kw,
)
|
Create a learning curve display from an estimator.
Read more in the :ref:`User Guide <visualizations>` for general
information about the visualization API and :ref:`detailed
documentation <learning_curve>` regarding the learning curve
visualization.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
X : array-like of shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : array-like of shape (n_samples,) or (n_samples, n_outputs) or None
Target relative to X for classification or regression;
None for unsupervised learning.
groups : array-like of shape (n_samples,), default=None
Group labels for the samples used while splitting the dataset into
train/test set. Only used in conjunction with a "Group" :term:`cv`
instance (e.g., :class:`GroupKFold`).
train_sizes : array-like of shape (n_ticks,), default=np.linspace(0.1, 1.0, 5)
Relative or absolute numbers of training examples that will be used
to generate the learning curve. If the dtype is float, it is
regarded as a fraction of the maximum size of the training set
(that is determined by the selected validation method), i.e. it has
to be within (0, 1]. Otherwise it is interpreted as absolute sizes
of the training sets. Note that for classification the number of
    samples usually has to be big enough to contain at least one
sample from each class.
cv : int, cross-validation generator or an iterable, default=None
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 5-fold cross validation,
- int, to specify the number of folds in a `(Stratified)KFold`,
- :term:`CV splitter`,
- An iterable yielding (train, test) splits as arrays of indices.
For int/None inputs, if the estimator is a classifier and `y` is
either binary or multiclass,
:class:`~sklearn.model_selection.StratifiedKFold` is used. In all
other cases, :class:`~sklearn.model_selection.KFold` is used. These
splitters are instantiated with `shuffle=False` so the splits will
be the same across calls.
    Refer to the :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
scoring : str or callable, default=None
The scoring method to use when calculating the learning curve. Options:
- str: see :ref:`scoring_string_names` for options.
- callable: a scorer callable object (e.g., function) with signature
``scorer(estimator, X, y)``. See :ref:`scoring_callable` for details.
- `None`: the `estimator`'s
:ref:`default evaluation criterion <scoring_api_overview>` is used.
exploit_incremental_learning : bool, default=False
If the estimator supports incremental learning, this will be
used to speed up fitting for different training set sizes.
n_jobs : int, default=None
Number of jobs to run in parallel. Training the estimator and
computing the score are parallelized over the different training
and test sets. `None` means 1 unless in a
:obj:`joblib.parallel_backend` context. `-1` means using all
processors. See :term:`Glossary <n_jobs>` for more details.
pre_dispatch : int or str, default='all'
Number of predispatched jobs for parallel execution (default is
all). The option can reduce the allocated memory. The str can
be an expression like '2*n_jobs'.
verbose : int, default=0
Controls the verbosity: the higher, the more messages.
shuffle : bool, default=False
Whether to shuffle training data before taking prefixes of it
    based on `train_sizes`.
random_state : int, RandomState instance or None, default=None
Used when `shuffle` is True. Pass an int for reproducible
output across multiple function calls.
See :term:`Glossary <random_state>`.
error_score : 'raise' or numeric, default=np.nan
Value to assign to the score if an error occurs in estimator
fitting. If set to 'raise', the error is raised. If a numeric value
is given, FitFailedWarning is raised.
fit_params : dict, default=None
Parameters to pass to the fit method of the estimator.
ax : matplotlib Axes, default=None
Axes object to plot on. If `None`, a new figure and axes is
created.
negate_score : bool, default=False
Whether or not to negate the scores obtained through
:func:`~sklearn.model_selection.learning_curve`. This is
particularly useful when using the error denoted by `neg_*` in
`scikit-learn`.
score_name : str, default=None
The name of the score used to decorate the y-axis of the plot. It will
    override the name inferred from the `scoring` parameter. If `score_name` is
`None`, we use `"Score"` if `negate_score` is `False` and `"Negative score"`
otherwise. If `scoring` is a string or a callable, we infer the name. We
replace `_` by spaces and capitalize the first letter. We remove `neg_` and
replace it by `"Negative"` if `negate_score` is
`False` or just remove it otherwise.
score_type : {"test", "train", "both"}, default="both"
The type of score to plot. Can be one of `"test"`, `"train"`, or
`"both"`.
std_display_style : {"errorbar", "fill_between"} or None, default="fill_between"
The style used to display the score standard deviation around the
mean score. If `None`, no representation of the standard deviation
is displayed.
line_kw : dict, default=None
Additional keyword arguments passed to the `plt.plot` used to draw
the mean score.
fill_between_kw : dict, default=None
Additional keyword arguments passed to the `plt.fill_between` used
to draw the score standard deviation.
errorbar_kw : dict, default=None
Additional keyword arguments passed to the `plt.errorbar` used to
draw mean score and standard deviation score.
Returns
-------
display : :class:`~sklearn.model_selection.LearningCurveDisplay`
Object that stores computed values.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from sklearn.datasets import load_iris
>>> from sklearn.model_selection import LearningCurveDisplay
>>> from sklearn.tree import DecisionTreeClassifier
>>> X, y = load_iris(return_X_y=True)
>>> tree = DecisionTreeClassifier(random_state=0)
>>> LearningCurveDisplay.from_estimator(tree, X, y)
<...>
>>> plt.show()
|
from_estimator
|
python
|
scikit-learn/scikit-learn
|
sklearn/model_selection/_plot.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/_plot.py
|
BSD-3-Clause
|
def plot(
self,
ax=None,
*,
negate_score=False,
score_name=None,
score_type="both",
std_display_style="fill_between",
line_kw=None,
fill_between_kw=None,
errorbar_kw=None,
):
"""Plot visualization.
Parameters
----------
ax : matplotlib Axes, default=None
Axes object to plot on. If `None`, a new figure and axes is
created.
negate_score : bool, default=False
Whether or not to negate the scores obtained through
:func:`~sklearn.model_selection.validation_curve`. This is
particularly useful when using the error denoted by `neg_*` in
`scikit-learn`.
score_name : str, default=None
The name of the score used to decorate the y-axis of the plot. It will
        override the name inferred from the `scoring` parameter. If `score_name` is
`None`, we use `"Score"` if `negate_score` is `False` and `"Negative score"`
otherwise. If `scoring` is a string or a callable, we infer the name. We
replace `_` by spaces and capitalize the first letter. We remove `neg_` and
replace it by `"Negative"` if `negate_score` is
`False` or just remove it otherwise.
score_type : {"test", "train", "both"}, default="both"
The type of score to plot. Can be one of `"test"`, `"train"`, or
`"both"`.
std_display_style : {"errorbar", "fill_between"} or None, default="fill_between"
The style used to display the score standard deviation around the
mean score. If None, no standard deviation representation is
displayed.
line_kw : dict, default=None
Additional keyword arguments passed to the `plt.plot` used to draw
the mean score.
fill_between_kw : dict, default=None
Additional keyword arguments passed to the `plt.fill_between` used
to draw the score standard deviation.
errorbar_kw : dict, default=None
Additional keyword arguments passed to the `plt.errorbar` used to
draw mean score and standard deviation score.
Returns
-------
display : :class:`~sklearn.model_selection.ValidationCurveDisplay`
Object that stores computed values.
"""
self._plot_curve(
self.param_range,
ax=ax,
negate_score=negate_score,
score_name=score_name,
score_type=score_type,
std_display_style=std_display_style,
line_kw=line_kw,
fill_between_kw=fill_between_kw,
errorbar_kw=errorbar_kw,
)
self.ax_.set_xlabel(f"{self.param_name}")
return self
|
Plot visualization.
Parameters
----------
ax : matplotlib Axes, default=None
Axes object to plot on. If `None`, a new figure and axes is
created.
negate_score : bool, default=False
Whether or not to negate the scores obtained through
:func:`~sklearn.model_selection.validation_curve`. This is
particularly useful when using the error denoted by `neg_*` in
`scikit-learn`.
score_name : str, default=None
The name of the score used to decorate the y-axis of the plot. It will
override the name inferred from the `scoring` parameter. If `score_name` is
`None`, we use `"Score"` if `negate_score` is `False` and `"Negative score"`
otherwise. If `scoring` is a string or a callable, we infer the name. We
replace `_` by spaces and capitalize the first letter. We remove `neg_` and
replace it with `"Negative"` if `negate_score` is `False`, or simply remove
it otherwise.
score_type : {"test", "train", "both"}, default="both"
The type of score to plot. Can be one of `"test"`, `"train"`, or
`"both"`.
std_display_style : {"errorbar", "fill_between"} or None, default="fill_between"
The style used to display the score standard deviation around the
mean score. If None, no standard deviation representation is
displayed.
line_kw : dict, default=None
Additional keyword arguments passed to the `plt.plot` used to draw
the mean score.
fill_between_kw : dict, default=None
Additional keyword arguments passed to the `plt.fill_between` used
to draw the score standard deviation.
errorbar_kw : dict, default=None
Additional keyword arguments passed to the `plt.errorbar` used to
draw mean score and standard deviation score.
Returns
-------
display : :class:`~sklearn.model_selection.ValidationCurveDisplay`
Object that stores computed values.
|
plot
|
python
|
scikit-learn/scikit-learn
|
sklearn/model_selection/_plot.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/_plot.py
|
BSD-3-Clause
|
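A minimal usage sketch for the `plot` method above (not part of the source file): the display returned by `from_estimator` stores the computed scores, so `plot` can be called again purely to change the rendering, here switching from the filled band to error bars.

import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import ValidationCurveDisplay

X, y = make_classification(n_samples=1_000, random_state=0)
display = ValidationCurveDisplay.from_estimator(
    LogisticRegression(), X, y,
    param_name="C", param_range=np.logspace(-8, 3, 10),
)
# Re-plotting reuses the stored train/test scores; only the styling changes.
display.plot(std_display_style="errorbar", errorbar_kw={"capsize": 3})
plt.show()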
def from_estimator(
cls,
estimator,
X,
y,
*,
param_name,
param_range,
groups=None,
cv=None,
scoring=None,
n_jobs=None,
pre_dispatch="all",
verbose=0,
error_score=np.nan,
fit_params=None,
ax=None,
negate_score=False,
score_name=None,
score_type="both",
std_display_style="fill_between",
line_kw=None,
fill_between_kw=None,
errorbar_kw=None,
):
"""Create a validation curve display from an estimator.
Read more in the :ref:`User Guide <visualizations>` for general
information about the visualization API and :ref:`detailed
documentation <validation_curve>` regarding the validation curve
visualization.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
X : array-like of shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : array-like of shape (n_samples,) or (n_samples, n_outputs) or None
Target relative to X for classification or regression;
None for unsupervised learning.
param_name : str
Name of the parameter that will be varied.
param_range : array-like of shape (n_values,)
The values of the parameter that will be evaluated.
groups : array-like of shape (n_samples,), default=None
Group labels for the samples used while splitting the dataset into
train/test set. Only used in conjunction with a "Group" :term:`cv`
instance (e.g., :class:`GroupKFold`).
cv : int, cross-validation generator or an iterable, default=None
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 5-fold cross validation,
- int, to specify the number of folds in a `(Stratified)KFold`,
- :term:`CV splitter`,
- An iterable yielding (train, test) splits as arrays of indices.
For int/None inputs, if the estimator is a classifier and `y` is
either binary or multiclass,
:class:`~sklearn.model_selection.StratifiedKFold` is used. In all
other cases, :class:`~sklearn.model_selection.KFold` is used. These
splitters are instantiated with `shuffle=False` so the splits will
be the same across calls.
Refer to the :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
scoring : str or callable, default=None
Scoring method to use when computing the validation curve. Options:
- str: see :ref:`scoring_string_names` for options.
- callable: a scorer callable object (e.g., function) with signature
``scorer(estimator, X, y)``. See :ref:`scoring_callable` for details.
- `None`: the `estimator`'s
:ref:`default evaluation criterion <scoring_api_overview>` is used.
n_jobs : int, default=None
Number of jobs to run in parallel. Training the estimator and
computing the score are parallelized over the different training
and test sets. `None` means 1 unless in a
:obj:`joblib.parallel_backend` context. `-1` means using all
processors. See :term:`Glossary <n_jobs>` for more details.
pre_dispatch : int or str, default='all'
Number of predispatched jobs for parallel execution (default is
all). The option can reduce the allocated memory. The str can
be an expression like '2*n_jobs'.
verbose : int, default=0
Controls the verbosity: the higher, the more messages.
error_score : 'raise' or numeric, default=np.nan
Value to assign to the score if an error occurs in estimator
fitting. If set to 'raise', the error is raised. If a numeric value
is given, FitFailedWarning is raised.
fit_params : dict, default=None
Parameters to pass to the fit method of the estimator.
ax : matplotlib Axes, default=None
Axes object to plot on. If `None`, a new figure and axes is
created.
negate_score : bool, default=False
Whether or not to negate the scores obtained through
:func:`~sklearn.model_selection.validation_curve`. This is
particularly useful when using the error denoted by `neg_*` in
`scikit-learn`.
score_name : str, default=None
The name of the score used to decorate the y-axis of the plot. It will
override the name inferred from the `scoring` parameter. If `score_name` is
`None`, we use `"Score"` if `negate_score` is `False` and `"Negative score"`
otherwise. If `scoring` is a string or a callable, we infer the name. We
replace `_` by spaces and capitalize the first letter. We remove `neg_` and
replace it with `"Negative"` if `negate_score` is `False`, or simply remove
it otherwise.
score_type : {"test", "train", "both"}, default="both"
The type of score to plot. Can be one of `"test"`, `"train"`, or
`"both"`.
std_display_style : {"errorbar", "fill_between"} or None, default="fill_between"
The style used to display the score standard deviation around the
mean score. If `None`, no representation of the standard deviation
is displayed.
line_kw : dict, default=None
Additional keyword arguments passed to the `plt.plot` used to draw
the mean score.
fill_between_kw : dict, default=None
Additional keyword arguments passed to the `plt.fill_between` used
to draw the score standard deviation.
errorbar_kw : dict, default=None
Additional keyword arguments passed to the `plt.errorbar` used to
draw mean score and standard deviation score.
Returns
-------
display : :class:`~sklearn.model_selection.ValidationCurveDisplay`
Object that stores computed values.
Examples
--------
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> from sklearn.datasets import make_classification
>>> from sklearn.model_selection import ValidationCurveDisplay
>>> from sklearn.linear_model import LogisticRegression
>>> X, y = make_classification(n_samples=1_000, random_state=0)
>>> logistic_regression = LogisticRegression()
>>> param_name, param_range = "C", np.logspace(-8, 3, 10)
>>> ValidationCurveDisplay.from_estimator(
... logistic_regression, X, y, param_name=param_name,
... param_range=param_range,
... )
<...>
>>> plt.show()
"""
check_matplotlib_support(f"{cls.__name__}.from_estimator")
score_name = _validate_score_name(score_name, scoring, negate_score)
train_scores, test_scores = validation_curve(
estimator,
X,
y,
param_name=param_name,
param_range=param_range,
groups=groups,
cv=cv,
scoring=scoring,
n_jobs=n_jobs,
pre_dispatch=pre_dispatch,
verbose=verbose,
error_score=error_score,
fit_params=fit_params,
)
viz = cls(
param_name=param_name,
param_range=np.asarray(param_range),
train_scores=train_scores,
test_scores=test_scores,
score_name=score_name,
)
return viz.plot(
ax=ax,
negate_score=negate_score,
score_type=score_type,
std_display_style=std_display_style,
line_kw=line_kw,
fill_between_kw=fill_between_kw,
errorbar_kw=errorbar_kw,
)
|
Create a validation curve display from an estimator.
Read more in the :ref:`User Guide <visualizations>` for general
information about the visualization API and :ref:`detailed
documentation <validation_curve>` regarding the validation curve
visualization.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
X : array-like of shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : array-like of shape (n_samples,) or (n_samples, n_outputs) or None
Target relative to X for classification or regression;
None for unsupervised learning.
param_name : str
Name of the parameter that will be varied.
param_range : array-like of shape (n_values,)
The values of the parameter that will be evaluated.
groups : array-like of shape (n_samples,), default=None
Group labels for the samples used while splitting the dataset into
train/test set. Only used in conjunction with a "Group" :term:`cv`
instance (e.g., :class:`GroupKFold`).
cv : int, cross-validation generator or an iterable, default=None
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 5-fold cross validation,
- int, to specify the number of folds in a `(Stratified)KFold`,
- :term:`CV splitter`,
- An iterable yielding (train, test) splits as arrays of indices.
For int/None inputs, if the estimator is a classifier and `y` is
either binary or multiclass,
:class:`~sklearn.model_selection.StratifiedKFold` is used. In all
other cases, :class:`~sklearn.model_selection.KFold` is used. These
splitters are instantiated with `shuffle=False` so the splits will
be the same across calls.
Refer to the :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
scoring : str or callable, default=None
Scoring method to use when computing the validation curve. Options:
- str: see :ref:`scoring_string_names` for options.
- callable: a scorer callable object (e.g., function) with signature
``scorer(estimator, X, y)``. See :ref:`scoring_callable` for details.
- `None`: the `estimator`'s
:ref:`default evaluation criterion <scoring_api_overview>` is used.
n_jobs : int, default=None
Number of jobs to run in parallel. Training the estimator and
computing the score are parallelized over the different training
and test sets. `None` means 1 unless in a
:obj:`joblib.parallel_backend` context. `-1` means using all
processors. See :term:`Glossary <n_jobs>` for more details.
pre_dispatch : int or str, default='all'
Number of predispatched jobs for parallel execution (default is
all). The option can reduce the allocated memory. The str can
be an expression like '2*n_jobs'.
verbose : int, default=0
Controls the verbosity: the higher, the more messages.
error_score : 'raise' or numeric, default=np.nan
Value to assign to the score if an error occurs in estimator
fitting. If set to 'raise', the error is raised. If a numeric value
is given, FitFailedWarning is raised.
fit_params : dict, default=None
Parameters to pass to the fit method of the estimator.
ax : matplotlib Axes, default=None
Axes object to plot on. If `None`, a new figure and axes is
created.
negate_score : bool, default=False
Whether or not to negate the scores obtained through
:func:`~sklearn.model_selection.validation_curve`. This is
particularly useful when using the error denoted by `neg_*` in
`scikit-learn`.
score_name : str, default=None
The name of the score used to decorate the y-axis of the plot. It will
override the name inferred from the `scoring` parameter. If `score_name` is
`None`, we use `"Score"` if `negate_score` is `False` and `"Negative score"`
otherwise. If `scoring` is a string or a callable, we infer the name. We
replace `_` by spaces and capitalize the first letter. We remove `neg_` and
replace it with `"Negative"` if `negate_score` is `False`, or simply remove
it otherwise.
score_type : {"test", "train", "both"}, default="both"
The type of score to plot. Can be one of `"test"`, `"train"`, or
`"both"`.
std_display_style : {"errorbar", "fill_between"} or None, default="fill_between"
The style used to display the score standard deviation around the
mean score. If `None`, no representation of the standard deviation
is displayed.
line_kw : dict, default=None
Additional keyword arguments passed to the `plt.plot` used to draw
the mean score.
fill_between_kw : dict, default=None
Additional keyword arguments passed to the `plt.fill_between` used
to draw the score standard deviation.
errorbar_kw : dict, default=None
Additional keyword arguments passed to the `plt.errorbar` used to
draw mean score and standard deviation score.
Returns
-------
display : :class:`~sklearn.model_selection.ValidationCurveDisplay`
Object that stores computed values.
Examples
--------
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> from sklearn.datasets import make_classification
>>> from sklearn.model_selection import ValidationCurveDisplay
>>> from sklearn.linear_model import LogisticRegression
>>> X, y = make_classification(n_samples=1_000, random_state=0)
>>> logistic_regression = LogisticRegression()
>>> param_name, param_range = "C", np.logspace(-8, 3, 10)
>>> ValidationCurveDisplay.from_estimator(
... logistic_regression, X, y, param_name=param_name,
... param_range=param_range,
... )
<...>
>>> plt.show()
|
from_estimator
|
python
|
scikit-learn/scikit-learn
|
sklearn/model_selection/_plot.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/_plot.py
|
BSD-3-Clause
|
def __iter__(self):
"""Iterate over the points in the grid.
Returns
-------
params : iterator over dict of str to any
Yields dictionaries mapping each estimator parameter to one of its
allowed values.
"""
for p in self.param_grid:
# Always sort the keys of a dictionary, for reproducibility
items = sorted(p.items())
if not items:
yield {}
else:
keys, values = zip(*items)
for v in product(*values):
params = dict(zip(keys, v))
yield params
|
Iterate over the points in the grid.
Returns
-------
params : iterator over dict of str to any
Yields dictionaries mapping each estimator parameter to one of its
allowed values.
|
__iter__
|
python
|
scikit-learn/scikit-learn
|
sklearn/model_selection/_search.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/_search.py
|
BSD-3-Clause
|
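A small illustration of the iteration order described above (a sketch, not taken from the source file): keys are sorted for reproducibility and the values are expanded as a Cartesian product.

from sklearn.model_selection import ParameterGrid

grid = ParameterGrid({"kernel": ["rbf", "poly"], "C": [1, 10]})
for params in grid:
    print(params)
# {'C': 1, 'kernel': 'rbf'}
# {'C': 1, 'kernel': 'poly'}
# {'C': 10, 'kernel': 'rbf'}
# {'C': 10, 'kernel': 'poly'}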
def __len__(self):
"""Number of points on the grid."""
# Product function that can handle iterables (np.prod can't).
product = partial(reduce, operator.mul)
return sum(
product(len(v) for v in p.values()) if p else 1 for p in self.param_grid
)
|
Number of points on the grid.
|
__len__
|
python
|
scikit-learn/scikit-learn
|
sklearn/model_selection/_search.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/_search.py
|
BSD-3-Clause
|
def __getitem__(self, ind):
"""Get the parameters that would be ``ind``th in iteration
Parameters
----------
ind : int
The iteration index
Returns
-------
params : dict of str to any
Equal to list(self)[ind]
"""
# This is used to make discrete sampling without replacement memory
# efficient.
for sub_grid in self.param_grid:
# XXX: could memoize information used here
if not sub_grid:
if ind == 0:
return {}
else:
ind -= 1
continue
# Reverse so most frequent cycling parameter comes first
keys, values_lists = zip(*sorted(sub_grid.items())[::-1])
sizes = [len(v_list) for v_list in values_lists]
total = np.prod(sizes)
if ind >= total:
# Try the next grid
ind -= total
else:
out = {}
for key, v_list, n in zip(keys, values_lists, sizes):
ind, offset = divmod(ind, n)
out[key] = v_list[offset]
return out
raise IndexError("ParameterGrid index out of range")
|
Get the parameters that would be ``ind``th in iteration
Parameters
----------
ind : int
The iteration index
Returns
-------
params : dict of str to any
Equal to list(self)[ind]
|
__getitem__
|
python
|
scikit-learn/scikit-learn
|
sklearn/model_selection/_search.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/_search.py
|
BSD-3-Clause
|
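An indexing sketch for `__getitem__` (illustrative only): `grid[ind]` agrees with `list(grid)[ind]` without materialising the full product, which is what makes memory-efficient sampling without replacement possible.

from sklearn.model_selection import ParameterGrid

grid = ParameterGrid([{"kernel": ["rbf"], "gamma": [0.1, 1]},
                      {"kernel": ["poly"], "degree": [1, 2]}])
assert len(grid) == 4
# Indexing agrees with full iteration but never builds the whole list.
assert grid[3] == list(grid)[3]
print(grid[2])  # a candidate from the second sub-grid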
def __len__(self):
"""Number of points that will be sampled."""
if self._is_all_lists():
grid_size = len(ParameterGrid(self.param_distributions))
return min(self.n_iter, grid_size)
else:
return self.n_iter
|
Number of points that will be sampled.
|
__len__
|
python
|
scikit-learn/scikit-learn
|
sklearn/model_selection/_search.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/_search.py
|
BSD-3-Clause
|
def _search_estimator_has(attr):
"""Check if we can delegate a method to the underlying estimator.
Calling a prediction method will only be available if `refit=True`. In
such case, we check first the fitted best estimator. If it is not
fitted, we check the unfitted estimator.
Checking the unfitted estimator allows to use `hasattr` on the `SearchCV`
instance even before calling `fit`.
"""
def check(self):
_check_refit(self, attr)
if hasattr(self, "best_estimator_"):
# raise an AttributeError if `attr` does not exist
getattr(self.best_estimator_, attr)
return True
# raise an AttributeError if `attr` does not exist
getattr(self.estimator, attr)
return True
return check
|
Check if we can delegate a method to the underlying estimator.
Calling a prediction method will only be available if `refit=True`. In
such case, we check first the fitted best estimator. If it is not
fitted, we check the unfitted estimator.
Checking the unfitted estimator allows to use `hasattr` on the `SearchCV`
instance even before calling `fit`.
|
_search_estimator_has
|
python
|
scikit-learn/scikit-learn
|
sklearn/model_selection/_search.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/_search.py
|
BSD-3-Clause
|
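A behavioural sketch of the delegation check above (assuming a recent scikit-learn where the unfitted estimator is probed, as the docstring states): `hasattr` already answers correctly on an unfitted search object, and the call itself is delegated to `best_estimator_` after `fit`.

from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GridSearchCV

search = GridSearchCV(LogisticRegression(max_iter=1000), {"C": [0.1, 1.0]})
print(hasattr(search, "predict_proba"))  # True even before fit (refit=True by default)
X, y = load_iris(return_X_y=True)
search.fit(X, y)
print(search.predict_proba(X[:2]))       # delegated to the refitted best_estimator_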
def _yield_masked_array_for_each_param(candidate_params):
"""
Yield a masked array for each candidate param.
`candidate_params` is a sequence of params which were used in
a `GridSearchCV`. We use masked arrays for the results, as not
all params are necessarily present in each element of
`candidate_params`. For example, if using `GridSearchCV` with
a `SVC` model, then one might search over params like:
- kernel=["rbf"], gamma=[0.1, 1]
- kernel=["poly"], degree=[1, 2]
and then param `'gamma'` would not be present in entries of
`candidate_params` corresponding to `kernel='poly'`.
"""
n_candidates = len(candidate_params)
param_results = defaultdict(dict)
for cand_idx, params in enumerate(candidate_params):
for name, value in params.items():
param_results["param_%s" % name][cand_idx] = value
for key, param_result in param_results.items():
param_list = list(param_result.values())
try:
arr = np.array(param_list)
except ValueError:
# This can happen when param_list contains lists of different
# lengths, for example:
# param_list=[[1], [2, 3]]
arr_dtype = np.dtype(object)
else:
# There are two cases when we don't use the automatically inferred
# dtype when creating the array and we use object instead:
# - string dtype
# - when array.ndim > 1, that means that param_list was something
# like a list of same-size sequences, which gets turned into a
# multi-dimensional array but we want a 1d array
arr_dtype = arr.dtype if arr.dtype.kind != "U" and arr.ndim == 1 else object
# Use one MaskedArray and mask all the places where the param is not
# applicable for that candidate (which may not contain all the params).
ma = MaskedArray(np.empty(n_candidates, dtype=arr_dtype), mask=True)
for index, value in param_result.items():
# Setting the value at an index unmasks that index
ma[index] = value
yield (key, ma)
|
Yield a masked array for each candidate param.
`candidate_params` is a sequence of params which were used in
a `GridSearchCV`. We use masked arrays for the results, as not
all params are necessarily present in each element of
`candidate_params`. For example, if using `GridSearchCV` with
a `SVC` model, then one might search over params like:
- kernel=["rbf"], gamma=[0.1, 1]
- kernel=["poly"], degree=[1, 2]
and then param `'gamma'` would not be present in entries of
`candidate_params` corresponding to `kernel='poly'`.
|
_yield_masked_array_for_each_param
|
python
|
scikit-learn/scikit-learn
|
sklearn/model_selection/_search.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/_search.py
|
BSD-3-Clause
|
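A sketch of the masked columns this helper produces (illustrative, not from the source file): with two sub-grids that do not share all parameters, the non-applicable entries stay masked in `cv_results_`.

from sklearn.datasets import load_iris
from sklearn.model_selection import GridSearchCV
from sklearn.svm import SVC

X, y = load_iris(return_X_y=True)
param_grid = [{"kernel": ["rbf"], "gamma": [0.1, 1]},
              {"kernel": ["poly"], "degree": [1, 2]}]
search = GridSearchCV(SVC(), param_grid, cv=3).fit(X, y)
print(search.cv_results_["param_gamma"])   # masked for the 'poly' candidates
print(search.cv_results_["param_degree"])  # masked for the 'rbf' candidates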
def score(self, X, y=None, **params):
"""Return the score on the given data, if the estimator has been refit.
This uses the score defined by ``scoring`` where provided, and the
``best_estimator_.score`` method otherwise.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Input data, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : array-like of shape (n_samples, n_output) \
or (n_samples,), default=None
Target relative to X for classification or regression;
None for unsupervised learning.
**params : dict
Parameters to be passed to the underlying scorer(s).
.. versionadded:: 1.4
Only available if `enable_metadata_routing=True`. See
:ref:`Metadata Routing User Guide <metadata_routing>` for more
details.
Returns
-------
score : float
The score defined by ``scoring`` if provided, and the
``best_estimator_.score`` method otherwise.
"""
_check_refit(self, "score")
check_is_fitted(self)
_raise_for_params(params, self, "score")
if _routing_enabled():
score_params = process_routing(self, "score", **params).scorer["score"]
else:
score_params = dict()
if self.scorer_ is None:
raise ValueError(
"No score function explicitly defined, "
"and the estimator doesn't provide one %s" % self.best_estimator_
)
if isinstance(self.scorer_, dict):
if self.multimetric_:
scorer = self.scorer_[self.refit]
else:
scorer = self.scorer_
return scorer(self.best_estimator_, X, y, **score_params)
# callable
score = self.scorer_(self.best_estimator_, X, y, **score_params)
if self.multimetric_:
score = score[self.refit]
return score
|
Return the score on the given data, if the estimator has been refit.
This uses the score defined by ``scoring`` where provided, and the
``best_estimator_.score`` method otherwise.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Input data, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : array-like of shape (n_samples, n_output) or (n_samples,), default=None
Target relative to X for classification or regression;
None for unsupervised learning.
**params : dict
Parameters to be passed to the underlying scorer(s).
.. versionadded:: 1.4
Only available if `enable_metadata_routing=True`. See
:ref:`Metadata Routing User Guide <metadata_routing>` for more
details.
Returns
-------
score : float
The score defined by ``scoring`` if provided, and the
``best_estimator_.score`` method otherwise.
|
score
|
python
|
scikit-learn/scikit-learn
|
sklearn/model_selection/_search.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/_search.py
|
BSD-3-Clause
|
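Usage sketch for `score` (not from the source file): after a refitted search, it evaluates `best_estimator_` on held-out data with the scorer configured via `scoring`.

from sklearn.datasets import load_iris
from sklearn.model_selection import GridSearchCV, train_test_split
from sklearn.svm import SVC

X, y = load_iris(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
search = GridSearchCV(SVC(), {"C": [0.1, 1, 10]}, scoring="accuracy")
search.fit(X_train, y_train)
print(search.score(X_test, y_test))  # accuracy of the refitted best estimator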
def n_features_in_(self):
"""Number of features seen during :term:`fit`.
Only available when `refit=True`.
"""
# For consistency with other estimators we raise a AttributeError so
# that hasattr() fails if the search estimator isn't fitted.
try:
check_is_fitted(self)
except NotFittedError as nfe:
raise AttributeError(
"{} object has no n_features_in_ attribute.".format(
self.__class__.__name__
)
) from nfe
return self.best_estimator_.n_features_in_
|
Number of features seen during :term:`fit`.
Only available when `refit=True`.
|
n_features_in_
|
python
|
scikit-learn/scikit-learn
|
sklearn/model_selection/_search.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/_search.py
|
BSD-3-Clause
|
def _check_refit_for_multimetric(self, scores):
"""Check that `refit` is compatible with `scores` and is valid."""
multimetric_refit_msg = (
"For multi-metric scoring, the parameter refit must be set to a "
"scorer key or a callable to refit an estimator with the best "
"parameter setting on the whole data and make the best_* "
"attributes available for that metric. If this is not needed, "
f"refit should be set to False explicitly. {self.refit!r} was "
"passed."
)
valid_refit_dict = isinstance(self.refit, str) and self.refit in scores
if (
self.refit is not False
and not valid_refit_dict
and not callable(self.refit)
):
raise ValueError(multimetric_refit_msg)
|
Check that `refit` is compatible with `scores` and is valid.
|
_check_refit_for_multimetric
|
python
|
scikit-learn/scikit-learn
|
sklearn/model_selection/_search.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/_search.py
|
BSD-3-Clause
|
def _select_best_index(refit, refit_metric, results):
"""Select index of the best combination of hyperparameters."""
if callable(refit):
# If callable, refit is expected to return the index of the best
# parameter set.
best_index = refit(results)
if not isinstance(best_index, numbers.Integral):
raise TypeError("best_index_ returned is not an integer")
if best_index < 0 or best_index >= len(results["params"]):
raise IndexError("best_index_ index out of range")
else:
best_index = results[f"rank_test_{refit_metric}"].argmin()
return best_index
|
Select index of the best combination of hyperparameters.
|
_select_best_index
|
python
|
scikit-learn/scikit-learn
|
sklearn/model_selection/_search.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/_search.py
|
BSD-3-Clause
|
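An illustrative callable `refit` matching the contract checked above (the `pick_best` name is made up for this sketch): it receives `cv_results_` and must return an integer index into `results["params"]`.

import numpy as np
from sklearn.datasets import load_iris
from sklearn.model_selection import GridSearchCV
from sklearn.svm import SVC

def pick_best(cv_results):
    # Any selection rule works as long as an integer index is returned.
    return int(np.argmax(cv_results["mean_test_score"]))

X, y = load_iris(return_X_y=True)
search = GridSearchCV(SVC(), {"C": [0.1, 1, 10]}, refit=pick_best).fit(X, y)
print(search.best_index_, search.best_params_)
# Note: best_score_ is not set when refit is a callable.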
def _get_scorers(self):
"""Get the scorer(s) to be used.
This is used in ``fit`` and ``get_metadata_routing``.
Returns
-------
scorers, refit_metric
"""
refit_metric = "score"
if callable(self.scoring):
scorers = self.scoring
elif self.scoring is None or isinstance(self.scoring, str):
scorers = check_scoring(self.estimator, self.scoring)
else:
scorers = _check_multimetric_scoring(self.estimator, self.scoring)
self._check_refit_for_multimetric(scorers)
refit_metric = self.refit
scorers = _MultimetricScorer(
scorers=scorers, raise_exc=(self.error_score == "raise")
)
return scorers, refit_metric
|
Get the scorer(s) to be used.
This is used in ``fit`` and ``get_metadata_routing``.
Returns
-------
scorers, refit_metric
|
_get_scorers
|
python
|
scikit-learn/scikit-learn
|
sklearn/model_selection/_search.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/_search.py
|
BSD-3-Clause
|
def _get_routed_params_for_fit(self, params):
"""Get the parameters to be used for routing.
This is a method instead of a snippet in ``fit`` since it's used twice,
here in ``fit``, and in ``HalvingRandomSearchCV.fit``.
"""
if _routing_enabled():
routed_params = process_routing(self, "fit", **params)
else:
params = params.copy()
groups = params.pop("groups", None)
routed_params = Bunch(
estimator=Bunch(fit=params),
splitter=Bunch(split={"groups": groups}),
scorer=Bunch(score={}),
)
# NOTE: sample_weight is forwarded to the scorer if sample_weight
# is not None and scorers accept sample_weight. For _MultimetricScorer,
# sample_weight is forwarded if any scorer accepts sample_weight
if (
params.get("sample_weight") is not None
and self._check_scorers_accept_sample_weight()
):
routed_params.scorer.score["sample_weight"] = params["sample_weight"]
return routed_params
|
Get the parameters to be used for routing.
This is a method instead of a snippet in ``fit`` since it's used twice,
here in ``fit``, and in ``HalvingRandomSearchCV.fit``.
|
_get_routed_params_for_fit
|
python
|
scikit-learn/scikit-learn
|
sklearn/model_selection/_search.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/_search.py
|
BSD-3-Clause
|
def fit(self, X, y=None, **params):
"""Run fit with all sets of parameters.
Parameters
----------
X : array-like of shape (n_samples, n_features) or (n_samples, n_samples)
Training vectors, where `n_samples` is the number of samples and
`n_features` is the number of features. For precomputed kernel or
distance matrix, the expected shape of X is (n_samples, n_samples).
y : array-like of shape (n_samples, n_output) \
or (n_samples,), default=None
Target relative to X for classification or regression;
None for unsupervised learning.
**params : dict of str -> object
Parameters passed to the ``fit`` method of the estimator, the scorer,
and the CV splitter.
If a fit parameter is an array-like whose length is equal to
`num_samples` then it will be split by cross-validation along with
`X` and `y`. For example, the :term:`sample_weight` parameter is
split because `len(sample_weights) = len(X)`. However, this behavior
does not apply to `groups` which is passed to the splitter configured
via the `cv` parameter of the constructor. Thus, `groups` is used
*to perform the split* and determines which samples are
assigned to each side of a split.
Returns
-------
self : object
Instance of fitted estimator.
"""
estimator = self.estimator
scorers, refit_metric = self._get_scorers()
X, y = indexable(X, y)
params = _check_method_params(X, params=params)
routed_params = self._get_routed_params_for_fit(params)
cv_orig = check_cv(self.cv, y, classifier=is_classifier(estimator))
n_splits = cv_orig.get_n_splits(X, y, **routed_params.splitter.split)
base_estimator = clone(self.estimator)
parallel = Parallel(n_jobs=self.n_jobs, pre_dispatch=self.pre_dispatch)
fit_and_score_kwargs = dict(
scorer=scorers,
fit_params=routed_params.estimator.fit,
score_params=routed_params.scorer.score,
return_train_score=self.return_train_score,
return_n_test_samples=True,
return_times=True,
return_parameters=False,
error_score=self.error_score,
verbose=self.verbose,
)
results = {}
with parallel:
all_candidate_params = []
all_out = []
all_more_results = defaultdict(list)
def evaluate_candidates(candidate_params, cv=None, more_results=None):
cv = cv or cv_orig
candidate_params = list(candidate_params)
n_candidates = len(candidate_params)
if self.verbose > 0:
print(
"Fitting {0} folds for each of {1} candidates,"
" totalling {2} fits".format(
n_splits, n_candidates, n_candidates * n_splits
)
)
out = parallel(
delayed(_fit_and_score)(
clone(base_estimator),
X,
y,
train=train,
test=test,
parameters=parameters,
split_progress=(split_idx, n_splits),
candidate_progress=(cand_idx, n_candidates),
**fit_and_score_kwargs,
)
for (cand_idx, parameters), (split_idx, (train, test)) in product(
enumerate(candidate_params),
enumerate(cv.split(X, y, **routed_params.splitter.split)),
)
)
if len(out) < 1:
raise ValueError(
"No fits were performed. "
"Was the CV iterator empty? "
"Were there no candidates?"
)
elif len(out) != n_candidates * n_splits:
raise ValueError(
"cv.split and cv.get_n_splits returned "
"inconsistent results. Expected {} "
"splits, got {}".format(n_splits, len(out) // n_candidates)
)
_warn_or_raise_about_fit_failures(out, self.error_score)
# For callable self.scoring, the return type is only known after
# calling. If the return type is a dictionary, the error scores
# can now be inserted with the correct key. The type checking
# of out will be done in `_insert_error_scores`.
if callable(self.scoring):
_insert_error_scores(out, self.error_score)
all_candidate_params.extend(candidate_params)
all_out.extend(out)
if more_results is not None:
for key, value in more_results.items():
all_more_results[key].extend(value)
nonlocal results
results = self._format_results(
all_candidate_params, n_splits, all_out, all_more_results
)
return results
self._run_search(evaluate_candidates)
# multimetric is determined here because in the case of a callable
# self.scoring the return type is only known after calling
first_test_score = all_out[0]["test_scores"]
self.multimetric_ = isinstance(first_test_score, dict)
# check refit_metric now for a callable scorer that is multimetric
if callable(self.scoring) and self.multimetric_:
self._check_refit_for_multimetric(first_test_score)
refit_metric = self.refit
# For multi-metric evaluation, store the best_index_, best_params_ and
# best_score_ iff refit is one of the scorer names
# In single metric evaluation, refit_metric is "score"
if self.refit or not self.multimetric_:
self.best_index_ = self._select_best_index(
self.refit, refit_metric, results
)
if not callable(self.refit):
# With a non-custom callable, we can select the best score
# based on the best index
self.best_score_ = results[f"mean_test_{refit_metric}"][
self.best_index_
]
self.best_params_ = results["params"][self.best_index_]
if self.refit:
# here we clone the estimator as well as the parameters, since
# sometimes the parameters themselves might be estimators, e.g.
# when we search over different estimators in a pipeline.
# ref: https://github.com/scikit-learn/scikit-learn/pull/26786
self.best_estimator_ = clone(base_estimator).set_params(
**clone(self.best_params_, safe=False)
)
refit_start_time = time.time()
if y is not None:
self.best_estimator_.fit(X, y, **routed_params.estimator.fit)
else:
self.best_estimator_.fit(X, **routed_params.estimator.fit)
refit_end_time = time.time()
self.refit_time_ = refit_end_time - refit_start_time
if hasattr(self.best_estimator_, "feature_names_in_"):
self.feature_names_in_ = self.best_estimator_.feature_names_in_
# Store the only scorer not as a dict for single metric evaluation
if isinstance(scorers, _MultimetricScorer):
self.scorer_ = scorers._scorers
else:
self.scorer_ = scorers
self.cv_results_ = results
self.n_splits_ = n_splits
return self
|
Run fit with all sets of parameters.
Parameters
----------
X : array-like of shape (n_samples, n_features) or (n_samples, n_samples)
Training vectors, where `n_samples` is the number of samples and
`n_features` is the number of features. For precomputed kernel or
distance matrix, the expected shape of X is (n_samples, n_samples).
y : array-like of shape (n_samples, n_output) or (n_samples,), default=None
Target relative to X for classification or regression;
None for unsupervised learning.
**params : dict of str -> object
Parameters passed to the ``fit`` method of the estimator, the scorer,
and the CV splitter.
If a fit parameter is an array-like whose length is equal to
`num_samples` then it will be split by cross-validation along with
`X` and `y`. For example, the :term:`sample_weight` parameter is
split because `len(sample_weights) = len(X)`. However, this behavior
does not apply to `groups` which is passed to the splitter configured
via the `cv` parameter of the constructor. Thus, `groups` is used
*to perform the split* and determines which samples are
assigned to each side of a split.
Returns
-------
self : object
Instance of fitted estimator.
|
fit
|
python
|
scikit-learn/scikit-learn
|
sklearn/model_selection/_search.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/_search.py
|
BSD-3-Clause
|
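A sketch of the fit-parameter handling described above (assuming metadata routing is not enabled): an array-like of length `n_samples`, such as `sample_weight`, is sliced along with `X` and `y` for every CV split, while `groups` is consumed by the splitter.

import numpy as np
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GridSearchCV, GroupKFold

X, y = load_iris(return_X_y=True)
rng = np.random.RandomState(0)
sample_weight = rng.uniform(0.5, 1.5, size=len(X))  # split along with X and y
groups = rng.randint(0, 5, size=len(X))              # used only to build the splits

search = GridSearchCV(
    LogisticRegression(max_iter=1000), {"C": [0.1, 1.0]}, cv=GroupKFold(n_splits=3)
)
search.fit(X, y, sample_weight=sample_weight, groups=groups)
print(search.best_params_)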
def _store(key_name, array, weights=None, splits=False, rank=False):
"""A small helper to store the scores/times to the cv_results_"""
# When iterated first by splits, then by parameters
# We want `array` to have `n_candidates` rows and `n_splits` cols.
array = np.array(array, dtype=np.float64).reshape(n_candidates, n_splits)
if splits:
for split_idx in range(n_splits):
# Uses closure to alter the results
results["split%d_%s" % (split_idx, key_name)] = array[:, split_idx]
array_means = np.average(array, axis=1, weights=weights)
results["mean_%s" % key_name] = array_means
if key_name.startswith(("train_", "test_")) and np.any(
~np.isfinite(array_means)
):
warnings.warn(
(
f"One or more of the {key_name.split('_')[0]} scores "
f"are non-finite: {array_means}"
),
category=UserWarning,
)
# Weighted std is not directly available in numpy
array_stds = np.sqrt(
np.average(
(array - array_means[:, np.newaxis]) ** 2, axis=1, weights=weights
)
)
results["std_%s" % key_name] = array_stds
if rank:
# When the fit/scoring fails `array_means` contains NaNs, we
# will exclude them from the ranking process and consider them
# as tied with the worst performers.
if np.isnan(array_means).all():
# All fit/scoring routines failed.
rank_result = np.ones_like(array_means, dtype=np.int32)
else:
min_array_means = np.nanmin(array_means) - 1
array_means = np.nan_to_num(array_means, nan=min_array_means)
rank_result = rankdata(-array_means, method="min").astype(
np.int32, copy=False
)
results["rank_%s" % key_name] = rank_result
|
A small helper to store the scores/times to the cv_results_
|
_store
|
python
|
scikit-learn/scikit-learn
|
sklearn/model_selection/_search.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/_search.py
|
BSD-3-Clause
|
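A numeric sketch of the statistics `_store` derives per candidate (standalone, using only NumPy/SciPy): each row of per-split scores is reduced to a mean, a (possibly weighted) standard deviation, and a 'min'-method rank where a larger mean gets a better rank.

import numpy as np
from scipy.stats import rankdata

scores = np.array([[0.90, 0.92, 0.88],   # candidate 0, one column per split
                   [0.85, 0.86, 0.84]])  # candidate 1
means = np.average(scores, axis=1)
stds = np.sqrt(np.average((scores - means[:, None]) ** 2, axis=1))
ranks = rankdata(-means, method="min").astype(np.int32)
print(means, stds, ranks)  # ranks -> [1 2]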
def get_metadata_routing(self):
"""Get metadata routing of this object.
Please check :ref:`User Guide <metadata_routing>` on how the routing
mechanism works.
.. versionadded:: 1.4
Returns
-------
routing : MetadataRouter
A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating
routing information.
"""
router = MetadataRouter(owner=self.__class__.__name__)
router.add(
estimator=self.estimator,
method_mapping=MethodMapping().add(caller="fit", callee="fit"),
)
scorer, _ = self._get_scorers()
router.add(
scorer=scorer,
method_mapping=MethodMapping()
.add(caller="score", callee="score")
.add(caller="fit", callee="score"),
)
router.add(
splitter=self.cv,
method_mapping=MethodMapping().add(caller="fit", callee="split"),
)
return router
|
Get metadata routing of this object.
Please check :ref:`User Guide <metadata_routing>` on how the routing
mechanism works.
.. versionadded:: 1.4
Returns
-------
routing : MetadataRouter
A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating
routing information.
|
get_metadata_routing
|
python
|
scikit-learn/scikit-learn
|
sklearn/model_selection/_search.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/_search.py
|
BSD-3-Clause
|
def _select_best_index(refit, refit_metric, results):
"""Custom refit callable to return the index of the best candidate.
We want the best candidate out of the last iteration. By default
BaseSearchCV would return the best candidate out of all iterations.
Currently, we only support a single metric, thus `refit` and
`refit_metric` are not required.
"""
last_iter = np.max(results["iter"])
last_iter_indices = np.flatnonzero(results["iter"] == last_iter)
test_scores = results["mean_test_score"][last_iter_indices]
# If all scores are NaNs there is no way to pick between them,
# so we (arbitrarily) declare the zero'th entry the best one
if np.isnan(test_scores).all():
best_idx = 0
else:
best_idx = np.nanargmax(test_scores)
return last_iter_indices[best_idx]
|
Custom refit callable to return the index of the best candidate.
We want the best candidate out of the last iteration. By default
BaseSearchCV would return the best candidate out of all iterations.
Currently, we only support a single metric, thus `refit` and
`refit_metric` are not required.
|
_select_best_index
|
python
|
scikit-learn/scikit-learn
|
sklearn/model_selection/_search_successive_halving.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/_search_successive_halving.py
|
BSD-3-Clause
|
def fit(self, X, y=None, **params):
"""Run fit with all sets of parameters.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : array-like, shape (n_samples,) or (n_samples, n_output), optional
Target relative to X for classification or regression;
None for unsupervised learning.
**params : dict of string -> object
Parameters passed to the ``fit`` method of the estimator.
Returns
-------
self : object
Instance of fitted estimator.
"""
self._checked_cv_orig = check_cv(
self.cv, y, classifier=is_classifier(self.estimator)
)
routed_params = self._get_routed_params_for_fit(params)
self._check_input_parameters(
X=X, y=y, split_params=routed_params.splitter.split
)
self._n_samples_orig = _num_samples(X)
super().fit(X, y=y, **params)
# Set best_score_: BaseSearchCV does not set it, as refit is a callable
self.best_score_ = self.cv_results_["mean_test_score"][self.best_index_]
return self
|
Run fit with all sets of parameters.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : array-like, shape (n_samples,) or (n_samples, n_output), optional
Target relative to X for classification or regression;
None for unsupervised learning.
**params : dict of string -> object
Parameters passed to the ``fit`` method of the estimator.
Returns
-------
self : object
Instance of fitted estimator.
|
fit
|
python
|
scikit-learn/scikit-learn
|
sklearn/model_selection/_search_successive_halving.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/_search_successive_halving.py
|
BSD-3-Clause
|
def split(self, X, y=None, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : array-like of shape (n_samples,)
The target variable for supervised learning problems.
groups : object
Always ignored, exists for compatibility.
Yields
------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
if groups is not None:
warnings.warn(
f"The groups parameter is ignored by {self.__class__.__name__}",
UserWarning,
)
return super().split(X, y, groups=groups)
|
Generate indices to split data into training and test set.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : array-like of shape (n_samples,)
The target variable for supervised learning problems.
groups : object
Always ignored, exists for compatibility.
Yields
------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
|
split
|
python
|
scikit-learn/scikit-learn
|
sklearn/model_selection/_split.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/_split.py
|
BSD-3-Clause
|
def split(self, X, y=None, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : array-like of shape (n_samples,)
The target variable for supervised learning problems.
groups : array-like of shape (n_samples,), default=None
Group labels for the samples used while splitting the dataset into
train/test set.
Yields
------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
X, y, groups = indexable(X, y, groups)
indices = np.arange(_num_samples(X))
for test_index in self._iter_test_masks(X, y, groups):
train_index = indices[np.logical_not(test_index)]
test_index = indices[test_index]
yield train_index, test_index
|
Generate indices to split data into training and test set.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : array-like of shape (n_samples,)
The target variable for supervised learning problems.
groups : array-like of shape (n_samples,), default=None
Group labels for the samples used while splitting the dataset into
train/test set.
Yields
------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
|
split
|
python
|
scikit-learn/scikit-learn
|
sklearn/model_selection/_split.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/_split.py
|
BSD-3-Clause
|
def _iter_test_masks(self, X=None, y=None, groups=None):
"""Generates boolean masks corresponding to test sets.
By default, delegates to _iter_test_indices(X, y, groups)
"""
for test_index in self._iter_test_indices(X, y, groups):
test_mask = np.zeros(_num_samples(X), dtype=bool)
test_mask[test_index] = True
yield test_mask
|
Generates boolean masks corresponding to test sets.
By default, delegates to _iter_test_indices(X, y, groups)
|
_iter_test_masks
|
python
|
scikit-learn/scikit-learn
|
sklearn/model_selection/_split.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/_split.py
|
BSD-3-Clause
|
def get_n_splits(self, X, y=None, groups=None):
"""Returns the number of splitting iterations in the cross-validator.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
if X is None:
raise ValueError("The 'X' parameter should not be None.")
return _num_samples(X)
|
Returns the number of splitting iterations in the cross-validator.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
|
get_n_splits
|
python
|
scikit-learn/scikit-learn
|
sklearn/model_selection/_split.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/_split.py
|
BSD-3-Clause
|
def get_n_splits(self, X, y=None, groups=None):
"""Returns the number of splitting iterations in the cross-validator.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
"""
if X is None:
raise ValueError("The 'X' parameter should not be None.")
return int(comb(_num_samples(X), self.p, exact=True))
|
Returns the number of splitting iterations in the cross-validator.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
|
get_n_splits
|
python
|
scikit-learn/scikit-learn
|
sklearn/model_selection/_split.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/_split.py
|
BSD-3-Clause
|
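A worked check of the split count above: with `n_samples=5` and `p=2`, `comb(5, 2)` gives 10 train/test partitions.

import numpy as np
from sklearn.model_selection import LeavePOut

X = np.arange(5).reshape(-1, 1)
lpo = LeavePOut(p=2)
print(lpo.get_n_splits(X))           # 10
print(sum(1 for _ in lpo.split(X)))  # 10, one split per 2-element test set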
def split(self, X, y=None, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : array-like of shape (n_samples,), default=None
The target variable for supervised learning problems.
groups : array-like of shape (n_samples,), default=None
Group labels for the samples used while splitting the dataset into
train/test set.
Yields
------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
X, y, groups = indexable(X, y, groups)
n_samples = _num_samples(X)
if self.n_splits > n_samples:
raise ValueError(
(
"Cannot have number of splits n_splits={0} greater"
" than the number of samples: n_samples={1}."
).format(self.n_splits, n_samples)
)
for train, test in super().split(X, y, groups):
yield train, test
|
Generate indices to split data into training and test set.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : array-like of shape (n_samples,), default=None
The target variable for supervised learning problems.
groups : array-like of shape (n_samples,), default=None
Group labels for the samples used while splitting the dataset into
train/test set.
Yields
------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
|
split
|
python
|
scikit-learn/scikit-learn
|
sklearn/model_selection/_split.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/_split.py
|
BSD-3-Clause
|
def split(self, X, y, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples
and `n_features` is the number of features.
Note that providing ``y`` is sufficient to generate the splits and
hence ``np.zeros(n_samples)`` may be used as a placeholder for
``X`` instead of actual training data.
y : array-like of shape (n_samples,)
The target variable for supervised learning problems.
Stratification is done based on the y labels.
groups : object
Always ignored, exists for compatibility.
Yields
------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
Notes
-----
Randomized CV splitters may return different results for each call of
split. You can make the results identical by setting `random_state`
to an integer.
"""
if groups is not None:
warnings.warn(
f"The groups parameter is ignored by {self.__class__.__name__}",
UserWarning,
)
y = check_array(y, input_name="y", ensure_2d=False, dtype=None)
return super().split(X, y, groups)
|
Generate indices to split data into training and test set.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples
and `n_features` is the number of features.
Note that providing ``y`` is sufficient to generate the splits and
hence ``np.zeros(n_samples)`` may be used as a placeholder for
``X`` instead of actual training data.
y : array-like of shape (n_samples,)
The target variable for supervised learning problems.
Stratification is done based on the y labels.
groups : object
Always ignored, exists for compatibility.
Yields
------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
Notes
-----
Randomized CV splitters may return different results for each call of
split. You can make the results identical by setting `random_state`
to an integer.
|
split
|
python
|
scikit-learn/scikit-learn
|
sklearn/model_selection/_split.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/_split.py
|
BSD-3-Clause
|
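A sketch of the note above about the placeholder `X`: stratification only needs `y`, so zeros of the right length can stand in for the training data when generating indices.

import numpy as np
from sklearn.model_selection import StratifiedKFold

y = np.array([0, 0, 0, 1, 1, 1])
skf = StratifiedKFold(n_splits=3)
for train_idx, test_idx in skf.split(np.zeros(len(y)), y):
    print(train_idx, test_idx, y[test_idx])  # every test fold contains both classes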
def split(self, X, y=None, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : array-like of shape (n_samples,)
Always ignored, exists for compatibility.
groups : array-like of shape (n_samples,)
Always ignored, exists for compatibility.
Yields
------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
if groups is not None:
warnings.warn(
f"The groups parameter is ignored by {self.__class__.__name__}",
UserWarning,
)
return self._split(X)
|
Generate indices to split data into training and test set.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : array-like of shape (n_samples,)
Always ignored, exists for compatibility.
groups : array-like of shape (n_samples,)
Always ignored, exists for compatibility.
Yields
------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
|
split
|
python
|
scikit-learn/scikit-learn
|
sklearn/model_selection/_split.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/_split.py
|
BSD-3-Clause
|
def _split(self, X):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples
and `n_features` is the number of features.
Yields
------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
(X,) = indexable(X)
n_samples = _num_samples(X)
n_splits = self.n_splits
n_folds = n_splits + 1
gap = self.gap
test_size = (
self.test_size if self.test_size is not None else n_samples // n_folds
)
# Make sure we have enough samples for the given split parameters
if n_folds > n_samples:
raise ValueError(
f"Cannot have number of folds={n_folds} greater"
f" than the number of samples={n_samples}."
)
if n_samples - gap - (test_size * n_splits) <= 0:
raise ValueError(
f"Too many splits={n_splits} for number of samples"
f"={n_samples} with test_size={test_size} and gap={gap}."
)
indices = np.arange(n_samples)
test_starts = range(n_samples - n_splits * test_size, n_samples, test_size)
for test_start in test_starts:
train_end = test_start - gap
if self.max_train_size and self.max_train_size < train_end:
yield (
indices[train_end - self.max_train_size : train_end],
indices[test_start : test_start + test_size],
)
else:
yield (
indices[:train_end],
indices[test_start : test_start + test_size],
)
|
Generate indices to split data into training and test set.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples
and `n_features` is the number of features.
Yields
------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
|
_split
|
python
|
scikit-learn/scikit-learn
|
sklearn/model_selection/_split.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/_split.py
|
BSD-3-Clause
|
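A worked example of the test-start arithmetic above: with 8 samples, `n_splits=3`, `gap=1` and the default `test_size = n_samples // (n_splits + 1) = 2`, the test windows start at indices 2, 4 and 6, and the sample just before each window is excluded from training by the gap.

import numpy as np
from sklearn.model_selection import TimeSeriesSplit

X = np.arange(8).reshape(-1, 1)
tscv = TimeSeriesSplit(n_splits=3, gap=1)
for train_idx, test_idx in tscv.split(X):
    print(train_idx, test_idx)
# [0]           [2 3]
# [0 1 2]       [4 5]
# [0 1 2 3 4]   [6 7]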
def split(self, X, y=None, groups=None):
"""Generates indices to split data into training and test set.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : array-like of shape (n_samples,)
The target variable for supervised learning problems.
groups : array-like of shape (n_samples,), default=None
Group labels for the samples used while splitting the dataset into
train/test set.
Yields
------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
n_repeats = self.n_repeats
rng = check_random_state(self.random_state)
for idx in range(n_repeats):
cv = self.cv(random_state=rng, shuffle=True, **self.cvargs)
for train_index, test_index in cv.split(X, y, groups):
yield train_index, test_index
|
Generates indices to split data into training and test set.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : array-like of shape (n_samples,)
The target variable for supervised learning problems.
groups : array-like of shape (n_samples,), default=None
Group labels for the samples used while splitting the dataset into
train/test set.
Yields
------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
|
split
|
python
|
scikit-learn/scikit-learn
|
sklearn/model_selection/_split.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/_split.py
|
BSD-3-Clause
|
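The `split` above simply re-instantiates the wrapped splitter `n_repeats` times, each time with a fresh shuffle seeded from the shared random state. A hedged usage sketch with `RepeatedKFold`, one of the public repeated splitters built on this method:

import numpy as np
from sklearn.model_selection import RepeatedKFold

X = np.arange(8).reshape(-1, 1)
rkf = RepeatedKFold(n_splits=2, n_repeats=2, random_state=0)
# 2 folds x 2 repeats -> 4 (train, test) pairs, reshuffled on each repeat
for train_idx, test_idx in rkf.split(X):
    print(len(train_idx), len(test_idx))  # 4 4 on every split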
def get_n_splits(self, X=None, y=None, groups=None):
"""Returns the number of splitting iterations in the cross-validator.
Parameters
----------
X : object
Always ignored, exists for compatibility.
``np.zeros(n_samples)`` may be used as a placeholder.
y : object
Always ignored, exists for compatibility.
``np.zeros(n_samples)`` may be used as a placeholder.
groups : array-like of shape (n_samples,), default=None
Group labels for the samples used while splitting the dataset into
train/test set.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
rng = check_random_state(self.random_state)
cv = self.cv(random_state=rng, shuffle=True, **self.cvargs)
return cv.get_n_splits(X, y, groups) * self.n_repeats
|
Returns the number of splitting iterations in the cross-validator.
Parameters
----------
X : object
Always ignored, exists for compatibility.
``np.zeros(n_samples)`` may be used as a placeholder.
y : object
Always ignored, exists for compatibility.
``np.zeros(n_samples)`` may be used as a placeholder.
groups : array-like of shape (n_samples,), default=None
Group labels for the samples used while splitting the dataset into
train/test set.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
|
get_n_splits
|
python
|
scikit-learn/scikit-learn
|
sklearn/model_selection/_split.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/_split.py
|
BSD-3-Clause
|
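Because the wrapped splitter is replayed `n_repeats` times, the reported number of splits is just the product of the two settings. A quick sketch, assuming scikit-learn:

from sklearn.model_selection import RepeatedStratifiedKFold

rskf = RepeatedStratifiedKFold(n_splits=3, n_repeats=4, random_state=0)
print(rskf.get_n_splits())  # 3 folds * 4 repeats = 12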
def split(self, X, y, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples
and `n_features` is the number of features.
Note that providing ``y`` is sufficient to generate the splits and
hence ``np.zeros(n_samples)`` may be used as a placeholder for
``X`` instead of actual training data.
y : array-like of shape (n_samples,)
The target variable for supervised learning problems.
Stratification is done based on the y labels.
groups : object
Always ignored, exists for compatibility.
Yields
------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
Notes
-----
Randomized CV splitters may return different results for each call of
split. You can make the results identical by setting `random_state`
to an integer.
"""
y = check_array(y, input_name="y", ensure_2d=False, dtype=None)
return super().split(X, y, groups=groups)
|
Generate indices to split data into training and test set.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples
and `n_features` is the number of features.
Note that providing ``y`` is sufficient to generate the splits and
hence ``np.zeros(n_samples)`` may be used as a placeholder for
``X`` instead of actual training data.
y : array-like of shape (n_samples,)
The target variable for supervised learning problems.
Stratification is done based on the y labels.
groups : object
Always ignored, exists for compatibility.
Yields
------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
Notes
-----
Randomized CV splitters may return different results for each call of
split. You can make the results identical by setting `random_state`
to an integer.
|
split
|
python
|
scikit-learn/scikit-learn
|
sklearn/model_selection/_split.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/_split.py
|
BSD-3-Clause
|
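Since stratification only needs the labels, `X` can be a placeholder, as the docstring notes. A small sketch assuming scikit-learn's public `StratifiedKFold`:

import numpy as np
from sklearn.model_selection import StratifiedKFold

y = np.array([0, 0, 0, 0, 1, 1, 1, 1])
X = np.zeros((len(y), 1))            # placeholder features; y drives the folds
skf = StratifiedKFold(n_splits=2)
for train_idx, test_idx in skf.split(X, y):
    print(np.bincount(y[test_idx]))  # [2 2] -> each test fold keeps the class balance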
def split(self, X, y=None, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : array-like of shape (n_samples,)
The target variable for supervised learning problems.
groups : array-like of shape (n_samples,), default=None
Group labels for the samples used while splitting the dataset into
train/test set.
Yields
------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
Notes
-----
Randomized CV splitters may return different results for each call of
split. You can make the results identical by setting `random_state`
to an integer.
"""
X, y, groups = indexable(X, y, groups)
for train, test in self._iter_indices(X, y, groups):
yield train, test
|
Generate indices to split data into training and test set.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : array-like of shape (n_samples,)
The target variable for supervised learning problems.
groups : array-like of shape (n_samples,), default=None
Group labels for the samples used while splitting the dataset into
train/test set.
Yields
------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
Notes
-----
Randomized CV splitters may return different results for each call of
split. You can make the results identical by setting `random_state`
to an integer.
|
split
|
python
|
scikit-learn/scikit-learn
|
sklearn/model_selection/_split.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/_split.py
|
BSD-3-Clause
|
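The underlying `_iter_indices` draws a fresh random permutation for every split, so successive test sets may overlap, unlike K-fold. A hedged sketch using the public `ShuffleSplit` built on this base class:

import numpy as np
from sklearn.model_selection import ShuffleSplit

X = np.arange(10).reshape(-1, 1)
ss = ShuffleSplit(n_splits=3, test_size=0.3, random_state=0)
for train_idx, test_idx in ss.split(X):
    print(len(train_idx), len(test_idx))  # 7 3 on every split, indices re-drawn each time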
def split(self, X, y, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples
and `n_features` is the number of features.
Note that providing ``y`` is sufficient to generate the splits and
hence ``np.zeros(n_samples)`` may be used as a placeholder for
``X`` instead of actual training data.
y : array-like of shape (n_samples,) or (n_samples, n_labels)
The target variable for supervised learning problems.
Stratification is done based on the y labels.
groups : object
Always ignored, exists for compatibility.
Yields
------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
Notes
-----
Randomized CV splitters may return different results for each call of
split. You can make the results identical by setting `random_state`
to an integer.
"""
if groups is not None:
warnings.warn(
f"The groups parameter is ignored by {self.__class__.__name__}",
UserWarning,
)
y = check_array(y, input_name="y", ensure_2d=False, dtype=None)
return super().split(X, y, groups)
|
Generate indices to split data into training and test set.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples
and `n_features` is the number of features.
Note that providing ``y`` is sufficient to generate the splits and
hence ``np.zeros(n_samples)`` may be used as a placeholder for
``X`` instead of actual training data.
y : array-like of shape (n_samples,) or (n_samples, n_labels)
The target variable for supervised learning problems.
Stratification is done based on the y labels.
groups : object
Always ignored, exists for compatibility.
Yields
------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
Notes
-----
Randomized CV splitters may return different results for each call of
split. You can make the results identical by setting `random_state`
to an integer.
|
split
|
python
|
scikit-learn/scikit-learn
|
sklearn/model_selection/_split.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/_split.py
|
BSD-3-Clause
|
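The stratified variant adds a label check and warns that `groups` is ignored. A short sketch with the public `StratifiedShuffleSplit`, assuming scikit-learn:

import numpy as np
from sklearn.model_selection import StratifiedShuffleSplit

y = np.array([0, 0, 0, 0, 1, 1, 1, 1])
X = np.zeros((len(y), 2))
sss = StratifiedShuffleSplit(n_splits=2, test_size=0.5, random_state=0)
for train_idx, test_idx in sss.split(X, y):
    print(np.bincount(y[test_idx]))  # [2 2] -> class proportions preserved in each draw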
def _validate_shuffle_split(n_samples, test_size, train_size, default_test_size=None):
"""
Validation helper to check if the train/test sizes are meaningful w.r.t. the
size of the data (n_samples).
"""
if test_size is None and train_size is None:
test_size = default_test_size
test_size_type = np.asarray(test_size).dtype.kind
train_size_type = np.asarray(train_size).dtype.kind
if (test_size_type == "i" and (test_size >= n_samples or test_size <= 0)) or (
test_size_type == "f" and (test_size <= 0 or test_size >= 1)
):
raise ValueError(
"test_size={0} should be either positive and smaller"
" than the number of samples {1} or a float in the "
"(0, 1) range".format(test_size, n_samples)
)
if (train_size_type == "i" and (train_size >= n_samples or train_size <= 0)) or (
train_size_type == "f" and (train_size <= 0 or train_size >= 1)
):
raise ValueError(
"train_size={0} should be either positive and smaller"
" than the number of samples {1} or a float in the "
"(0, 1) range".format(train_size, n_samples)
)
if train_size is not None and train_size_type not in ("i", "f"):
raise ValueError("Invalid value for train_size: {}".format(train_size))
if test_size is not None and test_size_type not in ("i", "f"):
raise ValueError("Invalid value for test_size: {}".format(test_size))
if train_size_type == "f" and test_size_type == "f" and train_size + test_size > 1:
raise ValueError(
"The sum of test_size and train_size = {}, should be in the (0, 1)"
" range. Reduce test_size and/or train_size.".format(train_size + test_size)
)
if test_size_type == "f":
n_test = ceil(test_size * n_samples)
elif test_size_type == "i":
n_test = float(test_size)
if train_size_type == "f":
n_train = floor(train_size * n_samples)
elif train_size_type == "i":
n_train = float(train_size)
if train_size is None:
n_train = n_samples - n_test
elif test_size is None:
n_test = n_samples - n_train
if n_train + n_test > n_samples:
raise ValueError(
"The sum of train_size and test_size = %d, "
"should be smaller than the number of "
"samples %d. Reduce test_size and/or "
"train_size." % (n_train + n_test, n_samples)
)
n_train, n_test = int(n_train), int(n_test)
if n_train == 0:
raise ValueError(
"With n_samples={}, test_size={} and train_size={}, the "
"resulting train set will be empty. Adjust any of the "
"aforementioned parameters.".format(n_samples, test_size, train_size)
)
return n_train, n_test
|
Validation helper to check if the train/test sizes are meaningful w.r.t. the
size of the data (n_samples).
|
_validate_shuffle_split
|
python
|
scikit-learn/scikit-learn
|
sklearn/model_selection/_split.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/_split.py
|
BSD-3-Clause
|
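As a worked example of the arithmetic above (this is a private helper, so the sketch reimplements the relevant branch rather than importing it): a float `test_size` is rounded up with `ceil`, a float `train_size` is rounded down with `floor`, and whichever of the two is `None` gets the complement.

from math import ceil

# n_samples=10, test_size=0.25, train_size=None (the train_test_split default)
n_samples, test_size = 10, 0.25
n_test = ceil(test_size * n_samples)   # ceil(2.5) = 3
n_train = n_samples - n_test           # train_size is None -> complement = 7
print(n_train, n_test)                 # 7 3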
def split(self, X=None, y=None, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
Yields
------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
if groups is not None:
warnings.warn(
f"The groups parameter is ignored by {self.__class__.__name__}",
UserWarning,
)
return self._split()
|
Generate indices to split data into training and test set.
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
Yields
------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
|
split
|
python
|
scikit-learn/scikit-learn
|
sklearn/model_selection/_split.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/_split.py
|
BSD-3-Clause
|
def _split(self):
"""Generate indices to split data into training and test set.
Yields
------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
ind = np.arange(len(self.test_fold))
for test_index in self._iter_test_masks():
train_index = ind[np.logical_not(test_index)]
test_index = ind[test_index]
yield train_index, test_index
|
Generate indices to split data into training and test set.
Yields
------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
|
_split
|
python
|
scikit-learn/scikit-learn
|
sklearn/model_selection/_split.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/_split.py
|
BSD-3-Clause
|
def _iter_test_masks(self):
"""Generates boolean masks corresponding to test sets."""
for f in self.unique_folds:
test_index = np.where(self.test_fold == f)[0]
test_mask = np.zeros(len(self.test_fold), dtype=bool)
test_mask[test_index] = True
yield test_mask
|
Generates boolean masks corresponding to test sets.
|
_iter_test_masks
|
python
|
scikit-learn/scikit-learn
|
sklearn/model_selection/_split.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/_split.py
|
BSD-3-Clause
|
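The three methods above belong to `PredefinedSplit`: `test_fold` assigns each sample to a test fold (or -1 to keep it out of every test set), `_iter_test_masks` turns that array into boolean masks, and `_split` converts the masks into index pairs. A brief sketch, assuming scikit-learn:

import numpy as np
from sklearn.model_selection import PredefinedSplit

# Sample 0 is never tested (-1); samples 1-2 form fold 0, samples 3-4 fold 1.
test_fold = [-1, 0, 0, 1, 1]
ps = PredefinedSplit(test_fold)
for train_idx, test_idx in ps.split():
    print(train_idx, test_idx)
# [0 3 4] [1 2]
# [0 1 2] [3 4]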
def split(self, X=None, y=None, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
Yields
------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
for train, test in self.cv:
yield train, test
|
Generate indices to split data into training and test set.
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
Yields
------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
|
split
|
python
|
scikit-learn/scikit-learn
|
sklearn/model_selection/_split.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/_split.py
|
BSD-3-Clause
|
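This wrapper is what `check_cv` (next entry) returns when handed a plain iterable of precomputed splits; the stored pairs are then replayed through the standard `split` interface. A hedged sketch with illustrative index arrays:

import numpy as np
from sklearn.model_selection import check_cv

custom_splits = [
    (np.array([0, 1, 2]), np.array([3, 4])),
    (np.array([2, 3, 4]), np.array([0, 1])),
]
cv = check_cv(custom_splits)      # wrapped in _CVIterableWrapper internally
for train_idx, test_idx in cv.split():
    print(train_idx, test_idx)    # the precomputed pairs, replayed in order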
def check_cv(cv=5, y=None, *, classifier=False):
"""Input checker utility for building a cross-validator.
Parameters
----------
cv : int, cross-validation generator, iterable or None, default=5
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 5-fold cross validation,
- integer, to specify the number of folds.
- :term:`CV splitter`,
- An iterable that generates (train, test) splits as arrays of indices.
For integer/None inputs, if classifier is True and ``y`` is either
binary or multiclass, :class:`StratifiedKFold` is used. In all other
cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
.. versionchanged:: 0.22
``cv`` default value changed from 3-fold to 5-fold.
y : array-like, default=None
The target variable for supervised learning problems.
classifier : bool, default=False
Whether the task is a classification task, in which case
stratified KFold will be used.
Returns
-------
checked_cv : a cross-validator instance.
The return value is a cross-validator which generates the train/test
splits via the ``split`` method.
Examples
--------
>>> from sklearn.model_selection import check_cv
>>> check_cv(cv=5, y=None, classifier=False)
KFold(...)
>>> check_cv(cv=5, y=[1, 1, 0, 0, 0, 0], classifier=True)
StratifiedKFold(...)
"""
cv = 5 if cv is None else cv
if isinstance(cv, numbers.Integral):
if (
classifier
and (y is not None)
and (type_of_target(y, input_name="y") in ("binary", "multiclass"))
):
return StratifiedKFold(cv)
else:
return KFold(cv)
if not hasattr(cv, "split") or isinstance(cv, str):
if not isinstance(cv, Iterable) or isinstance(cv, str):
raise ValueError(
"Expected cv as an integer, cross-validation "
"object (from sklearn.model_selection) "
"or an iterable. Got %s." % cv
)
return _CVIterableWrapper(cv)
return cv # New style cv objects are passed without any modification
|
Input checker utility for building a cross-validator.
Parameters
----------
cv : int, cross-validation generator, iterable or None, default=5
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 5-fold cross validation,
- integer, to specify the number of folds.
- :term:`CV splitter`,
- An iterable that generates (train, test) splits as arrays of indices.
For integer/None inputs, if classifier is True and ``y`` is either
binary or multiclass, :class:`StratifiedKFold` is used. In all other
cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
.. versionchanged:: 0.22
``cv`` default value changed from 3-fold to 5-fold.
y : array-like, default=None
The target variable for supervised learning problems.
classifier : bool, default=False
Whether the task is a classification task, in which case
stratified KFold will be used.
Returns
-------
checked_cv : a cross-validator instance.
The return value is a cross-validator which generates the train/test
splits via the ``split`` method.
Examples
--------
>>> from sklearn.model_selection import check_cv
>>> check_cv(cv=5, y=None, classifier=False)
KFold(...)
>>> check_cv(cv=5, y=[1, 1, 0, 0, 0, 0], classifier=True)
StratifiedKFold(...)
|
check_cv
|
python
|
scikit-learn/scikit-learn
|
sklearn/model_selection/_split.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/_split.py
|
BSD-3-Clause
|
def train_test_split(
*arrays,
test_size=None,
train_size=None,
random_state=None,
shuffle=True,
stratify=None,
):
"""Split arrays or matrices into random train and test subsets.
Quick utility that wraps input validation,
``next(ShuffleSplit().split(X, y))``, and application to input data
into a single call for splitting (and optionally subsampling) data into a
one-liner.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
*arrays : sequence of indexables with same length / shape[0]
Allowed inputs are lists, numpy arrays, scipy-sparse
matrices or pandas dataframes.
test_size : float or int, default=None
If float, should be between 0.0 and 1.0 and represent the proportion
of the dataset to include in the test split. If int, represents the
absolute number of test samples. If None, the value is set to the
complement of the train size. If ``train_size`` is also None, it will
be set to 0.25.
train_size : float or int, default=None
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int, RandomState instance or None, default=None
Controls the shuffling applied to the data before applying the split.
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
shuffle : bool, default=True
Whether or not to shuffle the data before splitting. If shuffle=False
then stratify must be None.
stratify : array-like, default=None
If not None, data is split in a stratified fashion, using this as
the class labels.
Read more in the :ref:`User Guide <stratification>`.
Returns
-------
splitting : list, length=2 * len(arrays)
List containing train-test split of inputs.
.. versionadded:: 0.16
If the input is sparse, the output will be a
``scipy.sparse.csr_matrix``. Else, output type is the same as the
input type.
Examples
--------
>>> import numpy as np
>>> from sklearn.model_selection import train_test_split
>>> X, y = np.arange(10).reshape((5, 2)), range(5)
>>> X
array([[0, 1],
[2, 3],
[4, 5],
[6, 7],
[8, 9]])
>>> list(y)
[0, 1, 2, 3, 4]
>>> X_train, X_test, y_train, y_test = train_test_split(
... X, y, test_size=0.33, random_state=42)
...
>>> X_train
array([[4, 5],
[0, 1],
[6, 7]])
>>> y_train
[2, 0, 3]
>>> X_test
array([[2, 3],
[8, 9]])
>>> y_test
[1, 4]
>>> train_test_split(y, shuffle=False)
[[0, 1, 2], [3, 4]]
>>> from sklearn import datasets
>>> iris = datasets.load_iris(as_frame=True)
>>> X, y = iris['data'], iris['target']
>>> X.head()
sepal length (cm) sepal width (cm) petal length (cm) petal width (cm)
0 5.1 3.5 1.4 0.2
1 4.9 3.0 1.4 0.2
2 4.7 3.2 1.3 0.2
3 4.6 3.1 1.5 0.2
4 5.0 3.6 1.4 0.2
>>> y.head()
0 0
1 0
2 0
3 0
4 0
...
>>> X_train, X_test, y_train, y_test = train_test_split(
... X, y, test_size=0.33, random_state=42)
...
>>> X_train.head()
sepal length (cm) sepal width (cm) petal length (cm) petal width (cm)
96 5.7 2.9 4.2 1.3
105 7.6 3.0 6.6 2.1
66 5.6 3.0 4.5 1.5
0 5.1 3.5 1.4 0.2
122 7.7 2.8 6.7 2.0
>>> y_train.head()
96 1
105 2
66 1
0 0
122 2
...
>>> X_test.head()
sepal length (cm) sepal width (cm) petal length (cm) petal width (cm)
73 6.1 2.8 4.7 1.2
18 5.7 3.8 1.7 0.3
118 7.7 2.6 6.9 2.3
78 6.0 2.9 4.5 1.5
76 6.8 2.8 4.8 1.4
>>> y_test.head()
73 1
18 0
118 2
78 1
76 1
...
"""
n_arrays = len(arrays)
if n_arrays == 0:
raise ValueError("At least one array required as input")
arrays = indexable(*arrays)
n_samples = _num_samples(arrays[0])
n_train, n_test = _validate_shuffle_split(
n_samples, test_size, train_size, default_test_size=0.25
)
if shuffle is False:
if stratify is not None:
raise ValueError(
"Stratified train/test split is not implemented for shuffle=False"
)
train = np.arange(n_train)
test = np.arange(n_train, n_train + n_test)
else:
if stratify is not None:
CVClass = StratifiedShuffleSplit
else:
CVClass = ShuffleSplit
cv = CVClass(test_size=n_test, train_size=n_train, random_state=random_state)
train, test = next(cv.split(X=arrays[0], y=stratify))
train, test = ensure_common_namespace_device(arrays[0], train, test)
return list(
chain.from_iterable(
(_safe_indexing(a, train), _safe_indexing(a, test)) for a in arrays
)
)
|
Split arrays or matrices into random train and test subsets.
Quick utility that wraps input validation,
``next(ShuffleSplit().split(X, y))``, and application to input data
into a single call for splitting (and optionally subsampling) data into a
one-liner.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
*arrays : sequence of indexables with same length / shape[0]
Allowed inputs are lists, numpy arrays, scipy-sparse
matrices or pandas dataframes.
test_size : float or int, default=None
If float, should be between 0.0 and 1.0 and represent the proportion
of the dataset to include in the test split. If int, represents the
absolute number of test samples. If None, the value is set to the
complement of the train size. If ``train_size`` is also None, it will
be set to 0.25.
train_size : float or int, default=None
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int, RandomState instance or None, default=None
Controls the shuffling applied to the data before applying the split.
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
shuffle : bool, default=True
Whether or not to shuffle the data before splitting. If shuffle=False
then stratify must be None.
stratify : array-like, default=None
If not None, data is split in a stratified fashion, using this as
the class labels.
Read more in the :ref:`User Guide <stratification>`.
Returns
-------
splitting : list, length=2 * len(arrays)
List containing train-test split of inputs.
.. versionadded:: 0.16
If the input is sparse, the output will be a
``scipy.sparse.csr_matrix``. Else, output type is the same as the
input type.
Examples
--------
>>> import numpy as np
>>> from sklearn.model_selection import train_test_split
>>> X, y = np.arange(10).reshape((5, 2)), range(5)
>>> X
array([[0, 1],
[2, 3],
[4, 5],
[6, 7],
[8, 9]])
>>> list(y)
[0, 1, 2, 3, 4]
>>> X_train, X_test, y_train, y_test = train_test_split(
... X, y, test_size=0.33, random_state=42)
...
>>> X_train
array([[4, 5],
[0, 1],
[6, 7]])
>>> y_train
[2, 0, 3]
>>> X_test
array([[2, 3],
[8, 9]])
>>> y_test
[1, 4]
>>> train_test_split(y, shuffle=False)
[[0, 1, 2], [3, 4]]
>>> from sklearn import datasets
>>> iris = datasets.load_iris(as_frame=True)
>>> X, y = iris['data'], iris['target']
>>> X.head()
sepal length (cm) sepal width (cm) petal length (cm) petal width (cm)
0 5.1 3.5 1.4 0.2
1 4.9 3.0 1.4 0.2
2 4.7 3.2 1.3 0.2
3 4.6 3.1 1.5 0.2
4 5.0 3.6 1.4 0.2
>>> y.head()
0 0
1 0
2 0
3 0
4 0
...
>>> X_train, X_test, y_train, y_test = train_test_split(
... X, y, test_size=0.33, random_state=42)
...
>>> X_train.head()
sepal length (cm) sepal width (cm) petal length (cm) petal width (cm)
96 5.7 2.9 4.2 1.3
105 7.6 3.0 6.6 2.1
66 5.6 3.0 4.5 1.5
0 5.1 3.5 1.4 0.2
122 7.7 2.8 6.7 2.0
>>> y_train.head()
96 1
105 2
66 1
0 0
122 2
...
>>> X_test.head()
sepal length (cm) sepal width (cm) petal length (cm) petal width (cm)
73 6.1 2.8 4.7 1.2
18 5.7 3.8 1.7 0.3
118 7.7 2.6 6.9 2.3
78 6.0 2.9 4.5 1.5
76 6.8 2.8 4.8 1.4
>>> y_test.head()
73 1
18 0
118 2
78 1
76 1
...
|
train_test_split
|
python
|
scikit-learn/scikit-learn
|
sklearn/model_selection/_split.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/_split.py
|
BSD-3-Clause
|
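The docstring examples above do not exercise `stratify`; a short hedged sketch showing that it preserves class proportions in both halves of the split:

import numpy as np
from sklearn.model_selection import train_test_split

y = np.array([0] * 8 + [1] * 4)               # 2:1 class imbalance
X = np.arange(24).reshape(12, 2)
X_tr, X_te, y_tr, y_te = train_test_split(
    X, y, test_size=0.25, stratify=y, random_state=0
)
print(np.bincount(y_tr), np.bincount(y_te))   # [6 3] [2 1] -> same 2:1 ratio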
def _pprint(params, offset=0, printer=repr):
"""Pretty print the dictionary 'params'
Parameters
----------
params : dict
The dictionary to pretty print
offset : int, default=0
The offset in characters to add at the begin of each line.
printer : callable, default=repr
The function to convert entries to strings, typically
the builtin str or repr
"""
# Do a multi-line justified repr:
options = np.get_printoptions()
np.set_printoptions(precision=5, threshold=64, edgeitems=2)
params_list = list()
this_line_length = offset
line_sep = ",\n" + (1 + offset // 2) * " "
for i, (k, v) in enumerate(sorted(params.items())):
if isinstance(v, float):
# use str for representing floating point numbers
# this way we get consistent representation across
# architectures and versions.
this_repr = "%s=%s" % (k, str(v))
else:
# use repr of the rest
this_repr = "%s=%s" % (k, printer(v))
if len(this_repr) > 500:
this_repr = this_repr[:300] + "..." + this_repr[-100:]
if i > 0:
if this_line_length + len(this_repr) >= 75 or "\n" in this_repr:
params_list.append(line_sep)
this_line_length = len(line_sep)
else:
params_list.append(", ")
this_line_length += 2
params_list.append(this_repr)
this_line_length += len(this_repr)
np.set_printoptions(**options)
lines = "".join(params_list)
# Strip trailing space to avoid nightmare in doctests
lines = "\n".join(l.rstrip(" ") for l in lines.split("\n"))
return lines
|
Pretty print the dictionary 'params'
Parameters
----------
params : dict
The dictionary to pretty print
offset : int, default=0
The offset in characters to add at the begin of each line.
printer : callable, default=repr
The function to convert entries to strings, typically
the builtin str or repr
|
_pprint
|
python
|
scikit-learn/scikit-learn
|
sklearn/model_selection/_split.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/_split.py
|
BSD-3-Clause
|
def _check_params_groups_deprecation(fit_params, params, groups, version):
"""A helper function to check deprecations on `groups` and `fit_params`.
# TODO(SLEP6): To be removed when set_config(enable_metadata_routing=False) is not
# possible.
"""
if params is not None and fit_params is not None:
raise ValueError(
"`params` and `fit_params` cannot both be provided. Pass parameters "
"via `params`. `fit_params` is deprecated and will be removed in "
f"version {version}."
)
elif fit_params is not None:
warnings.warn(
(
"`fit_params` is deprecated and will be removed in version {version}. "
"Pass parameters via `params` instead."
),
FutureWarning,
)
params = fit_params
params = {} if params is None else params
_check_groups_routing_disabled(groups)
return params
|
A helper function to check deprecations on `groups` and `fit_params`.
# TODO(SLEP6): To be removed when set_config(enable_metadata_routing=False) is not
# possible.
|
_check_params_groups_deprecation
|
python
|
scikit-learn/scikit-learn
|
sklearn/model_selection/_validation.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/_validation.py
|
BSD-3-Clause
|
def cross_validate(
estimator,
X,
y=None,
*,
groups=None,
scoring=None,
cv=None,
n_jobs=None,
verbose=0,
params=None,
pre_dispatch="2*n_jobs",
return_train_score=False,
return_estimator=False,
return_indices=False,
error_score=np.nan,
):
"""Evaluate metric(s) by cross-validation and also record fit/score times.
Read more in the :ref:`User Guide <multimetric_cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The data to fit. Can be for example a list, or an array.
y : array-like of shape (n_samples,) or (n_samples, n_outputs), default=None
The target variable to try to predict in the case of
supervised learning.
groups : array-like of shape (n_samples,), default=None
Group labels for the samples used while splitting the dataset into
train/test set. Only used in conjunction with a "Group" :term:`cv`
instance (e.g., :class:`GroupKFold`).
.. versionchanged:: 1.4
``groups`` can only be passed if metadata routing is not enabled
via ``sklearn.set_config(enable_metadata_routing=True)``. When routing
is enabled, pass ``groups`` alongside other metadata via the ``params``
argument instead. E.g.:
``cross_validate(..., params={'groups': groups})``.
scoring : str, callable, list, tuple, or dict, default=None
Strategy to evaluate the performance of the `estimator` across cross-validation
splits.
If `scoring` represents a single score, one can use:
- a single string (see :ref:`scoring_string_names`);
- a callable (see :ref:`scoring_callable`) that returns a single value.
- `None`, the `estimator`'s
:ref:`default evaluation criterion <scoring_api_overview>` is used.
If `scoring` represents multiple scores, one can use:
- a list or tuple of unique strings;
- a callable returning a dictionary where the keys are the metric
names and the values are the metric scores;
        - a dictionary with metric names as keys and callables as values.
See :ref:`multimetric_grid_search` for an example.
cv : int, cross-validation generator or an iterable, default=None
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 5-fold cross validation,
- int, to specify the number of folds in a `(Stratified)KFold`,
- :term:`CV splitter`,
- An iterable yielding (train, test) splits as arrays of indices.
For int/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used. These splitters are instantiated
with `shuffle=False` so the splits will be the same across calls.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
.. versionchanged:: 0.22
``cv`` default value if None changed from 3-fold to 5-fold.
n_jobs : int, default=None
Number of jobs to run in parallel. Training the estimator and computing
the score are parallelized over the cross-validation splits.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
verbose : int, default=0
The verbosity level.
params : dict, default=None
Parameters to pass to the underlying estimator's ``fit``, the scorer,
and the CV splitter.
.. versionadded:: 1.4
pre_dispatch : int or str, default='2*n_jobs'
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- An int, giving the exact number of total jobs that are spawned
- A str, giving an expression as a function of n_jobs, as in '2*n_jobs'
return_train_score : bool, default=False
Whether to include train scores.
Computing training scores is used to get insights on how different
parameter settings impact the overfitting/underfitting trade-off.
However computing the scores on the training set can be computationally
expensive and is not strictly required to select the parameters that
yield the best generalization performance.
.. versionadded:: 0.19
.. versionchanged:: 0.21
Default value was changed from ``True`` to ``False``
return_estimator : bool, default=False
Whether to return the estimators fitted on each split.
.. versionadded:: 0.20
return_indices : bool, default=False
Whether to return the train-test indices selected for each split.
.. versionadded:: 1.3
error_score : 'raise' or numeric, default=np.nan
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised.
If a numeric value is given, FitFailedWarning is raised.
.. versionadded:: 0.20
Returns
-------
scores : dict of float arrays of shape (n_splits,)
Array of scores of the estimator for each run of the cross validation.
A dict of arrays containing the score/time arrays for each scorer is
returned. The possible keys for this ``dict`` are:
``test_score``
The score array for test scores on each cv split.
Suffix ``_score`` in ``test_score`` changes to a specific
metric like ``test_r2`` or ``test_auc`` if there are
multiple scoring metrics in the scoring parameter.
``train_score``
The score array for train scores on each cv split.
Suffix ``_score`` in ``train_score`` changes to a specific
metric like ``train_r2`` or ``train_auc`` if there are
multiple scoring metrics in the scoring parameter.
This is available only if ``return_train_score`` parameter
is ``True``.
``fit_time``
The time for fitting the estimator on the train
set for each cv split.
``score_time``
The time for scoring the estimator on the test set for each
cv split. (Note: time for scoring on the train set is not
included even if ``return_train_score`` is set to ``True``).
``estimator``
The estimator objects for each cv split.
This is available only if ``return_estimator`` parameter
is set to ``True``.
``indices``
The train/test positional indices for each cv split. A dictionary
is returned where the keys are either `"train"` or `"test"`
and the associated values are a list of integer-dtyped NumPy
arrays with the indices. Available only if `return_indices=True`.
See Also
--------
cross_val_score : Run cross-validation for single metric evaluation.
cross_val_predict : Get predictions from each split of cross-validation for
diagnostic purposes.
sklearn.metrics.make_scorer : Make a scorer from a performance metric or
loss function.
Examples
--------
>>> from sklearn import datasets, linear_model
>>> from sklearn.model_selection import cross_validate
>>> from sklearn.metrics import make_scorer
>>> from sklearn.metrics import confusion_matrix
>>> from sklearn.svm import LinearSVC
>>> diabetes = datasets.load_diabetes()
>>> X = diabetes.data[:150]
>>> y = diabetes.target[:150]
>>> lasso = linear_model.Lasso()
Single metric evaluation using ``cross_validate``
>>> cv_results = cross_validate(lasso, X, y, cv=3)
>>> sorted(cv_results.keys())
['fit_time', 'score_time', 'test_score']
>>> cv_results['test_score']
array([0.3315057 , 0.08022103, 0.03531816])
Multiple metric evaluation using ``cross_validate``
    (please refer to the ``scoring`` parameter doc for more information)
>>> scores = cross_validate(lasso, X, y, cv=3,
... scoring=('r2', 'neg_mean_squared_error'),
... return_train_score=True)
>>> print(scores['test_neg_mean_squared_error'])
[-3635.5 -3573.3 -6114.7]
>>> print(scores['train_r2'])
[0.28009951 0.3908844 0.22784907]
"""
_check_groups_routing_disabled(groups)
X, y = indexable(X, y)
params = {} if params is None else params
cv = check_cv(cv, y, classifier=is_classifier(estimator))
scorers = check_scoring(
estimator, scoring=scoring, raise_exc=(error_score == "raise")
)
if _routing_enabled():
# For estimators, a MetadataRouter is created in get_metadata_routing
# methods. For these router methods, we create the router to use
# `process_routing` on it.
router = (
MetadataRouter(owner="cross_validate")
.add(
splitter=cv,
method_mapping=MethodMapping().add(caller="fit", callee="split"),
)
.add(
estimator=estimator,
# TODO(SLEP6): also pass metadata to the predict method for
# scoring?
method_mapping=MethodMapping().add(caller="fit", callee="fit"),
)
.add(
scorer=scorers,
method_mapping=MethodMapping().add(caller="fit", callee="score"),
)
)
try:
routed_params = process_routing(router, "fit", **params)
except UnsetMetadataPassedError as e:
# The default exception would mention `fit` since in the above
# `process_routing` code, we pass `fit` as the caller. However,
# the user is not calling `fit` directly, so we change the message
# to make it more suitable for this case.
raise UnsetMetadataPassedError(
message=str(e).replace("cross_validate.fit", "cross_validate"),
unrequested_params=e.unrequested_params,
routed_params=e.routed_params,
)
else:
routed_params = Bunch()
routed_params.splitter = Bunch(split={"groups": groups})
routed_params.estimator = Bunch(fit=params)
routed_params.scorer = Bunch(score={})
indices = cv.split(X, y, **routed_params.splitter.split)
if return_indices:
# materialize the indices since we need to store them in the returned dict
indices = list(indices)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
parallel = Parallel(n_jobs=n_jobs, verbose=verbose, pre_dispatch=pre_dispatch)
results = parallel(
delayed(_fit_and_score)(
clone(estimator),
X,
y,
scorer=scorers,
train=train,
test=test,
verbose=verbose,
parameters=None,
fit_params=routed_params.estimator.fit,
score_params=routed_params.scorer.score,
return_train_score=return_train_score,
return_times=True,
return_estimator=return_estimator,
error_score=error_score,
)
for train, test in indices
)
_warn_or_raise_about_fit_failures(results, error_score)
    # For callable scoring, the return type is only known after calling. If the
# return type is a dictionary, the error scores can now be inserted with
# the correct key.
if callable(scoring):
_insert_error_scores(results, error_score)
results = _aggregate_score_dicts(results)
ret = {}
ret["fit_time"] = results["fit_time"]
ret["score_time"] = results["score_time"]
if return_estimator:
ret["estimator"] = results["estimator"]
if return_indices:
ret["indices"] = {}
ret["indices"]["train"], ret["indices"]["test"] = zip(*indices)
test_scores_dict = _normalize_score_results(results["test_scores"])
if return_train_score:
train_scores_dict = _normalize_score_results(results["train_scores"])
for name in test_scores_dict:
ret["test_%s" % name] = test_scores_dict[name]
if return_train_score:
key = "train_%s" % name
ret[key] = train_scores_dict[name]
return ret
|
Evaluate metric(s) by cross-validation and also record fit/score times.
Read more in the :ref:`User Guide <multimetric_cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The data to fit. Can be for example a list, or an array.
y : array-like of shape (n_samples,) or (n_samples, n_outputs), default=None
The target variable to try to predict in the case of
supervised learning.
groups : array-like of shape (n_samples,), default=None
Group labels for the samples used while splitting the dataset into
train/test set. Only used in conjunction with a "Group" :term:`cv`
instance (e.g., :class:`GroupKFold`).
.. versionchanged:: 1.4
``groups`` can only be passed if metadata routing is not enabled
via ``sklearn.set_config(enable_metadata_routing=True)``. When routing
is enabled, pass ``groups`` alongside other metadata via the ``params``
argument instead. E.g.:
``cross_validate(..., params={'groups': groups})``.
scoring : str, callable, list, tuple, or dict, default=None
Strategy to evaluate the performance of the `estimator` across cross-validation
splits.
If `scoring` represents a single score, one can use:
- a single string (see :ref:`scoring_string_names`);
- a callable (see :ref:`scoring_callable`) that returns a single value.
- `None`, the `estimator`'s
:ref:`default evaluation criterion <scoring_api_overview>` is used.
If `scoring` represents multiple scores, one can use:
- a list or tuple of unique strings;
- a callable returning a dictionary where the keys are the metric
names and the values are the metric scores;
    - a dictionary with metric names as keys and callables as values.
See :ref:`multimetric_grid_search` for an example.
cv : int, cross-validation generator or an iterable, default=None
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 5-fold cross validation,
- int, to specify the number of folds in a `(Stratified)KFold`,
- :term:`CV splitter`,
- An iterable yielding (train, test) splits as arrays of indices.
For int/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used. These splitters are instantiated
with `shuffle=False` so the splits will be the same across calls.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
.. versionchanged:: 0.22
``cv`` default value if None changed from 3-fold to 5-fold.
n_jobs : int, default=None
Number of jobs to run in parallel. Training the estimator and computing
the score are parallelized over the cross-validation splits.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
verbose : int, default=0
The verbosity level.
params : dict, default=None
Parameters to pass to the underlying estimator's ``fit``, the scorer,
and the CV splitter.
.. versionadded:: 1.4
pre_dispatch : int or str, default='2*n_jobs'
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- An int, giving the exact number of total jobs that are spawned
- A str, giving an expression as a function of n_jobs, as in '2*n_jobs'
return_train_score : bool, default=False
Whether to include train scores.
Computing training scores is used to get insights on how different
parameter settings impact the overfitting/underfitting trade-off.
However computing the scores on the training set can be computationally
expensive and is not strictly required to select the parameters that
yield the best generalization performance.
.. versionadded:: 0.19
.. versionchanged:: 0.21
Default value was changed from ``True`` to ``False``
return_estimator : bool, default=False
Whether to return the estimators fitted on each split.
.. versionadded:: 0.20
return_indices : bool, default=False
Whether to return the train-test indices selected for each split.
.. versionadded:: 1.3
error_score : 'raise' or numeric, default=np.nan
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised.
If a numeric value is given, FitFailedWarning is raised.
.. versionadded:: 0.20
Returns
-------
scores : dict of float arrays of shape (n_splits,)
Array of scores of the estimator for each run of the cross validation.
A dict of arrays containing the score/time arrays for each scorer is
returned. The possible keys for this ``dict`` are:
``test_score``
The score array for test scores on each cv split.
Suffix ``_score`` in ``test_score`` changes to a specific
metric like ``test_r2`` or ``test_auc`` if there are
multiple scoring metrics in the scoring parameter.
``train_score``
The score array for train scores on each cv split.
Suffix ``_score`` in ``train_score`` changes to a specific
metric like ``train_r2`` or ``train_auc`` if there are
multiple scoring metrics in the scoring parameter.
This is available only if ``return_train_score`` parameter
is ``True``.
``fit_time``
The time for fitting the estimator on the train
set for each cv split.
``score_time``
The time for scoring the estimator on the test set for each
cv split. (Note: time for scoring on the train set is not
included even if ``return_train_score`` is set to ``True``).
``estimator``
The estimator objects for each cv split.
This is available only if ``return_estimator`` parameter
is set to ``True``.
``indices``
The train/test positional indices for each cv split. A dictionary
is returned where the keys are either `"train"` or `"test"`
and the associated values are a list of integer-dtyped NumPy
arrays with the indices. Available only if `return_indices=True`.
See Also
--------
cross_val_score : Run cross-validation for single metric evaluation.
cross_val_predict : Get predictions from each split of cross-validation for
diagnostic purposes.
sklearn.metrics.make_scorer : Make a scorer from a performance metric or
loss function.
Examples
--------
>>> from sklearn import datasets, linear_model
>>> from sklearn.model_selection import cross_validate
>>> from sklearn.metrics import make_scorer
>>> from sklearn.metrics import confusion_matrix
>>> from sklearn.svm import LinearSVC
>>> diabetes = datasets.load_diabetes()
>>> X = diabetes.data[:150]
>>> y = diabetes.target[:150]
>>> lasso = linear_model.Lasso()
Single metric evaluation using ``cross_validate``
>>> cv_results = cross_validate(lasso, X, y, cv=3)
>>> sorted(cv_results.keys())
['fit_time', 'score_time', 'test_score']
>>> cv_results['test_score']
array([0.3315057 , 0.08022103, 0.03531816])
Multiple metric evaluation using ``cross_validate``
(please refer to the ``scoring`` parameter doc for more information)
>>> scores = cross_validate(lasso, X, y, cv=3,
... scoring=('r2', 'neg_mean_squared_error'),
... return_train_score=True)
>>> print(scores['test_neg_mean_squared_error'])
[-3635.5 -3573.3 -6114.7]
>>> print(scores['train_r2'])
[0.28009951 0.3908844 0.22784907]
|
cross_validate
|
python
|
scikit-learn/scikit-learn
|
sklearn/model_selection/_validation.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/_validation.py
|
BSD-3-Clause
|
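The docstring already covers single- and multi-metric scoring; a further hedged sketch of the `return_estimator` / `return_indices` extras described in the Returns section (needs a recent scikit-learn for `return_indices`):

from sklearn import datasets, linear_model
from sklearn.model_selection import cross_validate

X, y = datasets.load_diabetes(return_X_y=True)
res = cross_validate(
    linear_model.Ridge(), X, y, cv=3, return_estimator=True, return_indices=True
)
print(len(res["estimator"]))          # one fitted Ridge per split -> 3
print(sorted(res["indices"]))         # ['test', 'train']
print(len(res["indices"]["train"]))   # 3 index arrays, one per split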
def _insert_error_scores(results, error_score):
"""Insert error in `results` by replacing them inplace with `error_score`.
This only applies to multimetric scores because `_fit_and_score` will
handle the single metric case.
"""
successful_score = None
failed_indices = []
for i, result in enumerate(results):
if result["fit_error"] is not None:
failed_indices.append(i)
elif successful_score is None:
successful_score = result["test_scores"]
if isinstance(successful_score, dict):
formatted_error = {name: error_score for name in successful_score}
for i in failed_indices:
results[i]["test_scores"] = formatted_error.copy()
if "train_scores" in results[i]:
results[i]["train_scores"] = formatted_error.copy()
|
Insert error in `results` by replacing them inplace with `error_score`.
This only applies to multimetric scores because `_fit_and_score` will
handle the single metric case.
|
_insert_error_scores
|
python
|
scikit-learn/scikit-learn
|
sklearn/model_selection/_validation.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/_validation.py
|
BSD-3-Clause
|
def _normalize_score_results(scores, scaler_score_key="score"):
"""Creates a scoring dictionary based on the type of `scores`"""
if isinstance(scores[0], dict):
# multimetric scoring
return _aggregate_score_dicts(scores)
    # scalar
return {scaler_score_key: scores}
|
Creates a scoring dictionary based on the type of `scores`
|
_normalize_score_results
|
python
|
scikit-learn/scikit-learn
|
sklearn/model_selection/_validation.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/_validation.py
|
BSD-3-Clause
|
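A hedged illustration of the two shapes this private helper normalizes, reimplemented locally rather than importing private code:

def normalize(scores, key="score"):
    # Mirrors the helper above: multimetric dicts become one list per metric name,
    # while a plain list of floats is wrapped under a single key.
    if isinstance(scores[0], dict):
        return {name: [s[name] for s in scores] for name in scores[0]}
    return {key: scores}

print(normalize([0.9, 0.8, 0.7]))
# {'score': [0.9, 0.8, 0.7]}
print(normalize([{"r2": 0.9, "mae": 1.2}, {"r2": 0.8, "mae": 1.5}]))
# {'r2': [0.9, 0.8], 'mae': [1.2, 1.5]}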
def _fit_and_score(
estimator,
X,
y,
*,
scorer,
train,
test,
verbose,
parameters,
fit_params,
score_params,
return_train_score=False,
return_parameters=False,
return_n_test_samples=False,
return_times=False,
return_estimator=False,
split_progress=None,
candidate_progress=None,
error_score=np.nan,
):
"""Fit estimator and compute scores for a given dataset split.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape (n_samples, n_features)
The data to fit.
y : array-like of shape (n_samples,) or (n_samples, n_outputs) or None
The target variable to try to predict in the case of
supervised learning.
scorer : A single callable or dict mapping scorer name to the callable
If it is a single callable, the return value for ``train_scores`` and
``test_scores`` is a single float.
For a dict, it should be one mapping the scorer name to the scorer
callable object / function.
The callable object / fn should have signature
``scorer(estimator, X, y)``.
train : array-like of shape (n_train_samples,)
Indices of training samples.
test : array-like of shape (n_test_samples,)
Indices of test samples.
verbose : int
The verbosity level.
error_score : 'raise' or numeric, default=np.nan
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised.
If a numeric value is given, FitFailedWarning is raised.
parameters : dict or None
Parameters to be set on the estimator.
fit_params : dict or None
Parameters that will be passed to ``estimator.fit``.
score_params : dict or None
Parameters that will be passed to the scorer.
return_train_score : bool, default=False
Compute and return score on training set.
return_parameters : bool, default=False
        Return parameters that have been used for the estimator.
split_progress : {list, tuple} of int, default=None
A list or tuple of format (<current_split_id>, <total_num_of_splits>).
candidate_progress : {list, tuple} of int, default=None
A list or tuple of format
(<current_candidate_id>, <total_number_of_candidates>).
return_n_test_samples : bool, default=False
Whether to return the ``n_test_samples``.
return_times : bool, default=False
Whether to return the fit/score times.
return_estimator : bool, default=False
Whether to return the fitted estimator.
Returns
-------
result : dict with the following attributes
train_scores : dict of scorer name -> float
Score on training set (for all the scorers),
returned only if `return_train_score` is `True`.
test_scores : dict of scorer name -> float
Score on testing set (for all the scorers).
n_test_samples : int
Number of test samples.
fit_time : float
Time spent for fitting in seconds.
score_time : float
Time spent for scoring in seconds.
parameters : dict or None
The parameters that have been evaluated.
estimator : estimator object
The fitted estimator.
fit_error : str or None
Traceback str if the fit failed, None if the fit succeeded.
"""
xp, _ = get_namespace(X)
X_device = device(X)
# Make sure that we can fancy index X even if train and test are provided
    # as NumPy arrays by NumPy-only cross-validation splitters.
train, test = xp.asarray(train, device=X_device), xp.asarray(test, device=X_device)
if not isinstance(error_score, numbers.Number) and error_score != "raise":
raise ValueError(
"error_score must be the string 'raise' or a numeric value. "
"(Hint: if using 'raise', please make sure that it has been "
"spelled correctly.)"
)
progress_msg = ""
if verbose > 2:
if split_progress is not None:
progress_msg = f" {split_progress[0] + 1}/{split_progress[1]}"
if candidate_progress and verbose > 9:
progress_msg += f"; {candidate_progress[0] + 1}/{candidate_progress[1]}"
if verbose > 1:
if parameters is None:
params_msg = ""
else:
sorted_keys = sorted(parameters) # Ensure deterministic o/p
params_msg = ", ".join(f"{k}={parameters[k]}" for k in sorted_keys)
if verbose > 9:
start_msg = f"[CV{progress_msg}] START {params_msg}"
print(f"{start_msg}{(80 - len(start_msg)) * '.'}")
# Adjust length of sample weights
fit_params = fit_params if fit_params is not None else {}
fit_params = _check_method_params(X, params=fit_params, indices=train)
score_params = score_params if score_params is not None else {}
score_params_train = _check_method_params(X, params=score_params, indices=train)
score_params_test = _check_method_params(X, params=score_params, indices=test)
if parameters is not None:
# here we clone the parameters, since sometimes the parameters
# themselves might be estimators, e.g. when we search over different
# estimators in a pipeline.
# ref: https://github.com/scikit-learn/scikit-learn/pull/26786
estimator = estimator.set_params(**clone(parameters, safe=False))
start_time = time.time()
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, y_test = _safe_split(estimator, X, y, test, train)
result = {}
try:
if y_train is None:
estimator.fit(X_train, **fit_params)
else:
estimator.fit(X_train, y_train, **fit_params)
except Exception:
# Note fit time as time until error
fit_time = time.time() - start_time
score_time = 0.0
if error_score == "raise":
raise
elif isinstance(error_score, numbers.Number):
if isinstance(scorer, _MultimetricScorer):
test_scores = {name: error_score for name in scorer._scorers}
if return_train_score:
train_scores = test_scores.copy()
else:
test_scores = error_score
if return_train_score:
train_scores = error_score
result["fit_error"] = format_exc()
else:
result["fit_error"] = None
fit_time = time.time() - start_time
test_scores = _score(
estimator, X_test, y_test, scorer, score_params_test, error_score
)
score_time = time.time() - start_time - fit_time
if return_train_score:
train_scores = _score(
estimator, X_train, y_train, scorer, score_params_train, error_score
)
if verbose > 1:
total_time = score_time + fit_time
end_msg = f"[CV{progress_msg}] END "
result_msg = params_msg + (";" if params_msg else "")
if verbose > 2:
if isinstance(test_scores, dict):
for scorer_name in sorted(test_scores):
result_msg += f" {scorer_name}: ("
if return_train_score:
scorer_scores = train_scores[scorer_name]
result_msg += f"train={scorer_scores:.3f}, "
result_msg += f"test={test_scores[scorer_name]:.3f})"
else:
result_msg += ", score="
if return_train_score:
result_msg += f"(train={train_scores:.3f}, test={test_scores:.3f})"
else:
result_msg += f"{test_scores:.3f}"
result_msg += f" total time={logger.short_format_time(total_time)}"
# Right align the result_msg
end_msg += "." * (80 - len(end_msg) - len(result_msg))
end_msg += result_msg
print(end_msg)
result["test_scores"] = test_scores
if return_train_score:
result["train_scores"] = train_scores
if return_n_test_samples:
result["n_test_samples"] = _num_samples(X_test)
if return_times:
result["fit_time"] = fit_time
result["score_time"] = score_time
if return_parameters:
result["parameters"] = parameters
if return_estimator:
result["estimator"] = estimator
return result
|
Fit estimator and compute scores for a given dataset split.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape (n_samples, n_features)
The data to fit.
y : array-like of shape (n_samples,) or (n_samples, n_outputs) or None
The target variable to try to predict in the case of
supervised learning.
scorer : A single callable or dict mapping scorer name to the callable
If it is a single callable, the return value for ``train_scores`` and
``test_scores`` is a single float.
For a dict, it should be one mapping the scorer name to the scorer
callable object / function.
The callable object / fn should have signature
``scorer(estimator, X, y)``.
train : array-like of shape (n_train_samples,)
Indices of training samples.
test : array-like of shape (n_test_samples,)
Indices of test samples.
verbose : int
The verbosity level.
error_score : 'raise' or numeric, default=np.nan
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised.
If a numeric value is given, FitFailedWarning is raised.
parameters : dict or None
Parameters to be set on the estimator.
fit_params : dict or None
Parameters that will be passed to ``estimator.fit``.
score_params : dict or None
Parameters that will be passed to the scorer.
return_train_score : bool, default=False
Compute and return score on training set.
return_parameters : bool, default=False
    Return parameters that have been used for the estimator.
split_progress : {list, tuple} of int, default=None
A list or tuple of format (<current_split_id>, <total_num_of_splits>).
candidate_progress : {list, tuple} of int, default=None
A list or tuple of format
(<current_candidate_id>, <total_number_of_candidates>).
return_n_test_samples : bool, default=False
Whether to return the ``n_test_samples``.
return_times : bool, default=False
Whether to return the fit/score times.
return_estimator : bool, default=False
Whether to return the fitted estimator.
Returns
-------
result : dict with the following attributes
train_scores : dict of scorer name -> float
Score on training set (for all the scorers),
returned only if `return_train_score` is `True`.
test_scores : dict of scorer name -> float
Score on testing set (for all the scorers).
n_test_samples : int
Number of test samples.
fit_time : float
Time spent for fitting in seconds.
score_time : float
Time spent for scoring in seconds.
parameters : dict or None
The parameters that have been evaluated.
estimator : estimator object
The fitted estimator.
fit_error : str or None
Traceback str if the fit failed, None if the fit succeeded.
|
_fit_and_score
|
python
|
scikit-learn/scikit-learn
|
sklearn/model_selection/_validation.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/_validation.py
|
BSD-3-Clause
|
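As a hedged orientation note (added here, not part of the original file): the per-split result dict assembled by `_fit_and_score` above is what the public `cross_validate` helper aggregates, so its entries surface as arrays keyed by name in the public output. A minimal sketch on a small built-in dataset:

# Illustrative only: the fit_time / score_time / test_scores / train_scores
# entries of the per-split result dict become aggregated arrays here.
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_validate

X, y = load_iris(return_X_y=True)
cv_results = cross_validate(
    LogisticRegression(max_iter=1000), X, y, cv=3, return_train_score=True
)
print(sorted(cv_results))  # ['fit_time', 'score_time', 'test_score', 'train_score']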
def _score(estimator, X_test, y_test, scorer, score_params, error_score="raise"):
"""Compute the score(s) of an estimator on a given test set.
    Will return a dict of floats if `scorer` is a `_MultimetricScorer`, otherwise a single
float is returned.
"""
score_params = {} if score_params is None else score_params
try:
if y_test is None:
scores = scorer(estimator, X_test, **score_params)
else:
scores = scorer(estimator, X_test, y_test, **score_params)
except Exception:
if isinstance(scorer, _MultimetricScorer):
# If `_MultimetricScorer` raises exception, the `error_score`
# parameter is equal to "raise".
raise
else:
if error_score == "raise":
raise
else:
scores = error_score
warnings.warn(
(
"Scoring failed. The score on this train-test partition for "
f"these parameters will be set to {error_score}. Details: \n"
f"{format_exc()}"
),
UserWarning,
)
# Check non-raised error messages in `_MultimetricScorer`
if isinstance(scorer, _MultimetricScorer):
exception_messages = [
(name, str_e) for name, str_e in scores.items() if isinstance(str_e, str)
]
if exception_messages:
# error_score != "raise"
for name, str_e in exception_messages:
scores[name] = error_score
warnings.warn(
(
"Scoring failed. The score on this train-test partition for "
f"these parameters will be set to {error_score}. Details: \n"
f"{str_e}"
),
UserWarning,
)
error_msg = "scoring must return a number, got %s (%s) instead. (scorer=%s)"
if isinstance(scores, dict):
for name, score in scores.items():
if hasattr(score, "item"):
with suppress(ValueError):
# e.g. unwrap memmapped scalars
score = score.item()
if not isinstance(score, numbers.Number):
raise ValueError(error_msg % (score, type(score), name))
scores[name] = score
else: # scalar
if hasattr(scores, "item"):
with suppress(ValueError):
# e.g. unwrap memmapped scalars
scores = scores.item()
if not isinstance(scores, numbers.Number):
raise ValueError(error_msg % (scores, type(scores), scorer))
return scores
|
Compute the score(s) of an estimator on a given test set.
Will return a dict of floats if `scorer` is a `_MultimetricScorer`, otherwise a single
float is returned.
|
_score
|
python
|
scikit-learn/scikit-learn
|
sklearn/model_selection/_validation.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/_validation.py
|
BSD-3-Clause
|
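A hedged usage note (not from the original source): the single-float versus dict-of-floats distinction that `_score` handles internally is visible through the public API, where a single scoring name yields plain scores and a list of names yields one `test_<name>` entry per metric.

# Single-metric vs. multi-metric scoring through the public API.
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score, cross_validate

X, y = load_iris(return_X_y=True)
est = LogisticRegression(max_iter=1000)
print(cross_val_score(est, X, y, cv=3, scoring="accuracy").shape)  # (3,) floats
res = cross_validate(est, X, y, cv=3, scoring=["accuracy", "f1_macro"])
print(sorted(k for k in res if k.startswith("test_")))
# ['test_accuracy', 'test_f1_macro']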
def cross_val_predict(
estimator,
X,
y=None,
*,
groups=None,
cv=None,
n_jobs=None,
verbose=0,
params=None,
pre_dispatch="2*n_jobs",
method="predict",
):
"""Generate cross-validated estimates for each input data point.
The data is split according to the cv parameter. Each sample belongs
to exactly one test set, and its prediction is computed with an
estimator fitted on the corresponding training set.
Passing these predictions into an evaluation metric may not be a valid
way to measure generalization performance. Results can differ from
    :func:`cross_validate` and :func:`cross_val_score` unless all test sets
have equal size and the metric decomposes over samples.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator
The estimator instance to use to fit the data. It must implement a `fit`
method and the method given by the `method` parameter.
X : {array-like, sparse matrix} of shape (n_samples, n_features)
        The data to fit. Can be, for example, a list or an array with at least 2 dimensions.
y : {array-like, sparse matrix} of shape (n_samples,) or (n_samples, n_outputs), \
default=None
The target variable to try to predict in the case of
supervised learning.
groups : array-like of shape (n_samples,), default=None
Group labels for the samples used while splitting the dataset into
train/test set. Only used in conjunction with a "Group" :term:`cv`
instance (e.g., :class:`GroupKFold`).
.. versionchanged:: 1.4
``groups`` can only be passed if metadata routing is not enabled
via ``sklearn.set_config(enable_metadata_routing=True)``. When routing
is enabled, pass ``groups`` alongside other metadata via the ``params``
argument instead. E.g.:
``cross_val_predict(..., params={'groups': groups})``.
cv : int, cross-validation generator or an iterable, default=None
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 5-fold cross validation,
- int, to specify the number of folds in a `(Stratified)KFold`,
- :term:`CV splitter`,
- An iterable that generates (train, test) splits as arrays of indices.
For int/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used. These splitters are instantiated
with `shuffle=False` so the splits will be the same across calls.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
.. versionchanged:: 0.22
``cv`` default value if None changed from 3-fold to 5-fold.
n_jobs : int, default=None
Number of jobs to run in parallel. Training the estimator and
predicting are parallelized over the cross-validation splits.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
verbose : int, default=0
The verbosity level.
params : dict, default=None
Parameters to pass to the underlying estimator's ``fit`` and the CV
splitter.
.. versionadded:: 1.4
pre_dispatch : int or str, default='2*n_jobs'
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately created and spawned. Use
this for lightweight and fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are spawned
- A str, giving an expression as a function of n_jobs, as in '2*n_jobs'
method : {'predict', 'predict_proba', 'predict_log_proba', \
'decision_function'}, default='predict'
The method to be invoked by `estimator`.
Returns
-------
predictions : ndarray
This is the result of calling `method`. Shape:
        - When `method` is 'predict' and in the special case where `method` is
'decision_function' and the target is binary: (n_samples,)
- When `method` is one of {'predict_proba', 'predict_log_proba',
'decision_function'} (unless special case above):
(n_samples, n_classes)
- If `estimator` is :term:`multioutput`, an extra dimension
'n_outputs' is added to the end of each shape above.
See Also
--------
cross_val_score : Calculate score for each CV split.
cross_validate : Calculate one or more scores and timings for each CV
split.
Notes
-----
In the case that one or more classes are absent in a training portion, a
default score needs to be assigned to all instances for that class if
``method`` produces columns per class, as in {'decision_function',
'predict_proba', 'predict_log_proba'}. For ``predict_proba`` this value is
0. In order to ensure finite output, we approximate negative infinity by
the minimum finite float value for the dtype in other cases.
Examples
--------
>>> from sklearn import datasets, linear_model
>>> from sklearn.model_selection import cross_val_predict
>>> diabetes = datasets.load_diabetes()
>>> X = diabetes.data[:150]
>>> y = diabetes.target[:150]
>>> lasso = linear_model.Lasso()
>>> y_pred = cross_val_predict(lasso, X, y, cv=3)
"""
_check_groups_routing_disabled(groups)
X, y = indexable(X, y)
params = {} if params is None else params
if _routing_enabled():
# For estimators, a MetadataRouter is created in get_metadata_routing
# methods. For these router methods, we create the router to use
# `process_routing` on it.
router = (
MetadataRouter(owner="cross_val_predict")
.add(
splitter=cv,
method_mapping=MethodMapping().add(caller="fit", callee="split"),
)
.add(
estimator=estimator,
# TODO(SLEP6): also pass metadata for the predict method.
method_mapping=MethodMapping().add(caller="fit", callee="fit"),
)
)
try:
routed_params = process_routing(router, "fit", **params)
except UnsetMetadataPassedError as e:
# The default exception would mention `fit` since in the above
# `process_routing` code, we pass `fit` as the caller. However,
# the user is not calling `fit` directly, so we change the message
# to make it more suitable for this case.
raise UnsetMetadataPassedError(
message=str(e).replace("cross_val_predict.fit", "cross_val_predict"),
unrequested_params=e.unrequested_params,
routed_params=e.routed_params,
)
else:
routed_params = Bunch()
routed_params.splitter = Bunch(split={"groups": groups})
routed_params.estimator = Bunch(fit=params)
cv = check_cv(cv, y, classifier=is_classifier(estimator))
splits = list(cv.split(X, y, **routed_params.splitter.split))
test_indices = np.concatenate([test for _, test in splits])
if not _check_is_permutation(test_indices, _num_samples(X)):
raise ValueError("cross_val_predict only works for partitions")
# If classification methods produce multiple columns of output,
# we need to manually encode classes to ensure consistent column ordering.
encode = (
method in ["decision_function", "predict_proba", "predict_log_proba"]
and y is not None
)
if encode:
y = np.asarray(y)
if y.ndim == 1:
le = LabelEncoder()
y = le.fit_transform(y)
elif y.ndim == 2:
y_enc = np.zeros_like(y, dtype=int)
for i_label in range(y.shape[1]):
y_enc[:, i_label] = LabelEncoder().fit_transform(y[:, i_label])
y = y_enc
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
parallel = Parallel(n_jobs=n_jobs, verbose=verbose, pre_dispatch=pre_dispatch)
predictions = parallel(
delayed(_fit_and_predict)(
clone(estimator),
X,
y,
train,
test,
routed_params.estimator.fit,
method,
)
for train, test in splits
)
inv_test_indices = np.empty(len(test_indices), dtype=int)
inv_test_indices[test_indices] = np.arange(len(test_indices))
if sp.issparse(predictions[0]):
predictions = sp.vstack(predictions, format=predictions[0].format)
elif encode and isinstance(predictions[0], list):
# `predictions` is a list of method outputs from each fold.
# If each of those is also a list, then treat this as a
# multioutput-multiclass task. We need to separately concatenate
# the method outputs for each label into an `n_labels` long list.
n_labels = y.shape[1]
concat_pred = []
for i_label in range(n_labels):
label_preds = np.concatenate([p[i_label] for p in predictions])
concat_pred.append(label_preds)
predictions = concat_pred
else:
predictions = np.concatenate(predictions)
if isinstance(predictions, list):
return [p[inv_test_indices] for p in predictions]
else:
return predictions[inv_test_indices]
|
Generate cross-validated estimates for each input data point.
The data is split according to the cv parameter. Each sample belongs
to exactly one test set, and its prediction is computed with an
estimator fitted on the corresponding training set.
Passing these predictions into an evaluation metric may not be a valid
way to measure generalization performance. Results can differ from
:func:`cross_validate` and :func:`cross_val_score` unless all test sets
have equal size and the metric decomposes over samples.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator
The estimator instance to use to fit the data. It must implement a `fit`
method and the method given by the `method` parameter.
X : {array-like, sparse matrix} of shape (n_samples, n_features)
    The data to fit. Can be, for example, a list or an array with at least 2 dimensions.
y : {array-like, sparse matrix} of shape (n_samples,) or (n_samples, n_outputs), default=None
The target variable to try to predict in the case of
supervised learning.
groups : array-like of shape (n_samples,), default=None
Group labels for the samples used while splitting the dataset into
train/test set. Only used in conjunction with a "Group" :term:`cv`
instance (e.g., :class:`GroupKFold`).
.. versionchanged:: 1.4
``groups`` can only be passed if metadata routing is not enabled
via ``sklearn.set_config(enable_metadata_routing=True)``. When routing
is enabled, pass ``groups`` alongside other metadata via the ``params``
argument instead. E.g.:
``cross_val_predict(..., params={'groups': groups})``.
cv : int, cross-validation generator or an iterable, default=None
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 5-fold cross validation,
- int, to specify the number of folds in a `(Stratified)KFold`,
- :term:`CV splitter`,
- An iterable that generates (train, test) splits as arrays of indices.
For int/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used. These splitters are instantiated
with `shuffle=False` so the splits will be the same across calls.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
.. versionchanged:: 0.22
``cv`` default value if None changed from 3-fold to 5-fold.
n_jobs : int, default=None
Number of jobs to run in parallel. Training the estimator and
predicting are parallelized over the cross-validation splits.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
verbose : int, default=0
The verbosity level.
params : dict, default=None
Parameters to pass to the underlying estimator's ``fit`` and the CV
splitter.
.. versionadded:: 1.4
pre_dispatch : int or str, default='2*n_jobs'
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately created and spawned. Use
this for lightweight and fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are spawned
- A str, giving an expression as a function of n_jobs, as in '2*n_jobs'
method : {'predict', 'predict_proba', 'predict_log_proba', 'decision_function'}, default='predict'
The method to be invoked by `estimator`.
Returns
-------
predictions : ndarray
This is the result of calling `method`. Shape:
    - When `method` is 'predict' and in the special case where `method` is
'decision_function' and the target is binary: (n_samples,)
- When `method` is one of {'predict_proba', 'predict_log_proba',
'decision_function'} (unless special case above):
(n_samples, n_classes)
- If `estimator` is :term:`multioutput`, an extra dimension
'n_outputs' is added to the end of each shape above.
See Also
--------
cross_val_score : Calculate score for each CV split.
cross_validate : Calculate one or more scores and timings for each CV
split.
Notes
-----
In the case that one or more classes are absent in a training portion, a
default score needs to be assigned to all instances for that class if
``method`` produces columns per class, as in {'decision_function',
'predict_proba', 'predict_log_proba'}. For ``predict_proba`` this value is
0. In order to ensure finite output, we approximate negative infinity by
the minimum finite float value for the dtype in other cases.
Examples
--------
>>> from sklearn import datasets, linear_model
>>> from sklearn.model_selection import cross_val_predict
>>> diabetes = datasets.load_diabetes()
>>> X = diabetes.data[:150]
>>> y = diabetes.target[:150]
>>> lasso = linear_model.Lasso()
>>> y_pred = cross_val_predict(lasso, X, y, cv=3)
|
cross_val_predict
|
python
|
scikit-learn/scikit-learn
|
sklearn/model_selection/_validation.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/_validation.py
|
BSD-3-Clause
|
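A small hedged example (dataset and estimator are arbitrary choices, added for illustration) of the `predict_proba` return shape documented above: one row per sample, one column per class, with every prediction made out of fold.

from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_predict

X, y = make_classification(
    n_samples=60, n_classes=3, n_informative=4, random_state=0
)
# method="predict_proba" returns an (n_samples, n_classes) array.
proba = cross_val_predict(
    LogisticRegression(max_iter=1000), X, y, cv=3, method="predict_proba"
)
print(proba.shape)  # (60, 3)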
def _fit_and_predict(estimator, X, y, train, test, fit_params, method):
"""Fit estimator and predict values for a given dataset split.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit' and 'predict'
The object to use to fit the data.
X : array-like of shape (n_samples, n_features)
The data to fit.
.. versionchanged:: 0.20
X is only required to be an object with finite length or shape now
y : array-like of shape (n_samples,) or (n_samples, n_outputs) or None
The target variable to try to predict in the case of
supervised learning.
train : array-like of shape (n_train_samples,)
Indices of training samples.
test : array-like of shape (n_test_samples,)
Indices of test samples.
fit_params : dict or None
Parameters that will be passed to ``estimator.fit``.
method : str
Invokes the passed method name of the passed estimator.
Returns
-------
predictions : sequence
Result of calling 'estimator.method'
"""
# Adjust length of sample weights
fit_params = fit_params if fit_params is not None else {}
fit_params = _check_method_params(X, params=fit_params, indices=train)
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, _ = _safe_split(estimator, X, y, test, train)
if y_train is None:
estimator.fit(X_train, **fit_params)
else:
estimator.fit(X_train, y_train, **fit_params)
func = getattr(estimator, method)
predictions = func(X_test)
encode = (
method in ["decision_function", "predict_proba", "predict_log_proba"]
and y is not None
)
if encode:
if isinstance(predictions, list):
predictions = [
_enforce_prediction_order(
estimator.classes_[i_label],
predictions[i_label],
n_classes=len(set(y[:, i_label])),
method=method,
)
for i_label in range(len(predictions))
]
else:
# A 2D y array should be a binary label indicator matrix
n_classes = len(set(y)) if y.ndim == 1 else y.shape[1]
predictions = _enforce_prediction_order(
estimator.classes_, predictions, n_classes, method
)
return predictions
|
Fit estimator and predict values for a given dataset split.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit' and 'predict'
The object to use to fit the data.
X : array-like of shape (n_samples, n_features)
The data to fit.
.. versionchanged:: 0.20
X is only required to be an object with finite length or shape now
y : array-like of shape (n_samples,) or (n_samples, n_outputs) or None
The target variable to try to predict in the case of
supervised learning.
train : array-like of shape (n_train_samples,)
Indices of training samples.
test : array-like of shape (n_test_samples,)
Indices of test samples.
fit_params : dict or None
Parameters that will be passed to ``estimator.fit``.
method : str
Invokes the passed method name of the passed estimator.
Returns
-------
predictions : sequence
Result of calling 'estimator.method'
|
_fit_and_predict
|
python
|
scikit-learn/scikit-learn
|
sklearn/model_selection/_validation.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/_validation.py
|
BSD-3-Clause
|
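For orientation, a minimal sketch (added here, not taken from the source) of what `_fit_and_predict` does for one split: fit on the train indices, call the requested method on the test indices, and let the caller scatter the results back into position.

import numpy as np
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import KFold

X, y = load_iris(return_X_y=True)
preds = np.empty_like(y)
for train, test in KFold(n_splits=5, shuffle=True, random_state=0).split(X):
    # Fit on the training rows of this fold, predict its held-out rows.
    est = LogisticRegression(max_iter=1000).fit(X[train], y[train])
    preds[test] = est.predict(X[test])
print((preds == y).mean())  # out-of-fold accuracy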
def _enforce_prediction_order(classes, predictions, n_classes, method):
"""Ensure that prediction arrays have correct column order
When doing cross-validation, if one or more classes are
not present in the subset of data used for training,
then the output prediction array might not have the same
columns as other folds. Use the list of class names
(assumed to be ints) to enforce the correct column order.
Note that `classes` is the list of classes in this fold
(a subset of the classes in the full training set)
and `n_classes` is the number of classes in the full training set.
"""
if n_classes != len(classes):
recommendation = (
"To fix this, use a cross-validation "
"technique resulting in properly "
"stratified folds"
)
warnings.warn(
"Number of classes in training fold ({}) does "
"not match total number of classes ({}). "
"Results may not be appropriate for your use case. "
"{}".format(len(classes), n_classes, recommendation),
RuntimeWarning,
)
if method == "decision_function":
if predictions.ndim == 2 and predictions.shape[1] != len(classes):
# This handles the case when the shape of predictions
# does not match the number of classes used to train
# it with. This case is found when sklearn.svm.SVC is
# set to `decision_function_shape='ovo'`.
raise ValueError(
"Output shape {} of {} does not match "
"number of classes ({}) in fold. "
"Irregular decision_function outputs "
"are not currently supported by "
"cross_val_predict".format(predictions.shape, method, len(classes))
)
if len(classes) <= 2:
# In this special case, `predictions` contains a 1D array.
raise ValueError(
"Only {} class/es in training fold, but {} "
"in overall dataset. This "
"is not supported for decision_function "
"with imbalanced folds. {}".format(
len(classes), n_classes, recommendation
)
)
float_min = np.finfo(predictions.dtype).min
default_values = {
"decision_function": float_min,
"predict_log_proba": float_min,
"predict_proba": 0,
}
predictions_for_all_classes = np.full(
(_num_samples(predictions), n_classes),
default_values[method],
dtype=predictions.dtype,
)
predictions_for_all_classes[:, classes] = predictions
predictions = predictions_for_all_classes
return predictions
|
Ensure that prediction arrays have correct column order
When doing cross-validation, if one or more classes are
not present in the subset of data used for training,
then the output prediction array might not have the same
columns as other folds. Use the list of class names
(assumed to be ints) to enforce the correct column order.
Note that `classes` is the list of classes in this fold
(a subset of the classes in the full training set)
and `n_classes` is the number of classes in the full training set.
|
_enforce_prediction_order
|
python
|
scikit-learn/scikit-learn
|
sklearn/model_selection/_validation.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/_validation.py
|
BSD-3-Clause
|
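A plain-numpy sketch of the column-alignment idea in `_enforce_prediction_order` (the values are illustrative, not from the source): a fold trained only on classes [0, 2] produces two probability columns, which are scattered into a three-column array so the unseen class 1 keeps the default value of 0.

import numpy as np

fold_classes = np.array([0, 2])     # classes present in this training fold
fold_proba = np.array([[0.7, 0.3],  # predict_proba output of that fold
                       [0.2, 0.8]])
n_classes = 3                       # classes in the full dataset
padded = np.zeros((fold_proba.shape[0], n_classes))
padded[:, fold_classes] = fold_proba
print(padded)
# [[0.7 0.  0.3]
#  [0.2 0.  0.8]]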
def _check_is_permutation(indices, n_samples):
"""Check whether indices is a reordering of the array np.arange(n_samples)
Parameters
----------
indices : ndarray
int array to test
n_samples : int
number of expected elements
Returns
-------
is_partition : bool
        True iff sorted(indices) equals np.arange(n_samples)
"""
if len(indices) != n_samples:
return False
hit = np.zeros(n_samples, dtype=bool)
hit[indices] = True
if not np.all(hit):
return False
return True
|
Check whether indices is a reordering of the array np.arange(n_samples)
Parameters
----------
indices : ndarray
int array to test
n_samples : int
number of expected elements
Returns
-------
is_partition : bool
    True iff sorted(indices) equals np.arange(n_samples)
|
_check_is_permutation
|
python
|
scikit-learn/scikit-learn
|
sklearn/model_selection/_validation.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/_validation.py
|
BSD-3-Clause
|
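A quick illustration (made-up indices) of the boolean-mask check above: the concatenated test indices of a valid split must cover 0..n_samples-1 exactly once.

import numpy as np

def is_permutation(indices, n_samples):
    # Same idea as _check_is_permutation: every position hit, none missing.
    if len(indices) != n_samples:
        return False
    hit = np.zeros(n_samples, dtype=bool)
    hit[indices] = True
    return bool(np.all(hit))

print(is_permutation(np.array([2, 0, 1, 4, 3]), 5))  # True
print(is_permutation(np.array([0, 0, 1, 2, 3]), 5))  # False: index 4 never appears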
def permutation_test_score(
estimator,
X,
y,
*,
groups=None,
cv=None,
n_permutations=100,
n_jobs=None,
random_state=0,
verbose=0,
scoring=None,
fit_params=None,
params=None,
):
"""Evaluate the significance of a cross-validated score with permutations.
    Permutes targets to generate 'randomized data' and computes the empirical
p-value against the null hypothesis that features and targets are
independent.
The p-value represents the fraction of randomized data sets where the
estimator performed as well or better than on the original data. A small
p-value suggests that there is a real dependency between features and
targets which has been used by the estimator to give good predictions.
    A large p-value may be due to a lack of real dependency between features
    and targets, or to the estimator not being able to use the dependency to
    give good predictions.
Read more in the :ref:`User Guide <permutation_test_score>`.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like of shape (n_samples,) or (n_samples, n_outputs) or None
The target variable to try to predict in the case of
supervised learning.
groups : array-like of shape (n_samples,), default=None
Labels to constrain permutation within groups, i.e. ``y`` values
are permuted among samples with the same group identifier.
When not specified, ``y`` values are permuted among all samples.
When a grouped cross-validator is used, the group labels are
also passed on to the ``split`` method of the cross-validator. The
cross-validator uses them for grouping the samples while splitting
the dataset into train/test set.
.. versionchanged:: 1.6
``groups`` can only be passed if metadata routing is not enabled
via ``sklearn.set_config(enable_metadata_routing=True)``. When routing
is enabled, pass ``groups`` alongside other metadata via the ``params``
argument instead. E.g.:
``permutation_test_score(..., params={'groups': groups})``.
cv : int, cross-validation generator or an iterable, default=None
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- `None`, to use the default 5-fold cross validation,
- int, to specify the number of folds in a `(Stratified)KFold`,
- :term:`CV splitter`,
- An iterable yielding (train, test) splits as arrays of indices.
For `int`/`None` inputs, if the estimator is a classifier and `y` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used. These splitters are instantiated
with `shuffle=False` so the splits will be the same across calls.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
.. versionchanged:: 0.22
`cv` default value if `None` changed from 3-fold to 5-fold.
n_permutations : int, default=100
Number of times to permute ``y``.
n_jobs : int, default=None
Number of jobs to run in parallel. Training the estimator and computing
the cross-validated score are parallelized over the permutations.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
random_state : int, RandomState instance or None, default=0
Pass an int for reproducible output for permutation of
``y`` values among samples. See :term:`Glossary <random_state>`.
verbose : int, default=0
The verbosity level.
scoring : str or callable, default=None
Scoring method to use to evaluate the predictions on the validation set.
- str: see :ref:`scoring_string_names` for options.
- callable: a scorer callable object (e.g., function) with signature
``scorer(estimator, X, y)``, which should return only a single value.
See :ref:`scoring_callable` for details.
- `None`: the `estimator`'s
:ref:`default evaluation criterion <scoring_api_overview>` is used.
fit_params : dict, default=None
Parameters to pass to the fit method of the estimator.
.. deprecated:: 1.6
            This parameter is deprecated and will be removed in version 1.8. Use
``params`` instead.
params : dict, default=None
Parameters to pass to the `fit` method of the estimator, the scorer
and the cv splitter.
- If `enable_metadata_routing=False` (default): Parameters directly passed to
the `fit` method of the estimator.
- If `enable_metadata_routing=True`: Parameters safely routed to the `fit`
method of the estimator, `cv` object and `scorer`. See :ref:`Metadata Routing
User Guide <metadata_routing>` for more details.
.. versionadded:: 1.6
Returns
-------
score : float
The true score without permuting targets.
permutation_scores : array of shape (n_permutations,)
        The scores obtained for each permutation.
pvalue : float
The p-value, which approximates the probability that the score would
be obtained by chance. This is calculated as:
`(C + 1) / (n_permutations + 1)`
Where C is the number of permutations whose score >= the true score.
The best possible p-value is 1/(n_permutations + 1), the worst is 1.0.
Notes
-----
This function implements Test 1 in:
Ojala and Garriga. `Permutation Tests for Studying Classifier Performance
<http://www.jmlr.org/papers/volume11/ojala10a/ojala10a.pdf>`_. The
Journal of Machine Learning Research (2010) vol. 11
Examples
--------
>>> from sklearn.datasets import make_classification
>>> from sklearn.linear_model import LogisticRegression
>>> from sklearn.model_selection import permutation_test_score
>>> X, y = make_classification(random_state=0)
>>> estimator = LogisticRegression()
>>> score, permutation_scores, pvalue = permutation_test_score(
... estimator, X, y, random_state=0
... )
>>> print(f"Original Score: {score:.3f}")
Original Score: 0.810
>>> print(
... f"Permutation Scores: {permutation_scores.mean():.3f} +/- "
... f"{permutation_scores.std():.3f}"
... )
Permutation Scores: 0.505 +/- 0.057
>>> print(f"P-value: {pvalue:.3f}")
P-value: 0.010
"""
params = _check_params_groups_deprecation(fit_params, params, groups, "1.8")
X, y, groups = indexable(X, y, groups)
cv = check_cv(cv, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
random_state = check_random_state(random_state)
if _routing_enabled():
router = (
MetadataRouter(owner="permutation_test_score")
.add(
estimator=estimator,
# TODO(SLEP6): also pass metadata to the predict method for
# scoring?
method_mapping=MethodMapping().add(caller="fit", callee="fit"),
)
.add(
splitter=cv,
method_mapping=MethodMapping().add(caller="fit", callee="split"),
)
.add(
scorer=scorer,
method_mapping=MethodMapping().add(caller="fit", callee="score"),
)
)
try:
routed_params = process_routing(router, "fit", **params)
except UnsetMetadataPassedError as e:
# The default exception would mention `fit` since in the above
# `process_routing` code, we pass `fit` as the caller. However,
# the user is not calling `fit` directly, so we change the message
# to make it more suitable for this case.
raise UnsetMetadataPassedError(
message=str(e).replace(
"permutation_test_score.fit", "permutation_test_score"
),
unrequested_params=e.unrequested_params,
routed_params=e.routed_params,
)
else:
routed_params = Bunch()
routed_params.estimator = Bunch(fit=params)
routed_params.splitter = Bunch(split={"groups": groups})
routed_params.scorer = Bunch(score={})
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
score = _permutation_test_score(
clone(estimator),
X,
y,
cv,
scorer,
split_params=routed_params.splitter.split,
fit_params=routed_params.estimator.fit,
score_params=routed_params.scorer.score,
)
permutation_scores = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(_permutation_test_score)(
clone(estimator),
X,
_shuffle(y, groups, random_state),
cv,
scorer,
split_params=routed_params.splitter.split,
fit_params=routed_params.estimator.fit,
score_params=routed_params.scorer.score,
)
for _ in range(n_permutations)
)
permutation_scores = np.array(permutation_scores)
pvalue = (np.sum(permutation_scores >= score) + 1.0) / (n_permutations + 1)
return score, permutation_scores, pvalue
|
Evaluate the significance of a cross-validated score with permutations.
Permutes targets to generate 'randomized data' and computes the empirical
p-value against the null hypothesis that features and targets are
independent.
The p-value represents the fraction of randomized data sets where the
estimator performed as well or better than on the original data. A small
p-value suggests that there is a real dependency between features and
targets which has been used by the estimator to give good predictions.
A large p-value may be due to a lack of real dependency between features
and targets, or to the estimator not being able to use the dependency to
give good predictions.
Read more in the :ref:`User Guide <permutation_test_score>`.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like of shape (n_samples,) or (n_samples, n_outputs) or None
The target variable to try to predict in the case of
supervised learning.
groups : array-like of shape (n_samples,), default=None
Labels to constrain permutation within groups, i.e. ``y`` values
are permuted among samples with the same group identifier.
When not specified, ``y`` values are permuted among all samples.
When a grouped cross-validator is used, the group labels are
also passed on to the ``split`` method of the cross-validator. The
cross-validator uses them for grouping the samples while splitting
the dataset into train/test set.
.. versionchanged:: 1.6
``groups`` can only be passed if metadata routing is not enabled
via ``sklearn.set_config(enable_metadata_routing=True)``. When routing
is enabled, pass ``groups`` alongside other metadata via the ``params``
argument instead. E.g.:
``permutation_test_score(..., params={'groups': groups})``.
cv : int, cross-validation generator or an iterable, default=None
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- `None`, to use the default 5-fold cross validation,
- int, to specify the number of folds in a `(Stratified)KFold`,
- :term:`CV splitter`,
- An iterable yielding (train, test) splits as arrays of indices.
For `int`/`None` inputs, if the estimator is a classifier and `y` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used. These splitters are instantiated
with `shuffle=False` so the splits will be the same across calls.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
.. versionchanged:: 0.22
`cv` default value if `None` changed from 3-fold to 5-fold.
n_permutations : int, default=100
Number of times to permute ``y``.
n_jobs : int, default=None
Number of jobs to run in parallel. Training the estimator and computing
the cross-validated score are parallelized over the permutations.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
random_state : int, RandomState instance or None, default=0
Pass an int for reproducible output for permutation of
``y`` values among samples. See :term:`Glossary <random_state>`.
verbose : int, default=0
The verbosity level.
scoring : str or callable, default=None
Scoring method to use to evaluate the predictions on the validation set.
- str: see :ref:`scoring_string_names` for options.
- callable: a scorer callable object (e.g., function) with signature
``scorer(estimator, X, y)``, which should return only a single value.
See :ref:`scoring_callable` for details.
- `None`: the `estimator`'s
:ref:`default evaluation criterion <scoring_api_overview>` is used.
fit_params : dict, default=None
Parameters to pass to the fit method of the estimator.
.. deprecated:: 1.6
        This parameter is deprecated and will be removed in version 1.8. Use
``params`` instead.
params : dict, default=None
Parameters to pass to the `fit` method of the estimator, the scorer
and the cv splitter.
- If `enable_metadata_routing=False` (default): Parameters directly passed to
the `fit` method of the estimator.
- If `enable_metadata_routing=True`: Parameters safely routed to the `fit`
method of the estimator, `cv` object and `scorer`. See :ref:`Metadata Routing
User Guide <metadata_routing>` for more details.
.. versionadded:: 1.6
Returns
-------
score : float
The true score without permuting targets.
permutation_scores : array of shape (n_permutations,)
    The scores obtained for each permutation.
pvalue : float
The p-value, which approximates the probability that the score would
be obtained by chance. This is calculated as:
`(C + 1) / (n_permutations + 1)`
Where C is the number of permutations whose score >= the true score.
The best possible p-value is 1/(n_permutations + 1), the worst is 1.0.
Notes
-----
This function implements Test 1 in:
Ojala and Garriga. `Permutation Tests for Studying Classifier Performance
<http://www.jmlr.org/papers/volume11/ojala10a/ojala10a.pdf>`_. The
Journal of Machine Learning Research (2010) vol. 11
Examples
--------
>>> from sklearn.datasets import make_classification
>>> from sklearn.linear_model import LogisticRegression
>>> from sklearn.model_selection import permutation_test_score
>>> X, y = make_classification(random_state=0)
>>> estimator = LogisticRegression()
>>> score, permutation_scores, pvalue = permutation_test_score(
... estimator, X, y, random_state=0
... )
>>> print(f"Original Score: {score:.3f}")
Original Score: 0.810
>>> print(
... f"Permutation Scores: {permutation_scores.mean():.3f} +/- "
... f"{permutation_scores.std():.3f}"
... )
Permutation Scores: 0.505 +/- 0.057
>>> print(f"P-value: {pvalue:.3f}")
P-value: 0.010
|
permutation_test_score
|
python
|
scikit-learn/scikit-learn
|
sklearn/model_selection/_validation.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/_validation.py
|
BSD-3-Clause
|
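To make the p-value formula quoted above concrete, a hedged by-hand computation with made-up permutation scores (not results produced by the source):

import numpy as np

true_score = 0.81
permutation_scores = np.array([0.50, 0.55, 0.48, 0.83, 0.52])  # illustrative only
n_permutations = len(permutation_scores)
# (C + 1) / (n_permutations + 1), where C counts permutations scoring >= true_score
pvalue = (np.sum(permutation_scores >= true_score) + 1.0) / (n_permutations + 1)
print(pvalue)  # 0.333...: one permutation matched or beat the true score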
def _shuffle(y, groups, random_state):
"""Return a shuffled copy of y eventually shuffle among same groups."""
if groups is None:
indices = random_state.permutation(len(y))
else:
indices = np.arange(len(groups))
for group in np.unique(groups):
this_mask = groups == group
indices[this_mask] = random_state.permutation(indices[this_mask])
return _safe_indexing(y, indices)
|
Return a shuffled copy of y, optionally shuffling only within the same groups.
|
_shuffle
|
python
|
scikit-learn/scikit-learn
|
sklearn/model_selection/_validation.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/_validation.py
|
BSD-3-Clause
|
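A hedged sketch (made-up data) of the group-constrained branch above: target values are permuted only among samples that share a group label.

import numpy as np

rng = np.random.RandomState(0)
y = np.array([1, 2, 3, 4, 5, 6])
groups = np.array([0, 0, 0, 1, 1, 1])
indices = np.arange(len(groups))
for group in np.unique(groups):
    mask = groups == group
    indices[mask] = rng.permutation(indices[mask])
# Values from the first group stay in the first three slots, and vice versa.
print(y[indices])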
def learning_curve(
estimator,
X,
y,
*,
groups=None,
train_sizes=np.linspace(0.1, 1.0, 5),
cv=None,
scoring=None,
exploit_incremental_learning=False,
n_jobs=None,
pre_dispatch="all",
verbose=0,
shuffle=False,
random_state=None,
error_score=np.nan,
return_times=False,
fit_params=None,
params=None,
):
"""Learning curve.
Determines cross-validated training and test scores for different training
set sizes.
A cross-validation generator splits the whole dataset k times in training
and test data. Subsets of the training set with varying sizes will be used
to train the estimator and a score for each training subset size and the
test set will be computed. Afterwards, the scores will be averaged over
all k runs for each training subset size.
Read more in the :ref:`User Guide <learning_curve>`.
Parameters
----------
estimator : object type that implements the "fit" method
An object of that type which is cloned for each validation. It must
also implement "predict" unless `scoring` is a callable that doesn't
rely on "predict" to compute a score.
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : array-like of shape (n_samples,) or (n_samples, n_outputs) or None
Target relative to X for classification or regression;
None for unsupervised learning.
groups : array-like of shape (n_samples,), default=None
Group labels for the samples used while splitting the dataset into
train/test set. Only used in conjunction with a "Group" :term:`cv`
instance (e.g., :class:`GroupKFold`).
.. versionchanged:: 1.6
``groups`` can only be passed if metadata routing is not enabled
via ``sklearn.set_config(enable_metadata_routing=True)``. When routing
is enabled, pass ``groups`` alongside other metadata via the ``params``
argument instead. E.g.:
``learning_curve(..., params={'groups': groups})``.
train_sizes : array-like of shape (n_ticks,), \
default=np.linspace(0.1, 1.0, 5)
Relative or absolute numbers of training examples that will be used to
generate the learning curve. If the dtype is float, it is regarded as a
fraction of the maximum size of the training set (that is determined
by the selected validation method), i.e. it has to be within (0, 1].
Otherwise it is interpreted as absolute sizes of the training sets.
Note that for classification the number of samples usually has to
be big enough to contain at least one sample from each class.
cv : int, cross-validation generator or an iterable, default=None
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 5-fold cross validation,
- int, to specify the number of folds in a `(Stratified)KFold`,
- :term:`CV splitter`,
- An iterable yielding (train, test) splits as arrays of indices.
For int/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used. These splitters are instantiated
with `shuffle=False` so the splits will be the same across calls.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
.. versionchanged:: 0.22
``cv`` default value if None changed from 3-fold to 5-fold.
scoring : str or callable, default=None
Scoring method to use to evaluate the training and test sets.
- str: see :ref:`scoring_string_names` for options.
- callable: a scorer callable object (e.g., function) with signature
``scorer(estimator, X, y)``. See :ref:`scoring_callable` for details.
- `None`: the `estimator`'s
:ref:`default evaluation criterion <scoring_api_overview>` is used.
exploit_incremental_learning : bool, default=False
If the estimator supports incremental learning, this will be
used to speed up fitting for different training set sizes.
n_jobs : int, default=None
Number of jobs to run in parallel. Training the estimator and computing
the score are parallelized over the different training and test sets.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
pre_dispatch : int or str, default='all'
Number of predispatched jobs for parallel execution (default is
all). The option can reduce the allocated memory. The str can
be an expression like '2*n_jobs'.
verbose : int, default=0
Controls the verbosity: the higher, the more messages.
shuffle : bool, default=False
Whether to shuffle training data before taking prefixes of it
        based on ``train_sizes``.
random_state : int, RandomState instance or None, default=None
Used when ``shuffle`` is True. Pass an int for reproducible
output across multiple function calls.
See :term:`Glossary <random_state>`.
error_score : 'raise' or numeric, default=np.nan
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised.
If a numeric value is given, FitFailedWarning is raised.
.. versionadded:: 0.20
return_times : bool, default=False
Whether to return the fit and score times.
fit_params : dict, default=None
Parameters to pass to the fit method of the estimator.
.. deprecated:: 1.6
This parameter is deprecated and will be removed in version 1.8. Use
``params`` instead.
params : dict, default=None
Parameters to pass to the `fit` method of the estimator and to the scorer.
- If `enable_metadata_routing=False` (default): Parameters directly passed to
the `fit` method of the estimator.
- If `enable_metadata_routing=True`: Parameters safely routed to the `fit`
method of the estimator. See :ref:`Metadata Routing User Guide
<metadata_routing>` for more details.
.. versionadded:: 1.6
Returns
-------
train_sizes_abs : array of shape (n_unique_ticks,)
        Numbers of training examples that have been used to generate the
learning curve. Note that the number of ticks might be less
than n_ticks because duplicate entries will be removed.
train_scores : array of shape (n_ticks, n_cv_folds)
Scores on training sets.
test_scores : array of shape (n_ticks, n_cv_folds)
Scores on test set.
fit_times : array of shape (n_ticks, n_cv_folds)
Times spent for fitting in seconds. Only present if ``return_times``
is True.
score_times : array of shape (n_ticks, n_cv_folds)
Times spent for scoring in seconds. Only present if ``return_times``
is True.
See Also
--------
LearningCurveDisplay.from_estimator : Plot a learning curve using an
estimator and data.
Examples
--------
>>> from sklearn.datasets import make_classification
>>> from sklearn.tree import DecisionTreeClassifier
>>> from sklearn.model_selection import learning_curve
>>> X, y = make_classification(n_samples=100, n_features=10, random_state=42)
>>> tree = DecisionTreeClassifier(max_depth=4, random_state=42)
>>> train_size_abs, train_scores, test_scores = learning_curve(
... tree, X, y, train_sizes=[0.3, 0.6, 0.9]
... )
>>> for train_size, cv_train_scores, cv_test_scores in zip(
... train_size_abs, train_scores, test_scores
... ):
... print(f"{train_size} samples were used to train the model")
... print(f"The average train accuracy is {cv_train_scores.mean():.2f}")
... print(f"The average test accuracy is {cv_test_scores.mean():.2f}")
24 samples were used to train the model
The average train accuracy is 1.00
The average test accuracy is 0.85
48 samples were used to train the model
The average train accuracy is 1.00
The average test accuracy is 0.90
72 samples were used to train the model
The average train accuracy is 1.00
The average test accuracy is 0.93
"""
if exploit_incremental_learning and not hasattr(estimator, "partial_fit"):
raise ValueError(
"An estimator must support the partial_fit interface "
"to exploit incremental learning"
)
params = _check_params_groups_deprecation(fit_params, params, groups, "1.8")
X, y, groups = indexable(X, y, groups)
cv = check_cv(cv, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
if _routing_enabled():
router = (
MetadataRouter(owner="learning_curve")
.add(
estimator=estimator,
# TODO(SLEP6): also pass metadata to the predict method for
# scoring?
method_mapping=MethodMapping()
.add(caller="fit", callee="fit")
.add(caller="fit", callee="partial_fit"),
)
.add(
splitter=cv,
method_mapping=MethodMapping().add(caller="fit", callee="split"),
)
.add(
scorer=scorer,
method_mapping=MethodMapping().add(caller="fit", callee="score"),
)
)
try:
routed_params = process_routing(router, "fit", **params)
except UnsetMetadataPassedError as e:
# The default exception would mention `fit` since in the above
# `process_routing` code, we pass `fit` as the caller. However,
# the user is not calling `fit` directly, so we change the message
# to make it more suitable for this case.
raise UnsetMetadataPassedError(
message=str(e).replace("learning_curve.fit", "learning_curve"),
unrequested_params=e.unrequested_params,
routed_params=e.routed_params,
)
else:
routed_params = Bunch()
routed_params.estimator = Bunch(fit=params, partial_fit=params)
routed_params.splitter = Bunch(split={"groups": groups})
routed_params.scorer = Bunch(score={})
# Store cv as list as we will be iterating over the list multiple times
cv_iter = list(cv.split(X, y, **routed_params.splitter.split))
n_max_training_samples = len(cv_iter[0][0])
# Because the lengths of folds can be significantly different, it is
# not guaranteed that we use all of the available training data when we
# use the first 'n_max_training_samples' samples.
train_sizes_abs = _translate_train_sizes(train_sizes, n_max_training_samples)
n_unique_ticks = train_sizes_abs.shape[0]
if verbose > 0:
print("[learning_curve] Training set sizes: " + str(train_sizes_abs))
parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch, verbose=verbose)
if shuffle:
rng = check_random_state(random_state)
cv_iter = ((rng.permutation(train), test) for train, test in cv_iter)
if exploit_incremental_learning:
classes = np.unique(y) if is_classifier(estimator) else None
out = parallel(
delayed(_incremental_fit_estimator)(
clone(estimator),
X,
y,
classes,
train,
test,
train_sizes_abs,
scorer,
return_times,
error_score=error_score,
fit_params=routed_params.estimator.partial_fit,
score_params=routed_params.scorer.score,
)
for train, test in cv_iter
)
out = np.asarray(out).transpose((2, 1, 0))
else:
train_test_proportions = []
for train, test in cv_iter:
for n_train_samples in train_sizes_abs:
train_test_proportions.append((train[:n_train_samples], test))
results = parallel(
delayed(_fit_and_score)(
clone(estimator),
X,
y,
scorer=scorer,
train=train,
test=test,
verbose=verbose,
parameters=None,
fit_params=routed_params.estimator.fit,
score_params=routed_params.scorer.score,
return_train_score=True,
error_score=error_score,
return_times=return_times,
)
for train, test in train_test_proportions
)
_warn_or_raise_about_fit_failures(results, error_score)
results = _aggregate_score_dicts(results)
train_scores = results["train_scores"].reshape(-1, n_unique_ticks).T
test_scores = results["test_scores"].reshape(-1, n_unique_ticks).T
out = [train_scores, test_scores]
if return_times:
fit_times = results["fit_time"].reshape(-1, n_unique_ticks).T
score_times = results["score_time"].reshape(-1, n_unique_ticks).T
out.extend([fit_times, score_times])
ret = train_sizes_abs, out[0], out[1]
if return_times:
ret = ret + (out[2], out[3])
return ret
|
Learning curve.
Determines cross-validated training and test scores for different training
set sizes.
A cross-validation generator splits the whole dataset k times in training
and test data. Subsets of the training set with varying sizes will be used
to train the estimator and a score for each training subset size and the
test set will be computed. Afterwards, the scores will be averaged over
all k runs for each training subset size.
Read more in the :ref:`User Guide <learning_curve>`.
Parameters
----------
estimator : object type that implements the "fit" method
An object of that type which is cloned for each validation. It must
also implement "predict" unless `scoring` is a callable that doesn't
rely on "predict" to compute a score.
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : array-like of shape (n_samples,) or (n_samples, n_outputs) or None
Target relative to X for classification or regression;
None for unsupervised learning.
groups : array-like of shape (n_samples,), default=None
Group labels for the samples used while splitting the dataset into
train/test set. Only used in conjunction with a "Group" :term:`cv`
instance (e.g., :class:`GroupKFold`).
.. versionchanged:: 1.6
``groups`` can only be passed if metadata routing is not enabled
via ``sklearn.set_config(enable_metadata_routing=True)``. When routing
is enabled, pass ``groups`` alongside other metadata via the ``params``
argument instead. E.g.:
``learning_curve(..., params={'groups': groups})``.
train_sizes : array-like of shape (n_ticks,), default=np.linspace(0.1, 1.0, 5)
Relative or absolute numbers of training examples that will be used to
generate the learning curve. If the dtype is float, it is regarded as a
fraction of the maximum size of the training set (that is determined
by the selected validation method), i.e. it has to be within (0, 1].
Otherwise it is interpreted as absolute sizes of the training sets.
Note that for classification the number of samples usually has to
be big enough to contain at least one sample from each class.
cv : int, cross-validation generator or an iterable, default=None
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 5-fold cross validation,
- int, to specify the number of folds in a `(Stratified)KFold`,
- :term:`CV splitter`,
- An iterable yielding (train, test) splits as arrays of indices.
For int/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used. These splitters are instantiated
with `shuffle=False` so the splits will be the same across calls.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
.. versionchanged:: 0.22
``cv`` default value if None changed from 3-fold to 5-fold.
scoring : str or callable, default=None
Scoring method to use to evaluate the training and test sets.
- str: see :ref:`scoring_string_names` for options.
- callable: a scorer callable object (e.g., function) with signature
``scorer(estimator, X, y)``. See :ref:`scoring_callable` for details.
- `None`: the `estimator`'s
:ref:`default evaluation criterion <scoring_api_overview>` is used.
exploit_incremental_learning : bool, default=False
If the estimator supports incremental learning, this will be
used to speed up fitting for different training set sizes.
n_jobs : int, default=None
Number of jobs to run in parallel. Training the estimator and computing
the score are parallelized over the different training and test sets.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
pre_dispatch : int or str, default='all'
Number of predispatched jobs for parallel execution (default is
all). The option can reduce the allocated memory. The str can
be an expression like '2*n_jobs'.
verbose : int, default=0
Controls the verbosity: the higher, the more messages.
shuffle : bool, default=False
Whether to shuffle training data before taking prefixes of it
    based on ``train_sizes``.
random_state : int, RandomState instance or None, default=None
Used when ``shuffle`` is True. Pass an int for reproducible
output across multiple function calls.
See :term:`Glossary <random_state>`.
error_score : 'raise' or numeric, default=np.nan
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised.
If a numeric value is given, FitFailedWarning is raised.
.. versionadded:: 0.20
return_times : bool, default=False
Whether to return the fit and score times.
fit_params : dict, default=None
Parameters to pass to the fit method of the estimator.
.. deprecated:: 1.6
This parameter is deprecated and will be removed in version 1.8. Use
``params`` instead.
params : dict, default=None
Parameters to pass to the `fit` method of the estimator and to the scorer.
- If `enable_metadata_routing=False` (default): Parameters directly passed to
the `fit` method of the estimator.
- If `enable_metadata_routing=True`: Parameters safely routed to the `fit`
method of the estimator. See :ref:`Metadata Routing User Guide
<metadata_routing>` for more details.
.. versionadded:: 1.6
Returns
-------
train_sizes_abs : array of shape (n_unique_ticks,)
    Numbers of training examples that have been used to generate the
learning curve. Note that the number of ticks might be less
than n_ticks because duplicate entries will be removed.
train_scores : array of shape (n_ticks, n_cv_folds)
Scores on training sets.
test_scores : array of shape (n_ticks, n_cv_folds)
Scores on test set.
fit_times : array of shape (n_ticks, n_cv_folds)
Times spent for fitting in seconds. Only present if ``return_times``
is True.
score_times : array of shape (n_ticks, n_cv_folds)
Times spent for scoring in seconds. Only present if ``return_times``
is True.
See Also
--------
LearningCurveDisplay.from_estimator : Plot a learning curve using an
estimator and data.
Examples
--------
>>> from sklearn.datasets import make_classification
>>> from sklearn.tree import DecisionTreeClassifier
>>> from sklearn.model_selection import learning_curve
>>> X, y = make_classification(n_samples=100, n_features=10, random_state=42)
>>> tree = DecisionTreeClassifier(max_depth=4, random_state=42)
>>> train_size_abs, train_scores, test_scores = learning_curve(
... tree, X, y, train_sizes=[0.3, 0.6, 0.9]
... )
>>> for train_size, cv_train_scores, cv_test_scores in zip(
... train_size_abs, train_scores, test_scores
... ):
... print(f"{train_size} samples were used to train the model")
... print(f"The average train accuracy is {cv_train_scores.mean():.2f}")
... print(f"The average test accuracy is {cv_test_scores.mean():.2f}")
24 samples were used to train the model
The average train accuracy is 1.00
The average test accuracy is 0.85
48 samples were used to train the model
The average train accuracy is 1.00
The average test accuracy is 0.90
72 samples were used to train the model
The average train accuracy is 1.00
The average test accuracy is 0.93
|
learning_curve
|
python
|
scikit-learn/scikit-learn
|
sklearn/model_selection/_validation.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/_validation.py
|
BSD-3-Clause
|
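A minimal sketch, assuming an illustrative dataset, estimator and training sizes, of how the `shuffle` and `return_times` options documented above combine in `learning_curve`.

# Hedged illustration: learning_curve with shuffle=True and return_times=True.
# Dataset, estimator and train_sizes are assumptions chosen for the sketch.
import numpy as np
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import learning_curve

X, y = make_classification(n_samples=200, n_features=10, random_state=0)
sizes, train_scores, test_scores, fit_times, score_times = learning_curve(
    LogisticRegression(max_iter=1000),
    X,
    y,
    train_sizes=np.linspace(0.2, 1.0, 4),
    cv=5,
    shuffle=True,
    random_state=0,
    return_times=True,
)
print(sizes)                     # absolute training-set sizes actually used
print(fit_times.mean(axis=1))    # mean fit time (s) per training size
print(score_times.mean(axis=1))  # mean score time (s) per training size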
def _translate_train_sizes(train_sizes, n_max_training_samples):
"""Determine absolute sizes of training subsets and validate 'train_sizes'.
Examples:
_translate_train_sizes([0.5, 1.0], 10) -> [5, 10]
_translate_train_sizes([5, 10], 10) -> [5, 10]
Parameters
----------
train_sizes : array-like of shape (n_ticks,)
Numbers of training examples that will be used to generate the
learning curve. If the dtype is float, it is regarded as a
fraction of 'n_max_training_samples', i.e. it has to be within (0, 1].
n_max_training_samples : int
Maximum number of training samples (upper bound of 'train_sizes').
Returns
-------
train_sizes_abs : array of shape (n_unique_ticks,)
Numbers of training examples that will be used to generate the
learning curve. Note that the number of ticks might be less
than n_ticks because duplicate entries will be removed.
"""
train_sizes_abs = np.asarray(train_sizes)
n_ticks = train_sizes_abs.shape[0]
n_min_required_samples = np.min(train_sizes_abs)
n_max_required_samples = np.max(train_sizes_abs)
if np.issubdtype(train_sizes_abs.dtype, np.floating):
if n_min_required_samples <= 0.0 or n_max_required_samples > 1.0:
raise ValueError(
"train_sizes has been interpreted as fractions "
"of the maximum number of training samples and "
"must be within (0, 1], but is within [%f, %f]."
% (n_min_required_samples, n_max_required_samples)
)
train_sizes_abs = (train_sizes_abs * n_max_training_samples).astype(
dtype=int, copy=False
)
train_sizes_abs = np.clip(train_sizes_abs, 1, n_max_training_samples)
else:
if (
n_min_required_samples <= 0
or n_max_required_samples > n_max_training_samples
):
raise ValueError(
"train_sizes has been interpreted as absolute "
"numbers of training samples and must be within "
"(0, %d], but is within [%d, %d]."
% (
n_max_training_samples,
n_min_required_samples,
n_max_required_samples,
)
)
train_sizes_abs = np.unique(train_sizes_abs)
if n_ticks > train_sizes_abs.shape[0]:
warnings.warn(
"Removed duplicate entries from 'train_sizes'. Number "
"of ticks will be less than the size of "
"'train_sizes': %d instead of %d." % (train_sizes_abs.shape[0], n_ticks),
RuntimeWarning,
)
return train_sizes_abs
|
Determine absolute sizes of training subsets and validate 'train_sizes'.
Examples:
_translate_train_sizes([0.5, 1.0], 10) -> [5, 10]
_translate_train_sizes([5, 10], 10) -> [5, 10]
Parameters
----------
train_sizes : array-like of shape (n_ticks,)
Numbers of training examples that will be used to generate the
learning curve. If the dtype is float, it is regarded as a
fraction of 'n_max_training_samples', i.e. it has to be within (0, 1].
n_max_training_samples : int
Maximum number of training samples (upper bound of 'train_sizes').
Returns
-------
train_sizes_abs : array of shape (n_unique_ticks,)
Numbers of training examples that will be used to generate the
learning curve. Note that the number of ticks might be less
than n_ticks because duplicate entries will be removed.
|
_translate_train_sizes
|
python
|
scikit-learn/scikit-learn
|
sklearn/model_selection/_validation.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/_validation.py
|
BSD-3-Clause
|
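A small sketch, assuming illustrative inputs, of the fraction-to-absolute mapping performed by `_translate_train_sizes` above.

# Hedged sketch reproducing the float branch of _translate_train_sizes:
# fractional ticks are scaled by the largest training-set size, truncated to
# int and clipped to [1, n_max_training_samples]. Values here are assumptions.
import numpy as np

train_sizes = np.array([0.1, 0.325, 0.55, 0.775, 1.0])  # float ticks in (0, 1]
n_max_training_samples = 80  # e.g. 100 samples with 5-fold cross-validation
abs_sizes = (train_sizes * n_max_training_samples).astype(int)
abs_sizes = np.clip(abs_sizes, 1, n_max_training_samples)
print(np.unique(abs_sizes))  # [ 8 26 44 62 80]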
def _incremental_fit_estimator(
estimator,
X,
y,
classes,
train,
test,
train_sizes,
scorer,
return_times,
error_score,
fit_params,
score_params,
):
"""Train estimator on training subsets incrementally and compute scores."""
train_scores, test_scores, fit_times, score_times = [], [], [], []
partitions = zip(train_sizes, np.split(train, train_sizes)[:-1])
if fit_params is None:
fit_params = {}
if classes is None:
partial_fit_func = partial(estimator.partial_fit, **fit_params)
else:
partial_fit_func = partial(estimator.partial_fit, classes=classes, **fit_params)
score_params = score_params if score_params is not None else {}
score_params_train = _check_method_params(X, params=score_params, indices=train)
score_params_test = _check_method_params(X, params=score_params, indices=test)
for n_train_samples, partial_train in partitions:
train_subset = train[:n_train_samples]
X_train, y_train = _safe_split(estimator, X, y, train_subset)
X_partial_train, y_partial_train = _safe_split(estimator, X, y, partial_train)
X_test, y_test = _safe_split(estimator, X, y, test, train_subset)
start_fit = time.time()
if y_partial_train is None:
partial_fit_func(X_partial_train)
else:
partial_fit_func(X_partial_train, y_partial_train)
fit_time = time.time() - start_fit
fit_times.append(fit_time)
start_score = time.time()
test_scores.append(
_score(
estimator,
X_test,
y_test,
scorer,
score_params=score_params_test,
error_score=error_score,
)
)
train_scores.append(
_score(
estimator,
X_train,
y_train,
scorer,
score_params=score_params_train,
error_score=error_score,
)
)
score_time = time.time() - start_score
score_times.append(score_time)
ret = (
(train_scores, test_scores, fit_times, score_times)
if return_times
else (train_scores, test_scores)
)
return np.array(ret).T
|
Train estimator on training subsets incrementally and compute scores.
|
_incremental_fit_estimator
|
python
|
scikit-learn/scikit-learn
|
sklearn/model_selection/_validation.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/_validation.py
|
BSD-3-Clause
|
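The helper above feeds growing prefixes of the training split to `partial_fit`; a hedged sketch of the same behaviour through the public API, with an illustrative estimator and dataset, follows.

# Hedged sketch: incremental learning curve via the public API. The estimator
# must implement partial_fit; SGDClassifier and the data are assumptions.
import numpy as np
from sklearn.datasets import make_classification
from sklearn.linear_model import SGDClassifier
from sklearn.model_selection import learning_curve

X, y = make_classification(n_samples=300, random_state=0)
sizes, train_scores, test_scores = learning_curve(
    SGDClassifier(random_state=0),
    X,
    y,
    train_sizes=np.linspace(0.2, 1.0, 5),
    cv=5,
    exploit_incremental_learning=True,  # reuses partial_fit on growing prefixes
)
print(sizes)
print(test_scores.mean(axis=1))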
def validation_curve(
estimator,
X,
y,
*,
param_name,
param_range,
groups=None,
cv=None,
scoring=None,
n_jobs=None,
pre_dispatch="all",
verbose=0,
error_score=np.nan,
fit_params=None,
params=None,
):
"""Validation curve.
Determine training and test scores for varying parameter values.
Compute scores for an estimator with different values of a specified
parameter. This is similar to grid search with one parameter. However, this
will also compute training scores and is merely a utility for plotting the
results.
Read more in the :ref:`User Guide <validation_curve>`.
Parameters
----------
estimator : object type that implements the "fit" method
An object of that type which is cloned for each validation. It must
also implement "predict" unless `scoring` is a callable that doesn't
rely on "predict" to compute a score.
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : array-like of shape (n_samples,) or (n_samples, n_outputs) or None
Target relative to X for classification or regression;
None for unsupervised learning.
param_name : str
Name of the parameter that will be varied.
param_range : array-like of shape (n_values,)
The values of the parameter that will be evaluated.
groups : array-like of shape (n_samples,), default=None
Group labels for the samples used while splitting the dataset into
train/test set. Only used in conjunction with a "Group" :term:`cv`
instance (e.g., :class:`GroupKFold`).
.. versionchanged:: 1.6
``groups`` can only be passed if metadata routing is not enabled
via ``sklearn.set_config(enable_metadata_routing=True)``. When routing
is enabled, pass ``groups`` alongside other metadata via the ``params``
argument instead. E.g.:
``validation_curve(..., params={'groups': groups})``.
cv : int, cross-validation generator or an iterable, default=None
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 5-fold cross validation,
- int, to specify the number of folds in a `(Stratified)KFold`,
- :term:`CV splitter`,
- An iterable yielding (train, test) splits as arrays of indices.
For int/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used. These splitters are instantiated
with `shuffle=False` so the splits will be the same across calls.
        Refer to the :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
.. versionchanged:: 0.22
``cv`` default value if None changed from 3-fold to 5-fold.
scoring : str or callable, default=None
Scoring method to use to evaluate the training and test sets.
- str: see :ref:`scoring_string_names` for options.
- callable: a scorer callable object (e.g., function) with signature
``scorer(estimator, X, y)``. See :ref:`scoring_callable` for details.
- `None`: the `estimator`'s
:ref:`default evaluation criterion <scoring_api_overview>` is used.
n_jobs : int, default=None
Number of jobs to run in parallel. Training the estimator and computing
the score are parallelized over the combinations of each parameter
value and each cross-validation split.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
pre_dispatch : int or str, default='all'
Number of predispatched jobs for parallel execution (default is
all). The option can reduce the allocated memory. The str can
be an expression like '2*n_jobs'.
verbose : int, default=0
Controls the verbosity: the higher, the more messages.
error_score : 'raise' or numeric, default=np.nan
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised.
If a numeric value is given, FitFailedWarning is raised.
.. versionadded:: 0.20
fit_params : dict, default=None
Parameters to pass to the fit method of the estimator.
.. deprecated:: 1.6
This parameter is deprecated and will be removed in version 1.8. Use
``params`` instead.
params : dict, default=None
Parameters to pass to the estimator, scorer and cross-validation object.
- If `enable_metadata_routing=False` (default): Parameters directly passed to
the `fit` method of the estimator.
- If `enable_metadata_routing=True`: Parameters safely routed to the `fit`
method of the estimator, to the scorer and to the cross-validation object.
See :ref:`Metadata Routing User Guide <metadata_routing>` for more details.
.. versionadded:: 1.6
Returns
-------
train_scores : array of shape (n_ticks, n_cv_folds)
Scores on training sets.
test_scores : array of shape (n_ticks, n_cv_folds)
        Scores on test sets.
See Also
--------
ValidationCurveDisplay.from_estimator : Plot the validation curve
given an estimator, the data, and the parameter to vary.
Notes
-----
See :ref:`sphx_glr_auto_examples_model_selection_plot_train_error_vs_test_error.py`
Examples
--------
>>> import numpy as np
>>> from sklearn.datasets import make_classification
>>> from sklearn.model_selection import validation_curve
>>> from sklearn.linear_model import LogisticRegression
>>> X, y = make_classification(n_samples=1_000, random_state=0)
>>> logistic_regression = LogisticRegression()
>>> param_name, param_range = "C", np.logspace(-8, 3, 10)
>>> train_scores, test_scores = validation_curve(
... logistic_regression, X, y, param_name=param_name, param_range=param_range
... )
>>> print(f"The average train accuracy is {train_scores.mean():.2f}")
The average train accuracy is 0.81
>>> print(f"The average test accuracy is {test_scores.mean():.2f}")
The average test accuracy is 0.81
"""
params = _check_params_groups_deprecation(fit_params, params, groups, "1.8")
X, y, groups = indexable(X, y, groups)
cv = check_cv(cv, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
if _routing_enabled():
router = (
MetadataRouter(owner="validation_curve")
.add(
estimator=estimator,
method_mapping=MethodMapping().add(caller="fit", callee="fit"),
)
.add(
splitter=cv,
method_mapping=MethodMapping().add(caller="fit", callee="split"),
)
.add(
scorer=scorer,
method_mapping=MethodMapping().add(caller="fit", callee="score"),
)
)
try:
routed_params = process_routing(router, "fit", **params)
except UnsetMetadataPassedError as e:
# The default exception would mention `fit` since in the above
# `process_routing` code, we pass `fit` as the caller. However,
# the user is not calling `fit` directly, so we change the message
# to make it more suitable for this case.
raise UnsetMetadataPassedError(
message=str(e).replace("validation_curve.fit", "validation_curve"),
unrequested_params=e.unrequested_params,
routed_params=e.routed_params,
)
else:
routed_params = Bunch()
routed_params.estimator = Bunch(fit=params)
routed_params.splitter = Bunch(split={"groups": groups})
routed_params.scorer = Bunch(score={})
parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch, verbose=verbose)
results = parallel(
delayed(_fit_and_score)(
clone(estimator),
X,
y,
scorer=scorer,
train=train,
test=test,
verbose=verbose,
parameters={param_name: v},
fit_params=routed_params.estimator.fit,
score_params=routed_params.scorer.score,
return_train_score=True,
error_score=error_score,
)
        # NOTE: do not change the order of iteration, to allow one-time cv splitters
for train, test in cv.split(X, y, **routed_params.splitter.split)
for v in param_range
)
n_params = len(param_range)
results = _aggregate_score_dicts(results)
train_scores = results["train_scores"].reshape(-1, n_params).T
test_scores = results["test_scores"].reshape(-1, n_params).T
return train_scores, test_scores
|
Validation curve.
Determine training and test scores for varying parameter values.
Compute scores for an estimator with different values of a specified
parameter. This is similar to grid search with one parameter. However, this
will also compute training scores and is merely a utility for plotting the
results.
Read more in the :ref:`User Guide <validation_curve>`.
Parameters
----------
estimator : object type that implements the "fit" method
An object of that type which is cloned for each validation. It must
also implement "predict" unless `scoring` is a callable that doesn't
rely on "predict" to compute a score.
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : array-like of shape (n_samples,) or (n_samples, n_outputs) or None
Target relative to X for classification or regression;
None for unsupervised learning.
param_name : str
Name of the parameter that will be varied.
param_range : array-like of shape (n_values,)
The values of the parameter that will be evaluated.
groups : array-like of shape (n_samples,), default=None
Group labels for the samples used while splitting the dataset into
train/test set. Only used in conjunction with a "Group" :term:`cv`
instance (e.g., :class:`GroupKFold`).
.. versionchanged:: 1.6
``groups`` can only be passed if metadata routing is not enabled
via ``sklearn.set_config(enable_metadata_routing=True)``. When routing
is enabled, pass ``groups`` alongside other metadata via the ``params``
argument instead. E.g.:
``validation_curve(..., params={'groups': groups})``.
cv : int, cross-validation generator or an iterable, default=None
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 5-fold cross validation,
- int, to specify the number of folds in a `(Stratified)KFold`,
- :term:`CV splitter`,
- An iterable yielding (train, test) splits as arrays of indices.
For int/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used. These splitters are instantiated
with `shuffle=False` so the splits will be the same across calls.
        Refer to the :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
.. versionchanged:: 0.22
``cv`` default value if None changed from 3-fold to 5-fold.
scoring : str or callable, default=None
Scoring method to use to evaluate the training and test sets.
- str: see :ref:`scoring_string_names` for options.
- callable: a scorer callable object (e.g., function) with signature
``scorer(estimator, X, y)``. See :ref:`scoring_callable` for details.
- `None`: the `estimator`'s
:ref:`default evaluation criterion <scoring_api_overview>` is used.
n_jobs : int, default=None
Number of jobs to run in parallel. Training the estimator and computing
the score are parallelized over the combinations of each parameter
value and each cross-validation split.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
pre_dispatch : int or str, default='all'
Number of predispatched jobs for parallel execution (default is
all). The option can reduce the allocated memory. The str can
be an expression like '2*n_jobs'.
verbose : int, default=0
Controls the verbosity: the higher, the more messages.
error_score : 'raise' or numeric, default=np.nan
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised.
If a numeric value is given, FitFailedWarning is raised.
.. versionadded:: 0.20
fit_params : dict, default=None
Parameters to pass to the fit method of the estimator.
.. deprecated:: 1.6
This parameter is deprecated and will be removed in version 1.8. Use
``params`` instead.
params : dict, default=None
Parameters to pass to the estimator, scorer and cross-validation object.
- If `enable_metadata_routing=False` (default): Parameters directly passed to
the `fit` method of the estimator.
- If `enable_metadata_routing=True`: Parameters safely routed to the `fit`
method of the estimator, to the scorer and to the cross-validation object.
See :ref:`Metadata Routing User Guide <metadata_routing>` for more details.
.. versionadded:: 1.6
Returns
-------
train_scores : array of shape (n_ticks, n_cv_folds)
Scores on training sets.
test_scores : array of shape (n_ticks, n_cv_folds)
        Scores on test sets.
See Also
--------
ValidationCurveDisplay.from_estimator : Plot the validation curve
given an estimator, the data, and the parameter to vary.
Notes
-----
See :ref:`sphx_glr_auto_examples_model_selection_plot_train_error_vs_test_error.py`
Examples
--------
>>> import numpy as np
>>> from sklearn.datasets import make_classification
>>> from sklearn.model_selection import validation_curve
>>> from sklearn.linear_model import LogisticRegression
>>> X, y = make_classification(n_samples=1_000, random_state=0)
>>> logistic_regression = LogisticRegression()
>>> param_name, param_range = "C", np.logspace(-8, 3, 10)
>>> train_scores, test_scores = validation_curve(
... logistic_regression, X, y, param_name=param_name, param_range=param_range
... )
>>> print(f"The average train accuracy is {train_scores.mean():.2f}")
The average train accuracy is 0.81
>>> print(f"The average test accuracy is {test_scores.mean():.2f}")
The average test accuracy is 0.81
|
validation_curve
|
python
|
scikit-learn/scikit-learn
|
sklearn/model_selection/_validation.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/_validation.py
|
BSD-3-Clause
|
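The reshaping at the end of `validation_curve` above yields one row per parameter value and one column per cross-validation split; a hedged sketch with an illustrative estimator and range makes the shape explicit.

# Hedged sketch: output shape of validation_curve is (n_values, n_cv_folds).
# Dataset, estimator and parameter range are illustrative assumptions.
import numpy as np
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import validation_curve

X, y = make_classification(n_samples=200, random_state=0)
param_range = np.logspace(-3, 2, 6)
train_scores, test_scores = validation_curve(
    LogisticRegression(max_iter=1000),
    X,
    y,
    param_name="C",
    param_range=param_range,
    cv=5,
)
print(train_scores.shape, test_scores.shape)  # (6, 5) (6, 5)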
def _aggregate_score_dicts(scores):
    """Aggregate a list of dicts into a dict of np.ndarray.
    The input of _aggregate_score_dicts is a list of dicts of the form
    [{'prec': 0.1, 'acc': 1.0}, {'prec': 0.1, 'acc': 1.0}, ...];
    convert it to a dict of arrays {'prec': np.array([0.1, ...]), ...}.
Parameters
----------
scores : list of dict
List of dicts of the scores for all scorers. This is a flat list,
assumed originally to be of row major order.
Example
-------
>>> scores = [{'a': 1, 'b':10}, {'a': 2, 'b':2}, {'a': 3, 'b':3},
... {'a': 10, 'b': 10}] # doctest: +SKIP
>>> _aggregate_score_dicts(scores) # doctest: +SKIP
{'a': array([1, 2, 3, 10]),
'b': array([10, 2, 3, 10])}
"""
return {
key: (
np.asarray([score[key] for score in scores])
if isinstance(scores[0][key], numbers.Number)
else [score[key] for score in scores]
)
for key in scores[0]
}
|
Aggregate a list of dicts into a dict of np.ndarray.
    The input of _aggregate_score_dicts is a list of dicts of the form
    [{'prec': 0.1, 'acc': 1.0}, {'prec': 0.1, 'acc': 1.0}, ...];
    convert it to a dict of arrays {'prec': np.array([0.1, ...]), ...}.
Parameters
----------
scores : list of dict
List of dicts of the scores for all scorers. This is a flat list,
assumed originally to be of row major order.
Example
-------
>>> scores = [{'a': 1, 'b':10}, {'a': 2, 'b':2}, {'a': 3, 'b':3},
... {'a': 10, 'b': 10}] # doctest: +SKIP
>>> _aggregate_score_dicts(scores) # doctest: +SKIP
{'a': array([1, 2, 3, 10]),
'b': array([10, 2, 3, 10])}
|
_aggregate_score_dicts
|
python
|
scikit-learn/scikit-learn
|
sklearn/model_selection/_validation.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/_validation.py
|
BSD-3-Clause
|
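A tiny sketch of the aggregation above, using made-up score dicts; numeric values become an ndarray while non-numeric values are kept as a plain list.

# Hedged sketch mirroring _aggregate_score_dicts on made-up inputs.
import numbers
import numpy as np

scores = [
    {"acc": 0.9, "label": "fold-0"},
    {"acc": 0.8, "label": "fold-1"},
]
aggregated = {
    key: (
        np.asarray([s[key] for s in scores])
        if isinstance(scores[0][key], numbers.Number)
        else [s[key] for s in scores]
    )
    for key in scores[0]
}
print(aggregated["acc"])    # array([0.9, 0.8])
print(aggregated["label"])  # ['fold-0', 'fold-1']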
def test_fit_and_score_over_thresholds_curve_scorers():
"""Check that `_fit_and_score_over_thresholds` returns thresholds in ascending order
for the different accepted curve scorers."""
X, y = make_classification(n_samples=100, random_state=0)
train_idx, val_idx = np.arange(50), np.arange(50, 100)
classifier = LogisticRegression()
curve_scorer = _CurveScorer(
score_func=balanced_accuracy_score,
sign=1,
response_method="predict_proba",
thresholds=10,
kwargs={},
)
scores, thresholds = _fit_and_score_over_thresholds(
classifier,
X,
y,
fit_params={},
train_idx=train_idx,
val_idx=val_idx,
curve_scorer=curve_scorer,
score_params={},
)
assert np.all(thresholds[:-1] <= thresholds[1:])
assert isinstance(scores, np.ndarray)
assert np.logical_and(scores >= 0, scores <= 1).all()
|
Check that `_fit_and_score_over_thresholds` returns thresholds in ascending order
for the different accepted curve scorers.
|
test_fit_and_score_over_thresholds_curve_scorers
|
python
|
scikit-learn/scikit-learn
|
sklearn/model_selection/tests/test_classification_threshold.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/tests/test_classification_threshold.py
|
BSD-3-Clause
|
def test_fit_and_score_over_thresholds_prefit():
"""Check the behaviour with a prefit classifier."""
X, y = make_classification(n_samples=100, random_state=0)
# `train_idx is None` to indicate that the classifier is prefit
train_idx, val_idx = None, np.arange(50, 100)
classifier = DecisionTreeClassifier(random_state=0).fit(X, y)
# make sure that the classifier memorized the full dataset such that
# we get perfect predictions and thus match the expected score
assert classifier.score(X[val_idx], y[val_idx]) == pytest.approx(1.0)
curve_scorer = _CurveScorer(
score_func=balanced_accuracy_score,
sign=1,
response_method="predict_proba",
thresholds=2,
kwargs={},
)
scores, thresholds = _fit_and_score_over_thresholds(
classifier,
X,
y,
fit_params={},
train_idx=train_idx,
val_idx=val_idx,
curve_scorer=curve_scorer,
score_params={},
)
assert np.all(thresholds[:-1] <= thresholds[1:])
assert_allclose(scores, [0.5, 1.0])
|
Check the behaviour with a prefit classifier.
|
test_fit_and_score_over_thresholds_prefit
|
python
|
scikit-learn/scikit-learn
|
sklearn/model_selection/tests/test_classification_threshold.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/tests/test_classification_threshold.py
|
BSD-3-Clause
|
def test_fit_and_score_over_thresholds_sample_weight():
"""Check that we dispatch the sample-weight to fit and score the classifier."""
X, y = load_iris(return_X_y=True)
X, y = X[:100], y[:100] # only 2 classes
    # create a dataset and repeat the samples of class #0 twice
X_repeated, y_repeated = np.vstack([X, X[y == 0]]), np.hstack([y, y[y == 0]])
# create a sample weight vector that is equivalent to the repeated dataset
sample_weight = np.ones_like(y)
sample_weight[:50] *= 2
classifier = LogisticRegression()
train_repeated_idx = np.arange(X_repeated.shape[0])
val_repeated_idx = np.arange(X_repeated.shape[0])
curve_scorer = _CurveScorer(
score_func=balanced_accuracy_score,
sign=1,
response_method="predict_proba",
thresholds=10,
kwargs={},
)
scores_repeated, thresholds_repeated = _fit_and_score_over_thresholds(
classifier,
X_repeated,
y_repeated,
fit_params={},
train_idx=train_repeated_idx,
val_idx=val_repeated_idx,
curve_scorer=curve_scorer,
score_params={},
)
train_idx, val_idx = np.arange(X.shape[0]), np.arange(X.shape[0])
scores, thresholds = _fit_and_score_over_thresholds(
classifier.set_fit_request(sample_weight=True),
X,
y,
fit_params={"sample_weight": sample_weight},
train_idx=train_idx,
val_idx=val_idx,
curve_scorer=curve_scorer.set_score_request(sample_weight=True),
score_params={"sample_weight": sample_weight},
)
assert_allclose(thresholds_repeated, thresholds)
assert_allclose(scores_repeated, scores)
|
Check that we dispatch the sample-weight to fit and score the classifier.
|
test_fit_and_score_over_thresholds_sample_weight
|
python
|
scikit-learn/scikit-learn
|
sklearn/model_selection/tests/test_classification_threshold.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/tests/test_classification_threshold.py
|
BSD-3-Clause
|
def test_fit_and_score_over_thresholds_fit_params(fit_params_type):
"""Check that we pass `fit_params` to the classifier when calling `fit`."""
X, y = make_classification(n_samples=100, random_state=0)
fit_params = {
"a": _convert_container(y, fit_params_type),
"b": _convert_container(y, fit_params_type),
}
classifier = CheckingClassifier(expected_fit_params=["a", "b"], random_state=0)
classifier.set_fit_request(a=True, b=True)
train_idx, val_idx = np.arange(50), np.arange(50, 100)
curve_scorer = _CurveScorer(
score_func=balanced_accuracy_score,
sign=1,
response_method="predict_proba",
thresholds=10,
kwargs={},
)
_fit_and_score_over_thresholds(
classifier,
X,
y,
fit_params=fit_params,
train_idx=train_idx,
val_idx=val_idx,
curve_scorer=curve_scorer,
score_params={},
)
|
Check that we pass `fit_params` to the classifier when calling `fit`.
|
test_fit_and_score_over_thresholds_fit_params
|
python
|
scikit-learn/scikit-learn
|
sklearn/model_selection/tests/test_classification_threshold.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/tests/test_classification_threshold.py
|
BSD-3-Clause
|
def test_tuned_threshold_classifier_no_binary(data):
    """Check that we raise an informative error message for a non-binary problem."""
err_msg = "Only binary classification is supported."
with pytest.raises(ValueError, match=err_msg):
TunedThresholdClassifierCV(LogisticRegression()).fit(*data)
|
Check that we raise an informative error message for a non-binary problem.
|
test_tuned_threshold_classifier_no_binary
|
python
|
scikit-learn/scikit-learn
|
sklearn/model_selection/tests/test_classification_threshold.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/tests/test_classification_threshold.py
|
BSD-3-Clause
|
def test_tuned_threshold_classifier_conflict_cv_refit(params, err_type, err_msg):
"""Check that we raise an informative error message when `cv` and `refit`
cannot be used together.
"""
X, y = make_classification(n_samples=100, random_state=0)
with pytest.raises(err_type, match=err_msg):
TunedThresholdClassifierCV(LogisticRegression(), **params).fit(X, y)
|
Check that we raise an informative error message when `cv` and `refit`
cannot be used together.
|
test_tuned_threshold_classifier_conflict_cv_refit
|
python
|
scikit-learn/scikit-learn
|
sklearn/model_selection/tests/test_classification_threshold.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/tests/test_classification_threshold.py
|
BSD-3-Clause
|
def test_threshold_classifier_estimator_response_methods(
ThresholdClassifier, estimator, response_method
):
"""Check that `TunedThresholdClassifierCV` exposes the same response methods as the
underlying estimator.
"""
X, y = make_classification(n_samples=100, random_state=0)
model = ThresholdClassifier(estimator=estimator)
assert hasattr(model, response_method) == hasattr(estimator, response_method)
model.fit(X, y)
assert hasattr(model, response_method) == hasattr(estimator, response_method)
if hasattr(model, response_method):
y_pred_cutoff = getattr(model, response_method)(X)
y_pred_underlying_estimator = getattr(model.estimator_, response_method)(X)
assert_allclose(y_pred_cutoff, y_pred_underlying_estimator)
|
Check that `TunedThresholdClassifierCV` exposes the same response methods as the
underlying estimator.
|
test_threshold_classifier_estimator_response_methods
|
python
|
scikit-learn/scikit-learn
|
sklearn/model_selection/tests/test_classification_threshold.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/tests/test_classification_threshold.py
|
BSD-3-Clause
|
def test_tuned_threshold_classifier_without_constraint_value(response_method):
"""Check that `TunedThresholdClassifierCV` is optimizing a given objective
metric."""
X, y = load_breast_cancer(return_X_y=True)
# remove feature to degrade performances
X = X[:, :5]
# make the problem completely imbalanced such that the balanced accuracy is low
indices_pos = np.flatnonzero(y == 1)
indices_pos = indices_pos[: indices_pos.size // 50]
indices_neg = np.flatnonzero(y == 0)
X = np.vstack([X[indices_neg], X[indices_pos]])
y = np.hstack([y[indices_neg], y[indices_pos]])
lr = make_pipeline(StandardScaler(), LogisticRegression()).fit(X, y)
thresholds = 100
model = TunedThresholdClassifierCV(
estimator=lr,
scoring="balanced_accuracy",
response_method=response_method,
thresholds=thresholds,
store_cv_results=True,
)
score_optimized = balanced_accuracy_score(y, model.fit(X, y).predict(X))
score_baseline = balanced_accuracy_score(y, lr.predict(X))
assert score_optimized > score_baseline
assert model.cv_results_["thresholds"].shape == (thresholds,)
assert model.cv_results_["scores"].shape == (thresholds,)
|
Check that `TunedThresholdClassifierCV` is optimizing a given objective
metric.
|
test_tuned_threshold_classifier_without_constraint_value
|
python
|
scikit-learn/scikit-learn
|
sklearn/model_selection/tests/test_classification_threshold.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/tests/test_classification_threshold.py
|
BSD-3-Clause
|
def test_tuned_threshold_classifier_metric_with_parameter():
    """Check that we can pass a metric with a parameter. In addition, check that
`f_beta` with `beta=1` is equivalent to `f1` and different from `f_beta` with
`beta=2`.
"""
X, y = load_breast_cancer(return_X_y=True)
lr = make_pipeline(StandardScaler(), LogisticRegression()).fit(X, y)
model_fbeta_1 = TunedThresholdClassifierCV(
estimator=lr, scoring=make_scorer(fbeta_score, beta=1)
).fit(X, y)
model_fbeta_2 = TunedThresholdClassifierCV(
estimator=lr, scoring=make_scorer(fbeta_score, beta=2)
).fit(X, y)
model_f1 = TunedThresholdClassifierCV(
estimator=lr, scoring=make_scorer(f1_score)
).fit(X, y)
assert model_fbeta_1.best_threshold_ == pytest.approx(model_f1.best_threshold_)
assert model_fbeta_1.best_threshold_ != pytest.approx(model_fbeta_2.best_threshold_)
|
Check that we can pass a metric with a parameter. In addition, check that
`f_beta` with `beta=1` is equivalent to `f1` and different from `f_beta` with
`beta=2`.
|
test_tuned_threshold_classifier_metric_with_parameter
|
python
|
scikit-learn/scikit-learn
|
sklearn/model_selection/tests/test_classification_threshold.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/tests/test_classification_threshold.py
|
BSD-3-Clause
|
def test_tuned_threshold_classifier_with_string_targets(response_method, metric):
"""Check that targets represented by str are properly managed.
Also, check with several metrics to be sure that `pos_label` is properly
dispatched.
"""
X, y = load_breast_cancer(return_X_y=True)
# Encode numeric targets by meaningful strings. We purposely designed the class
# names such that the `pos_label` is the first alphabetically sorted class and thus
# encoded as 0.
classes = np.array(["cancer", "healthy"], dtype=object)
y = classes[y]
model = TunedThresholdClassifierCV(
estimator=make_pipeline(StandardScaler(), LogisticRegression()),
scoring=metric,
response_method=response_method,
thresholds=100,
).fit(X, y)
assert_array_equal(model.classes_, np.sort(classes))
y_pred = model.predict(X)
assert_array_equal(np.unique(y_pred), np.sort(classes))
|
Check that targets represented by str are properly managed.
Also, check with several metrics to be sure that `pos_label` is properly
dispatched.
|
test_tuned_threshold_classifier_with_string_targets
|
python
|
scikit-learn/scikit-learn
|
sklearn/model_selection/tests/test_classification_threshold.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/tests/test_classification_threshold.py
|
BSD-3-Clause
|
def test_tuned_threshold_classifier_refit(with_sample_weight, global_random_seed):
"""Check the behaviour of the `refit` parameter."""
rng = np.random.RandomState(global_random_seed)
X, y = make_classification(n_samples=100, random_state=0)
if with_sample_weight:
sample_weight = rng.randn(X.shape[0])
sample_weight = np.abs(sample_weight, out=sample_weight)
else:
sample_weight = None
    # check that `estimator_` is fitted on the full dataset when `refit=True`
estimator = LogisticRegression().set_fit_request(sample_weight=True)
model = TunedThresholdClassifierCV(estimator, refit=True).fit(
X, y, sample_weight=sample_weight
)
assert model.estimator_ is not estimator
estimator.fit(X, y, sample_weight=sample_weight)
assert_allclose(model.estimator_.coef_, estimator.coef_)
assert_allclose(model.estimator_.intercept_, estimator.intercept_)
# check that `estimator_` was not altered when `refit=False` and `cv="prefit"`
estimator = LogisticRegression().set_fit_request(sample_weight=True)
estimator.fit(X, y, sample_weight=sample_weight)
coef = estimator.coef_.copy()
model = TunedThresholdClassifierCV(estimator, cv="prefit", refit=False).fit(
X, y, sample_weight=sample_weight
)
assert model.estimator_ is estimator
assert_allclose(model.estimator_.coef_, coef)
# check that we train `estimator_` on the training split of a given cross-validation
estimator = LogisticRegression().set_fit_request(sample_weight=True)
cv = [
(np.arange(50), np.arange(50, 100)),
] # single split
model = TunedThresholdClassifierCV(estimator, cv=cv, refit=False).fit(
X, y, sample_weight=sample_weight
)
assert model.estimator_ is not estimator
if with_sample_weight:
sw_train = sample_weight[cv[0][0]]
else:
sw_train = None
estimator.fit(X[cv[0][0]], y[cv[0][0]], sample_weight=sw_train)
assert_allclose(model.estimator_.coef_, estimator.coef_)
|
Check the behaviour of the `refit` parameter.
|
test_tuned_threshold_classifier_refit
|
python
|
scikit-learn/scikit-learn
|
sklearn/model_selection/tests/test_classification_threshold.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/tests/test_classification_threshold.py
|
BSD-3-Clause
|
def test_tuned_threshold_classifier_fit_params(fit_params_type):
"""Check that we pass `fit_params` to the classifier when calling `fit`."""
X, y = make_classification(n_samples=100, random_state=0)
fit_params = {
"a": _convert_container(y, fit_params_type),
"b": _convert_container(y, fit_params_type),
}
classifier = CheckingClassifier(expected_fit_params=["a", "b"], random_state=0)
classifier.set_fit_request(a=True, b=True)
model = TunedThresholdClassifierCV(classifier)
model.fit(X, y, **fit_params)
|
Check that we pass `fit_params` to the classifier when calling `fit`.
|
test_tuned_threshold_classifier_fit_params
|
python
|
scikit-learn/scikit-learn
|
sklearn/model_selection/tests/test_classification_threshold.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/tests/test_classification_threshold.py
|
BSD-3-Clause
|
def test_tuned_threshold_classifier_cv_zeros_sample_weights_equivalence():
    """Check that removing some samples from the dataset `X` is
equivalent to passing a `sample_weight` with a factor 0."""
X, y = load_iris(return_X_y=True)
# Scale the data to avoid any convergence issue
X = StandardScaler().fit_transform(X)
# Only use 2 classes and select samples such that 2-fold cross-validation
# split will lead to an equivalence with a `sample_weight` of 0
X = np.vstack((X[:40], X[50:90]))
y = np.hstack((y[:40], y[50:90]))
sample_weight = np.zeros_like(y)
sample_weight[::2] = 1
estimator = LogisticRegression().set_fit_request(sample_weight=True)
model_without_weights = TunedThresholdClassifierCV(estimator, cv=2)
model_with_weights = clone(model_without_weights)
model_with_weights.fit(X, y, sample_weight=sample_weight)
model_without_weights.fit(X[::2], y[::2])
assert_allclose(
model_with_weights.estimator_.coef_, model_without_weights.estimator_.coef_
)
y_pred_with_weights = model_with_weights.predict_proba(X)
y_pred_without_weights = model_without_weights.predict_proba(X)
assert_allclose(y_pred_with_weights, y_pred_without_weights)
|
Check that removing some samples from the dataset `X` is
equivalent to passing a `sample_weight` with a factor 0.
|
test_tuned_threshold_classifier_cv_zeros_sample_weights_equivalence
|
python
|
scikit-learn/scikit-learn
|
sklearn/model_selection/tests/test_classification_threshold.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/tests/test_classification_threshold.py
|
BSD-3-Clause
|
def test_tuned_threshold_classifier_thresholds_array():
    """Check that we can pass an array to `thresholds` and that it is used as the
    candidate thresholds internally."""
X, y = make_classification(random_state=0)
estimator = LogisticRegression()
thresholds = np.linspace(0, 1, 11)
tuned_model = TunedThresholdClassifierCV(
estimator,
thresholds=thresholds,
response_method="predict_proba",
store_cv_results=True,
).fit(X, y)
assert_allclose(tuned_model.cv_results_["thresholds"], thresholds)
|
Check that we can pass an array to `thresholds` and that it is used as the
    candidate thresholds internally.
|
test_tuned_threshold_classifier_thresholds_array
|
python
|
scikit-learn/scikit-learn
|
sklearn/model_selection/tests/test_classification_threshold.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/tests/test_classification_threshold.py
|
BSD-3-Clause
|
def test_tuned_threshold_classifier_store_cv_results(store_cv_results):
    """Check that `cv_results_` exists depending on `store_cv_results`."""
X, y = make_classification(random_state=0)
estimator = LogisticRegression()
tuned_model = TunedThresholdClassifierCV(
estimator, store_cv_results=store_cv_results
).fit(X, y)
if store_cv_results:
assert hasattr(tuned_model, "cv_results_")
else:
assert not hasattr(tuned_model, "cv_results_")
|
Check that `cv_results_` exists depending on `store_cv_results`.
|
test_tuned_threshold_classifier_store_cv_results
|
python
|
scikit-learn/scikit-learn
|
sklearn/model_selection/tests/test_classification_threshold.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/tests/test_classification_threshold.py
|
BSD-3-Clause
|
def test_tuned_threshold_classifier_cv_float():
"""Check the behaviour when `cv` is set to a float."""
X, y = make_classification(random_state=0)
# case where `refit=False` and cv is a float: the underlying estimator will be fit
# on the training set given by a ShuffleSplit. We check that we get the same model
# coefficients.
test_size = 0.3
estimator = LogisticRegression()
tuned_model = TunedThresholdClassifierCV(
estimator, cv=test_size, refit=False, random_state=0
).fit(X, y)
tuned_model.fit(X, y)
cv = StratifiedShuffleSplit(n_splits=1, test_size=test_size, random_state=0)
train_idx, val_idx = next(cv.split(X, y))
cloned_estimator = clone(estimator).fit(X[train_idx], y[train_idx])
assert_allclose(tuned_model.estimator_.coef_, cloned_estimator.coef_)
# case where `refit=True`, then the underlying estimator is fitted on the full
# dataset.
tuned_model.set_params(refit=True).fit(X, y)
cloned_estimator = clone(estimator).fit(X, y)
assert_allclose(tuned_model.estimator_.coef_, cloned_estimator.coef_)
|
Check the behaviour when `cv` is set to a float.
|
test_tuned_threshold_classifier_cv_float
|
python
|
scikit-learn/scikit-learn
|
sklearn/model_selection/tests/test_classification_threshold.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/tests/test_classification_threshold.py
|
BSD-3-Clause
|