code | docstring | func_name | language | repo | path | url | license
---|---|---|---|---|---|---|---
def predict(self, X):
    """Perform regression on samples in X.

    For a one-class model, +1 (inlier) or -1 (outlier) is returned.

    Parameters
    ----------
    X : {array-like, sparse matrix} of shape (n_samples, n_features)
        For kernel="precomputed", the expected shape of X is
        (n_samples_test, n_samples_train).

    Returns
    -------
    y_pred : ndarray of shape (n_samples,)
        The predicted values.
    """
    X = self._validate_for_predict(X)
    predict = self._sparse_predict if self._sparse else self._dense_predict
    return predict(X)
|
Perform regression on samples in X.
For a one-class model, +1 (inlier) or -1 (outlier) is returned.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
For kernel="precomputed", the expected shape of X is
(n_samples_test, n_samples_train).
Returns
-------
y_pred : ndarray of shape (n_samples,)
The predicted values.
|
predict
|
python
|
scikit-learn/scikit-learn
|
sklearn/svm/_base.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/svm/_base.py
|
BSD-3-Clause
|
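A minimal usage sketch for the regression `predict` documented above, assuming a standard scikit-learn install; the data and kernel choice are illustrative:

```python
import numpy as np
from sklearn.svm import SVR

rng = np.random.RandomState(0)
X = rng.rand(40, 3)
y = X @ np.array([1.0, -2.0, 0.5]) + 0.1 * rng.randn(40)

reg = SVR(kernel="rbf").fit(X, y)
y_pred = reg.predict(X[:5])  # ndarray of shape (5,)
```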
def _compute_kernel(self, X):
    """Return the data transformed by a callable kernel"""
    if callable(self.kernel):
        # in the case of precomputed kernel given as a function, we
        # have to compute explicitly the kernel matrix
        kernel = self.kernel(X, self.__Xfit)
        if sp.issparse(kernel):
            kernel = kernel.toarray()
        X = np.asarray(kernel, dtype=np.float64, order="C")
    return X
|
Return the data transformed by a callable kernel
|
_compute_kernel
|
python
|
scikit-learn/scikit-learn
|
sklearn/svm/_base.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/svm/_base.py
|
BSD-3-Clause
|
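A sketch of the callable-kernel path that `_compute_kernel` handles: passing a Python function as `kernel` makes the estimator compute the Gram matrix explicitly. The kernel function below is illustrative:

```python
import numpy as np
from sklearn.datasets import make_classification
from sklearn.svm import SVC

def linear_kernel(X1, X2):
    # a callable kernel; internally evaluated as kernel(X, X_fit)
    return np.dot(X1, X2.T)

X, y = make_classification(n_samples=30, random_state=0)
clf = SVC(kernel=linear_kernel).fit(X, y)
print(clf.predict(X[:3]))
```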
def _decision_function(self, X):
    """Evaluates the decision function for the samples in X.

    Parameters
    ----------
    X : array-like of shape (n_samples, n_features)

    Returns
    -------
    X : array-like of shape (n_samples, n_class * (n_class-1) / 2)
        Returns the decision function of the sample for each class
        in the model.
    """
    # NOTE: _validate_for_predict contains check for is_fitted
    # hence must be placed before any other attributes are used.
    X = self._validate_for_predict(X)
    X = self._compute_kernel(X)

    if self._sparse:
        dec_func = self._sparse_decision_function(X)
    else:
        dec_func = self._dense_decision_function(X)

    # In binary case, we need to flip the sign of coef, intercept and
    # decision function.
    if self._impl in ["c_svc", "nu_svc"] and len(self.classes_) == 2:
        return -dec_func.ravel()

    return dec_func
|
Evaluates the decision function for the samples in X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Returns
-------
X : array-like of shape (n_samples, n_class * (n_class-1) / 2)
Returns the decision function of the sample for each class
in the model.
|
_decision_function
|
python
|
scikit-learn/scikit-learn
|
sklearn/svm/_base.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/svm/_base.py
|
BSD-3-Clause
|
def coef_(self):
    """Weights assigned to the features when `kernel="linear"`.

    Returns
    -------
    ndarray of shape (n_features, n_classes)
    """
    if self.kernel != "linear":
        raise AttributeError("coef_ is only available when using a linear kernel")

    coef = self._get_coef()

    # coef_ being a read-only property, it's better to mark the value as
    # immutable to avoid hiding potential bugs for the unsuspecting user.
    if sp.issparse(coef):
        # sparse matrices do not have global flags
        coef.data.flags.writeable = False
    else:
        # regular dense array
        coef.flags.writeable = False
    return coef
|
Weights assigned to the features when `kernel="linear"`.
Returns
-------
ndarray of shape (n_features, n_classes)
|
coef_
|
python
|
scikit-learn/scikit-learn
|
sklearn/svm/_base.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/svm/_base.py
|
BSD-3-Clause
|
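A sketch of how the `coef_` property above behaves in practice; it is only defined for `kernel="linear"` and the returned array is marked read-only. Data is illustrative:

```python
from sklearn.datasets import make_classification
from sklearn.svm import SVC

X, y = make_classification(random_state=0)

clf = SVC(kernel="linear").fit(X, y)
w = clf.coef_
print(w.flags.writeable)  # False: the property marks the array immutable

rbf = SVC(kernel="rbf").fit(X, y)
# rbf.coef_  # would raise AttributeError: only available with a linear kernel
```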
def n_support_(self):
    """Number of support vectors for each class."""
    try:
        check_is_fitted(self)
    except NotFittedError:
        raise AttributeError

    svm_type = LIBSVM_IMPL.index(self._impl)
    if svm_type in (0, 1):
        return self._n_support
    else:
        # SVR and OneClass
        # _n_support has size 2, we make it size 1
        return np.array([self._n_support[0]])
|
Number of support vectors for each class.
|
n_support_
|
python
|
scikit-learn/scikit-learn
|
sklearn/svm/_base.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/svm/_base.py
|
BSD-3-Clause
|
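An illustrative look at `n_support_`: classifiers report one count per class, while SVR and one-class models report a single count, matching the branch in the property above. Data is illustrative:

```python
import numpy as np
from sklearn.svm import SVC, SVR

X = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 2.0], [3.0, 3.0]])
y = np.array([0, 0, 1, 1])

clf = SVC(kernel="linear").fit(X, y)
print(clf.n_support_)  # one entry per class, e.g. [1 1]

reg = SVR().fit(X, y.astype(float))
print(reg.n_support_)  # size-1 array for SVR/one-class models
```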
def decision_function(self, X):
    """Evaluate the decision function for the samples in X.

    Parameters
    ----------
    X : array-like of shape (n_samples, n_features)
        The input samples.

    Returns
    -------
    X : ndarray of shape (n_samples, n_classes * (n_classes-1) / 2)
        Returns the decision function of the sample for each class
        in the model.
        If decision_function_shape='ovr', the shape is (n_samples,
        n_classes).

    Notes
    -----
    If decision_function_shape='ovo', the function values are proportional
    to the distance of the samples X to the separating hyperplane. If the
    exact distances are required, divide the function values by the norm of
    the weight vector (``coef_``). See also `this question
    <https://stats.stackexchange.com/questions/14876/
    interpreting-distance-from-hyperplane-in-svm>`_ for further details.

    If decision_function_shape='ovr', the decision function is a monotonic
    transformation of the ovo decision function.
    """
    dec = self._decision_function(X)
    if self.decision_function_shape == "ovr" and len(self.classes_) > 2:
        return _ovr_decision_function(dec < 0, -dec, len(self.classes_))
    return dec
|
Evaluate the decision function for the samples in X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The input samples.
Returns
-------
X : ndarray of shape (n_samples, n_classes * (n_classes-1) / 2)
Returns the decision function of the sample for each class
in the model.
If decision_function_shape='ovr', the shape is (n_samples,
n_classes).
Notes
-----
If decision_function_shape='ovo', the function values are proportional
to the distance of the samples X to the separating hyperplane. If the
exact distances are required, divide the function values by the norm of
the weight vector (``coef_``). See also `this question
<https://stats.stackexchange.com/questions/14876/
interpreting-distance-from-hyperplane-in-svm>`_ for further details.
If decision_function_shape='ovr', the decision function is a monotonic
transformation of the ovo decision function.
|
decision_function
|
python
|
scikit-learn/scikit-learn
|
sklearn/svm/_base.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/svm/_base.py
|
BSD-3-Clause
|
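A sketch contrasting the ovo and ovr output shapes described above, on an illustrative 4-class problem:

```python
from sklearn.datasets import make_classification
from sklearn.svm import SVC

X, y = make_classification(
    n_samples=80, n_classes=4, n_informative=6, random_state=0
)

ovo = SVC(decision_function_shape="ovo").fit(X, y)
print(ovo.decision_function(X[:2]).shape)  # (2, 6): n_classes*(n_classes-1)/2

ovr = SVC(decision_function_shape="ovr").fit(X, y)
print(ovr.decision_function(X[:2]).shape)  # (2, 4): n_classes
```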
def predict(self, X):
    """Perform classification on samples in X.

    For a one-class model, +1 or -1 is returned.

    Parameters
    ----------
    X : {array-like, sparse matrix} of shape (n_samples, n_features) or \
            (n_samples_test, n_samples_train)
        For kernel="precomputed", the expected shape of X is
        (n_samples_test, n_samples_train).

    Returns
    -------
    y_pred : ndarray of shape (n_samples,)
        Class labels for samples in X.
    """
    check_is_fitted(self)
    if self.break_ties and self.decision_function_shape == "ovo":
        raise ValueError(
            "break_ties must be False when decision_function_shape is 'ovo'"
        )

    if (
        self.break_ties
        and self.decision_function_shape == "ovr"
        and len(self.classes_) > 2
    ):
        y = np.argmax(self.decision_function(X), axis=1)
    else:
        y = super().predict(X)
    return self.classes_.take(np.asarray(y, dtype=np.intp))
|
Perform classification on samples in X.
For a one-class model, +1 or -1 is returned.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features) or (n_samples_test, n_samples_train)
For kernel="precomputed", the expected shape of X is
(n_samples_test, n_samples_train).
Returns
-------
y_pred : ndarray of shape (n_samples,)
Class labels for samples in X.
|
predict
|
python
|
scikit-learn/scikit-learn
|
sklearn/svm/_base.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/svm/_base.py
|
BSD-3-Clause
|
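A sketch of the `break_ties` branch in the classification `predict` above: with `break_ties=True` and `decision_function_shape="ovr"`, multiclass predictions follow the argmax of the decision function by construction. Data is illustrative:

```python
import numpy as np
from sklearn.datasets import make_blobs
from sklearn.svm import SVC

X, y = make_blobs(n_samples=30, centers=3, random_state=0)

clf = SVC(decision_function_shape="ovr", break_ties=True).fit(X, y)
pred = clf.predict(X)
assert np.all(pred == clf.classes_[np.argmax(clf.decision_function(X), axis=1)])
```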
def predict_proba(self, X):
    """Compute probabilities of possible outcomes for samples in X.

    The model needs to have probability information computed at training
    time: fit with attribute `probability` set to True.

    Parameters
    ----------
    X : array-like of shape (n_samples, n_features)
        For kernel="precomputed", the expected shape of X is
        (n_samples_test, n_samples_train).

    Returns
    -------
    T : ndarray of shape (n_samples, n_classes)
        Returns the probability of the sample for each class in
        the model. The columns correspond to the classes in sorted
        order, as they appear in the attribute :term:`classes_`.

    Notes
    -----
    The probability model is created using cross validation, so
    the results can be slightly different than those obtained by
    predict. Also, it will produce meaningless results on very small
    datasets.
    """
    X = self._validate_for_predict(X)
    if self.probA_.size == 0 or self.probB_.size == 0:
        raise NotFittedError(
            "predict_proba is not available when fitted with probability=False"
        )
    pred_proba = (
        self._sparse_predict_proba if self._sparse else self._dense_predict_proba
    )
    return pred_proba(X)
|
Compute probabilities of possible outcomes for samples in X.
The model needs to have probability information computed at training
time: fit with attribute `probability` set to True.
Parameters
----------
X : array-like of shape (n_samples, n_features)
For kernel="precomputed", the expected shape of X is
(n_samples_test, n_samples_train).
Returns
-------
T : ndarray of shape (n_samples, n_classes)
Returns the probability of the sample for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute :term:`classes_`.
Notes
-----
The probability model is created using cross validation, so
the results can be slightly different than those obtained by
predict. Also, it will produce meaningless results on very small
datasets.
|
predict_proba
|
python
|
scikit-learn/scikit-learn
|
sklearn/svm/_base.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/svm/_base.py
|
BSD-3-Clause
|
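An illustrative call pattern for `predict_proba`: the estimator must be fit with `probability=True`, otherwise the method is unavailable, as the guard above shows:

```python
from sklearn.datasets import load_iris
from sklearn.svm import SVC

X, y = load_iris(return_X_y=True)

clf = SVC(probability=True, random_state=0).fit(X, y)
proba = clf.predict_proba(X[:2])  # shape (2, 3); each row sums to 1

clf_noprob = SVC().fit(X, y)
# clf_noprob.predict_proba(X)  # would raise: fitted with probability=False
```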
def _get_liblinear_solver_type(multi_class, penalty, loss, dual):
    """Find the liblinear magic number for the solver.

    This number depends on the values of the following attributes:
      - multi_class
      - penalty
      - loss
      - dual

    The same number is also internally used by LibLinear to determine
    which solver to use.
    """
    # nested dicts containing level 1: available loss functions,
    # level2: available penalties for the given loss function,
    # level3: whether the dual solver is available for the specified
    # combination of loss function and penalty
    _solver_type_dict = {
        "logistic_regression": {"l1": {False: 6}, "l2": {False: 0, True: 7}},
        "hinge": {"l2": {True: 3}},
        "squared_hinge": {"l1": {False: 5}, "l2": {False: 2, True: 1}},
        "epsilon_insensitive": {"l2": {True: 13}},
        "squared_epsilon_insensitive": {"l2": {False: 11, True: 12}},
        "crammer_singer": 4,
    }

    if multi_class == "crammer_singer":
        return _solver_type_dict[multi_class]
    elif multi_class != "ovr":
        raise ValueError(
            "`multi_class` must be one of `ovr`, `crammer_singer`, got %r" % multi_class
        )

    _solver_pen = _solver_type_dict.get(loss, None)
    if _solver_pen is None:
        error_string = "loss='%s' is not supported" % loss
    else:
        _solver_dual = _solver_pen.get(penalty, None)
        if _solver_dual is None:
            error_string = (
                "The combination of penalty='%s' and loss='%s' is not supported"
                % (penalty, loss)
            )
        else:
            solver_num = _solver_dual.get(dual, None)
            if solver_num is None:
                error_string = (
                    "The combination of penalty='%s' and "
                    "loss='%s' are not supported when dual=%s" % (penalty, loss, dual)
                )
            else:
                return solver_num
    raise ValueError(
        "Unsupported set of arguments: %s, Parameters: penalty=%r, loss=%r, dual=%r"
        % (error_string, penalty, loss, dual)
    )
|
Find the liblinear magic number for the solver.
This number depends on the values of the following attributes:
- multi_class
- penalty
- loss
- dual
The same number is also internally used by LibLinear to determine
which solver to use.
|
_get_liblinear_solver_type
|
python
|
scikit-learn/scikit-learn
|
sklearn/svm/_base.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/svm/_base.py
|
BSD-3-Clause
|
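The solver table above can be probed indirectly through LinearSVC; the combinations below are a sketch, with the solver number read off the nested dict:

```python
from sklearn.datasets import make_classification
from sklearn.svm import LinearSVC

X, y = make_classification(random_state=0)

# supported: squared hinge + l1 penalty, primal (solver number 5 above)
LinearSVC(penalty="l1", loss="squared_hinge", dual=False).fit(X, y)

# unsupported: hinge loss has no l1-penalized liblinear solver
try:
    LinearSVC(penalty="l1", loss="hinge", dual=True).fit(X, y)
except ValueError as exc:
    print(exc)
```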
def _fit_liblinear(
    X,
    y,
    C,
    fit_intercept,
    intercept_scaling,
    class_weight,
    penalty,
    dual,
    verbose,
    max_iter,
    tol,
    random_state=None,
    multi_class="ovr",
    loss="logistic_regression",
    epsilon=0.1,
    sample_weight=None,
):
    """Used by Logistic Regression (and CV) and LinearSVC/LinearSVR.

    Preprocessing is done in this function before supplying it to liblinear.

    Parameters
    ----------
    X : {array-like, sparse matrix} of shape (n_samples, n_features)
        Training vector, where `n_samples` is the number of samples and
        `n_features` is the number of features.

    y : array-like of shape (n_samples,)
        Target vector relative to X.

    C : float
        Inverse of regularization strength; the lower the C, the higher
        the penalization.

    fit_intercept : bool
        Whether or not to fit an intercept. If set to True, the feature vector
        is extended to include an intercept term: ``[x_1, ..., x_n, 1]``, where
        1 corresponds to the intercept. If set to False, no intercept will be
        used in calculations (i.e. data is expected to be already centered).

    intercept_scaling : float
        Liblinear internally penalizes the intercept, treating it like any
        other term in the feature vector. To reduce the impact of the
        regularization on the intercept, the `intercept_scaling` parameter can
        be set to a value greater than 1; the higher the value of
        `intercept_scaling`, the lower the impact of regularization on it.
        Then, the weights become `[w_x_1, ..., w_x_n,
        w_intercept*intercept_scaling]`, where `w_x_1, ..., w_x_n` represent
        the feature weights and the intercept weight is scaled by
        `intercept_scaling`. This scaling allows the intercept term to have a
        different regularization behavior compared to the other features.

    class_weight : dict or 'balanced', default=None
        Weights associated with classes in the form ``{class_label: weight}``.
        If not given, all classes are supposed to have weight one. For
        multi-output problems, a list of dicts can be provided in the same
        order as the columns of y.
        The "balanced" mode uses the values of y to automatically adjust
        weights inversely proportional to class frequencies in the input data
        as ``n_samples / (n_classes * np.bincount(y))``.

    penalty : {'l1', 'l2'}
        The norm of the penalty used in regularization.

    dual : bool
        Dual or primal formulation.

    verbose : int
        Set verbose to any positive number for verbosity.

    max_iter : int
        Number of iterations.

    tol : float
        Stopping condition.

    random_state : int, RandomState instance or None, default=None
        Controls the pseudo random number generation for shuffling the data.
        Pass an int for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.

    multi_class : {'ovr', 'crammer_singer'}, default='ovr'
        `ovr` trains n_classes one-vs-rest classifiers, while `crammer_singer`
        optimizes a joint objective over all classes.
        While `crammer_singer` is interesting from a theoretical perspective
        because it is consistent, it is seldom used in practice, rarely leads
        to better accuracy, and is more expensive to compute.
        If `crammer_singer` is chosen, the options loss, penalty and dual will
        be ignored.

    loss : {'logistic_regression', 'hinge', 'squared_hinge', \
            'epsilon_insensitive', 'squared_epsilon_insensitive'}, \
            default='logistic_regression'
        The loss function used to fit the model.

    epsilon : float, default=0.1
        Epsilon parameter in the epsilon-insensitive loss function. Note
        that the value of this parameter depends on the scale of the target
        variable y. If unsure, set epsilon=0.

    sample_weight : array-like of shape (n_samples,), default=None
        Weights assigned to each sample.

    Returns
    -------
    coef_ : ndarray of shape (n_classes, n_features)
        The coefficient matrix obtained by minimizing the objective function.

    intercept_ : float
        The intercept term added to the vector.

    n_iter_ : array of int
        Number of iterations run for each class.
    """
    if loss not in ["epsilon_insensitive", "squared_epsilon_insensitive"]:
        enc = LabelEncoder()
        y_ind = enc.fit_transform(y)
        classes_ = enc.classes_
        if len(classes_) < 2:
            raise ValueError(
                "This solver needs samples of at least 2 classes"
                " in the data, but the data contains only one"
                " class: %r" % classes_[0]
            )

        class_weight_ = compute_class_weight(
            class_weight, classes=classes_, y=y, sample_weight=sample_weight
        )
    else:
        class_weight_ = np.empty(0, dtype=np.float64)
        y_ind = y
    liblinear.set_verbosity_wrap(verbose)
    rnd = check_random_state(random_state)
    if verbose:
        print("[LibLinear]", end="")

    # LinearSVC breaks when intercept_scaling is <= 0
    bias = -1.0
    if fit_intercept:
        if intercept_scaling <= 0:
            raise ValueError(
                "Intercept scaling is %r but needs to be greater "
                "than 0. To disable fitting an intercept,"
                " set fit_intercept=False." % intercept_scaling
            )
        else:
            bias = intercept_scaling

    libsvm.set_verbosity_wrap(verbose)
    libsvm_sparse.set_verbosity_wrap(verbose)
    liblinear.set_verbosity_wrap(verbose)

    # Liblinear doesn't support 64bit sparse matrix indices yet
    if sp.issparse(X):
        _check_large_sparse(X)

    # LibLinear wants targets as doubles, even for classification
    y_ind = np.asarray(y_ind, dtype=np.float64).ravel()
    y_ind = np.require(y_ind, requirements="W")

    sample_weight = _check_sample_weight(sample_weight, X, dtype=np.float64)

    solver_type = _get_liblinear_solver_type(multi_class, penalty, loss, dual)
    raw_coef_, n_iter_ = liblinear.train_wrap(
        X,
        y_ind,
        sp.issparse(X),
        solver_type,
        tol,
        bias,
        C,
        class_weight_,
        max_iter,
        rnd.randint(np.iinfo("i").max),
        epsilon,
        sample_weight,
    )
    # Regarding rnd.randint(..) in the above signature:
    # seed for srand in range [0..INT_MAX); due to limitations in Numpy
    # on 32-bit platforms, we can't get to the UINT_MAX limit that
    # srand supports
    n_iter_max = max(n_iter_)
    if n_iter_max >= max_iter:
        warnings.warn(
            "Liblinear failed to converge, increase the number of iterations.",
            ConvergenceWarning,
        )

    if fit_intercept:
        coef_ = raw_coef_[:, :-1]
        intercept_ = intercept_scaling * raw_coef_[:, -1]
    else:
        coef_ = raw_coef_
        intercept_ = 0.0

    return coef_, intercept_, n_iter_
|
Used by Logistic Regression (and CV) and LinearSVC/LinearSVR.
Preprocessing is done in this function before supplying it to liblinear.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : array-like of shape (n_samples,)
Target vector relative to X
C : float
Inverse of regularization strength; the lower the C, the higher
the penalization.
fit_intercept : bool
Whether or not to fit an intercept. If set to True, the feature vector
is extended to include an intercept term: ``[x_1, ..., x_n, 1]``, where
1 corresponds to the intercept. If set to False, no intercept will be
used in calculations (i.e. data is expected to be already centered).
intercept_scaling : float
Liblinear internally penalizes the intercept, treating it like any
other term in the feature vector. To reduce the impact of the
regularization on the intercept, the `intercept_scaling` parameter can
be set to a value greater than 1; the higher the value of
`intercept_scaling`, the lower the impact of regularization on it.
Then, the weights become `[w_x_1, ..., w_x_n,
w_intercept*intercept_scaling]`, where `w_x_1, ..., w_x_n` represent
the feature weights and the intercept weight is scaled by
`intercept_scaling`. This scaling allows the intercept term to have a
different regularization behavior compared to the other features.
class_weight : dict or 'balanced', default=None
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
penalty : {'l1', 'l2'}
The norm of the penalty used in regularization.
dual : bool
Dual or primal formulation.
verbose : int
Set verbose to any positive number for verbosity.
max_iter : int
Number of iterations.
tol : float
Stopping condition.
random_state : int, RandomState instance or None, default=None
Controls the pseudo random number generation for shuffling the data.
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
multi_class : {'ovr', 'crammer_singer'}, default='ovr'
`ovr` trains n_classes one-vs-rest classifiers, while `crammer_singer`
optimizes a joint objective over all classes.
While `crammer_singer` is interesting from a theoretical perspective
because it is consistent, it is seldom used in practice, rarely leads
to better accuracy, and is more expensive to compute.
If `crammer_singer` is chosen, the options loss, penalty and dual will
be ignored.
loss : {'logistic_regression', 'hinge', 'squared_hinge', 'epsilon_insensitive', 'squared_epsilon_insensitive'}, default='logistic_regression'
The loss function used to fit the model.
epsilon : float, default=0.1
Epsilon parameter in the epsilon-insensitive loss function. Note
that the value of this parameter depends on the scale of the target
variable y. If unsure, set epsilon=0.
sample_weight : array-like of shape (n_samples,), default=None
Weights assigned to each sample.
Returns
-------
coef_ : ndarray of shape (n_classes, n_features)
The coefficient matrix obtained by minimizing the objective function.
intercept_ : float
The intercept term added to the vector.
n_iter_ : array of int
Number of iterations run for each class.
|
_fit_liblinear
|
python
|
scikit-learn/scikit-learn
|
sklearn/svm/_base.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/svm/_base.py
|
BSD-3-Clause
|
def l1_min_c(X, y, *, loss="squared_hinge", fit_intercept=True, intercept_scaling=1.0):
    """Return the lowest bound for `C`.

    The lower bound for `C` is computed such that for `C` in `(l1_min_C, infinity)`
    the model is guaranteed not to be empty. This applies to l1 penalized
    classifiers, such as :class:`sklearn.svm.LinearSVC` with penalty='l1' and
    :class:`sklearn.linear_model.LogisticRegression` with penalty='l1'.

    This value is valid if `class_weight` parameter in `fit()` is not set.

    For an example of how to use this function, see
    :ref:`sphx_glr_auto_examples_linear_model_plot_logistic_path.py`.

    Parameters
    ----------
    X : {array-like, sparse matrix} of shape (n_samples, n_features)
        Training vector, where `n_samples` is the number of samples and
        `n_features` is the number of features.

    y : array-like of shape (n_samples,)
        Target vector relative to X.

    loss : {'squared_hinge', 'log'}, default='squared_hinge'
        Specifies the loss function.
        With 'squared_hinge' it is the squared hinge loss (a.k.a. L2 loss).
        With 'log' it is the loss of logistic regression models.

    fit_intercept : bool, default=True
        Specifies if the intercept should be fitted by the model.
        It must match the fit() method parameter.

    intercept_scaling : float, default=1.0
        When fit_intercept is True, instance vector x becomes
        [x, intercept_scaling], i.e. a "synthetic" feature with constant
        value equal to intercept_scaling is appended to the instance vector.
        It must match the fit() method parameter.

    Returns
    -------
    l1_min_c : float
        Minimum value for C.

    Examples
    --------
    >>> from sklearn.svm import l1_min_c
    >>> from sklearn.datasets import make_classification
    >>> X, y = make_classification(n_samples=100, n_features=20, random_state=42)
    >>> print(f"{l1_min_c(X, y, loss='squared_hinge', fit_intercept=True):.4f}")
    0.0044
    """
    X = check_array(X, accept_sparse="csc")
    check_consistent_length(X, y)

    Y = LabelBinarizer(neg_label=-1).fit_transform(y).T
    # maximum absolute value over classes and features
    den = np.max(np.abs(safe_sparse_dot(Y, X)))
    if fit_intercept:
        bias = np.full(
            (np.size(y), 1), intercept_scaling, dtype=np.array(intercept_scaling).dtype
        )
        den = max(den, abs(np.dot(Y, bias)).max())

    if den == 0.0:
        raise ValueError(
            "Ill-posed l1_min_c calculation: l1 will always "
            "select zero coefficients for this data"
        )
    if loss == "squared_hinge":
        return 0.5 / den
    else:  # loss == 'log':
        return 2.0 / den
|
Return the lowest bound for `C`.
The lower bound for `C` is computed such that for `C` in `(l1_min_C, infinity)`
the model is guaranteed not to be empty. This applies to l1 penalized
classifiers, such as :class:`sklearn.svm.LinearSVC` with penalty='l1' and
:class:`sklearn.linear_model.LogisticRegression` with penalty='l1'.
This value is valid if `class_weight` parameter in `fit()` is not set.
For an example of how to use this function, see
:ref:`sphx_glr_auto_examples_linear_model_plot_logistic_path.py`.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : array-like of shape (n_samples,)
Target vector relative to X.
loss : {'squared_hinge', 'log'}, default='squared_hinge'
Specifies the loss function.
With 'squared_hinge' it is the squared hinge loss (a.k.a. L2 loss).
With 'log' it is the loss of logistic regression models.
fit_intercept : bool, default=True
Specifies if the intercept should be fitted by the model.
It must match the fit() method parameter.
intercept_scaling : float, default=1.0
When fit_intercept is True, instance vector x becomes
[x, intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
It must match the fit() method parameter.
Returns
-------
l1_min_c : float
Minimum value for C.
Examples
--------
>>> from sklearn.svm import l1_min_c
>>> from sklearn.datasets import make_classification
>>> X, y = make_classification(n_samples=100, n_features=20, random_state=42)
>>> print(f"{l1_min_c(X, y, loss='squared_hinge', fit_intercept=True):.4f}")
0.0044
|
l1_min_c
|
python
|
scikit-learn/scikit-learn
|
sklearn/svm/_bounds.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/svm/_bounds.py
|
BSD-3-Clause
|
def _validate_dual_parameter(dual, loss, penalty, multi_class, X):
    """Helper function to assign the value of dual parameter."""
    if dual == "auto":
        if X.shape[0] < X.shape[1]:
            try:
                _get_liblinear_solver_type(multi_class, penalty, loss, True)
                return True
            except ValueError:  # dual not supported for the combination
                return False
        else:
            try:
                _get_liblinear_solver_type(multi_class, penalty, loss, False)
                return False
            except ValueError:  # primal not supported by the combination
                return True
    else:
        return dual
|
Helper function to assign the value of dual parameter.
|
_validate_dual_parameter
|
python
|
scikit-learn/scikit-learn
|
sklearn/svm/_classes.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/svm/_classes.py
|
BSD-3-Clause
|
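A sketch of the `dual="auto"` resolution implemented above, assuming a recent scikit-learn release where LinearSVC accepts `dual="auto"`: wide data (fewer samples than features) favors the dual formulation, tall data the primal one. Shapes are illustrative:

```python
import numpy as np
from sklearn.svm import LinearSVC

rng = np.random.RandomState(0)

# n_samples < n_features: dual="auto" resolves to the dual formulation
X_wide, y_wide = rng.rand(20, 100), rng.randint(0, 2, 20)
LinearSVC(dual="auto").fit(X_wide, y_wide)

# n_samples >= n_features: dual="auto" resolves to the primal formulation
X_tall, y_tall = rng.rand(100, 20), rng.randint(0, 2, 100)
LinearSVC(dual="auto").fit(X_tall, y_tall)
```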
def fit(self, X, y, sample_weight=None):
    """Fit the model according to the given training data.

    Parameters
    ----------
    X : {array-like, sparse matrix} of shape (n_samples, n_features)
        Training vector, where `n_samples` is the number of samples and
        `n_features` is the number of features.

    y : array-like of shape (n_samples,)
        Target vector relative to X.

    sample_weight : array-like of shape (n_samples,), default=None
        Array of weights that are assigned to individual samples.
        If not provided, then each sample is given unit weight.

        .. versionadded:: 0.18

    Returns
    -------
    self : object
        An instance of the estimator.
    """
    X, y = validate_data(
        self,
        X,
        y,
        accept_sparse="csr",
        dtype=np.float64,
        order="C",
        accept_large_sparse=False,
    )
    check_classification_targets(y)
    self.classes_ = np.unique(y)

    _dual = _validate_dual_parameter(
        self.dual, self.loss, self.penalty, self.multi_class, X
    )

    self.coef_, self.intercept_, n_iter_ = _fit_liblinear(
        X,
        y,
        self.C,
        self.fit_intercept,
        self.intercept_scaling,
        self.class_weight,
        self.penalty,
        _dual,
        self.verbose,
        self.max_iter,
        self.tol,
        self.random_state,
        self.multi_class,
        self.loss,
        sample_weight=sample_weight,
    )
    # Backward compatibility: _fit_liblinear is used both by LinearSVC/R
    # and LogisticRegression but LogisticRegression sets a structured
    # `n_iter_` attribute with information about the underlying OvR fits
    # while LinearSVC/R only reports the maximum value.
    self.n_iter_ = n_iter_.max().item()

    if self.multi_class == "crammer_singer" and len(self.classes_) == 2:
        self.coef_ = (self.coef_[1] - self.coef_[0]).reshape(1, -1)
        if self.fit_intercept:
            intercept = self.intercept_[1] - self.intercept_[0]
            self.intercept_ = np.array([intercept])

    return self
|
Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : array-like of shape (n_samples,)
Target vector relative to X.
sample_weight : array-like of shape (n_samples,), default=None
Array of weights that are assigned to individual
samples. If not provided,
then each sample is given unit weight.
.. versionadded:: 0.18
Returns
-------
self : object
An instance of the estimator.
|
fit
|
python
|
scikit-learn/scikit-learn
|
sklearn/svm/_classes.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/svm/_classes.py
|
BSD-3-Clause
|
def fit(self, X, y, sample_weight=None):
    """Fit the model according to the given training data.

    Parameters
    ----------
    X : {array-like, sparse matrix} of shape (n_samples, n_features)
        Training vector, where `n_samples` is the number of samples and
        `n_features` is the number of features.

    y : array-like of shape (n_samples,)
        Target vector relative to X.

    sample_weight : array-like of shape (n_samples,), default=None
        Array of weights that are assigned to individual samples.
        If not provided, then each sample is given unit weight.

        .. versionadded:: 0.18

    Returns
    -------
    self : object
        An instance of the estimator.
    """
    X, y = validate_data(
        self,
        X,
        y,
        accept_sparse="csr",
        dtype=np.float64,
        order="C",
        accept_large_sparse=False,
    )
    penalty = "l2"  # SVR only accepts l2 penalty

    _dual = _validate_dual_parameter(self.dual, self.loss, penalty, "ovr", X)

    self.coef_, self.intercept_, n_iter_ = _fit_liblinear(
        X,
        y,
        self.C,
        self.fit_intercept,
        self.intercept_scaling,
        None,
        penalty,
        _dual,
        self.verbose,
        self.max_iter,
        self.tol,
        self.random_state,
        loss=self.loss,
        epsilon=self.epsilon,
        sample_weight=sample_weight,
    )
    self.coef_ = self.coef_.ravel()
    # Backward compatibility: _fit_liblinear is used both by LinearSVC/R
    # and LogisticRegression but LogisticRegression sets a structured
    # `n_iter_` attribute with information about the underlying OvR fits
    # while LinearSVC/R only reports the maximum value.
    self.n_iter_ = n_iter_.max().item()

    return self
|
Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : array-like of shape (n_samples,)
Target vector relative to X.
sample_weight : array-like of shape (n_samples,), default=None
Array of weights that are assigned to individual
samples. If not provided,
then each sample is given unit weight.
.. versionadded:: 0.18
Returns
-------
self : object
An instance of the estimator.
|
fit
|
python
|
scikit-learn/scikit-learn
|
sklearn/svm/_classes.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/svm/_classes.py
|
BSD-3-Clause
|
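An illustrative fit of LinearSVR matching the code above: `coef_` is raveled to one dimension and `n_iter_` reports the maximum over the underlying liblinear runs. Parameter values are illustrative:

```python
from sklearn.datasets import make_regression
from sklearn.svm import LinearSVR

X, y = make_regression(n_samples=100, n_features=5, random_state=0)

reg = LinearSVR(epsilon=0.0, max_iter=10000).fit(X, y)
print(reg.coef_.shape)  # (5,): raveled coefficient vector
print(reg.n_iter_)      # max over the underlying liblinear runs
```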
def fit(self, X, y=None, sample_weight=None):
    """Detect the soft boundary of the set of samples X.

    Parameters
    ----------
    X : {array-like, sparse matrix} of shape (n_samples, n_features)
        Set of samples, where `n_samples` is the number of samples and
        `n_features` is the number of features.

    y : Ignored
        Not used, present for API consistency by convention.

    sample_weight : array-like of shape (n_samples,), default=None
        Per-sample weights. Rescale C per sample. Higher weights
        force the classifier to put more emphasis on these points.

    Returns
    -------
    self : object
        Fitted estimator.

    Notes
    -----
    If X is not a C-ordered contiguous array it is copied.
    """
    super().fit(X, np.ones(_num_samples(X)), sample_weight=sample_weight)
    self.offset_ = -self._intercept_
    return self
|
Detect the soft boundary of the set of samples X.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Set of samples, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : Ignored
Not used, present for API consistency by convention.
sample_weight : array-like of shape (n_samples,), default=None
Per-sample weights. Rescale C per sample. Higher weights
force the classifier to put more emphasis on these points.
Returns
-------
self : object
Fitted estimator.
Notes
-----
If X is not a C-ordered contiguous array it is copied.
|
fit
|
python
|
scikit-learn/scikit-learn
|
sklearn/svm/_classes.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/svm/_classes.py
|
BSD-3-Clause
|
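A sketch of fitting the one-class model above; `offset_` is the negated intercept used to threshold the decision function. Data is illustrative:

```python
import numpy as np
from sklearn.svm import OneClassSVM

rng = np.random.RandomState(0)
X = 0.3 * rng.randn(100, 2)  # inliers clustered around the origin

oc = OneClassSVM(nu=0.1, gamma="auto").fit(X)
print(oc.offset_)                 # equals -intercept_
scores = oc.decision_function(X)  # positive inside the learned boundary
```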
def predict(self, X):
    """Perform classification on samples in X.

    For a one-class model, +1 or -1 is returned.

    Parameters
    ----------
    X : {array-like, sparse matrix} of shape (n_samples, n_features) or \
            (n_samples_test, n_samples_train)
        For kernel="precomputed", the expected shape of X is
        (n_samples_test, n_samples_train).

    Returns
    -------
    y_pred : ndarray of shape (n_samples,)
        Class labels for samples in X.
    """
    y = super().predict(X)
    return np.asarray(y, dtype=np.intp)
|
Perform classification on samples in X.
For a one-class model, +1 or -1 is returned.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features) or (n_samples_test, n_samples_train)
For kernel="precomputed", the expected shape of X is
(n_samples_test, n_samples_train).
Returns
-------
y_pred : ndarray of shape (n_samples,)
Class labels for samples in X.
|
predict
|
python
|
scikit-learn/scikit-learn
|
sklearn/svm/_classes.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/svm/_classes.py
|
BSD-3-Clause
|
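An illustrative prediction with the one-class `predict` above, returning +1 for inliers and -1 for outliers; data is illustrative:

```python
import numpy as np
from sklearn.svm import OneClassSVM

rng = np.random.RandomState(0)
X_train = 0.3 * rng.randn(100, 2)
X_test = np.array([[0.0, 0.0], [4.0, 4.0]])

oc = OneClassSVM(nu=0.1).fit(X_train)
print(oc.predict(X_test))  # e.g. [ 1 -1]: +1 inlier, -1 outlier
```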
def test_newrand_default():
    """Test that bounded_rand_int_wrap without seeding respects the range.

    Note this test should pass whether executed alone or in conjunction
    with other tests that call set_seed explicitly, in any order: it checks
    invariants on the RNG instead of specific values.
    """
    generated = [bounded_rand_int_wrap(100) for _ in range(10)]
    assert all(0 <= x < 100 for x in generated)
    assert not all(x == generated[0] for x in generated)
|
Test that bounded_rand_int_wrap without seeding respects the range
Note this test should pass whether executed alone or in conjunction
with other tests that call set_seed explicitly, in any order: it checks
invariants on the RNG instead of specific values.
|
test_newrand_default
|
python
|
scikit-learn/scikit-learn
|
sklearn/svm/tests/test_bounds.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/svm/tests/test_bounds.py
|
BSD-3-Clause
|
def test_svc(X_train, y_train, X_test, kernel, sparse_container):
    """Check that sparse SVC gives the same result as SVC."""
    X_train = sparse_container(X_train)

    clf = svm.SVC(
        gamma=1,
        kernel=kernel,
        probability=True,
        random_state=0,
        decision_function_shape="ovo",
    )
    check_svm_model_equal(clf, X_train, y_train, X_test)
|
Check that sparse SVC gives the same result as SVC.
|
test_svc
|
python
|
scikit-learn/scikit-learn
|
sklearn/svm/tests/test_sparse.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/svm/tests/test_sparse.py
|
BSD-3-Clause
|
def test_svc_ovr_tie_breaking(SVCClass):
    """Test if predict breaks ties in OVR mode.

    Related issue: https://github.com/scikit-learn/scikit-learn/issues/8277
    """
    if SVCClass.__name__ == "NuSVC" and _IS_32BIT:
        # XXX: known failure to be investigated. Either the code needs to be
        # fixed or the test itself might need to be made less sensitive to
        # random changes in test data and rounding errors more generally.
        # https://github.com/scikit-learn/scikit-learn/issues/29633
        pytest.xfail("Failing test on 32bit OS")

    X, y = make_blobs(random_state=0, n_samples=20, n_features=2)

    xs = np.linspace(X[:, 0].min(), X[:, 0].max(), 100)
    ys = np.linspace(X[:, 1].min(), X[:, 1].max(), 100)
    xx, yy = np.meshgrid(xs, ys)

    common_params = dict(
        kernel="rbf", gamma=1e6, random_state=42, decision_function_shape="ovr"
    )
    svm = SVCClass(
        break_ties=False,
        **common_params,
    ).fit(X, y)
    pred = svm.predict(np.c_[xx.ravel(), yy.ravel()])
    dv = svm.decision_function(np.c_[xx.ravel(), yy.ravel()])
    assert not np.all(pred == np.argmax(dv, axis=1))

    svm = SVCClass(
        break_ties=True,
        **common_params,
    ).fit(X, y)
    pred = svm.predict(np.c_[xx.ravel(), yy.ravel()])
    dv = svm.decision_function(np.c_[xx.ravel(), yy.ravel()])
    assert np.all(pred == np.argmax(dv, axis=1))
|
Test if predict breaks ties in OVR mode.
Related issue: https://github.com/scikit-learn/scikit-learn/issues/8277
|
test_svc_ovr_tie_breaking
|
python
|
scikit-learn/scikit-learn
|
sklearn/svm/tests/test_svm.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/svm/tests/test_svm.py
|
BSD-3-Clause
|
def test_custom_kernel_not_array_input(Estimator):
    """Test using a custom kernel that is not fed array-like input of floats."""
    data = ["A A", "A", "B", "B B", "A B"]
    X = np.array([[2, 0], [1, 0], [0, 1], [0, 2], [1, 1]])  # count encoding
    y = np.array([1, 1, 2, 2, 1])

    def string_kernel(X1, X2):
        assert isinstance(X1[0], str)
        n_samples1 = _num_samples(X1)
        n_samples2 = _num_samples(X2)
        K = np.zeros((n_samples1, n_samples2))
        for ii in range(n_samples1):
            for jj in range(ii, n_samples2):
                K[ii, jj] = X1[ii].count("A") * X2[jj].count("A")
                K[ii, jj] += X1[ii].count("B") * X2[jj].count("B")
                K[jj, ii] = K[ii, jj]
        return K

    K = string_kernel(data, data)
    assert_array_equal(np.dot(X, X.T), K)

    svc1 = Estimator(kernel=string_kernel).fit(data, y)
    svc2 = Estimator(kernel="linear").fit(X, y)
    svc3 = Estimator(kernel="precomputed").fit(K, y)

    assert svc1.score(data, y) == svc3.score(K, y)
    assert svc1.score(data, y) == svc2.score(X, y)
    if hasattr(svc1, "decision_function"):  # classifier
        assert_allclose(svc1.decision_function(data), svc2.decision_function(X))
        assert_allclose(svc1.decision_function(data), svc3.decision_function(K))
        assert_array_equal(svc1.predict(data), svc2.predict(X))
        assert_array_equal(svc1.predict(data), svc3.predict(K))
    else:  # regressor
        assert_allclose(svc1.predict(data), svc2.predict(X))
        assert_allclose(svc1.predict(data), svc3.predict(K))
|
Test using a custom kernel that is not fed array-like input of floats.
|
test_custom_kernel_not_array_input
|
python
|
scikit-learn/scikit-learn
|
sklearn/svm/tests/test_svm.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/svm/tests/test_svm.py
|
BSD-3-Clause
|
def test_svc_raises_error_internal_representation():
    """Check that SVC raises error when internal representation is altered.

    Non-regression test for #18891 and
    https://nvd.nist.gov/vuln/detail/CVE-2020-28975
    """
    clf = svm.SVC(kernel="linear").fit(X, Y)
    clf._n_support[0] = 1000000

    msg = "The internal representation of SVC was altered"
    with pytest.raises(ValueError, match=msg):
        clf.predict(X)
|
Check that SVC raises error when internal representation is altered.
Non-regression test for #18891 and https://nvd.nist.gov/vuln/detail/CVE-2020-28975
|
test_svc_raises_error_internal_representation
|
python
|
scikit-learn/scikit-learn
|
sklearn/svm/tests/test_svm.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/svm/tests/test_svm.py
|
BSD-3-Clause
|
def test_svm_with_infinite_C(Estimator, make_dataset, C_inf, global_random_seed):
    """Check that passing `C=inf` is equivalent to a very large C value.

    Non-regression test for
    https://github.com/scikit-learn/scikit-learn/issues/29772
    """
    X, y = make_dataset(random_state=global_random_seed)
    estimator_C_inf = Estimator(C=C_inf).fit(X, y)
    estimator_C_large = Estimator(C=1e10).fit(X, y)
    assert_allclose(estimator_C_large.predict(X), estimator_C_inf.predict(X))
|
Check that passing `C=inf` is equivalent to a very large C value.
Non-regression test for
https://github.com/scikit-learn/scikit-learn/issues/29772
|
test_svm_with_infinite_C
|
python
|
scikit-learn/scikit-learn
|
sklearn/svm/tests/test_svm.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/svm/tests/test_svm.py
|
BSD-3-Clause
|
def record_metadata(obj, record_default=True, **kwargs):
    """Utility function to store passed metadata to a method of obj.

    If record_default is False, kwargs whose values are "default" are skipped.
    This is so that checks on keyword arguments whose default was not changed
    are skipped.
    """
    stack = inspect.stack()
    callee = stack[1].function
    caller = stack[2].function
    if not hasattr(obj, "_records"):
        obj._records = defaultdict(lambda: defaultdict(list))
    if not record_default:
        kwargs = {
            key: val
            for key, val in kwargs.items()
            if not isinstance(val, str) or (val != "default")
        }
    obj._records[callee][caller].append(kwargs)
|
Utility function to store passed metadata to a method of obj.
If record_default is False, kwargs whose values are "default" are skipped.
This is so that checks on keyword arguments whose default was not changed
are skipped.
|
record_metadata
|
python
|
scikit-learn/scikit-learn
|
sklearn/tests/metadata_routing_common.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tests/metadata_routing_common.py
|
BSD-3-Clause
|
def check_recorded_metadata(obj, method, parent, split_params=tuple(), **kwargs):
    """Check whether the expected metadata is passed to the object's method.

    Parameters
    ----------
    obj : estimator object
        sub-estimator to check routed params for
    method : str
        sub-estimator's method where metadata is routed to, or otherwise in
        the context of metadata routing referred to as 'callee'
    parent : str
        the parent method which should have called `method`, or otherwise in
        the context of metadata routing referred to as 'caller'
    split_params : tuple, default=empty
        specifies any parameters which are to be checked as being a subset
        of the original values
    **kwargs : dict
        passed metadata
    """
    all_records = (
        getattr(obj, "_records", dict()).get(method, dict()).get(parent, list())
    )
    for record in all_records:
        # first check that the names of the metadata passed are the same as
        # expected. The names are stored as keys in `record`.
        assert set(kwargs.keys()) == set(record.keys()), (
            f"Expected {kwargs.keys()} vs {record.keys()}"
        )
        for key, value in kwargs.items():
            recorded_value = record[key]
            # The following condition is used to check for any specified parameters
            # being a subset of the original values
            if key in split_params and recorded_value is not None:
                assert np.isin(recorded_value, value).all()
            else:
                if isinstance(recorded_value, np.ndarray):
                    assert_array_equal(recorded_value, value)
                else:
                    assert recorded_value is value, (
                        f"Expected {recorded_value} vs {value}. Method: {method}"
                    )
|
Check whether the expected metadata is passed to the object's method.
Parameters
----------
obj : estimator object
sub-estimator to check routed params for
method : str
sub-estimator's method where metadata is routed to, or otherwise in
the context of metadata routing referred to as 'callee'
parent : str
the parent method which should have called `method`, or otherwise in
the context of metadata routing referred to as 'caller'
split_params : tuple, default=empty
specifies any parameters which are to be checked as being a subset
of the original values
**kwargs : dict
passed metadata
|
check_recorded_metadata
|
python
|
scikit-learn/scikit-learn
|
sklearn/tests/metadata_routing_common.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tests/metadata_routing_common.py
|
BSD-3-Clause
|
def assert_request_is_empty(metadata_request, exclude=None):
    """Check if a metadata request dict is empty.

    One can exclude a method or a list of methods from the check using the
    ``exclude`` parameter. If metadata_request is a MetadataRouter, then
    ``exclude`` can be of the form ``{"object" : [method, ...]}``.
    """
    if isinstance(metadata_request, MetadataRouter):
        for name, route_mapping in metadata_request:
            if exclude is not None and name in exclude:
                _exclude = exclude[name]
            else:
                _exclude = None
            assert_request_is_empty(route_mapping.router, exclude=_exclude)
        return

    exclude = [] if exclude is None else exclude
    for method in SIMPLE_METHODS:
        if method in exclude:
            continue
        mmr = getattr(metadata_request, method)
        props = [
            prop
            for prop, alias in mmr.requests.items()
            if isinstance(alias, str) or alias is not None
        ]
        assert not props
|
Check if a metadata request dict is empty.
One can exclude a method or a list of methods from the check using the
``exclude`` parameter. If metadata_request is a MetadataRouter, then
``exclude`` can be of the form ``{"object" : [method, ...]}``.
|
assert_request_is_empty
|
python
|
scikit-learn/scikit-learn
|
sklearn/tests/metadata_routing_common.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tests/metadata_routing_common.py
|
BSD-3-Clause
|
def test_clone_protocol():
    """Checks that clone works with `__sklearn_clone__` protocol."""

    class FrozenEstimator(BaseEstimator):
        def __init__(self, fitted_estimator):
            self.fitted_estimator = fitted_estimator

        def __getattr__(self, name):
            return getattr(self.fitted_estimator, name)

        def __sklearn_clone__(self):
            return self

        def fit(self, *args, **kwargs):
            return self

        def fit_transform(self, *args, **kwargs):
            return self.fitted_estimator.transform(*args, **kwargs)

    X = np.array([[-1, -1], [-2, -1], [-3, -2]])
    pca = PCA().fit(X)
    components = pca.components_

    frozen_pca = FrozenEstimator(pca)
    assert_allclose(frozen_pca.components_, components)

    # Calling PCA methods such as `get_feature_names_out` still works
    assert_array_equal(frozen_pca.get_feature_names_out(), pca.get_feature_names_out())

    # Fitting on new data does not alter `components_`
    X_new = np.asarray([[-1, 2], [3, 4], [1, 2]])
    frozen_pca.fit(X_new)
    assert_allclose(frozen_pca.components_, components)

    # `fit_transform` does not alter state
    frozen_pca.fit_transform(X_new)
    assert_allclose(frozen_pca.components_, components)

    # Cloning the estimator is a no-op
    clone_frozen_pca = clone(frozen_pca)
    assert clone_frozen_pca is frozen_pca
    assert_allclose(clone_frozen_pca.components_, components)
|
Checks that clone works with `__sklearn_clone__` protocol.
|
test_clone_protocol
|
python
|
scikit-learn/scikit-learn
|
sklearn/tests/test_base.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tests/test_base.py
|
BSD-3-Clause
|
def test_n_features_in_validation():
    """Check that `_check_n_features` validates data when reset=False"""
    est = MyEstimator()
    X_train = [[1, 2, 3], [4, 5, 6]]
    _check_n_features(est, X_train, reset=True)

    assert est.n_features_in_ == 3

    msg = "X does not contain any features, but MyEstimator is expecting 3 features"
    with pytest.raises(ValueError, match=msg):
        _check_n_features(est, "invalid X", reset=False)
|
Check that `_check_n_features` validates data when reset=False
|
test_n_features_in_validation
|
python
|
scikit-learn/scikit-learn
|
sklearn/tests/test_base.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tests/test_base.py
|
BSD-3-Clause
|
def test_n_features_in_no_validation():
    """Check that `_check_n_features` does not validate data when
    n_features_in_ is not defined."""
    est = MyEstimator()
    _check_n_features(est, "invalid X", reset=True)

    assert not hasattr(est, "n_features_in_")

    # does not raise
    _check_n_features(est, "invalid X", reset=False)
|
Check that `_check_n_features` does not validate data when
n_features_in_ is not defined.
|
test_n_features_in_no_validation
|
python
|
scikit-learn/scikit-learn
|
sklearn/tests/test_base.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tests/test_base.py
|
BSD-3-Clause
|
def test_clone_keeps_output_config():
    """Check that clone keeps the set_output config."""
    ss = StandardScaler().set_output(transform="pandas")
    config = _get_output_config("transform", ss)

    ss_clone = clone(ss)
    config_clone = _get_output_config("transform", ss_clone)
    assert config == config_clone
|
Check that clone keeps the set_output config.
|
test_clone_keeps_output_config
|
python
|
scikit-learn/scikit-learn
|
sklearn/tests/test_base.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tests/test_base.py
|
BSD-3-Clause
|
def test_estimator_empty_instance_dict(estimator):
    """Check that ``__getstate__`` returns an empty ``dict`` with an empty
    instance.

    Python 3.11+ changed behaviour by returning ``None`` instead of raising an
    ``AttributeError``. Non-regression test for gh-25188.
    """
    state = estimator.__getstate__()
    expected = {"_sklearn_version": sklearn.__version__}
    assert state == expected

    # this should not raise
    pickle.loads(pickle.dumps(BaseEstimator()))
|
Check that ``__getstate__`` returns an empty ``dict`` with an empty
instance.
Python 3.11+ changed behaviour by returning ``None`` instead of raising an
``AttributeError``. Non-regression test for gh-25188.
|
test_estimator_empty_instance_dict
|
python
|
scikit-learn/scikit-learn
|
sklearn/tests/test_base.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tests/test_base.py
|
BSD-3-Clause
|
def test_estimator_getstate_using_slots_error_message():
    """Using a `BaseEstimator` with `__slots__` is not supported."""

    class WithSlots:
        __slots__ = ("x",)

    class Estimator(BaseEstimator, WithSlots):
        pass

    msg = (
        "You cannot use `__slots__` in objects inheriting from "
        "`sklearn.base.BaseEstimator`"
    )

    with pytest.raises(TypeError, match=msg):
        Estimator().__getstate__()

    with pytest.raises(TypeError, match=msg):
        pickle.dumps(Estimator())
|
Using a `BaseEstimator` with `__slots__` is not supported.
|
test_estimator_getstate_using_slots_error_message
|
python
|
scikit-learn/scikit-learn
|
sklearn/tests/test_base.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tests/test_base.py
|
BSD-3-Clause
|
def test_dataframe_protocol(constructor_name, minversion):
    """Uses the dataframe exchange protocol to get feature names."""
    data = [[1, 4, 2], [3, 3, 6]]
    columns = ["col_0", "col_1", "col_2"]
    df = _convert_container(
        data, constructor_name, columns_name=columns, minversion=minversion
    )

    class NoOpTransformer(TransformerMixin, BaseEstimator):
        def fit(self, X, y=None):
            validate_data(self, X)
            return self

        def transform(self, X):
            return validate_data(self, X, reset=False)

    no_op = NoOpTransformer()
    no_op.fit(df)
    assert_array_equal(no_op.feature_names_in_, columns)
    X_out = no_op.transform(df)

    if constructor_name != "pyarrow":
        # pyarrow does not work with `np.asarray`
        # https://github.com/apache/arrow/issues/34886
        assert_allclose(df, X_out)

    bad_names = ["a", "b", "c"]
    df_bad = _convert_container(data, constructor_name, columns_name=bad_names)
    with pytest.raises(ValueError, match="The feature names should match"):
        no_op.transform(df_bad)
|
Uses the dataframe exchange protocol to get feature names.
|
test_dataframe_protocol
|
python
|
scikit-learn/scikit-learn
|
sklearn/tests/test_base.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tests/test_base.py
|
BSD-3-Clause
|
def test_transformer_fit_transform_with_metadata_in_transform():
    """Test that having a transformer with metadata for transform raises a
    warning when calling fit_transform."""

    class CustomTransformer(BaseEstimator, TransformerMixin):
        def fit(self, X, y=None, prop=None):
            return self

        def transform(self, X, prop=None):
            return X

    # passing the metadata to `fit_transform` should raise a warning since it
    # could potentially be consumed by `transform`
    with pytest.warns(UserWarning, match="`transform` method which consumes metadata"):
        CustomTransformer().set_transform_request(prop=True).fit_transform(
            [[1]], [1], prop=1
        )

    # not passing a metadata which can potentially be consumed by `transform` should
    # not raise a warning
    with warnings.catch_warnings(record=True) as record:
        CustomTransformer().set_transform_request(prop=True).fit_transform([[1]], [1])
    assert len(record) == 0
|
Test that having a transformer with metadata for transform raises a
warning when calling fit_transform.
|
test_transformer_fit_transform_with_metadata_in_transform
|
python
|
scikit-learn/scikit-learn
|
sklearn/tests/test_base.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tests/test_base.py
|
BSD-3-Clause
|
def test_outlier_mixin_fit_predict_with_metadata_in_predict():
    """Test that having an OutlierMixin with metadata for predict raises a
    warning when calling fit_predict."""

    class CustomOutlierDetector(BaseEstimator, OutlierMixin):
        def fit(self, X, y=None, prop=None):
            return self

        def predict(self, X, prop=None):
            return X

    # passing the metadata to `fit_predict` should raise a warning since it
    # could potentially be consumed by `predict`
    with pytest.warns(UserWarning, match="`predict` method which consumes metadata"):
        CustomOutlierDetector().set_predict_request(prop=True).fit_predict(
            [[1]], [1], prop=1
        )

    # not passing a metadata which can potentially be consumed by `predict` should
    # not raise a warning
    with warnings.catch_warnings(record=True) as record:
        CustomOutlierDetector().set_predict_request(prop=True).fit_predict([[1]], [1])
    assert len(record) == 0
|
Test that having an OutlierMixin with metadata for predict raises a
warning when calling fit_predict.
|
test_outlier_mixin_fit_predict_with_metadata_in_predict
|
python
|
scikit-learn/scikit-learn
|
sklearn/tests/test_base.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tests/test_base.py
|
BSD-3-Clause
|
def test_get_params_html():
    """Check the behaviour of the `_get_params_html` method."""
    est = MyEstimator(empty="test")

    assert est._get_params_html() == {"l1": 0, "empty": "test"}
    assert est._get_params_html().non_default == ("empty",)
|
Check the behaviour of the `_get_params_html` method.
|
test_get_params_html
|
python
|
scikit-learn/scikit-learn
|
sklearn/tests/test_base.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tests/test_base.py
|
BSD-3-Clause
|
def test_sigmoid_calibration():
    """Test calibration values with Platt sigmoid model"""
    exF = np.array([5, -4, 1.0])
    exY = np.array([1, -1, -1])
    # computed from my python port of the C++ code in LibSVM
    AB_lin_libsvm = np.array([-0.20261354391187855, 0.65236314980010512])
    assert_array_almost_equal(AB_lin_libsvm, _sigmoid_calibration(exF, exY), 3)
    lin_prob = 1.0 / (1.0 + np.exp(AB_lin_libsvm[0] * exF + AB_lin_libsvm[1]))
    sk_prob = _SigmoidCalibration().fit(exF, exY).predict(exF)
    assert_array_almost_equal(lin_prob, sk_prob, 6)

    # check that _SigmoidCalibration().fit only accepts 1d array or 2d column
    # arrays
    with pytest.raises(ValueError):
        _SigmoidCalibration().fit(np.vstack((exF, exF)), exY)
|
Test calibration values with Platt sigmoid model
|
test_sigmoid_calibration
|
python
|
scikit-learn/scikit-learn
|
sklearn/tests/test_calibration.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tests/test_calibration.py
|
BSD-3-Clause
|
def test_calibration_nan_imputer(ensemble):
    """Test that calibration can accept nan"""
    X, y = make_classification(
        n_samples=10, n_features=2, n_informative=2, n_redundant=0, random_state=42
    )
    X[0, 0] = np.nan
    clf = Pipeline(
        [("imputer", SimpleImputer()), ("rf", RandomForestClassifier(n_estimators=1))]
    )
    clf_c = CalibratedClassifierCV(clf, cv=2, method="isotonic", ensemble=ensemble)
    clf_c.fit(X, y)
    clf_c.predict(X)
|
Test that calibration can accept nan
|
test_calibration_nan_imputer
|
python
|
scikit-learn/scikit-learn
|
sklearn/tests/test_calibration.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tests/test_calibration.py
|
BSD-3-Clause
|
def test_calibration_accepts_ndarray(X):
    """Test that calibration accepts n-dimensional arrays as input"""
    y = [1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0]

    class MockTensorClassifier(ClassifierMixin, BaseEstimator):
        """A toy estimator that accepts tensor inputs"""

        def fit(self, X, y):
            self.classes_ = np.unique(y)
            return self

        def decision_function(self, X):
            # toy decision function that just needs to have the right shape:
            return X.reshape(X.shape[0], -1).sum(axis=1)

    calibrated_clf = CalibratedClassifierCV(MockTensorClassifier())
    # we should be able to fit this classifier with no error
    calibrated_clf.fit(X, y)
|
Test that calibration accepts n-dimensional arrays as input
|
test_calibration_accepts_ndarray
|
python
|
scikit-learn/scikit-learn
|
sklearn/tests/test_calibration.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tests/test_calibration.py
|
BSD-3-Clause
|
def test_calibration_dict_pipeline(dict_data, dict_data_pipeline):
    """Test that calibration works in a prefit pipeline with a transformer.

    `X` is not array-like, sparse matrix or dataframe at the start.
    See https://github.com/scikit-learn/scikit-learn/issues/8710

    Also test it can predict without running into validation errors.
    See https://github.com/scikit-learn/scikit-learn/issues/19637
    """
    X, y = dict_data
    clf = dict_data_pipeline
    calib_clf = CalibratedClassifierCV(FrozenEstimator(clf), cv=2)
    calib_clf.fit(X, y)
    # Check attributes are obtained from fitted estimator
    assert_array_equal(calib_clf.classes_, clf.classes_)

    # Neither the pipeline nor the calibration meta-estimator
    # expose the n_features_in_ check on this kind of data.
    assert not hasattr(clf, "n_features_in_")
    assert not hasattr(calib_clf, "n_features_in_")

    # Ensure that no error is thrown with predict and predict_proba
    calib_clf.predict(X)
    calib_clf.predict_proba(X)
|
Test that calibration works in a prefit pipeline with a transformer
when `X` is not array-like, sparse matrix or dataframe at the start.
See https://github.com/scikit-learn/scikit-learn/issues/8710
Also test it can predict without running into validation errors.
See https://github.com/scikit-learn/scikit-learn/issues/19637
|
test_calibration_dict_pipeline
|
python
|
scikit-learn/scikit-learn
|
sklearn/tests/test_calibration.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tests/test_calibration.py
|
BSD-3-Clause
|
def test_calibration_curve_pos_label_error_str(dtype_y_str):
"""Check error message when a `pos_label` is not specified with `str` targets."""
rng = np.random.RandomState(42)
y1 = np.array(["spam"] * 3 + ["eggs"] * 2, dtype=dtype_y_str)
y2 = rng.randint(0, 2, size=y1.size)
err_msg = (
"y_true takes value in {'eggs', 'spam'} and pos_label is not "
"specified: either make y_true take value in {0, 1} or {-1, 1} or "
"pass pos_label explicitly"
)
with pytest.raises(ValueError, match=err_msg):
calibration_curve(y1, y2)
|
Check error message when a `pos_label` is not specified with `str` targets.
|
test_calibration_curve_pos_label_error_str
|
python
|
scikit-learn/scikit-learn
|
sklearn/tests/test_calibration.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tests/test_calibration.py
|
BSD-3-Clause
|
def test_calibration_curve_pos_label(dtype_y_str):
"""Check the behaviour when passing explicitly `pos_label`."""
y_true = np.array([0, 0, 0, 1, 1, 1, 1, 1, 1])
classes = np.array(["spam", "egg"], dtype=dtype_y_str)
y_true_str = classes[y_true]
y_pred = np.array([0.1, 0.2, 0.3, 0.4, 0.65, 0.7, 0.8, 0.9, 1.0])
# default case
prob_true, _ = calibration_curve(y_true, y_pred, n_bins=4)
assert_allclose(prob_true, [0, 0.5, 1, 1])
# if `y_true` contains `str`, then `pos_label` is required
prob_true, _ = calibration_curve(y_true_str, y_pred, n_bins=4, pos_label="egg")
assert_allclose(prob_true, [0, 0.5, 1, 1])
prob_true, _ = calibration_curve(y_true, 1 - y_pred, n_bins=4, pos_label=0)
assert_allclose(prob_true, [0, 0, 0.5, 1])
prob_true, _ = calibration_curve(y_true_str, 1 - y_pred, n_bins=4, pos_label="spam")
assert_allclose(prob_true, [0, 0, 0.5, 1])
|
Check the behaviour when passing explicitly `pos_label`.
|
test_calibration_curve_pos_label
|
python
|
scikit-learn/scikit-learn
|
sklearn/tests/test_calibration.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tests/test_calibration.py
|
BSD-3-Clause
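For reference, a minimal sketch of calibration_curve with an explicit pos_label on string targets (toy data):
import numpy as np
from sklearn.calibration import calibration_curve

y_true = np.array(["egg", "egg", "spam", "spam"])
y_prob = np.array([0.1, 0.4, 0.6, 0.9])  # probability of the "spam" class
prob_true, prob_pred = calibration_curve(y_true, y_prob, n_bins=2, pos_label="spam")
print(prob_true, prob_pred)  # [0. 1.] [0.25 0.75]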
|
def test_calibration_display_kwargs(pyplot, iris_data_binary, kwargs):
"""Check that matplotlib aliases are handled."""
X, y = iris_data_binary
lr = LogisticRegression().fit(X, y)
viz = CalibrationDisplay.from_estimator(lr, X, y, **kwargs)
assert viz.line_.get_color() == "red"
assert viz.line_.get_linewidth() == 2
assert viz.line_.get_linestyle() == "-."
|
Check that matplotlib aliases are handled.
|
test_calibration_display_kwargs
|
python
|
scikit-learn/scikit-learn
|
sklearn/tests/test_calibration.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tests/test_calibration.py
|
BSD-3-Clause
|
def test_calibration_display_pos_label(
pyplot, iris_data_binary, pos_label, expected_pos_label
):
"""Check the behaviour of `pos_label` in the `CalibrationDisplay`."""
X, y = iris_data_binary
lr = LogisticRegression().fit(X, y)
viz = CalibrationDisplay.from_estimator(lr, X, y, pos_label=pos_label)
y_prob = lr.predict_proba(X)[:, expected_pos_label]
prob_true, prob_pred = calibration_curve(y, y_prob, pos_label=pos_label)
assert_allclose(viz.prob_true, prob_true)
assert_allclose(viz.prob_pred, prob_pred)
assert_allclose(viz.y_prob, y_prob)
assert (
viz.ax_.get_xlabel()
== f"Mean predicted probability (Positive class: {expected_pos_label})"
)
assert (
viz.ax_.get_ylabel()
== f"Fraction of positives (Positive class: {expected_pos_label})"
)
expected_legend_labels = [lr.__class__.__name__, "Perfectly calibrated"]
legend_labels = viz.ax_.get_legend().get_texts()
assert len(legend_labels) == len(expected_legend_labels)
for labels in legend_labels:
assert labels.get_text() in expected_legend_labels
|
Check the behaviour of `pos_label` in the `CalibrationDisplay`.
|
test_calibration_display_pos_label
|
python
|
scikit-learn/scikit-learn
|
sklearn/tests/test_calibration.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tests/test_calibration.py
|
BSD-3-Clause
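A minimal usage sketch for the display API tested above (requires matplotlib; data and parameters are illustrative):
from sklearn.calibration import CalibrationDisplay
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression

X, y = make_classification(random_state=0)
clf = LogisticRegression().fit(X, y)
disp = CalibrationDisplay.from_estimator(clf, X, y, n_bins=5, pos_label=1)
disp.ax_.set_title("Reliability curve")  # the display exposes the matplotlib Axes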
|
def test_calibrated_classifier_cv_double_sample_weights_equivalence(method, ensemble):
"""Check that passing repeating twice the dataset `X` is equivalent to
passing a `sample_weight` with a factor 2."""
X, y = load_iris(return_X_y=True)
# Scale the data to avoid any convergence issue
X = StandardScaler().fit_transform(X)
# Only use 2 classes
X, y = X[:100], y[:100]
sample_weight = np.ones_like(y) * 2
# Interlace the data such that a 2-fold cross-validation will be equivalent
    # to using the original dataset with sample weights of 2
X_twice = np.zeros((X.shape[0] * 2, X.shape[1]), dtype=X.dtype)
X_twice[::2, :] = X
X_twice[1::2, :] = X
y_twice = np.zeros(y.shape[0] * 2, dtype=y.dtype)
y_twice[::2] = y
y_twice[1::2] = y
estimator = LogisticRegression()
calibrated_clf_without_weights = CalibratedClassifierCV(
estimator,
method=method,
ensemble=ensemble,
cv=2,
)
calibrated_clf_with_weights = clone(calibrated_clf_without_weights)
calibrated_clf_with_weights.fit(X, y, sample_weight=sample_weight)
calibrated_clf_without_weights.fit(X_twice, y_twice)
# Check that the underlying fitted estimators have the same coefficients
for est_with_weights, est_without_weights in zip(
calibrated_clf_with_weights.calibrated_classifiers_,
calibrated_clf_without_weights.calibrated_classifiers_,
):
assert_allclose(
est_with_weights.estimator.coef_,
est_without_weights.estimator.coef_,
)
# Check that the predictions are the same
y_pred_with_weights = calibrated_clf_with_weights.predict_proba(X)
y_pred_without_weights = calibrated_clf_without_weights.predict_proba(X)
assert_allclose(y_pred_with_weights, y_pred_without_weights)
|
Check that repeating the dataset `X` twice is equivalent to
passing a `sample_weight` with a factor of 2.
|
test_calibrated_classifier_cv_double_sample_weights_equivalence
|
python
|
scikit-learn/scikit-learn
|
sklearn/tests/test_calibration.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tests/test_calibration.py
|
BSD-3-Clause
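The interleaving trick that the test above relies on, shown in isolation: each row is duplicated at adjacent indices so that a 2-fold split of the doubled data behaves like weighting every sample by 2 (toy data):
import numpy as np

X = np.arange(8.0).reshape(4, 2)
y = np.array([0, 1, 0, 1])
X_twice = np.zeros((X.shape[0] * 2, X.shape[1]), dtype=X.dtype)
X_twice[::2], X_twice[1::2] = X, X
y_twice = np.zeros(y.shape[0] * 2, dtype=y.dtype)
y_twice[::2], y_twice[1::2] = y, y
print(X_twice[:4])  # each original row now appears twice, interleaved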
|
def test_calibration_with_fit_params(fit_params_type, data):
"""Tests that fit_params are passed to the underlying base estimator.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/12384
"""
X, y = data
fit_params = {
"a": _convert_container(y, fit_params_type),
"b": _convert_container(y, fit_params_type),
}
clf = CheckingClassifier(expected_fit_params=["a", "b"])
pc_clf = CalibratedClassifierCV(clf)
pc_clf.fit(X, y, **fit_params)
|
Tests that fit_params are passed to the underlying base estimator.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/12384
|
test_calibration_with_fit_params
|
python
|
scikit-learn/scikit-learn
|
sklearn/tests/test_calibration.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tests/test_calibration.py
|
BSD-3-Clause
|
def test_calibration_with_sample_weight_estimator(sample_weight, data):
"""Tests that sample_weight is passed to the underlying base
estimator.
"""
X, y = data
clf = CheckingClassifier(expected_sample_weight=True)
pc_clf = CalibratedClassifierCV(clf)
pc_clf.fit(X, y, sample_weight=sample_weight)
|
Tests that sample_weight is passed to the underlying base
estimator.
|
test_calibration_with_sample_weight_estimator
|
python
|
scikit-learn/scikit-learn
|
sklearn/tests/test_calibration.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tests/test_calibration.py
|
BSD-3-Clause
|
def test_calibration_without_sample_weight_estimator(data):
"""Check that even if the estimator doesn't support
sample_weight, fitting with sample_weight still works.
There should be a warning, since the sample_weight is not passed
on to the estimator.
"""
X, y = data
sample_weight = np.ones_like(y)
class ClfWithoutSampleWeight(CheckingClassifier):
def fit(self, X, y, **fit_params):
assert "sample_weight" not in fit_params
return super().fit(X, y, **fit_params)
clf = ClfWithoutSampleWeight()
pc_clf = CalibratedClassifierCV(clf)
with pytest.warns(UserWarning):
pc_clf.fit(X, y, sample_weight=sample_weight)
|
Check that even if the estimator doesn't support
sample_weight, fitting with sample_weight still works.
There should be a warning, since the sample_weight is not passed
on to the estimator.
|
test_calibration_without_sample_weight_estimator
|
python
|
scikit-learn/scikit-learn
|
sklearn/tests/test_calibration.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tests/test_calibration.py
|
BSD-3-Clause
|
def test_calibration_with_non_sample_aligned_fit_param(data):
"""Check that CalibratedClassifierCV does not enforce sample alignment
for fit parameters."""
class TestClassifier(LogisticRegression):
def fit(self, X, y, sample_weight=None, fit_param=None):
assert fit_param is not None
return super().fit(X, y, sample_weight=sample_weight)
CalibratedClassifierCV(estimator=TestClassifier()).fit(
*data, fit_param=np.ones(len(data[1]) + 1)
)
|
Check that CalibratedClassifierCV does not enforce sample alignment
for fit parameters.
|
test_calibration_with_non_sample_aligned_fit_param
|
python
|
scikit-learn/scikit-learn
|
sklearn/tests/test_calibration.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tests/test_calibration.py
|
BSD-3-Clause
|
def test_calibrated_classifier_cv_works_with_large_confidence_scores(
global_random_seed,
):
"""Test that :class:`CalibratedClassifierCV` works with large confidence
scores when using the `sigmoid` method, particularly with the
:class:`SGDClassifier`.
Non-regression test for issue #26766.
"""
prob = 0.67
n = 1000
random_noise = np.random.default_rng(global_random_seed).normal(size=n)
y = np.array([1] * int(n * prob) + [0] * (n - int(n * prob)))
X = 1e5 * y.reshape((-1, 1)) + random_noise
# Check that the decision function of SGDClassifier produces predicted
# values that are quite large, for the data under consideration.
cv = check_cv(cv=None, y=y, classifier=True)
indices = cv.split(X, y)
for train, test in indices:
X_train, y_train = X[train], y[train]
X_test = X[test]
sgd_clf = SGDClassifier(loss="squared_hinge", random_state=global_random_seed)
sgd_clf.fit(X_train, y_train)
predictions = sgd_clf.decision_function(X_test)
assert (predictions > 1e4).any()
# Compare the CalibratedClassifierCV using the sigmoid method with the
# CalibratedClassifierCV using the isotonic method. The isotonic method
# is used for comparison because it is numerically stable.
clf_sigmoid = CalibratedClassifierCV(
SGDClassifier(loss="squared_hinge", random_state=global_random_seed),
method="sigmoid",
)
score_sigmoid = cross_val_score(clf_sigmoid, X, y, scoring="roc_auc")
clf_isotonic = CalibratedClassifierCV(
SGDClassifier(loss="squared_hinge", random_state=global_random_seed),
method="isotonic",
)
score_isotonic = cross_val_score(clf_isotonic, X, y, scoring="roc_auc")
# The AUC score should be the same because it is invariant under
# strictly monotonic conditions
assert_allclose(score_sigmoid, score_isotonic)
|
Test that :class:`CalibratedClassifierCV` works with large confidence
scores when using the `sigmoid` method, particularly with the
:class:`SGDClassifier`.
Non-regression test for issue #26766.
|
test_calibrated_classifier_cv_works_with_large_confidence_scores
|
python
|
scikit-learn/scikit-learn
|
sklearn/tests/test_calibration.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tests/test_calibration.py
|
BSD-3-Clause
|
def test_float32_predict_proba(data, use_sample_weight, method):
"""Check that CalibratedClassifierCV works with float32 predict proba.
Non-regression test for gh-28245 and gh-28247.
"""
if use_sample_weight:
# Use dtype=np.float64 to check that this does not trigger an
# unintentional upcasting: the dtype of the base estimator should
# control the dtype of the final model. In particular, the
# sigmoid calibrator relies on inputs (predictions and sample weights)
# with consistent dtypes because it is partially written in Cython.
# As this test forces the predictions to be `float32`, we want to check
# that `CalibratedClassifierCV` internally converts `sample_weight` to
# the same dtype to avoid crashing the Cython call.
sample_weight = np.ones_like(data[1], dtype=np.float64)
else:
sample_weight = None
    class DummyClassifier32(DummyClassifier):
def predict_proba(self, X):
return super().predict_proba(X).astype(np.float32)
    model = DummyClassifier32()
calibrator = CalibratedClassifierCV(model, method=method)
# Does not raise an error.
calibrator.fit(*data, sample_weight=sample_weight)
# Check with frozen prefit model
    model = DummyClassifier32().fit(*data, sample_weight=sample_weight)
calibrator = CalibratedClassifierCV(FrozenEstimator(model), method=method)
# Does not raise an error.
calibrator.fit(*data, sample_weight=sample_weight)
# TODO(1.8): remove me once the deprecation period is over.
# Check with prefit model using the deprecated cv="prefit" argument:
    model = DummyClassifier32().fit(*data, sample_weight=sample_weight)
calibrator = CalibratedClassifierCV(model, method=method, cv="prefit")
# Does not raise an error.
with pytest.warns(FutureWarning):
calibrator.fit(*data, sample_weight=sample_weight)
|
Check that CalibratedClassifierCV works with float32 predict proba.
Non-regression test for gh-28245 and gh-28247.
|
test_float32_predict_proba
|
python
|
scikit-learn/scikit-learn
|
sklearn/tests/test_calibration.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tests/test_calibration.py
|
BSD-3-Clause
|
def test_error_less_class_samples_than_folds():
"""Check that CalibratedClassifierCV works with string targets.
    Non-regression test for issue #28841.
"""
X = np.random.normal(size=(20, 3))
y = ["a"] * 10 + ["b"] * 10
CalibratedClassifierCV(cv=3).fit(X, y)
|
Check that CalibratedClassifierCV works with string targets.
Non-regression test for issue #28841.
|
test_error_less_class_samples_than_folds
|
python
|
scikit-learn/scikit-learn
|
sklearn/tests/test_calibration.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tests/test_calibration.py
|
BSD-3-Clause
|
def test_check_estimator_generate_only_deprecation():
"""Check that check_estimator with generate_only=True raises a deprecation
warning."""
with pytest.warns(FutureWarning, match="`generate_only` is deprecated in 1.6"):
all_instance_gen_checks = check_estimator(
LogisticRegression(), generate_only=True
)
assert isgenerator(all_instance_gen_checks)
|
Check that check_estimator with generate_only=True raises a deprecation
warning.
|
test_check_estimator_generate_only_deprecation
|
python
|
scikit-learn/scikit-learn
|
sklearn/tests/test_common.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tests/test_common.py
|
BSD-3-Clause
|
def set_assume_finite(assume_finite, sleep_duration):
"""Return the value of assume_finite after waiting `sleep_duration`."""
with config_context(assume_finite=assume_finite):
time.sleep(sleep_duration)
return get_config()["assume_finite"]
|
Return the value of assume_finite after waiting `sleep_duration`.
|
set_assume_finite
|
python
|
scikit-learn/scikit-learn
|
sklearn/tests/test_config.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tests/test_config.py
|
BSD-3-Clause
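The context manager at the heart of the helper above, shown on its own; assume_finite defaults to False and is restored when the context exits:
from sklearn import config_context, get_config

with config_context(assume_finite=True):
    assert get_config()["assume_finite"] is True
assert get_config()["assume_finite"] is False  # restored on exit (the default)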
|
def test_config_threadsafe_joblib(backend):
"""Test that the global config is threadsafe with all joblib backends.
    Two jobs are spawned and set assume_finite to two different values.
    When the job with a duration of 0.1s completes, the assume_finite value
should be the same as the value passed to the function. In other words,
it is not influenced by the other job setting assume_finite to True.
"""
assume_finites = [False, True, False, True]
sleep_durations = [0.1, 0.2, 0.1, 0.2]
items = Parallel(backend=backend, n_jobs=2)(
delayed(set_assume_finite)(assume_finite, sleep_dur)
for assume_finite, sleep_dur in zip(assume_finites, sleep_durations)
)
assert items == [False, True, False, True]
|
Test that the global config is threadsafe with all joblib backends.
Two jobs are spawned and set assume_finite to two different values.
When the job with a duration of 0.1s completes, the assume_finite value
should be the same as the value passed to the function. In other words,
it is not influenced by the other job setting assume_finite to True.
|
test_config_threadsafe_joblib
|
python
|
scikit-learn/scikit-learn
|
sklearn/tests/test_config.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tests/test_config.py
|
BSD-3-Clause
|
def test_config_threadsafe():
"""Uses threads directly to test that the global config does not change
between threads. Same test as `test_config_threadsafe_joblib` but with
`ThreadPoolExecutor`."""
assume_finites = [False, True, False, True]
sleep_durations = [0.1, 0.2, 0.1, 0.2]
with ThreadPoolExecutor(max_workers=2) as e:
items = [
output
for output in e.map(set_assume_finite, assume_finites, sleep_durations)
]
assert items == [False, True, False, True]
|
Uses threads directly to test that the global config does not change
between threads. Same test as `test_config_threadsafe_joblib` but with
`ThreadPoolExecutor`.
|
test_config_threadsafe
|
python
|
scikit-learn/scikit-learn
|
sklearn/tests/test_config.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tests/test_config.py
|
BSD-3-Clause
|
def test_config_array_api_dispatch_error_scipy(monkeypatch):
"""Check error when SciPy is too old"""
monkeypatch.setattr(sklearn.utils._array_api.scipy, "__version__", "1.13.0")
with pytest.raises(ImportError, match="SciPy must be 1.14.0 or newer"):
with config_context(array_api_dispatch=True):
pass
with pytest.raises(ImportError, match="SciPy must be 1.14.0 or newer"):
set_config(array_api_dispatch=True)
|
Check error when SciPy is too old
|
test_config_array_api_dispatch_error_scipy
|
python
|
scikit-learn/scikit-learn
|
sklearn/tests/test_config.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tests/test_config.py
|
BSD-3-Clause
|
def generate_dataset(n_samples, centers, covariances, random_state=None):
"""Generate a multivariate normal data given some centers and
covariances"""
rng = check_random_state(random_state)
X = np.vstack(
[
rng.multivariate_normal(mean, cov, size=n_samples // len(centers))
for mean, cov in zip(centers, covariances)
]
)
y = np.hstack(
[[clazz] * (n_samples // len(centers)) for clazz in range(len(centers))]
)
return X, y
|
Generate multivariate normal data given some centers and
covariances
|
generate_dataset
|
python
|
scikit-learn/scikit-learn
|
sklearn/tests/test_discriminant_analysis.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tests/test_discriminant_analysis.py
|
BSD-3-Clause
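A usage sketch of the helper above, assuming the generate_dataset function just shown is in scope and using two illustrative Gaussian blobs:
import numpy as np

centers = [np.array([-2.0, -2.0]), np.array([2.0, 2.0])]
covariances = [np.eye(2), np.eye(2)]
X, y = generate_dataset(200, centers, covariances, random_state=0)
print(X.shape, np.bincount(y))  # (200, 2) with 100 samples per class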
|
def test_qda_prior_type(priors_type):
"""Check that priors accept array-like."""
priors = [0.5, 0.5]
clf = QuadraticDiscriminantAnalysis(
priors=_convert_container([0.5, 0.5], priors_type)
).fit(X6, y6)
assert isinstance(clf.priors_, np.ndarray)
assert_array_equal(clf.priors_, priors)
|
Check that priors accept array-like.
|
test_qda_prior_type
|
python
|
scikit-learn/scikit-learn
|
sklearn/tests/test_discriminant_analysis.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tests/test_discriminant_analysis.py
|
BSD-3-Clause
|
def test_qda_prior_copy():
"""Check that altering `priors` without `fit` doesn't change `priors_`"""
priors = np.array([0.5, 0.5])
qda = QuadraticDiscriminantAnalysis(priors=priors).fit(X, y)
# we expect the following
assert_array_equal(qda.priors_, qda.priors)
# altering `priors` without `fit` should not change `priors_`
priors[0] = 0.2
assert qda.priors_[0] != qda.priors[0]
|
Check that altering `priors` without `fit` doesn't change `priors_`
|
test_qda_prior_copy
|
python
|
scikit-learn/scikit-learn
|
sklearn/tests/test_discriminant_analysis.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tests/test_discriminant_analysis.py
|
BSD-3-Clause
|
def test_raises_value_error_on_same_number_of_classes_and_samples(solver):
"""
Tests that if the number of samples equals the number
of classes, a ValueError is raised.
"""
X = np.array([[0.5, 0.6], [0.6, 0.5]])
y = np.array(["a", "b"])
clf = LinearDiscriminantAnalysis(solver=solver)
with pytest.raises(ValueError, match="The number of samples must be more"):
clf.fit(X, y)
|
Tests that if the number of samples equals the number
of classes, a ValueError is raised.
|
test_raises_value_error_on_same_number_of_classes_and_samples
|
python
|
scikit-learn/scikit-learn
|
sklearn/tests/test_discriminant_analysis.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tests/test_discriminant_analysis.py
|
BSD-3-Clause
|
def test_get_feature_names_out():
"""Check get_feature_names_out uses class name as prefix."""
est = LinearDiscriminantAnalysis().fit(X, y)
names_out = est.get_feature_names_out()
class_name_lower = "LinearDiscriminantAnalysis".lower()
expected_names_out = np.array(
[
f"{class_name_lower}{i}"
for i in range(est.explained_variance_ratio_.shape[0])
],
dtype=object,
)
assert_array_equal(names_out, expected_names_out)
|
Check get_feature_names_out uses class name as prefix.
|
test_get_feature_names_out
|
python
|
scikit-learn/scikit-learn
|
sklearn/tests/test_discriminant_analysis.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tests/test_discriminant_analysis.py
|
BSD-3-Clause
|
def filter_errors(errors, method, Klass=None):
"""
Ignore some errors based on the method type.
    These rules are specific to scikit-learn."""
for code, message in errors:
        # We ignore the following error codes:
# - RT02: The first line of the Returns section
# should contain only the type, ..
# (as we may need refer to the name of the returned
# object)
# - GL01: Docstring text (summary) should start in the line
# immediately after the opening quotes (not in the same line,
# or leaving a blank line in between)
# - GL02: If there's a blank line, it should be before the
        # first line of the Returns section, not after (this allows having
# short docstrings for properties).
if code in ["RT02", "GL01", "GL02"]:
continue
# Ignore PR02: Unknown parameters for properties. We sometimes use
# properties for ducktyping, i.e. SGDClassifier.predict_proba
# Ignore GL08: Parsing of the method signature failed, possibly because this is
# a property. Properties are sometimes used for deprecated attributes and the
# attribute is already documented in the class docstring.
#
# All error codes:
# https://numpydoc.readthedocs.io/en/latest/validation.html#built-in-validation-checks
if code in ("PR02", "GL08") and Klass is not None and method is not None:
method_obj = getattr(Klass, method)
if isinstance(method_obj, property):
continue
# Following codes are only taken into account for the
# top level class docstrings:
# - ES01: No extended summary found
# - SA01: See Also section not found
# - EX01: No examples section found
if method is not None and code in ["EX01", "SA01", "ES01"]:
continue
yield code, message
|
Ignore some errors based on the method type.
These rules are specific to scikit-learn.
|
filter_errors
|
python
|
scikit-learn/scikit-learn
|
sklearn/tests/test_docstrings.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tests/test_docstrings.py
|
BSD-3-Clause
|
def repr_errors(res, Klass=None, method: Optional[str] = None) -> str:
"""Pretty print original docstring and the obtained errors
Parameters
----------
res : dict
result of numpydoc.validate.validate
Klass : {Estimator, Display, None}
estimator object or None
method : str
if estimator is not None, either the method name or None.
Returns
-------
str
String representation of the error.
"""
if method is None:
if hasattr(Klass, "__init__"):
method = "__init__"
elif Klass is None:
raise ValueError("At least one of Klass, method should be provided")
else:
raise NotImplementedError
if Klass is not None:
obj = getattr(Klass, method)
try:
obj_signature = str(signature(obj))
except TypeError:
# In particular we can't parse the signature of properties
obj_signature = (
"\nParsing of the method signature failed, "
"possibly because this is a property."
)
obj_name = Klass.__name__ + "." + method
else:
obj_signature = ""
obj_name = method
msg = "\n\n" + "\n\n".join(
[
str(res["file"]),
obj_name + obj_signature,
res["docstring"],
"# Errors",
"\n".join(
" - {}: {}".format(code, message) for code, message in res["errors"]
),
]
)
return msg
|
Pretty-print the original docstring and the obtained errors.
Parameters
----------
res : dict
result of numpydoc.validate.validate
Klass : {Estimator, Display, None}
estimator object or None
method : str
if estimator is not None, either the method name or None.
Returns
-------
str
String representation of the error.
|
repr_errors
|
python
|
scikit-learn/scikit-learn
|
sklearn/tests/test_docstrings.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tests/test_docstrings.py
|
BSD-3-Clause
|
def _get_all_fitted_attributes(estimator):
"Get all the fitted attributes of an estimator including properties"
# attributes
fit_attr = list(estimator.__dict__.keys())
# properties
with warnings.catch_warnings():
warnings.filterwarnings("error", category=FutureWarning)
for name in dir(estimator.__class__):
obj = getattr(estimator.__class__, name)
if not isinstance(obj, property):
continue
            # ignore properties that raise an AttributeError and deprecated
            # properties
try:
getattr(estimator, name)
except (AttributeError, FutureWarning):
continue
fit_attr.append(name)
return [k for k in fit_attr if k.endswith("_") and not k.startswith("_")]
|
Get all the fitted attributes of an estimator including properties
|
_get_all_fitted_attributes
|
python
|
scikit-learn/scikit-learn
|
sklearn/tests/test_docstring_parameters.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tests/test_docstring_parameters.py
|
BSD-3-Clause
|
def test_isotonic_regression_ties_secondary_():
"""
Test isotonic regression fit, transform and fit_transform
against the "secondary" ties method and "pituitary" data from R
"isotone" package, as detailed in: J. d. Leeuw, K. Hornik, P. Mair,
Isotone Optimization in R: Pool-Adjacent-Violators Algorithm
(PAVA) and Active Set Methods
Set values based on pituitary example and
the following R command detailed in the paper above:
> library("isotone")
> data("pituitary")
> res1 <- gpava(pituitary$age, pituitary$size, ties="secondary")
> res1$x
`isotone` version: 1.0-2, 2014-09-07
R version: R version 3.1.1 (2014-07-10)
"""
x = [8, 8, 8, 10, 10, 10, 12, 12, 12, 14, 14]
y = [21, 23.5, 23, 24, 21, 25, 21.5, 22, 19, 23.5, 25]
y_true = [
22.22222,
22.22222,
22.22222,
22.22222,
22.22222,
22.22222,
22.22222,
22.22222,
22.22222,
24.25,
24.25,
]
# Check fit, transform and fit_transform
ir = IsotonicRegression()
ir.fit(x, y)
assert_array_almost_equal(ir.transform(x), y_true, 4)
assert_array_almost_equal(ir.fit_transform(x, y), y_true, 4)
|
Test isotonic regression fit, transform and fit_transform
against the "secondary" ties method and "pituitary" data from R
"isotone" package, as detailed in: J. d. Leeuw, K. Hornik, P. Mair,
Isotone Optimization in R: Pool-Adjacent-Violators Algorithm
(PAVA) and Active Set Methods
Set values based on pituitary example and
the following R command detailed in the paper above:
> library("isotone")
> data("pituitary")
> res1 <- gpava(pituitary$age, pituitary$size, ties="secondary")
> res1$x
`isotone` version: 1.0-2, 2014-09-07
R version: R version 3.1.1 (2014-07-10)
|
test_isotonic_regression_ties_secondary_
|
python
|
scikit-learn/scikit-learn
|
sklearn/tests/test_isotonic.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tests/test_isotonic.py
|
BSD-3-Clause
|
def test_isotonic_regression_with_ties_in_differently_sized_groups():
"""
Non-regression test to handle issue 9432:
https://github.com/scikit-learn/scikit-learn/issues/9432
Compare against output in R:
> library("isotone")
> x <- c(0, 1, 1, 2, 3, 4)
> y <- c(0, 0, 1, 0, 0, 1)
> res1 <- gpava(x, y, ties="secondary")
> res1$x
`isotone` version: 1.1-0, 2015-07-24
R version: R version 3.3.2 (2016-10-31)
"""
x = np.array([0, 1, 1, 2, 3, 4])
y = np.array([0, 0, 1, 0, 0, 1])
y_true = np.array([0.0, 0.25, 0.25, 0.25, 0.25, 1.0])
ir = IsotonicRegression()
ir.fit(x, y)
assert_array_almost_equal(ir.transform(x), y_true)
assert_array_almost_equal(ir.fit_transform(x, y), y_true)
|
Non-regression test to handle issue 9432:
https://github.com/scikit-learn/scikit-learn/issues/9432
Compare against output in R:
> library("isotone")
> x <- c(0, 1, 1, 2, 3, 4)
> y <- c(0, 0, 1, 0, 0, 1)
> res1 <- gpava(x, y, ties="secondary")
> res1$x
`isotone` version: 1.1-0, 2015-07-24
R version: R version 3.3.2 (2016-10-31)
|
test_isotonic_regression_with_ties_in_differently_sized_groups
|
python
|
scikit-learn/scikit-learn
|
sklearn/tests/test_isotonic.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tests/test_isotonic.py
|
BSD-3-Clause
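The pool-adjacent-violators behaviour that these tests pin down, on a tiny example: values violating monotonicity are averaged until the fit is non-decreasing (sketch):
import numpy as np
from sklearn.isotonic import IsotonicRegression

x = np.array([1.0, 2.0, 3.0, 4.0])
y = np.array([1.0, 3.0, 2.0, 4.0])
ir = IsotonicRegression()
y_iso = ir.fit_transform(x, y)
print(y_iso)  # [1.  2.5 2.5 4. ]: the (3, 2) violation is pooled to its mean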
|
def test_isotonic_regression_sample_weight_not_overwritten():
"""Check that calling fitting function of isotonic regression will not
overwrite `sample_weight`.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/20508
"""
X, y = make_regression(n_samples=10, n_features=1, random_state=41)
sample_weight_original = np.ones_like(y)
sample_weight_original[0] = 10
sample_weight_fit = sample_weight_original.copy()
isotonic_regression(y, sample_weight=sample_weight_fit)
assert_allclose(sample_weight_fit, sample_weight_original)
IsotonicRegression().fit(X, y, sample_weight=sample_weight_fit)
assert_allclose(sample_weight_fit, sample_weight_original)
|
Check that calling the fitting function of isotonic regression will
not overwrite `sample_weight`.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/20508
|
test_isotonic_regression_sample_weight_not_overwritten
|
python
|
scikit-learn/scikit-learn
|
sklearn/tests/test_isotonic.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tests/test_isotonic.py
|
BSD-3-Clause
|
def test_isotonic_regression_output_predict():
"""Check that `predict` does return the expected output type.
We need to check that `transform` will output a DataFrame and a NumPy array
when we set `transform_output` to `pandas`.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/25499
"""
pd = pytest.importorskip("pandas")
X, y = make_regression(n_samples=10, n_features=1, random_state=42)
regressor = IsotonicRegression()
with sklearn.config_context(transform_output="pandas"):
regressor.fit(X, y)
X_trans = regressor.transform(X)
y_pred = regressor.predict(X)
assert isinstance(X_trans, pd.DataFrame)
assert isinstance(y_pred, np.ndarray)
|
Check that `predict` returns the expected output type.
We need to check that `transform` outputs a DataFrame while `predict`
outputs a NumPy array when we set `transform_output` to `pandas`.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/25499
|
test_isotonic_regression_output_predict
|
python
|
scikit-learn/scikit-learn
|
sklearn/tests/test_isotonic.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tests/test_isotonic.py
|
BSD-3-Clause
|
def test_polynomial_count_sketch_dense_sparse(gamma, degree, coef0, csr_container):
"""Check that PolynomialCountSketch results are the same for dense and sparse
input.
"""
ps_dense = PolynomialCountSketch(
n_components=500, gamma=gamma, degree=degree, coef0=coef0, random_state=42
)
Xt_dense = ps_dense.fit_transform(X)
Yt_dense = ps_dense.transform(Y)
ps_sparse = PolynomialCountSketch(
n_components=500, gamma=gamma, degree=degree, coef0=coef0, random_state=42
)
Xt_sparse = ps_sparse.fit_transform(csr_container(X))
Yt_sparse = ps_sparse.transform(csr_container(Y))
assert_allclose(Xt_dense, Xt_sparse)
assert_allclose(Yt_dense, Yt_sparse)
|
Check that PolynomialCountSketch results are the same for dense and sparse
input.
|
test_polynomial_count_sketch_dense_sparse
|
python
|
scikit-learn/scikit-learn
|
sklearn/tests/test_kernel_approximation.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tests/test_kernel_approximation.py
|
BSD-3-Clause
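A minimal sketch of the transformer itself, on random dense data with illustrative hyperparameters:
import numpy as np
from sklearn.kernel_approximation import PolynomialCountSketch

rng = np.random.RandomState(0)
X = rng.rand(20, 5)
ps = PolynomialCountSketch(n_components=100, degree=2, random_state=0)
X_features = ps.fit_transform(X)
print(X_features.shape)  # (20, 100): approximate degree-2 polynomial kernel map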
|
def test_additive_chi2_sampler_sample_steps(method, sample_steps):
"""Check that the input sample step doesn't raise an error
and that sample interval doesn't change after fit.
"""
transformer = AdditiveChi2Sampler(sample_steps=sample_steps)
getattr(transformer, method)(X)
sample_interval = 0.5
transformer = AdditiveChi2Sampler(
sample_steps=sample_steps,
sample_interval=sample_interval,
)
getattr(transformer, method)(X)
assert transformer.sample_interval == sample_interval
|
Check that a valid input `sample_steps` doesn't raise an error
and that `sample_interval` doesn't change after fit.
|
test_additive_chi2_sampler_sample_steps
|
python
|
scikit-learn/scikit-learn
|
sklearn/tests/test_kernel_approximation.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tests/test_kernel_approximation.py
|
BSD-3-Clause
|
def test_additive_chi2_sampler_wrong_sample_steps(method):
"""Check that we raise a ValueError on invalid sample_steps"""
transformer = AdditiveChi2Sampler(sample_steps=4)
msg = re.escape(
"If sample_steps is not in [1, 2, 3], you need to provide sample_interval"
)
with pytest.raises(ValueError, match=msg):
getattr(transformer, method)(X)
|
Check that we raise a ValueError on invalid sample_steps
|
test_additive_chi2_sampler_wrong_sample_steps
|
python
|
scikit-learn/scikit-learn
|
sklearn/tests/test_kernel_approximation.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tests/test_kernel_approximation.py
|
BSD-3-Clause
|
def test_rbf_sampler_fitted_attributes_dtype(global_dtype):
"""Check that the fitted attributes are stored accordingly to the
data type of X."""
rbf = RBFSampler()
X = np.array([[1, 2], [3, 4], [5, 6]], dtype=global_dtype)
rbf.fit(X)
assert rbf.random_offset_.dtype == global_dtype
assert rbf.random_weights_.dtype == global_dtype
|
Check that the fitted attributes are stored according to the
data type of X.
|
test_rbf_sampler_fitted_attributes_dtype
|
python
|
scikit-learn/scikit-learn
|
sklearn/tests/test_kernel_approximation.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tests/test_kernel_approximation.py
|
BSD-3-Clause
|
def test_rbf_sampler_dtype_equivalence():
"""Check the equivalence of the results with 32 and 64 bits input."""
rbf32 = RBFSampler(random_state=42)
X32 = np.array([[1, 2], [3, 4], [5, 6]], dtype=np.float32)
rbf32.fit(X32)
rbf64 = RBFSampler(random_state=42)
X64 = np.array([[1, 2], [3, 4], [5, 6]], dtype=np.float64)
rbf64.fit(X64)
assert_allclose(rbf32.random_offset_, rbf64.random_offset_)
assert_allclose(rbf32.random_weights_, rbf64.random_weights_)
|
Check the equivalence of the results with 32 and 64 bits input.
|
test_rbf_sampler_dtype_equivalence
|
python
|
scikit-learn/scikit-learn
|
sklearn/tests/test_kernel_approximation.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tests/test_kernel_approximation.py
|
BSD-3-Clause
|
def test_rbf_sampler_gamma_scale():
"""Check the inner value computed when `gamma='scale'`."""
X, y = [[0.0], [1.0]], [0, 1]
rbf = RBFSampler(gamma="scale")
rbf.fit(X, y)
assert rbf._gamma == pytest.approx(4)
|
Check the inner value computed when `gamma='scale'`.
|
test_rbf_sampler_gamma_scale
|
python
|
scikit-learn/scikit-learn
|
sklearn/tests/test_kernel_approximation.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tests/test_kernel_approximation.py
|
BSD-3-Clause
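The value pinned down above follows if gamma='scale' is computed as 1 / (n_features * X.var()), the same convention as in the SVMs; a sketch of the arithmetic:
import numpy as np

X = np.array([[0.0], [1.0]])
gamma_scale = 1.0 / (X.shape[1] * X.var())
print(gamma_scale)  # 1 / (1 * 0.25) == 4.0, matching the assertion above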
|
def test_skewed_chi2_sampler_fitted_attributes_dtype(global_dtype):
"""Check that the fitted attributes are stored accordingly to the
data type of X."""
skewed_chi2_sampler = SkewedChi2Sampler()
X = np.array([[1, 2], [3, 4], [5, 6]], dtype=global_dtype)
skewed_chi2_sampler.fit(X)
assert skewed_chi2_sampler.random_offset_.dtype == global_dtype
assert skewed_chi2_sampler.random_weights_.dtype == global_dtype
|
Check that the fitted attributes are stored according to the
data type of X.
|
test_skewed_chi2_sampler_fitted_attributes_dtype
|
python
|
scikit-learn/scikit-learn
|
sklearn/tests/test_kernel_approximation.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tests/test_kernel_approximation.py
|
BSD-3-Clause
|
def test_skewed_chi2_sampler_dtype_equivalence():
"""Check the equivalence of the results with 32 and 64 bits input."""
skewed_chi2_sampler_32 = SkewedChi2Sampler(random_state=42)
X_32 = np.array([[1, 2], [3, 4], [5, 6]], dtype=np.float32)
skewed_chi2_sampler_32.fit(X_32)
skewed_chi2_sampler_64 = SkewedChi2Sampler(random_state=42)
X_64 = np.array([[1, 2], [3, 4], [5, 6]], dtype=np.float64)
skewed_chi2_sampler_64.fit(X_64)
assert_allclose(
skewed_chi2_sampler_32.random_offset_, skewed_chi2_sampler_64.random_offset_
)
assert_allclose(
skewed_chi2_sampler_32.random_weights_, skewed_chi2_sampler_64.random_weights_
)
|
Check the equivalence of the results with 32 and 64 bits input.
|
test_skewed_chi2_sampler_dtype_equivalence
|
python
|
scikit-learn/scikit-learn
|
sklearn/tests/test_kernel_approximation.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tests/test_kernel_approximation.py
|
BSD-3-Clause
|
def logging_histogram_kernel(x, y, log):
"""Histogram kernel that writes to a log."""
log.append(1)
return np.minimum(x, y).sum()
|
Histogram kernel that writes to a log.
|
logging_histogram_kernel
|
python
|
scikit-learn/scikit-learn
|
sklearn/tests/test_kernel_approximation.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tests/test_kernel_approximation.py
|
BSD-3-Clause
|
def test_nystroem_component_indices():
"""Check that `component_indices_` corresponds to the subset of
training points used to construct the feature map.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/20474
"""
X, _ = make_classification(n_samples=100, n_features=20)
feature_map_nystroem = Nystroem(
n_components=10,
random_state=0,
)
feature_map_nystroem.fit(X)
assert feature_map_nystroem.component_indices_.shape == (10,)
|
Check that `component_indices_` corresponds to the subset of
training points used to construct the feature map.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/20474
|
test_nystroem_component_indices
|
python
|
scikit-learn/scikit-learn
|
sklearn/tests/test_kernel_approximation.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tests/test_kernel_approximation.py
|
BSD-3-Clause
|
def test_estimator_puts_self_in_registry(estimator):
"""Check that an estimator puts itself in the registry upon fit."""
estimator.fit(X, y)
assert estimator in estimator.registry
|
Check that an estimator puts itself in the registry upon fit.
|
test_estimator_puts_self_in_registry
|
python
|
scikit-learn/scikit-learn
|
sklearn/tests/test_metadata_routing.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tests/test_metadata_routing.py
|
BSD-3-Clause
|
def test_default_request_override():
"""Test that default requests are correctly overridden regardless of the ASCII order
of the class names, hence testing small and capital letter class name starts.
Non-regression test for https://github.com/scikit-learn/scikit-learn/issues/28430
"""
class Base(BaseEstimator):
__metadata_request__split = {"groups": True}
class class_1(Base):
__metadata_request__split = {"groups": "sample_domain"}
class Class_1(Base):
__metadata_request__split = {"groups": "sample_domain"}
assert_request_equal(
class_1()._get_metadata_request(), {"split": {"groups": "sample_domain"}}
)
assert_request_equal(
Class_1()._get_metadata_request(), {"split": {"groups": "sample_domain"}}
)
|
Test that default requests are correctly overridden regardless of the ASCII
order of the class names, hence testing class names starting with both
lowercase and uppercase letters.
Non-regression test for https://github.com/scikit-learn/scikit-learn/issues/28430
|
test_default_request_override
|
python
|
scikit-learn/scikit-learn
|
sklearn/tests/test_metadata_routing.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tests/test_metadata_routing.py
|
BSD-3-Clause
|
def test_removing_non_existing_param_raises():
"""Test that removing a metadata using UNUSED which doesn't exist raises."""
class InvalidRequestRemoval(BaseEstimator):
# `fit` (in this class or a parent) requests `prop`, but we don't want
# it requested at all.
__metadata_request__fit = {"prop": metadata_routing.UNUSED}
def fit(self, X, y, **kwargs):
return self
with pytest.raises(ValueError, match="Trying to remove parameter"):
InvalidRequestRemoval().get_metadata_routing()
|
Test that removing a non-existing metadata using UNUSED raises.
|
test_removing_non_existing_param_raises
|
python
|
scikit-learn/scikit-learn
|
sklearn/tests/test_metadata_routing.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tests/test_metadata_routing.py
|
BSD-3-Clause
|
def test_metadata_request_consumes_method():
"""Test that MetadataRequest().consumes() method works as expected."""
request = MetadataRouter(owner="test")
assert request.consumes(method="fit", params={"foo"}) == set()
request = MetadataRequest(owner="test")
request.fit.add_request(param="foo", alias=True)
assert request.consumes(method="fit", params={"foo"}) == {"foo"}
request = MetadataRequest(owner="test")
request.fit.add_request(param="foo", alias="bar")
assert request.consumes(method="fit", params={"bar", "foo"}) == {"bar"}
|
Test that MetadataRequest().consumes() method works as expected.
|
test_metadata_request_consumes_method
|
python
|
scikit-learn/scikit-learn
|
sklearn/tests/test_metadata_routing.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tests/test_metadata_routing.py
|
BSD-3-Clause
|
def test_metadata_router_consumes_method():
"""Test that MetadataRouter().consumes method works as expected."""
# having it here instead of parametrizing the test since `set_fit_request`
# is not available while collecting the tests.
cases = [
(
WeightedMetaRegressor(
estimator=ConsumingRegressor().set_fit_request(sample_weight=True)
),
{"sample_weight"},
{"sample_weight"},
),
(
WeightedMetaRegressor(
estimator=ConsumingRegressor().set_fit_request(
sample_weight="my_weights"
)
),
{"my_weights", "sample_weight"},
{"my_weights"},
),
]
for obj, input, output in cases:
assert obj.get_metadata_routing().consumes(method="fit", params=input) == output
|
Test that MetadataRouter().consumes method works as expected.
|
test_metadata_router_consumes_method
|
python
|
scikit-learn/scikit-learn
|
sklearn/tests/test_metadata_routing.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tests/test_metadata_routing.py
|
BSD-3-Clause
|
def test_no_feature_flag_raises_error():
"""Test that when feature flag disabled, set_{method}_requests raises."""
with config_context(enable_metadata_routing=False):
with pytest.raises(RuntimeError, match="This method is only available"):
ConsumingClassifier().set_fit_request(sample_weight=True)
|
Test that when the feature flag is disabled, set_{method}_request raises.
|
test_no_feature_flag_raises_error
|
python
|
scikit-learn/scikit-learn
|
sklearn/tests/test_metadata_routing.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tests/test_metadata_routing.py
|
BSD-3-Clause
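Conversely, with the flag enabled the request can be set; a minimal sketch with a plain estimator:
from sklearn import config_context
from sklearn.linear_model import LogisticRegression

with config_context(enable_metadata_routing=True):
    est = LogisticRegression().set_fit_request(sample_weight=True)
    print(est.get_metadata_routing())  # fit now requests sample_weight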
|
def test_no_metadata_always_works():
"""Test that when no metadata is passed, having a meta-estimator which does
not yet support metadata routing works.
Non-regression test for https://github.com/scikit-learn/scikit-learn/issues/28246
"""
class Estimator(_RoutingNotSupportedMixin, BaseEstimator):
def fit(self, X, y, metadata=None):
return self
# This passes since no metadata is passed.
MetaRegressor(estimator=Estimator()).fit(X, y)
# This fails since metadata is passed but Estimator() does not support it.
with pytest.raises(
NotImplementedError, match="Estimator has not implemented metadata routing yet."
):
MetaRegressor(estimator=Estimator()).fit(X, y, metadata=my_groups)
|
Test that a meta-estimator which does not yet support metadata routing
works when no metadata is passed.
Non-regression test for https://github.com/scikit-learn/scikit-learn/issues/28246
|
test_no_metadata_always_works
|
python
|
scikit-learn/scikit-learn
|
sklearn/tests/test_metadata_routing.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tests/test_metadata_routing.py
|
BSD-3-Clause
|
def test_unsetmetadatapassederror_correct():
"""Test that UnsetMetadataPassedError raises the correct error message when
set_{method}_request is not set in nested cases."""
weighted_meta = WeightedMetaClassifier(estimator=ConsumingClassifier())
pipe = SimplePipeline([weighted_meta])
msg = re.escape(
"[metadata] are passed but are not explicitly set as requested or not requested"
" for ConsumingClassifier.fit, which is used within WeightedMetaClassifier.fit."
" Call `ConsumingClassifier.set_fit_request({metadata}=True/False)` for each"
" metadata you want to request/ignore."
)
with pytest.raises(UnsetMetadataPassedError, match=msg):
pipe.fit(X, y, metadata="blah")
|
Test that UnsetMetadataPassedError is raised with the correct error message
when set_{method}_request is not set in nested cases.
|
test_unsetmetadatapassederror_correct
|
python
|
scikit-learn/scikit-learn
|
sklearn/tests/test_metadata_routing.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tests/test_metadata_routing.py
|
BSD-3-Clause
|
def test_unsetmetadatapassederror_correct_for_composite_methods():
"""Test that UnsetMetadataPassedError raises the correct error message when
composite metadata request methods are not set in nested cases."""
consuming_transformer = ConsumingTransformer()
pipe = Pipeline([("consuming_transformer", consuming_transformer)])
msg = re.escape(
"[metadata] are passed but are not explicitly set as requested or not requested"
" for ConsumingTransformer.fit_transform, which is used within"
" Pipeline.fit_transform. Call"
" `ConsumingTransformer.set_fit_request({metadata}=True/False)"
".set_transform_request({metadata}=True/False)`"
" for each metadata you want to request/ignore."
)
with pytest.raises(UnsetMetadataPassedError, match=msg):
pipe.fit_transform(X, y, metadata="blah")
|
Test that UnsetMetadataPassedError is raised with the correct error message
when composite metadata request methods are not set in nested cases.
|
test_unsetmetadatapassederror_correct_for_composite_methods
|
python
|
scikit-learn/scikit-learn
|
sklearn/tests/test_metadata_routing.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tests/test_metadata_routing.py
|
BSD-3-Clause
|
def test_unbound_set_methods_work():
"""Tests that if the set_{method}_request is unbound, it still works.
    Also tests that passing positional arguments to set_{method}_request fails
with the right TypeError message.
Non-regression test for https://github.com/scikit-learn/scikit-learn/issues/28632
"""
class A(BaseEstimator):
def fit(self, X, y, sample_weight=None):
return self
error_message = re.escape(
"set_fit_request() takes 0 positional argument but 1 were given"
)
# Test positional arguments error before making the descriptor method unbound.
with pytest.raises(TypeError, match=error_message):
A().set_fit_request(True)
# This somehow makes the descriptor method unbound, which results in the `instance`
# argument being None, and instead `self` being passed as a positional argument
# to the descriptor method.
A.set_fit_request = A.set_fit_request
# This should pass as usual
A().set_fit_request(sample_weight=True)
# Test positional arguments error after making the descriptor method unbound.
with pytest.raises(TypeError, match=error_message):
A().set_fit_request(True)
|
Tests that if the set_{method}_request is unbound, it still works.
Also tests that passing positional arguments to set_{method}_request fails
with the right TypeError message.
Non-regression test for https://github.com/scikit-learn/scikit-learn/issues/28632
|
test_unbound_set_methods_work
|
python
|
scikit-learn/scikit-learn
|
sklearn/tests/test_metadata_routing.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tests/test_metadata_routing.py
|
BSD-3-Clause
|
def _get_instance_with_pipeline(meta_estimator, init_params):
"""Given a single meta-estimator instance, generate an instance with a pipeline"""
if {"estimator", "base_estimator", "regressor"} & init_params:
if is_regressor(meta_estimator):
estimator = make_pipeline(TfidfVectorizer(), Ridge())
param_grid = {"ridge__alpha": [0.1, 1.0]}
else:
estimator = make_pipeline(TfidfVectorizer(), LogisticRegression())
param_grid = {"logisticregression__C": [0.1, 1.0]}
if init_params.intersection(
{"param_grid", "param_distributions"}
): # SearchCV estimators
extra_params = {"n_iter": 2} if "n_iter" in init_params else {}
return type(meta_estimator)(estimator, param_grid, **extra_params)
else:
return type(meta_estimator)(estimator)
if "transformer_list" in init_params:
# FeatureUnion
transformer_list = [
("trans1", make_pipeline(TfidfVectorizer(), MaxAbsScaler())),
(
"trans2",
make_pipeline(TfidfVectorizer(), StandardScaler(with_mean=False)),
),
]
return type(meta_estimator)(transformer_list)
if "estimators" in init_params:
# stacking, voting
if is_regressor(meta_estimator):
estimator = [
("est1", make_pipeline(TfidfVectorizer(), Ridge(alpha=0.1))),
("est2", make_pipeline(TfidfVectorizer(), Ridge(alpha=1))),
]
else:
estimator = [
(
"est1",
make_pipeline(TfidfVectorizer(), LogisticRegression(C=0.1)),
),
("est2", make_pipeline(TfidfVectorizer(), LogisticRegression(C=1))),
]
return type(meta_estimator)(estimator)
|
Given a single meta-estimator instance, generate an instance with a pipeline
|
_get_instance_with_pipeline
|
python
|
scikit-learn/scikit-learn
|
sklearn/tests/test_metaestimators.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tests/test_metaestimators.py
|
BSD-3-Clause
|
def _generate_meta_estimator_instances_with_pipeline():
"""Generate instances of meta-estimators fed with a pipeline
Are considered meta-estimators all estimators accepting one of "estimator",
"base_estimator" or "estimators".
"""
print("estimators: ", len(all_estimators()))
for _, Estimator in sorted(all_estimators()):
sig = set(signature(Estimator).parameters)
print("\n", Estimator.__name__, sig)
if not sig.intersection(
{
"estimator",
"base_estimator",
"regressor",
"transformer_list",
"estimators",
}
):
continue
with suppress(SkipTest):
for meta_estimator in _construct_instances(Estimator):
print(meta_estimator)
yield _get_instance_with_pipeline(meta_estimator, sig)
|
Generate instances of meta-estimators fed with a pipeline.
All estimators accepting one of "estimator", "base_estimator" or
"estimators" are considered meta-estimators.
|
_generate_meta_estimator_instances_with_pipeline
|
python
|
scikit-learn/scikit-learn
|
sklearn/tests/test_metaestimators.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tests/test_metaestimators.py
|
BSD-3-Clause
|
def get_init_args(metaestimator_info, sub_estimator_consumes):
"""Get the init args for a metaestimator
This is a helper function to get the init args for a metaestimator from
the METAESTIMATORS list. It returns an empty dict if no init args are
required.
Parameters
----------
metaestimator_info : dict
The metaestimator info from METAESTIMATORS
sub_estimator_consumes : bool
Whether the sub-estimator consumes metadata or not.
Returns
-------
kwargs : dict
The init args for the metaestimator.
(estimator, estimator_registry) : (estimator, registry)
The sub-estimator and the corresponding registry.
(scorer, scorer_registry) : (scorer, registry)
The scorer and the corresponding registry.
(cv, cv_registry) : (CV splitter, registry)
The CV splitter and the corresponding registry.
"""
kwargs = metaestimator_info.get("init_args", {})
estimator, estimator_registry = None, None
scorer, scorer_registry = None, None
cv, cv_registry = None, None
if "estimator" in metaestimator_info:
estimator_name = metaestimator_info["estimator_name"]
estimator_registry = _Registry()
sub_estimator_type = metaestimator_info["estimator"]
if sub_estimator_consumes:
if sub_estimator_type == "regressor":
estimator = ConsumingRegressor(estimator_registry)
elif sub_estimator_type == "classifier":
estimator = ConsumingClassifier(estimator_registry)
else:
raise ValueError("Unpermitted `sub_estimator_type`.") # pragma: nocover
else:
if sub_estimator_type == "regressor":
estimator = NonConsumingRegressor()
elif sub_estimator_type == "classifier":
estimator = NonConsumingClassifier()
else:
raise ValueError("Unpermitted `sub_estimator_type`.") # pragma: nocover
kwargs[estimator_name] = estimator
if "scorer_name" in metaestimator_info:
scorer_name = metaestimator_info["scorer_name"]
scorer_registry = _Registry()
scorer = ConsumingScorer(registry=scorer_registry)
kwargs[scorer_name] = scorer
if "cv_name" in metaestimator_info:
cv_name = metaestimator_info["cv_name"]
cv_registry = _Registry()
cv = ConsumingSplitter(registry=cv_registry)
kwargs[cv_name] = cv
return (
kwargs,
(estimator, estimator_registry),
(scorer, scorer_registry),
(cv, cv_registry),
)
|
Get the init args for a metaestimator
This is a helper function to get the init args for a metaestimator from
the METAESTIMATORS list. It returns an empty dict if no init args are
required.
Parameters
----------
metaestimator_info : dict
The metaestimator info from METAESTIMATORS
sub_estimator_consumes : bool
Whether the sub-estimator consumes metadata or not.
Returns
-------
kwargs : dict
The init args for the metaestimator.
(estimator, estimator_registry) : (estimator, registry)
The sub-estimator and the corresponding registry.
(scorer, scorer_registry) : (scorer, registry)
The scorer and the corresponding registry.
(cv, cv_registry) : (CV splitter, registry)
The CV splitter and the corresponding registry.
|
get_init_args
|
python
|
scikit-learn/scikit-learn
|
sklearn/tests/test_metaestimators_metadata_routing.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tests/test_metaestimators_metadata_routing.py
|
BSD-3-Clause
|
def set_requests(obj, *, method_mapping, methods, metadata_name, value=True):
"""Call `set_{method}_request` on a list of methods from the sub-estimator.
Parameters
----------
obj : BaseEstimator
The object for which `set_{method}_request` methods are called.
method_mapping : dict
The method mapping in the form of `{caller: [callee, ...]}`.
If a "caller" is not present in the method mapping, a one-to-one mapping is
assumed.
methods : list of str
The list of methods as "caller"s for which the request for the child should
be set.
metadata_name : str
The name of the metadata to be routed, usually either `"metadata"` or
`"sample_weight"` in our tests.
value : None, bool, or str
The request value to be set, by default it's `True`
"""
for caller in methods:
for callee in method_mapping.get(caller, [caller]):
set_request_for_method = getattr(obj, f"set_{callee}_request")
set_request_for_method(**{metadata_name: value})
if (
isinstance(obj, BaseEstimator)
and is_classifier(obj)
and callee == "partial_fit"
):
set_request_for_method(classes=True)
|
Call `set_{method}_request` on a list of methods from the sub-estimator.
Parameters
----------
obj : BaseEstimator
The object for which `set_{method}_request` methods are called.
method_mapping : dict
The method mapping in the form of `{caller: [callee, ...]}`.
If a "caller" is not present in the method mapping, a one-to-one mapping is
assumed.
methods : list of str
The list of methods as "caller"s for which the request for the child should
be set.
metadata_name : str
The name of the metadata to be routed, usually either `"metadata"` or
`"sample_weight"` in our tests.
value : None, bool, or str
The request value to be set, by default it's `True`
|
set_requests
|
python
|
scikit-learn/scikit-learn
|
sklearn/tests/test_metaestimators_metadata_routing.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tests/test_metaestimators_metadata_routing.py
|
BSD-3-Clause
|
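To make the `{caller: [callee, ...]}` semantics concrete, here is a hedged sketch. It assumes `SGDClassifier` as a partial_fit-capable classifier and metadata routing enabled; a single caller `"fit"` mapped to two callees sets both child requests.

import sklearn
from sklearn.linear_model import SGDClassifier

sklearn.set_config(enable_metadata_routing=True)  # set_*_request needs this

clf = SGDClassifier()
set_requests(  # the helper defined above
    clf,
    method_mapping={"fit": ["fit", "partial_fit"]},
    methods=["fit"],
    metadata_name="sample_weight",
)
# Equivalent to calling by hand:
#   clf.set_fit_request(sample_weight=True)
#   clf.set_partial_fit_request(sample_weight=True)
#   clf.set_partial_fit_request(classes=True)  # extra call for classifiers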
def test_unsupported_estimators_fit_with_metadata(estimator):
"""Test that fit raises NotImplementedError when metadata routing is
enabled and a metadata is passed on meta-estimators for which we haven't
implemented routing yet."""
with pytest.raises(NotImplementedError):
try:
estimator.fit([[1]], [1], sample_weight=[1])
except TypeError:
# not all meta-estimators in the list support sample_weight,
# and for those we skip this test.
raise NotImplementedError
|
Test that fit raises NotImplementedError when metadata routing is
enabled and metadata is passed to meta-estimators for which we haven't
implemented routing yet.
|
test_unsupported_estimators_fit_with_metadata
|
python
|
scikit-learn/scikit-learn
|
sklearn/tests/test_metaestimators_metadata_routing.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tests/test_metaestimators_metadata_routing.py
|
BSD-3-Clause
|
def test_metadata_is_routed_correctly_to_scorer(metaestimator):
"""Test that any requested metadata is correctly routed to the underlying
scorers in CV estimators.
"""
if "scorer_name" not in metaestimator:
# This test only makes sense for CV estimators
return
metaestimator_class = metaestimator["metaestimator"]
routing_methods = metaestimator["scorer_routing_methods"]
method_mapping = metaestimator.get("method_mapping", {})
for method_name in routing_methods:
kwargs, (estimator, _), (scorer, registry), (cv, _) = get_init_args(
metaestimator, sub_estimator_consumes=True
)
scorer.set_score_request(sample_weight=True)
if cv:
cv.set_split_request(groups=True, metadata=True)
if estimator is not None:
set_requests(
estimator,
method_mapping=method_mapping,
methods=[method_name],
metadata_name="sample_weight",
)
instance = metaestimator_class(**kwargs)
method = getattr(instance, method_name)
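        # `sample_weight` (and `X`, `y` below) are module-level test fixtures
        # defined near the top of this file.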
method_kwargs = {"sample_weight": sample_weight}
if "fit" not in method_name:
instance.fit(X, y)
method(X, y, **method_kwargs)
assert registry
for _scorer in registry:
check_recorded_metadata(
obj=_scorer,
method="score",
parent=method_name,
split_params=("sample_weight",),
**method_kwargs,
)
|
Test that any requested metadata is correctly routed to the underlying
scorers in CV estimators.
|
test_metadata_is_routed_correctly_to_scorer
|
python
|
scikit-learn/scikit-learn
|
sklearn/tests/test_metaestimators_metadata_routing.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tests/test_metaestimators_metadata_routing.py
|
BSD-3-Clause
|
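Outside the test fixtures, the same routing mechanism can be exercised with public scikit-learn APIs. A minimal sketch, assuming metadata routing is enabled and using `cross_validate` as the router; the data and names here are illustrative only.

import numpy as np
import sklearn
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, make_scorer
from sklearn.model_selection import cross_validate

sklearn.set_config(enable_metadata_routing=True)

rng = np.random.RandomState(0)
X = rng.normal(size=(40, 3))
y = np.tile([0, 1], 20)
sw = rng.uniform(size=40)

# Both the estimator and the scorer explicitly request sample_weight, so
# cross_validate routes params["sample_weight"] to fit() and score() alike.
est = LogisticRegression().set_fit_request(sample_weight=True)
scorer = make_scorer(accuracy_score).set_score_request(sample_weight=True)
results = cross_validate(est, X, y, scoring=scorer, params={"sample_weight": sw})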
def test_metadata_is_routed_correctly_to_splitter(metaestimator):
"""Test that any requested metadata is correctly routed to the underlying
splitters in CV estimators.
"""
if "cv_routing_methods" not in metaestimator:
# This test is only for metaestimators accepting a CV splitter
return
metaestimator_class = metaestimator["metaestimator"]
routing_methods = metaestimator["cv_routing_methods"]
X_ = metaestimator["X"]
y_ = metaestimator["y"]
for method_name in routing_methods:
kwargs, (estimator, _), (scorer, _), (cv, registry) = get_init_args(
metaestimator, sub_estimator_consumes=True
)
if estimator:
estimator.set_fit_request(sample_weight=False, metadata=False)
if scorer:
scorer.set_score_request(sample_weight=False, metadata=False)
cv.set_split_request(groups=True, metadata=True)
instance = metaestimator_class(**kwargs)
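        # `groups` and `metadata` are module-level test fixtures defined near
        # the top of this file.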
method_kwargs = {"groups": groups, "metadata": metadata}
method = getattr(instance, method_name)
method(X_, y_, **method_kwargs)
assert registry
for _splitter in registry:
check_recorded_metadata(
obj=_splitter, method="split", parent=method_name, **method_kwargs
)
|
Test that any requested metadata is correctly routed to the underlying
splitters in CV estimators.
|
test_metadata_is_routed_correctly_to_splitter
|
python
|
scikit-learn/scikit-learn
|
sklearn/tests/test_metaestimators_metadata_routing.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tests/test_metaestimators_metadata_routing.py
|
BSD-3-Clause
|
def test_metadata_routed_to_group_splitter(metaestimator):
"""Test that groups are routed correctly if group splitter of CV estimator is used
within cross_validate. Regression test for issue described in PR #29634 to test that
`ValueError: The 'groups' parameter should not be None.` is not raised."""
if "cv_routing_methods" not in metaestimator:
# This test is only for metaestimators accepting a CV splitter
return
metaestimator_class = metaestimator["metaestimator"]
X_ = metaestimator["X"]
y_ = metaestimator["y"]
kwargs, *_ = get_init_args(metaestimator, sub_estimator_consumes=True)
# remove `ConsumingSplitter` from kwargs, so 'cv' param isn't passed twice:
kwargs.pop("cv", None)
instance = metaestimator_class(cv=GroupKFold(n_splits=2), **kwargs)
cross_validate(
instance,
X_,
y_,
params={"groups": groups},
cv=GroupKFold(n_splits=2),
scoring=make_scorer(mean_squared_error, response_method="predict"),
)
|
Test that groups are routed correctly when the group splitter of a CV estimator
is used within cross_validate. Regression test for the issue described in PR #29634:
check that `ValueError: The 'groups' parameter should not be None.` is not raised.
|
test_metadata_routed_to_group_splitter
|
python
|
scikit-learn/scikit-learn
|
sklearn/tests/test_metaestimators_metadata_routing.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tests/test_metaestimators_metadata_routing.py
|
BSD-3-Clause
|
def test_min_dependencies_pyproject_toml(pyproject_section, min_dependencies_tag):
"""Check versions in pyproject.toml is consistent with _min_dependencies."""
# NumPy is more complex because build-time (>=1.25) and run-time (>=1.19.5)
# requirement currently don't match
skip_version_check_for = ["numpy"] if min_dependencies_tag == "build" else None
check_pyproject_section(
pyproject_section,
min_dependencies_tag,
skip_version_check_for=skip_version_check_for,
)
|
Check that versions in pyproject.toml are consistent with _min_dependencies.
|
test_min_dependencies_pyproject_toml
|
python
|
scikit-learn/scikit-learn
|
sklearn/tests/test_min_dependencies_readme.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tests/test_min_dependencies_readme.py
|
BSD-3-Clause
|
def test_ovr_single_label_predict_proba_zero():
"""Check that predic_proba returns all zeros when the base estimator
never predicts the positive class.
"""
class NaiveBinaryClassifier(BaseEstimator, ClassifierMixin):
def fit(self, X, y):
self.classes_ = np.unique(y)
return self
def predict_proba(self, X):
proba = np.ones((len(X), 2))
# Probability of being the positive class is always 0
proba[:, 1] = 0
return proba
base_clf = NaiveBinaryClassifier()
X, y = iris.data, iris.target # Three-class problem with 150 samples
clf = OneVsRestClassifier(base_clf).fit(X, y)
y_proba = clf.predict_proba(X)
assert_allclose(y_proba, 0.0)
|
Check that predict_proba returns all zeros when the base estimator
never predicts the positive class.
|
test_ovr_single_label_predict_proba_zero
|
python
|
scikit-learn/scikit-learn
|
sklearn/tests/test_multiclass.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tests/test_multiclass.py
|
BSD-3-Clause
|
def test_pairwise_n_features_in():
"""Check the n_features_in_ attributes of the meta and base estimators
When the training data is a regular design matrix, everything is intuitive.
However, when the training data is a precomputed kernel matrix, the
multiclass strategy can resample the kernel matrix of the underlying base
estimator both row-wise and column-wise and this has a non-trivial impact
on the expected value for the n_features_in_ of both the meta and the base
estimators.
"""
X, y = iris.data, iris.target
# Remove the last sample to make the classes not exactly balanced and make
# the test more interesting.
assert y[-1] == 0
X = X[:-1]
y = y[:-1]
# Fitting directly on the design matrix:
assert X.shape == (149, 4)
clf_notprecomputed = svm.SVC(kernel="linear").fit(X, y)
assert clf_notprecomputed.n_features_in_ == 4
ovr_notprecomputed = OneVsRestClassifier(clf_notprecomputed).fit(X, y)
assert ovr_notprecomputed.n_features_in_ == 4
for est in ovr_notprecomputed.estimators_:
assert est.n_features_in_ == 4
ovo_notprecomputed = OneVsOneClassifier(clf_notprecomputed).fit(X, y)
assert ovo_notprecomputed.n_features_in_ == 4
assert ovo_notprecomputed.n_classes_ == 3
assert len(ovo_notprecomputed.estimators_) == 3
for est in ovo_notprecomputed.estimators_:
assert est.n_features_in_ == 4
# When working with precomputed kernels we have one "feature" per training
# sample:
K = X @ X.T
assert K.shape == (149, 149)
clf_precomputed = svm.SVC(kernel="precomputed").fit(K, y)
assert clf_precomputed.n_features_in_ == 149
ovr_precomputed = OneVsRestClassifier(clf_precomputed).fit(K, y)
assert ovr_precomputed.n_features_in_ == 149
assert ovr_precomputed.n_classes_ == 3
assert len(ovr_precomputed.estimators_) == 3
for est in ovr_precomputed.estimators_:
assert est.n_features_in_ == 149
# This becomes really interesting with OvO and precomputed kernel together:
# internally, OvO will drop the samples of the classes not part of the pair
# of classes under consideration for a given binary classifier. Since we
# use a precomputed kernel, it will also drop the matching columns of the
# kernel matrix, and therefore we have fewer "features" as result.
#
# Since class 0 has 49 samples, and class 1 and 2 have 50 samples each, a
# single OvO binary classifier works with a sub-kernel matrix of shape
# either (99, 99) or (100, 100).
ovo_precomputed = OneVsOneClassifier(clf_precomputed).fit(K, y)
assert ovo_precomputed.n_features_in_ == 149
    assert ovo_precomputed.n_classes_ == 3
    assert len(ovo_precomputed.estimators_) == 3
assert ovo_precomputed.estimators_[0].n_features_in_ == 99 # class 0 vs class 1
assert ovo_precomputed.estimators_[1].n_features_in_ == 99 # class 0 vs class 2
assert ovo_precomputed.estimators_[2].n_features_in_ == 100 # class 1 vs class 2
|
Check the n_features_in_ attributes of the meta and base estimators.
When the training data is a regular design matrix, everything is intuitive.
However, when the training data is a precomputed kernel matrix, the
multiclass strategy can resample the kernel matrix of the underlying base
estimator both row-wise and column-wise and this has a non-trivial impact
on the expected value for the n_features_in_ of both the meta and the base
estimators.
|
test_pairwise_n_features_in
|
python
|
scikit-learn/scikit-learn
|
sklearn/tests/test_multiclass.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tests/test_multiclass.py
|
BSD-3-Clause
|
def test_constant_int_target(make_y):
"""Check that constant y target does not raise.
Non-regression test for #21869
"""
X = np.ones((10, 2))
y = make_y((10, 1), dtype=np.int32)
ovr = OneVsRestClassifier(LogisticRegression())
ovr.fit(X, y)
y_pred = ovr.predict_proba(X)
expected = np.zeros((X.shape[0], 2))
expected[:, 0] = 1
assert_allclose(y_pred, expected)
|
Check that constant y target does not raise.
Non-regression test for #21869
|
test_constant_int_target
|
python
|
scikit-learn/scikit-learn
|
sklearn/tests/test_multiclass.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tests/test_multiclass.py
|
BSD-3-Clause
|