code (string, 66-870k chars) | docstring (string, 19-26.7k chars) | func_name (string, 1-138 chars) | language (string, 1 class) | repo (string, 7-68 chars) | path (string, 5-324 chars) | url (string, 46-389 chars) | license (string, 7 classes)
---|---|---|---|---|---|---|---|
def _get_rescaled_operator(X, X_offset, sample_weight_sqrt):
"""Create LinearOperator for matrix products with implicit centering.
Matrix product `LinearOperator @ coef` returns `(X - X_offset) @ coef`.
"""
def matvec(b):
return X.dot(b) - sample_weight_sqrt * b.dot(X_offset)
def rmatvec(b):
return X.T.dot(b) - X_offset * b.dot(sample_weight_sqrt)
X1 = sparse.linalg.LinearOperator(shape=X.shape, matvec=matvec, rmatvec=rmatvec)
return X1
|
Create LinearOperator for matrix products with implicit centering.
Matrix product `LinearOperator @ coef` returns `(X - X_offset) @ coef`.
|
_get_rescaled_operator
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/_ridge.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/_ridge.py
|
BSD-3-Clause
|
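The operator above is only exercised for sparse X with an intercept. A minimal standalone sketch (not part of the source file) checking that such an operator reproduces explicit centering when all sample weights are one:

import numpy as np
from scipy import sparse
from scipy.sparse import linalg as sp_linalg

rng = np.random.RandomState(0)
X = sparse.random(8, 5, density=0.4, format="csr", random_state=rng)
X_offset = np.asarray(X.mean(axis=0)).ravel()   # column means
sw_sqrt = np.ones(X.shape[0])                   # unit sample weights

op = sp_linalg.LinearOperator(
    shape=X.shape,
    matvec=lambda b: X.dot(b) - sw_sqrt * b.dot(X_offset),
    rmatvec=lambda b: X.T.dot(b) - X_offset * b.dot(sw_sqrt),
)

coef = rng.randn(5)
# Multiplying by the operator is the same as multiplying by the centered matrix.
assert np.allclose(op.matvec(coef), (X.toarray() - X_offset) @ coef)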
def _solve_lsqr(
X,
y,
*,
alpha,
fit_intercept=True,
max_iter=None,
tol=1e-4,
X_offset=None,
X_scale=None,
sample_weight_sqrt=None,
):
"""Solve Ridge regression via LSQR.
We expect that y is always mean centered.
If X is dense, we expect it to be mean centered such that we can solve
||y - Xw||_2^2 + alpha * ||w||_2^2
If X is sparse, we expect X_offset to be given such that we can solve
||y - (X - X_offset)w||_2^2 + alpha * ||w||_2^2
With sample weights S=diag(sample_weight), this becomes
||sqrt(S) (y - (X - X_offset) w)||_2^2 + alpha * ||w||_2^2
and we expect y and X to already be rescaled, i.e. sqrt(S) @ y, sqrt(S) @ X. In
this case, X_offset is the sample_weight weighted mean of X before scaling by
sqrt(S). The objective then reads
||y - (X - sqrt(S) X_offset) w||_2^2 + alpha * ||w||_2^2
"""
if sample_weight_sqrt is None:
sample_weight_sqrt = np.ones(X.shape[0], dtype=X.dtype)
if sparse.issparse(X) and fit_intercept:
X_offset_scale = X_offset / X_scale
X1 = _get_rescaled_operator(X, X_offset_scale, sample_weight_sqrt)
else:
# No need to touch anything
X1 = X
n_samples, n_features = X.shape
coefs = np.empty((y.shape[1], n_features), dtype=X.dtype)
n_iter = np.empty(y.shape[1], dtype=np.int32)
# According to the lsqr documentation, alpha = damp^2.
sqrt_alpha = np.sqrt(alpha)
for i in range(y.shape[1]):
y_column = y[:, i]
info = sp_linalg.lsqr(
X1, y_column, damp=sqrt_alpha[i], atol=tol, btol=tol, iter_lim=max_iter
)
coefs[i] = info[0]
n_iter[i] = info[2]
return coefs, n_iter
|
Solve Ridge regression via LSQR.
We expect that y is always mean centered.
If X is dense, we expect it to be mean centered such that we can solve
||y - Xw||_2^2 + alpha * ||w||_2^2
If X is sparse, we expect X_offset to be given such that we can solve
||y - (X - X_offset)w||_2^2 + alpha * ||w||_2^2
With sample weights S=diag(sample_weight), this becomes
||sqrt(S) (y - (X - X_offset) w)||_2^2 + alpha * ||w||_2^2
and we expect y and X to already be rescaled, i.e. sqrt(S) @ y, sqrt(S) @ X. In
this case, X_offset is the sample_weight weighted mean of X before scaling by
sqrt(S). The objective then reads
||y - (X - sqrt(S) X_offset) w||_2^2 + alpha * ||w||_2^2
|
_solve_lsqr
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/_ridge.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/_ridge.py
|
BSD-3-Clause
|
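The key fact used here is that scipy's lsqr with `damp=sqrt(alpha)` minimizes ||Xw - y||_2^2 + alpha * ||w||_2^2. A small check against the closed-form normal equations on pre-centered dense data (my own sketch, not from the source):

import numpy as np
from scipy.sparse import linalg as sp_linalg

rng = np.random.RandomState(0)
X = rng.randn(50, 3)
X -= X.mean(axis=0)          # the solver expects mean-centered X (dense case)
y = rng.randn(50)
y -= y.mean()                # and mean-centered y
alpha = 2.0

coef_lsqr = sp_linalg.lsqr(X, y, damp=np.sqrt(alpha))[0]
coef_exact = np.linalg.solve(X.T @ X + alpha * np.eye(3), X.T @ y)
assert np.allclose(coef_lsqr, coef_exact, atol=1e-4)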
def _solve_lbfgs(
X,
y,
alpha,
positive=True,
max_iter=None,
tol=1e-4,
X_offset=None,
X_scale=None,
sample_weight_sqrt=None,
):
"""Solve ridge regression with LBFGS.
Its main purpose is fitting with the coefficients constrained to be positive.
For unconstrained ridge regression, there are faster dedicated solver methods.
Note that with positive bounds on the coefficients, LBFGS seems faster
than scipy.optimize.lsq_linear.
"""
n_samples, n_features = X.shape
options = {}
if max_iter is not None:
options["maxiter"] = max_iter
config = {
"method": "L-BFGS-B",
"tol": tol,
"jac": True,
"options": options,
}
if positive:
config["bounds"] = [(0, np.inf)] * n_features
if X_offset is not None and X_scale is not None:
X_offset_scale = X_offset / X_scale
else:
X_offset_scale = None
if sample_weight_sqrt is None:
sample_weight_sqrt = np.ones(X.shape[0], dtype=X.dtype)
coefs = np.empty((y.shape[1], n_features), dtype=X.dtype)
for i in range(y.shape[1]):
x0 = np.zeros((n_features,))
y_column = y[:, i]
def func(w):
residual = X.dot(w) - y_column
if X_offset_scale is not None:
residual -= sample_weight_sqrt * w.dot(X_offset_scale)
f = 0.5 * residual.dot(residual) + 0.5 * alpha[i] * w.dot(w)
grad = X.T @ residual + alpha[i] * w
if X_offset_scale is not None:
grad -= X_offset_scale * residual.dot(sample_weight_sqrt)
return f, grad
result = optimize.minimize(func, x0, **config)
if not result["success"]:
warnings.warn(
(
"The lbfgs solver did not converge. Try increasing max_iter "
f"or tol. Currently: max_iter={max_iter} and tol={tol}"
),
ConvergenceWarning,
)
coefs[i] = result["x"]
return coefs
|
Solve ridge regression with LBFGS.
Its main purpose is fitting with the coefficients constrained to be positive.
For unconstrained ridge regression, there are faster dedicated solver methods.
Note that with positive bounds on the coefficients, LBFGS seems faster
than scipy.optimize.lsq_linear.
|
_solve_lbfgs
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/_ridge.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/_ridge.py
|
BSD-3-Clause
|
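A self-contained sketch of the same idea (illustrative data, not from the source): minimize the ridge objective with `scipy.optimize.minimize` and L-BFGS-B, returning the value and gradient together and bounding the coefficients at zero.

import numpy as np
from scipy import optimize

rng = np.random.RandomState(0)
X = rng.randn(60, 4)
y = X @ np.array([1.5, -2.0, 0.0, 0.7]) + 0.01 * rng.randn(60)
alpha = 1.0

def objective(w):
    residual = X @ w - y
    f = 0.5 * residual @ residual + 0.5 * alpha * w @ w
    grad = X.T @ residual + alpha * w
    return f, grad

result = optimize.minimize(
    objective,
    np.zeros(X.shape[1]),
    method="L-BFGS-B",
    jac=True,                            # objective returns (value, gradient)
    bounds=[(0, np.inf)] * X.shape[1],   # force non-negative coefficients
)
assert result.success and np.all(result.x >= 0)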
def ridge_regression(
X,
y,
alpha,
*,
sample_weight=None,
solver="auto",
max_iter=None,
tol=1e-4,
verbose=0,
positive=False,
random_state=None,
return_n_iter=False,
return_intercept=False,
check_input=True,
):
"""Solve the ridge equation by the method of normal equations.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
X : {array-like, sparse matrix, LinearOperator} of shape \
(n_samples, n_features)
Training data.
y : array-like of shape (n_samples,) or (n_samples, n_targets)
Target values.
alpha : float or array-like of shape (n_targets,)
Constant that multiplies the L2 term, controlling regularization
strength. `alpha` must be a non-negative float i.e. in `[0, inf)`.
When `alpha = 0`, the objective is equivalent to ordinary least
squares, solved by the :class:`LinearRegression` object. For numerical
reasons, using `alpha = 0` with the `Ridge` object is not advised.
Instead, you should use the :class:`LinearRegression` object.
If an array is passed, penalties are assumed to be specific to the
targets. Hence they must correspond in number.
sample_weight : float or array-like of shape (n_samples,), default=None
Individual weights for each sample. If given a float, every sample
will have the same weight. If sample_weight is not None and
solver='auto', the solver will be set to 'cholesky'.
.. versionadded:: 0.17
solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg', \
'sag', 'saga', 'lbfgs'}, default='auto'
Solver to use in the computational routines:
- 'auto' chooses the solver automatically based on the type of data.
- 'svd' uses a Singular Value Decomposition of X to compute the Ridge
coefficients. It is the most stable solver, in particular more stable
for singular matrices than 'cholesky' at the cost of being slower.
- 'cholesky' uses the standard scipy.linalg.solve function to
obtain a closed-form solution via a Cholesky decomposition of
dot(X.T, X)
- 'sparse_cg' uses the conjugate gradient solver as found in
scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
more appropriate than 'cholesky' for large-scale data
(possibility to set `tol` and `max_iter`).
- 'lsqr' uses the dedicated regularized least-squares routine
scipy.sparse.linalg.lsqr. It is the fastest and uses an iterative
procedure.
- 'sag' uses a Stochastic Average Gradient descent, and 'saga' uses
its improved, unbiased version named SAGA. Both methods also use an
iterative procedure, and are often faster than other solvers when
both n_samples and n_features are large. Note that 'sag' and
'saga' fast convergence is only guaranteed on features with
approximately the same scale. You can preprocess the data with a
scaler from sklearn.preprocessing.
- 'lbfgs' uses L-BFGS-B algorithm implemented in
`scipy.optimize.minimize`. It can be used only when `positive`
is True.
All solvers except 'svd' support both dense and sparse data. However, only
'lsqr', 'sag', 'sparse_cg', and 'lbfgs' support sparse input when
`fit_intercept` is True.
.. versionadded:: 0.17
Stochastic Average Gradient descent solver.
.. versionadded:: 0.19
SAGA solver.
max_iter : int, default=None
Maximum number of iterations for conjugate gradient solver.
For the 'sparse_cg' and 'lsqr' solvers, the default value is determined
by scipy.sparse.linalg. For the 'sag' and 'saga' solvers, the default value is
1000. For 'lbfgs' solver, the default value is 15000.
tol : float, default=1e-4
Precision of the solution. Note that `tol` has no effect for solvers 'svd' and
'cholesky'.
.. versionchanged:: 1.2
Default value changed from 1e-3 to 1e-4 for consistency with other linear
models.
verbose : int, default=0
Verbosity level. Setting verbose > 0 will display additional
information depending on the solver used.
positive : bool, default=False
When set to ``True``, forces the coefficients to be positive.
Only 'lbfgs' solver is supported in this case.
random_state : int, RandomState instance, default=None
Used when ``solver`` == 'sag' or 'saga' to shuffle the data.
See :term:`Glossary <random_state>` for details.
return_n_iter : bool, default=False
If True, the method also returns `n_iter`, the actual number of
iterations performed by the solver.
.. versionadded:: 0.17
return_intercept : bool, default=False
If True and if X is sparse, the method also returns the intercept,
and the solver is automatically changed to 'sag'. This is only a
temporary fix for fitting the intercept with sparse data. For dense
data, use sklearn.linear_model._preprocess_data before your regression.
.. versionadded:: 0.17
check_input : bool, default=True
If False, the input arrays X and y will not be checked.
.. versionadded:: 0.21
Returns
-------
coef : ndarray of shape (n_features,) or (n_targets, n_features)
Weight vector(s).
n_iter : int, optional
The actual number of iterations performed by the solver.
Only returned if `return_n_iter` is True.
intercept : float or ndarray of shape (n_targets,)
The intercept of the model. Only returned if `return_intercept`
is True and if X is a scipy sparse array.
Notes
-----
This function won't compute the intercept.
Regularization improves the conditioning of the problem and
reduces the variance of the estimates. Larger values specify stronger
regularization. Alpha corresponds to ``1 / (2C)`` in other linear
models such as :class:`~sklearn.linear_model.LogisticRegression` or
:class:`~sklearn.svm.LinearSVC`. If an array is passed, penalties are
assumed to be specific to the targets. Hence they must correspond in
number.
Examples
--------
>>> import numpy as np
>>> from sklearn.datasets import make_regression
>>> from sklearn.linear_model import ridge_regression
>>> rng = np.random.RandomState(0)
>>> X = rng.randn(100, 4)
>>> y = 2.0 * X[:, 0] - 1.0 * X[:, 1] + 0.1 * rng.standard_normal(100)
>>> coef, intercept = ridge_regression(X, y, alpha=1.0, return_intercept=True,
... random_state=0)
>>> coef
array([ 1.97, -1., -2.69e-3, -9.27e-4 ])
>>> intercept
np.float64(-.0012)
"""
return _ridge_regression(
X,
y,
alpha,
sample_weight=sample_weight,
solver=solver,
max_iter=max_iter,
tol=tol,
verbose=verbose,
positive=positive,
random_state=random_state,
return_n_iter=return_n_iter,
return_intercept=return_intercept,
X_scale=None,
X_offset=None,
check_input=check_input,
)
|
Solve the ridge equation by the method of normal equations.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
X : {array-like, sparse matrix, LinearOperator} of shape (n_samples, n_features)
Training data.
y : array-like of shape (n_samples,) or (n_samples, n_targets)
Target values.
alpha : float or array-like of shape (n_targets,)
Constant that multiplies the L2 term, controlling regularization
strength. `alpha` must be a non-negative float i.e. in `[0, inf)`.
When `alpha = 0`, the objective is equivalent to ordinary least
squares, solved by the :class:`LinearRegression` object. For numerical
reasons, using `alpha = 0` with the `Ridge` object is not advised.
Instead, you should use the :class:`LinearRegression` object.
If an array is passed, penalties are assumed to be specific to the
targets. Hence they must correspond in number.
sample_weight : float or array-like of shape (n_samples,), default=None
Individual weights for each sample. If given a float, every sample
will have the same weight. If sample_weight is not None and
solver='auto', the solver will be set to 'cholesky'.
.. versionadded:: 0.17
solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg', 'sag', 'saga', 'lbfgs'}, default='auto'
Solver to use in the computational routines:
- 'auto' chooses the solver automatically based on the type of data.
- 'svd' uses a Singular Value Decomposition of X to compute the Ridge
coefficients. It is the most stable solver, in particular more stable
for singular matrices than 'cholesky' at the cost of being slower.
- 'cholesky' uses the standard scipy.linalg.solve function to
obtain a closed-form solution via a Cholesky decomposition of
dot(X.T, X)
- 'sparse_cg' uses the conjugate gradient solver as found in
scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
more appropriate than 'cholesky' for large-scale data
(possibility to set `tol` and `max_iter`).
- 'lsqr' uses the dedicated regularized least-squares routine
scipy.sparse.linalg.lsqr. It is the fastest and uses an iterative
procedure.
- 'sag' uses a Stochastic Average Gradient descent, and 'saga' uses
its improved, unbiased version named SAGA. Both methods also use an
iterative procedure, and are often faster than other solvers when
both n_samples and n_features are large. Note that 'sag' and
'saga' fast convergence is only guaranteed on features with
approximately the same scale. You can preprocess the data with a
scaler from sklearn.preprocessing.
- 'lbfgs' uses L-BFGS-B algorithm implemented in
`scipy.optimize.minimize`. It can be used only when `positive`
is True.
All solvers except 'svd' support both dense and sparse data. However, only
'lsqr', 'sag', 'sparse_cg', and 'lbfgs' support sparse input when
`fit_intercept` is True.
.. versionadded:: 0.17
Stochastic Average Gradient descent solver.
.. versionadded:: 0.19
SAGA solver.
max_iter : int, default=None
Maximum number of iterations for conjugate gradient solver.
For the 'sparse_cg' and 'lsqr' solvers, the default value is determined
by scipy.sparse.linalg. For the 'sag' and 'saga' solvers, the default value is
1000. For 'lbfgs' solver, the default value is 15000.
tol : float, default=1e-4
Precision of the solution. Note that `tol` has no effect for solvers 'svd' and
'cholesky'.
.. versionchanged:: 1.2
Default value changed from 1e-3 to 1e-4 for consistency with other linear
models.
verbose : int, default=0
Verbosity level. Setting verbose > 0 will display additional
information depending on the solver used.
positive : bool, default=False
When set to ``True``, forces the coefficients to be positive.
Only 'lbfgs' solver is supported in this case.
random_state : int, RandomState instance, default=None
Used when ``solver`` == 'sag' or 'saga' to shuffle the data.
See :term:`Glossary <random_state>` for details.
return_n_iter : bool, default=False
If True, the method also returns `n_iter`, the actual number of
iterations performed by the solver.
.. versionadded:: 0.17
return_intercept : bool, default=False
If True and if X is sparse, the method also returns the intercept,
and the solver is automatically changed to 'sag'. This is only a
temporary fix for fitting the intercept with sparse data. For dense
data, use sklearn.linear_model._preprocess_data before your regression.
.. versionadded:: 0.17
check_input : bool, default=True
If False, the input arrays X and y will not be checked.
.. versionadded:: 0.21
Returns
-------
coef : ndarray of shape (n_features,) or (n_targets, n_features)
Weight vector(s).
n_iter : int, optional
The actual number of iterations performed by the solver.
Only returned if `return_n_iter` is True.
intercept : float or ndarray of shape (n_targets,)
The intercept of the model. Only returned if `return_intercept`
is True and if X is a scipy sparse array.
Notes
-----
This function won't compute the intercept.
Regularization improves the conditioning of the problem and
reduces the variance of the estimates. Larger values specify stronger
regularization. Alpha corresponds to ``1 / (2C)`` in other linear
models such as :class:`~sklearn.linear_model.LogisticRegression` or
:class:`~sklearn.svm.LinearSVC`. If an array is passed, penalties are
assumed to be specific to the targets. Hence they must correspond in
number.
Examples
--------
>>> import numpy as np
>>> from sklearn.datasets import make_regression
>>> from sklearn.linear_model import ridge_regression
>>> rng = np.random.RandomState(0)
>>> X = rng.randn(100, 4)
>>> y = 2.0 * X[:, 0] - 1.0 * X[:, 1] + 0.1 * rng.standard_normal(100)
>>> coef, intercept = ridge_regression(X, y, alpha=1.0, return_intercept=True,
... random_state=0)
>>> coef
array([ 1.97, -1., -2.69e-3, -9.27e-4 ])
>>> intercept
np.float64(-.0012)
|
ridge_regression
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/_ridge.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/_ridge.py
|
BSD-3-Clause
|
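A short usage sketch beyond the docstring example (my own, hedged): with a 2-d `y`, `alpha` may be an array holding one penalty per target.

import numpy as np
from sklearn.linear_model import ridge_regression

rng = np.random.RandomState(0)
X = rng.randn(30, 4)
Y = rng.randn(30, 2)                                # two targets
coef = ridge_regression(X, Y, alpha=[0.5, 5.0])     # one alpha per target
print(coef.shape)                                   # (n_targets, n_features)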
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model.
Parameters
----------
X : {ndarray, sparse matrix} of shape (n_samples, n_features)
Training data.
y : ndarray of shape (n_samples,) or (n_samples, n_targets)
Target values.
sample_weight : float or ndarray of shape (n_samples,), default=None
Individual weights for each sample. If given a float, every sample
will have the same weight.
Returns
-------
self : object
Fitted estimator.
"""
_accept_sparse = _get_valid_accept_sparse(sparse.issparse(X), self.solver)
xp, _ = get_namespace(X, y, sample_weight)
X, y = validate_data(
self,
X,
y,
accept_sparse=_accept_sparse,
dtype=[xp.float64, xp.float32],
force_writeable=True,
multi_output=True,
y_numeric=True,
)
return super().fit(X, y, sample_weight=sample_weight)
|
Fit Ridge regression model.
Parameters
----------
X : {ndarray, sparse matrix} of shape (n_samples, n_features)
Training data.
y : ndarray of shape (n_samples,) or (n_samples, n_targets)
Target values.
sample_weight : float or ndarray of shape (n_samples,), default=None
Individual weights for each sample. If given a float, every sample
will have the same weight.
Returns
-------
self : object
Fitted estimator.
|
fit
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/_ridge.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/_ridge.py
|
BSD-3-Clause
|
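For completeness, a brief usage sketch of the public estimator this `fit` belongs to (illustrative data, not from the source):

import numpy as np
from sklearn.linear_model import Ridge

rng = np.random.RandomState(0)
X = rng.randn(100, 3)
y = X @ np.array([1.0, 0.0, -2.0]) + 0.1 * rng.randn(100)

model = Ridge(alpha=1.0).fit(X, y)
print(model.coef_, model.intercept_)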
def _prepare_data(self, X, y, sample_weight, solver):
"""Validate `X` and `y` and binarize `y`.
Parameters
----------
X : {ndarray, sparse matrix} of shape (n_samples, n_features)
Training data.
y : ndarray of shape (n_samples,)
Target values.
sample_weight : float or ndarray of shape (n_samples,), default=None
Individual weights for each sample. If given a float, every sample
will have the same weight.
solver : str
The solver used in `Ridge` to know which sparse format to support.
Returns
-------
X : {ndarray, sparse matrix} of shape (n_samples, n_features)
Validated training data.
y : ndarray of shape (n_samples,)
Validated target values.
sample_weight : ndarray of shape (n_samples,)
Validated sample weights.
Y : ndarray of shape (n_samples, n_classes)
The binarized version of `y`.
"""
accept_sparse = _get_valid_accept_sparse(sparse.issparse(X), solver)
X, y = validate_data(
self,
X,
y,
accept_sparse=accept_sparse,
multi_output=True,
y_numeric=False,
force_writeable=True,
)
self._label_binarizer = LabelBinarizer(pos_label=1, neg_label=-1)
Y = self._label_binarizer.fit_transform(y)
if not self._label_binarizer.y_type_.startswith("multilabel"):
y = column_or_1d(y, warn=True)
sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
if self.class_weight:
sample_weight = sample_weight * compute_sample_weight(self.class_weight, y)
return X, y, sample_weight, Y
|
Validate `X` and `y` and binarize `y`.
Parameters
----------
X : {ndarray, sparse matrix} of shape (n_samples, n_features)
Training data.
y : ndarray of shape (n_samples,)
Target values.
sample_weight : float or ndarray of shape (n_samples,), default=None
Individual weights for each sample. If given a float, every sample
will have the same weight.
solver : str
The solver used in `Ridge` to know which sparse format to support.
Returns
-------
X : {ndarray, sparse matrix} of shape (n_samples, n_features)
Validated training data.
y : ndarray of shape (n_samples,)
Validated target values.
sample_weight : ndarray of shape (n_samples,)
Validated sample weights.
Y : ndarray of shape (n_samples, n_classes)
The binarized version of `y`.
|
_prepare_data
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/_ridge.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/_ridge.py
|
BSD-3-Clause
|
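The two preprocessing steps worth seeing in isolation are the {-1, +1} binarization and the folding of `class_weight` into per-sample weights. A small sketch using the public helpers (my own example, not from the source):

import numpy as np
from sklearn.preprocessing import LabelBinarizer
from sklearn.utils.class_weight import compute_sample_weight

y = np.array(["a", "b", "b", "c"])

# Binarize to {-1, +1} columns, one column per class, as _prepare_data does.
Y = LabelBinarizer(pos_label=1, neg_label=-1).fit_transform(y)
print(Y.shape)                  # (4, 3)

# A class_weight setting is folded into the per-sample weights.
sample_weight = np.ones(len(y)) * compute_sample_weight("balanced", y)
print(sample_weight)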
def predict(self, X):
"""Predict class labels for samples in `X`.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The data matrix for which we want to predict the targets.
Returns
-------
y_pred : ndarray of shape (n_samples,) or (n_samples, n_outputs)
Vector or matrix containing the predictions. In binary and
multiclass problems, this is a vector of length `n_samples`. In
a multilabel problem, it returns a matrix of shape
`(n_samples, n_outputs)`.
"""
check_is_fitted(self, attributes=["_label_binarizer"])
if self._label_binarizer.y_type_.startswith("multilabel"):
# Threshold such that the negative label is -1 and positive label
# is 1 to use the inverse transform of the label binarizer fitted
# during fit.
scores = 2 * (self.decision_function(X) > 0) - 1
return self._label_binarizer.inverse_transform(scores)
return super().predict(X)
|
Predict class labels for samples in `X`.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The data matrix for which we want to predict the targets.
Returns
-------
y_pred : ndarray of shape (n_samples,) or (n_samples, n_outputs)
Vector or matrix containing the predictions. In binary and
multiclass problems, this is a vector of length `n_samples`. In
a multilabel problem, it returns a matrix of shape
`(n_samples, n_outputs)`.
|
predict
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/_ridge.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/_ridge.py
|
BSD-3-Clause
|
def fit(self, X, y, sample_weight=None):
"""Fit Ridge classifier model.
Parameters
----------
X : {ndarray, sparse matrix} of shape (n_samples, n_features)
Training data.
y : ndarray of shape (n_samples,)
Target values.
sample_weight : float or ndarray of shape (n_samples,), default=None
Individual weights for each sample. If given a float, every sample
will have the same weight.
.. versionadded:: 0.17
*sample_weight* support to RidgeClassifier.
Returns
-------
self : object
Instance of the estimator.
"""
X, y, sample_weight, Y = self._prepare_data(X, y, sample_weight, self.solver)
super().fit(X, Y, sample_weight=sample_weight)
return self
|
Fit Ridge classifier model.
Parameters
----------
X : {ndarray, sparse matrix} of shape (n_samples, n_features)
Training data.
y : ndarray of shape (n_samples,)
Target values.
sample_weight : float or ndarray of shape (n_samples,), default=None
Individual weights for each sample. If given a float, every sample
will have the same weight.
.. versionadded:: 0.17
*sample_weight* support to RidgeClassifier.
Returns
-------
self : object
Instance of the estimator.
|
fit
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/_ridge.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/_ridge.py
|
BSD-3-Clause
|
def _find_smallest_angle(query, vectors):
"""Find the column of vectors that is most aligned with the query.
Both query and the columns of vectors must have their l2 norm equal to 1.
Parameters
----------
query : ndarray of shape (n_samples,)
Normalized query vector.
vectors : ndarray of shape (n_samples, n_features)
Vectors to which we compare query, as columns. Must be normalized.
"""
abs_cosine = np.abs(query.dot(vectors))
index = np.argmax(abs_cosine)
return index
|
Find the column of vectors that is most aligned with the query.
Both query and the columns of vectors must have their l2 norm equal to 1.
Parameters
----------
query : ndarray of shape (n_samples,)
Normalized query vector.
vectors : ndarray of shape (n_samples, n_features)
Vectors to which we compare query, as columns. Must be normalized.
|
_find_smallest_angle
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/_ridge.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/_ridge.py
|
BSD-3-Clause
|
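A tiny check (my own, not from the source) of the |cos| criterion: the column that is parallel or anti-parallel to the query wins.

import numpy as np

rng = np.random.RandomState(0)
Q, _ = np.linalg.qr(rng.randn(6, 4))    # orthonormal columns, e.g. eigenvectors
query = -Q[:, 2]                        # anti-parallel to column 2, |cos| = 1

abs_cosine = np.abs(query.dot(Q))       # |cos(angle)| with every column
assert int(np.argmax(abs_cosine)) == 2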
def _compute_gram(self, X, sqrt_sw):
"""Computes the Gram matrix XX^T with possible centering.
Parameters
----------
X : {ndarray, sparse matrix} of shape (n_samples, n_features)
The preprocessed design matrix.
sqrt_sw : ndarray of shape (n_samples,)
square roots of sample weights
Returns
-------
gram : ndarray of shape (n_samples, n_samples)
The Gram matrix.
X_mean : ndarray of shape (n_features,)
The weighted mean of ``X`` for each feature.
Notes
-----
When X is dense the centering has been done in preprocessing
so the mean is 0 and we just compute XX^T.
When X is sparse it has not been centered in preprocessing, but it has
been scaled by sqrt(sample weights).
When self.fit_intercept is False no centering is done.
The centered X is never actually computed because centering would break
the sparsity of X.
"""
center = self.fit_intercept and sparse.issparse(X)
if not center:
# in this case centering has been done in preprocessing
# or we are not fitting an intercept.
X_mean = np.zeros(X.shape[1], dtype=X.dtype)
return safe_sparse_dot(X, X.T, dense_output=True), X_mean
# X is sparse
n_samples = X.shape[0]
sample_weight_matrix = sparse.dia_matrix(
(sqrt_sw, 0), shape=(n_samples, n_samples)
)
X_weighted = sample_weight_matrix.dot(X)
X_mean, _ = mean_variance_axis(X_weighted, axis=0)
X_mean *= n_samples / sqrt_sw.dot(sqrt_sw)
X_mX = sqrt_sw[:, None] * safe_sparse_dot(X_mean, X.T, dense_output=True)
X_mX_m = np.outer(sqrt_sw, sqrt_sw) * np.dot(X_mean, X_mean)
return (
safe_sparse_dot(X, X.T, dense_output=True) + X_mX_m - X_mX - X_mX.T,
X_mean,
)
|
Computes the Gram matrix XX^T with possible centering.
Parameters
----------
X : {ndarray, sparse matrix} of shape (n_samples, n_features)
The preprocessed design matrix.
sqrt_sw : ndarray of shape (n_samples,)
square roots of sample weights
Returns
-------
gram : ndarray of shape (n_samples, n_samples)
The Gram matrix.
X_mean : ndarray of shape (n_features,)
The weighted mean of ``X`` for each feature.
Notes
-----
When X is dense the centering has been done in preprocessing
so the mean is 0 and we just compute XX^T.
When X is sparse it has not been centered in preprocessing, but it has
been scaled by sqrt(sample weights).
When self.fit_intercept is False no centering is done.
The centered X is never actually computed because centering would break
the sparsity of X.
|
_compute_gram
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/_ridge.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/_ridge.py
|
BSD-3-Clause
|
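The implicit centering can be verified on a small example with unit weights, where the weighted mean reduces to the column mean (my own sketch, not from the source):

import numpy as np
from scipy import sparse

rng = np.random.RandomState(0)
X = sparse.random(7, 4, density=0.5, format="csr", random_state=rng)
Xd = X.toarray()
sqrt_sw = np.ones(X.shape[0])           # unit sample weights

X_mean = Xd.mean(axis=0)
X_mX = sqrt_sw[:, None] * (X_mean @ Xd.T)
gram_implicit = (
    Xd @ Xd.T + np.outer(sqrt_sw, sqrt_sw) * (X_mean @ X_mean) - X_mX - X_mX.T
)
gram_explicit = (Xd - X_mean) @ (Xd - X_mean).T
assert np.allclose(gram_implicit, gram_explicit)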
def _compute_covariance(self, X, sqrt_sw):
"""Computes covariance matrix X^TX with possible centering.
Parameters
----------
X : sparse matrix of shape (n_samples, n_features)
The preprocessed design matrix.
sqrt_sw : ndarray of shape (n_samples,)
square roots of sample weights
Returns
-------
covariance : ndarray of shape (n_features, n_features)
The covariance matrix.
X_mean : ndarray of shape (n_features,)
The weighted mean of ``X`` for each feature.
Notes
-----
Since X is sparse it has not been centered in preprocessing, but it has
been scaled by sqrt(sample weights).
When self.fit_intercept is False no centering is done.
The centered X is never actually computed because centering would break
the sparsity of X.
"""
if not self.fit_intercept:
# in this case centering has been done in preprocessing
# or we are not fitting an intercept.
X_mean = np.zeros(X.shape[1], dtype=X.dtype)
return safe_sparse_dot(X.T, X, dense_output=True), X_mean
# this function only gets called for sparse X
n_samples = X.shape[0]
sample_weight_matrix = sparse.dia_matrix(
(sqrt_sw, 0), shape=(n_samples, n_samples)
)
X_weighted = sample_weight_matrix.dot(X)
X_mean, _ = mean_variance_axis(X_weighted, axis=0)
X_mean = X_mean * n_samples / sqrt_sw.dot(sqrt_sw)
weight_sum = sqrt_sw.dot(sqrt_sw)
return (
safe_sparse_dot(X.T, X, dense_output=True)
- weight_sum * np.outer(X_mean, X_mean),
X_mean,
)
|
Computes covariance matrix X^TX with possible centering.
Parameters
----------
X : sparse matrix of shape (n_samples, n_features)
The preprocessed design matrix.
sqrt_sw : ndarray of shape (n_samples,)
square roots of sample weights
Returns
-------
covariance : ndarray of shape (n_features, n_features)
The covariance matrix.
X_mean : ndarray of shape (n_features,)
The weighted mean of ``X`` for each feature.
Notes
-----
Since X is sparse it has not been centered in preprocessing, but it has
been scaled by sqrt(sample weights).
When self.fit_intercept is False no centering is done.
The centered X is never actually computed because centering would break
the sparsity of X.
|
_compute_covariance
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/_ridge.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/_ridge.py
|
BSD-3-Clause
|
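Same idea for the covariance: with unit weights, X^T X - n * outer(mean, mean) equals the covariance of the explicitly centered matrix (my own check, not from the source):

import numpy as np
from scipy import sparse

rng = np.random.RandomState(0)
X = sparse.random(9, 3, density=0.6, format="csr", random_state=rng)
Xd = X.toarray()
sqrt_sw = np.ones(X.shape[0])

X_mean = Xd.mean(axis=0)
weight_sum = sqrt_sw.dot(sqrt_sw)       # equals n_samples for unit weights
cov_implicit = Xd.T @ Xd - weight_sum * np.outer(X_mean, X_mean)
cov_explicit = (Xd - X_mean).T @ (Xd - X_mean)
assert np.allclose(cov_implicit, cov_explicit)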
def _sparse_multidot_diag(self, X, A, X_mean, sqrt_sw):
"""Compute the diagonal of (X - X_mean).dot(A).dot((X - X_mean).T)
without explicitly centering X nor computing X.dot(A)
when X is sparse.
Parameters
----------
X : sparse matrix of shape (n_samples, n_features)
A : ndarray of shape (n_features, n_features)
X_mean : ndarray of shape (n_features,)
sqrt_sw : ndarray of shape (n_samples,)
square roots of sample weights
Returns
-------
diag : np.ndarray, shape (n_samples,)
The computed diagonal.
"""
intercept_col = scale = sqrt_sw
batch_size = X.shape[1]
diag = np.empty(X.shape[0], dtype=X.dtype)
for start in range(0, X.shape[0], batch_size):
batch = slice(start, min(X.shape[0], start + batch_size), 1)
X_batch = np.empty(
(X[batch].shape[0], X.shape[1] + self.fit_intercept), dtype=X.dtype
)
if self.fit_intercept:
X_batch[:, :-1] = X[batch].toarray() - X_mean * scale[batch][:, None]
X_batch[:, -1] = intercept_col[batch]
else:
X_batch = X[batch].toarray()
diag[batch] = (X_batch.dot(A) * X_batch).sum(axis=1)
return diag
|
Compute the diagonal of (X - X_mean).dot(A).dot((X - X_mean).T)
without explicitly centering X nor computing X.dot(A)
when X is sparse.
Parameters
----------
X : sparse matrix of shape (n_samples, n_features)
A : ndarray of shape (n_features, n_features)
X_mean : ndarray of shape (n_features,)
sqrt_sw : ndarray of shape (n_samples,)
square roots of sample weights
Returns
-------
diag : np.ndarray, shape (n_samples,)
The computed diagonal.
|
_sparse_multidot_diag
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/_ridge.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/_ridge.py
|
BSD-3-Clause
|
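The core trick is that diag(M A M^T) can be read off row by row as ((M @ A) * M).sum(axis=1), which avoids forming the n_samples x n_samples product. A dense illustration (my own, not from the source):

import numpy as np

rng = np.random.RandomState(0)
X = rng.randn(6, 3)
A = rng.randn(3, 3)
A = A @ A.T                              # any symmetric matrix
Xc = X - X.mean(axis=0)                  # explicit centering, for comparison

diag = ((Xc @ A) * Xc).sum(axis=1)       # row-wise diagonal, no n x n product
assert np.allclose(diag, np.diag(Xc @ A @ Xc.T))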
def _solve_eigen_gram(self, alpha, y, sqrt_sw, X_mean, eigvals, Q, QT_y):
"""Compute dual coefficients and diagonal of G^-1.
Used when we have a decomposition of X.X^T (n_samples <= n_features).
"""
w = 1.0 / (eigvals + alpha)
if self.fit_intercept:
# the vector containing the square roots of the sample weights (1
# when no sample weights) is the eigenvector of XX^T which
# corresponds to the intercept; we cancel the regularization on
# this dimension. the corresponding eigenvalue is
# sum(sample_weight).
normalized_sw = sqrt_sw / np.linalg.norm(sqrt_sw)
intercept_dim = _find_smallest_angle(normalized_sw, Q)
w[intercept_dim] = 0 # cancel regularization for the intercept
c = np.dot(Q, self._diag_dot(w, QT_y))
G_inverse_diag = self._decomp_diag(w, Q)
# handle case where y is 2-d
if len(y.shape) != 1:
G_inverse_diag = G_inverse_diag[:, np.newaxis]
return G_inverse_diag, c
|
Compute dual coefficients and diagonal of G^-1.
Used when we have a decomposition of X.X^T (n_samples <= n_features).
|
_solve_eigen_gram
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/_ridge.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/_ridge.py
|
BSD-3-Clause
|
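The quantities returned here feed the leave-one-out shortcut: the LOO residual of ridge is the ordinary residual divided by 1 - h_ii, with H = X (X^T X + alpha I)^{-1} X^T. A numerical check of that identity against explicit refits, for the intercept-free case (my own sketch, not from the source):

import numpy as np

rng = np.random.RandomState(0)
X, y, alpha = rng.randn(20, 5), rng.randn(20), 1.0

H = X @ np.linalg.inv(X.T @ X + alpha * np.eye(5)) @ X.T    # hat matrix
loo_closed_form = (y - H @ y) / (1 - np.diag(H))

loo_refit = np.empty_like(y)
for i in range(len(y)):
    mask = np.arange(len(y)) != i
    w = np.linalg.solve(X[mask].T @ X[mask] + alpha * np.eye(5),
                        X[mask].T @ y[mask])
    loo_refit[i] = y[i] - X[i] @ w

assert np.allclose(loo_closed_form, loo_refit)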
def _eigen_decompose_covariance(self, X, y, sqrt_sw):
"""Eigendecomposition of X^T.X, used when n_samples > n_features
and X is sparse.
"""
n_samples, n_features = X.shape
cov = np.empty((n_features + 1, n_features + 1), dtype=X.dtype)
cov[:-1, :-1], X_mean = self._compute_covariance(X, sqrt_sw)
if not self.fit_intercept:
cov = cov[:-1, :-1]
# To emulate centering X with sample weights,
# i.e. removing the weighted average, we add a column
# containing the square roots of the sample weights.
# By centering, it is orthogonal to the other columns;
# when all samples have the same weight, we add a column of 1.
else:
cov[-1] = 0
cov[:, -1] = 0
cov[-1, -1] = sqrt_sw.dot(sqrt_sw)
nullspace_dim = max(0, n_features - n_samples)
eigvals, V = linalg.eigh(cov)
# remove eigenvalues and vectors in the null space of X^T.X
eigvals = eigvals[nullspace_dim:]
V = V[:, nullspace_dim:]
return X_mean, eigvals, V, X
|
Eigendecomposition of X^T.X, used when n_samples > n_features
and X is sparse.
|
_eigen_decompose_covariance
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/_ridge.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/_ridge.py
|
BSD-3-Clause
|
def _solve_eigen_covariance_no_intercept(
self, alpha, y, sqrt_sw, X_mean, eigvals, V, X
):
"""Compute dual coefficients and diagonal of G^-1.
Used when we have a decomposition of X^T.X
(n_samples > n_features and X is sparse), and not fitting an intercept.
"""
w = 1 / (eigvals + alpha)
A = (V * w).dot(V.T)
AXy = A.dot(safe_sparse_dot(X.T, y, dense_output=True))
y_hat = safe_sparse_dot(X, AXy, dense_output=True)
hat_diag = self._sparse_multidot_diag(X, A, X_mean, sqrt_sw)
if len(y.shape) != 1:
# handle case where y is 2-d
hat_diag = hat_diag[:, np.newaxis]
return (1 - hat_diag) / alpha, (y - y_hat) / alpha
|
Compute dual coefficients and diagonal of G^-1.
Used when we have a decomposition of X^T.X
(n_samples > n_features and X is sparse), and not fitting an intercept.
|
_solve_eigen_covariance_no_intercept
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/_ridge.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/_ridge.py
|
BSD-3-Clause
|
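The step `A = (V * w).dot(V.T)` with `w = 1 / (eigvals + alpha)` is just the eigendecomposition form of (X^T X + alpha I)^{-1}; a quick check (my own, not from the source):

import numpy as np
from scipy import linalg

rng = np.random.RandomState(0)
X = rng.randn(40, 6)
alpha = 0.5

eigvals, V = linalg.eigh(X.T @ X)
A = (V * (1.0 / (eigvals + alpha))) @ V.T      # (X^T X + alpha I)^{-1}
assert np.allclose(A, np.linalg.inv(X.T @ X + alpha * np.eye(6)))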
def _solve_eigen_covariance_intercept(
self, alpha, y, sqrt_sw, X_mean, eigvals, V, X
):
"""Compute dual coefficients and diagonal of G^-1.
Used when we have a decomposition of X^T.X
(n_samples > n_features and X is sparse),
and we are fitting an intercept.
"""
# the vector [0, 0, ..., 0, 1]
# is the eigenvector of X^TX which
# corresponds to the intercept; we cancel the regularization on
# this dimension. the corresponding eigenvalue is
# sum(sample_weight), e.g. n when uniform sample weights.
intercept_sv = np.zeros(V.shape[0])
intercept_sv[-1] = 1
intercept_dim = _find_smallest_angle(intercept_sv, V)
w = 1 / (eigvals + alpha)
w[intercept_dim] = 1 / eigvals[intercept_dim]
A = (V * w).dot(V.T)
# add a column to X containing the square roots of sample weights
X_op = _X_CenterStackOp(X, X_mean, sqrt_sw)
AXy = A.dot(X_op.T.dot(y))
y_hat = X_op.dot(AXy)
hat_diag = self._sparse_multidot_diag(X, A, X_mean, sqrt_sw)
# return (1 - hat_diag), (y - y_hat)
if len(y.shape) != 1:
# handle case where y is 2-d
hat_diag = hat_diag[:, np.newaxis]
return (1 - hat_diag) / alpha, (y - y_hat) / alpha
|
Compute dual coefficients and diagonal of G^-1.
Used when we have a decomposition of X^T.X
(n_samples > n_features and X is sparse),
and we are fitting an intercept.
|
_solve_eigen_covariance_intercept
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/_ridge.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/_ridge.py
|
BSD-3-Clause
|
def _solve_eigen_covariance(self, alpha, y, sqrt_sw, X_mean, eigvals, V, X):
"""Compute dual coefficients and diagonal of G^-1.
Used when we have a decomposition of X^T.X
(n_samples > n_features and X is sparse).
"""
if self.fit_intercept:
return self._solve_eigen_covariance_intercept(
alpha, y, sqrt_sw, X_mean, eigvals, V, X
)
return self._solve_eigen_covariance_no_intercept(
alpha, y, sqrt_sw, X_mean, eigvals, V, X
)
|
Compute dual coefficients and diagonal of G^-1.
Used when we have a decomposition of X^T.X
(n_samples > n_features and X is sparse).
|
_solve_eigen_covariance
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/_ridge.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/_ridge.py
|
BSD-3-Clause
|
def _solve_svd_design_matrix(self, alpha, y, sqrt_sw, X_mean, singvals_sq, U, UT_y):
"""Compute dual coefficients and diagonal of G^-1.
Used when we have an SVD decomposition of X
(n_samples > n_features and X is dense).
"""
w = ((singvals_sq + alpha) ** -1) - (alpha**-1)
if self.fit_intercept:
# detect intercept column
normalized_sw = sqrt_sw / np.linalg.norm(sqrt_sw)
intercept_dim = _find_smallest_angle(normalized_sw, U)
# cancel the regularization for the intercept
w[intercept_dim] = -(alpha**-1)
c = np.dot(U, self._diag_dot(w, UT_y)) + (alpha**-1) * y
G_inverse_diag = self._decomp_diag(w, U) + (alpha**-1)
if len(y.shape) != 1:
# handle case where y is 2-d
G_inverse_diag = G_inverse_diag[:, np.newaxis]
return G_inverse_diag, c
|
Compute dual coefficients and diagonal of G^-1.
Used when we have an SVD decomposition of X
(n_samples > n_features and X is dense).
|
_solve_svd_design_matrix
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/_ridge.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/_ridge.py
|
BSD-3-Clause
|
def fit(self, X, y, sample_weight=None, score_params=None):
"""Fit Ridge regression model with gcv.
Parameters
----------
X : {ndarray, sparse matrix} of shape (n_samples, n_features)
Training data. Will be cast to float64 if necessary.
y : ndarray of shape (n_samples,) or (n_samples, n_targets)
Target values. Will be cast to float64 if necessary.
sample_weight : float or ndarray of shape (n_samples,), default=None
Individual weights for each sample. If given a float, every sample
will have the same weight. Note that the scale of `sample_weight`
has an impact on the loss; i.e. multiplying all weights by `k`
is equivalent to setting `alpha` to `alpha / k`.
score_params : dict, default=None
Parameters to be passed to the underlying scorer.
.. versionadded:: 1.5
See :ref:`Metadata Routing User Guide <metadata_routing>` for
more details.
Returns
-------
self : object
"""
X, y = validate_data(
self,
X,
y,
accept_sparse=["csr", "csc", "coo"],
dtype=[np.float64],
multi_output=True,
y_numeric=True,
)
# alpha_per_target cannot be used in classifier mode. All subclasses
# of _RidgeGCV that are classifiers keep alpha_per_target at its
# default value: False, so the condition below should never happen.
assert not (self.is_clf and self.alpha_per_target)
if sample_weight is not None:
sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
self.alphas = np.asarray(self.alphas)
unscaled_y = y
X, y, X_offset, y_offset, X_scale = _preprocess_data(
X,
y,
fit_intercept=self.fit_intercept,
copy=self.copy_X,
sample_weight=sample_weight,
)
gcv_mode = _check_gcv_mode(X, self.gcv_mode)
if gcv_mode == "eigen":
decompose = self._eigen_decompose_gram
solve = self._solve_eigen_gram
elif gcv_mode == "svd":
if sparse.issparse(X):
decompose = self._eigen_decompose_covariance
solve = self._solve_eigen_covariance
else:
decompose = self._svd_decompose_design_matrix
solve = self._solve_svd_design_matrix
n_samples = X.shape[0]
if sample_weight is not None:
X, y, sqrt_sw = _rescale_data(X, y, sample_weight)
else:
sqrt_sw = np.ones(n_samples, dtype=X.dtype)
X_mean, *decomposition = decompose(X, y, sqrt_sw)
n_y = 1 if len(y.shape) == 1 else y.shape[1]
n_alphas = 1 if np.ndim(self.alphas) == 0 else len(self.alphas)
if self.store_cv_results:
self.cv_results_ = np.empty((n_samples * n_y, n_alphas), dtype=X.dtype)
best_coef, best_score, best_alpha = None, None, None
for i, alpha in enumerate(np.atleast_1d(self.alphas)):
G_inverse_diag, c = solve(float(alpha), y, sqrt_sw, X_mean, *decomposition)
if self.scoring is None:
squared_errors = (c / G_inverse_diag) ** 2
alpha_score = self._score_without_scorer(squared_errors=squared_errors)
if self.store_cv_results:
self.cv_results_[:, i] = squared_errors.ravel()
else:
predictions = y - (c / G_inverse_diag)
# Rescale predictions back to original scale
if sample_weight is not None: # avoid the unnecessary division by ones
if predictions.ndim > 1:
predictions /= sqrt_sw[:, None]
else:
predictions /= sqrt_sw
predictions += y_offset
if self.store_cv_results:
self.cv_results_[:, i] = predictions.ravel()
score_params = score_params or {}
alpha_score = self._score(
predictions=predictions,
y=unscaled_y,
n_y=n_y,
scorer=self.scoring,
score_params=score_params,
)
# Keep track of the best model
if best_score is None:
# initialize
if self.alpha_per_target and n_y > 1:
best_coef = c
best_score = np.atleast_1d(alpha_score)
best_alpha = np.full(n_y, alpha)
else:
best_coef = c
best_score = alpha_score
best_alpha = alpha
else:
# update
if self.alpha_per_target and n_y > 1:
to_update = alpha_score > best_score
best_coef[:, to_update] = c[:, to_update]
best_score[to_update] = alpha_score[to_update]
best_alpha[to_update] = alpha
elif alpha_score > best_score:
best_coef, best_score, best_alpha = c, alpha_score, alpha
self.alpha_ = best_alpha
self.best_score_ = best_score
self.dual_coef_ = best_coef
self.coef_ = safe_sparse_dot(self.dual_coef_.T, X)
if y.ndim == 1 or y.shape[1] == 1:
self.coef_ = self.coef_.ravel()
if sparse.issparse(X):
X_offset = X_mean * X_scale
else:
X_offset += X_mean * X_scale
self._set_intercept(X_offset, y_offset, X_scale)
if self.store_cv_results:
if len(y.shape) == 1:
cv_results_shape = n_samples, n_alphas
else:
cv_results_shape = n_samples, n_y, n_alphas
self.cv_results_ = self.cv_results_.reshape(cv_results_shape)
return self
|
Fit Ridge regression model with gcv.
Parameters
----------
X : {ndarray, sparse matrix} of shape (n_samples, n_features)
Training data. Will be cast to float64 if necessary.
y : ndarray of shape (n_samples,) or (n_samples, n_targets)
Target values. Will be cast to float64 if necessary.
sample_weight : float or ndarray of shape (n_samples,), default=None
Individual weights for each sample. If given a float, every sample
will have the same weight. Note that the scale of `sample_weight`
has an impact on the loss; i.e. multiplying all weights by `k`
is equivalent to setting `alpha` to `alpha / k`.
score_params : dict, default=None
Parameters to be passed to the underlying scorer.
.. versionadded:: 1.5
See :ref:`Metadata Routing User Guide <metadata_routing>` for
more details.
Returns
-------
self : object
|
fit
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/_ridge.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/_ridge.py
|
BSD-3-Clause
|
def _score_without_scorer(self, squared_errors):
"""Performs scoring using squared errors when the scorer is None."""
if self.alpha_per_target:
_score = -squared_errors.mean(axis=0)
else:
_score = -squared_errors.mean()
return _score
|
Performs scoring using squared errors when the scorer is None.
|
_score_without_scorer
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/_ridge.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/_ridge.py
|
BSD-3-Clause
|
def _score(self, *, predictions, y, n_y, scorer, score_params):
"""Performs scoring with the specified scorer using the
predictions and the true y values.
"""
if self.is_clf:
identity_estimator = _IdentityClassifier(classes=np.arange(n_y))
_score = scorer(
identity_estimator,
predictions,
y.argmax(axis=1),
**score_params,
)
else:
identity_estimator = _IdentityRegressor()
if self.alpha_per_target:
_score = np.array(
[
scorer(
identity_estimator,
predictions[:, j],
y[:, j],
**score_params,
)
for j in range(n_y)
]
)
else:
_score = scorer(identity_estimator, predictions, y, **score_params)
return _score
|
Performs scoring with the specified scorer using the
predictions and the true y values.
|
_score
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/_ridge.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/_ridge.py
|
BSD-3-Clause
|
def fit(self, X, y, sample_weight=None, **params):
"""Fit Ridge regression model with cv.
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Training data. If using GCV, will be cast to float64
if necessary.
y : ndarray of shape (n_samples,) or (n_samples, n_targets)
Target values. Will be cast to X's dtype if necessary.
sample_weight : float or ndarray of shape (n_samples,), default=None
Individual weights for each sample. If given a float, every sample
will have the same weight.
**params : dict, default=None
Extra parameters for the underlying scorer.
.. versionadded:: 1.5
Only available if `enable_metadata_routing=True`,
which can be set by using
``sklearn.set_config(enable_metadata_routing=True)``.
See :ref:`Metadata Routing User Guide <metadata_routing>` for
more details.
Returns
-------
self : object
Fitted estimator.
Notes
-----
When sample_weight is provided, the selected hyperparameter may depend
on whether we use leave-one-out cross-validation (cv=None)
or another form of cross-validation, because only leave-one-out
cross-validation takes the sample weights into account when computing
the validation score.
"""
_raise_for_params(params, self, "fit")
cv = self.cv
scorer = self._get_scorer()
# `_RidgeGCV` does not work for alpha = 0
if cv is None:
check_scalar_alpha = partial(
check_scalar,
target_type=numbers.Real,
min_val=0.0,
include_boundaries="neither",
)
else:
check_scalar_alpha = partial(
check_scalar,
target_type=numbers.Real,
min_val=0.0,
include_boundaries="left",
)
if isinstance(self.alphas, (np.ndarray, list, tuple)):
n_alphas = 1 if np.ndim(self.alphas) == 0 else len(self.alphas)
if n_alphas != 1:
for index, alpha in enumerate(self.alphas):
alpha = check_scalar_alpha(alpha, f"alphas[{index}]")
else:
self.alphas[0] = check_scalar_alpha(self.alphas[0], "alphas")
alphas = np.asarray(self.alphas)
if sample_weight is not None:
params["sample_weight"] = sample_weight
if cv is None:
if _routing_enabled():
routed_params = process_routing(
self,
"fit",
**params,
)
else:
routed_params = Bunch(scorer=Bunch(score={}))
if sample_weight is not None:
routed_params.scorer.score["sample_weight"] = sample_weight
# reset `scorer` variable to the original user intent if no scoring is passed
if self.scoring is None:
scorer = None
estimator = _RidgeGCV(
alphas,
fit_intercept=self.fit_intercept,
scoring=scorer,
gcv_mode=self.gcv_mode,
store_cv_results=self.store_cv_results,
is_clf=is_classifier(self),
alpha_per_target=self.alpha_per_target,
)
estimator.fit(
X,
y,
sample_weight=sample_weight,
score_params=routed_params.scorer.score,
)
self.alpha_ = estimator.alpha_
self.best_score_ = estimator.best_score_
if self.store_cv_results:
self.cv_results_ = estimator.cv_results_
else:
if self.store_cv_results:
raise ValueError("cv!=None and store_cv_results=True are incompatible")
if self.alpha_per_target:
raise ValueError("cv!=None and alpha_per_target=True are incompatible")
parameters = {"alpha": alphas}
solver = "sparse_cg" if sparse.issparse(X) else "auto"
model = RidgeClassifier if is_classifier(self) else Ridge
estimator = model(
fit_intercept=self.fit_intercept,
solver=solver,
)
if _routing_enabled():
estimator.set_fit_request(sample_weight=True)
grid_search = GridSearchCV(
estimator,
parameters,
cv=cv,
scoring=scorer,
)
grid_search.fit(X, y, **params)
estimator = grid_search.best_estimator_
self.alpha_ = grid_search.best_estimator_.alpha
self.best_score_ = grid_search.best_score_
self.coef_ = estimator.coef_
self.intercept_ = estimator.intercept_
self.n_features_in_ = estimator.n_features_in_
if hasattr(estimator, "feature_names_in_"):
self.feature_names_in_ = estimator.feature_names_in_
return self
|
Fit Ridge regression model with cv.
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Training data. If using GCV, will be cast to float64
if necessary.
y : ndarray of shape (n_samples,) or (n_samples, n_targets)
Target values. Will be cast to X's dtype if necessary.
sample_weight : float or ndarray of shape (n_samples,), default=None
Individual weights for each sample. If given a float, every sample
will have the same weight.
**params : dict, default=None
Extra parameters for the underlying scorer.
.. versionadded:: 1.5
Only available if `enable_metadata_routing=True`,
which can be set by using
``sklearn.set_config(enable_metadata_routing=True)``.
See :ref:`Metadata Routing User Guide <metadata_routing>` for
more details.
Returns
-------
self : object
Fitted estimator.
Notes
-----
When sample_weight is provided, the selected hyperparameter may depend
on whether we use leave-one-out cross-validation (cv=None)
or another form of cross-validation, because only leave-one-out
cross-validation takes the sample weights into account when computing
the validation score.
|
fit
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/_ridge.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/_ridge.py
|
BSD-3-Clause
|
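From the user side, this path is reached through RidgeCV; a brief usage sketch (my own; `best_score_` and the leave-one-out default `cv=None` are as documented above):

import numpy as np
from sklearn.linear_model import RidgeCV

rng = np.random.RandomState(0)
X = rng.randn(100, 3)
y = X @ np.array([1.0, -1.0, 0.5]) + 0.1 * rng.randn(100)

# cv=None (default) takes the efficient leave-one-out GCV branch above.
model = RidgeCV(alphas=[0.1, 1.0, 10.0]).fit(X, y)
print(model.alpha_, model.best_score_)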
def get_metadata_routing(self):
"""Get metadata routing of this object.
Please check :ref:`User Guide <metadata_routing>` on how the routing
mechanism works.
.. versionadded:: 1.5
Returns
-------
routing : MetadataRouter
A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating
routing information.
"""
router = (
MetadataRouter(owner=self.__class__.__name__)
.add_self_request(self)
.add(
scorer=self.scoring,
method_mapping=MethodMapping().add(caller="fit", callee="score"),
)
.add(
splitter=self.cv,
method_mapping=MethodMapping().add(caller="fit", callee="split"),
)
)
return router
|
Get metadata routing of this object.
Please check :ref:`User Guide <metadata_routing>` on how the routing
mechanism works.
.. versionadded:: 1.5
Returns
-------
routing : MetadataRouter
A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating
routing information.
|
get_metadata_routing
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/_ridge.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/_ridge.py
|
BSD-3-Clause
|
def fit(self, X, y, sample_weight=None, **params):
"""Fit Ridge classifier with cv.
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Training vectors, where `n_samples` is the number of samples
and `n_features` is the number of features. When using GCV,
will be cast to float64 if necessary.
y : ndarray of shape (n_samples,)
Target values. Will be cast to X's dtype if necessary.
sample_weight : float or ndarray of shape (n_samples,), default=None
Individual weights for each sample. If given a float, every sample
will have the same weight.
**params : dict, default=None
Parameters to be passed to the underlying scorer.
.. versionadded:: 1.5
Only available if `enable_metadata_routing=True`,
which can be set by using
``sklearn.set_config(enable_metadata_routing=True)``.
See :ref:`Metadata Routing User Guide <metadata_routing>` for
more details.
Returns
-------
self : object
Fitted estimator.
"""
# `RidgeClassifier` does not accept the "sag" or "saga" solvers and thus supports
# csr, csc, and coo sparse matrices. By using solver="eigen" we force acceptance
# of all sparse formats.
X, y, sample_weight, Y = self._prepare_data(X, y, sample_weight, solver="eigen")
# If cv is None, gcv mode will be used and we use the binarized Y
# since y will not be binarized in the _RidgeGCV estimator.
# If cv is not None, a GridSearchCV with RidgeClassifier
# estimators is used, where y will be binarized. Thus, we pass y
# instead of the binarized Y.
target = Y if self.cv is None else y
super().fit(X, target, sample_weight=sample_weight, **params)
return self
|
Fit Ridge classifier with cv.
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Training vectors, where `n_samples` is the number of samples
and `n_features` is the number of features. When using GCV,
will be cast to float64 if necessary.
y : ndarray of shape (n_samples,)
Target values. Will be cast to X's dtype if necessary.
sample_weight : float or ndarray of shape (n_samples,), default=None
Individual weights for each sample. If given a float, every sample
will have the same weight.
**params : dict, default=None
Parameters to be passed to the underlying scorer.
.. versionadded:: 1.5
Only available if `enable_metadata_routing=True`,
which can be set by using
``sklearn.set_config(enable_metadata_routing=True)``.
See :ref:`Metadata Routing User Guide <metadata_routing>` for
more details.
Returns
-------
self : object
Fitted estimator.
|
fit
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/_ridge.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/_ridge.py
|
BSD-3-Clause
|
def get_auto_step_size(
max_squared_sum, alpha_scaled, loss, fit_intercept, n_samples=None, is_saga=False
):
"""Compute automatic step size for SAG solver.
The step size is set to 1 / (alpha_scaled + L + fit_intercept) where L is
the max sum of squares over all samples.
Parameters
----------
max_squared_sum : float
Maximum squared sum of X over samples.
alpha_scaled : float
Constant that multiplies the regularization term, scaled by
1. / n_samples, the number of samples.
loss : {'log', 'squared', 'multinomial'}
The loss function used in SAG solver.
fit_intercept : bool
Specifies if a constant (a.k.a. bias or intercept) will be
added to the decision function.
n_samples : int, default=None
Number of rows in X. Useful if is_saga=True.
is_saga : bool, default=False
Whether to return step size for the SAGA algorithm or the SAG
algorithm.
Returns
-------
step_size : float
Step size used in SAG solver.
References
----------
Schmidt, M., Roux, N. L., & Bach, F. (2013).
Minimizing finite sums with the stochastic average gradient
https://hal.inria.fr/hal-00860051/document
:arxiv:`Defazio, A., Bach F. & Lacoste-Julien S. (2014).
"SAGA: A Fast Incremental Gradient Method With Support
for Non-Strongly Convex Composite Objectives" <1407.0202>`
"""
if loss in ("log", "multinomial"):
L = 0.25 * (max_squared_sum + int(fit_intercept)) + alpha_scaled
elif loss == "squared":
# inverse Lipschitz constant for squared loss
L = max_squared_sum + int(fit_intercept) + alpha_scaled
else:
raise ValueError(
"Unknown loss function for SAG solver, got %s instead of 'log' or 'squared'"
% loss
)
if is_saga:
# SAGA theoretical step size is 1/3L or 1 / (2 * (L + mu n))
# See Defazio et al. 2014
mun = min(2 * n_samples * alpha_scaled, L)
step = 1.0 / (2 * L + mun)
else:
# SAG theoretical step size is 1/16L but it is recommended to use 1 / L
# see http://www.birs.ca//workshops//2014/14w5003/files/schmidt.pdf,
# slide 65
step = 1.0 / L
return step
|
Compute automatic step size for SAG solver.
The step size is set to 1 / (alpha_scaled + L + fit_intercept) where L is
the max sum of squares over all samples.
Parameters
----------
max_squared_sum : float
Maximum squared sum of X over samples.
alpha_scaled : float
Constant that multiplies the regularization term, scaled by
1. / n_samples, the number of samples.
loss : {'log', 'squared', 'multinomial'}
The loss function used in SAG solver.
fit_intercept : bool
Specifies if a constant (a.k.a. bias or intercept) will be
added to the decision function.
n_samples : int, default=None
Number of rows in X. Useful if is_saga=True.
is_saga : bool, default=False
Whether to return step size for the SAGA algorithm or the SAG
algorithm.
Returns
-------
step_size : float
Step size used in SAG solver.
References
----------
Schmidt, M., Roux, N. L., & Bach, F. (2013).
Minimizing finite sums with the stochastic average gradient
https://hal.inria.fr/hal-00860051/document
:arxiv:`Defazio, A., Bach F. & Lacoste-Julien S. (2014).
"SAGA: A Fast Incremental Gradient Method With Support
for Non-Strongly Convex Composite Objectives" <1407.0202>`
|
get_auto_step_size
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/_sag.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/_sag.py
|
BSD-3-Clause
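A minimal usage sketch for get_auto_step_size, assuming the private import paths sklearn.linear_model._sag and sklearn.utils.extmath.row_norms remain available; the alpha value and data are illustrative only.
import numpy as np
from sklearn.utils.extmath import row_norms
from sklearn.linear_model._sag import get_auto_step_size

rng = np.random.RandomState(0)
X = rng.randn(20, 3)
max_squared_sum = row_norms(X, squared=True).max()
alpha_scaled = 1.0 / X.shape[0]  # alpha=1.0 divided by n_samples, as in sag_solver

# SAG step size for the squared loss: 1 / (max_squared_sum + fit_intercept + alpha_scaled)
step_sag = get_auto_step_size(max_squared_sum, alpha_scaled, "squared", fit_intercept=True)
assert np.isclose(step_sag, 1.0 / (max_squared_sum + 1 + alpha_scaled))

# SAGA additionally needs n_samples to bound the strong-convexity term
step_saga = get_auto_step_size(
    max_squared_sum, alpha_scaled, "squared", fit_intercept=True,
    n_samples=X.shape[0], is_saga=True,
)
print(step_sag, step_saga)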
|
def sag_solver(
X,
y,
sample_weight=None,
loss="log",
alpha=1.0,
beta=0.0,
max_iter=1000,
tol=0.001,
verbose=0,
random_state=None,
check_input=True,
max_squared_sum=None,
warm_start_mem=None,
is_saga=False,
):
"""SAG solver for Ridge and LogisticRegression.
SAG stands for Stochastic Average Gradient: the gradient of the loss is
estimated one sample at a time and the model is updated along the way with
a constant learning rate.
IMPORTANT NOTE: 'sag' solver converges faster on columns that are on the
same scale. You can normalize the data by using
sklearn.preprocessing.StandardScaler on your data before passing it to the
fit method.
This implementation works with data represented as dense numpy arrays or
sparse scipy arrays of floating point values for the features. It will
fit the data according to squared loss or log loss.
The regularizer is a penalty added to the loss function that shrinks model
parameters towards the zero vector using the squared euclidean norm L2.
.. versionadded:: 0.17
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data.
y : ndarray of shape (n_samples,)
Target values. With loss='multinomial', y must be label encoded
(see preprocessing.LabelEncoder). For loss='log' it must be in [0, 1].
sample_weight : array-like of shape (n_samples,), default=None
Weights applied to individual samples (1. for unweighted).
loss : {'log', 'squared', 'multinomial'}, default='log'
Loss function that will be optimized:
-'log' is the binary logistic loss, as used in LogisticRegression.
-'squared' is the squared loss, as used in Ridge.
-'multinomial' is the multinomial logistic loss, as used in
LogisticRegression.
.. versionadded:: 0.18
*loss='multinomial'*
alpha : float, default=1.
L2 regularization term in the objective function
``(0.5 * alpha * || W ||_F^2)``.
beta : float, default=0.
L1 regularization term in the objective function
``(beta * || W ||_1)``. Only applied if ``is_saga`` is set to True.
max_iter : int, default=1000
The max number of passes over the training data if the stopping
criterion is not reached.
tol : float, default=0.001
The stopping criterion for the weights. The iterations will stop when
max(change in weights) / max(weights) < tol.
verbose : int, default=0
The verbosity level.
random_state : int, RandomState instance or None, default=None
Used when shuffling the data. Pass an int for reproducible output
across multiple function calls.
See :term:`Glossary <random_state>`.
check_input : bool, default=True
If False, the input arrays X and y will not be checked.
max_squared_sum : float, default=None
Maximum squared sum of X over samples. If None, it will be computed,
going through all the samples. The value should be precomputed
to speed up cross validation.
warm_start_mem : dict, default=None
The initialization parameters used for warm starting. Warm starting is
currently used in LogisticRegression but not in Ridge.
It contains:
- 'coef': the weight vector, with the intercept in last line
if the intercept is fitted.
- 'gradient_memory': the scalar gradient for all seen samples.
- 'sum_gradient': the sum of gradient over all seen samples,
for each feature.
- 'intercept_sum_gradient': the sum of gradient over all seen
samples, for the intercept.
- 'seen': array of boolean describing the seen samples.
- 'num_seen': the number of seen samples.
is_saga : bool, default=False
Whether to use the SAGA algorithm or the SAG algorithm. SAGA behaves
better in the first epochs, and allows for L1 regularisation.
Returns
-------
coef_ : ndarray of shape (n_features,)
Weight vector.
n_iter_ : int
The number of full passes over all samples.
warm_start_mem : dict
Contains a 'coef' key with the fitted result, and possibly the
fitted intercept at the end of the array. Contains also other keys
used for warm starting.
Examples
--------
>>> import numpy as np
>>> from sklearn import linear_model
>>> n_samples, n_features = 10, 5
>>> rng = np.random.RandomState(0)
>>> X = rng.randn(n_samples, n_features)
>>> y = rng.randn(n_samples)
>>> clf = linear_model.Ridge(solver='sag')
>>> clf.fit(X, y)
Ridge(solver='sag')
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> y = np.array([1, 1, 2, 2])
>>> clf = linear_model.LogisticRegression(solver='sag')
>>> clf.fit(X, y)
LogisticRegression(solver='sag')
References
----------
Schmidt, M., Roux, N. L., & Bach, F. (2013).
Minimizing finite sums with the stochastic average gradient
https://hal.inria.fr/hal-00860051/document
:arxiv:`Defazio, A., Bach F. & Lacoste-Julien S. (2014).
"SAGA: A Fast Incremental Gradient Method With Support
for Non-Strongly Convex Composite Objectives" <1407.0202>`
See Also
--------
Ridge, SGDRegressor, ElasticNet, Lasso, SVR,
LogisticRegression, SGDClassifier, LinearSVC, Perceptron
"""
if warm_start_mem is None:
warm_start_mem = {}
# Ridge default max_iter is None
if max_iter is None:
max_iter = 1000
if check_input:
_dtype = [np.float64, np.float32]
X = check_array(X, dtype=_dtype, accept_sparse="csr", order="C")
y = check_array(y, dtype=_dtype, ensure_2d=False, order="C")
n_samples, n_features = X.shape[0], X.shape[1]
# As in SGD, the alpha is scaled by n_samples.
alpha_scaled = float(alpha) / n_samples
beta_scaled = float(beta) / n_samples
# if loss == 'multinomial', y should be label encoded.
n_classes = int(y.max()) + 1 if loss == "multinomial" else 1
# initialization
sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
if "coef" in warm_start_mem.keys():
coef_init = warm_start_mem["coef"]
else:
# assume fit_intercept is False
coef_init = np.zeros((n_features, n_classes), dtype=X.dtype, order="C")
# coef_init contains possibly the intercept_init at the end.
# Note that Ridge centers the data before fitting, so fit_intercept=False.
fit_intercept = coef_init.shape[0] == (n_features + 1)
if fit_intercept:
intercept_init = coef_init[-1, :]
coef_init = coef_init[:-1, :]
else:
intercept_init = np.zeros(n_classes, dtype=X.dtype)
if "intercept_sum_gradient" in warm_start_mem.keys():
intercept_sum_gradient = warm_start_mem["intercept_sum_gradient"]
else:
intercept_sum_gradient = np.zeros(n_classes, dtype=X.dtype)
if "gradient_memory" in warm_start_mem.keys():
gradient_memory_init = warm_start_mem["gradient_memory"]
else:
gradient_memory_init = np.zeros(
(n_samples, n_classes), dtype=X.dtype, order="C"
)
if "sum_gradient" in warm_start_mem.keys():
sum_gradient_init = warm_start_mem["sum_gradient"]
else:
sum_gradient_init = np.zeros((n_features, n_classes), dtype=X.dtype, order="C")
if "seen" in warm_start_mem.keys():
seen_init = warm_start_mem["seen"]
else:
seen_init = np.zeros(n_samples, dtype=np.int32, order="C")
if "num_seen" in warm_start_mem.keys():
num_seen_init = warm_start_mem["num_seen"]
else:
num_seen_init = 0
dataset, intercept_decay = make_dataset(X, y, sample_weight, random_state)
if max_squared_sum is None:
max_squared_sum = row_norms(X, squared=True).max()
step_size = get_auto_step_size(
max_squared_sum,
alpha_scaled,
loss,
fit_intercept,
n_samples=n_samples,
is_saga=is_saga,
)
if step_size * alpha_scaled == 1:
raise ZeroDivisionError(
"Current sag implementation does not handle "
"the case step_size * alpha_scaled == 1"
)
sag = sag64 if X.dtype == np.float64 else sag32
num_seen, n_iter_ = sag(
dataset,
coef_init,
intercept_init,
n_samples,
n_features,
n_classes,
tol,
max_iter,
loss,
step_size,
alpha_scaled,
beta_scaled,
sum_gradient_init,
gradient_memory_init,
seen_init,
num_seen_init,
fit_intercept,
intercept_sum_gradient,
intercept_decay,
is_saga,
verbose,
)
if n_iter_ == max_iter:
warnings.warn(
"The max_iter was reached which means the coef_ did not converge",
ConvergenceWarning,
)
if fit_intercept:
coef_init = np.vstack((coef_init, intercept_init))
warm_start_mem = {
"coef": coef_init,
"sum_gradient": sum_gradient_init,
"intercept_sum_gradient": intercept_sum_gradient,
"gradient_memory": gradient_memory_init,
"seen": seen_init,
"num_seen": num_seen,
}
if loss == "multinomial":
coef_ = coef_init.T
else:
coef_ = coef_init[:, 0]
return coef_, n_iter_, warm_start_mem
|
SAG solver for Ridge and LogisticRegression.
SAG stands for Stochastic Average Gradient: the gradient of the loss is
estimated one sample at a time and the model is updated along the way with
a constant learning rate.
IMPORTANT NOTE: 'sag' solver converges faster on columns that are on the
same scale. You can normalize the data by using
sklearn.preprocessing.StandardScaler on your data before passing it to the
fit method.
This implementation works with data represented as dense numpy arrays or
sparse scipy arrays of floating point values for the features. It will
fit the data according to squared loss or log loss.
The regularizer is a penalty added to the loss function that shrinks model
parameters towards the zero vector using the squared euclidean norm L2.
.. versionadded:: 0.17
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data.
y : ndarray of shape (n_samples,)
Target values. With loss='multinomial', y must be label encoded
(see preprocessing.LabelEncoder). For loss='log' it must be in [0, 1].
sample_weight : array-like of shape (n_samples,), default=None
Weights applied to individual samples (1. for unweighted).
loss : {'log', 'squared', 'multinomial'}, default='log'
Loss function that will be optimized:
-'log' is the binary logistic loss, as used in LogisticRegression.
-'squared' is the squared loss, as used in Ridge.
-'multinomial' is the multinomial logistic loss, as used in
LogisticRegression.
.. versionadded:: 0.18
*loss='multinomial'*
alpha : float, default=1.
L2 regularization term in the objective function
``(0.5 * alpha * || W ||_F^2)``.
beta : float, default=0.
L1 regularization term in the objective function
``(beta * || W ||_1)``. Only applied if ``is_saga`` is set to True.
max_iter : int, default=1000
The max number of passes over the training data if the stopping
criterion is not reached.
tol : float, default=0.001
The stopping criterion for the weights. The iterations will stop when
max(change in weights) / max(weights) < tol.
verbose : int, default=0
The verbosity level.
random_state : int, RandomState instance or None, default=None
Used when shuffling the data. Pass an int for reproducible output
across multiple function calls.
See :term:`Glossary <random_state>`.
check_input : bool, default=True
If False, the input arrays X and y will not be checked.
max_squared_sum : float, default=None
Maximum squared sum of X over samples. If None, it will be computed,
going through all the samples. The value should be precomputed
to speed up cross validation.
warm_start_mem : dict, default=None
The initialization parameters used for warm starting. Warm starting is
currently used in LogisticRegression but not in Ridge.
It contains:
- 'coef': the weight vector, with the intercept in last line
if the intercept is fitted.
- 'gradient_memory': the scalar gradient for all seen samples.
- 'sum_gradient': the sum of gradient over all seen samples,
for each feature.
- 'intercept_sum_gradient': the sum of gradient over all seen
samples, for the intercept.
- 'seen': array of boolean describing the seen samples.
- 'num_seen': the number of seen samples.
is_saga : bool, default=False
Whether to use the SAGA algorithm or the SAG algorithm. SAGA behaves
better in the first epochs, and allows for L1 regularisation.
Returns
-------
coef_ : ndarray of shape (n_features,)
Weight vector.
n_iter_ : int
The number of full passes over all samples.
warm_start_mem : dict
Contains a 'coef' key with the fitted result, and possibly the
fitted intercept at the end of the array. Contains also other keys
used for warm starting.
Examples
--------
>>> import numpy as np
>>> from sklearn import linear_model
>>> n_samples, n_features = 10, 5
>>> rng = np.random.RandomState(0)
>>> X = rng.randn(n_samples, n_features)
>>> y = rng.randn(n_samples)
>>> clf = linear_model.Ridge(solver='sag')
>>> clf.fit(X, y)
Ridge(solver='sag')
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> y = np.array([1, 1, 2, 2])
>>> clf = linear_model.LogisticRegression(solver='sag')
>>> clf.fit(X, y)
LogisticRegression(solver='sag')
References
----------
Schmidt, M., Roux, N. L., & Bach, F. (2013).
Minimizing finite sums with the stochastic average gradient
https://hal.inria.fr/hal-00860051/document
:arxiv:`Defazio, A., Bach F. & Lacoste-Julien S. (2014).
"SAGA: A Fast Incremental Gradient Method With Support
for Non-Strongly Convex Composite Objectives" <1407.0202>`
See Also
--------
Ridge, SGDRegressor, ElasticNet, Lasso, SVR,
LogisticRegression, SGDClassifier, LinearSVC, Perceptron
|
sag_solver
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/_sag.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/_sag.py
|
BSD-3-Clause
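A hedged sketch of calling sag_solver directly for a ridge-style fit with loss='squared'; it skips the centering that Ridge performs before delegating to this solver, so it illustrates the interface rather than acting as a drop-in replacement.
import numpy as np
from sklearn.linear_model._sag import sag_solver

rng = np.random.RandomState(0)
X = rng.randn(50, 5)
y = X @ rng.randn(5)

# loss='squared' is the Ridge use case; alpha is rescaled by n_samples internally.
coef, n_iter, warm_start_mem = sag_solver(X, y, loss="squared", alpha=1.0, max_iter=200)
print(coef.shape, n_iter)              # (5,) and the number of full passes actually run
print(sorted(warm_start_mem.keys()))   # the warm-start keys documented above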
|
def _get_loss_function(self, loss):
"""Get concrete ``LossFunction`` object for str ``loss``."""
loss_ = self.loss_functions[loss]
loss_class, args = loss_[0], loss_[1:]
if loss in ("huber", "epsilon_insensitive", "squared_epsilon_insensitive"):
args = (self.epsilon,)
return loss_class(*args)
|
Get concrete ``LossFunction`` object for str ``loss``.
|
_get_loss_function
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/_stochastic_gradient.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/_stochastic_gradient.py
|
BSD-3-Clause
|
def _allocate_parameter_mem(
self,
n_classes,
n_features,
input_dtype,
coef_init=None,
intercept_init=None,
one_class=0,
):
"""Allocate mem for parameters; initialize if provided."""
if n_classes > 2:
# allocate coef_ for multi-class
if coef_init is not None:
coef_init = np.asarray(coef_init, dtype=input_dtype, order="C")
if coef_init.shape != (n_classes, n_features):
raise ValueError("Provided ``coef_`` does not match dataset. ")
self.coef_ = coef_init
else:
self.coef_ = np.zeros(
(n_classes, n_features), dtype=input_dtype, order="C"
)
# allocate intercept_ for multi-class
if intercept_init is not None:
intercept_init = np.asarray(
intercept_init, order="C", dtype=input_dtype
)
if intercept_init.shape != (n_classes,):
raise ValueError("Provided intercept_init does not match dataset.")
self.intercept_ = intercept_init
else:
self.intercept_ = np.zeros(n_classes, dtype=input_dtype, order="C")
else:
# allocate coef_
if coef_init is not None:
coef_init = np.asarray(coef_init, dtype=input_dtype, order="C")
coef_init = coef_init.ravel()
if coef_init.shape != (n_features,):
raise ValueError("Provided coef_init does not match dataset.")
self.coef_ = coef_init
else:
self.coef_ = np.zeros(n_features, dtype=input_dtype, order="C")
# allocate intercept_
if intercept_init is not None:
intercept_init = np.asarray(intercept_init, dtype=input_dtype)
if intercept_init.shape != (1,) and intercept_init.shape != ():
raise ValueError("Provided intercept_init does not match dataset.")
if one_class:
self.offset_ = intercept_init.reshape(
1,
)
else:
self.intercept_ = intercept_init.reshape(
1,
)
else:
if one_class:
self.offset_ = np.zeros(1, dtype=input_dtype, order="C")
else:
self.intercept_ = np.zeros(1, dtype=input_dtype, order="C")
# initialize average parameters
if self.average > 0:
self._standard_coef = self.coef_
self._average_coef = np.zeros(
self.coef_.shape, dtype=input_dtype, order="C"
)
if one_class:
self._standard_intercept = 1 - self.offset_
else:
self._standard_intercept = self.intercept_
self._average_intercept = np.zeros(
self._standard_intercept.shape, dtype=input_dtype, order="C"
)
|
Allocate mem for parameters; initialize if provided.
|
_allocate_parameter_mem
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/_stochastic_gradient.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/_stochastic_gradient.py
|
BSD-3-Clause
|
def _make_validation_split(self, y, sample_mask):
"""Split the dataset between training set and validation set.
Parameters
----------
y : ndarray of shape (n_samples, )
Target values.
sample_mask : ndarray of shape (n_samples, )
A boolean array indicating whether each sample should be included
for validation set.
Returns
-------
validation_mask : ndarray of shape (n_samples, )
Equal to True on the validation set, False on the training set.
"""
n_samples = y.shape[0]
validation_mask = np.zeros(n_samples, dtype=np.bool_)
if not self.early_stopping:
# use the full set for training, with an empty validation set
return validation_mask
if is_classifier(self):
splitter_type = StratifiedShuffleSplit
else:
splitter_type = ShuffleSplit
cv = splitter_type(
test_size=self.validation_fraction, random_state=self.random_state
)
idx_train, idx_val = next(cv.split(np.zeros(shape=(y.shape[0], 1)), y))
if not np.any(sample_mask[idx_val]):
raise ValueError(
"The sample weights for validation set are all zero, consider using a"
" different random state."
)
if idx_train.shape[0] == 0 or idx_val.shape[0] == 0:
raise ValueError(
"Splitting %d samples into a train set and a validation set "
"with validation_fraction=%r led to an empty set (%d and %d "
"samples). Please either change validation_fraction, increase "
"number of samples, or disable early_stopping."
% (
n_samples,
self.validation_fraction,
idx_train.shape[0],
idx_val.shape[0],
)
)
validation_mask[idx_val] = True
return validation_mask
|
Split the dataset between training set and validation set.
Parameters
----------
y : ndarray of shape (n_samples, )
Target values.
sample_mask : ndarray of shape (n_samples, )
A boolean array indicating whether each sample should be included
for validation set.
Returns
-------
validation_mask : ndarray of shape (n_samples, )
Equal to True on the validation set, False on the training set.
|
_make_validation_split
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/_stochastic_gradient.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/_stochastic_gradient.py
|
BSD-3-Clause
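A small sketch of how the split behaves for a classifier; _make_validation_split is a private helper, so the call below assumes the signature shown above stays unchanged.
import numpy as np
from sklearn.linear_model import SGDClassifier

rng = np.random.RandomState(0)
y = rng.randint(0, 2, size=100)

clf = SGDClassifier(early_stopping=True, validation_fraction=0.2, random_state=0)
mask = clf._make_validation_split(y, sample_mask=np.ones_like(y, dtype=bool))
print(mask.sum())  # 20 samples reserved for validation (stratified for classifiers)

# Without early stopping the mask is all False: the full set is used for training.
clf_no_es = SGDClassifier(early_stopping=False)
print(clf_no_es._make_validation_split(y, sample_mask=np.ones_like(y, dtype=bool)).sum())  # 0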
|
def fit_binary(
est,
i,
X,
y,
alpha,
C,
learning_rate,
max_iter,
pos_weight,
neg_weight,
sample_weight,
validation_mask=None,
random_state=None,
):
"""Fit a single binary classifier.
The i'th class is considered the "positive" class.
Parameters
----------
est : Estimator object
The estimator to fit
i : int
Index of the positive class
X : numpy array or sparse matrix of shape [n_samples,n_features]
Training data
y : numpy array of shape [n_samples, ]
Target values
alpha : float
The regularization parameter
C : float
Maximum step size for passive aggressive
learning_rate : str
The learning rate. Accepted values are 'constant', 'optimal',
'invscaling', 'pa1' and 'pa2'.
max_iter : int
The maximum number of iterations (epochs)
pos_weight : float
The weight of the positive class
neg_weight : float
The weight of the negative class
sample_weight : numpy array of shape [n_samples, ]
The weight of each sample
validation_mask : numpy array of shape [n_samples, ], default=None
Precomputed validation mask in case _fit_binary is called in the
context of a one-vs-rest reduction.
random_state : int, RandomState instance, default=None
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
"""
# if average is not true, average_coef, and average_intercept will be
# unused
label_encode = isinstance(est._loss_function_, CyHalfBinomialLoss)
y_i, coef, intercept, average_coef, average_intercept = _prepare_fit_binary(
est, y, i, input_dtype=X.dtype, label_encode=label_encode
)
assert y_i.shape[0] == y.shape[0] == sample_weight.shape[0]
random_state = check_random_state(random_state)
dataset, intercept_decay = make_dataset(
X, y_i, sample_weight, random_state=random_state
)
penalty_type = est._get_penalty_type(est.penalty)
learning_rate_type = est._get_learning_rate_type(learning_rate)
if validation_mask is None:
validation_mask = est._make_validation_split(y_i, sample_mask=sample_weight > 0)
classes = np.array([-1, 1], dtype=y_i.dtype)
validation_score_cb = est._make_validation_score_cb(
validation_mask, X, y_i, sample_weight, classes=classes
)
# numpy mtrand expects a C long which is a signed 32 bit integer under
# Windows
seed = random_state.randint(MAX_INT)
tol = est.tol if est.tol is not None else -np.inf
_plain_sgd = _get_plain_sgd_function(input_dtype=coef.dtype)
coef, intercept, average_coef, average_intercept, n_iter_ = _plain_sgd(
coef,
intercept,
average_coef,
average_intercept,
est._loss_function_,
penalty_type,
alpha,
C,
est._get_l1_ratio(),
dataset,
validation_mask,
est.early_stopping,
validation_score_cb,
int(est.n_iter_no_change),
max_iter,
tol,
int(est.fit_intercept),
int(est.verbose),
int(est.shuffle),
seed,
pos_weight,
neg_weight,
learning_rate_type,
est.eta0,
est.power_t,
0,
est.t_,
intercept_decay,
est.average,
)
if est.average:
if len(est.classes_) == 2:
est._average_intercept[0] = average_intercept
else:
est._average_intercept[i] = average_intercept
return coef, intercept, n_iter_
|
Fit a single binary classifier.
The i'th class is considered the "positive" class.
Parameters
----------
est : Estimator object
The estimator to fit
i : int
Index of the positive class
X : numpy array or sparse matrix of shape [n_samples,n_features]
Training data
y : numpy array of shape [n_samples, ]
Target values
alpha : float
The regularization parameter
C : float
Maximum step size for passive aggressive
learning_rate : str
The learning rate. Accepted values are 'constant', 'optimal',
'invscaling', 'pa1' and 'pa2'.
max_iter : int
The maximum number of iterations (epochs)
pos_weight : float
The weight of the positive class
neg_weight : float
The weight of the negative class
sample_weight : numpy array of shape [n_samples, ]
The weight of each sample
validation_mask : numpy array of shape [n_samples, ], default=None
Precomputed validation mask in case _fit_binary is called in the
context of a one-vs-rest reduction.
random_state : int, RandomState instance, default=None
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
|
fit_binary
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/_stochastic_gradient.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/_stochastic_gradient.py
|
BSD-3-Clause
|
def _fit_multiclass(self, X, y, alpha, C, learning_rate, sample_weight, max_iter):
"""Fit a multi-class classifier by combining binary classifiers
Each binary classifier predicts one class versus all others. This
strategy is called OvA (One versus All) or OvR (One versus Rest).
"""
# Precompute the validation split using the multiclass labels
# to ensure proper balancing of the classes.
validation_mask = self._make_validation_split(y, sample_mask=sample_weight > 0)
# Use joblib to fit OvA in parallel.
# Pick the random seed for each job outside of fit_binary to avoid
# sharing the estimator random state between threads which could lead
# to non-deterministic behavior
random_state = check_random_state(self.random_state)
seeds = random_state.randint(MAX_INT, size=len(self.classes_))
result = Parallel(
n_jobs=self.n_jobs, verbose=self.verbose, require="sharedmem"
)(
delayed(fit_binary)(
self,
i,
X,
y,
alpha,
C,
learning_rate,
max_iter,
self._expanded_class_weight[i],
1.0,
sample_weight,
validation_mask=validation_mask,
random_state=seed,
)
for i, seed in enumerate(seeds)
)
# take the maximum of n_iter_ over every binary fit
n_iter_ = 0.0
for i, (_, intercept, n_iter_i) in enumerate(result):
self.intercept_[i] = intercept
n_iter_ = max(n_iter_, n_iter_i)
self.t_ += n_iter_ * X.shape[0]
self.n_iter_ = n_iter_
if self.average > 0:
if self.average <= self.t_ - 1.0:
self.coef_ = self._average_coef
self.intercept_ = self._average_intercept
else:
self.coef_ = self._standard_coef
self._standard_intercept = np.atleast_1d(self.intercept_)
self.intercept_ = self._standard_intercept
|
Fit a multi-class classifier by combining binary classifiers
Each binary classifier predicts one class versus all others. This
strategy is called OvA (One versus All) or OvR (One versus Rest).
|
_fit_multiclass
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/_stochastic_gradient.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/_stochastic_gradient.py
|
BSD-3-Clause
|
def fit(self, X, y, coef_init=None, intercept_init=None, sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray of shape (n_samples,)
Target values.
coef_init : ndarray of shape (n_classes, n_features), default=None
The initial coefficients to warm-start the optimization.
intercept_init : ndarray of shape (n_classes,), default=None
The initial intercept to warm-start the optimization.
sample_weight : array-like, shape (n_samples,), default=None
Weights applied to individual samples.
If not provided, uniform weights are assumed. These weights will
be multiplied with class_weight (passed through the
constructor) if class_weight is specified.
Returns
-------
self : object
Returns an instance of self.
"""
self._more_validate_params()
return self._fit(
X,
y,
alpha=self.alpha,
C=1.0,
loss=self.loss,
learning_rate=self.learning_rate,
coef_init=coef_init,
intercept_init=intercept_init,
sample_weight=sample_weight,
)
|
Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray of shape (n_samples,)
Target values.
coef_init : ndarray of shape (n_classes, n_features), default=None
The initial coefficients to warm-start the optimization.
intercept_init : ndarray of shape (n_classes,), default=None
The initial intercept to warm-start the optimization.
sample_weight : array-like, shape (n_samples,), default=None
Weights applied to individual samples.
If not provided, uniform weights are assumed. These weights will
be multiplied with class_weight (passed through the
constructor) if class_weight is specified.
Returns
-------
self : object
Returns an instance of self.
|
fit
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/_stochastic_gradient.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/_stochastic_gradient.py
|
BSD-3-Clause
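A toy fit on separable data to show the coef_/intercept_ shapes implied by the docstring above; the printed prediction is what one would expect here, not a guaranteed output.
import numpy as np
from sklearn.linear_model import SGDClassifier

X = np.array([[-1.0, -1.0], [-2.0, -1.0], [1.0, 1.0], [2.0, 1.0]])
y = np.array([0, 0, 1, 1])

clf = SGDClassifier(loss="hinge", max_iter=1000, tol=1e-3, random_state=0)
clf.fit(X, y)
print(clf.coef_.shape, clf.intercept_.shape)  # (1, 2) and (1,) for a binary problem
print(clf.predict([[0.8, 1.0]]))              # expected [1] on this separable toy data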
|
def partial_fit(self, X, y, sample_weight=None):
"""Perform one epoch of stochastic gradient descent on given samples.
Internally, this method uses ``max_iter = 1``. Therefore, it is not
guaranteed that a minimum of the cost function is reached after calling
it once. Matters such as objective convergence and early stopping
should be handled by the user.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Subset of training data.
y : numpy array of shape (n_samples,)
Subset of target values.
sample_weight : array-like, shape (n_samples,), default=None
Weights applied to individual samples.
If not provided, uniform weights are assumed.
Returns
-------
self : object
Returns an instance of self.
"""
if not hasattr(self, "coef_"):
self._more_validate_params(for_partial_fit=True)
return self._partial_fit(
X,
y,
self.alpha,
C=1.0,
loss=self.loss,
learning_rate=self.learning_rate,
max_iter=1,
sample_weight=sample_weight,
coef_init=None,
intercept_init=None,
)
|
Perform one epoch of stochastic gradient descent on given samples.
Internally, this method uses ``max_iter = 1``. Therefore, it is not
guaranteed that a minimum of the cost function is reached after calling
it once. Matters such as objective convergence and early stopping
should be handled by the user.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Subset of training data.
y : numpy array of shape (n_samples,)
Subset of target values.
sample_weight : array-like, shape (n_samples,), default=None
Weights applied to individual samples.
If not provided, uniform weights are assumed.
Returns
-------
self : object
Returns an instance of self.
|
partial_fit
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/_stochastic_gradient.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/_stochastic_gradient.py
|
BSD-3-Clause
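A streaming sketch for the regressor partial_fit shown above, one epoch per mini-batch; it assumes the features are already on a reasonable scale for SGD.
import numpy as np
from sklearn.linear_model import SGDRegressor

rng = np.random.RandomState(0)
X = rng.randn(300, 4)
y = X @ np.array([1.0, -1.0, 0.5, 0.0])

reg = SGDRegressor(random_state=0)
for start in range(0, X.shape[0], 100):  # one epoch of SGD per call
    batch = slice(start, start + 100)
    reg.partial_fit(X[batch], y[batch])
print(round(reg.score(X, y), 3))  # typically high on this noiseless toy data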
|
def fit(self, X, y, coef_init=None, intercept_init=None, sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray of shape (n_samples,)
Target values.
coef_init : ndarray of shape (n_features,), default=None
The initial coefficients to warm-start the optimization.
intercept_init : ndarray of shape (1,), default=None
The initial intercept to warm-start the optimization.
sample_weight : array-like, shape (n_samples,), default=None
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
Fitted `SGDRegressor` estimator.
"""
self._more_validate_params()
return self._fit(
X,
y,
alpha=self.alpha,
C=1.0,
loss=self.loss,
learning_rate=self.learning_rate,
coef_init=coef_init,
intercept_init=intercept_init,
sample_weight=sample_weight,
)
|
Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray of shape (n_samples,)
Target values.
coef_init : ndarray of shape (n_features,), default=None
The initial coefficients to warm-start the optimization.
intercept_init : ndarray of shape (1,), default=None
The initial intercept to warm-start the optimization.
sample_weight : array-like, shape (n_samples,), default=None
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
Fitted `SGDRegressor` estimator.
|
fit
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/_stochastic_gradient.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/_stochastic_gradient.py
|
BSD-3-Clause
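A sketch of the coef_init/intercept_init warm start described in the fit docstring above; a minimal illustration of the mechanism rather than a recommendation on when warm starting pays off.
import numpy as np
from sklearn.linear_model import SGDRegressor

rng = np.random.RandomState(0)
X = rng.randn(200, 3)
y = X @ np.array([1.0, -2.0, 0.5]) + 3.0

reg = SGDRegressor(max_iter=1000, tol=1e-3, random_state=0)
reg.fit(X, y)
# Restart the optimisation from the previous solution instead of zeros.
reg.fit(X, y, coef_init=reg.coef_, intercept_init=reg.intercept_)
print(reg.coef_.round(1), reg.intercept_.round(1))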
|
def _decision_function(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
ndarray of shape (n_samples,)
Predicted target values per element in X.
"""
check_is_fitted(self)
X = validate_data(self, X, accept_sparse="csr", reset=False)
scores = safe_sparse_dot(X, self.coef_.T, dense_output=True) + self.intercept_
return scores.ravel()
|
Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
ndarray of shape (n_samples,)
Predicted target values per element in X.
|
_decision_function
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/_stochastic_gradient.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/_stochastic_gradient.py
|
BSD-3-Clause
|
def _fit_one_class(self, X, alpha, C, sample_weight, learning_rate, max_iter):
"""Uses SGD implementation with X and y=np.ones(n_samples)."""
# The One-Class SVM uses the SGD implementation with
# y=np.ones(n_samples).
n_samples = X.shape[0]
y = np.ones(n_samples, dtype=X.dtype, order="C")
dataset, offset_decay = make_dataset(X, y, sample_weight)
penalty_type = self._get_penalty_type(self.penalty)
learning_rate_type = self._get_learning_rate_type(learning_rate)
# early stopping is set to False for the One-Class SVM. thus
# validation_mask and validation_score_cb will be set to values
# associated to early_stopping=False in _make_validation_split and
# _make_validation_score_cb respectively.
validation_mask = self._make_validation_split(y, sample_mask=sample_weight > 0)
validation_score_cb = self._make_validation_score_cb(
validation_mask, X, y, sample_weight
)
random_state = check_random_state(self.random_state)
# numpy mtrand expects a C long which is a signed 32 bit integer under
# Windows
seed = random_state.randint(0, np.iinfo(np.int32).max)
tol = self.tol if self.tol is not None else -np.inf
one_class = 1
# There are no class weights for the One-Class SVM and they are
# therefore set to 1.
pos_weight = 1
neg_weight = 1
if self.average:
coef = self._standard_coef
intercept = self._standard_intercept
average_coef = self._average_coef
average_intercept = self._average_intercept
else:
coef = self.coef_
intercept = 1 - self.offset_
average_coef = None # Not used
average_intercept = [0] # Not used
_plain_sgd = _get_plain_sgd_function(input_dtype=coef.dtype)
coef, intercept, average_coef, average_intercept, self.n_iter_ = _plain_sgd(
coef,
intercept[0],
average_coef,
average_intercept[0],
self._loss_function_,
penalty_type,
alpha,
C,
self.l1_ratio,
dataset,
validation_mask,
self.early_stopping,
validation_score_cb,
int(self.n_iter_no_change),
max_iter,
tol,
int(self.fit_intercept),
int(self.verbose),
int(self.shuffle),
seed,
neg_weight,
pos_weight,
learning_rate_type,
self.eta0,
self.power_t,
one_class,
self.t_,
offset_decay,
self.average,
)
self.t_ += self.n_iter_ * n_samples
if self.average > 0:
self._average_intercept = np.atleast_1d(average_intercept)
self._standard_intercept = np.atleast_1d(intercept)
if self.average <= self.t_ - 1.0:
# made enough updates for averaging to be taken into account
self.coef_ = average_coef
self.offset_ = 1 - np.atleast_1d(average_intercept)
else:
self.coef_ = coef
self.offset_ = 1 - np.atleast_1d(intercept)
else:
self.offset_ = 1 - np.atleast_1d(intercept)
|
Uses SGD implementation with X and y=np.ones(n_samples).
|
_fit_one_class
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/_stochastic_gradient.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/_stochastic_gradient.py
|
BSD-3-Clause
|
def partial_fit(self, X, y=None, sample_weight=None):
"""Fit linear One-Class SVM with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Subset of the training data.
y : Ignored
Not used, present for API consistency by convention.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed.
Returns
-------
self : object
Returns a fitted instance of self.
"""
if not hasattr(self, "coef_"):
self._more_validate_params(for_partial_fit=True)
alpha = self.nu / 2
return self._partial_fit(
X,
alpha,
C=1.0,
loss=self.loss,
learning_rate=self.learning_rate,
max_iter=1,
sample_weight=sample_weight,
coef_init=None,
offset_init=None,
)
|
Fit linear One-Class SVM with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Subset of the training data.
y : Ignored
Not used, present for API consistency by convention.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed.
Returns
-------
self : object
Returns a fitted instance of self.
|
partial_fit
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/_stochastic_gradient.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/_stochastic_gradient.py
|
BSD-3-Clause
|
def fit(self, X, y=None, coef_init=None, offset_init=None, sample_weight=None):
"""Fit linear One-Class SVM with Stochastic Gradient Descent.
This solves an optimization problem equivalent to the
One-Class SVM primal problem and returns a weight vector
w and an offset rho such that the decision function is given by
<w, x> - rho.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : Ignored
Not used, present for API consistency by convention.
coef_init : array, shape (n_classes, n_features)
The initial coefficients to warm-start the optimization.
offset_init : array, shape (n_classes,)
The initial offset to warm-start the optimization.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed. These weights will
be multiplied with class_weight (passed through the
constructor) if class_weight is specified.
Returns
-------
self : object
Returns a fitted instance of self.
"""
self._more_validate_params()
alpha = self.nu / 2
self._fit(
X,
alpha=alpha,
C=1.0,
loss=self.loss,
learning_rate=self.learning_rate,
coef_init=coef_init,
offset_init=offset_init,
sample_weight=sample_weight,
)
return self
|
Fit linear One-Class SVM with Stochastic Gradient Descent.
This solves an optimization problem equivalent to the
One-Class SVM primal problem and returns a weight vector
w and an offset rho such that the decision function is given by
<w, x> - rho.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : Ignored
Not used, present for API consistency by convention.
coef_init : array, shape (n_classes, n_features)
The initial coefficients to warm-start the optimization.
offset_init : array, shape (n_classes,)
The initial offset to warm-start the optimization.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed. These weights will
be multiplied with class_weight (passed through the
constructor) if class_weight is specified.
Returns
-------
self : object
Returns a fitted instance of self.
|
fit
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/_stochastic_gradient.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/_stochastic_gradient.py
|
BSD-3-Clause
|
def decision_function(self, X):
"""Signed distance to the separating hyperplane.
Signed distance is positive for an inlier and negative for an
outlier.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Testing data.
Returns
-------
dec : array-like, shape (n_samples,)
Decision function values of the samples.
"""
check_is_fitted(self, "coef_")
X = validate_data(self, X, accept_sparse="csr", reset=False)
decisions = safe_sparse_dot(X, self.coef_.T, dense_output=True) - self.offset_
return decisions.ravel()
|
Signed distance to the separating hyperplane.
Signed distance is positive for an inlier and negative for an
outlier.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Testing data.
Returns
-------
dec : array-like, shape (n_samples,)
Decision function values of the samples.
|
decision_function
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/_stochastic_gradient.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/_stochastic_gradient.py
|
BSD-3-Clause
|
def predict(self, X):
"""Return labels (1 inlier, -1 outlier) of the samples.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Testing data.
Returns
-------
y : array, shape (n_samples,)
Labels of the samples.
"""
y = (self.decision_function(X) >= 0).astype(np.int32)
y[y == 0] = -1 # for consistency with outlier detectors
return y
|
Return labels (1 inlier, -1 outlier) of the samples.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Testing data.
Returns
-------
y : array, shape (n_samples,)
Labels of the samples.
|
predict
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/_stochastic_gradient.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/_stochastic_gradient.py
|
BSD-3-Clause
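A combined sketch for the decision_function and predict methods documented above; the fraction flagged as outliers is only loosely tied to nu, so the printed value is indicative.
import numpy as np
from sklearn.linear_model import SGDOneClassSVM

rng = np.random.RandomState(0)
X = rng.randn(200, 2)

clf = SGDOneClassSVM(nu=0.1, random_state=0).fit(X)
scores = clf.decision_function(X)   # signed distance: positive for inliers
labels = clf.predict(X)             # +1 inlier, -1 outlier
print((labels == -1).mean())        # roughly of the order of nu
print(np.all((scores >= 0) == (labels == 1)))  # True, by construction of predict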
|
def _breakdown_point(n_samples, n_subsamples):
"""Approximation of the breakdown point.
Parameters
----------
n_samples : int
Number of samples.
n_subsamples : int
Number of subsamples to consider.
Returns
-------
breakdown_point : float
Approximation of breakdown point.
"""
return (
1
- (
0.5 ** (1 / n_subsamples) * (n_samples - n_subsamples + 1)
+ n_subsamples
- 1
)
/ n_samples
)
|
Approximation of the breakdown point.
Parameters
----------
n_samples : int
Number of samples.
n_subsamples : int
Number of subsamples to consider.
Returns
-------
breakdown_point : float
Approximation of breakdown point.
|
_breakdown_point
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/_theil_sen.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/_theil_sen.py
|
BSD-3-Clause
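A worked evaluation of the breakdown point approximation, using the private sklearn.linear_model._theil_sen import; the two configurations are illustrative.
from sklearn.linear_model._theil_sen import _breakdown_point

# With 100 samples and subsamples of size 10 the approximation is about 0.06,
# i.e. roughly 6% of gross outliers can be tolerated in this configuration.
print(round(_breakdown_point(n_samples=100, n_subsamples=10), 3))

# Smaller subsamples push the breakdown point up (about 0.29 here).
print(round(_breakdown_point(n_samples=100, n_subsamples=2), 3))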
|
def _lstsq(X, y, indices, fit_intercept):
"""Least Squares Estimator for TheilSenRegressor class.
This function computes the least squares solution on a subset of rows of X
and y defined by the indices array. Optionally, an intercept column is
added if fit_intercept is set to True.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Design matrix, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : ndarray of shape (n_samples,)
Target vector, where `n_samples` is the number of samples.
indices : ndarray of shape (n_subpopulation, n_subsamples)
Indices of all subsamples with respect to the chosen subpopulation.
fit_intercept : bool
Fit intercept or not.
Returns
-------
weights : ndarray of shape (n_subpopulation, n_features + intercept)
Solution matrix of n_subpopulation solved least square problems.
"""
fit_intercept = int(fit_intercept)
n_features = X.shape[1] + fit_intercept
n_subsamples = indices.shape[1]
weights = np.empty((indices.shape[0], n_features))
X_subpopulation = np.ones((n_subsamples, n_features))
# gelss needs to pad y_subpopulation to be of the max dim of X_subpopulation
y_subpopulation = np.zeros((max(n_subsamples, n_features)))
(lstsq,) = get_lapack_funcs(("gelss",), (X_subpopulation, y_subpopulation))
for index, subset in enumerate(indices):
X_subpopulation[:, fit_intercept:] = X[subset, :]
y_subpopulation[:n_subsamples] = y[subset]
weights[index] = lstsq(X_subpopulation, y_subpopulation)[1][:n_features]
return weights
|
Least Squares Estimator for TheilSenRegressor class.
This function computes the least squares solution on a subset of rows of X
and y defined by the indices array. Optionally, an intercept column is
added if fit_intercept is set to True.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Design matrix, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : ndarray of shape (n_samples,)
Target vector, where `n_samples` is the number of samples.
indices : ndarray of shape (n_subpopulation, n_subsamples)
Indices of all subsamples with respect to the chosen subpopulation.
fit_intercept : bool
Fit intercept or not.
Returns
-------
weights : ndarray of shape (n_subpopulation, n_features + intercept)
Solution matrix of n_subpopulation solved least square problems.
|
_lstsq
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/_theil_sen.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/_theil_sen.py
|
BSD-3-Clause
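A minimal sketch of _lstsq on two hand-picked subsamples; the private import path and the index array are assumptions made for illustration.
import numpy as np
from sklearn.linear_model._theil_sen import _lstsq

rng = np.random.RandomState(0)
X = rng.randn(20, 3)
y = X @ np.array([1.0, 2.0, 3.0]) + 0.5

indices = np.array([[0, 1, 2, 3], [4, 5, 6, 7]])  # two subsamples of four rows each
W = _lstsq(X, y, indices, fit_intercept=True)
print(W.shape)     # (2, 4): intercept followed by the three coefficients, per subsample
print(W.round(2))  # each row close to [0.5, 1.0, 2.0, 3.0] on this noiseless data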
|
def fit(self, X, y):
"""Fit linear model.
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Training data.
y : ndarray of shape (n_samples,)
Target values.
Returns
-------
self : returns an instance of self.
Fitted `TheilSenRegressor` estimator.
"""
if self.copy_X != "deprecated":
warnings.warn(
"`copy_X` was deprecated in 1.6 and will be removed in 1.8 since it "
"has no effect internally. Simply leave this parameter to its default "
"value to avoid this warning.",
FutureWarning,
)
random_state = check_random_state(self.random_state)
X, y = validate_data(self, X, y, y_numeric=True)
n_samples, n_features = X.shape
n_subsamples, self.n_subpopulation_ = self._check_subparams(
n_samples, n_features
)
self.breakdown_ = _breakdown_point(n_samples, n_subsamples)
if self.verbose:
print("Breakdown point: {0}".format(self.breakdown_))
print("Number of samples: {0}".format(n_samples))
tol_outliers = int(self.breakdown_ * n_samples)
print("Tolerable outliers: {0}".format(tol_outliers))
print("Number of subpopulations: {0}".format(self.n_subpopulation_))
# Determine indices of subpopulation
if np.rint(binom(n_samples, n_subsamples)) <= self.max_subpopulation:
indices = list(combinations(range(n_samples), n_subsamples))
else:
indices = [
random_state.choice(n_samples, size=n_subsamples, replace=False)
for _ in range(self.n_subpopulation_)
]
n_jobs = effective_n_jobs(self.n_jobs)
index_list = np.array_split(indices, n_jobs)
weights = Parallel(n_jobs=n_jobs, verbose=self.verbose)(
delayed(_lstsq)(X, y, index_list[job], self.fit_intercept)
for job in range(n_jobs)
)
weights = np.vstack(weights)
self.n_iter_, coefs = _spatial_median(
weights, max_iter=self.max_iter, tol=self.tol
)
if self.fit_intercept:
self.intercept_ = coefs[0]
self.coef_ = coefs[1:]
else:
self.intercept_ = 0.0
self.coef_ = coefs
return self
|
Fit linear model.
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Training data.
y : ndarray of shape (n_samples,)
Target values.
Returns
-------
self : returns an instance of self.
Fitted `TheilSenRegressor` estimator.
|
fit
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/_theil_sen.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/_theil_sen.py
|
BSD-3-Clause
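A robustness sketch for TheilSenRegressor.fit with a few corrupted targets; the recovered parameters are expected, not guaranteed, to stay near the true values.
import numpy as np
from sklearn.linear_model import TheilSenRegressor

rng = np.random.RandomState(0)
X = rng.randn(100, 1)
y = 3.0 * X[:, 0] + 1.0
y[:5] += 100.0  # corrupt a handful of targets

reg = TheilSenRegressor(random_state=0).fit(X, y)
print(reg.coef_.round(1), round(float(reg.intercept_), 1))  # should stay close to [3.] and 1.0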
|
def test_linear_regression_sample_weight_consistency(
X_shape, sparse_container, fit_intercept, global_random_seed
):
"""Test that the impact of sample_weight is consistent.
Note that this test is stricter than the common test
check_sample_weight_equivalence alone and also tests sparse X.
It is very similar to test_enet_sample_weight_consistency.
"""
rng = np.random.RandomState(global_random_seed)
n_samples, n_features = X_shape
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples)
if sparse_container is not None:
X = sparse_container(X)
params = dict(fit_intercept=fit_intercept)
reg = LinearRegression(**params).fit(X, y, sample_weight=None)
coef = reg.coef_.copy()
if fit_intercept:
intercept = reg.intercept_
# 1) sample_weight=np.ones(..) must be equivalent to sample_weight=None,
# a special case of check_sample_weight_equivalence(name, reg), but we also
# test with sparse input.
sample_weight = np.ones_like(y)
reg.fit(X, y, sample_weight=sample_weight)
assert_allclose(reg.coef_, coef, rtol=1e-6)
if fit_intercept:
assert_allclose(reg.intercept_, intercept)
# 2) sample_weight=None should be equivalent to sample_weight = number
sample_weight = 123.0
reg.fit(X, y, sample_weight=sample_weight)
assert_allclose(reg.coef_, coef, rtol=1e-6)
if fit_intercept:
assert_allclose(reg.intercept_, intercept)
# 3) scaling of sample_weight should have no effect, cf. np.average()
sample_weight = rng.uniform(low=0.01, high=2, size=X.shape[0])
reg = reg.fit(X, y, sample_weight=sample_weight)
coef = reg.coef_.copy()
if fit_intercept:
intercept = reg.intercept_
reg.fit(X, y, sample_weight=np.pi * sample_weight)
assert_allclose(reg.coef_, coef, rtol=1e-6 if sparse_container is None else 1e-5)
if fit_intercept:
assert_allclose(reg.intercept_, intercept)
# 4) setting elements of sample_weight to 0 is equivalent to removing these samples
sample_weight_0 = sample_weight.copy()
sample_weight_0[-5:] = 0
y[-5:] *= 1000 # to make excluding those samples important
reg.fit(X, y, sample_weight=sample_weight_0)
coef_0 = reg.coef_.copy()
if fit_intercept:
intercept_0 = reg.intercept_
reg.fit(X[:-5], y[:-5], sample_weight=sample_weight[:-5])
assert_allclose(reg.coef_, coef_0, rtol=1e-5)
if fit_intercept:
assert_allclose(reg.intercept_, intercept_0)
# 5) check that multiplying sample_weight by 2 is equivalent to repeating
# corresponding samples twice
if sparse_container is not None:
X2 = sparse.vstack([X, X[: n_samples // 2]], format="csc")
else:
X2 = np.concatenate([X, X[: n_samples // 2]], axis=0)
y2 = np.concatenate([y, y[: n_samples // 2]])
sample_weight_1 = sample_weight.copy()
sample_weight_1[: n_samples // 2] *= 2
sample_weight_2 = np.concatenate(
[sample_weight, sample_weight[: n_samples // 2]], axis=0
)
reg1 = LinearRegression(**params).fit(X, y, sample_weight=sample_weight_1)
reg2 = LinearRegression(**params).fit(X2, y2, sample_weight=sample_weight_2)
assert_allclose(reg1.coef_, reg2.coef_, rtol=1e-6)
if fit_intercept:
assert_allclose(reg1.intercept_, reg2.intercept_)
|
Test that the impact of sample_weight is consistent.
Note that this test is stricter than the common test
check_sample_weight_equivalence alone and also tests sparse X.
It is very similar to test_enet_sample_weight_consistency.
|
test_linear_regression_sample_weight_consistency
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/tests/test_base.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/tests/test_base.py
|
BSD-3-Clause
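A condensed standalone version of property (5) from the test above: doubling a sample weight should match repeating the corresponding rows.
import numpy as np
from sklearn.linear_model import LinearRegression

rng = np.random.RandomState(0)
X, y = rng.rand(10, 3), rng.rand(10)

w = np.ones(10)
w[:5] = 2.0
reg_weighted = LinearRegression().fit(X, y, sample_weight=w)

X_rep = np.vstack([X, X[:5]])
y_rep = np.concatenate([y, y[:5]])
reg_repeated = LinearRegression().fit(X_rep, y_rep)

print(np.allclose(reg_weighted.coef_, reg_repeated.coef_))           # True
print(np.isclose(reg_weighted.intercept_, reg_repeated.intercept_))  # True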
|
def test_bayesian_ridge_score_values():
"""Check value of score on toy example.
Compute log marginal likelihood with equation (36) in Sparse Bayesian
Learning and the Relevance Vector Machine (Tipping, 2001):
- 0.5 * (log |Id/alpha + X.X^T/lambda| +
y^T.(Id/alpha + X.X^T/lambda)^-1.y + n * log(2 * pi))
+ lambda_1 * log(lambda) - lambda_2 * lambda
+ alpha_1 * log(alpha) - alpha_2 * alpha
and check equality with the score computed during training.
"""
X, y = diabetes.data, diabetes.target
n_samples = X.shape[0]
# check with initial values of alpha and lambda (see code for the values)
eps = np.finfo(np.float64).eps
alpha_ = 1.0 / (np.var(y) + eps)
lambda_ = 1.0
# value of the parameters of the Gamma hyperpriors
alpha_1 = 0.1
alpha_2 = 0.1
lambda_1 = 0.1
lambda_2 = 0.1
# compute score using formula of docstring
score = lambda_1 * log(lambda_) - lambda_2 * lambda_
score += alpha_1 * log(alpha_) - alpha_2 * alpha_
M = 1.0 / alpha_ * np.eye(n_samples) + 1.0 / lambda_ * np.dot(X, X.T)
M_inv_dot_y = np.linalg.solve(M, y)
score += -0.5 * (
fast_logdet(M) + np.dot(y.T, M_inv_dot_y) + n_samples * log(2 * np.pi)
)
# compute score with BayesianRidge
clf = BayesianRidge(
alpha_1=alpha_1,
alpha_2=alpha_2,
lambda_1=lambda_1,
lambda_2=lambda_2,
max_iter=1,
fit_intercept=False,
compute_score=True,
)
clf.fit(X, y)
assert_almost_equal(clf.scores_[0], score, decimal=9)
|
Check value of score on toy example.
Compute log marginal likelihood with equation (36) in Sparse Bayesian
Learning and the Relevance Vector Machine (Tipping, 2001):
- 0.5 * (log |Id/alpha + X.X^T/lambda| +
y^T.(Id/alpha + X.X^T/lambda)^-1.y + n * log(2 * pi))
+ lambda_1 * log(lambda) - lambda_2 * lambda
+ alpha_1 * log(alpha) - alpha_2 * alpha
and check equality with the score computed during training.
|
test_bayesian_ridge_score_values
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/tests/test_bayes.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/tests/test_bayes.py
|
BSD-3-Clause
|
def test_bayesian_covariance_matrix(n_samples, n_features, global_random_seed):
"""Check the posterior covariance matrix sigma_
Non-regression test for https://github.com/scikit-learn/scikit-learn/issues/31093
"""
X, y = datasets.make_regression(
n_samples, n_features, random_state=global_random_seed
)
reg = BayesianRidge(fit_intercept=False).fit(X, y)
covariance_matrix = np.linalg.inv(
reg.lambda_ * np.identity(n_features) + reg.alpha_ * np.dot(X.T, X)
)
assert_allclose(reg.sigma_, covariance_matrix, rtol=1e-6)
|
Check the posterior covariance matrix sigma_
Non-regression test for https://github.com/scikit-learn/scikit-learn/issues/31093
|
test_bayesian_covariance_matrix
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/tests/test_bayes.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/tests/test_bayes.py
|
BSD-3-Clause
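The same posterior covariance identity as in the test above, reproduced as a standalone snippet; shapes and tolerance mirror the test rather than adding any new claim.
import numpy as np
from sklearn.datasets import make_regression
from sklearn.linear_model import BayesianRidge

X, y = make_regression(n_samples=50, n_features=5, random_state=0)
reg = BayesianRidge(fit_intercept=False).fit(X, y)

sigma = np.linalg.inv(reg.lambda_ * np.eye(X.shape[1]) + reg.alpha_ * X.T @ X)
print(np.allclose(reg.sigma_, sigma, rtol=1e-6))  # expected True, per the non-regression test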
|
def test_linear_model_regressor_coef_shape(Regressor, ndim):
"""Check the consistency of linear models `coef` shape."""
if Regressor is LinearRegression:
pytest.xfail("LinearRegression does not follow `coef_` shape contract!")
X, y = make_regression(random_state=0, n_samples=200, n_features=20)
y = MinMaxScaler().fit_transform(y.reshape(-1, 1))[:, 0] + 1
y = y[:, np.newaxis] if ndim == 2 else y
regressor = Regressor()
set_random_state(regressor)
regressor.fit(X, y)
assert regressor.coef_.shape == (X.shape[1],)
|
Check the consistency of linear models `coef` shape.
|
test_linear_model_regressor_coef_shape
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/tests/test_common.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/tests/test_common.py
|
BSD-3-Clause
|
def test_set_order_dense(order, input_order):
"""Check that _set_order returns arrays with promised order."""
X = np.array([[0], [0], [0]], order=input_order)
y = np.array([0, 0, 0], order=input_order)
X2, y2 = _set_order(X, y, order=order)
if order == "C":
assert X2.flags["C_CONTIGUOUS"]
assert y2.flags["C_CONTIGUOUS"]
elif order == "F":
assert X2.flags["F_CONTIGUOUS"]
assert y2.flags["F_CONTIGUOUS"]
if order == input_order:
assert X is X2
assert y is y2
|
Check that _set_order returns arrays with promised order.
|
test_set_order_dense
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/tests/test_coordinate_descent.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/tests/test_coordinate_descent.py
|
BSD-3-Clause
|
def test_set_order_sparse(order, input_order, coo_container):
"""Check that _set_order returns sparse matrices in promised format."""
X = coo_container(np.array([[0], [0], [0]]))
y = coo_container(np.array([0, 0, 0]))
sparse_format = "csc" if input_order == "F" else "csr"
X = X.asformat(sparse_format)
y = X.asformat(sparse_format)
X2, y2 = _set_order(X, y, order=order)
format = "csc" if order == "F" else "csr"
assert sparse.issparse(X2) and X2.format == format
assert sparse.issparse(y2) and y2.format == format
|
Check that _set_order returns sparse matrices in promised format.
|
test_set_order_sparse
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/tests/test_coordinate_descent.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/tests/test_coordinate_descent.py
|
BSD-3-Clause
|
def test_lasso_dual_gap():
"""
Check that Lasso.dual_gap_ matches its objective formulation, with the
datafit normalized by n_samples
"""
X, y, _, _ = build_dataset(n_samples=10, n_features=30)
n_samples = len(y)
alpha = 0.01 * np.max(np.abs(X.T @ y)) / n_samples
clf = Lasso(alpha=alpha, fit_intercept=False).fit(X, y)
w = clf.coef_
R = y - X @ w
primal = 0.5 * np.mean(R**2) + clf.alpha * np.sum(np.abs(w))
# dual pt: R / n_samples, dual constraint: norm(X.T @ theta, inf) <= alpha
R /= np.max(np.abs(X.T @ R) / (n_samples * alpha))
dual = 0.5 * (np.mean(y**2) - np.mean((y - R) ** 2))
assert_allclose(clf.dual_gap_, primal - dual)
|
Check that Lasso.dual_gap_ matches its objective formulation, with the
datafit normalized by n_samples
|
test_lasso_dual_gap
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/tests/test_coordinate_descent.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/tests/test_coordinate_descent.py
|
BSD-3-Clause
|
def build_dataset(n_samples=50, n_features=200, n_informative_features=10, n_targets=1):
"""
build an ill-posed linear regression problem with many noisy features and
comparatively few samples
"""
random_state = np.random.RandomState(0)
if n_targets > 1:
w = random_state.randn(n_features, n_targets)
else:
w = random_state.randn(n_features)
w[n_informative_features:] = 0.0
X = random_state.randn(n_samples, n_features)
y = np.dot(X, w)
X_test = random_state.randn(n_samples, n_features)
y_test = np.dot(X_test, w)
return X, y, X_test, y_test
|
build an ill-posed linear regression problem with many noisy features and
comparatively few samples
|
build_dataset
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/tests/test_coordinate_descent.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/tests/test_coordinate_descent.py
|
BSD-3-Clause
|
def test_lassocv_alphas_validation(alphas, err_type, err_msg):
"""Check the `alphas` validation in LassoCV."""
n_samples, n_features = 5, 5
rng = np.random.RandomState(0)
X = rng.randn(n_samples, n_features)
y = rng.randint(0, 2, n_samples)
lassocv = LassoCV(alphas=alphas)
with pytest.raises(err_type, match=err_msg):
lassocv.fit(X, y)
|
Check the `alphas` validation in LassoCV.
|
test_lassocv_alphas_validation
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/tests/test_coordinate_descent.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/tests/test_coordinate_descent.py
|
BSD-3-Clause
|
def _scale_alpha_inplace(estimator, n_samples):
"""Rescale the parameter alpha from when the estimator is evoked with
normalize set to True as if it were evoked in a Pipeline with normalize set
to False and with a StandardScaler.
"""
if ("alpha" not in estimator.get_params()) and (
"alphas" not in estimator.get_params()
):
return
if isinstance(estimator, (RidgeCV, RidgeClassifierCV)):
# alphas is not validated at this point and can be a list.
# We convert it to a np.ndarray to make sure broadcasting
# is used.
alphas = np.asarray(estimator.alphas) * n_samples
return estimator.set_params(alphas=alphas)
if isinstance(estimator, (Lasso, LassoLars, MultiTaskLasso)):
alpha = estimator.alpha * np.sqrt(n_samples)
if isinstance(estimator, (Ridge, RidgeClassifier)):
alpha = estimator.alpha * n_samples
if isinstance(estimator, (ElasticNet, MultiTaskElasticNet)):
if estimator.l1_ratio == 1:
alpha = estimator.alpha * np.sqrt(n_samples)
elif estimator.l1_ratio == 0:
alpha = estimator.alpha * n_samples
else:
# To avoid silent errors in case of refactoring
raise NotImplementedError
estimator.set_params(alpha=alpha)
|
Rescale the parameter alpha from when the estimator is invoked with
normalize set to True as if it were invoked in a Pipeline with normalize set
to False and with a StandardScaler.
|
_scale_alpha_inplace
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/tests/test_coordinate_descent.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/tests/test_coordinate_descent.py
|
BSD-3-Clause
|
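The helper above encodes how alpha must be rescaled to reproduce the deprecated normalize=True behaviour with a StandardScaler pipeline: l1-type penalties scale with sqrt(n_samples), l2-type penalties with n_samples. A minimal sketch with hypothetical values:
import numpy as np
from sklearn.linear_model import Lasso, Ridge

n_samples = 100
lasso = Lasso(alpha=0.5)
ridge = Ridge(alpha=0.5)
# Mirror the branches of _scale_alpha_inplace: sqrt(n_samples) for l1, n_samples for l2.
lasso.set_params(alpha=lasso.alpha * np.sqrt(n_samples))
ridge.set_params(alpha=ridge.alpha * n_samples)
print(lasso.alpha, ridge.alpha)  # 5.0 50.0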
def test_path_unknown_parameter(path_func):
"""Check that passing parameter not used by the coordinate descent solver
will raise an error."""
X, y, _, _ = build_dataset(n_samples=50, n_features=20)
err_msg = "Unexpected parameters in params"
with pytest.raises(ValueError, match=err_msg):
path_func(X, y, normalize=True, fit_intercept=True)
|
Check that passing a parameter not used by the coordinate descent solver
will raise an error.
|
test_path_unknown_parameter
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/tests/test_coordinate_descent.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/tests/test_coordinate_descent.py
|
BSD-3-Clause
|
def test_enet_coordinate_descent(klass, n_classes, kwargs):
"""Test that a warning is issued if model does not converge"""
clf = klass(max_iter=2, **kwargs)
n_samples = 5
n_features = 2
X = np.ones((n_samples, n_features)) * 1e50
y = np.ones((n_samples, n_classes))
if klass == Lasso:
y = y.ravel()
warning_message = (
"Objective did not converge. You might want to"
" increase the number of iterations."
)
with pytest.warns(ConvergenceWarning, match=warning_message):
clf.fit(X, y)
|
Test that a warning is issued if the model does not converge.
|
test_enet_coordinate_descent
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/tests/test_coordinate_descent.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/tests/test_coordinate_descent.py
|
BSD-3-Clause
|
def test_enet_sample_weight_consistency(
fit_intercept, alpha, precompute, sparse_container, global_random_seed
):
"""Test that the impact of sample_weight is consistent.
Note that this test is stricter than the common test
check_sample_weight_equivalence alone and also tests sparse X.
"""
rng = np.random.RandomState(global_random_seed)
n_samples, n_features = 10, 5
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples)
if sparse_container is not None:
X = sparse_container(X)
params = dict(
alpha=alpha,
fit_intercept=fit_intercept,
precompute=precompute,
tol=1e-6,
l1_ratio=0.5,
)
reg = ElasticNet(**params).fit(X, y)
coef = reg.coef_.copy()
if fit_intercept:
intercept = reg.intercept_
# 1) sample_weight=np.ones(..) should be equivalent to sample_weight=None
sample_weight = np.ones_like(y)
reg.fit(X, y, sample_weight=sample_weight)
assert_allclose(reg.coef_, coef, rtol=1e-6)
if fit_intercept:
assert_allclose(reg.intercept_, intercept)
# 2) sample_weight=None should be equivalent to sample_weight = number
sample_weight = 123.0
reg.fit(X, y, sample_weight=sample_weight)
assert_allclose(reg.coef_, coef, rtol=1e-6)
if fit_intercept:
assert_allclose(reg.intercept_, intercept)
# 3) scaling of sample_weight should have no effect, cf. np.average()
sample_weight = rng.uniform(low=0.01, high=2, size=X.shape[0])
reg = reg.fit(X, y, sample_weight=sample_weight)
coef = reg.coef_.copy()
if fit_intercept:
intercept = reg.intercept_
reg.fit(X, y, sample_weight=np.pi * sample_weight)
assert_allclose(reg.coef_, coef, rtol=1e-6)
if fit_intercept:
assert_allclose(reg.intercept_, intercept)
# 4) setting elements of sample_weight to 0 is equivalent to removing these samples
sample_weight_0 = sample_weight.copy()
sample_weight_0[-5:] = 0
y[-5:] *= 1000 # to make excluding those samples important
reg.fit(X, y, sample_weight=sample_weight_0)
coef_0 = reg.coef_.copy()
if fit_intercept:
intercept_0 = reg.intercept_
reg.fit(X[:-5], y[:-5], sample_weight=sample_weight[:-5])
assert_allclose(reg.coef_, coef_0, rtol=1e-6)
if fit_intercept:
assert_allclose(reg.intercept_, intercept_0)
# 5) check that multiplying sample_weight by 2 is equivalent to repeating
# corresponding samples twice
if sparse_container is not None:
X2 = sparse.vstack([X, X[: n_samples // 2]], format="csc")
else:
X2 = np.concatenate([X, X[: n_samples // 2]], axis=0)
y2 = np.concatenate([y, y[: n_samples // 2]])
sample_weight_1 = sample_weight.copy()
sample_weight_1[: n_samples // 2] *= 2
sample_weight_2 = np.concatenate(
[sample_weight, sample_weight[: n_samples // 2]], axis=0
)
reg1 = ElasticNet(**params).fit(X, y, sample_weight=sample_weight_1)
reg2 = ElasticNet(**params).fit(X2, y2, sample_weight=sample_weight_2)
assert_allclose(reg1.coef_, reg2.coef_, rtol=1e-6)
|
Test that the impact of sample_weight is consistent.
Note that this test is stricter than the common test
check_sample_weight_equivalence alone and also tests sparse X.
|
test_enet_sample_weight_consistency
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/tests/test_coordinate_descent.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/tests/test_coordinate_descent.py
|
BSD-3-Clause
|
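Point 5 of the test above rests on the equivalence between integer sample weights and repeated rows. A compact sketch of that equivalence on dense toy data (sizes and alpha are illustrative only):
import numpy as np
from sklearn.linear_model import ElasticNet

rng = np.random.RandomState(0)
X = rng.rand(8, 3)
y = rng.rand(8)
sw = np.ones(8)
sw[:4] = 2  # weight the first half twice
X_rep = np.vstack([X, X[:4]])  # equivalently, repeat those rows once more
y_rep = np.concatenate([y, y[:4]])
params = dict(alpha=0.1, l1_ratio=0.5, tol=1e-10)
reg_w = ElasticNet(**params).fit(X, y, sample_weight=sw)
reg_r = ElasticNet(**params).fit(X_rep, y_rep)
print(np.allclose(reg_w.coef_, reg_r.coef_, atol=1e-6))  # expected: True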
def test_enet_cv_sample_weight_correctness(
fit_intercept, sparse_container, global_random_seed
):
"""Test that ElasticNetCV with sample weights gives correct results.
We fit the same model twice, once with weighted training data, once with repeated
data points in the training data and check that both models converge to the
same solution.
Since this model uses an internal cross-validation scheme to tune the alpha
regularization parameter, we make sure that the repetitions only occur within
a specific CV group. Data points belonging to other CV groups stay
unit-weighted / "unrepeated".
"""
rng = np.random.RandomState(global_random_seed)
n_splits, n_samples_per_cv, n_features = 3, 10, 5
X_with_weights = rng.rand(n_splits * n_samples_per_cv, n_features)
beta = rng.rand(n_features)
beta[0:2] = 0
y_with_weights = X_with_weights @ beta + rng.rand(n_splits * n_samples_per_cv)
if sparse_container is not None:
X_with_weights = sparse_container(X_with_weights)
params = dict(tol=1e-6)
# Assign random integer weights only to the first cross-validation group.
# The samples in the other cross-validation groups are left with unit
# weights.
sw = np.ones_like(y_with_weights)
sw[:n_samples_per_cv] = rng.randint(0, 5, size=n_samples_per_cv)
groups_with_weights = np.concatenate(
[
np.full(n_samples_per_cv, 0),
np.full(n_samples_per_cv, 1),
np.full(n_samples_per_cv, 2),
]
)
splits_with_weights = list(
LeaveOneGroupOut().split(X_with_weights, groups=groups_with_weights)
)
reg_with_weights = ElasticNetCV(
cv=splits_with_weights, fit_intercept=fit_intercept, **params
)
reg_with_weights.fit(X_with_weights, y_with_weights, sample_weight=sw)
if sparse_container is not None:
X_with_weights = X_with_weights.toarray()
X_with_repetitions = np.repeat(X_with_weights, sw.astype(int), axis=0)
if sparse_container is not None:
X_with_repetitions = sparse_container(X_with_repetitions)
y_with_repetitions = np.repeat(y_with_weights, sw.astype(int), axis=0)
groups_with_repetitions = np.repeat(groups_with_weights, sw.astype(int), axis=0)
splits_with_repetitions = list(
LeaveOneGroupOut().split(X_with_repetitions, groups=groups_with_repetitions)
)
reg_with_repetitions = ElasticNetCV(
cv=splits_with_repetitions, fit_intercept=fit_intercept, **params
)
reg_with_repetitions.fit(X_with_repetitions, y_with_repetitions)
# Check that the alpha selection process is the same:
assert_allclose(reg_with_weights.mse_path_, reg_with_repetitions.mse_path_)
assert_allclose(reg_with_weights.alphas_, reg_with_repetitions.alphas_)
assert reg_with_weights.alpha_ == pytest.approx(reg_with_repetitions.alpha_)
# Check that the final model coefficients are the same:
assert_allclose(reg_with_weights.coef_, reg_with_repetitions.coef_, atol=1e-10)
assert reg_with_weights.intercept_ == pytest.approx(reg_with_repetitions.intercept_)
|
Test that ElasticNetCV with sample weights gives correct results.
We fit the same model twice, once with weighted training data, once with repeated
data points in the training data and check that both models converge to the
same solution.
Since this model uses an internal cross-validation scheme to tune the alpha
regularization parameter, we make sure that the repetitions only occur within
a specific CV group. Data points belonging to other CV groups stay
unit-weighted / "unrepeated".
|
test_enet_cv_sample_weight_correctness
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/tests/test_coordinate_descent.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/tests/test_coordinate_descent.py
|
BSD-3-Clause
|
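The repetition bookkeeping in the test above relies on np.repeat expanding each row according to its integer weight; a zero weight drops the row entirely. A tiny sketch:
import numpy as np

X = np.array([[1.0], [2.0], [3.0]])
sw = np.array([2, 0, 1])          # integer weight per sample
X_rep = np.repeat(X, sw, axis=0)  # row 0 twice, row 1 dropped, row 2 once
print(X_rep.ravel())              # [1. 1. 3.]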
def test_enet_cv_grid_search(sample_weight):
"""Test that ElasticNetCV gives same result as GridSearchCV."""
n_samples, n_features = 200, 10
cv = 5
X, y = make_regression(
n_samples=n_samples,
n_features=n_features,
effective_rank=10,
n_informative=n_features - 4,
noise=10,
random_state=0,
)
if sample_weight:
sample_weight = np.linspace(1, 5, num=n_samples)
else:
sample_weight = None
alphas = np.logspace(np.log10(1e-5), np.log10(1), num=10)
l1_ratios = [0.1, 0.5, 0.9]
reg = ElasticNetCV(cv=cv, alphas=alphas, l1_ratio=l1_ratios)
reg.fit(X, y, sample_weight=sample_weight)
param = {"alpha": alphas, "l1_ratio": l1_ratios}
gs = GridSearchCV(
estimator=ElasticNet(),
param_grid=param,
cv=cv,
scoring="neg_mean_squared_error",
).fit(X, y, sample_weight=sample_weight)
assert reg.l1_ratio_ == pytest.approx(gs.best_params_["l1_ratio"])
assert reg.alpha_ == pytest.approx(gs.best_params_["alpha"])
|
Test that ElasticNetCV gives same result as GridSearchCV.
|
test_enet_cv_grid_search
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/tests/test_coordinate_descent.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/tests/test_coordinate_descent.py
|
BSD-3-Clause
|
def test_enet_cv_sample_weight_consistency(
fit_intercept, l1_ratio, precompute, sparse_container
):
"""Test that the impact of sample_weight is consistent."""
rng = np.random.RandomState(0)
n_samples, n_features = 10, 5
X = rng.rand(n_samples, n_features)
y = X.sum(axis=1) + rng.rand(n_samples)
params = dict(
l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
precompute=precompute,
tol=1e-6,
cv=3,
)
if sparse_container is not None:
X = sparse_container(X)
if l1_ratio == 0:
params.pop("l1_ratio", None)
reg = LassoCV(**params).fit(X, y)
else:
reg = ElasticNetCV(**params).fit(X, y)
coef = reg.coef_.copy()
if fit_intercept:
intercept = reg.intercept_
# sample_weight=np.ones(..) should be equivalent to sample_weight=None
sample_weight = np.ones_like(y)
reg.fit(X, y, sample_weight=sample_weight)
assert_allclose(reg.coef_, coef, rtol=1e-6)
if fit_intercept:
assert_allclose(reg.intercept_, intercept)
# sample_weight=None should be equivalent to sample_weight = number
sample_weight = 123.0
reg.fit(X, y, sample_weight=sample_weight)
assert_allclose(reg.coef_, coef, rtol=1e-6)
if fit_intercept:
assert_allclose(reg.intercept_, intercept)
# scaling of sample_weight should have no effect, cf. np.average()
sample_weight = 2 * np.ones_like(y)
reg.fit(X, y, sample_weight=sample_weight)
assert_allclose(reg.coef_, coef, rtol=1e-6)
if fit_intercept:
assert_allclose(reg.intercept_, intercept)
|
Test that the impact of sample_weight is consistent.
|
test_enet_cv_sample_weight_consistency
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/tests/test_coordinate_descent.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/tests/test_coordinate_descent.py
|
BSD-3-Clause
|
def test_enet_sample_weight_does_not_overwrite_sample_weight(check_input):
"""Check that ElasticNet does not overwrite sample_weights."""
rng = np.random.RandomState(0)
n_samples, n_features = 10, 5
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples)
sample_weight_1_25 = 1.25 * np.ones_like(y)
sample_weight = sample_weight_1_25.copy()
reg = ElasticNet()
reg.fit(X, y, sample_weight=sample_weight, check_input=check_input)
assert_array_equal(sample_weight, sample_weight_1_25)
|
Check that ElasticNet does not overwrite sample_weights.
|
test_enet_sample_weight_does_not_overwrite_sample_weight
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/tests/test_coordinate_descent.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/tests/test_coordinate_descent.py
|
BSD-3-Clause
|
def test_read_only_buffer():
"""Test that sparse coordinate descent works for read-only buffers"""
rng = np.random.RandomState(0)
clf = ElasticNet(alpha=0.1, copy_X=True, random_state=rng)
X = np.asfortranarray(rng.uniform(size=(100, 10)))
X.setflags(write=False)
y = rng.rand(100)
clf.fit(X, y)
|
Test that sparse coordinate descent works for read-only buffers
|
test_read_only_buffer
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/tests/test_coordinate_descent.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/tests/test_coordinate_descent.py
|
BSD-3-Clause
|
def test_cv_estimators_reject_params_with_no_routing_enabled(EstimatorCV):
"""Check that the models inheriting from class:`LinearModelCV` raise an
error when any `params` are passed when routing is not enabled.
"""
X, y = make_regression(random_state=42)
groups = np.array([0, 1] * (len(y) // 2))
estimator = EstimatorCV()
msg = "is only supported if enable_metadata_routing=True"
with pytest.raises(ValueError, match=msg):
estimator.fit(X, y, groups=groups)
|
Check that the models inheriting from :class:`LinearModelCV` raise an
error when any `params` are passed while routing is not enabled.
|
test_cv_estimators_reject_params_with_no_routing_enabled
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/tests/test_coordinate_descent.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/tests/test_coordinate_descent.py
|
BSD-3-Clause
|
def test_multitask_cv_estimators_with_sample_weight(MultiTaskEstimatorCV):
"""Check that for :class:`MultiTaskElasticNetCV` and
class:`MultiTaskLassoCV` if `sample_weight` is passed and the
CV splitter does not support `sample_weight` an error is raised.
On the other hand if the splitter does support `sample_weight`
while `sample_weight` is passed there is no error and process
completes smoothly as before.
"""
class CVSplitter(GroupsConsumerMixin, BaseCrossValidator):
def get_n_splits(self, X=None, y=None, groups=None, metadata=None):
pass # pragma: nocover
class CVSplitterSampleWeight(CVSplitter):
def split(self, X, y=None, groups=None, sample_weight=None):
split_index = len(X) // 2
train_indices = list(range(0, split_index))
test_indices = list(range(split_index, len(X)))
yield test_indices, train_indices
yield train_indices, test_indices
X, y = make_regression(random_state=42, n_targets=2)
sample_weight = np.ones(X.shape[0])
# If CV splitter does not support sample_weight an error is raised
splitter = CVSplitter().set_split_request(groups=True)
estimator = MultiTaskEstimatorCV(cv=splitter)
msg = "do not support sample weights"
with pytest.raises(ValueError, match=msg):
estimator.fit(X, y, sample_weight=sample_weight)
# If CV splitter does support sample_weight no error is raised
splitter = CVSplitterSampleWeight().set_split_request(
groups=True, sample_weight=True
)
estimator = MultiTaskEstimatorCV(cv=splitter)
estimator.fit(X, y, sample_weight=sample_weight)
|
Check that for :class:`MultiTaskElasticNetCV` and
:class:`MultiTaskLassoCV`, an error is raised if `sample_weight` is
passed and the CV splitter does not support `sample_weight`.
On the other hand, if the splitter does support `sample_weight`,
no error is raised and the process completes smoothly as before.
|
test_multitask_cv_estimators_with_sample_weight
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/tests/test_coordinate_descent.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/tests/test_coordinate_descent.py
|
BSD-3-Clause
|
def test_linear_model_cv_deprecated_n_alphas(Estimator):
"""Check the deprecation of n_alphas in favor of alphas."""
X, y = make_regression(n_targets=2, random_state=42)
    # Assess the warning message raised by LinearModelCV when n_alphas is used
with pytest.warns(
FutureWarning,
match="'n_alphas' was deprecated in 1.7 and will be removed in 1.9",
):
clf = Estimator(n_alphas=5)
if clf._is_multitask():
clf = clf.fit(X, y)
else:
clf = clf.fit(X, y[:, 0])
    # Check that no warning is raised when n_alphas is not used
with warnings.catch_warnings():
warnings.simplefilter("error")
clf = Estimator(alphas=5)
if clf._is_multitask():
clf = clf.fit(X, y)
else:
clf = clf.fit(X, y[:, 0])
|
Check the deprecation of n_alphas in favor of alphas.
|
test_linear_model_cv_deprecated_n_alphas
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/tests/test_coordinate_descent.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/tests/test_coordinate_descent.py
|
BSD-3-Clause
|
def test_linear_model_cv_deprecated_alphas_none(Estimator):
"""Check the deprecation of alphas=None."""
X, y = make_regression(n_targets=2, random_state=42)
with pytest.warns(
FutureWarning, match="'alphas=None' is deprecated and will be removed in 1.9"
):
clf = Estimator(alphas=None)
if clf._is_multitask():
clf.fit(X, y)
else:
clf.fit(X, y[:, 0])
|
Check the deprecation of alphas=None.
|
test_linear_model_cv_deprecated_alphas_none
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/tests/test_coordinate_descent.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/tests/test_coordinate_descent.py
|
BSD-3-Clause
|
def test_linear_model_cv_alphas_n_alphas_unset(Estimator):
"""Check that no warning is raised when both n_alphas and alphas are unset."""
X, y = make_regression(n_targets=2, random_state=42)
    # Check that no warning is raised when n_alphas is not used
with warnings.catch_warnings():
warnings.simplefilter("error")
clf = Estimator()
if clf._is_multitask():
clf = clf.fit(X, y)
else:
clf = clf.fit(X, y[:, 0])
|
Check that no warning is raised when both n_alphas and alphas are unset.
|
test_linear_model_cv_alphas_n_alphas_unset
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/tests/test_coordinate_descent.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/tests/test_coordinate_descent.py
|
BSD-3-Clause
|
def test_linear_model_cv_alphas(Estimator):
"""Check that the behavior of alphas is consistent with n_alphas."""
X, y = make_regression(n_targets=2, random_state=42)
# n_alphas is set, alphas is not => n_alphas is used
clf = Estimator(n_alphas=5)
if clf._is_multitask():
clf.fit(X, y)
else:
clf.fit(X, y[:, 0])
assert len(clf.alphas_) == 5
# n_alphas is set, alphas is set => alphas has priority
clf = Estimator(n_alphas=5, alphas=10)
if clf._is_multitask():
clf.fit(X, y)
else:
clf.fit(X, y[:, 0])
assert len(clf.alphas_) == 10
# same with alphas array-like
clf = Estimator(n_alphas=5, alphas=np.arange(10))
if clf._is_multitask():
clf.fit(X, y)
else:
clf.fit(X, y[:, 0])
assert len(clf.alphas_) == 10
# n_alphas is not set, alphas is set => alphas is used
clf = Estimator(alphas=10)
if clf._is_multitask():
clf.fit(X, y)
else:
clf.fit(X, y[:, 0])
assert len(clf.alphas_) == 10
# same with alphas array-like
clf = Estimator(alphas=np.arange(10))
if clf._is_multitask():
clf.fit(X, y)
else:
clf.fit(X, y[:, 0])
assert len(clf.alphas_) == 10
# both are not set => default = 100
clf = Estimator()
if clf._is_multitask():
clf.fit(X, y)
else:
clf.fit(X, y[:, 0])
assert len(clf.alphas_) == 100
|
Check that the behavior of alphas is consistent with n_alphas.
|
test_linear_model_cv_alphas
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/tests/test_coordinate_descent.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/tests/test_coordinate_descent.py
|
BSD-3-Clause
|
def test_lasso_lars_copyX_behaviour(copy_X):
"""
Test that user input regarding copy_X is not being overridden (it was until
at least version 0.21)
"""
lasso_lars = LassoLarsIC(copy_X=copy_X, precompute=False)
rng = np.random.RandomState(0)
X = rng.normal(0, 1, (100, 5))
X_copy = X.copy()
y = X[:, 2]
lasso_lars.fit(X, y)
assert copy_X == np.array_equal(X, X_copy)
|
Test that user input regarding copy_X is not being overridden (it was until
at least version 0.21)
|
test_lasso_lars_copyX_behaviour
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/tests/test_least_angle.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/tests/test_least_angle.py
|
BSD-3-Clause
|
def test_lasso_lars_fit_copyX_behaviour(copy_X):
"""
Test that user input to .fit for copy_X overrides default __init__ value
"""
lasso_lars = LassoLarsIC(precompute=False)
rng = np.random.RandomState(0)
X = rng.normal(0, 1, (100, 5))
X_copy = X.copy()
y = X[:, 2]
lasso_lars.fit(X, y, copy_X=copy_X)
assert copy_X == np.array_equal(X, X_copy)
|
Test that user input to .fit for copy_X overrides default __init__ value
|
test_lasso_lars_fit_copyX_behaviour
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/tests/test_least_angle.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/tests/test_least_angle.py
|
BSD-3-Clause
|
def test_lassolarsic_alpha_selection(criterion):
"""Check that we properly compute the AIC and BIC score.
In this test, we reproduce the example of the Fig. 2 of Zou et al.
(reference [1] in LassoLarsIC) In this example, only 7 features should be
selected.
"""
model = make_pipeline(StandardScaler(), LassoLarsIC(criterion=criterion))
model.fit(X, y)
best_alpha_selected = np.argmin(model[-1].criterion_)
assert best_alpha_selected == 7
|
Check that we properly compute the AIC and BIC score.
In this test, we reproduce the example of the Fig. 2 of Zou et al.
(reference [1] in LassoLarsIC) In this example, only 7 features should be
selected.
|
test_lassolarsic_alpha_selection
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/tests/test_least_angle.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/tests/test_least_angle.py
|
BSD-3-Clause
|
def test_lassolarsic_noise_variance(fit_intercept):
"""Check the behaviour when `n_samples` < `n_features` and that one needs
to provide the noise variance."""
rng = np.random.RandomState(0)
X, y = datasets.make_regression(
n_samples=10, n_features=11 - fit_intercept, random_state=rng
)
model = make_pipeline(StandardScaler(), LassoLarsIC(fit_intercept=fit_intercept))
err_msg = (
"You are using LassoLarsIC in the case where the number of samples is smaller"
" than the number of features"
)
with pytest.raises(ValueError, match=err_msg):
model.fit(X, y)
model.set_params(lassolarsic__noise_variance=1.0)
model.fit(X, y).predict(X)
|
Check the behaviour when `n_samples` < `n_features` and that one needs
to provide the noise variance.
|
test_lassolarsic_noise_variance
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/tests/test_least_angle.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/tests/test_least_angle.py
|
BSD-3-Clause
|
def random_X_y_coef(
linear_model_loss, n_samples, n_features, coef_bound=(-2, 2), seed=42
):
"""Random generate y, X and coef in valid range."""
rng = np.random.RandomState(seed)
n_dof = n_features + linear_model_loss.fit_intercept
X = make_low_rank_matrix(
n_samples=n_samples,
n_features=n_features,
random_state=rng,
)
coef = linear_model_loss.init_zero_coef(X)
if linear_model_loss.base_loss.is_multiclass:
n_classes = linear_model_loss.base_loss.n_classes
coef.flat[:] = rng.uniform(
low=coef_bound[0],
high=coef_bound[1],
size=n_classes * n_dof,
)
if linear_model_loss.fit_intercept:
raw_prediction = X @ coef[:, :-1].T + coef[:, -1]
else:
raw_prediction = X @ coef.T
proba = linear_model_loss.base_loss.link.inverse(raw_prediction)
# y = rng.choice(np.arange(n_classes), p=proba) does not work.
# See https://stackoverflow.com/a/34190035/16761084
def choice_vectorized(items, p):
s = p.cumsum(axis=1)
r = rng.rand(p.shape[0])[:, None]
k = (s < r).sum(axis=1)
return items[k]
y = choice_vectorized(np.arange(n_classes), p=proba).astype(np.float64)
else:
coef.flat[:] = rng.uniform(
low=coef_bound[0],
high=coef_bound[1],
size=n_dof,
)
if linear_model_loss.fit_intercept:
raw_prediction = X @ coef[:-1] + coef[-1]
else:
raw_prediction = X @ coef
y = linear_model_loss.base_loss.link.inverse(
raw_prediction + rng.uniform(low=-1, high=1, size=n_samples)
)
return X, y, coef
|
Randomly generate y, X and coef in a valid range.
|
random_X_y_coef
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/tests/test_linear_loss.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/tests/test_linear_loss.py
|
BSD-3-Clause
|
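The choice_vectorized helper above works around rng.choice not accepting one probability vector per row; it draws from each row's categorical distribution via the cumulative sum. A self-contained sketch:
import numpy as np

rng = np.random.RandomState(0)
proba = np.array([[0.7, 0.2, 0.1],
                  [0.1, 0.1, 0.8]])  # one probability vector per sample
s = proba.cumsum(axis=1)             # row-wise CDF
r = rng.rand(proba.shape[0])[:, None]
k = (s < r).sum(axis=1)              # index of the first CDF value >= r
print(k)  # class drawn per row, here [0 2]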
def test_loss_grad_hess_are_the_same(
base_loss,
fit_intercept,
sample_weight,
l2_reg_strength,
csr_container,
global_random_seed,
):
"""Test that loss and gradient are the same across different functions."""
loss = LinearModelLoss(base_loss=base_loss(), fit_intercept=fit_intercept)
X, y, coef = random_X_y_coef(
linear_model_loss=loss, n_samples=10, n_features=5, seed=global_random_seed
)
X_old, y_old, coef_old = X.copy(), y.copy(), coef.copy()
if sample_weight == "range":
sample_weight = np.linspace(1, y.shape[0], num=y.shape[0])
l1 = loss.loss(
coef, X, y, sample_weight=sample_weight, l2_reg_strength=l2_reg_strength
)
g1 = loss.gradient(
coef, X, y, sample_weight=sample_weight, l2_reg_strength=l2_reg_strength
)
l2, g2 = loss.loss_gradient(
coef, X, y, sample_weight=sample_weight, l2_reg_strength=l2_reg_strength
)
g3, h3 = loss.gradient_hessian_product(
coef, X, y, sample_weight=sample_weight, l2_reg_strength=l2_reg_strength
)
g4, h4, _ = loss.gradient_hessian(
coef, X, y, sample_weight=sample_weight, l2_reg_strength=l2_reg_strength
)
assert_allclose(l1, l2)
assert_allclose(g1, g2)
assert_allclose(g1, g3)
assert_allclose(g1, g4)
# The ravelling only takes effect for multiclass.
assert_allclose(h4 @ g4.ravel(order="F"), h3(g3).ravel(order="F"))
# Test that gradient_out and hessian_out are considered properly.
g_out = np.empty_like(coef)
h_out = np.empty_like(coef, shape=(coef.size, coef.size))
g5, h5, _ = loss.gradient_hessian(
coef,
X,
y,
sample_weight=sample_weight,
l2_reg_strength=l2_reg_strength,
gradient_out=g_out,
hessian_out=h_out,
)
assert np.shares_memory(g5, g_out)
assert np.shares_memory(h5, h_out)
assert_allclose(g5, g_out)
assert_allclose(h5, h_out)
assert_allclose(g1, g5)
assert_allclose(h5, h4)
# same for sparse X
Xs = csr_container(X)
l1_sp = loss.loss(
coef, Xs, y, sample_weight=sample_weight, l2_reg_strength=l2_reg_strength
)
g1_sp = loss.gradient(
coef, Xs, y, sample_weight=sample_weight, l2_reg_strength=l2_reg_strength
)
l2_sp, g2_sp = loss.loss_gradient(
coef, Xs, y, sample_weight=sample_weight, l2_reg_strength=l2_reg_strength
)
g3_sp, h3_sp = loss.gradient_hessian_product(
coef, Xs, y, sample_weight=sample_weight, l2_reg_strength=l2_reg_strength
)
g4_sp, h4_sp, _ = loss.gradient_hessian(
coef, Xs, y, sample_weight=sample_weight, l2_reg_strength=l2_reg_strength
)
assert_allclose(l1, l1_sp)
assert_allclose(l1, l2_sp)
assert_allclose(g1, g1_sp)
assert_allclose(g1, g2_sp)
assert_allclose(g1, g3_sp)
assert_allclose(h3(g1), h3_sp(g1_sp))
assert_allclose(g1, g4_sp)
assert_allclose(h4, h4_sp)
# X, y and coef should not have changed
assert_allclose(X, X_old)
assert_allclose(Xs.toarray(), X_old)
assert_allclose(y, y_old)
assert_allclose(coef, coef_old)
|
Test that loss and gradient are the same across different functions.
|
test_loss_grad_hess_are_the_same
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/tests/test_linear_loss.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/tests/test_linear_loss.py
|
BSD-3-Clause
|
def test_loss_gradients_hessp_intercept(
base_loss, sample_weight, l2_reg_strength, X_container, global_random_seed
):
"""Test that loss and gradient handle intercept correctly."""
loss = LinearModelLoss(base_loss=base_loss(), fit_intercept=False)
loss_inter = LinearModelLoss(base_loss=base_loss(), fit_intercept=True)
n_samples, n_features = 10, 5
X, y, coef = random_X_y_coef(
linear_model_loss=loss,
n_samples=n_samples,
n_features=n_features,
seed=global_random_seed,
)
    X[:, -1] = 1  # make the last column all ones to mimic an intercept term
X_inter = X[
:, :-1
] # exclude intercept column as it is added automatically by loss_inter
if X_container is not None:
X = X_container(X)
if sample_weight == "range":
sample_weight = np.linspace(1, y.shape[0], num=y.shape[0])
l, g = loss.loss_gradient(
coef, X, y, sample_weight=sample_weight, l2_reg_strength=l2_reg_strength
)
_, hessp = loss.gradient_hessian_product(
coef, X, y, sample_weight=sample_weight, l2_reg_strength=l2_reg_strength
)
l_inter, g_inter = loss_inter.loss_gradient(
coef, X_inter, y, sample_weight=sample_weight, l2_reg_strength=l2_reg_strength
)
_, hessp_inter = loss_inter.gradient_hessian_product(
coef, X_inter, y, sample_weight=sample_weight, l2_reg_strength=l2_reg_strength
)
    # Note that the intercept gets no L2 penalty.
assert l == pytest.approx(
l_inter + 0.5 * l2_reg_strength * squared_norm(coef.T[-1])
)
g_inter_corrected = g_inter
g_inter_corrected.T[-1] += l2_reg_strength * coef.T[-1]
assert_allclose(g, g_inter_corrected)
s = np.random.RandomState(global_random_seed).randn(*coef.shape)
h = hessp(s)
h_inter = hessp_inter(s)
h_inter_corrected = h_inter
h_inter_corrected.T[-1] += l2_reg_strength * s.T[-1]
assert_allclose(h, h_inter_corrected)
|
Test that loss and gradient handle intercept correctly.
|
test_loss_gradients_hessp_intercept
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/tests/test_linear_loss.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/tests/test_linear_loss.py
|
BSD-3-Clause
|
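The intercept test above uses the fact that appending a column of ones is the same as fitting an intercept, except that the intercept receives no L2 penalty. A compact sketch of that augmentation using plain ridge-style normal equations (not LinearModelLoss; data and penalty are illustrative only):
import numpy as np

rng = np.random.RandomState(0)
X = rng.rand(30, 4)
y = X @ np.array([1.0, -1.0, 0.5, 0.0]) + 2.0 + 0.01 * rng.randn(30)
X1 = np.hstack([X, np.ones((30, 1))])  # last column mimics the intercept
P = 0.1 * np.eye(5)
P[-1, -1] = 0.0                        # do not penalize the intercept column
w = np.linalg.solve(X1.T @ X1 + P, X1.T @ y)
print(w[:-1], w[-1])                   # slope coefficients and unpenalized intercept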
def test_gradients_hessians_numerically(
base_loss, fit_intercept, sample_weight, l2_reg_strength, global_random_seed
):
"""Test gradients and hessians with numerical derivatives.
Gradient should equal the numerical derivatives of the loss function.
Hessians should equal the numerical derivatives of gradients.
"""
loss = LinearModelLoss(base_loss=base_loss(), fit_intercept=fit_intercept)
n_samples, n_features = 10, 5
X, y, coef = random_X_y_coef(
linear_model_loss=loss,
n_samples=n_samples,
n_features=n_features,
seed=global_random_seed,
)
coef = coef.ravel(order="F") # this is important only for multinomial loss
if sample_weight == "range":
sample_weight = np.linspace(1, y.shape[0], num=y.shape[0])
# 1. Check gradients numerically
eps = 1e-6
g, hessp = loss.gradient_hessian_product(
coef, X, y, sample_weight=sample_weight, l2_reg_strength=l2_reg_strength
)
# Use a trick to get central finite difference of accuracy 4 (five-point stencil)
# https://en.wikipedia.org/wiki/Numerical_differentiation
# https://en.wikipedia.org/wiki/Finite_difference_coefficient
# approx_g1 = (f(x + eps) - f(x - eps)) / (2*eps)
approx_g1 = optimize.approx_fprime(
coef,
lambda coef: loss.loss(
coef - eps,
X,
y,
sample_weight=sample_weight,
l2_reg_strength=l2_reg_strength,
),
2 * eps,
)
# approx_g2 = (f(x + 2*eps) - f(x - 2*eps)) / (4*eps)
approx_g2 = optimize.approx_fprime(
coef,
lambda coef: loss.loss(
coef - 2 * eps,
X,
y,
sample_weight=sample_weight,
l2_reg_strength=l2_reg_strength,
),
4 * eps,
)
# Five-point stencil approximation
# See: https://en.wikipedia.org/wiki/Five-point_stencil#1D_first_derivative
approx_g = (4 * approx_g1 - approx_g2) / 3
assert_allclose(g, approx_g, rtol=1e-2, atol=1e-8)
# 2. Check hessp numerically along the second direction of the gradient
vector = np.zeros_like(g)
vector[1] = 1
hess_col = hessp(vector)
# Computation of the Hessian is particularly fragile to numerical errors when doing
# simple finite differences. Here we compute the grad along a path in the direction
# of the vector and then use a least-square regression to estimate the slope
eps = 1e-3
d_x = np.linspace(-eps, eps, 30)
d_grad = np.array(
[
loss.gradient(
coef + t * vector,
X,
y,
sample_weight=sample_weight,
l2_reg_strength=l2_reg_strength,
)
for t in d_x
]
)
d_grad -= d_grad.mean(axis=0)
approx_hess_col = linalg.lstsq(d_x[:, np.newaxis], d_grad)[0].ravel()
assert_allclose(approx_hess_col, hess_col, rtol=1e-3)
|
Test gradients and hessians with numerical derivatives.
Gradient should equal the numerical derivatives of the loss function.
Hessians should equal the numerical derivatives of gradients.
|
test_gradients_hessians_numerically
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/tests/test_linear_loss.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/tests/test_linear_loss.py
|
BSD-3-Clause
|
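The numerical gradient check above combines two central differences into a five-point stencil with fourth-order accuracy. A scalar sketch of the same combination (the function is chosen arbitrarily):
import numpy as np

f, x, eps = np.sin, 0.3, 1e-3
g1 = (f(x + eps) - f(x - eps)) / (2 * eps)          # O(eps**2) error
g2 = (f(x + 2 * eps) - f(x - 2 * eps)) / (4 * eps)  # O(eps**2) error
g = (4 * g1 - g2) / 3                               # five-point stencil
print(abs(g1 - np.cos(x)), abs(g - np.cos(x)))      # the combined estimate is far more accurate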
def test_multinomial_coef_shape(fit_intercept, global_random_seed):
"""Test that multinomial LinearModelLoss respects shape of coef."""
loss = LinearModelLoss(base_loss=HalfMultinomialLoss(), fit_intercept=fit_intercept)
n_samples, n_features = 10, 5
X, y, coef = random_X_y_coef(
linear_model_loss=loss,
n_samples=n_samples,
n_features=n_features,
seed=global_random_seed,
)
s = np.random.RandomState(global_random_seed).randn(*coef.shape)
l, g = loss.loss_gradient(coef, X, y)
g1 = loss.gradient(coef, X, y)
g2, hessp = loss.gradient_hessian_product(coef, X, y)
h = hessp(s)
assert g.shape == coef.shape
assert h.shape == coef.shape
assert_allclose(g, g1)
assert_allclose(g, g2)
g3, hess, _ = loss.gradient_hessian(coef, X, y)
assert g3.shape == coef.shape
# But full hessian is always 2d.
assert hess.shape == (coef.size, coef.size)
coef_r = coef.ravel(order="F")
s_r = s.ravel(order="F")
l_r, g_r = loss.loss_gradient(coef_r, X, y)
g1_r = loss.gradient(coef_r, X, y)
g2_r, hessp_r = loss.gradient_hessian_product(coef_r, X, y)
h_r = hessp_r(s_r)
assert g_r.shape == coef_r.shape
assert h_r.shape == coef_r.shape
assert_allclose(g_r, g1_r)
assert_allclose(g_r, g2_r)
assert_allclose(g, g_r.reshape(loss.base_loss.n_classes, -1, order="F"))
assert_allclose(h, h_r.reshape(loss.base_loss.n_classes, -1, order="F"))
|
Test that multinomial LinearModelLoss respects shape of coef.
|
test_multinomial_coef_shape
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/tests/test_linear_loss.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/tests/test_linear_loss.py
|
BSD-3-Clause
|
def test_multinomial_hessian_3_classes(sample_weight, global_random_seed):
"""Test multinomial hessian for 3 classes and 2 points.
For n_classes = 3 and n_samples = 2, we have
p0 = [p0_0, p0_1]
p1 = [p1_0, p1_1]
p2 = [p2_0, p2_1]
and with 2 x 2 diagonal subblocks
H = [p0 * (1-p0), -p0 * p1, -p0 * p2]
[ -p0 * p1, p1 * (1-p1), -p1 * p2]
[ -p0 * p2, -p1 * p2, p2 * (1-p2)]
hess = X' H X
"""
n_samples, n_features, n_classes = 2, 5, 3
loss = LinearModelLoss(
base_loss=HalfMultinomialLoss(n_classes=n_classes), fit_intercept=False
)
X, y, coef = random_X_y_coef(
linear_model_loss=loss,
n_samples=n_samples,
n_features=n_features,
seed=global_random_seed,
)
coef = coef.ravel(order="F") # this is important only for multinomial loss
if sample_weight == "range":
sample_weight = np.linspace(1, y.shape[0], num=y.shape[0])
grad, hess, _ = loss.gradient_hessian(
coef,
X,
y,
sample_weight=sample_weight,
l2_reg_strength=0,
)
    # The Hessian must be a symmetric matrix.
assert_allclose(hess, hess.T)
weights, intercept, raw_prediction = loss.weight_intercept_raw(coef, X)
grad_pointwise, proba = loss.base_loss.gradient_proba(
y_true=y,
raw_prediction=raw_prediction,
sample_weight=sample_weight,
)
p0d, p1d, p2d, oned = (
np.diag(proba[:, 0]),
np.diag(proba[:, 1]),
np.diag(proba[:, 2]),
np.diag(np.ones(2)),
)
h = np.block(
[
[p0d * (oned - p0d), -p0d * p1d, -p0d * p2d],
[-p0d * p1d, p1d * (oned - p1d), -p1d * p2d],
[-p0d * p2d, -p1d * p2d, p2d * (oned - p2d)],
]
)
h = h.reshape((n_classes, n_samples, n_classes, n_samples))
if sample_weight is None:
h /= n_samples
else:
h *= sample_weight / np.sum(sample_weight)
# hess_expected.shape = (n_features, n_classes, n_classes, n_features)
hess_expected = np.einsum("ij, mini, ik->jmnk", X, h, X)
hess_expected = np.moveaxis(hess_expected, 2, 3)
hess_expected = hess_expected.reshape(
n_classes * n_features, n_classes * n_features, order="C"
)
assert_allclose(hess_expected, hess_expected.T)
assert_allclose(hess, hess_expected)
|
Test multinomial hessian for 3 classes and 2 points.
For n_classes = 3 and n_samples = 2, we have
p0 = [p0_0, p0_1]
p1 = [p1_0, p1_1]
p2 = [p2_0, p2_1]
and with 2 x 2 diagonal subblocks
H = [p0 * (1-p0), -p0 * p1, -p0 * p2]
[ -p0 * p1, p1 * (1-p1), -p1 * p2]
[ -p0 * p2, -p1 * p2, p2 * (1-p2)]
hess = X' H X
|
test_multinomial_hessian_3_classes
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/tests/test_linear_loss.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/tests/test_linear_loss.py
|
BSD-3-Clause
|
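The block structure written out in the docstring above is the per-sample softmax Hessian diag(p) - p p^T. A minimal sketch for a single sample (probabilities chosen arbitrarily):
import numpy as np

p = np.array([0.5, 0.3, 0.2])    # softmax probabilities of one sample
H = np.diag(p) - np.outer(p, p)  # p_k * (1 - p_k) on the diagonal, -p_k * p_l off it
print(np.allclose(H, H.T), np.allclose(H.sum(axis=0), 0))  # symmetric, rows sum to zero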
def test_linear_loss_gradient_hessian_raises_wrong_out_parameters():
"""Test that wrong gradient_out and hessian_out raises errors."""
n_samples, n_features, n_classes = 5, 2, 3
loss = LinearModelLoss(base_loss=HalfBinomialLoss(), fit_intercept=False)
X = np.ones((n_samples, n_features))
y = np.ones(n_samples)
coef = loss.init_zero_coef(X)
gradient_out = np.zeros(1)
with pytest.raises(
ValueError, match="gradient_out is required to have shape coef.shape"
):
loss.gradient_hessian(
coef=coef,
X=X,
y=y,
gradient_out=gradient_out,
hessian_out=None,
)
hessian_out = np.zeros(1)
with pytest.raises(ValueError, match="hessian_out is required to have shape"):
loss.gradient_hessian(
coef=coef,
X=X,
y=y,
gradient_out=None,
hessian_out=hessian_out,
)
loss = LinearModelLoss(base_loss=HalfMultinomialLoss(), fit_intercept=False)
coef = loss.init_zero_coef(X)
gradient_out = np.zeros((2 * n_classes, n_features))[::2]
with pytest.raises(ValueError, match="gradient_out must be F-contiguous"):
loss.gradient_hessian(
coef=coef,
X=X,
y=y,
gradient_out=gradient_out,
)
hessian_out = np.zeros((2 * n_classes * n_features, n_classes * n_features))[::2]
with pytest.raises(ValueError, match="hessian_out must be contiguous"):
loss.gradient_hessian(
coef=coef,
X=X,
y=y,
gradient_out=None,
hessian_out=hessian_out,
)
|
Test that wrong gradient_out and hessian_out raises errors.
|
test_linear_loss_gradient_hessian_raises_wrong_out_parameters
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/tests/test_linear_loss.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/tests/test_linear_loss.py
|
BSD-3-Clause
|
def check_predictions(clf, X, y):
"""Check that the model is able to fit the classification data"""
n_samples = len(y)
classes = np.unique(y)
n_classes = classes.shape[0]
predicted = clf.fit(X, y).predict(X)
assert_array_equal(clf.classes_, classes)
assert predicted.shape == (n_samples,)
assert_array_equal(predicted, y)
probabilities = clf.predict_proba(X)
assert probabilities.shape == (n_samples, n_classes)
assert_array_almost_equal(probabilities.sum(axis=1), np.ones(n_samples))
assert_array_equal(probabilities.argmax(axis=1), y)
|
Check that the model is able to fit the classification data
|
check_predictions
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/tests/test_logistic.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/tests/test_logistic.py
|
BSD-3-Clause
|
def test_predict_iris(clf):
"""Test logistic regression with the iris dataset.
Test that both multinomial and OvR solvers handle multiclass data correctly and
give good accuracy score (>0.95) for the training data.
"""
n_samples, n_features = iris.data.shape
target = iris.target_names[iris.target]
if clf.solver == "lbfgs":
# lbfgs has convergence issues on the iris data with its default max_iter=100
with warnings.catch_warnings():
warnings.simplefilter("ignore", ConvergenceWarning)
clf.fit(iris.data, target)
else:
clf.fit(iris.data, target)
assert_array_equal(np.unique(target), clf.classes_)
pred = clf.predict(iris.data)
assert np.mean(pred == target) > 0.95
probabilities = clf.predict_proba(iris.data)
assert_allclose(probabilities.sum(axis=1), np.ones(n_samples))
pred = iris.target_names[probabilities.argmax(axis=1)]
assert np.mean(pred == target) > 0.95
|
Test logistic regression with the iris dataset.
Test that both multinomial and OvR solvers handle multiclass data correctly and
give good accuracy score (>0.95) for the training data.
|
test_predict_iris
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/tests/test_logistic.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/tests/test_logistic.py
|
BSD-3-Clause
|
def test_logistic_regression_solvers():
"""Test solvers converge to the same result."""
X, y = make_classification(n_features=10, n_informative=5, random_state=0)
params = dict(fit_intercept=False, random_state=42)
regressors = {
solver: LogisticRegression(solver=solver, **params).fit(X, y)
for solver in SOLVERS
}
for solver_1, solver_2 in itertools.combinations(regressors, r=2):
assert_array_almost_equal(
regressors[solver_1].coef_, regressors[solver_2].coef_, decimal=3
)
|
Test solvers converge to the same result.
|
test_logistic_regression_solvers
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/tests/test_logistic.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/tests/test_logistic.py
|
BSD-3-Clause
|
def test_logistic_regression_solvers_multiclass(fit_intercept):
"""Test solvers converge to the same result for multiclass problems."""
X, y = make_classification(
n_samples=20, n_features=20, n_informative=10, n_classes=3, random_state=0
)
tol = 1e-8
params = dict(fit_intercept=fit_intercept, tol=tol, random_state=42)
# Override max iteration count for specific solvers to allow for
# proper convergence.
solver_max_iter = {"lbfgs": 200, "sag": 10_000, "saga": 10_000}
regressors = {
solver: LogisticRegression(
solver=solver, max_iter=solver_max_iter.get(solver, 100), **params
).fit(X, y)
for solver in set(SOLVERS) - set(["liblinear"])
}
for solver_1, solver_2 in itertools.combinations(regressors, r=2):
assert_allclose(
regressors[solver_1].coef_,
regressors[solver_2].coef_,
rtol=5e-3 if (solver_1 == "saga" or solver_2 == "saga") else 1e-3,
err_msg=f"{solver_1} vs {solver_2}",
)
if fit_intercept:
assert_allclose(
regressors[solver_1].intercept_,
regressors[solver_2].intercept_,
rtol=5e-3 if (solver_1 == "saga" or solver_2 == "saga") else 1e-3,
err_msg=f"{solver_1} vs {solver_2}",
)
|
Test solvers converge to the same result for multiclass problems.
|
test_logistic_regression_solvers_multiclass
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/tests/test_logistic.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/tests/test_logistic.py
|
BSD-3-Clause
|
def test_logistic_regression_solvers_multiclass_unpenalized(
fit_intercept, global_random_seed
):
"""Test and compare solver results for unpenalized multinomial multiclass."""
# We want to avoid perfect separation.
n_samples, n_features, n_classes = 100, 4, 3
rng = np.random.RandomState(global_random_seed)
X = make_low_rank_matrix(
n_samples=n_samples,
n_features=n_features + fit_intercept,
effective_rank=n_features + fit_intercept,
tail_strength=0.1,
random_state=rng,
)
if fit_intercept:
X[:, -1] = 1
U, s, Vt = svd(X)
assert np.all(s > 1e-3) # to be sure that X is not singular
assert np.max(s) / np.min(s) < 100 # condition number of X
if fit_intercept:
X = X[:, :-1]
coef = rng.uniform(low=1, high=3, size=n_features * n_classes)
coef = coef.reshape(n_classes, n_features)
intercept = rng.uniform(low=-1, high=1, size=n_classes) * fit_intercept
raw_prediction = X @ coef.T + intercept
loss = HalfMultinomialLoss(n_classes=n_classes)
proba = loss.link.inverse(raw_prediction)
    # Only newer numpy versions (>= 1.22) support more dimensions on pvals.
y = np.zeros(n_samples)
for i in range(n_samples):
y[i] = np.argwhere(rng.multinomial(n=1, pvals=proba[i, :]))[0, 0]
tol = 1e-9
params = dict(fit_intercept=fit_intercept, random_state=42)
solver_max_iter = {"lbfgs": 200, "sag": 10_000, "saga": 10_000}
solver_tol = {"sag": 1e-8, "saga": 1e-8}
regressors = {
solver: LogisticRegression(
C=np.inf,
solver=solver,
tol=solver_tol.get(solver, tol),
max_iter=solver_max_iter.get(solver, 100),
**params,
).fit(X, y)
for solver in set(SOLVERS) - set(["liblinear"])
}
for solver in regressors.keys():
# See the docstring of test_multinomial_identifiability_on_iris for reference.
assert_allclose(
regressors[solver].coef_.sum(axis=0), 0, atol=1e-10, err_msg=solver
)
for solver_1, solver_2 in itertools.combinations(regressors, r=2):
assert_allclose(
regressors[solver_1].coef_,
regressors[solver_2].coef_,
rtol=5e-3 if (solver_1 == "saga" or solver_2 == "saga") else 2e-3,
err_msg=f"{solver_1} vs {solver_2}",
)
if fit_intercept:
assert_allclose(
regressors[solver_1].intercept_,
regressors[solver_2].intercept_,
rtol=5e-3 if (solver_1 == "saga" or solver_2 == "saga") else 1e-3,
err_msg=f"{solver_1} vs {solver_2}",
)
|
Test and compare solver results for unpenalized multinomial multiclass.
|
test_logistic_regression_solvers_multiclass_unpenalized
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/tests/test_logistic.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/tests/test_logistic.py
|
BSD-3-Clause
|
def test_multinomial_identifiability_on_iris(solver, fit_intercept):
"""Test that the multinomial classification is identifiable.
A multinomial with c classes can be modeled with
probability_k = exp(X@coef_k) / sum(exp(X@coef_l), l=1..c) for k=1..c.
This is not identifiable, unless one chooses a further constraint.
According to [1], the maximum of the L2 penalized likelihood automatically
satisfies the symmetric constraint:
sum(coef_k, k=1..c) = 0
Further details can be found in [2].
Reference
---------
.. [1] :doi:`Zhu, Ji and Trevor J. Hastie. "Classification of gene microarrays by
penalized logistic regression". Biostatistics 5 3 (2004): 427-43.
<10.1093/biostatistics/kxg046>`
.. [2] :arxiv:`Noah Simon and Jerome Friedman and Trevor Hastie. (2013)
"A Blockwise Descent Algorithm for Group-penalized Multiresponse and
Multinomial Regression". <1311.6529>`
"""
# Test logistic regression with the iris dataset
n_samples, n_features = iris.data.shape
target = iris.target_names[iris.target]
clf = LogisticRegression(
C=len(iris.data),
solver="lbfgs",
fit_intercept=fit_intercept,
)
# Scaling X to ease convergence.
X_scaled = scale(iris.data)
clf.fit(X_scaled, target)
# axis=0 is sum over classes
assert_allclose(clf.coef_.sum(axis=0), 0, atol=1e-10)
if fit_intercept:
assert clf.intercept_.sum(axis=0) == pytest.approx(0, abs=1e-11)
|
Test that the multinomial classification is identifiable.
A multinomial with c classes can be modeled with
probability_k = exp(X@coef_k) / sum(exp(X@coef_l), l=1..c) for k=1..c.
This is not identifiable, unless one chooses a further constraint.
According to [1], the maximum of the L2 penalized likelihood automatically
satisfies the symmetric constraint:
sum(coef_k, k=1..c) = 0
Further details can be found in [2].
Reference
---------
.. [1] :doi:`Zhu, Ji and Trevor J. Hastie. "Classification of gene microarrays by
penalized logistic regression". Biostatistics 5 3 (2004): 427-43.
<10.1093/biostatistics/kxg046>`
.. [2] :arxiv:`Noah Simon and Jerome Friedman and Trevor Hastie. (2013)
"A Blockwise Descent Algorithm for Group-penalized Multiresponse and
Multinomial Regression". <1311.6529>`
|
test_multinomial_identifiability_on_iris
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/tests/test_logistic.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/tests/test_logistic.py
|
BSD-3-Clause
|
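The symmetric constraint sum(coef_k, k=1..c) = 0 can be observed on any L2-penalized multinomial fit, not only on iris. A quick sketch on synthetic data (dataset parameters and tolerances are illustrative):
import numpy as np
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression

X, y = make_classification(n_samples=200, n_features=6, n_informative=4,
                           n_classes=3, random_state=0)
clf = LogisticRegression(C=1.0, tol=1e-8, max_iter=5000).fit(X, y)
print(np.abs(clf.coef_.sum(axis=0)).max())  # should be close to zero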
def test_lr_cv_scores_differ_when_sample_weight_is_requested():
"""Test that `sample_weight` is correctly passed to the scorer in
`LogisticRegressionCV.fit` and `LogisticRegressionCV.score` by
checking the difference in scores with the case when `sample_weight`
is not requested.
"""
rng = np.random.RandomState(10)
X, y = make_classification(n_samples=10, random_state=rng)
X_t, y_t = make_classification(n_samples=10, random_state=rng)
sample_weight = np.ones(len(y))
sample_weight[: len(y) // 2] = 2
kwargs = {"sample_weight": sample_weight}
scorer1 = get_scorer("accuracy")
lr_cv1 = LogisticRegressionCV(scoring=scorer1)
lr_cv1.fit(X, y, **kwargs)
scorer2 = get_scorer("accuracy")
scorer2.set_score_request(sample_weight=True)
lr_cv2 = LogisticRegressionCV(scoring=scorer2)
lr_cv2.fit(X, y, **kwargs)
assert not np.allclose(lr_cv1.scores_[1], lr_cv2.scores_[1])
score_1 = lr_cv1.score(X_t, y_t, **kwargs)
score_2 = lr_cv2.score(X_t, y_t, **kwargs)
assert not np.allclose(score_1, score_2)
|
Test that `sample_weight` is correctly passed to the scorer in
`LogisticRegressionCV.fit` and `LogisticRegressionCV.score` by
checking the difference in scores with the case when `sample_weight`
is not requested.
|
test_lr_cv_scores_differ_when_sample_weight_is_requested
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/tests/test_logistic.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/tests/test_logistic.py
|
BSD-3-Clause
|
def test_lr_cv_scores_without_enabling_metadata_routing():
"""Test that `sample_weight` is passed correctly to the scorer in
`LogisticRegressionCV.fit` and `LogisticRegressionCV.score` even
when `enable_metadata_routing=False`
"""
rng = np.random.RandomState(10)
X, y = make_classification(n_samples=10, random_state=rng)
X_t, y_t = make_classification(n_samples=10, random_state=rng)
sample_weight = np.ones(len(y))
sample_weight[: len(y) // 2] = 2
kwargs = {"sample_weight": sample_weight}
with config_context(enable_metadata_routing=False):
scorer1 = get_scorer("accuracy")
lr_cv1 = LogisticRegressionCV(scoring=scorer1)
lr_cv1.fit(X, y, **kwargs)
score_1 = lr_cv1.score(X_t, y_t, **kwargs)
with config_context(enable_metadata_routing=True):
scorer2 = get_scorer("accuracy")
scorer2.set_score_request(sample_weight=True)
lr_cv2 = LogisticRegressionCV(scoring=scorer2)
lr_cv2.fit(X, y, **kwargs)
score_2 = lr_cv2.score(X_t, y_t, **kwargs)
assert_allclose(lr_cv1.scores_[1], lr_cv2.scores_[1])
assert_allclose(score_1, score_2)
|
Test that `sample_weight` is passed correctly to the scorer in
`LogisticRegressionCV.fit` and `LogisticRegressionCV.score` even
when `enable_metadata_routing=False`
|
test_lr_cv_scores_without_enabling_metadata_routing
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/tests/test_logistic.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/tests/test_logistic.py
|
BSD-3-Clause
|
def test_passing_params_without_enabling_metadata_routing():
"""Test that the right error message is raised when metadata params
are passed while not supported when `enable_metadata_routing=False`."""
X, y = make_classification(n_samples=10, random_state=0)
lr_cv = LogisticRegressionCV()
msg = "is only supported if enable_metadata_routing=True"
with config_context(enable_metadata_routing=False):
params = {"extra_param": 1.0}
with pytest.raises(ValueError, match=msg):
lr_cv.fit(X, y, **params)
with pytest.raises(ValueError, match=msg):
lr_cv.score(X, y, **params)
|
Test that the right error message is raised when metadata params
are passed while not supported when `enable_metadata_routing=False`.
|
test_passing_params_without_enabling_metadata_routing
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/tests/test_logistic.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/tests/test_logistic.py
|
BSD-3-Clause
|
def test_liblinear_multiclass_warning(Estimator):
"""Check that liblinear warns on multiclass problems."""
msg = (
"Using the 'liblinear' solver for multiclass classification is "
"deprecated. An error will be raised in 1.8. Either use another "
"solver which supports the multinomial loss or wrap the estimator "
"in a OneVsRestClassifier to keep applying a one-versus-rest "
"scheme."
)
with pytest.warns(FutureWarning, match=msg):
Estimator(solver="liblinear").fit(iris.data, iris.target)
|
Check that liblinear warns on multiclass problems.
|
test_liblinear_multiclass_warning
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/tests/test_logistic.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/tests/test_logistic.py
|
BSD-3-Clause
|
def test_estimator_n_nonzero_coefs():
"""Check `n_nonzero_coefs_` correct when `tol` is and isn't set."""
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_nonzero_coefs)
omp.fit(X, y[:, 0])
assert omp.n_nonzero_coefs_ == n_nonzero_coefs
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_nonzero_coefs, tol=0.5)
omp.fit(X, y[:, 0])
assert omp.n_nonzero_coefs_ is None
|
Check that `n_nonzero_coefs_` is correct when `tol` is and isn't set.
|
test_estimator_n_nonzero_coefs
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/tests/test_omp.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/tests/test_omp.py
|
BSD-3-Clause
|
def test_perceptron_l1_ratio():
"""Check that `l1_ratio` has an impact when `penalty='elasticnet'`"""
clf1 = Perceptron(l1_ratio=0, penalty="elasticnet")
clf1.fit(X, y)
clf2 = Perceptron(l1_ratio=0.15, penalty="elasticnet")
clf2.fit(X, y)
assert clf1.score(X, y) != clf2.score(X, y)
    # check the bounds of elastic net, which should correspond to an l1 or
    # l2 penalty depending on the `l1_ratio` value.
clf_l1 = Perceptron(penalty="l1").fit(X, y)
clf_elasticnet = Perceptron(l1_ratio=1, penalty="elasticnet").fit(X, y)
assert_allclose(clf_l1.coef_, clf_elasticnet.coef_)
clf_l2 = Perceptron(penalty="l2").fit(X, y)
clf_elasticnet = Perceptron(l1_ratio=0, penalty="elasticnet").fit(X, y)
assert_allclose(clf_l2.coef_, clf_elasticnet.coef_)
|
Check that `l1_ratio` has an impact when `penalty='elasticnet'`
|
test_perceptron_l1_ratio
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/tests/test_perceptron.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/tests/test_perceptron.py
|
BSD-3-Clause
|
def test_asymmetric_error(quantile):
"""Test quantile regression for asymmetric distributed targets."""
n_samples = 1000
rng = np.random.RandomState(42)
X = np.concatenate(
(
np.abs(rng.randn(n_samples)[:, None]),
-rng.randint(2, size=(n_samples, 1)),
),
axis=1,
)
intercept = 1.23
coef = np.array([0.5, -2])
# Take care that X @ coef + intercept > 0
assert np.min(X @ coef + intercept) > 0
    # For an exponential distribution with rate lambda, i.e. survival function exp(-lambda * x),
# the quantile at level q is:
# quantile(q) = - log(1 - q) / lambda
# scale = 1/lambda = -quantile(q) / log(1 - q)
y = rng.exponential(
scale=-(X @ coef + intercept) / np.log(1 - quantile), size=n_samples
)
model = QuantileRegressor(
quantile=quantile,
alpha=0,
).fit(X, y)
# This test can be made to pass with any solver but in the interest
# of sparing continuous integration resources, the test is performed
# with the fastest solver only.
assert model.intercept_ == approx(intercept, rel=0.2)
assert_allclose(model.coef_, coef, rtol=0.6)
assert_allclose(np.mean(model.predict(X) > y), quantile, atol=1e-2)
# Now compare to Nelder-Mead optimization with L1 penalty
alpha = 0.01
model.set_params(alpha=alpha).fit(X, y)
model_coef = np.r_[model.intercept_, model.coef_]
def func(coef):
loss = mean_pinball_loss(y, X @ coef[1:] + coef[0], alpha=quantile)
L1 = np.sum(np.abs(coef[1:]))
return loss + alpha * L1
res = minimize(
fun=func,
x0=[1, 0, -1],
method="Nelder-Mead",
tol=1e-12,
options={"maxiter": 2000},
)
assert func(model_coef) == approx(func(res.x))
assert_allclose(model.intercept_, res.x[0])
assert_allclose(model.coef_, res.x[1:])
assert_allclose(np.mean(model.predict(X) > y), quantile, atol=1e-2)
|
Test quantile regression for asymmetric distributed targets.
|
test_asymmetric_error
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/tests/test_quantile.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/tests/test_quantile.py
|
BSD-3-Clause
|
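The scale used in the test above comes from inverting the exponential distribution: with rate lambda, quantile(q) = -log(1 - q) / lambda. A quick numerical check of that identity (sample size chosen arbitrarily):
import numpy as np

rng = np.random.RandomState(0)
q, lam = 0.8, 2.0
samples = rng.exponential(scale=1 / lam, size=100_000)
print(np.quantile(samples, q), -np.log(1 - q) / lam)  # both approximately 0.80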
def test_equivariance(quantile):
"""Test equivariace of quantile regression.
See Koenker (2005) Quantile Regression, Chapter 2.2.3.
"""
rng = np.random.RandomState(42)
n_samples, n_features = 100, 5
X, y = make_regression(
n_samples=n_samples,
n_features=n_features,
n_informative=n_features,
noise=0,
random_state=rng,
shuffle=False,
)
# make y asymmetric
y += rng.exponential(scale=100, size=y.shape)
params = dict(alpha=0)
model1 = QuantileRegressor(quantile=quantile, **params).fit(X, y)
# coef(q; a*y, X) = a * coef(q; y, X)
a = 2.5
model2 = QuantileRegressor(quantile=quantile, **params).fit(X, a * y)
assert model2.intercept_ == approx(a * model1.intercept_, rel=1e-5)
assert_allclose(model2.coef_, a * model1.coef_, rtol=1e-5)
# coef(1-q; -a*y, X) = -a * coef(q; y, X)
model2 = QuantileRegressor(quantile=1 - quantile, **params).fit(X, -a * y)
assert model2.intercept_ == approx(-a * model1.intercept_, rel=1e-5)
assert_allclose(model2.coef_, -a * model1.coef_, rtol=1e-5)
# coef(q; y + X @ g, X) = coef(q; y, X) + g
g_intercept, g_coef = rng.randn(), rng.randn(n_features)
model2 = QuantileRegressor(quantile=quantile, **params)
model2.fit(X, y + X @ g_coef + g_intercept)
assert model2.intercept_ == approx(model1.intercept_ + g_intercept)
assert_allclose(model2.coef_, model1.coef_ + g_coef, rtol=1e-6)
# coef(q; y, X @ A) = A^-1 @ coef(q; y, X)
A = rng.randn(n_features, n_features)
model2 = QuantileRegressor(quantile=quantile, **params)
model2.fit(X @ A, y)
assert model2.intercept_ == approx(model1.intercept_, rel=1e-5)
assert_allclose(model2.coef_, np.linalg.solve(A, model1.coef_), rtol=1e-5)
|
Test equivariance of quantile regression.
See Koenker (2005) Quantile Regression, Chapter 2.2.3.
|
test_equivariance
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/tests/test_quantile.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/tests/test_quantile.py
|
BSD-3-Clause
|
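For reference, the properties exercised above can be written compactly; this is a sketch of the statements in Koenker (2005), Ch. 2.2.3, for the coefficient vector \hat{\beta}(q; y, X), with the intercept transforming correspondingly as in the assertions above:

\hat{\beta}(q;\, a\,y,\, X) = a\,\hat{\beta}(q;\, y,\, X), \qquad a > 0
\hat{\beta}(1 - q;\, -a\,y,\, X) = -a\,\hat{\beta}(q;\, y,\, X), \qquad a > 0
\hat{\beta}(q;\, y + X\gamma,\, X) = \hat{\beta}(q;\, y,\, X) + \gamma
\hat{\beta}(q;\, y,\, X A) = A^{-1}\,\hat{\beta}(q;\, y,\, X), \qquad A \text{ nonsingular}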
def test_sparse_input(sparse_container, solver, fit_intercept, global_random_seed):
"""Test that sparse and dense X give same results."""
n_informative = 10
quantile_level = 0.6
X, y = make_regression(
n_samples=300,
n_features=20,
        n_informative=n_informative,
random_state=global_random_seed,
noise=1.0,
)
X_sparse = sparse_container(X)
alpha = 0.1
quant_dense = QuantileRegressor(
quantile=quantile_level, alpha=alpha, fit_intercept=fit_intercept
).fit(X, y)
quant_sparse = QuantileRegressor(
quantile=quantile_level, alpha=alpha, fit_intercept=fit_intercept, solver=solver
).fit(X_sparse, y)
assert_allclose(quant_sparse.coef_, quant_dense.coef_, rtol=1e-2)
sparse_support = quant_sparse.coef_ != 0
dense_support = quant_dense.coef_ != 0
assert dense_support.sum() == pytest.approx(n_informative, abs=1)
assert sparse_support.sum() == pytest.approx(n_informative, abs=1)
if fit_intercept:
assert quant_sparse.intercept_ == approx(quant_dense.intercept_)
# check that we still predict fraction
empirical_coverage = np.mean(y < quant_sparse.predict(X_sparse))
assert empirical_coverage == approx(quantile_level, abs=3e-2)
|
Test that sparse and dense X give same results.
|
test_sparse_input
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/tests/test_quantile.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/tests/test_quantile.py
|
BSD-3-Clause
|
def test_error_interior_point_future(X_y_data, monkeypatch):
"""Check that we will raise a proper error when requesting
`solver='interior-point'` in SciPy >= 1.11.
"""
X, y = X_y_data
import sklearn.linear_model._quantile
with monkeypatch.context() as m:
m.setattr(sklearn.linear_model._quantile, "sp_version", parse_version("1.11.0"))
err_msg = "Solver interior-point is not anymore available in SciPy >= 1.11.0."
with pytest.raises(ValueError, match=err_msg):
QuantileRegressor(solver="interior-point").fit(X, y)
|
Check that we will raise a proper error when requesting
`solver='interior-point'` in SciPy >= 1.11.
|
test_error_interior_point_future
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/tests/test_quantile.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/tests/test_quantile.py
|
BSD-3-Clause
|
def test_perfect_horizontal_line():
"""Check that we can fit a line where all samples are inliers.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/19497
"""
X = np.arange(100)[:, None]
y = np.zeros((100,))
estimator = LinearRegression()
ransac_estimator = RANSACRegressor(estimator, random_state=0)
ransac_estimator.fit(X, y)
assert_allclose(ransac_estimator.estimator_.coef_, 0.0)
assert_allclose(ransac_estimator.estimator_.intercept_, 0.0)
|
Check that we can fit a line where all samples are inliers.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/19497
|
test_perfect_horizontal_line
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/tests/test_ransac.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/tests/test_ransac.py
|
BSD-3-Clause
|
def ols_ridge_dataset(global_random_seed, request):
"""Dataset with OLS and Ridge solutions, well conditioned X.
The construction is based on the SVD decomposition of X = U S V'.
Parameters
----------
type : {"long", "wide"}
If "long", then n_samples > n_features.
If "wide", then n_features > n_samples.
For "wide", we return the minimum norm solution w = X' (XX')^-1 y:
min ||w||_2 subject to X w = y
Returns
-------
    X : ndarray
        Last column of ones, i.e. the intercept column.
    y : ndarray
    coef_ols : ndarray of shape (n_features,)
        Minimum norm OLS solution, i.e. min ||X w - y||_2^2 (with minimum ||w||_2 in
        case of ambiguity).
        Last coefficient is intercept.
    coef_ridge : ndarray of shape (n_features,)
        Ridge solution with alpha=1, i.e. min ||X w - y||_2^2 + ||w||_2^2.
        Last coefficient is intercept.
"""
    # Make the larger dimension more than twice as big as the smaller one.
# This helps when constructing singular matrices like (X, X).
if request.param == "long":
n_samples, n_features = 12, 4
else:
n_samples, n_features = 4, 12
k = min(n_samples, n_features)
rng = np.random.RandomState(global_random_seed)
X = make_low_rank_matrix(
n_samples=n_samples, n_features=n_features, effective_rank=k, random_state=rng
)
    X[:, -1] = 1  # last column acts as intercept
U, s, Vt = linalg.svd(X)
assert np.all(s > 1e-3) # to be sure
U1, U2 = U[:, :k], U[:, k:]
Vt1, _ = Vt[:k, :], Vt[k:, :]
if request.param == "long":
# Add a term that vanishes in the product X'y
coef_ols = rng.uniform(low=-10, high=10, size=n_features)
y = X @ coef_ols
y += U2 @ rng.normal(size=n_samples - n_features) ** 2
else:
y = rng.uniform(low=-10, high=10, size=n_samples)
# w = X'(XX')^-1 y = V s^-1 U' y
coef_ols = Vt1.T @ np.diag(1 / s) @ U1.T @ y
# Add penalty alpha * ||coef||_2^2 for alpha=1 and solve via normal equations.
# Note that the problem is well conditioned such that we get accurate results.
alpha = 1
d = alpha * np.identity(n_features)
d[-1, -1] = 0 # intercept gets no penalty
coef_ridge = linalg.solve(X.T @ X + d, X.T @ y)
# To be sure
R_OLS = y - X @ coef_ols
R_Ridge = y - X @ coef_ridge
assert np.linalg.norm(R_OLS) < np.linalg.norm(R_Ridge)
return X, y, coef_ols, coef_ridge
|
Dataset with OLS and Ridge solutions, well conditioned X.
The construction is based on the SVD decomposition of X = U S V'.
Parameters
----------
type : {"long", "wide"}
If "long", then n_samples > n_features.
If "wide", then n_features > n_samples.
For "wide", we return the minimum norm solution w = X' (XX')^-1 y:
min ||w||_2 subject to X w = y
Returns
-------
X : ndarray
Last column of ones, i.e. the intercept column.
y : ndarray
coef_ols : ndarray of shape (n_features,)
Minimum norm OLS solution, i.e. min ||X w - y||_2^2 (with minimum ||w||_2 in
case of ambiguity).
Last coefficient is intercept.
coef_ridge : ndarray of shape (n_features,)
Ridge solution with alpha=1, i.e. min ||X w - y||_2^2 + ||w||_2^2.
Last coefficient is intercept.
|
ols_ridge_dataset
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/tests/test_ridge.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/tests/test_ridge.py
|
BSD-3-Clause
|
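The closed forms used by this fixture, the minimum-norm OLS solution w = V s^-1 U' y and the ridge normal equations (X'X + alpha I) w = X'y, can be cross-checked against the pseudoinverse on any small wide matrix. A minimal sketch assuming only NumPy/SciPy (the shapes below are illustrative, not the fixture's):

import numpy as np
from scipy import linalg

rng = np.random.RandomState(0)
n_samples, n_features = 4, 6          # wide case: more features than samples
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)

# Minimum norm OLS via the thin SVD, as in the fixture: w = V s^-1 U' y.
U, s, Vt = linalg.svd(X, full_matrices=False)
coef_ols = Vt.T @ np.diag(1 / s) @ U.T @ y
np.testing.assert_allclose(coef_ols, np.linalg.pinv(X) @ y)

# Ridge with alpha=1 via the normal equations (no unpenalized intercept here).
alpha = 1.0
coef_ridge = linalg.solve(X.T @ X + alpha * np.identity(n_features), X.T @ y)

# Ridge shrinks: smaller coefficient norm, larger (or equal) residual than OLS.
assert np.linalg.norm(coef_ridge) < np.linalg.norm(coef_ols)
assert np.linalg.norm(y - X @ coef_ridge) >= np.linalg.norm(y - X @ coef_ols)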
def test_ridge_regression(solver, fit_intercept, ols_ridge_dataset, global_random_seed):
"""Test that Ridge converges for all solvers to correct solution.
We work with a simple constructed data set with known solution.
"""
X, y, _, coef = ols_ridge_dataset
alpha = 1.0 # because ols_ridge_dataset uses this.
params = dict(
alpha=alpha,
fit_intercept=True,
solver=solver,
tol=1e-15 if solver in ("sag", "saga") else 1e-10,
random_state=global_random_seed,
)
# Calculate residuals and R2.
res_null = y - np.mean(y)
res_Ridge = y - X @ coef
R2_Ridge = 1 - np.sum(res_Ridge**2) / np.sum(res_null**2)
model = Ridge(**params)
X = X[:, :-1] # remove intercept
if fit_intercept:
intercept = coef[-1]
else:
X = X - X.mean(axis=0)
y = y - y.mean()
intercept = 0
model.fit(X, y)
coef = coef[:-1]
assert model.intercept_ == pytest.approx(intercept)
assert_allclose(model.coef_, coef)
assert model.score(X, y) == pytest.approx(R2_Ridge)
# Same with sample_weight.
model = Ridge(**params).fit(X, y, sample_weight=np.ones(X.shape[0]))
assert model.intercept_ == pytest.approx(intercept)
assert_allclose(model.coef_, coef)
assert model.score(X, y) == pytest.approx(R2_Ridge)
assert model.solver_ == solver
|
Test that Ridge converges for all solvers to correct solution.
We work with a simple constructed data set with known solution.
|
test_ridge_regression
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/tests/test_ridge.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/tests/test_ridge.py
|
BSD-3-Clause
|
def test_ridge_regression_hstacked_X(
solver, fit_intercept, ols_ridge_dataset, global_random_seed
):
"""Test that Ridge converges for all solvers to correct solution on hstacked data.
We work with a simple constructed data set with known solution.
Fit on [X] with alpha is the same as fit on [X, X]/2 with alpha/2.
For long X, [X, X] is a singular matrix.
"""
X, y, _, coef = ols_ridge_dataset
n_samples, n_features = X.shape
alpha = 1.0 # because ols_ridge_dataset uses this.
model = Ridge(
alpha=alpha / 2,
fit_intercept=fit_intercept,
solver=solver,
tol=1e-15 if solver in ("sag", "saga") else 1e-10,
random_state=global_random_seed,
)
X = X[:, :-1] # remove intercept
X = 0.5 * np.concatenate((X, X), axis=1)
assert np.linalg.matrix_rank(X) <= min(n_samples, n_features - 1)
if fit_intercept:
intercept = coef[-1]
else:
X = X - X.mean(axis=0)
y = y - y.mean()
intercept = 0
model.fit(X, y)
coef = coef[:-1]
assert model.intercept_ == pytest.approx(intercept)
# coefficients are not all on the same magnitude, adding a small atol to
# make this test less brittle
assert_allclose(model.coef_, np.r_[coef, coef], atol=1e-8)
|
Test that Ridge converges for all solvers to correct solution on hstacked data.
We work with a simple constructed data set with known solution.
Fit on [X] with alpha is the same as fit on [X, X]/2 with alpha/2.
For long X, [X, X] is a singular matrix.
|
test_ridge_regression_hstacked_X
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/tests/test_ridge.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/tests/test_ridge.py
|
BSD-3-Clause
|
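Why fitting alpha on X matches fitting alpha/2 on [X, X]/2 in the hstacked test above (a sketch of the argument, not part of the test file): writing the stacked coefficients as (w_1, w_2), the ridge objective becomes

\left\| y - \tfrac{1}{2} X (w_1 + w_2) \right\|_2^2 + \tfrac{\alpha}{2} \left( \|w_1\|_2^2 + \|w_2\|_2^2 \right).

For a fixed sum w_1 + w_2 the penalty is minimized at w_1 = w_2 =: w, which reduces the objective to \|y - X w\|_2^2 + \alpha \|w\|_2^2, so the stacked problem is minimized at (w^*, w^*) with w^* the original ridge solution; this is exactly the `np.r_[coef, coef]` assertion above.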
def test_ridge_regression_vstacked_X(
solver, fit_intercept, ols_ridge_dataset, global_random_seed
):
"""Test that Ridge converges for all solvers to correct solution on vstacked data.
We work with a simple constructed data set with known solution.
    Fit on X, y with alpha is the same as fit on the row-wise duplicated data
    [X; X], [y; y] with 2 * alpha.
For wide X, [X', X'] is a singular matrix.
"""
X, y, _, coef = ols_ridge_dataset
n_samples, n_features = X.shape
alpha = 1.0 # because ols_ridge_dataset uses this.
model = Ridge(
alpha=2 * alpha,
fit_intercept=fit_intercept,
solver=solver,
tol=1e-15 if solver in ("sag", "saga") else 1e-10,
random_state=global_random_seed,
)
X = X[:, :-1] # remove intercept
X = np.concatenate((X, X), axis=0)
assert np.linalg.matrix_rank(X) <= min(n_samples, n_features)
y = np.r_[y, y]
if fit_intercept:
intercept = coef[-1]
else:
X = X - X.mean(axis=0)
y = y - y.mean()
intercept = 0
model.fit(X, y)
coef = coef[:-1]
assert model.intercept_ == pytest.approx(intercept)
# coefficients are not all on the same magnitude, adding a small atol to
# make this test less brittle
assert_allclose(model.coef_, coef, atol=1e-8)
|
Test that Ridge converges for all solvers to correct solution on vstacked data.
We work with a simple constructed data set with known solution.
Fit on X, y with alpha is the same as fit on the row-wise duplicated data
[X; X], [y; y] with 2 * alpha.
For wide X, [X', X'] is a singular matrix.
|
test_ridge_regression_vstacked_X
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/tests/test_ridge.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/tests/test_ridge.py
|
BSD-3-Clause
|
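Why duplicating the rows requires doubling alpha in the vstacked test above (again a sketch of the argument): stacking X and y vertically doubles the data-fit term, so doubling the penalty keeps the objective proportional to the original one,

\left\| \begin{bmatrix} y \\ y \end{bmatrix} - \begin{bmatrix} X \\ X \end{bmatrix} w \right\|_2^2 + 2\alpha \|w\|_2^2 = 2 \left( \| y - X w \|_2^2 + \alpha \|w\|_2^2 \right),

and a positive rescaling of the objective leaves its minimizer unchanged, which is why the test expects the same coefficients.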
def test_ridge_regression_unpenalized(
solver, fit_intercept, ols_ridge_dataset, global_random_seed
):
"""Test that unpenalized Ridge = OLS converges for all solvers to correct solution.
We work with a simple constructed data set with known solution.
Note: This checks the minimum norm solution for wide X, i.e.
n_samples < n_features:
min ||w||_2 subject to X w = y
"""
X, y, coef, _ = ols_ridge_dataset
n_samples, n_features = X.shape
alpha = 0 # OLS
params = dict(
alpha=alpha,
fit_intercept=fit_intercept,
solver=solver,
tol=1e-15 if solver in ("sag", "saga") else 1e-10,
random_state=global_random_seed,
)
model = Ridge(**params)
# Note that cholesky might give a warning: "Singular matrix in solving dual
# problem. Using least-squares solution instead."
if fit_intercept:
X = X[:, :-1] # remove intercept
intercept = coef[-1]
coef = coef[:-1]
else:
intercept = 0
model.fit(X, y)
# FIXME: `assert_allclose(model.coef_, coef)` should work for all cases but fails
# for the wide/fat case with n_features > n_samples. The current Ridge solvers do
# NOT return the minimum norm solution with fit_intercept=True.
if n_samples > n_features or not fit_intercept:
assert model.intercept_ == pytest.approx(intercept)
assert_allclose(model.coef_, coef)
else:
# As it is an underdetermined problem, residuals = 0. This shows that we get
# a solution to X w = y ....
assert_allclose(model.predict(X), y)
assert_allclose(X @ coef + intercept, y)
# But it is not the minimum norm solution. (This should be equal.)
assert np.linalg.norm(np.r_[model.intercept_, model.coef_]) > np.linalg.norm(
np.r_[intercept, coef]
)
pytest.xfail(reason="Ridge does not provide the minimum norm solution.")
assert model.intercept_ == pytest.approx(intercept)
assert_allclose(model.coef_, coef)
|
Test that unpenalized Ridge = OLS converges for all solvers to correct solution.
We work with a simple constructed data set with known solution.
Note: This checks the minimum norm solution for wide X, i.e.
n_samples < n_features:
min ||w||_2 subject to X w = y
|
test_ridge_regression_unpenalized
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/tests/test_ridge.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/tests/test_ridge.py
|
BSD-3-Clause
|
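The minimum-norm solution that the xfail above refers to can be obtained directly with `np.linalg.lstsq` (or the pseudoinverse); any other exact interpolant differs by a null-space component and therefore has a strictly larger norm. A minimal NumPy sketch on a hypothetical wide system (shapes are illustrative, not the fixture's):

import numpy as np

rng = np.random.RandomState(0)
X = rng.randn(4, 12)                  # wide, underdetermined: infinitely many exact fits
y = rng.randn(4)

# Minimum norm solution of min ||w||_2 subject to X w = y.
w_min, *_ = np.linalg.lstsq(X, y, rcond=None)
np.testing.assert_allclose(X @ w_min, y, atol=1e-10)

# Adding any null-space direction of X keeps X w = y but increases ||w||_2,
# which is the sense in which the Ridge solvers above are "not minimum norm".
_, _, Vt = np.linalg.svd(X)
w_other = w_min + Vt[-1]              # Vt[-1] lies in the null space of X
np.testing.assert_allclose(X @ w_other, y, atol=1e-10)
assert np.linalg.norm(w_other) > np.linalg.norm(w_min)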