code | docstring | func_name | language | repo | path | url | license |
---|---|---|---|---|---|---|---|
def get_covariance(self):
"""Compute data covariance with the FactorAnalysis model.
``cov = components_.T * components_ + diag(noise_variance)``
Returns
-------
cov : ndarray of shape (n_features, n_features)
Estimated covariance of data.
"""
check_is_fitted(self)
cov = np.dot(self.components_.T, self.components_)
cov.flat[:: len(cov) + 1] += self.noise_variance_ # modify diag inplace
return cov
|
Compute data covariance with the FactorAnalysis model.
``cov = components_.T * components_ + diag(noise_variance)``
Returns
-------
cov : ndarray of shape (n_features, n_features)
Estimated covariance of data.
|
get_covariance
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/_factor_analysis.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/_factor_analysis.py
|
BSD-3-Clause
|
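The `get_covariance` entry above builds the factor-analysis covariance as `components_.T @ components_` plus a diagonal of per-feature noise variances, updating the diagonal in place. A minimal NumPy sketch with hypothetical loadings `W` and noise variances `psi` (names not from the source) illustrates the same construction:

```python
# Minimal sketch, not scikit-learn code: reconstruct the factor-analysis
# covariance from hypothetical loadings W and noise variances psi.
import numpy as np

rng = np.random.default_rng(0)
W = rng.normal(size=(3, 5))        # (n_components, n_features) loadings
psi = rng.uniform(0.1, 1.0, 5)     # per-feature noise variances

cov = W.T @ W                      # components_.T * components_
cov.flat[:: len(cov) + 1] += psi   # add diag(noise_variance) in place
assert np.allclose(cov, W.T @ W + np.diag(psi))
```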
def get_precision(self):
"""Compute data precision matrix with the FactorAnalysis model.
Returns
-------
precision : ndarray of shape (n_features, n_features)
Estimated precision of data.
"""
check_is_fitted(self)
n_features = self.components_.shape[1]
# handle corner cases first
if self.n_components == 0:
return np.diag(1.0 / self.noise_variance_)
if self.n_components == n_features:
return linalg.inv(self.get_covariance())
# Get precision using matrix inversion lemma
components_ = self.components_
precision = np.dot(components_ / self.noise_variance_, components_.T)
precision.flat[:: len(precision) + 1] += 1.0
precision = np.dot(components_.T, np.dot(linalg.inv(precision), components_))
precision /= self.noise_variance_[:, np.newaxis]
precision /= -self.noise_variance_[np.newaxis, :]
precision.flat[:: len(precision) + 1] += 1.0 / self.noise_variance_
return precision
|
Compute data precision matrix with the FactorAnalysis model.
Returns
-------
precision : ndarray of shape (n_features, n_features)
Estimated precision of data.
|
get_precision
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/_factor_analysis.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/_factor_analysis.py
|
BSD-3-Clause
|
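`get_precision` above avoids inverting the full covariance directly by applying the matrix inversion (Woodbury) lemma. A sketch with hypothetical `W` and `psi` checks that the lemma-based expression matches a direct inverse:

```python
# Sketch of the matrix inversion lemma used above (hypothetical W, psi):
# (W.T W + diag(psi))^-1
#   = diag(1/psi) - diag(1/psi) W.T (I + W diag(1/psi) W.T)^-1 W diag(1/psi)
import numpy as np
from scipy import linalg

rng = np.random.default_rng(0)
W = rng.normal(size=(3, 6))            # (n_components, n_features)
psi = rng.uniform(0.5, 2.0, 6)         # noise variances

direct = linalg.inv(W.T @ W + np.diag(psi))

small = (W / psi) @ W.T                # W diag(1/psi) W.T, shape (k, k)
small.flat[:: len(small) + 1] += 1.0   # + I
woodbury = W.T @ linalg.inv(small) @ W
woodbury /= psi[:, np.newaxis]
woodbury /= -psi[np.newaxis, :]
woodbury.flat[:: len(woodbury) + 1] += 1.0 / psi

assert np.allclose(direct, woodbury)
```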
def score_samples(self, X):
"""Compute the log-likelihood of each sample.
Parameters
----------
X : ndarray of shape (n_samples, n_features)
The data.
Returns
-------
ll : ndarray of shape (n_samples,)
Log-likelihood of each sample under the current model.
"""
check_is_fitted(self)
X = validate_data(self, X, reset=False)
Xr = X - self.mean_
precision = self.get_precision()
n_features = X.shape[1]
log_like = -0.5 * (Xr * (np.dot(Xr, precision))).sum(axis=1)
log_like -= 0.5 * (n_features * log(2.0 * np.pi) - fast_logdet(precision))
return log_like
|
Compute the log-likelihood of each sample.
Parameters
----------
X : ndarray of shape (n_samples, n_features)
The data.
Returns
-------
ll : ndarray of shape (n_samples,)
Log-likelihood of each sample under the current model.
|
score_samples
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/_factor_analysis.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/_factor_analysis.py
|
BSD-3-Clause
|
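`score_samples` above is the multivariate-Gaussian log-density with the model's covariance, written in terms of the precision matrix. A sketch with an arbitrary SPD covariance (not a fitted model) checks the expression against `scipy.stats.multivariate_normal`:

```python
# Sketch: ll_i = -0.5 * (x_i - mean)^T P (x_i - mean)
#                - 0.5 * (p * log(2*pi) - logdet(P)),  P = precision matrix.
import numpy as np
from scipy.stats import multivariate_normal

rng = np.random.default_rng(0)
mean = rng.normal(size=4)
A = rng.normal(size=(4, 4))
cov = A @ A.T + np.eye(4)              # any SPD covariance
precision = np.linalg.inv(cov)

X = rng.normal(size=(10, 4))
Xr = X - mean
log_like = -0.5 * (Xr * (Xr @ precision)).sum(axis=1)
log_like -= 0.5 * (4 * np.log(2 * np.pi) - np.linalg.slogdet(precision)[1])

assert np.allclose(log_like, multivariate_normal(mean, cov).logpdf(X))
```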
def _ica_def(X, tol, g, fun_args, max_iter, w_init):
"""Deflationary FastICA using fun approx to neg-entropy function
Used internally by FastICA.
"""
n_components = w_init.shape[0]
W = np.zeros((n_components, n_components), dtype=X.dtype)
n_iter = []
# j is the index of the extracted component
for j in range(n_components):
w = w_init[j, :].copy()
w /= np.sqrt((w**2).sum())
for i in range(max_iter):
gwtx, g_wtx = g(np.dot(w.T, X), fun_args)
w1 = (X * gwtx).mean(axis=1) - g_wtx.mean() * w
_gs_decorrelation(w1, W, j)
w1 /= np.sqrt((w1**2).sum())
lim = np.abs(np.abs((w1 * w).sum()) - 1)
w = w1
if lim < tol:
break
n_iter.append(i + 1)
W[j, :] = w
return W, max(n_iter)
|
Deflationary FastICA using fun approx to neg-entropy function
Used internally by FastICA.
|
_ica_def
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/_fastica.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/_fastica.py
|
BSD-3-Clause
|
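The `_gs_decorrelation` helper called above is not shown in this row; the sketch below is an assumption about what such a Gram-Schmidt step does (subtract the projections of `w1` onto the components already stored in `W`), not the scikit-learn source:

```python
# Hypothetical Gram-Schmidt decorrelation step (NOT the scikit-learn helper):
# remove from w its projections onto the first j rows of W.
import numpy as np

def gs_decorrelation(w, W, j):
    w -= W[:j].T @ (W[:j] @ w)
    return w

rng = np.random.default_rng(0)
W = np.linalg.qr(rng.normal(size=(4, 4)))[0]   # square orthogonal, rows orthonormal
w = gs_decorrelation(rng.normal(size=4), W, 2)
assert np.allclose(W[:2] @ w, 0.0)             # orthogonal to extracted rows
```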
def _ica_par(X, tol, g, fun_args, max_iter, w_init):
"""Parallel FastICA.
Used internally by FastICA --main loop
"""
W = _sym_decorrelation(w_init)
del w_init
p_ = float(X.shape[1])
for ii in range(max_iter):
gwtx, g_wtx = g(np.dot(W, X), fun_args)
W1 = _sym_decorrelation(np.dot(gwtx, X.T) / p_ - g_wtx[:, np.newaxis] * W)
del gwtx, g_wtx
# builtin max, abs are faster than numpy counter parts.
# np.einsum allows having the lowest memory footprint.
# It is faster than np.diag(np.dot(W1, W.T)).
lim = max(abs(abs(np.einsum("ij,ij->i", W1, W)) - 1))
W = W1
if lim < tol:
break
else:
warnings.warn(
(
"FastICA did not converge. Consider increasing "
"tolerance or the maximum number of iterations."
),
ConvergenceWarning,
)
return W, ii + 1
|
Parallel FastICA.
Used internally by FastICA --main loop
|
_ica_par
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/_fastica.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/_fastica.py
|
BSD-3-Clause
|
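`_sym_decorrelation`, defined elsewhere in `_fastica.py`, orthonormalizes the unmixing matrix as `W <- (W W^T)^(-1/2) W`. An illustrative eigendecomposition-based sketch (it may differ in details from the actual helper):

```python
# Symmetric decorrelation sketch: W <- (W W^T)^(-1/2) W.
import numpy as np

def sym_decorrelation(W):
    s, u = np.linalg.eigh(W @ W.T)
    return (u * (1.0 / np.sqrt(s))) @ u.T @ W   # u diag(s^-1/2) u^T W

rng = np.random.default_rng(0)
W = sym_decorrelation(rng.normal(size=(3, 3)))
assert np.allclose(W @ W.T, np.eye(3))          # rows are now orthonormal
```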
def fastica(
X,
n_components=None,
*,
algorithm="parallel",
whiten="unit-variance",
fun="logcosh",
fun_args=None,
max_iter=200,
tol=1e-04,
w_init=None,
whiten_solver="svd",
random_state=None,
return_X_mean=False,
compute_sources=True,
return_n_iter=False,
):
"""Perform Fast Independent Component Analysis.
The implementation is based on [1]_.
Read more in the :ref:`User Guide <ICA>`.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples and
`n_features` is the number of features.
n_components : int, default=None
Number of components to use. If None is passed, all are used.
algorithm : {'parallel', 'deflation'}, default='parallel'
Specify which algorithm to use for FastICA.
whiten : str or bool, default='unit-variance'
Specify the whitening strategy to use.
- If 'arbitrary-variance', whitening with arbitrary variance is used.
- If 'unit-variance', the whitening matrix is rescaled to ensure that
each recovered source has unit variance.
- If False, the data is already considered to be whitened, and no
whitening is performed.
.. versionchanged:: 1.3
The default value of `whiten` changed to 'unit-variance' in 1.3.
fun : {'logcosh', 'exp', 'cube'} or callable, default='logcosh'
The functional form of the G function used in the
approximation to neg-entropy. Could be either 'logcosh', 'exp',
or 'cube'.
You can also provide your own function. It should return a tuple
containing the value of the function, and of its derivative, in the
point. The derivative should be averaged along its last dimension.
Example::
def my_g(x):
return x ** 3, (3 * x ** 2).mean(axis=-1)
fun_args : dict, default=None
Arguments to send to the functional form.
If empty or None and if fun='logcosh', fun_args will take value
{'alpha' : 1.0}.
max_iter : int, default=200
Maximum number of iterations to perform.
tol : float, default=1e-4
A positive scalar giving the tolerance at which the
un-mixing matrix is considered to have converged.
w_init : ndarray of shape (n_components, n_components), default=None
Initial un-mixing array. If `w_init=None`, then an array of values
drawn from a normal distribution is used.
whiten_solver : {"eigh", "svd"}, default="svd"
The solver to use for whitening.
- "svd" is more stable numerically if the problem is degenerate, and
often faster when `n_samples <= n_features`.
- "eigh" is generally more memory efficient when
`n_samples >= n_features`, and can be faster when
`n_samples >= 50 * n_features`.
.. versionadded:: 1.2
random_state : int, RandomState instance or None, default=None
Used to initialize ``w_init`` when not specified, with a
normal distribution. Pass an int, for reproducible results
across multiple function calls.
See :term:`Glossary <random_state>`.
return_X_mean : bool, default=False
If True, X_mean is returned too.
compute_sources : bool, default=True
If False, sources are not computed, but only the rotation matrix.
This can save memory when working with big data. Defaults to True.
return_n_iter : bool, default=False
Whether or not to return the number of iterations.
Returns
-------
K : ndarray of shape (n_components, n_features) or None
If whiten is 'True', K is the pre-whitening matrix that projects data
onto the first n_components principal components. If whiten is 'False',
K is 'None'.
W : ndarray of shape (n_components, n_components)
The square matrix that unmixes the data after whitening.
The mixing matrix is the pseudo-inverse of matrix ``W K``
if K is not None, else it is the inverse of W.
S : ndarray of shape (n_samples, n_components) or None
Estimated source matrix.
X_mean : ndarray of shape (n_features,)
The mean over features. Returned only if return_X_mean is True.
n_iter : int
If the algorithm is "deflation", n_iter is the
maximum number of iterations run across all components. Else
they are just the number of iterations taken to converge. This is
returned only when return_n_iter is set to `True`.
Notes
-----
The data matrix X is considered to be a linear combination of
non-Gaussian (independent) components i.e. X = AS where columns of S
contain the independent components and A is a linear mixing
matrix. In short ICA attempts to `un-mix' the data by estimating an
un-mixing matrix W where ``S = W K X.``
While FastICA was proposed to estimate as many sources
as features, it is possible to estimate less by setting
n_components < n_features. In this case K is not a square matrix
and the estimated A is the pseudo-inverse of ``W K``.
This implementation was originally made for data of shape
[n_features, n_samples]. Now the input is transposed
before the algorithm is applied. This makes it slightly
faster for Fortran-ordered input.
References
----------
.. [1] A. Hyvarinen and E. Oja, "Fast Independent Component Analysis",
Algorithms and Applications, Neural Networks, 13(4-5), 2000,
pp. 411-430.
Examples
--------
>>> from sklearn.datasets import load_digits
>>> from sklearn.decomposition import fastica
>>> X, _ = load_digits(return_X_y=True)
>>> K, W, S = fastica(X, n_components=7, random_state=0, whiten='unit-variance')
>>> K.shape
(7, 64)
>>> W.shape
(7, 7)
>>> S.shape
(1797, 7)
"""
est = FastICA(
n_components=n_components,
algorithm=algorithm,
whiten=whiten,
fun=fun,
fun_args=fun_args,
max_iter=max_iter,
tol=tol,
w_init=w_init,
whiten_solver=whiten_solver,
random_state=random_state,
)
est._validate_params()
S = est._fit_transform(X, compute_sources=compute_sources)
if est.whiten in ["unit-variance", "arbitrary-variance"]:
K = est.whitening_
X_mean = est.mean_
else:
K = None
X_mean = None
returned_values = [K, est._unmixing, S]
if return_X_mean:
returned_values.append(X_mean)
if return_n_iter:
returned_values.append(est.n_iter_)
return returned_values
|
Perform Fast Independent Component Analysis.
The implementation is based on [1]_.
Read more in the :ref:`User Guide <ICA>`.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples and
`n_features` is the number of features.
n_components : int, default=None
Number of components to use. If None is passed, all are used.
algorithm : {'parallel', 'deflation'}, default='parallel'
Specify which algorithm to use for FastICA.
whiten : str or bool, default='unit-variance'
Specify the whitening strategy to use.
- If 'arbitrary-variance', whitening with arbitrary variance is used.
- If 'unit-variance', the whitening matrix is rescaled to ensure that
each recovered source has unit variance.
- If False, the data is already considered to be whitened, and no
whitening is performed.
.. versionchanged:: 1.3
The default value of `whiten` changed to 'unit-variance' in 1.3.
fun : {'logcosh', 'exp', 'cube'} or callable, default='logcosh'
The functional form of the G function used in the
approximation to neg-entropy. Could be either 'logcosh', 'exp',
or 'cube'.
You can also provide your own function. It should return a tuple
containing the value of the function, and of its derivative, in the
point. The derivative should be averaged along its last dimension.
Example::
def my_g(x):
return x ** 3, (3 * x ** 2).mean(axis=-1)
fun_args : dict, default=None
Arguments to send to the functional form.
If empty or None and if fun='logcosh', fun_args will take value
{'alpha' : 1.0}.
max_iter : int, default=200
Maximum number of iterations to perform.
tol : float, default=1e-4
A positive scalar giving the tolerance at which the
un-mixing matrix is considered to have converged.
w_init : ndarray of shape (n_components, n_components), default=None
Initial un-mixing array. If `w_init=None`, then an array of values
drawn from a normal distribution is used.
whiten_solver : {"eigh", "svd"}, default="svd"
The solver to use for whitening.
- "svd" is more stable numerically if the problem is degenerate, and
often faster when `n_samples <= n_features`.
- "eigh" is generally more memory efficient when
`n_samples >= n_features`, and can be faster when
`n_samples >= 50 * n_features`.
.. versionadded:: 1.2
random_state : int, RandomState instance or None, default=None
Used to initialize ``w_init`` when not specified, with a
normal distribution. Pass an int, for reproducible results
across multiple function calls.
See :term:`Glossary <random_state>`.
return_X_mean : bool, default=False
If True, X_mean is returned too.
compute_sources : bool, default=True
If False, sources are not computed, but only the rotation matrix.
This can save memory when working with big data. Defaults to True.
return_n_iter : bool, default=False
Whether or not to return the number of iterations.
Returns
-------
K : ndarray of shape (n_components, n_features) or None
If whiten is 'True', K is the pre-whitening matrix that projects data
onto the first n_components principal components. If whiten is 'False',
K is 'None'.
W : ndarray of shape (n_components, n_components)
The square matrix that unmixes the data after whitening.
The mixing matrix is the pseudo-inverse of matrix ``W K``
if K is not None, else it is the inverse of W.
S : ndarray of shape (n_samples, n_components) or None
Estimated source matrix.
X_mean : ndarray of shape (n_features,)
The mean over features. Returned only if return_X_mean is True.
n_iter : int
If the algorithm is "deflation", n_iter is the
maximum number of iterations run across all components. Else
they are just the number of iterations taken to converge. This is
returned only when return_n_iter is set to `True`.
Notes
-----
The data matrix X is considered to be a linear combination of
non-Gaussian (independent) components i.e. X = AS where columns of S
contain the independent components and A is a linear mixing
matrix. In short ICA attempts to `un-mix' the data by estimating an
un-mixing matrix W where ``S = W K X.``
While FastICA was proposed to estimate as many sources
as features, it is possible to estimate less by setting
n_components < n_features. In this case K is not a square matrix
and the estimated A is the pseudo-inverse of ``W K``.
This implementation was originally made for data of shape
[n_features, n_samples]. Now the input is transposed
before the algorithm is applied. This makes it slightly
faster for Fortran-ordered input.
References
----------
.. [1] A. Hyvarinen and E. Oja, "Fast Independent Component Analysis",
Algorithms and Applications, Neural Networks, 13(4-5), 2000,
pp. 411-430.
Examples
--------
>>> from sklearn.datasets import load_digits
>>> from sklearn.decomposition import fastica
>>> X, _ = load_digits(return_X_y=True)
>>> K, W, S = fastica(X, n_components=7, random_state=0, whiten='unit-variance')
>>> K.shape
(7, 64)
>>> W.shape
(7, 7)
>>> S.shape
(1797, 7)
|
fastica
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/_fastica.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/_fastica.py
|
BSD-3-Clause
|
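A usage sketch for the `fastica` function above, on synthetic mixtures (not taken from the scikit-learn docs). With whitening enabled, the returned pieces satisfy `S = (X - X_mean) @ K.T @ W.T`, as can be read off `_fit_transform` in the next row:

```python
# Unmix two artificially mixed signals and verify the returned factorization.
import numpy as np
from sklearn.decomposition import fastica

rng = np.random.default_rng(0)
t = np.linspace(0, 8, 2000)
S_true = np.c_[np.sin(2 * t), np.sign(np.sin(3 * t))]  # two sources
A = np.array([[1.0, 0.5], [0.5, 2.0]])                 # mixing matrix
X = S_true @ A.T                                       # observed mixtures

K, W, S, X_mean = fastica(
    X, n_components=2, whiten="unit-variance", random_state=0, return_X_mean=True
)
assert np.allclose(S, (X - X_mean) @ K.T @ W.T)
```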
def _fit_transform(self, X, compute_sources=False):
"""Fit the model.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples
and `n_features` is the number of features.
compute_sources : bool, default=False
If False, sources are not computed but only the rotation matrix.
This can save memory when working with big data. Defaults to False.
Returns
-------
S : ndarray of shape (n_samples, n_components) or None
Sources matrix. `None` if `compute_sources` is `False`.
"""
XT = validate_data(
self,
X,
copy=self.whiten,
dtype=[np.float64, np.float32],
ensure_min_samples=2,
).T
fun_args = {} if self.fun_args is None else self.fun_args
random_state = check_random_state(self.random_state)
alpha = fun_args.get("alpha", 1.0)
if not 1 <= alpha <= 2:
raise ValueError("alpha must be in [1,2]")
if self.fun == "logcosh":
g = _logcosh
elif self.fun == "exp":
g = _exp
elif self.fun == "cube":
g = _cube
elif callable(self.fun):
def g(x, fun_args):
return self.fun(x, **fun_args)
n_features, n_samples = XT.shape
n_components = self.n_components
if not self.whiten and n_components is not None:
n_components = None
warnings.warn("Ignoring n_components with whiten=False.")
if n_components is None:
n_components = min(n_samples, n_features)
if n_components > min(n_samples, n_features):
n_components = min(n_samples, n_features)
warnings.warn(
"n_components is too large: it will be set to %s" % n_components
)
if self.whiten:
# Centering the features of X
X_mean = XT.mean(axis=-1)
XT -= X_mean[:, np.newaxis]
# Whitening and preprocessing by PCA
if self.whiten_solver == "eigh":
# Faster when num_samples >> n_features
d, u = linalg.eigh(XT.dot(X))
sort_indices = np.argsort(d)[::-1]
eps = np.finfo(d.dtype).eps * 10
degenerate_idx = d < eps
if np.any(degenerate_idx):
warnings.warn(
"There are some small singular values, using "
"whiten_solver = 'svd' might lead to more "
"accurate results."
)
d[degenerate_idx] = eps # For numerical issues
np.sqrt(d, out=d)
d, u = d[sort_indices], u[:, sort_indices]
elif self.whiten_solver == "svd":
u, d = linalg.svd(XT, full_matrices=False, check_finite=False)[:2]
# Give consistent eigenvectors for both svd solvers
u *= np.sign(u[0])
K = (u / d).T[:n_components] # see (6.33) p.140
del u, d
X1 = np.dot(K, XT)
# see (13.6) p.267 Here X1 is white and data
# in X has been projected onto a subspace by PCA
X1 *= np.sqrt(n_samples)
else:
# X must be casted to floats to avoid typing issues with numpy
# 2.0 and the line below
X1 = as_float_array(XT, copy=False) # copy has been taken care of
w_init = self.w_init
if w_init is None:
w_init = np.asarray(
random_state.normal(size=(n_components, n_components)), dtype=X1.dtype
)
else:
w_init = np.asarray(w_init)
if w_init.shape != (n_components, n_components):
raise ValueError(
"w_init has invalid shape -- should be %(shape)s"
% {"shape": (n_components, n_components)}
)
kwargs = {
"tol": self.tol,
"g": g,
"fun_args": fun_args,
"max_iter": self.max_iter,
"w_init": w_init,
}
if self.algorithm == "parallel":
W, n_iter = _ica_par(X1, **kwargs)
elif self.algorithm == "deflation":
W, n_iter = _ica_def(X1, **kwargs)
del X1
self.n_iter_ = n_iter
if compute_sources:
if self.whiten:
S = np.linalg.multi_dot([W, K, XT]).T
else:
S = np.dot(W, XT).T
else:
S = None
if self.whiten:
if self.whiten == "unit-variance":
if not compute_sources:
S = np.linalg.multi_dot([W, K, XT]).T
S_std = np.std(S, axis=0, keepdims=True)
S /= S_std
W /= S_std.T
self.components_ = np.dot(W, K)
self.mean_ = X_mean
self.whitening_ = K
else:
self.components_ = W
self.mixing_ = linalg.pinv(self.components_, check_finite=False)
self._unmixing = W
return S
|
Fit the model.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples
and `n_features` is the number of features.
compute_sources : bool, default=False
If False, sources are not computed but only the rotation matrix.
This can save memory when working with big data. Defaults to False.
Returns
-------
S : ndarray of shape (n_samples, n_components) or None
Sources matrix. `None` if `compute_sources` is `False`.
|
_fit_transform
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/_fastica.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/_fastica.py
|
BSD-3-Clause
|
def transform(self, X, copy=True):
"""Recover the sources from X (apply the unmixing matrix).
Parameters
----------
X : array-like of shape (n_samples, n_features)
Data to transform, where `n_samples` is the number of samples
and `n_features` is the number of features.
copy : bool, default=True
If False, data passed to fit can be overwritten. Defaults to True.
Returns
-------
X_new : ndarray of shape (n_samples, n_components)
Estimated sources obtained by transforming the data with the
estimated unmixing matrix.
"""
check_is_fitted(self)
X = validate_data(
self,
X,
copy=(copy and self.whiten),
dtype=[np.float64, np.float32],
reset=False,
)
if self.whiten:
X -= self.mean_
return np.dot(X, self.components_.T)
|
Recover the sources from X (apply the unmixing matrix).
Parameters
----------
X : array-like of shape (n_samples, n_features)
Data to transform, where `n_samples` is the number of samples
and `n_features` is the number of features.
copy : bool, default=True
If False, data passed to fit can be overwritten. Defaults to True.
Returns
-------
X_new : ndarray of shape (n_samples, n_components)
Estimated sources obtained by transforming the data with the
estimated unmixing matrix.
|
transform
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/_fastica.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/_fastica.py
|
BSD-3-Clause
|
def inverse_transform(self, X, copy=True):
"""Transform the sources back to the mixed data (apply mixing matrix).
Parameters
----------
X : array-like of shape (n_samples, n_components)
Sources, where `n_samples` is the number of samples
and `n_components` is the number of components.
copy : bool, default=True
If False, data passed to fit are overwritten. Defaults to True.
Returns
-------
X_original : ndarray of shape (n_samples, n_features)
Reconstructed data obtained with the mixing matrix.
"""
check_is_fitted(self)
X = check_array(X, copy=(copy and self.whiten), dtype=[np.float64, np.float32])
X = np.dot(X, self.mixing_.T)
if self.whiten:
X += self.mean_
return X
|
Transform the sources back to the mixed data (apply mixing matrix).
Parameters
----------
X : array-like of shape (n_samples, n_components)
Sources, where `n_samples` is the number of samples
and `n_components` is the number of components.
copy : bool, default=True
If False, data passed to fit are overwritten. Defaults to True.
Returns
-------
X_original : ndarray of shape (n_samples, n_features)
Reconstructed data obtained with the mixing matrix.
|
inverse_transform
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/_fastica.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/_fastica.py
|
BSD-3-Clause
|
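`transform` and `inverse_transform` above are (pseudo-)inverse operations around `components_` and `mixing_`. A round-trip sketch on synthetic data, with `n_components` equal to `n_features` so the reconstruction is exact up to floating point:

```python
import numpy as np
from sklearn.decomposition import FastICA

rng = np.random.default_rng(0)
S = rng.laplace(size=(500, 2))                 # non-Gaussian sources
X = S @ np.array([[1.0, 0.4], [0.3, 2.0]]).T   # mixed observations

ica = FastICA(n_components=2, whiten="unit-variance", random_state=0).fit(X)
X_rec = ica.inverse_transform(ica.transform(X))
assert np.allclose(X, X_rec)                   # mixing_ undoes components_
```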
def fit(self, X, y=None):
"""Fit the model with X, using minibatches of size batch_size.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
self : object
Returns the instance itself.
"""
self.components_ = None
self.n_samples_seen_ = 0
self.mean_ = 0.0
self.var_ = 0.0
self.singular_values_ = None
self.explained_variance_ = None
self.explained_variance_ratio_ = None
self.noise_variance_ = None
X = validate_data(
self,
X,
accept_sparse=["csr", "csc", "lil"],
copy=self.copy,
dtype=[np.float64, np.float32],
force_writeable=True,
)
n_samples, n_features = X.shape
if self.batch_size is None:
self.batch_size_ = 5 * n_features
else:
self.batch_size_ = self.batch_size
for batch in gen_batches(
n_samples, self.batch_size_, min_batch_size=self.n_components or 0
):
X_batch = X[batch]
if sparse.issparse(X_batch):
X_batch = X_batch.toarray()
self.partial_fit(X_batch, check_input=False)
return self
|
Fit the model with X, using minibatches of size batch_size.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
self : object
Returns the instance itself.
|
fit
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/_incremental_pca.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/_incremental_pca.py
|
BSD-3-Clause
|
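`IncrementalPCA.fit` above simply streams the data through `partial_fit` in `gen_batches`-sized chunks; a sketch on synthetic data showing that replaying the same batches by hand gives the same components:

```python
import numpy as np
from sklearn.decomposition import IncrementalPCA
from sklearn.utils import gen_batches

rng = np.random.default_rng(0)
X = rng.normal(size=(100, 10))

ipca_fit = IncrementalPCA(n_components=3, batch_size=25).fit(X)

ipca_stream = IncrementalPCA(n_components=3)
for batch in gen_batches(X.shape[0], 25, min_batch_size=3):
    ipca_stream.partial_fit(X[batch])

assert np.allclose(ipca_fit.components_, ipca_stream.components_)
```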
def partial_fit(self, X, y=None, check_input=True):
"""Incremental fit with X. All of X is processed as a single batch.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : Ignored
Not used, present for API consistency by convention.
check_input : bool, default=True
Run check_array on X.
Returns
-------
self : object
Returns the instance itself.
"""
first_pass = not hasattr(self, "components_")
if check_input:
if sparse.issparse(X):
raise TypeError(
"IncrementalPCA.partial_fit does not support "
"sparse input. Either convert data to dense "
"or use IncrementalPCA.fit to do so in batches."
)
X = validate_data(
self,
X,
copy=self.copy,
dtype=[np.float64, np.float32],
force_writeable=True,
reset=first_pass,
)
n_samples, n_features = X.shape
if first_pass:
self.components_ = None
if self.n_components is None:
if self.components_ is None:
self.n_components_ = min(n_samples, n_features)
else:
self.n_components_ = self.components_.shape[0]
elif not self.n_components <= n_features:
raise ValueError(
"n_components=%r invalid for n_features=%d, need "
"more rows than columns for IncrementalPCA "
"processing" % (self.n_components, n_features)
)
elif self.n_components > n_samples and first_pass:
raise ValueError(
f"n_components={self.n_components} must be less or equal to "
f"the batch number of samples {n_samples} for the first "
"partial_fit call."
)
else:
self.n_components_ = self.n_components
if (self.components_ is not None) and (
self.components_.shape[0] != self.n_components_
):
raise ValueError(
"Number of input features has changed from %i "
"to %i between calls to partial_fit! Try "
"setting n_components to a fixed value."
% (self.components_.shape[0], self.n_components_)
)
# This is the first partial_fit
if not hasattr(self, "n_samples_seen_"):
self.n_samples_seen_ = 0
self.mean_ = 0.0
self.var_ = 0.0
# Update stats - they are 0 if this is the first step
col_mean, col_var, n_total_samples = _incremental_mean_and_var(
X,
last_mean=self.mean_,
last_variance=self.var_,
last_sample_count=np.repeat(self.n_samples_seen_, X.shape[1]),
)
n_total_samples = n_total_samples[0]
# Whitening
if self.n_samples_seen_ == 0:
# If it is the first step, simply whiten X
X -= col_mean
else:
col_batch_mean = np.mean(X, axis=0)
X -= col_batch_mean
# Build matrix of combined previous basis and new data
mean_correction = np.sqrt(
(self.n_samples_seen_ / n_total_samples) * n_samples
) * (self.mean_ - col_batch_mean)
X = np.vstack(
(
self.singular_values_.reshape((-1, 1)) * self.components_,
X,
mean_correction,
)
)
U, S, Vt = linalg.svd(X, full_matrices=False, check_finite=False)
U, Vt = svd_flip(U, Vt, u_based_decision=False)
explained_variance = S**2 / (n_total_samples - 1)
explained_variance_ratio = S**2 / np.sum(col_var * n_total_samples)
self.n_samples_seen_ = n_total_samples
self.components_ = Vt[: self.n_components_]
self.singular_values_ = S[: self.n_components_]
self.mean_ = col_mean
self.var_ = col_var
self.explained_variance_ = explained_variance[: self.n_components_]
self.explained_variance_ratio_ = explained_variance_ratio[: self.n_components_]
# we already checked `self.n_components <= n_samples` above
if self.n_components_ not in (n_samples, n_features):
self.noise_variance_ = explained_variance[self.n_components_ :].mean()
else:
self.noise_variance_ = 0.0
return self
|
Incremental fit with X. All of X is processed as a single batch.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : Ignored
Not used, present for API consistency by convention.
check_input : bool, default=True
Run check_array on X.
Returns
-------
self : object
Returns the instance itself.
|
partial_fit
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/_incremental_pca.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/_incremental_pca.py
|
BSD-3-Clause
|
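Across `partial_fit` calls above, the running mean and variance are updated exactly via `_incremental_mean_and_var`, while the retained components are an approximation refined batch by batch. A small sketch of the bookkeeping:

```python
import numpy as np
from sklearn.decomposition import IncrementalPCA

rng = np.random.default_rng(0)
X = rng.normal(size=(120, 6))

ipca = IncrementalPCA(n_components=2)
for chunk in np.array_split(X, 4):             # four batches of 30 samples
    ipca.partial_fit(chunk)

assert ipca.n_samples_seen_ == 120
assert np.allclose(ipca.mean_, X.mean(axis=0))
assert np.allclose(ipca.var_, X.var(axis=0))
```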
def transform(self, X):
"""Apply dimensionality reduction to X.
X is projected on the first principal components previously extracted
from a training set, using minibatches of size batch_size if X is
sparse.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
New data, where `n_samples` is the number of samples
and `n_features` is the number of features.
Returns
-------
X_new : ndarray of shape (n_samples, n_components)
Projection of X in the first principal components.
Examples
--------
>>> import numpy as np
>>> from sklearn.decomposition import IncrementalPCA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2],
... [1, 1], [2, 1], [3, 2]])
>>> ipca = IncrementalPCA(n_components=2, batch_size=3)
>>> ipca.fit(X)
IncrementalPCA(batch_size=3, n_components=2)
>>> ipca.transform(X) # doctest: +SKIP
"""
if sparse.issparse(X):
n_samples = X.shape[0]
output = []
for batch in gen_batches(
n_samples, self.batch_size_, min_batch_size=self.n_components or 0
):
output.append(super().transform(X[batch].toarray()))
return np.vstack(output)
else:
return super().transform(X)
|
Apply dimensionality reduction to X.
X is projected on the first principal components previously extracted
from a training set, using minibatches of size batch_size if X is
sparse.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
New data, where `n_samples` is the number of samples
and `n_features` is the number of features.
Returns
-------
X_new : ndarray of shape (n_samples, n_components)
Projection of X in the first principal components.
Examples
--------
>>> import numpy as np
>>> from sklearn.decomposition import IncrementalPCA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2],
... [1, 1], [2, 1], [3, 2]])
>>> ipca = IncrementalPCA(n_components=2, batch_size=3)
>>> ipca.fit(X)
IncrementalPCA(batch_size=3, n_components=2)
>>> ipca.transform(X) # doctest: +SKIP
|
transform
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/_incremental_pca.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/_incremental_pca.py
|
BSD-3-Clause
|
def fit(self, X, y=None):
"""Fit the model from data in X.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
self : object
Returns the instance itself.
"""
if self.fit_inverse_transform and self.kernel == "precomputed":
raise ValueError("Cannot fit_inverse_transform with a precomputed kernel.")
X = validate_data(self, X, accept_sparse="csr", copy=self.copy_X)
self.gamma_ = 1 / X.shape[1] if self.gamma is None else self.gamma
self._centerer = KernelCenterer().set_output(transform="default")
K = self._get_kernel(X)
# When kernel="precomputed", K is X but it's safe to perform in place operations
# on K because a copy was made before if requested by copy_X.
self._fit_transform_in_place(K)
if self.fit_inverse_transform:
# no need to use the kernel to transform X, use shortcut expression
X_transformed = self.eigenvectors_ * np.sqrt(self.eigenvalues_)
self._fit_inverse_transform(X_transformed, X)
self.X_fit_ = X
return self
|
Fit the model from data in X.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
self : object
Returns the instance itself.
|
fit
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/_kernel_pca.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/_kernel_pca.py
|
BSD-3-Clause
|
def fit_transform(self, X, y=None, **params):
"""Fit the model from data in X and transform X.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : Ignored
Not used, present for API consistency by convention.
**params : kwargs
Parameters (keyword arguments) and values passed to
the fit_transform instance.
Returns
-------
X_new : ndarray of shape (n_samples, n_components)
Returns the instance itself.
"""
self.fit(X, **params)
# no need to use the kernel to transform X, use shortcut expression
X_transformed = self.eigenvectors_ * np.sqrt(self.eigenvalues_)
if self.fit_inverse_transform:
self._fit_inverse_transform(X_transformed, X)
return X_transformed
|
Fit the model from data in X and transform X.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : Ignored
Not used, present for API consistency by convention.
**params : kwargs
Parameters (keyword arguments) and values passed to
the fit_transform instance.
Returns
-------
X_new : ndarray of shape (n_samples, n_components)
Returns the instance itself.
|
fit_transform
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/_kernel_pca.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/_kernel_pca.py
|
BSD-3-Clause
|
def transform(self, X):
"""Transform X.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples
and `n_features` is the number of features.
Returns
-------
X_new : ndarray of shape (n_samples, n_components)
Returns the instance itself.
"""
check_is_fitted(self)
X = validate_data(self, X, accept_sparse="csr", reset=False)
# Compute centered gram matrix between X and training data X_fit_
K = self._centerer.transform(self._get_kernel(X, self.X_fit_))
# scale eigenvectors (properly account for null-space for dot product)
non_zeros = np.flatnonzero(self.eigenvalues_)
scaled_alphas = np.zeros_like(self.eigenvectors_)
scaled_alphas[:, non_zeros] = self.eigenvectors_[:, non_zeros] / np.sqrt(
self.eigenvalues_[non_zeros]
)
# Project with a scalar product between K and the scaled eigenvectors
return np.dot(K, scaled_alphas)
|
Transform X.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples
and `n_features` is the number of features.
Returns
-------
X_new : ndarray of shape (n_samples, n_components)
Returns the instance itself.
|
transform
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/_kernel_pca.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/_kernel_pca.py
|
BSD-3-Clause
|
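For the training data itself, `transform` above reproduces the fitted embedding: the centered kernel satisfies `K_centered @ alpha_k = lambda_k * alpha_k`, so projecting onto `eigenvectors_ / sqrt(eigenvalues_)` returns `eigenvectors_ * sqrt(eigenvalues_)`. A quick sketch:

```python
import numpy as np
from sklearn.decomposition import KernelPCA

rng = np.random.default_rng(0)
X = rng.normal(size=(60, 5))

kpca = KernelPCA(n_components=3, kernel="rbf", gamma=0.2)
embedded = kpca.fit_transform(X)
assert np.allclose(embedded, kpca.transform(X))   # same points, same embedding
```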
def _update_doc_distribution(
X,
exp_topic_word_distr,
doc_topic_prior,
max_doc_update_iter,
mean_change_tol,
cal_sstats,
random_state,
):
"""E-step: update document-topic distribution.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Document word matrix.
exp_topic_word_distr : ndarray of shape (n_topics, n_features)
Exponential value of expectation of log topic word distribution.
In the literature, this is `exp(E[log(beta)])`.
doc_topic_prior : float
Prior of document topic distribution `theta`.
max_doc_update_iter : int
Max number of iterations for updating document topic distribution in
the E-step.
mean_change_tol : float
Stopping tolerance for updating document topic distribution in E-step.
cal_sstats : bool
Parameter that indicates whether to calculate sufficient statistics or not.
Set `cal_sstats` to `True` when we need to run M-step.
random_state : RandomState instance or None
Parameter that indicates how to initialize the document topic distribution.
Setting `random_state` to None will initialize the document topic distribution
to a constant number.
Returns
-------
(doc_topic_distr, suff_stats) :
`doc_topic_distr` is unnormalized topic distribution for each document.
In the literature, this is `gamma`. We can calculate `E[log(theta)]`
from it.
`suff_stats` is expected sufficient statistics for the M-step.
When `cal_sstats == False`, this will be None.
"""
is_sparse_x = sp.issparse(X)
n_samples, n_features = X.shape
n_topics = exp_topic_word_distr.shape[0]
if random_state:
doc_topic_distr = random_state.gamma(100.0, 0.01, (n_samples, n_topics)).astype(
X.dtype, copy=False
)
else:
doc_topic_distr = np.ones((n_samples, n_topics), dtype=X.dtype)
# In the literature, this is `exp(E[log(theta)])`
exp_doc_topic = np.exp(_dirichlet_expectation_2d(doc_topic_distr))
# Expected sufficient statistics for `components_` (only computed when `cal_sstats` is True)
suff_stats = (
np.zeros(exp_topic_word_distr.shape, dtype=X.dtype) if cal_sstats else None
)
if is_sparse_x:
X_data = X.data
X_indices = X.indices
X_indptr = X.indptr
# These cython functions are called in a nested loop on usually very small arrays
# (length=n_topics). In that case, finding the appropriate signature of the
# fused-typed function can be more costly than its execution, hence the dispatch
# is done outside of the loop.
ctype = "float" if X.dtype == np.float32 else "double"
mean_change = cy_mean_change[ctype]
dirichlet_expectation_1d = cy_dirichlet_expectation_1d[ctype]
eps = np.finfo(X.dtype).eps
for idx_d in range(n_samples):
if is_sparse_x:
ids = X_indices[X_indptr[idx_d] : X_indptr[idx_d + 1]]
cnts = X_data[X_indptr[idx_d] : X_indptr[idx_d + 1]]
else:
ids = np.nonzero(X[idx_d, :])[0]
cnts = X[idx_d, ids]
doc_topic_d = doc_topic_distr[idx_d, :]
# The next one is a copy, since the inner loop overwrites it.
exp_doc_topic_d = exp_doc_topic[idx_d, :].copy()
exp_topic_word_d = exp_topic_word_distr[:, ids]
# Iterate between `doc_topic_d` and `norm_phi` until convergence
for _ in range(0, max_doc_update_iter):
last_d = doc_topic_d
# The optimal phi_{dwk} is proportional to
# exp(E[log(theta_{dk})]) * exp(E[log(beta_{dw})]).
norm_phi = np.dot(exp_doc_topic_d, exp_topic_word_d) + eps
doc_topic_d = exp_doc_topic_d * np.dot(cnts / norm_phi, exp_topic_word_d.T)
# Note: adds doc_topic_prior to doc_topic_d, in-place.
dirichlet_expectation_1d(doc_topic_d, doc_topic_prior, exp_doc_topic_d)
if mean_change(last_d, doc_topic_d) < mean_change_tol:
break
doc_topic_distr[idx_d, :] = doc_topic_d
# Contribution of document d to the expected sufficient
# statistics for the M step.
if cal_sstats:
norm_phi = np.dot(exp_doc_topic_d, exp_topic_word_d) + eps
suff_stats[:, ids] += np.outer(exp_doc_topic_d, cnts / norm_phi)
return (doc_topic_distr, suff_stats)
|
E-step: update document-topic distribution.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Document word matrix.
exp_topic_word_distr : ndarray of shape (n_topics, n_features)
Exponential value of expectation of log topic word distribution.
In the literature, this is `exp(E[log(beta)])`.
doc_topic_prior : float
Prior of document topic distribution `theta`.
max_doc_update_iter : int
Max number of iterations for updating document topic distribution in
the E-step.
mean_change_tol : float
Stopping tolerance for updating document topic distribution in E-step.
cal_sstats : bool
Parameter that indicates whether to calculate sufficient statistics or not.
Set `cal_sstats` to `True` when we need to run M-step.
random_state : RandomState instance or None
Parameter that indicates how to initialize the document topic distribution.
Setting `random_state` to None will initialize the document topic distribution
to a constant number.
Returns
-------
(doc_topic_distr, suff_stats) :
`doc_topic_distr` is unnormalized topic distribution for each document.
In the literature, this is `gamma`. We can calculate `E[log(theta)]`
from it.
`suff_stats` is expected sufficient statistics for the M-step.
When `cal_sstats == False`, this will be None.
|
_update_doc_distribution
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/_lda.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/_lda.py
|
BSD-3-Clause
|
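The inner loop above is the usual variational fixed-point update for a document's `gamma`: `gamma_dk = alpha + sum_w n_dw * phi_dwk` with `phi_dwk` proportional to `exp(E[log theta_dk]) * exp(E[log beta_kw])`. A sketch of one update for a single dense document, using a plain digamma-based Dirichlet expectation instead of the Cython helpers (all values hypothetical):

```python
import numpy as np
from scipy.special import digamma

rng = np.random.default_rng(0)
n_topics, n_words = 3, 8
alpha = 0.1                                    # doc_topic_prior
exp_topic_word = np.exp(rng.normal(size=(n_topics, n_words)))
counts = rng.integers(0, 5, size=n_words).astype(float)

gamma_d = np.ones(n_topics)                    # doc_topic_d
exp_doc_topic_d = np.exp(digamma(gamma_d) - digamma(gamma_d.sum()))

# norm_phi normalizes phi over topics for each word.
norm_phi = exp_doc_topic_d @ exp_topic_word + np.finfo(float).eps
gamma_d = alpha + exp_doc_topic_d * ((counts / norm_phi) @ exp_topic_word.T)
```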
def _e_step(self, X, cal_sstats, random_init, parallel=None):
"""E-step in EM update.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Document word matrix.
cal_sstats : bool
Parameter that indicates whether to calculate sufficient statistics
or not. Set ``cal_sstats`` to True when we need to run M-step.
random_init : bool
Parameter that indicates whether to initialize document topic
distribution randomly in the E-step. Set it to True in training
steps.
parallel : joblib.Parallel, default=None
Pre-initialized instance of joblib.Parallel.
Returns
-------
(doc_topic_distr, suff_stats) :
`doc_topic_distr` is unnormalized topic distribution for each
document. In the literature, this is called `gamma`.
`suff_stats` is expected sufficient statistics for the M-step.
When `cal_sstats == False`, it will be None.
"""
# Run e-step in parallel
random_state = self.random_state_ if random_init else None
# TODO: make Parallel._effective_n_jobs public instead?
n_jobs = effective_n_jobs(self.n_jobs)
if parallel is None:
parallel = Parallel(n_jobs=n_jobs, verbose=max(0, self.verbose - 1))
results = parallel(
delayed(_update_doc_distribution)(
X[idx_slice, :],
self.exp_dirichlet_component_,
self.doc_topic_prior_,
self.max_doc_update_iter,
self.mean_change_tol,
cal_sstats,
random_state,
)
for idx_slice in gen_even_slices(X.shape[0], n_jobs)
)
# merge result
doc_topics, sstats_list = zip(*results)
doc_topic_distr = np.vstack(doc_topics)
if cal_sstats:
# This step finishes computing the sufficient statistics for the
# M-step.
suff_stats = np.zeros(self.components_.shape, dtype=self.components_.dtype)
for sstats in sstats_list:
suff_stats += sstats
suff_stats *= self.exp_dirichlet_component_
else:
suff_stats = None
return (doc_topic_distr, suff_stats)
|
E-step in EM update.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Document word matrix.
cal_sstats : bool
Parameter that indicates whether to calculate sufficient statistics
or not. Set ``cal_sstats`` to True when we need to run M-step.
random_init : bool
Parameter that indicates whether to initialize document topic
distribution randomly in the E-step. Set it to True in training
steps.
parallel : joblib.Parallel, default=None
Pre-initialized instance of joblib.Parallel.
Returns
-------
(doc_topic_distr, suff_stats) :
`doc_topic_distr` is unnormalized topic distribution for each
document. In the literature, this is called `gamma`.
`suff_stats` is expected sufficient statistics for the M-step.
When `cal_sstats == False`, it will be None.
|
_e_step
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/_lda.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/_lda.py
|
BSD-3-Clause
|
def _em_step(self, X, total_samples, batch_update, parallel=None):
"""EM update for 1 iteration.
Update `components_` by batch VB or online VB.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Document word matrix.
total_samples : int
Total number of documents. It is only used when
batch_update is `False`.
batch_update : bool
Parameter that controls updating method.
`True` for batch learning, `False` for online learning.
parallel : joblib.Parallel, default=None
Pre-initialized instance of joblib.Parallel
Returns
-------
doc_topic_distr : ndarray of shape (n_samples, n_components)
Unnormalized document topic distribution.
"""
# E-step
_, suff_stats = self._e_step(
X, cal_sstats=True, random_init=True, parallel=parallel
)
# M-step
if batch_update:
self.components_ = self.topic_word_prior_ + suff_stats
else:
# online update
# In the literature, the weight is `rho`
weight = np.power(
self.learning_offset + self.n_batch_iter_, -self.learning_decay
)
doc_ratio = float(total_samples) / X.shape[0]
self.components_ *= 1 - weight
self.components_ += weight * (
self.topic_word_prior_ + doc_ratio * suff_stats
)
# update `component_` related variables
self.exp_dirichlet_component_ = np.exp(
_dirichlet_expectation_2d(self.components_)
)
self.n_batch_iter_ += 1
return
|
EM update for 1 iteration.
Update `components_` by batch VB or online VB.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Document word matrix.
total_samples : int
Total number of documents. It is only used when
batch_update is `False`.
batch_update : bool
Parameter that controls updating method.
`True` for batch learning, `False` for online learning.
parallel : joblib.Parallel, default=None
Pre-initialized instance of joblib.Parallel
Returns
-------
doc_topic_distr : ndarray of shape (n_samples, n_components)
Unnormalized document topic distribution.
|
_em_step
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/_lda.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/_lda.py
|
BSD-3-Clause
|
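In the online branch above, the step size is `rho_t = (learning_offset + n_batch_iter_) ** (-learning_decay)` and the minibatch statistics are scaled by `total_samples / batch_size`. A toy sketch of the convex blend with hypothetical settings:

```python
import numpy as np

learning_offset, learning_decay = 10.0, 0.7
total_samples, batch_size = 10_000, 128
topic_word_prior = 0.1

components = np.full((2, 5), 1.0)              # current lambda
suff_stats = np.full((2, 5), 0.5)              # minibatch sufficient statistics

for n_batch_iter in range(3):
    rho = (learning_offset + n_batch_iter) ** (-learning_decay)
    doc_ratio = total_samples / batch_size
    components = (1 - rho) * components + rho * (
        topic_word_prior + doc_ratio * suff_stats
    )
```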
def _check_non_neg_array(self, X, reset_n_features, whom):
"""check X format
check X format and make sure no negative value in X.
Parameters
----------
X : array-like or sparse matrix
"""
dtype = [np.float64, np.float32] if reset_n_features else self.components_.dtype
X = validate_data(
self,
X,
reset=reset_n_features,
accept_sparse="csr",
dtype=dtype,
)
check_non_negative(X, whom)
return X
|
Check X format and make sure there are no negative values in X.
Parameters
----------
X : array-like or sparse matrix
|
_check_non_neg_array
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/_lda.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/_lda.py
|
BSD-3-Clause
|
def partial_fit(self, X, y=None):
"""Online VB with Mini-Batch update.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Document word matrix.
y : Ignored
Not used, present here for API consistency by convention.
Returns
-------
self
Partially fitted estimator.
"""
first_time = not hasattr(self, "components_")
X = self._check_non_neg_array(
X, reset_n_features=first_time, whom="LatentDirichletAllocation.partial_fit"
)
n_samples, n_features = X.shape
batch_size = self.batch_size
# initialize parameters or check
if first_time:
self._init_latent_vars(n_features, dtype=X.dtype)
if n_features != self.components_.shape[1]:
raise ValueError(
"The provided data has %d dimensions while "
"the model was trained with feature size %d."
% (n_features, self.components_.shape[1])
)
n_jobs = effective_n_jobs(self.n_jobs)
with Parallel(n_jobs=n_jobs, verbose=max(0, self.verbose - 1)) as parallel:
for idx_slice in gen_batches(n_samples, batch_size):
self._em_step(
X[idx_slice, :],
total_samples=self.total_samples,
batch_update=False,
parallel=parallel,
)
return self
|
Online VB with Mini-Batch update.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Document word matrix.
y : Ignored
Not used, present here for API consistency by convention.
Returns
-------
self
Partially fitted estimator.
|
partial_fit
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/_lda.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/_lda.py
|
BSD-3-Clause
|
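A usage sketch for `partial_fit` above on synthetic bag-of-words counts (not from the scikit-learn docs); `total_samples` is set to the full corpus size so the online scaling is correct when documents arrive in chunks:

```python
import numpy as np
from sklearn.decomposition import LatentDirichletAllocation

rng = np.random.default_rng(0)
X = rng.poisson(1.0, size=(200, 30))           # fake document-word counts

lda = LatentDirichletAllocation(
    n_components=5, total_samples=200, batch_size=50, random_state=0
)
for chunk in np.array_split(X, 4):             # stream 50 documents at a time
    lda.partial_fit(chunk)

doc_topics = lda.transform(X[:3])
assert np.allclose(doc_topics.sum(axis=1), 1.0)   # normalized distributions
```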
def fit(self, X, y=None):
"""Learn model for the data X with variational Bayes method.
When `learning_method` is 'online', use mini-batch update.
Otherwise, use batch update.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Document word matrix.
y : Ignored
Not used, present here for API consistency by convention.
Returns
-------
self
Fitted estimator.
"""
X = self._check_non_neg_array(
X, reset_n_features=True, whom="LatentDirichletAllocation.fit"
)
n_samples, n_features = X.shape
max_iter = self.max_iter
evaluate_every = self.evaluate_every
learning_method = self.learning_method
batch_size = self.batch_size
# initialize parameters
self._init_latent_vars(n_features, dtype=X.dtype)
# change to perplexity later
last_bound = None
n_jobs = effective_n_jobs(self.n_jobs)
with Parallel(n_jobs=n_jobs, verbose=max(0, self.verbose - 1)) as parallel:
for i in range(max_iter):
if learning_method == "online":
for idx_slice in gen_batches(n_samples, batch_size):
self._em_step(
X[idx_slice, :],
total_samples=n_samples,
batch_update=False,
parallel=parallel,
)
else:
# batch update
self._em_step(
X, total_samples=n_samples, batch_update=True, parallel=parallel
)
# check perplexity
if evaluate_every > 0 and (i + 1) % evaluate_every == 0:
doc_topics_distr, _ = self._e_step(
X, cal_sstats=False, random_init=False, parallel=parallel
)
bound = self._perplexity_precomp_distr(
X, doc_topics_distr, sub_sampling=False
)
if self.verbose:
print(
"iteration: %d of max_iter: %d, perplexity: %.4f"
% (i + 1, max_iter, bound)
)
if last_bound and abs(last_bound - bound) < self.perp_tol:
break
last_bound = bound
elif self.verbose:
print("iteration: %d of max_iter: %d" % (i + 1, max_iter))
self.n_iter_ += 1
# calculate final perplexity value on train set
doc_topics_distr, _ = self._e_step(
X, cal_sstats=False, random_init=False, parallel=parallel
)
self.bound_ = self._perplexity_precomp_distr(
X, doc_topics_distr, sub_sampling=False
)
return self
|
Learn model for the data X with variational Bayes method.
When `learning_method` is 'online', use mini-batch update.
Otherwise, use batch update.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Document word matrix.
y : Ignored
Not used, present here for API consistency by convention.
Returns
-------
self
Fitted estimator.
|
fit
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/_lda.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/_lda.py
|
BSD-3-Clause
|
def transform(self, X, *, normalize=True):
"""Transform data X according to the fitted model.
.. versionchanged:: 0.18
`doc_topic_distr` is now normalized.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Document word matrix.
normalize : bool, default=True
Whether to normalize the document topic distribution.
Returns
-------
doc_topic_distr : ndarray of shape (n_samples, n_components)
Document topic distribution for X.
"""
check_is_fitted(self)
X = self._check_non_neg_array(
X, reset_n_features=False, whom="LatentDirichletAllocation.transform"
)
doc_topic_distr = self._unnormalized_transform(X)
if normalize:
doc_topic_distr /= doc_topic_distr.sum(axis=1)[:, np.newaxis]
return doc_topic_distr
|
Transform data X according to the fitted model.
.. versionchanged:: 0.18
`doc_topic_distr` is now normalized.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Document word matrix.
normalize : bool, default=True
Whether to normalize the document topic distribution.
Returns
-------
doc_topic_distr : ndarray of shape (n_samples, n_components)
Document topic distribution for X.
|
transform
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/_lda.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/_lda.py
|
BSD-3-Clause
|
def _approx_bound(self, X, doc_topic_distr, sub_sampling):
"""Estimate the variational bound.
Estimate the variational bound over "all documents" using only the
documents passed in as X. Since log-likelihood of each word cannot
be computed directly, we use this bound to estimate it.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Document word matrix.
doc_topic_distr : ndarray of shape (n_samples, n_components)
Document topic distribution. In the literature, this is called
gamma.
sub_sampling : bool, default=False
Compensate for subsampling of documents.
It is used when calculating the bound in online learning.
Returns
-------
score : float
"""
def _loglikelihood(prior, distr, dirichlet_distr, size):
# calculate log-likelihood
score = np.sum((prior - distr) * dirichlet_distr)
score += np.sum(gammaln(distr) - gammaln(prior))
score += np.sum(gammaln(prior * size) - gammaln(np.sum(distr, 1)))
return score
is_sparse_x = sp.issparse(X)
n_samples, n_components = doc_topic_distr.shape
n_features = self.components_.shape[1]
score = 0
dirichlet_doc_topic = _dirichlet_expectation_2d(doc_topic_distr)
dirichlet_component_ = _dirichlet_expectation_2d(self.components_)
doc_topic_prior = self.doc_topic_prior_
topic_word_prior = self.topic_word_prior_
if is_sparse_x:
X_data = X.data
X_indices = X.indices
X_indptr = X.indptr
# E[log p(docs | theta, beta)]
for idx_d in range(0, n_samples):
if is_sparse_x:
ids = X_indices[X_indptr[idx_d] : X_indptr[idx_d + 1]]
cnts = X_data[X_indptr[idx_d] : X_indptr[idx_d + 1]]
else:
ids = np.nonzero(X[idx_d, :])[0]
cnts = X[idx_d, ids]
temp = (
dirichlet_doc_topic[idx_d, :, np.newaxis] + dirichlet_component_[:, ids]
)
norm_phi = logsumexp(temp, axis=0)
score += np.dot(cnts, norm_phi)
# compute E[log p(theta | alpha) - log q(theta | gamma)]
score += _loglikelihood(
doc_topic_prior, doc_topic_distr, dirichlet_doc_topic, self.n_components
)
# Compensate for the subsampling of the population of documents
if sub_sampling:
doc_ratio = float(self.total_samples) / n_samples
score *= doc_ratio
# E[log p(beta | eta) - log q (beta | lambda)]
score += _loglikelihood(
topic_word_prior, self.components_, dirichlet_component_, n_features
)
return score
|
Estimate the variational bound.
Estimate the variational bound over "all documents" using only the
documents passed in as X. Since log-likelihood of each word cannot
be computed directly, we use this bound to estimate it.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Document word matrix.
doc_topic_distr : ndarray of shape (n_samples, n_components)
Document topic distribution. In the literature, this is called
gamma.
sub_sampling : bool, default=False
Compensate for subsampling of documents.
It is used when calculating the bound in online learning.
Returns
-------
score : float
|
_approx_bound
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/_lda.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/_lda.py
|
BSD-3-Clause
|
def score(self, X, y=None):
"""Calculate approximate log-likelihood as score.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Document word matrix.
y : Ignored
Not used, present here for API consistency by convention.
Returns
-------
score : float
Use approximate bound as score.
"""
check_is_fitted(self)
X = self._check_non_neg_array(
X, reset_n_features=False, whom="LatentDirichletAllocation.score"
)
doc_topic_distr = self._unnormalized_transform(X)
score = self._approx_bound(X, doc_topic_distr, sub_sampling=False)
return score
|
Calculate approximate log-likelihood as score.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Document word matrix.
y : Ignored
Not used, present here for API consistency by convention.
Returns
-------
score : float
Use approximate bound as score.
|
score
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/_lda.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/_lda.py
|
BSD-3-Clause
|
def _perplexity_precomp_distr(self, X, doc_topic_distr=None, sub_sampling=False):
"""Calculate approximate perplexity for data X with ability to accept
precomputed doc_topic_distr
Perplexity is defined as exp(-1. * log-likelihood per word)
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Document word matrix.
doc_topic_distr : ndarray of shape (n_samples, n_components), \
default=None
Document topic distribution.
If it is None, it will be generated by applying transform on X.
Returns
-------
score : float
Perplexity score.
"""
if doc_topic_distr is None:
doc_topic_distr = self._unnormalized_transform(X)
else:
n_samples, n_components = doc_topic_distr.shape
if n_samples != X.shape[0]:
raise ValueError(
"Number of samples in X and doc_topic_distr do not match."
)
if n_components != self.n_components:
raise ValueError("Number of topics does not match.")
current_samples = X.shape[0]
bound = self._approx_bound(X, doc_topic_distr, sub_sampling)
if sub_sampling:
word_cnt = X.sum() * (float(self.total_samples) / current_samples)
else:
word_cnt = X.sum()
perword_bound = bound / word_cnt
return np.exp(-1.0 * perword_bound)
|
Calculate approximate perplexity for data X with the ability to accept
precomputed doc_topic_distr
Perplexity is defined as exp(-1. * log-likelihood per word)
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Document word matrix.
doc_topic_distr : ndarray of shape (n_samples, n_components), default=None
Document topic distribution.
If it is None, it will be generated by applying transform on X.
Returns
-------
score : float
Perplexity score.
|
_perplexity_precomp_distr
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/_lda.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/_lda.py
|
BSD-3-Clause
|
def perplexity(self, X, sub_sampling=False):
"""Calculate approximate perplexity for data X.
Perplexity is defined as exp(-1. * log-likelihood per word)
.. versionchanged:: 0.19
*doc_topic_distr* argument has been deprecated and is ignored
           because the user no longer has access to the unnormalized distribution
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Document word matrix.
sub_sampling : bool
Do sub-sampling or not.
Returns
-------
score : float
Perplexity score.
"""
check_is_fitted(self)
X = self._check_non_neg_array(
X, reset_n_features=True, whom="LatentDirichletAllocation.perplexity"
)
return self._perplexity_precomp_distr(X, sub_sampling=sub_sampling)
|
Calculate approximate perplexity for data X.
Perplexity is defined as exp(-1. * log-likelihood per word)
.. versionchanged:: 0.19
*doc_topic_distr* argument has been deprecated and is ignored
           because the user no longer has access to the unnormalized distribution
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Document word matrix.
sub_sampling : bool
Do sub-sampling or not.
Returns
-------
score : float
Perplexity score.
|
perplexity
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/_lda.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/_lda.py
|
BSD-3-Clause
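Since perplexity returns exp(-bound / word count) and score returns the same variational bound, the two public methods should be consistent on the same data; a sketch of that relation (up to floating point, and assuming both calls run the same deterministic E-step):

import numpy as np
from sklearn.decomposition import LatentDirichletAllocation

X = np.array([[3, 0, 1, 0], [2, 1, 0, 0], [0, 0, 2, 4], [0, 1, 3, 2]])
lda = LatentDirichletAllocation(n_components=2, random_state=0).fit(X)

perp = lda.perplexity(X)                       # exp(-1 * log-likelihood per word)
bound_based = np.exp(-lda.score(X) / X.sum())  # same quantity rebuilt from score()
print(np.isclose(perp, bound_based))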
|
def _beta_divergence(X, W, H, beta, square_root=False):
"""Compute the beta-divergence of X and dot(W, H).
Parameters
----------
X : float or array-like of shape (n_samples, n_features)
W : float or array-like of shape (n_samples, n_components)
H : float or array-like of shape (n_components, n_features)
beta : float or {'frobenius', 'kullback-leibler', 'itakura-saito'}
Parameter of the beta-divergence.
If beta == 2, this is half the Frobenius *squared* norm.
If beta == 1, this is the generalized Kullback-Leibler divergence.
If beta == 0, this is the Itakura-Saito divergence.
Else, this is the general beta-divergence.
square_root : bool, default=False
If True, return np.sqrt(2 * res)
For beta == 2, it corresponds to the Frobenius norm.
Returns
-------
res : float
        Beta divergence of X and np.dot(W, H).
"""
beta = _beta_loss_to_float(beta)
# The method can be called with scalars
if not sp.issparse(X):
X = np.atleast_2d(X)
W = np.atleast_2d(W)
H = np.atleast_2d(H)
# Frobenius norm
if beta == 2:
# Avoid the creation of the dense np.dot(W, H) if X is sparse.
if sp.issparse(X):
norm_X = np.dot(X.data, X.data)
norm_WH = trace_dot(np.linalg.multi_dot([W.T, W, H]), H)
cross_prod = trace_dot((X @ H.T), W)
res = (norm_X + norm_WH - 2.0 * cross_prod) / 2.0
else:
res = squared_norm(X - np.dot(W, H)) / 2.0
if square_root:
return np.sqrt(res * 2)
else:
return res
if sp.issparse(X):
# compute np.dot(W, H) only where X is nonzero
WH_data = _special_sparse_dot(W, H, X).data
X_data = X.data
else:
WH = np.dot(W, H)
WH_data = WH.ravel()
X_data = X.ravel()
# do not affect the zeros: here 0 ** (-1) = 0 and not infinity
indices = X_data > EPSILON
WH_data = WH_data[indices]
X_data = X_data[indices]
# used to avoid division by zero
WH_data[WH_data < EPSILON] = EPSILON
# generalized Kullback-Leibler divergence
if beta == 1:
# fast and memory efficient computation of np.sum(np.dot(W, H))
sum_WH = np.dot(np.sum(W, axis=0), np.sum(H, axis=1))
# computes np.sum(X * log(X / WH)) only where X is nonzero
div = X_data / WH_data
res = np.dot(X_data, np.log(div))
# add full np.sum(np.dot(W, H)) - np.sum(X)
res += sum_WH - X_data.sum()
# Itakura-Saito divergence
elif beta == 0:
div = X_data / WH_data
res = np.sum(div) - np.prod(X.shape) - np.sum(np.log(div))
# beta-divergence, beta not in (0, 1, 2)
else:
if sp.issparse(X):
# slow loop, but memory efficient computation of :
# np.sum(np.dot(W, H) ** beta)
sum_WH_beta = 0
for i in range(X.shape[1]):
sum_WH_beta += np.sum(np.dot(W, H[:, i]) ** beta)
else:
sum_WH_beta = np.sum(WH**beta)
sum_X_WH = np.dot(X_data, WH_data ** (beta - 1))
res = (X_data**beta).sum() - beta * sum_X_WH
res += sum_WH_beta * (beta - 1)
res /= beta * (beta - 1)
if square_root:
res = max(res, 0) # avoid negative number due to rounding errors
return np.sqrt(2 * res)
else:
return res
|
Compute the beta-divergence of X and dot(W, H).
Parameters
----------
X : float or array-like of shape (n_samples, n_features)
W : float or array-like of shape (n_samples, n_components)
H : float or array-like of shape (n_components, n_features)
beta : float or {'frobenius', 'kullback-leibler', 'itakura-saito'}
Parameter of the beta-divergence.
If beta == 2, this is half the Frobenius *squared* norm.
If beta == 1, this is the generalized Kullback-Leibler divergence.
If beta == 0, this is the Itakura-Saito divergence.
Else, this is the general beta-divergence.
square_root : bool, default=False
If True, return np.sqrt(2 * res)
For beta == 2, it corresponds to the Frobenius norm.
Returns
-------
res : float
        Beta divergence of X and np.dot(W, H).
|
_beta_divergence
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/_nmf.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/_nmf.py
|
BSD-3-Clause
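A dense, unoptimized reference for the losses listed above; naive_beta_divergence is a hypothetical helper written only for illustration and skips the sparse and scalar code paths of the real function.

import numpy as np

def naive_beta_divergence(X, W, H, beta):
    WH = W @ H
    if beta == 2:      # half the squared Frobenius norm
        return 0.5 * np.sum((X - WH) ** 2)
    if beta == 1:      # generalized Kullback-Leibler divergence
        mask = X > 0   # X * log(X / WH) is taken as 0 where X == 0
        return np.sum(X[mask] * np.log(X[mask] / WH[mask])) - X.sum() + WH.sum()
    if beta == 0:      # Itakura-Saito divergence (X must be strictly positive)
        div = X / WH
        return np.sum(div - np.log(div)) - X.size
    # general beta-divergence for beta not in {0, 1, 2}
    return np.sum(X ** beta - beta * X * WH ** (beta - 1)
                  + (beta - 1) * WH ** beta) / (beta * (beta - 1))

rng = np.random.default_rng(0)
X = rng.random((4, 3)) + 0.1
W = rng.random((4, 2)) + 0.1
H = rng.random((2, 3)) + 0.1
for beta in (2, 1, 0, 1.5):
    print(beta, naive_beta_divergence(X, W, H, beta))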
|
def _special_sparse_dot(W, H, X):
"""Computes np.dot(W, H), only where X is non zero."""
if sp.issparse(X):
ii, jj = X.nonzero()
n_vals = ii.shape[0]
dot_vals = np.empty(n_vals)
n_components = W.shape[1]
batch_size = max(n_components, n_vals // n_components)
for start in range(0, n_vals, batch_size):
batch = slice(start, start + batch_size)
dot_vals[batch] = np.multiply(W[ii[batch], :], H.T[jj[batch], :]).sum(
axis=1
)
WH = sp.coo_matrix((dot_vals, (ii, jj)), shape=X.shape)
return WH.tocsr()
else:
return np.dot(W, H)
|
Computes np.dot(W, H), only where X is non zero.
|
_special_sparse_dot
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/_nmf.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/_nmf.py
|
BSD-3-Clause
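A small standalone sketch of the same idea, computing W @ H only at the stored entries of a sparse X (toy shapes, illustrative variable names):

import numpy as np
import scipy.sparse as sp

rng = np.random.default_rng(0)
W = rng.random((5, 2))
H = rng.random((2, 4))
X = sp.random(5, 4, density=0.3, format="csr", random_state=0)

ii, jj = X.nonzero()
# For each stored entry (i, j) of X, compute only W[i, :] @ H[:, j].
dot_vals = np.einsum("ij,ij->i", W[ii, :], H.T[jj, :])
WH = sp.coo_matrix((dot_vals, (ii, jj)), shape=X.shape).tocsr()

# Same values as the dense product, but only where X has stored entries.
print(np.allclose(WH.toarray()[ii, jj], (W @ H)[ii, jj]))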
|
def _initialize_nmf(X, n_components, init=None, eps=1e-6, random_state=None):
"""Algorithms for NMF initialization.
Computes an initial guess for the non-negative
rank k matrix approximation for X: X = WH.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The data matrix to be decomposed.
n_components : int
The number of components desired in the approximation.
init : {'random', 'nndsvd', 'nndsvda', 'nndsvdar'}, default=None
Method used to initialize the procedure.
Valid options:
- None: 'nndsvda' if n_components <= min(n_samples, n_features),
otherwise 'random'.
- 'random': non-negative random matrices, scaled with:
sqrt(X.mean() / n_components)
- 'nndsvd': Nonnegative Double Singular Value Decomposition (NNDSVD)
initialization (better for sparseness)
- 'nndsvda': NNDSVD with zeros filled with the average of X
(better when sparsity is not desired)
- 'nndsvdar': NNDSVD with zeros filled with small random values
(generally faster, less accurate alternative to NNDSVDa
for when sparsity is not desired)
- 'custom': use custom matrices W and H
.. versionchanged:: 1.1
            When `init=None` and n_components is less than n_samples and n_features,
defaults to `nndsvda` instead of `nndsvd`.
eps : float, default=1e-6
        Truncate all values less than this in output to zero.
random_state : int, RandomState instance or None, default=None
Used when ``init`` == 'nndsvdar' or 'random'. Pass an int for
reproducible results across multiple function calls.
See :term:`Glossary <random_state>`.
Returns
-------
W : array-like of shape (n_samples, n_components)
Initial guesses for solving X ~= WH.
H : array-like of shape (n_components, n_features)
Initial guesses for solving X ~= WH.
References
----------
C. Boutsidis, E. Gallopoulos: SVD based initialization: A head start for
nonnegative matrix factorization - Pattern Recognition, 2008
http://tinyurl.com/nndsvd
"""
check_non_negative(X, "NMF initialization")
n_samples, n_features = X.shape
if (
init is not None
and init != "random"
and n_components > min(n_samples, n_features)
):
raise ValueError(
"init = '{}' can only be used when "
"n_components <= min(n_samples, n_features)".format(init)
)
if init is None:
if n_components <= min(n_samples, n_features):
init = "nndsvda"
else:
init = "random"
# Random initialization
if init == "random":
avg = np.sqrt(X.mean() / n_components)
rng = check_random_state(random_state)
H = avg * rng.standard_normal(size=(n_components, n_features)).astype(
X.dtype, copy=False
)
W = avg * rng.standard_normal(size=(n_samples, n_components)).astype(
X.dtype, copy=False
)
np.abs(H, out=H)
np.abs(W, out=W)
return W, H
# NNDSVD initialization
U, S, V = _randomized_svd(X, n_components, random_state=random_state)
W = np.zeros_like(U)
H = np.zeros_like(V)
# The leading singular triplet is non-negative
# so it can be used as is for initialization.
W[:, 0] = np.sqrt(S[0]) * np.abs(U[:, 0])
H[0, :] = np.sqrt(S[0]) * np.abs(V[0, :])
for j in range(1, n_components):
x, y = U[:, j], V[j, :]
# extract positive and negative parts of column vectors
x_p, y_p = np.maximum(x, 0), np.maximum(y, 0)
x_n, y_n = np.abs(np.minimum(x, 0)), np.abs(np.minimum(y, 0))
# and their norms
x_p_nrm, y_p_nrm = norm(x_p), norm(y_p)
x_n_nrm, y_n_nrm = norm(x_n), norm(y_n)
m_p, m_n = x_p_nrm * y_p_nrm, x_n_nrm * y_n_nrm
# choose update
if m_p > m_n:
u = x_p / x_p_nrm
v = y_p / y_p_nrm
sigma = m_p
else:
u = x_n / x_n_nrm
v = y_n / y_n_nrm
sigma = m_n
lbd = np.sqrt(S[j] * sigma)
W[:, j] = lbd * u
H[j, :] = lbd * v
W[W < eps] = 0
H[H < eps] = 0
if init == "nndsvd":
pass
elif init == "nndsvda":
avg = X.mean()
W[W == 0] = avg
H[H == 0] = avg
elif init == "nndsvdar":
rng = check_random_state(random_state)
avg = X.mean()
W[W == 0] = abs(avg * rng.standard_normal(size=len(W[W == 0])) / 100)
H[H == 0] = abs(avg * rng.standard_normal(size=len(H[H == 0])) / 100)
else:
raise ValueError(
"Invalid init parameter: got %r instead of one of %r"
% (init, (None, "random", "nndsvd", "nndsvda", "nndsvdar"))
)
return W, H
|
Algorithms for NMF initialization.
Computes an initial guess for the non-negative
rank k matrix approximation for X: X = WH.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The data matrix to be decomposed.
n_components : int
The number of components desired in the approximation.
init : {'random', 'nndsvd', 'nndsvda', 'nndsvdar'}, default=None
Method used to initialize the procedure.
Valid options:
- None: 'nndsvda' if n_components <= min(n_samples, n_features),
otherwise 'random'.
- 'random': non-negative random matrices, scaled with:
sqrt(X.mean() / n_components)
- 'nndsvd': Nonnegative Double Singular Value Decomposition (NNDSVD)
initialization (better for sparseness)
- 'nndsvda': NNDSVD with zeros filled with the average of X
(better when sparsity is not desired)
- 'nndsvdar': NNDSVD with zeros filled with small random values
(generally faster, less accurate alternative to NNDSVDa
for when sparsity is not desired)
- 'custom': use custom matrices W and H
.. versionchanged:: 1.1
            When `init=None` and n_components is less than n_samples and n_features,
defaults to `nndsvda` instead of `nndsvd`.
eps : float, default=1e-6
        Truncate all values less than this in output to zero.
random_state : int, RandomState instance or None, default=None
Used when ``init`` == 'nndsvdar' or 'random'. Pass an int for
reproducible results across multiple function calls.
See :term:`Glossary <random_state>`.
Returns
-------
W : array-like of shape (n_samples, n_components)
Initial guesses for solving X ~= WH.
H : array-like of shape (n_components, n_features)
Initial guesses for solving X ~= WH.
References
----------
C. Boutsidis, E. Gallopoulos: SVD based initialization: A head start for
nonnegative matrix factorization - Pattern Recognition, 2008
http://tinyurl.com/nndsvd
|
_initialize_nmf
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/_nmf.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/_nmf.py
|
BSD-3-Clause
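The initializer is not public API; the usual way to exercise it is through the init parameter of the NMF estimator, as in this sketch (toy random data, results will differ on other data):

import numpy as np
from sklearn.decomposition import NMF

rng = np.random.default_rng(0)
X = np.abs(rng.random((20, 8)))

for init in ("random", "nndsvd", "nndsvda", "nndsvdar"):
    model = NMF(n_components=3, init=init, max_iter=1000, random_state=0)
    model.fit(X)
    print(init, round(model.reconstruction_err_, 4))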
|
def _update_coordinate_descent(X, W, Ht, l1_reg, l2_reg, shuffle, random_state):
"""Helper function for _fit_coordinate_descent.
Update W to minimize the objective function, iterating once over all
coordinates. By symmetry, to update H, one can call
_update_coordinate_descent(X.T, Ht, W, ...).
"""
n_components = Ht.shape[1]
HHt = np.dot(Ht.T, Ht)
XHt = safe_sparse_dot(X, Ht)
# L2 regularization corresponds to increase of the diagonal of HHt
if l2_reg != 0.0:
# adds l2_reg only on the diagonal
HHt.flat[:: n_components + 1] += l2_reg
# L1 regularization corresponds to decrease of each element of XHt
if l1_reg != 0.0:
XHt -= l1_reg
if shuffle:
permutation = random_state.permutation(n_components)
else:
permutation = np.arange(n_components)
# The following seems to be required on 64-bit Windows w/ Python 3.5.
permutation = np.asarray(permutation, dtype=np.intp)
return _update_cdnmf_fast(W, HHt, XHt, permutation)
|
Helper function for _fit_coordinate_descent.
Update W to minimize the objective function, iterating once over all
coordinates. By symmetry, to update H, one can call
_update_coordinate_descent(X.T, Ht, W, ...).
|
_update_coordinate_descent
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/_nmf.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/_nmf.py
|
BSD-3-Clause
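A pure-NumPy sketch of one cyclic pass over the columns of W with precomputed HHt and XHt, standing in for the Cython kernel _update_cdnmf_fast; the progress measure returned here is a simplified substitute for the real projected-gradient violation.

import numpy as np

def one_cd_pass(X, W, Ht, l1_reg=0.0, l2_reg=0.0):
    HHt = Ht.T @ Ht                            # (n_components, n_components)
    XHt = X @ Ht - l1_reg                      # L1 shifts the linear term
    HHt[np.diag_indices_from(HHt)] += l2_reg   # L2 grows the diagonal
    progress = 0.0
    for t in range(W.shape[1]):
        grad = W @ HHt[:, t] - XHt[:, t]       # gradient w.r.t. W[:, t]
        W_new = np.maximum(W[:, t] - grad / HHt[t, t], 0.0)
        progress += np.abs(W_new - W[:, t]).sum()
        W[:, t] = W_new
    return progress

rng = np.random.default_rng(0)
X = np.abs(rng.random((6, 4)))
W = np.abs(rng.random((6, 2)))
Ht = np.abs(rng.random((4, 2)))   # Ht is H transposed, as in the helper above
print(one_cd_pass(X, W, Ht))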
|
def _fit_coordinate_descent(
X,
W,
H,
tol=1e-4,
max_iter=200,
l1_reg_W=0,
l1_reg_H=0,
l2_reg_W=0,
l2_reg_H=0,
update_H=True,
verbose=0,
shuffle=False,
random_state=None,
):
"""Compute Non-negative Matrix Factorization (NMF) with Coordinate Descent
The objective function is minimized with an alternating minimization of W
and H. Each minimization is done with a cyclic (up to a permutation of the
features) Coordinate Descent.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Constant matrix.
W : array-like of shape (n_samples, n_components)
Initial guess for the solution.
H : array-like of shape (n_components, n_features)
Initial guess for the solution.
tol : float, default=1e-4
Tolerance of the stopping condition.
max_iter : int, default=200
Maximum number of iterations before timing out.
l1_reg_W : float, default=0.
L1 regularization parameter for W.
l1_reg_H : float, default=0.
L1 regularization parameter for H.
l2_reg_W : float, default=0.
L2 regularization parameter for W.
l2_reg_H : float, default=0.
L2 regularization parameter for H.
update_H : bool, default=True
Set to True, both W and H will be estimated from initial guesses.
Set to False, only W will be estimated.
verbose : int, default=0
The verbosity level.
shuffle : bool, default=False
If true, randomize the order of coordinates in the CD solver.
random_state : int, RandomState instance or None, default=None
Used to randomize the coordinates in the CD solver, when
``shuffle`` is set to ``True``. Pass an int for reproducible
results across multiple function calls.
See :term:`Glossary <random_state>`.
Returns
-------
W : ndarray of shape (n_samples, n_components)
Solution to the non-negative least squares problem.
H : ndarray of shape (n_components, n_features)
Solution to the non-negative least squares problem.
n_iter : int
The number of iterations done by the algorithm.
References
----------
.. [1] :doi:`"Fast local algorithms for large scale nonnegative matrix and tensor
factorizations" <10.1587/transfun.E92.A.708>`
Cichocki, Andrzej, and P. H. A. N. Anh-Huy. IEICE transactions on fundamentals
of electronics, communications and computer sciences 92.3: 708-721, 2009.
"""
# so W and Ht are both in C order in memory
Ht = check_array(H.T, order="C")
X = check_array(X, accept_sparse="csr")
rng = check_random_state(random_state)
for n_iter in range(1, max_iter + 1):
violation = 0.0
# Update W
violation += _update_coordinate_descent(
X, W, Ht, l1_reg_W, l2_reg_W, shuffle, rng
)
# Update H
if update_H:
violation += _update_coordinate_descent(
X.T, Ht, W, l1_reg_H, l2_reg_H, shuffle, rng
)
if n_iter == 1:
violation_init = violation
if violation_init == 0:
break
if verbose:
print("violation:", violation / violation_init)
if violation / violation_init <= tol:
if verbose:
print("Converged at iteration", n_iter + 1)
break
return W, Ht.T, n_iter
|
Compute Non-negative Matrix Factorization (NMF) with Coordinate Descent
The objective function is minimized with an alternating minimization of W
and H. Each minimization is done with a cyclic (up to a permutation of the
features) Coordinate Descent.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Constant matrix.
W : array-like of shape (n_samples, n_components)
Initial guess for the solution.
H : array-like of shape (n_components, n_features)
Initial guess for the solution.
tol : float, default=1e-4
Tolerance of the stopping condition.
max_iter : int, default=200
Maximum number of iterations before timing out.
l1_reg_W : float, default=0.
L1 regularization parameter for W.
l1_reg_H : float, default=0.
L1 regularization parameter for H.
l2_reg_W : float, default=0.
L2 regularization parameter for W.
l2_reg_H : float, default=0.
L2 regularization parameter for H.
update_H : bool, default=True
Set to True, both W and H will be estimated from initial guesses.
Set to False, only W will be estimated.
verbose : int, default=0
The verbosity level.
shuffle : bool, default=False
If true, randomize the order of coordinates in the CD solver.
random_state : int, RandomState instance or None, default=None
Used to randomize the coordinates in the CD solver, when
``shuffle`` is set to ``True``. Pass an int for reproducible
results across multiple function calls.
See :term:`Glossary <random_state>`.
Returns
-------
W : ndarray of shape (n_samples, n_components)
Solution to the non-negative least squares problem.
H : ndarray of shape (n_components, n_features)
Solution to the non-negative least squares problem.
n_iter : int
The number of iterations done by the algorithm.
References
----------
.. [1] :doi:`"Fast local algorithms for large scale nonnegative matrix and tensor
factorizations" <10.1587/transfun.E92.A.708>`
Cichocki, Andrzej, and P. H. A. N. Anh-Huy. IEICE transactions on fundamentals
of electronics, communications and computer sciences 92.3: 708-721, 2009.
|
_fit_coordinate_descent
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/_nmf.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/_nmf.py
|
BSD-3-Clause
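This solver is reached through the public estimator with solver='cd'; a minimal usage sketch on the same toy matrix used in the non_negative_factorization docstring later in this file:

import numpy as np
from sklearn.decomposition import NMF

X = np.array([[1.0, 1.0], [2.0, 1.0], [3.0, 1.2],
              [4.0, 1.0], [5.0, 0.8], [6.0, 1.0]])
model = NMF(n_components=2, solver="cd", init="nndsvda",
            tol=1e-4, max_iter=200, random_state=0)
W = model.fit_transform(X)
H = model.components_
print(model.n_iter_)
print(np.round(W @ H, 2))   # reconstruction close to X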
|
def _fit_multiplicative_update(
X,
W,
H,
beta_loss="frobenius",
max_iter=200,
tol=1e-4,
l1_reg_W=0,
l1_reg_H=0,
l2_reg_W=0,
l2_reg_H=0,
update_H=True,
verbose=0,
):
"""Compute Non-negative Matrix Factorization with Multiplicative Update.
The objective function is _beta_divergence(X, WH) and is minimized with an
alternating minimization of W and H. Each minimization is done with a
Multiplicative Update.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Constant input matrix.
W : array-like of shape (n_samples, n_components)
Initial guess for the solution.
H : array-like of shape (n_components, n_features)
Initial guess for the solution.
beta_loss : float or {'frobenius', 'kullback-leibler', \
'itakura-saito'}, default='frobenius'
String must be in {'frobenius', 'kullback-leibler', 'itakura-saito'}.
Beta divergence to be minimized, measuring the distance between X
and the dot product WH. Note that values different from 'frobenius'
(or 2) and 'kullback-leibler' (or 1) lead to significantly slower
fits. Note that for beta_loss <= 0 (or 'itakura-saito'), the input
matrix X cannot contain zeros.
max_iter : int, default=200
Number of iterations.
tol : float, default=1e-4
Tolerance of the stopping condition.
l1_reg_W : float, default=0.
L1 regularization parameter for W.
l1_reg_H : float, default=0.
L1 regularization parameter for H.
l2_reg_W : float, default=0.
L2 regularization parameter for W.
l2_reg_H : float, default=0.
L2 regularization parameter for H.
update_H : bool, default=True
Set to True, both W and H will be estimated from initial guesses.
Set to False, only W will be estimated.
verbose : int, default=0
The verbosity level.
Returns
-------
W : ndarray of shape (n_samples, n_components)
Solution to the non-negative least squares problem.
H : ndarray of shape (n_components, n_features)
Solution to the non-negative least squares problem.
n_iter : int
The number of iterations done by the algorithm.
References
----------
    Lee, D. D., & Seung, H. S. (2001). Algorithms for Non-negative Matrix
    Factorization. Adv. Neural Inform. Process. Syst., 13.
Fevotte, C., & Idier, J. (2011). Algorithms for nonnegative matrix
factorization with the beta-divergence. Neural Computation, 23(9).
"""
start_time = time.time()
beta_loss = _beta_loss_to_float(beta_loss)
# gamma for Maximization-Minimization (MM) algorithm [Fevotte 2011]
if beta_loss < 1:
gamma = 1.0 / (2.0 - beta_loss)
elif beta_loss > 2:
gamma = 1.0 / (beta_loss - 1.0)
else:
gamma = 1.0
# used for the convergence criterion
error_at_init = _beta_divergence(X, W, H, beta_loss, square_root=True)
previous_error = error_at_init
H_sum, HHt, XHt = None, None, None
for n_iter in range(1, max_iter + 1):
# update W
# H_sum, HHt and XHt are saved and reused if not update_H
W, H_sum, HHt, XHt = _multiplicative_update_w(
X,
W,
H,
beta_loss=beta_loss,
l1_reg_W=l1_reg_W,
l2_reg_W=l2_reg_W,
gamma=gamma,
H_sum=H_sum,
HHt=HHt,
XHt=XHt,
update_H=update_H,
)
# necessary for stability with beta_loss < 1
if beta_loss < 1:
W[W < np.finfo(np.float64).eps] = 0.0
# update H (only at fit or fit_transform)
if update_H:
H = _multiplicative_update_h(
X,
W,
H,
beta_loss=beta_loss,
l1_reg_H=l1_reg_H,
l2_reg_H=l2_reg_H,
gamma=gamma,
)
# These values will be recomputed since H changed
H_sum, HHt, XHt = None, None, None
# necessary for stability with beta_loss < 1
if beta_loss <= 1:
H[H < np.finfo(np.float64).eps] = 0.0
# test convergence criterion every 10 iterations
if tol > 0 and n_iter % 10 == 0:
error = _beta_divergence(X, W, H, beta_loss, square_root=True)
if verbose:
iter_time = time.time()
print(
"Epoch %02d reached after %.3f seconds, error: %f"
% (n_iter, iter_time - start_time, error)
)
if (previous_error - error) / error_at_init < tol:
break
previous_error = error
# do not print if we have already printed in the convergence test
if verbose and (tol == 0 or n_iter % 10 != 0):
end_time = time.time()
print(
"Epoch %02d reached after %.3f seconds." % (n_iter, end_time - start_time)
)
return W, H, n_iter
|
Compute Non-negative Matrix Factorization with Multiplicative Update.
The objective function is _beta_divergence(X, WH) and is minimized with an
alternating minimization of W and H. Each minimization is done with a
Multiplicative Update.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Constant input matrix.
W : array-like of shape (n_samples, n_components)
Initial guess for the solution.
H : array-like of shape (n_components, n_features)
Initial guess for the solution.
beta_loss : float or {'frobenius', 'kullback-leibler', 'itakura-saito'}, default='frobenius'
String must be in {'frobenius', 'kullback-leibler', 'itakura-saito'}.
Beta divergence to be minimized, measuring the distance between X
and the dot product WH. Note that values different from 'frobenius'
(or 2) and 'kullback-leibler' (or 1) lead to significantly slower
fits. Note that for beta_loss <= 0 (or 'itakura-saito'), the input
matrix X cannot contain zeros.
max_iter : int, default=200
Number of iterations.
tol : float, default=1e-4
Tolerance of the stopping condition.
l1_reg_W : float, default=0.
L1 regularization parameter for W.
l1_reg_H : float, default=0.
L1 regularization parameter for H.
l2_reg_W : float, default=0.
L2 regularization parameter for W.
l2_reg_H : float, default=0.
L2 regularization parameter for H.
update_H : bool, default=True
Set to True, both W and H will be estimated from initial guesses.
Set to False, only W will be estimated.
verbose : int, default=0
The verbosity level.
Returns
-------
W : ndarray of shape (n_samples, n_components)
Solution to the non-negative least squares problem.
H : ndarray of shape (n_components, n_features)
Solution to the non-negative least squares problem.
n_iter : int
The number of iterations done by the algorithm.
References
----------
    Lee, D. D., & Seung, H. S. (2001). Algorithms for Non-negative Matrix
    Factorization. Adv. Neural Inform. Process. Syst., 13.
Fevotte, C., & Idier, J. (2011). Algorithms for nonnegative matrix
factorization with the beta-divergence. Neural Computation, 23(9).
|
_fit_multiplicative_update
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/_nmf.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/_nmf.py
|
BSD-3-Clause
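For the Frobenius case (beta_loss=2, gamma=1, no regularization) the update above reduces to the classic Lee & Seung rules; a bare-bones sketch of those rules, without the convergence test or the stability clipping used in the real solver:

import numpy as np

rng = np.random.default_rng(0)
X = np.abs(rng.random((10, 6)))
W = np.abs(rng.random((10, 3)))
H = np.abs(rng.random((3, 6)))
eps = 1e-10   # keeps denominators away from zero

for _ in range(200):
    W *= (X @ H.T) / (W @ H @ H.T + eps)
    H *= (W.T @ X) / (W.T @ W @ H + eps)

print(np.linalg.norm(X - W @ H))   # Frobenius error decreases toward a local optimum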
|
def non_negative_factorization(
X,
W=None,
H=None,
n_components="auto",
*,
init=None,
update_H=True,
solver="cd",
beta_loss="frobenius",
tol=1e-4,
max_iter=200,
alpha_W=0.0,
alpha_H="same",
l1_ratio=0.0,
random_state=None,
verbose=0,
shuffle=False,
):
"""Compute Non-negative Matrix Factorization (NMF).
Find two non-negative matrices (W, H) whose product approximates the non-
negative matrix X. This factorization can be used for example for
dimensionality reduction, source separation or topic extraction.
The objective function is:
.. math::
L(W, H) &= 0.5 * ||X - WH||_{loss}^2
&+ alpha\\_W * l1\\_ratio * n\\_features * ||vec(W)||_1
&+ alpha\\_H * l1\\_ratio * n\\_samples * ||vec(H)||_1
&+ 0.5 * alpha\\_W * (1 - l1\\_ratio) * n\\_features * ||W||_{Fro}^2
&+ 0.5 * alpha\\_H * (1 - l1\\_ratio) * n\\_samples * ||H||_{Fro}^2,
where :math:`||A||_{Fro}^2 = \\sum_{i,j} A_{ij}^2` (Frobenius norm) and
:math:`||vec(A)||_1 = \\sum_{i,j} abs(A_{ij})` (Elementwise L1 norm)
The generic norm :math:`||X - WH||_{loss}^2` may represent
the Frobenius norm or another supported beta-divergence loss.
The choice between options is controlled by the `beta_loss` parameter.
The regularization terms are scaled by `n_features` for `W` and by `n_samples` for
`H` to keep their impact balanced with respect to one another and to the data fit
term as independent as possible of the size `n_samples` of the training set.
The objective function is minimized with an alternating minimization of W
and H. If H is given and update_H=False, it solves for W only.
Note that the transformed data is named W and the components matrix is named H. In
the NMF literature, the naming convention is usually the opposite since the data
matrix X is transposed.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Constant matrix.
W : array-like of shape (n_samples, n_components), default=None
If `init='custom'`, it is used as initial guess for the solution.
If `update_H=False`, it is initialised as an array of zeros, unless
`solver='mu'`, then it is filled with values calculated by
`np.sqrt(X.mean() / self._n_components)`.
If `None`, uses the initialisation method specified in `init`.
H : array-like of shape (n_components, n_features), default=None
If `init='custom'`, it is used as initial guess for the solution.
If `update_H=False`, it is used as a constant, to solve for W only.
If `None`, uses the initialisation method specified in `init`.
n_components : int or {'auto'} or None, default='auto'
Number of components. If `None`, all features are kept.
If `n_components='auto'`, the number of components is automatically inferred
from `W` or `H` shapes.
.. versionchanged:: 1.4
Added `'auto'` value.
.. versionchanged:: 1.6
Default value changed from `None` to `'auto'`.
init : {'random', 'nndsvd', 'nndsvda', 'nndsvdar', 'custom'}, default=None
Method used to initialize the procedure.
Valid options:
        - None: 'nndsvda' if n_components <= min(n_samples, n_features),
          otherwise 'random'.
- 'random': non-negative random matrices, scaled with:
`sqrt(X.mean() / n_components)`
- 'nndsvd': Nonnegative Double Singular Value Decomposition (NNDSVD)
initialization (better for sparseness)
- 'nndsvda': NNDSVD with zeros filled with the average of X
(better when sparsity is not desired)
- 'nndsvdar': NNDSVD with zeros filled with small random values
(generally faster, less accurate alternative to NNDSVDa
for when sparsity is not desired)
- 'custom': If `update_H=True`, use custom matrices W and H which must both
be provided. If `update_H=False`, then only custom matrix H is used.
.. versionchanged:: 0.23
The default value of `init` changed from 'random' to None in 0.23.
.. versionchanged:: 1.1
            When `init=None` and n_components is less than n_samples and n_features,
defaults to `nndsvda` instead of `nndsvd`.
update_H : bool, default=True
Set to True, both W and H will be estimated from initial guesses.
Set to False, only W will be estimated.
solver : {'cd', 'mu'}, default='cd'
Numerical solver to use:
- 'cd' is a Coordinate Descent solver that uses Fast Hierarchical
Alternating Least Squares (Fast HALS).
- 'mu' is a Multiplicative Update solver.
.. versionadded:: 0.17
Coordinate Descent solver.
.. versionadded:: 0.19
Multiplicative Update solver.
beta_loss : float or {'frobenius', 'kullback-leibler', \
'itakura-saito'}, default='frobenius'
Beta divergence to be minimized, measuring the distance between X
and the dot product WH. Note that values different from 'frobenius'
(or 2) and 'kullback-leibler' (or 1) lead to significantly slower
fits. Note that for beta_loss <= 0 (or 'itakura-saito'), the input
matrix X cannot contain zeros. Used only in 'mu' solver.
.. versionadded:: 0.19
tol : float, default=1e-4
Tolerance of the stopping condition.
max_iter : int, default=200
Maximum number of iterations before timing out.
alpha_W : float, default=0.0
Constant that multiplies the regularization terms of `W`. Set it to zero
(default) to have no regularization on `W`.
.. versionadded:: 1.0
alpha_H : float or "same", default="same"
Constant that multiplies the regularization terms of `H`. Set it to zero to
have no regularization on `H`. If "same" (default), it takes the same value as
`alpha_W`.
.. versionadded:: 1.0
l1_ratio : float, default=0.0
The regularization mixing parameter, with 0 <= l1_ratio <= 1.
For l1_ratio = 0 the penalty is an elementwise L2 penalty
(aka Frobenius Norm).
For l1_ratio = 1 it is an elementwise L1 penalty.
For 0 < l1_ratio < 1, the penalty is a combination of L1 and L2.
random_state : int, RandomState instance or None, default=None
Used for NMF initialisation (when ``init`` == 'nndsvdar' or
'random'), and in Coordinate Descent. Pass an int for reproducible
results across multiple function calls.
See :term:`Glossary <random_state>`.
verbose : int, default=0
The verbosity level.
shuffle : bool, default=False
If true, randomize the order of coordinates in the CD solver.
Returns
-------
W : ndarray of shape (n_samples, n_components)
Solution to the non-negative least squares problem.
H : ndarray of shape (n_components, n_features)
Solution to the non-negative least squares problem.
n_iter : int
Actual number of iterations.
References
----------
.. [1] :doi:`"Fast local algorithms for large scale nonnegative matrix and tensor
factorizations" <10.1587/transfun.E92.A.708>`
Cichocki, Andrzej, and P. H. A. N. Anh-Huy. IEICE transactions on fundamentals
of electronics, communications and computer sciences 92.3: 708-721, 2009.
.. [2] :doi:`"Algorithms for nonnegative matrix factorization with the
beta-divergence" <10.1162/NECO_a_00168>`
Fevotte, C., & Idier, J. (2011). Neural Computation, 23(9).
Examples
--------
>>> import numpy as np
>>> X = np.array([[1,1], [2, 1], [3, 1.2], [4, 1], [5, 0.8], [6, 1]])
>>> from sklearn.decomposition import non_negative_factorization
>>> W, H, n_iter = non_negative_factorization(
... X, n_components=2, init='random', random_state=0)
"""
est = NMF(
n_components=n_components,
init=init,
solver=solver,
beta_loss=beta_loss,
tol=tol,
max_iter=max_iter,
random_state=random_state,
alpha_W=alpha_W,
alpha_H=alpha_H,
l1_ratio=l1_ratio,
verbose=verbose,
shuffle=shuffle,
)
est._validate_params()
X = check_array(X, accept_sparse=("csr", "csc"), dtype=[np.float64, np.float32])
with config_context(assume_finite=True):
W, H, n_iter = est._fit_transform(X, W=W, H=H, update_H=update_H)
return W, H, n_iter
|
Compute Non-negative Matrix Factorization (NMF).
Find two non-negative matrices (W, H) whose product approximates the non-
negative matrix X. This factorization can be used for example for
dimensionality reduction, source separation or topic extraction.
The objective function is:
.. math::
L(W, H) &= 0.5 * ||X - WH||_{loss}^2
&+ alpha\_W * l1\_ratio * n\_features * ||vec(W)||_1
&+ alpha\_H * l1\_ratio * n\_samples * ||vec(H)||_1
&+ 0.5 * alpha\_W * (1 - l1\_ratio) * n\_features * ||W||_{Fro}^2
&+ 0.5 * alpha\_H * (1 - l1\_ratio) * n\_samples * ||H||_{Fro}^2,
where :math:`||A||_{Fro}^2 = \sum_{i,j} A_{ij}^2` (Frobenius norm) and
:math:`||vec(A)||_1 = \sum_{i,j} abs(A_{ij})` (Elementwise L1 norm)
The generic norm :math:`||X - WH||_{loss}^2` may represent
the Frobenius norm or another supported beta-divergence loss.
The choice between options is controlled by the `beta_loss` parameter.
The regularization terms are scaled by `n_features` for `W` and by `n_samples` for
`H` to keep their impact balanced with respect to one another and to the data fit
term as independent as possible of the size `n_samples` of the training set.
The objective function is minimized with an alternating minimization of W
and H. If H is given and update_H=False, it solves for W only.
Note that the transformed data is named W and the components matrix is named H. In
the NMF literature, the naming convention is usually the opposite since the data
matrix X is transposed.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Constant matrix.
W : array-like of shape (n_samples, n_components), default=None
If `init='custom'`, it is used as initial guess for the solution.
If `update_H=False`, it is initialised as an array of zeros, unless
`solver='mu'`, then it is filled with values calculated by
`np.sqrt(X.mean() / self._n_components)`.
If `None`, uses the initialisation method specified in `init`.
H : array-like of shape (n_components, n_features), default=None
If `init='custom'`, it is used as initial guess for the solution.
If `update_H=False`, it is used as a constant, to solve for W only.
If `None`, uses the initialisation method specified in `init`.
n_components : int or {'auto'} or None, default='auto'
Number of components. If `None`, all features are kept.
If `n_components='auto'`, the number of components is automatically inferred
from `W` or `H` shapes.
.. versionchanged:: 1.4
Added `'auto'` value.
.. versionchanged:: 1.6
Default value changed from `None` to `'auto'`.
init : {'random', 'nndsvd', 'nndsvda', 'nndsvdar', 'custom'}, default=None
Method used to initialize the procedure.
Valid options:
        - None: 'nndsvda' if n_components <= min(n_samples, n_features),
          otherwise 'random'.
- 'random': non-negative random matrices, scaled with:
`sqrt(X.mean() / n_components)`
- 'nndsvd': Nonnegative Double Singular Value Decomposition (NNDSVD)
initialization (better for sparseness)
- 'nndsvda': NNDSVD with zeros filled with the average of X
(better when sparsity is not desired)
- 'nndsvdar': NNDSVD with zeros filled with small random values
(generally faster, less accurate alternative to NNDSVDa
for when sparsity is not desired)
- 'custom': If `update_H=True`, use custom matrices W and H which must both
be provided. If `update_H=False`, then only custom matrix H is used.
.. versionchanged:: 0.23
The default value of `init` changed from 'random' to None in 0.23.
.. versionchanged:: 1.1
            When `init=None` and n_components is less than n_samples and n_features,
defaults to `nndsvda` instead of `nndsvd`.
update_H : bool, default=True
Set to True, both W and H will be estimated from initial guesses.
Set to False, only W will be estimated.
solver : {'cd', 'mu'}, default='cd'
Numerical solver to use:
- 'cd' is a Coordinate Descent solver that uses Fast Hierarchical
Alternating Least Squares (Fast HALS).
- 'mu' is a Multiplicative Update solver.
.. versionadded:: 0.17
Coordinate Descent solver.
.. versionadded:: 0.19
Multiplicative Update solver.
beta_loss : float or {'frobenius', 'kullback-leibler', 'itakura-saito'}, default='frobenius'
Beta divergence to be minimized, measuring the distance between X
and the dot product WH. Note that values different from 'frobenius'
(or 2) and 'kullback-leibler' (or 1) lead to significantly slower
fits. Note that for beta_loss <= 0 (or 'itakura-saito'), the input
matrix X cannot contain zeros. Used only in 'mu' solver.
.. versionadded:: 0.19
tol : float, default=1e-4
Tolerance of the stopping condition.
max_iter : int, default=200
Maximum number of iterations before timing out.
alpha_W : float, default=0.0
Constant that multiplies the regularization terms of `W`. Set it to zero
(default) to have no regularization on `W`.
.. versionadded:: 1.0
alpha_H : float or "same", default="same"
Constant that multiplies the regularization terms of `H`. Set it to zero to
have no regularization on `H`. If "same" (default), it takes the same value as
`alpha_W`.
.. versionadded:: 1.0
l1_ratio : float, default=0.0
The regularization mixing parameter, with 0 <= l1_ratio <= 1.
For l1_ratio = 0 the penalty is an elementwise L2 penalty
(aka Frobenius Norm).
For l1_ratio = 1 it is an elementwise L1 penalty.
For 0 < l1_ratio < 1, the penalty is a combination of L1 and L2.
random_state : int, RandomState instance or None, default=None
Used for NMF initialisation (when ``init`` == 'nndsvdar' or
'random'), and in Coordinate Descent. Pass an int for reproducible
results across multiple function calls.
See :term:`Glossary <random_state>`.
verbose : int, default=0
The verbosity level.
shuffle : bool, default=False
If true, randomize the order of coordinates in the CD solver.
Returns
-------
W : ndarray of shape (n_samples, n_components)
Solution to the non-negative least squares problem.
H : ndarray of shape (n_components, n_features)
Solution to the non-negative least squares problem.
n_iter : int
Actual number of iterations.
References
----------
.. [1] :doi:`"Fast local algorithms for large scale nonnegative matrix and tensor
factorizations" <10.1587/transfun.E92.A.708>`
Cichocki, Andrzej, and P. H. A. N. Anh-Huy. IEICE transactions on fundamentals
of electronics, communications and computer sciences 92.3: 708-721, 2009.
.. [2] :doi:`"Algorithms for nonnegative matrix factorization with the
beta-divergence" <10.1162/NECO_a_00168>`
Fevotte, C., & Idier, J. (2011). Neural Computation, 23(9).
Examples
--------
>>> import numpy as np
>>> X = np.array([[1,1], [2, 1], [3, 1.2], [4, 1], [5, 0.8], [6, 1]])
>>> from sklearn.decomposition import non_negative_factorization
>>> W, H, n_iter = non_negative_factorization(
... X, n_components=2, init='random', random_state=0)
|
non_negative_factorization
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/_nmf.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/_nmf.py
|
BSD-3-Clause
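Continuing the docstring example, a sketch of the update_H=False path, where H is held fixed and only W is solved for (the same toy data is reused here for brevity):

import numpy as np
from sklearn.decomposition import non_negative_factorization

X = np.array([[1, 1], [2, 1], [3, 1.2], [4, 1], [5, 0.8], [6, 1]])

# Solve for both factors first...
W, H, _ = non_negative_factorization(X, n_components=2, init="random",
                                     random_state=0)
# ...then keep H fixed and solve only for W (for instance on new data with
# the same columns).
W2, H2, _ = non_negative_factorization(X, H=H, n_components=2,
                                       update_H=False, random_state=0)
print(np.allclose(H, H2))   # H is passed through unchanged when update_H=False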
|
def fit(self, X, y=None, **params):
"""Learn a NMF model for the data X.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : Ignored
Not used, present for API consistency by convention.
**params : kwargs
Parameters (keyword arguments) and values passed to
the fit_transform instance.
Returns
-------
self : object
Returns the instance itself.
"""
# param validation is done in fit_transform
self.fit_transform(X, **params)
return self
|
Learn a NMF model for the data X.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : Ignored
Not used, present for API consistency by convention.
**params : kwargs
Parameters (keyword arguments) and values passed to
the fit_transform instance.
Returns
-------
self : object
Returns the instance itself.
|
fit
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/_nmf.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/_nmf.py
|
BSD-3-Clause
|
def inverse_transform(self, X):
"""Transform data back to its original space.
.. versionadded:: 0.18
Parameters
----------
X : {ndarray, sparse matrix} of shape (n_samples, n_components)
Transformed data matrix.
Returns
-------
X_original : ndarray of shape (n_samples, n_features)
Returns a data matrix of the original shape.
"""
check_is_fitted(self)
return X @ self.components_
|
Transform data back to its original space.
.. versionadded:: 0.18
Parameters
----------
X : {ndarray, sparse matrix} of shape (n_samples, n_components)
Transformed data matrix.
Returns
-------
X_original : ndarray of shape (n_samples, n_features)
Returns a data matrix of the original shape.
|
inverse_transform
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/_nmf.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/_nmf.py
|
BSD-3-Clause
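A round-trip sketch: transform a small non-negative matrix and map it back with inverse_transform, which is simply W @ components_ (toy data, approximate reconstruction):

import numpy as np
from sklearn.decomposition import NMF

X = np.array([[1.0, 0.8, 0.0], [0.9, 1.1, 0.1],
              [0.0, 0.2, 1.0], [0.1, 0.0, 0.9]])
nmf = NMF(n_components=2, init="nndsvda", max_iter=2000, random_state=0)
W = nmf.fit_transform(X)
X_back = nmf.inverse_transform(W)      # same as W @ nmf.components_
print(np.round(X_back, 2))             # close to X, not exact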
|
def fit_transform(self, X, y=None, W=None, H=None):
"""Learn a NMF model for the data X and returns the transformed data.
This is more efficient than calling fit followed by transform.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : Ignored
Not used, present for API consistency by convention.
W : array-like of shape (n_samples, n_components), default=None
If `init='custom'`, it is used as initial guess for the solution.
If `None`, uses the initialisation method specified in `init`.
H : array-like of shape (n_components, n_features), default=None
If `init='custom'`, it is used as initial guess for the solution.
If `None`, uses the initialisation method specified in `init`.
Returns
-------
W : ndarray of shape (n_samples, n_components)
Transformed data.
"""
X = validate_data(
self, X, accept_sparse=("csr", "csc"), dtype=[np.float64, np.float32]
)
with config_context(assume_finite=True):
W, H, n_iter = self._fit_transform(X, W=W, H=H)
self.reconstruction_err_ = _beta_divergence(
X, W, H, self._beta_loss, square_root=True
)
self.n_components_ = H.shape[0]
self.components_ = H
self.n_iter_ = n_iter
return W
|
Learn a NMF model for the data X and returns the transformed data.
This is more efficient than calling fit followed by transform.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : Ignored
Not used, present for API consistency by convention.
W : array-like of shape (n_samples, n_components), default=None
If `init='custom'`, it is used as initial guess for the solution.
If `None`, uses the initialisation method specified in `init`.
H : array-like of shape (n_components, n_features), default=None
If `init='custom'`, it is used as initial guess for the solution.
If `None`, uses the initialisation method specified in `init`.
Returns
-------
W : ndarray of shape (n_samples, n_components)
Transformed data.
|
fit_transform
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/_nmf.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/_nmf.py
|
BSD-3-Clause
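A sketch of fit_transform with init='custom', where both starting factors are supplied by the caller (W0 and H0 are arbitrary non-negative arrays made up for the example; they must match X's dtype and the requested shapes):

import numpy as np
from sklearn.decomposition import NMF

rng = np.random.default_rng(0)
X = np.abs(rng.random((8, 5)))

W0 = np.abs(rng.random((8, 2)))   # user-provided starting point for W
H0 = np.abs(rng.random((2, 5)))   # user-provided starting point for H

nmf = NMF(n_components=2, init="custom", max_iter=1000, random_state=0)
W = nmf.fit_transform(X, W=W0, H=H0)
print(nmf.n_iter_, round(nmf.reconstruction_err_, 4))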
|
def _fit_transform(self, X, y=None, W=None, H=None, update_H=True):
"""Learn a NMF model for the data X and returns the transformed data.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Data matrix to be decomposed
y : Ignored
W : array-like of shape (n_samples, n_components), default=None
If `init='custom'`, it is used as initial guess for the solution.
If `update_H=False`, it is initialised as an array of zeros, unless
`solver='mu'`, then it is filled with values calculated by
`np.sqrt(X.mean() / self._n_components)`.
If `None`, uses the initialisation method specified in `init`.
H : array-like of shape (n_components, n_features), default=None
If `init='custom'`, it is used as initial guess for the solution.
If `update_H=False`, it is used as a constant, to solve for W only.
If `None`, uses the initialisation method specified in `init`.
update_H : bool, default=True
If True, both W and H will be estimated from initial guesses,
this corresponds to a call to the 'fit_transform' method.
If False, only W will be estimated, this corresponds to a call
to the 'transform' method.
Returns
-------
W : ndarray of shape (n_samples, n_components)
Transformed data.
H : ndarray of shape (n_components, n_features)
Factorization matrix, sometimes called 'dictionary'.
n_iter_ : int
Actual number of iterations.
"""
# check parameters
self._check_params(X)
if X.min() == 0 and self._beta_loss <= 0:
raise ValueError(
"When beta_loss <= 0 and X contains zeros, "
"the solver may diverge. Please add small values "
"to X, or use a positive beta_loss."
)
# initialize or check W and H
W, H = self._check_w_h(X, W, H, update_H)
# scale the regularization terms
l1_reg_W, l1_reg_H, l2_reg_W, l2_reg_H = self._compute_regularization(X)
if self.solver == "cd":
W, H, n_iter = _fit_coordinate_descent(
X,
W,
H,
self.tol,
self.max_iter,
l1_reg_W,
l1_reg_H,
l2_reg_W,
l2_reg_H,
update_H=update_H,
verbose=self.verbose,
shuffle=self.shuffle,
random_state=self.random_state,
)
elif self.solver == "mu":
W, H, n_iter, *_ = _fit_multiplicative_update(
X,
W,
H,
self._beta_loss,
self.max_iter,
self.tol,
l1_reg_W,
l1_reg_H,
l2_reg_W,
l2_reg_H,
update_H,
self.verbose,
)
else:
raise ValueError("Invalid solver parameter '%s'." % self.solver)
if n_iter == self.max_iter and self.tol > 0:
warnings.warn(
"Maximum number of iterations %d reached. Increase "
"it to improve convergence." % self.max_iter,
ConvergenceWarning,
)
return W, H, n_iter
|
Learn a NMF model for the data X and returns the transformed data.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Data matrix to be decomposed
y : Ignored
W : array-like of shape (n_samples, n_components), default=None
If `init='custom'`, it is used as initial guess for the solution.
If `update_H=False`, it is initialised as an array of zeros, unless
`solver='mu'`, then it is filled with values calculated by
`np.sqrt(X.mean() / self._n_components)`.
If `None`, uses the initialisation method specified in `init`.
H : array-like of shape (n_components, n_features), default=None
If `init='custom'`, it is used as initial guess for the solution.
If `update_H=False`, it is used as a constant, to solve for W only.
If `None`, uses the initialisation method specified in `init`.
update_H : bool, default=True
If True, both W and H will be estimated from initial guesses,
this corresponds to a call to the 'fit_transform' method.
If False, only W will be estimated, this corresponds to a call
to the 'transform' method.
Returns
-------
W : ndarray of shape (n_samples, n_components)
Transformed data.
H : ndarray of shape (n_components, n_features)
Factorization matrix, sometimes called 'dictionary'.
n_iter_ : int
Actual number of iterations.
|
_fit_transform
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/_nmf.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/_nmf.py
|
BSD-3-Clause
|
def transform(self, X):
"""Transform the data X according to the fitted NMF model.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples
and `n_features` is the number of features.
Returns
-------
W : ndarray of shape (n_samples, n_components)
Transformed data.
"""
check_is_fitted(self)
X = validate_data(
self,
X,
accept_sparse=("csr", "csc"),
dtype=[np.float64, np.float32],
reset=False,
ensure_non_negative=True,
)
with config_context(assume_finite=True):
W, *_ = self._fit_transform(X, H=self.components_, update_H=False)
return W
|
Transform the data X according to the fitted NMF model.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples
and `n_features` is the number of features.
Returns
-------
W : ndarray of shape (n_samples, n_components)
Transformed data.
|
transform
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/_nmf.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/_nmf.py
|
BSD-3-Clause
|
def _solve_W(self, X, H, max_iter):
"""Minimize the objective function w.r.t W.
Update W with H being fixed, until convergence. This is the heart
of `transform` but it's also used during `fit` when doing fresh restarts.
"""
avg = np.sqrt(X.mean() / self._n_components)
W = np.full((X.shape[0], self._n_components), avg, dtype=X.dtype)
W_buffer = W.copy()
# Get scaled regularization terms. Done for each minibatch to take into account
# variable sizes of minibatches.
l1_reg_W, _, l2_reg_W, _ = self._compute_regularization(X)
for _ in range(max_iter):
W, *_ = _multiplicative_update_w(
X, W, H, self._beta_loss, l1_reg_W, l2_reg_W, self._gamma
)
W_diff = linalg.norm(W - W_buffer) / linalg.norm(W)
if self.tol > 0 and W_diff <= self.tol:
break
W_buffer[:] = W
return W
|
Minimize the objective function w.r.t W.
Update W with H being fixed, until convergence. This is the heart
of `transform` but it's also used during `fit` when doing fresh restarts.
|
_solve_W
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/_nmf.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/_nmf.py
|
BSD-3-Clause
|
def _minibatch_step(self, X, W, H, update_H):
"""Perform the update of W and H for one minibatch."""
batch_size = X.shape[0]
# get scaled regularization terms. Done for each minibatch to take into account
# variable sizes of minibatches.
l1_reg_W, l1_reg_H, l2_reg_W, l2_reg_H = self._compute_regularization(X)
# update W
if self.fresh_restarts or W is None:
W = self._solve_W(X, H, self.fresh_restarts_max_iter)
else:
W, *_ = _multiplicative_update_w(
X, W, H, self._beta_loss, l1_reg_W, l2_reg_W, self._gamma
)
# necessary for stability with beta_loss < 1
if self._beta_loss < 1:
W[W < np.finfo(np.float64).eps] = 0.0
batch_cost = (
_beta_divergence(X, W, H, self._beta_loss)
+ l1_reg_W * W.sum()
+ l1_reg_H * H.sum()
+ l2_reg_W * (W**2).sum()
+ l2_reg_H * (H**2).sum()
) / batch_size
# update H (only at fit or fit_transform)
if update_H:
H[:] = _multiplicative_update_h(
X,
W,
H,
beta_loss=self._beta_loss,
l1_reg_H=l1_reg_H,
l2_reg_H=l2_reg_H,
gamma=self._gamma,
A=self._components_numerator,
B=self._components_denominator,
rho=self._rho,
)
# necessary for stability with beta_loss < 1
if self._beta_loss <= 1:
H[H < np.finfo(np.float64).eps] = 0.0
return batch_cost
|
Perform the update of W and H for one minibatch.
|
_minibatch_step
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/_nmf.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/_nmf.py
|
BSD-3-Clause
|
def _minibatch_convergence(
self, X, batch_cost, H, H_buffer, n_samples, step, n_steps
):
"""Helper function to encapsulate the early stopping logic"""
batch_size = X.shape[0]
# counts steps starting from 1 for user friendly verbose mode.
step = step + 1
# Ignore first iteration because H is not updated yet.
if step == 1:
if self.verbose:
print(f"Minibatch step {step}/{n_steps}: mean batch cost: {batch_cost}")
return False
# Compute an Exponentially Weighted Average of the cost function to
# monitor the convergence while discarding minibatch-local stochastic
# variability: https://en.wikipedia.org/wiki/Moving_average
if self._ewa_cost is None:
self._ewa_cost = batch_cost
else:
alpha = batch_size / (n_samples + 1)
alpha = min(alpha, 1)
self._ewa_cost = self._ewa_cost * (1 - alpha) + batch_cost * alpha
# Log progress to be able to monitor convergence
if self.verbose:
print(
f"Minibatch step {step}/{n_steps}: mean batch cost: "
f"{batch_cost}, ewa cost: {self._ewa_cost}"
)
# Early stopping based on change of H
H_diff = linalg.norm(H - H_buffer) / linalg.norm(H)
if self.tol > 0 and H_diff <= self.tol:
if self.verbose:
print(f"Converged (small H change) at step {step}/{n_steps}")
return True
# Early stopping heuristic due to lack of improvement on smoothed
# cost function
if self._ewa_cost_min is None or self._ewa_cost < self._ewa_cost_min:
self._no_improvement = 0
self._ewa_cost_min = self._ewa_cost
else:
self._no_improvement += 1
if (
self.max_no_improvement is not None
and self._no_improvement >= self.max_no_improvement
):
if self.verbose:
print(
"Converged (lack of improvement in objective function) "
f"at step {step}/{n_steps}"
)
return True
return False
|
Helper function to encapsulate the early stopping logic
|
_minibatch_convergence
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/_nmf.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/_nmf.py
|
BSD-3-Clause
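The smoothed cost used for early stopping is a plain exponentially weighted average; a toy sketch of how it evolves under the same update rule (the per-batch costs below are made-up numbers):

batch_costs = [5.0, 4.0, 4.2, 3.5, 3.6, 3.1]   # hypothetical per-batch costs
batch_size, n_samples = 32, 480

alpha = min(batch_size / (n_samples + 1), 1.0)
ewa_cost = None
for cost in batch_costs:
    ewa_cost = cost if ewa_cost is None else ewa_cost * (1 - alpha) + cost * alpha
    print(round(ewa_cost, 3))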
|
def fit_transform(self, X, y=None, W=None, H=None):
"""Learn a NMF model for the data X and returns the transformed data.
This is more efficient than calling fit followed by transform.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Data matrix to be decomposed.
y : Ignored
Not used, present here for API consistency by convention.
W : array-like of shape (n_samples, n_components), default=None
If `init='custom'`, it is used as initial guess for the solution.
If `None`, uses the initialisation method specified in `init`.
H : array-like of shape (n_components, n_features), default=None
If `init='custom'`, it is used as initial guess for the solution.
If `None`, uses the initialisation method specified in `init`.
Returns
-------
W : ndarray of shape (n_samples, n_components)
Transformed data.
"""
X = validate_data(
self, X, accept_sparse=("csr", "csc"), dtype=[np.float64, np.float32]
)
with config_context(assume_finite=True):
W, H, n_iter, n_steps = self._fit_transform(X, W=W, H=H)
self.reconstruction_err_ = _beta_divergence(
X, W, H, self._beta_loss, square_root=True
)
self.n_components_ = H.shape[0]
self.components_ = H
self.n_iter_ = n_iter
self.n_steps_ = n_steps
return W
|
Learn a NMF model for the data X and returns the transformed data.
This is more efficient than calling fit followed by transform.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Data matrix to be decomposed.
y : Ignored
Not used, present here for API consistency by convention.
W : array-like of shape (n_samples, n_components), default=None
If `init='custom'`, it is used as initial guess for the solution.
If `None`, uses the initialisation method specified in `init`.
H : array-like of shape (n_components, n_features), default=None
If `init='custom'`, it is used as initial guess for the solution.
If `None`, uses the initialisation method specified in `init`.
Returns
-------
W : ndarray of shape (n_samples, n_components)
Transformed data.
|
fit_transform
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/_nmf.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/_nmf.py
|
BSD-3-Clause
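A minimal usage sketch of the mini-batch estimator itself on toy random data; the attributes of interest afterwards are components_, n_iter_ and n_steps_:

import numpy as np
from sklearn.decomposition import MiniBatchNMF

rng = np.random.default_rng(0)
X = np.abs(rng.random((200, 20)))

mbnmf = MiniBatchNMF(n_components=5, batch_size=48, max_iter=100,
                     random_state=0)
W = mbnmf.fit_transform(X)
print(W.shape, mbnmf.components_.shape, mbnmf.n_iter_, mbnmf.n_steps_)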
|
def _fit_transform(self, X, W=None, H=None, update_H=True):
"""Learn a NMF model for the data X and returns the transformed data.
Parameters
----------
X : {ndarray, sparse matrix} of shape (n_samples, n_features)
Data matrix to be decomposed.
W : array-like of shape (n_samples, n_components), default=None
If `init='custom'`, it is used as initial guess for the solution.
If `update_H=False`, it is initialised as an array of zeros, unless
`solver='mu'`, then it is filled with values calculated by
`np.sqrt(X.mean() / self._n_components)`.
If `None`, uses the initialisation method specified in `init`.
H : array-like of shape (n_components, n_features), default=None
If `init='custom'`, it is used as initial guess for the solution.
If `update_H=False`, it is used as a constant, to solve for W only.
If `None`, uses the initialisation method specified in `init`.
update_H : bool, default=True
If True, both W and H will be estimated from initial guesses,
this corresponds to a call to the `fit_transform` method.
If False, only W will be estimated, this corresponds to a call
to the `transform` method.
Returns
-------
W : ndarray of shape (n_samples, n_components)
Transformed data.
H : ndarray of shape (n_components, n_features)
Factorization matrix, sometimes called 'dictionary'.
n_iter : int
Actual number of started iterations over the whole dataset.
n_steps : int
Number of mini-batches processed.
"""
check_non_negative(X, "MiniBatchNMF (input X)")
self._check_params(X)
if X.min() == 0 and self._beta_loss <= 0:
raise ValueError(
"When beta_loss <= 0 and X contains zeros, "
"the solver may diverge. Please add small values "
"to X, or use a positive beta_loss."
)
n_samples = X.shape[0]
# initialize or check W and H
W, H = self._check_w_h(X, W, H, update_H)
H_buffer = H.copy()
# Initialize auxiliary matrices
self._components_numerator = H.copy()
self._components_denominator = np.ones(H.shape, dtype=H.dtype)
# Attributes to monitor the convergence
self._ewa_cost = None
self._ewa_cost_min = None
self._no_improvement = 0
batches = gen_batches(n_samples, self._batch_size)
batches = itertools.cycle(batches)
n_steps_per_iter = int(np.ceil(n_samples / self._batch_size))
n_steps = self.max_iter * n_steps_per_iter
for i, batch in zip(range(n_steps), batches):
batch_cost = self._minibatch_step(X[batch], W[batch], H, update_H)
if update_H and self._minibatch_convergence(
X[batch], batch_cost, H, H_buffer, n_samples, i, n_steps
):
break
H_buffer[:] = H
if self.fresh_restarts:
W = self._solve_W(X, H, self._transform_max_iter)
n_steps = i + 1
n_iter = int(np.ceil(n_steps / n_steps_per_iter))
if n_iter == self.max_iter and self.tol > 0:
warnings.warn(
(
f"Maximum number of iterations {self.max_iter} reached. "
"Increase it to improve convergence."
),
ConvergenceWarning,
)
return W, H, n_iter, n_steps
|
Learn an NMF model for the data X and return the transformed data.
Parameters
----------
X : {ndarray, sparse matrix} of shape (n_samples, n_features)
Data matrix to be decomposed.
W : array-like of shape (n_samples, n_components), default=None
If `init='custom'`, it is used as initial guess for the solution.
If `update_H=False`, it is initialised as an array of zeros, unless
`solver='mu'`, then it is filled with values calculated by
`np.sqrt(X.mean() / self._n_components)`.
If `None`, uses the initialisation method specified in `init`.
H : array-like of shape (n_components, n_features), default=None
If `init='custom'`, it is used as initial guess for the solution.
If `update_H=False`, it is used as a constant, to solve for W only.
If `None`, uses the initialisation method specified in `init`.
update_H : bool, default=True
If True, both W and H will be estimated from initial guesses,
this corresponds to a call to the `fit_transform` method.
If False, only W will be estimated, this corresponds to a call
to the `transform` method.
Returns
-------
W : ndarray of shape (n_samples, n_components)
Transformed data.
H : ndarray of shape (n_components, n_features)
Factorization matrix, sometimes called 'dictionary'.
n_iter : int
Actual number of started iterations over the whole dataset.
n_steps : int
Number of mini-batches processed.
|
_fit_transform
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/_nmf.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/_nmf.py
|
BSD-3-Clause
|
def transform(self, X):
"""Transform the data X according to the fitted MiniBatchNMF model.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Data matrix to be transformed by the model.
Returns
-------
W : ndarray of shape (n_samples, n_components)
Transformed data.
"""
check_is_fitted(self)
X = validate_data(
self,
X,
accept_sparse=("csr", "csc"),
dtype=[np.float64, np.float32],
reset=False,
)
W = self._solve_W(X, self.components_, self._transform_max_iter)
return W
|
Transform the data X according to the fitted MiniBatchNMF model.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Data matrix to be transformed by the model.
Returns
-------
W : ndarray of shape (n_samples, n_components)
Transformed data.
|
transform
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/_nmf.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/_nmf.py
|
BSD-3-Clause
|
def partial_fit(self, X, y=None, W=None, H=None):
"""Update the model using the data in `X` as a mini-batch.
This method is expected to be called several times consecutively
on different chunks of a dataset so as to implement out-of-core
or online learning.
This is especially useful when the whole dataset is too big to fit in
memory at once (see :ref:`scaling_strategies`).
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Data matrix to be decomposed.
y : Ignored
Not used, present here for API consistency by convention.
W : array-like of shape (n_samples, n_components), default=None
If `init='custom'`, it is used as initial guess for the solution.
Only used for the first call to `partial_fit`.
H : array-like of shape (n_components, n_features), default=None
If `init='custom'`, it is used as initial guess for the solution.
Only used for the first call to `partial_fit`.
Returns
-------
self
Returns the instance itself.
"""
has_components = hasattr(self, "components_")
X = validate_data(
self,
X,
accept_sparse=("csr", "csc"),
dtype=[np.float64, np.float32],
reset=not has_components,
)
if not has_components:
# This instance has not been fitted yet (fit or partial_fit)
self._check_params(X)
_, H = self._check_w_h(X, W=W, H=H, update_H=True)
self._components_numerator = H.copy()
self._components_denominator = np.ones(H.shape, dtype=H.dtype)
self.n_steps_ = 0
else:
H = self.components_
self._minibatch_step(X, None, H, update_H=True)
self.n_components_ = H.shape[0]
self.components_ = H
self.n_steps_ += 1
return self
|
Update the model using the data in `X` as a mini-batch.
This method is expected to be called several times consecutively
on different chunks of a dataset so as to implement out-of-core
or online learning.
This is especially useful when the whole dataset is too big to fit in
memory at once (see :ref:`scaling_strategies`).
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Data matrix to be decomposed.
y : Ignored
Not used, present here for API consistency by convention.
W : array-like of shape (n_samples, n_components), default=None
If `init='custom'`, it is used as initial guess for the solution.
Only used for the first call to `partial_fit`.
H : array-like of shape (n_components, n_features), default=None
If `init='custom'`, it is used as initial guess for the solution.
Only used for the first call to `partial_fit`.
Returns
-------
self
Returns the instance itself.
|
partial_fit
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/_nmf.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/_nmf.py
|
BSD-3-Clause
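A hedged sketch of the out-of-core pattern described in the docstring above (assuming the public `MiniBatchNMF` API; the chunking of an in-memory array is illustrative only): each call to `partial_fit` performs one mini-batch update of the dictionary H.

import numpy as np
from sklearn.decomposition import MiniBatchNMF
rng = np.random.RandomState(0)
X = np.abs(rng.randn(1_000, 20))
nmf = MiniBatchNMF(n_components=4, random_state=0)
# Feed the data chunk by chunk, as if it were streamed from disk.
for chunk in np.array_split(X, 10):
    nmf.partial_fit(chunk)
print(nmf.n_steps_)               # 10 mini-batch updates
print(nmf.components_.shape)      # (4, 20)
W_new = nmf.transform(X[:5])      # encode new samples against the learned H
print(W_new.shape)                # (5, 4)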
|
def _assess_dimension(spectrum, rank, n_samples):
"""Compute the log-likelihood of a rank ``rank`` dataset.
    The dataset is assumed to be embedded in Gaussian noise of shape
    (n_samples, n_features) having spectrum ``spectrum``. This implements
    the method of T. P. Minka.
Parameters
----------
spectrum : ndarray of shape (n_features,)
Data spectrum.
rank : int
Tested rank value. It should be strictly lower than n_features,
otherwise the method isn't specified (division by zero in equation
(31) from the paper).
n_samples : int
Number of samples.
Returns
-------
ll : float
The log-likelihood.
References
----------
This implements the method of `Thomas P. Minka:
Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604
<https://proceedings.neurips.cc/paper/2000/file/7503cfacd12053d309b6bed5c89de212-Paper.pdf>`_
"""
xp, _ = get_namespace(spectrum)
n_features = spectrum.shape[0]
if not 1 <= rank < n_features:
raise ValueError("the tested rank should be in [1, n_features - 1]")
eps = 1e-15
if spectrum[rank - 1] < eps:
# When the tested rank is associated with a small eigenvalue, there's
# no point in computing the log-likelihood: it's going to be very
# small and won't be the max anyway. Also, it can lead to numerical
# issues below when computing pa, in particular in log((spectrum[i] -
# spectrum[j]) because this will take the log of something very small.
return -xp.inf
pu = -rank * log(2.0)
for i in range(1, rank + 1):
pu += (
lgamma((n_features - i + 1) / 2.0) - log(xp.pi) * (n_features - i + 1) / 2.0
)
pl = xp.sum(xp.log(spectrum[:rank]))
pl = -pl * n_samples / 2.0
v = max(eps, xp.sum(spectrum[rank:]) / (n_features - rank))
pv = -log(v) * n_samples * (n_features - rank) / 2.0
m = n_features * rank - rank * (rank + 1.0) / 2.0
pp = log(2.0 * xp.pi) * (m + rank) / 2.0
pa = 0.0
spectrum_ = xp.asarray(spectrum, copy=True)
spectrum_[rank:n_features] = v
for i in range(rank):
for j in range(i + 1, spectrum.shape[0]):
pa += log(
(spectrum[i] - spectrum[j]) * (1.0 / spectrum_[j] - 1.0 / spectrum_[i])
) + log(n_samples)
ll = pu + pl + pv + pp - pa / 2.0 - rank * log(n_samples) / 2.0
return ll
|
Compute the log-likelihood of a rank ``rank`` dataset.
The dataset is assumed to be embedded in Gaussian noise of shape
(n_samples, n_features) having spectrum ``spectrum``. This implements
the method of T. P. Minka.
Parameters
----------
spectrum : ndarray of shape (n_features,)
Data spectrum.
rank : int
Tested rank value. It should be strictly lower than n_features,
otherwise the method isn't specified (division by zero in equation
(31) from the paper).
n_samples : int
Number of samples.
Returns
-------
ll : float
The log-likelihood.
References
----------
This implements the method of `Thomas P. Minka:
Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604
<https://proceedings.neurips.cc/paper/2000/file/7503cfacd12053d309b6bed5c89de212-Paper.pdf>`_
|
_assess_dimension
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/_pca.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/_pca.py
|
BSD-3-Clause
|
def _infer_dimension(spectrum, n_samples):
"""Infers the dimension of a dataset with a given spectrum.
The returned value will be in [1, n_features - 1].
"""
xp, _ = get_namespace(spectrum)
ll = xp.empty_like(spectrum)
ll[0] = -xp.inf # we don't want to return n_components = 0
for rank in range(1, spectrum.shape[0]):
ll[rank] = _assess_dimension(spectrum, rank, n_samples)
return xp.argmax(ll)
|
Infers the dimension of a dataset with a given spectrum.
The returned value will be in [1, n_features - 1].
|
_infer_dimension
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/_pca.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/_pca.py
|
BSD-3-Clause
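These two helpers implement Minka's MLE rank selection used by `PCA(n_components='mle')`. A sketch of the behaviour they drive, on synthetic low-rank data and assuming only the public PCA API:

import numpy as np
from sklearn.decomposition import PCA
# Approximately rank-3 data in 10 dimensions, plus small isotropic noise.
rng = np.random.RandomState(0)
X = rng.randn(500, 3) @ rng.randn(3, 10) + 0.01 * rng.randn(500, 10)
# 'mle' asks _infer_dimension to score every candidate rank and keep the best.
pca = PCA(n_components="mle", svd_solver="full").fit(X)
print(pca.n_components_)                  # expected to be close to 3
print(pca.explained_variance_ratio_[:4])  # sharp drop after the true rank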
|
def fit_transform(self, X, y=None):
"""Fit the model with X and apply the dimensionality reduction on X.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : Ignored
Ignored.
Returns
-------
X_new : ndarray of shape (n_samples, n_components)
Transformed values.
Notes
-----
This method returns a Fortran-ordered array. To convert it to a
C-ordered array, use 'np.ascontiguousarray'.
"""
U, S, _, X, x_is_centered, xp = self._fit(X)
if U is not None:
U = U[:, : self.n_components_]
if self.whiten:
            # X_new = X * V / S * sqrt(n_samples - 1) = U * sqrt(n_samples - 1)
U *= sqrt(X.shape[0] - 1)
else:
# X_new = X * V = U * S * Vt * V = U * S
U *= S[: self.n_components_]
return U
else: # solver="covariance_eigh" does not compute U at fit time.
return self._transform(X, xp, x_is_centered=x_is_centered)
|
Fit the model with X and apply the dimensionality reduction on X.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : Ignored
Ignored.
Returns
-------
X_new : ndarray of shape (n_samples, n_components)
Transformed values.
Notes
-----
This method returns a Fortran-ordered array. To convert it to a
C-ordered array, use 'np.ascontiguousarray'.
|
fit_transform
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/_pca.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/_pca.py
|
BSD-3-Clause
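A short sketch of the relationships stated in the comments above (`X_new = U * S` without whitening, `U * sqrt(n_samples - 1)` with whitening); it assumes only the public PCA API and random data.

import numpy as np
from sklearn.decomposition import PCA
rng = np.random.RandomState(0)
X = rng.randn(100, 6)
# fit_transform and fit().transform() agree up to floating-point noise.
pca = PCA(n_components=3, svd_solver="full")
Xt = pca.fit_transform(X)
np.testing.assert_allclose(Xt, pca.transform(X), atol=1e-10)
# With whitening, each output column has unit variance (ddof=1).
Xw = PCA(n_components=3, whiten=True, svd_solver="full").fit_transform(X)
print(Xw.var(axis=0, ddof=1))     # approximately [1. 1. 1.]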
|
def _fit(self, X):
"""Dispatch to the right submethod depending on the chosen solver."""
xp, is_array_api_compliant = get_namespace(X)
# Raise an error for sparse input and unsupported svd_solver
if issparse(X) and self.svd_solver not in ["auto", "arpack", "covariance_eigh"]:
raise TypeError(
'PCA only support sparse inputs with the "arpack" and'
f' "covariance_eigh" solvers, while "{self.svd_solver}" was passed. See'
" TruncatedSVD for a possible alternative."
)
if self.svd_solver == "arpack" and is_array_api_compliant:
raise ValueError(
"PCA with svd_solver='arpack' is not supported for Array API inputs."
)
# Validate the data, without ever forcing a copy as any solver that
# supports sparse input data and the `covariance_eigh` solver are
# written in a way to avoid the need for any inplace modification of
# the input data contrary to the other solvers.
# The copy will happen
# later, only if needed, once the solver negotiation below is done.
X = validate_data(
self,
X,
dtype=[xp.float64, xp.float32],
force_writeable=True,
accept_sparse=("csr", "csc"),
ensure_2d=True,
copy=False,
)
self._fit_svd_solver = self.svd_solver
if self._fit_svd_solver == "auto" and issparse(X):
self._fit_svd_solver = "arpack"
if self.n_components is None:
if self._fit_svd_solver != "arpack":
n_components = min(X.shape)
else:
n_components = min(X.shape) - 1
else:
n_components = self.n_components
if self._fit_svd_solver == "auto":
# Tall and skinny problems are best handled by precomputing the
# covariance matrix.
if X.shape[1] <= 1_000 and X.shape[0] >= 10 * X.shape[1]:
self._fit_svd_solver = "covariance_eigh"
# Small problem or n_components == 'mle', just call full PCA
elif max(X.shape) <= 500 or n_components == "mle":
self._fit_svd_solver = "full"
elif 1 <= n_components < 0.8 * min(X.shape):
self._fit_svd_solver = "randomized"
# This is also the case of n_components in (0, 1)
else:
self._fit_svd_solver = "full"
# Call different fits for either full or truncated SVD
if self._fit_svd_solver in ("full", "covariance_eigh"):
return self._fit_full(X, n_components, xp, is_array_api_compliant)
elif self._fit_svd_solver in ["arpack", "randomized"]:
return self._fit_truncated(X, n_components, xp)
|
Dispatch to the right submethod depending on the chosen solver.
|
_fit
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/_pca.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/_pca.py
|
BSD-3-Clause
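The solver negotiation above can be observed from the outside. A sketch under the assumption of a recent scikit-learn release that supports sparse PCA input and the 'covariance_eigh' solver; `_fit_svd_solver` is a private attribute and is inspected here only to illustrate the heuristic.

import numpy as np
from scipy import sparse
from sklearn.decomposition import PCA
rng = np.random.RandomState(0)
# Tall-and-skinny dense data: 'auto' should resolve to 'covariance_eigh'.
X_tall = rng.randn(20_000, 50)
pca_dense = PCA(n_components=10, svd_solver="auto").fit(X_tall)
print(pca_dense._fit_svd_solver)
# Sparse input with 'auto' resolves to 'arpack'.
X_sp = sparse.random(2_000, 300, density=0.01, format="csr", random_state=0)
pca_sp = PCA(n_components=10, svd_solver="auto").fit(X_sp)
print(pca_sp._fit_svd_solver)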
|
def _fit_full(self, X, n_components, xp, is_array_api_compliant):
"""Fit the model by computing full SVD on X."""
n_samples, n_features = X.shape
if n_components == "mle":
if n_samples < n_features:
raise ValueError(
"n_components='mle' is only supported if n_samples >= n_features"
)
elif not 0 <= n_components <= min(n_samples, n_features):
raise ValueError(
f"n_components={n_components} must be between 0 and "
f"min(n_samples, n_features)={min(n_samples, n_features)} with "
f"svd_solver={self._fit_svd_solver!r}"
)
self.mean_ = xp.mean(X, axis=0)
# When X is a scipy sparse matrix, self.mean_ is a numpy matrix, so we need
# to transform it to a 1D array. Note that this is not the case when X
# is a scipy sparse array.
# TODO: remove the following two lines when scikit-learn only depends
# on scipy versions that no longer support scipy.sparse matrices.
self.mean_ = xp.reshape(xp.asarray(self.mean_), (-1,))
if self._fit_svd_solver == "full":
X_centered = xp.asarray(X, copy=True) if self.copy else X
X_centered -= self.mean_
x_is_centered = not self.copy
if not is_array_api_compliant:
# Use scipy.linalg with NumPy/SciPy inputs for the sake of not
# introducing unanticipated behavior changes. In the long run we
# could instead decide to always use xp.linalg.svd for all inputs,
# but that would make this code rely on numpy's SVD instead of
# scipy's. It's not 100% clear whether they use the same LAPACK
# solver by default though (assuming both are built against the
# same BLAS).
U, S, Vt = linalg.svd(X_centered, full_matrices=False)
else:
U, S, Vt = xp.linalg.svd(X_centered, full_matrices=False)
explained_variance_ = (S**2) / (n_samples - 1)
else:
assert self._fit_svd_solver == "covariance_eigh"
# In the following, we center the covariance matrix C afterwards
# (without centering the data X first) to avoid an unnecessary copy
# of X. Note that the mean_ attribute is still needed to center
# test data in the transform method.
#
# Note: at the time of writing, `xp.cov` does not exist in the
# Array API standard:
# https://github.com/data-apis/array-api/issues/43
#
# Besides, using `numpy.cov`, as of numpy 1.26.0, would not be
# memory efficient for our use case when `n_samples >> n_features`:
# `numpy.cov` centers a copy of the data before computing the
# matrix product instead of subtracting a small `(n_features,
# n_features)` square matrix from the gram matrix X.T @ X, as we do
# below.
x_is_centered = False
C = X.T @ X
C -= (
n_samples
* xp.reshape(self.mean_, (-1, 1))
* xp.reshape(self.mean_, (1, -1))
)
C /= n_samples - 1
eigenvals, eigenvecs = xp.linalg.eigh(C)
# When X is a scipy sparse matrix, the following two datastructures
# are returned as instances of the soft-deprecated numpy.matrix
# class. Note that this problem does not occur when X is a scipy
        # sparse array (or any other kind of supported array).
# TODO: remove the following two lines when scikit-learn only
# depends on scipy versions that no longer support scipy.sparse
# matrices.
eigenvals = xp.reshape(xp.asarray(eigenvals), (-1,))
eigenvecs = xp.asarray(eigenvecs)
eigenvals = xp.flip(eigenvals, axis=0)
eigenvecs = xp.flip(eigenvecs, axis=1)
# The covariance matrix C is positive semi-definite by
# construction. However, the eigenvalues returned by xp.linalg.eigh
# can be slightly negative due to numerical errors. This would be
# an issue for the subsequent sqrt, hence the manual clipping.
eigenvals[eigenvals < 0.0] = 0.0
explained_variance_ = eigenvals
# Re-construct SVD of centered X indirectly and make it consistent
# with the other solvers.
S = xp.sqrt(eigenvals * (n_samples - 1))
Vt = eigenvecs.T
U = None
# flip eigenvectors' sign to enforce deterministic output
U, Vt = svd_flip(U, Vt, u_based_decision=False)
components_ = Vt
# Get variance explained by singular values
total_var = xp.sum(explained_variance_)
explained_variance_ratio_ = explained_variance_ / total_var
singular_values_ = xp.asarray(S, copy=True) # Store the singular values.
# Postprocess the number of components required
if n_components == "mle":
n_components = _infer_dimension(explained_variance_, n_samples)
elif 0 < n_components < 1.0:
# number of components for which the cumulated explained
# variance percentage is superior to the desired threshold
        # side='right' ensures that the cumulated explained variance of the
        # selected components is always strictly greater than the
        # n_components float passed. More discussion in issue: #15669
if is_array_api_compliant:
# Convert to numpy as xp.cumsum and xp.searchsorted are not
# part of the Array API standard yet:
#
# https://github.com/data-apis/array-api/issues/597
# https://github.com/data-apis/array-api/issues/688
#
# Furthermore, it's not always safe to call them for namespaces
# that already implement them: for instance as
# cupy.searchsorted does not accept a float as second argument.
explained_variance_ratio_np = _convert_to_numpy(
explained_variance_ratio_, xp=xp
)
else:
explained_variance_ratio_np = explained_variance_ratio_
ratio_cumsum = stable_cumsum(explained_variance_ratio_np)
n_components = np.searchsorted(ratio_cumsum, n_components, side="right") + 1
# Compute noise covariance using Probabilistic PCA model
# The sigma2 maximum likelihood (cf. eq. 12.46)
if n_components < min(n_features, n_samples):
self.noise_variance_ = xp.mean(explained_variance_[n_components:])
else:
self.noise_variance_ = 0.0
self.n_samples_ = n_samples
self.n_components_ = n_components
# Assign a copy of the result of the truncation of the components in
# order to:
# - release the memory used by the discarded components,
# - ensure that the kept components are allocated contiguously in
# memory to make the transform method faster by leveraging cache
# locality.
self.components_ = xp.asarray(components_[:n_components, :], copy=True)
# We do the same for the other arrays for the sake of consistency.
self.explained_variance_ = xp.asarray(
explained_variance_[:n_components], copy=True
)
self.explained_variance_ratio_ = xp.asarray(
explained_variance_ratio_[:n_components], copy=True
)
self.singular_values_ = xp.asarray(singular_values_[:n_components], copy=True)
return U, S, Vt, X, x_is_centered, xp
|
Fit the model by computing full SVD on X.
|
_fit_full
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/_pca.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/_pca.py
|
BSD-3-Clause
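A sketch checking that the 'covariance_eigh' path above reconstructs the same decomposition as the full SVD path (it assumes a scikit-learn version that ships both solvers; the tolerances are deliberately loose).

import numpy as np
from sklearn.decomposition import PCA
rng = np.random.RandomState(0)
X = rng.randn(2_000, 20)
pca_full = PCA(n_components=5, svd_solver="full").fit(X)
pca_cov = PCA(n_components=5, svd_solver="covariance_eigh").fit(X)
# Same spectrum and, thanks to svd_flip, the same component signs.
np.testing.assert_allclose(
    pca_full.explained_variance_, pca_cov.explained_variance_, rtol=1e-6
)
np.testing.assert_allclose(pca_full.components_, pca_cov.components_, atol=1e-6)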
|
def _fit_truncated(self, X, n_components, xp):
"""Fit the model by computing truncated SVD (by ARPACK or randomized)
on X.
"""
n_samples, n_features = X.shape
svd_solver = self._fit_svd_solver
if isinstance(n_components, str):
raise ValueError(
"n_components=%r cannot be a string with svd_solver='%s'"
% (n_components, svd_solver)
)
elif not 1 <= n_components <= min(n_samples, n_features):
raise ValueError(
"n_components=%r must be between 1 and "
"min(n_samples, n_features)=%r with "
"svd_solver='%s'"
% (n_components, min(n_samples, n_features), svd_solver)
)
elif svd_solver == "arpack" and n_components == min(n_samples, n_features):
raise ValueError(
"n_components=%r must be strictly less than "
"min(n_samples, n_features)=%r with "
"svd_solver='%s'"
% (n_components, min(n_samples, n_features), svd_solver)
)
random_state = check_random_state(self.random_state)
# Center data
total_var = None
if issparse(X):
self.mean_, var = mean_variance_axis(X, axis=0)
total_var = var.sum() * n_samples / (n_samples - 1) # ddof=1
X_centered = _implicit_column_offset(X, self.mean_)
x_is_centered = False
else:
self.mean_ = xp.mean(X, axis=0)
X_centered = xp.asarray(X, copy=True) if self.copy else X
X_centered -= self.mean_
x_is_centered = not self.copy
if svd_solver == "arpack":
v0 = _init_arpack_v0(min(X.shape), random_state)
U, S, Vt = svds(X_centered, k=n_components, tol=self.tol, v0=v0)
# svds doesn't abide by scipy.linalg.svd/randomized_svd
# conventions, so reverse its outputs.
S = S[::-1]
# flip eigenvectors' sign to enforce deterministic output
U, Vt = svd_flip(U[:, ::-1], Vt[::-1], u_based_decision=False)
elif svd_solver == "randomized":
# sign flipping is done inside
U, S, Vt = _randomized_svd(
X_centered,
n_components=n_components,
n_oversamples=self.n_oversamples,
n_iter=self.iterated_power,
power_iteration_normalizer=self.power_iteration_normalizer,
flip_sign=False,
random_state=random_state,
)
U, Vt = svd_flip(U, Vt, u_based_decision=False)
self.n_samples_ = n_samples
self.components_ = Vt
self.n_components_ = n_components
# Get variance explained by singular values
self.explained_variance_ = (S**2) / (n_samples - 1)
# Workaround in-place variance calculation since at the time numpy
# did not have a way to calculate variance in-place.
#
# TODO: update this code to either:
# * Use the array-api variance calculation, unless memory usage suffers
# * Update sklearn.utils.extmath._incremental_mean_and_var to support array-api
# See: https://github.com/scikit-learn/scikit-learn/pull/18689#discussion_r1335540991
if total_var is None:
N = X.shape[0] - 1
X_centered **= 2
total_var = xp.sum(X_centered) / N
self.explained_variance_ratio_ = self.explained_variance_ / total_var
self.singular_values_ = xp.asarray(S, copy=True) # Store the singular values.
if self.n_components_ < min(n_features, n_samples):
self.noise_variance_ = total_var - xp.sum(self.explained_variance_)
self.noise_variance_ /= min(n_features, n_samples) - n_components
else:
self.noise_variance_ = 0.0
return U, S, Vt, X, x_is_centered, xp
|
Fit the model by computing truncated SVD (by ARPACK or randomized)
on X.
|
_fit_truncated
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/_pca.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/_pca.py
|
BSD-3-Clause
|
def score_samples(self, X):
"""Return the log-likelihood of each sample.
See. "Pattern Recognition and Machine Learning"
by C. Bishop, 12.2.1 p. 574
or http://www.miketipping.com/papers/met-mppca.pdf
Parameters
----------
X : array-like of shape (n_samples, n_features)
The data.
Returns
-------
ll : ndarray of shape (n_samples,)
Log-likelihood of each sample under the current model.
"""
check_is_fitted(self)
xp, _ = get_namespace(X)
X = validate_data(self, X, dtype=[xp.float64, xp.float32], reset=False)
Xr = X - self.mean_
n_features = X.shape[1]
precision = self.get_precision()
log_like = -0.5 * xp.sum(Xr * (Xr @ precision), axis=1)
log_like -= 0.5 * (n_features * log(2.0 * np.pi) - fast_logdet(precision))
return log_like
|
Return the log-likelihood of each sample.
See. "Pattern Recognition and Machine Learning"
by C. Bishop, 12.2.1 p. 574
or http://www.miketipping.com/papers/met-mppca.pdf
Parameters
----------
X : array-like of shape (n_samples, n_features)
The data.
Returns
-------
ll : ndarray of shape (n_samples,)
Log-likelihood of each sample under the current model.
|
score_samples
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/_pca.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/_pca.py
|
BSD-3-Clause
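`score_samples` (and `score`, its mean) evaluates the probabilistic-PCA log-likelihood, which is the usual criterion for a cross-validated choice of `n_components`. A sketch on synthetic rank-4 data, assuming only the public API; the held-out score should peak near the true rank rather than keep growing.

import numpy as np
from sklearn.decomposition import PCA
from sklearn.model_selection import cross_val_score
rng = np.random.RandomState(0)
X = rng.randn(300, 4) @ rng.randn(4, 12) + 0.1 * rng.randn(300, 12)
# PCA.score averages score_samples over the held-out fold.
for n in (2, 4, 8):
    ll = cross_val_score(PCA(n_components=n), X, cv=3).mean()
    print(n, round(float(ll), 2))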
|
def fit(self, X, y=None):
"""Fit the model from data in X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : Ignored
Not used, present here for API consistency by convention.
Returns
-------
self : object
Returns the instance itself.
"""
random_state = check_random_state(self.random_state)
X = validate_data(self, X)
self.mean_ = X.mean(axis=0)
X = X - self.mean_
if self.n_components is None:
n_components = X.shape[1]
else:
n_components = self.n_components
return self._fit(X, n_components, random_state)
|
Fit the model from data in X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : Ignored
Not used, present here for API consistency by convention.
Returns
-------
self : object
Returns the instance itself.
|
fit
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/_sparse_pca.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/_sparse_pca.py
|
BSD-3-Clause
|
def transform(self, X):
"""Least Squares projection of the data onto the sparse components.
To avoid instability issues in case the system is under-determined,
regularization can be applied (Ridge regression) via the
`ridge_alpha` parameter.
Note that Sparse PCA components orthogonality is not enforced as in PCA
hence one cannot use a simple linear projection.
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Test data to be transformed, must have the same number of
features as the data used to train the model.
Returns
-------
X_new : ndarray of shape (n_samples, n_components)
Transformed data.
"""
check_is_fitted(self)
X = validate_data(self, X, reset=False)
X = X - self.mean_
U = ridge_regression(
self.components_.T, X.T, self.ridge_alpha, solver="cholesky"
)
return U
|
Least Squares projection of the data onto the sparse components.
To avoid instability issues in case the system is under-determined,
regularization can be applied (Ridge regression) via the
`ridge_alpha` parameter.
Note that Sparse PCA components orthogonality is not enforced as in PCA
hence one cannot use a simple linear projection.
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Test data to be transformed, must have the same number of
features as the data used to train the model.
Returns
-------
X_new : ndarray of shape (n_samples, n_components)
Transformed data.
|
transform
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/_sparse_pca.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/_sparse_pca.py
|
BSD-3-Clause
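A sketch of the ridge-based projection described above (assuming the public `SparsePCA` API; the alpha value is arbitrary): the components are sparse and non-orthogonal, so `transform` solves a small ridge regression instead of taking a plain dot product.

import numpy as np
from sklearn.decomposition import SparsePCA
rng = np.random.RandomState(0)
X = rng.randn(100, 10)
spca = SparsePCA(n_components=3, alpha=1.0, random_state=0).fit(X)
codes = spca.transform(X)
print(codes.shape)                            # (100, 3)
print(float(np.mean(spca.components_ == 0)))  # fraction of exact-zero loadings
# Approximate round trip through inverse_transform:
X_back = spca.inverse_transform(codes)
print(X_back.shape)                           # (100, 10)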
|
def inverse_transform(self, X):
"""Transform data from the latent space to the original space.
This inversion is an approximation due to the loss of information
induced by the forward decomposition.
.. versionadded:: 1.2
Parameters
----------
X : ndarray of shape (n_samples, n_components)
Data in the latent space.
Returns
-------
X_original : ndarray of shape (n_samples, n_features)
Reconstructed data in the original space.
"""
check_is_fitted(self)
X = check_array(X)
return (X @ self.components_) + self.mean_
|
Transform data from the latent space to the original space.
This inversion is an approximation due to the loss of information
induced by the forward decomposition.
.. versionadded:: 1.2
Parameters
----------
X : ndarray of shape (n_samples, n_components)
Data in the latent space.
Returns
-------
X_original : ndarray of shape (n_samples, n_features)
Reconstructed data in the original space.
|
inverse_transform
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/_sparse_pca.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/_sparse_pca.py
|
BSD-3-Clause
|
def fit_transform(self, X, y=None):
"""Fit model to X and perform dimensionality reduction on X.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data.
y : Ignored
Not used, present here for API consistency by convention.
Returns
-------
X_new : ndarray of shape (n_samples, n_components)
Reduced version of X. This will always be a dense array.
"""
X = validate_data(self, X, accept_sparse=["csr", "csc"], ensure_min_features=2)
random_state = check_random_state(self.random_state)
if self.algorithm == "arpack":
v0 = _init_arpack_v0(min(X.shape), random_state)
U, Sigma, VT = svds(X, k=self.n_components, tol=self.tol, v0=v0)
# svds doesn't abide by scipy.linalg.svd/randomized_svd
# conventions, so reverse its outputs.
Sigma = Sigma[::-1]
# u_based_decision=False is needed to be consistent with PCA.
U, VT = svd_flip(U[:, ::-1], VT[::-1], u_based_decision=False)
elif self.algorithm == "randomized":
if self.n_components > X.shape[1]:
raise ValueError(
f"n_components({self.n_components}) must be <="
f" n_features({X.shape[1]})."
)
U, Sigma, VT = _randomized_svd(
X,
self.n_components,
n_iter=self.n_iter,
n_oversamples=self.n_oversamples,
power_iteration_normalizer=self.power_iteration_normalizer,
random_state=random_state,
flip_sign=False,
)
U, VT = svd_flip(U, VT, u_based_decision=False)
self.components_ = VT
# As a result of the SVD approximation error on X ~ U @ Sigma @ V.T,
# X @ V is not the same as U @ Sigma
if self.algorithm == "randomized" or (
self.algorithm == "arpack" and self.tol > 0
):
X_transformed = safe_sparse_dot(X, self.components_.T)
else:
X_transformed = U * Sigma
# Calculate explained variance & explained variance ratio
self.explained_variance_ = exp_var = np.var(X_transformed, axis=0)
if sp.issparse(X):
_, full_var = mean_variance_axis(X, axis=0)
full_var = full_var.sum()
else:
full_var = np.var(X, axis=0).sum()
self.explained_variance_ratio_ = exp_var / full_var
self.singular_values_ = Sigma # Store the singular values.
return X_transformed
|
Fit model to X and perform dimensionality reduction on X.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data.
y : Ignored
Not used, present here for API consistency by convention.
Returns
-------
X_new : ndarray of shape (n_samples, n_components)
Reduced version of X. This will always be a dense array.
|
fit_transform
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/_truncated_svd.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/_truncated_svd.py
|
BSD-3-Clause
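A sketch of the typical LSA-style use of the row above (assuming the public `TruncatedSVD` API, with a random sparse matrix standing in for a tf-idf matrix): the input stays sparse and is never centered.

import numpy as np
from scipy import sparse
from sklearn.decomposition import TruncatedSVD
X = sparse.random(1_000, 500, density=0.01, format="csr", random_state=0)
svd = TruncatedSVD(n_components=20, random_state=0)
X_reduced = svd.fit_transform(X)              # dense (1000, 20)
print(X_reduced.shape)
print(float(svd.explained_variance_ratio_.sum()))
# For the randomized algorithm, fit_transform returns X @ components_.T,
# which is exactly what transform() computes as well.
np.testing.assert_allclose(X_reduced, svd.transform(X), rtol=1e-6, atol=1e-8)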
|
def transform(self, X):
"""Perform dimensionality reduction on X.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
New data.
Returns
-------
X_new : ndarray of shape (n_samples, n_components)
Reduced version of X. This will always be a dense array.
"""
check_is_fitted(self)
X = validate_data(self, X, accept_sparse=["csr", "csc"], reset=False)
return safe_sparse_dot(X, self.components_.T)
|
Perform dimensionality reduction on X.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
New data.
Returns
-------
X_new : ndarray of shape (n_samples, n_components)
Reduced version of X. This will always be a dense array.
|
transform
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/_truncated_svd.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/_truncated_svd.py
|
BSD-3-Clause
|
def inverse_transform(self, X):
"""Transform X back to its original space.
Returns an array X_original whose transform would be X.
Parameters
----------
X : array-like of shape (n_samples, n_components)
New data.
Returns
-------
X_original : ndarray of shape (n_samples, n_features)
Note that this is always a dense array.
"""
X = check_array(X)
return np.dot(X, self.components_)
|
Transform X back to its original space.
Returns an array X_original whose transform would be X.
Parameters
----------
X : array-like of shape (n_samples, n_components)
New data.
Returns
-------
X_original : ndarray of shape (n_samples, n_features)
Note that this is always a dense array.
|
inverse_transform
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/_truncated_svd.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/_truncated_svd.py
|
BSD-3-Clause
|
def ricker_function(resolution, center, width):
"""Discrete sub-sampled Ricker (Mexican hat) wavelet"""
x = np.linspace(0, resolution - 1, resolution)
x = (
(2 / (np.sqrt(3 * width) * np.pi**0.25))
* (1 - (x - center) ** 2 / width**2)
* np.exp(-((x - center) ** 2) / (2 * width**2))
)
return x
|
Discrete sub-sampled Ricker (Mexican hat) wavelet
|
ricker_function
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/tests/test_dict_learning.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/tests/test_dict_learning.py
|
BSD-3-Clause
|
def ricker_matrix(width, resolution, n_components):
"""Dictionary of Ricker (Mexican hat) wavelets"""
centers = np.linspace(0, resolution - 1, n_components)
D = np.empty((n_components, resolution))
for i, center in enumerate(centers):
D[i] = ricker_function(resolution, center, width)
D /= np.sqrt(np.sum(D**2, axis=1))[:, np.newaxis]
return D
|
Dictionary of Ricker (Mexican hat) wavelets
|
ricker_matrix
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/tests/test_dict_learning.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/tests/test_dict_learning.py
|
BSD-3-Clause
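The two test helpers above build a Ricker-wavelet dictionary. A sketch (assuming the public `SparseCoder` estimator, which is what such a dictionary is typically fed to) encoding a noisy bump with a handful of atoms; the helpers are re-stated locally so the snippet is self-contained.

import numpy as np
from sklearn.decomposition import SparseCoder
def ricker_function(resolution, center, width):
    # Discrete sub-sampled Ricker (Mexican hat) wavelet.
    x = np.linspace(0, resolution - 1, resolution)
    return (
        (2 / (np.sqrt(3 * width) * np.pi**0.25))
        * (1 - (x - center) ** 2 / width**2)
        * np.exp(-((x - center) ** 2) / (2 * width**2))
    )
def ricker_matrix(width, resolution, n_components):
    # One unit l2-norm atom per row.
    centers = np.linspace(0, resolution - 1, n_components)
    D = np.array([ricker_function(resolution, c, width) for c in centers])
    return D / np.sqrt(np.sum(D**2, axis=1))[:, np.newaxis]
resolution = 256
D = ricker_matrix(width=10, resolution=resolution, n_components=30)
rng = np.random.RandomState(0)
signal = ricker_function(resolution, center=128, width=10)
signal = signal + 0.05 * rng.randn(resolution)
coder = SparseCoder(
    dictionary=D, transform_algorithm="omp", transform_n_nonzero_coefs=3
)
code = coder.transform(signal.reshape(1, -1))   # shape (1, 30), mostly zeros
print(int(np.count_nonzero(code)))
print(float(np.linalg.norm(signal - (code @ D).ravel())))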
|
def test_get_feature_names_out(estimator):
"""Check feature names for dict learning estimators."""
estimator.fit(X)
n_components = X.shape[1]
feature_names_out = estimator.get_feature_names_out()
estimator_name = estimator.__class__.__name__.lower()
assert_array_equal(
feature_names_out,
[f"{estimator_name}{i}" for i in range(n_components)],
)
|
Check feature names for dict learning estimators.
|
test_get_feature_names_out
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/tests/test_dict_learning.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/tests/test_dict_learning.py
|
BSD-3-Clause
|
def center_and_norm(x, axis=-1):
"""Centers and norms x **in place**
Parameters
-----------
x: ndarray
Array with an axis of observations (statistical units) measured on
random variables.
axis: int, optional
Axis along which the mean and variance are calculated.
"""
x = np.rollaxis(x, axis)
x -= x.mean(axis=0)
x /= x.std(axis=0)
|
Centers and norms x **in place**
Parameters
-----------
x: ndarray
Array with an axis of observations (statistical units) measured on
random variables.
axis: int, optional
Axis along which the mean and variance are calculated.
|
center_and_norm
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/tests/test_fastica.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/tests/test_fastica.py
|
BSD-3-Clause
|
def test_fit_transform(global_random_seed, global_dtype):
"""Test unit variance of transformed data using FastICA algorithm.
Check that `fit_transform` gives the same result as applying
`fit` and then `transform`.
Bug #13056
"""
# multivariate uniform data in [0, 1]
rng = np.random.RandomState(global_random_seed)
X = rng.random_sample((100, 10)).astype(global_dtype)
max_iter = 300
for whiten, n_components in [["unit-variance", 5], [False, None]]:
n_components_ = n_components if n_components is not None else X.shape[1]
ica = FastICA(
n_components=n_components, max_iter=max_iter, whiten=whiten, random_state=0
)
with warnings.catch_warnings():
# make sure that numerical errors do not cause sqrt of negative
# values
warnings.simplefilter("error", RuntimeWarning)
# XXX: for some seeds, the model does not converge.
# However this is not what we test here.
warnings.simplefilter("ignore", ConvergenceWarning)
Xt = ica.fit_transform(X)
assert ica.components_.shape == (n_components_, 10)
assert Xt.shape == (X.shape[0], n_components_)
ica2 = FastICA(
n_components=n_components, max_iter=max_iter, whiten=whiten, random_state=0
)
with warnings.catch_warnings():
# make sure that numerical errors do not cause sqrt of negative
# values
warnings.simplefilter("error", RuntimeWarning)
warnings.simplefilter("ignore", ConvergenceWarning)
ica2.fit(X)
assert ica2.components_.shape == (n_components_, 10)
Xt2 = ica2.transform(X)
# XXX: we have to set atol for this test to pass for all seeds when
# fitting with float32 data. Is this revealing a bug?
if global_dtype:
atol = np.abs(Xt2).mean() / 1e6
else:
atol = 0.0 # the default rtol is enough for float64 data
assert_allclose(Xt, Xt2, atol=atol)
|
Test unit variance of transformed data using FastICA algorithm.
Check that `fit_transform` gives the same result as applying
`fit` and then `transform`.
Bug #13056
|
test_fit_transform
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/tests/test_fastica.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/tests/test_fastica.py
|
BSD-3-Clause
|
def test_fastica_whiten_unit_variance(global_random_seed):
"""Test unit variance of transformed data using FastICA algorithm.
Bug #13056
"""
rng = np.random.RandomState(global_random_seed)
X = rng.random_sample((100, 10))
n_components = X.shape[1]
ica = FastICA(n_components=n_components, whiten="unit-variance", random_state=0)
Xt = ica.fit_transform(X)
assert np.var(Xt) == pytest.approx(1.0)
|
Test unit variance of transformed data using FastICA algorithm.
Bug #13056
|
test_fastica_whiten_unit_variance
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/tests/test_fastica.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/tests/test_fastica.py
|
BSD-3-Clause
|
def test_fastica_eigh_low_rank_warning(global_random_seed):
"""Test FastICA eigh solver raises warning for low-rank data."""
rng = np.random.RandomState(global_random_seed)
A = rng.randn(10, 2)
X = A @ A.T
ica = FastICA(random_state=0, whiten="unit-variance", whiten_solver="eigh")
msg = "There are some small singular values"
with pytest.warns(UserWarning, match=msg):
with ignore_warnings(category=ConvergenceWarning):
# The FastICA solver may not converge for some data with specific
# random seeds but this happens after the whiten step so this is
            # not what we want to test here.
ica.fit(X)
|
Test FastICA eigh solver raises warning for low-rank data.
|
test_fastica_eigh_low_rank_warning
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/tests/test_fastica.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/tests/test_fastica.py
|
BSD-3-Clause
|
def test_incremental_pca_feature_names_out():
"""Check feature names out for IncrementalPCA."""
ipca = IncrementalPCA(n_components=2).fit(iris.data)
names = ipca.get_feature_names_out()
assert_array_equal([f"incrementalpca{i}" for i in range(2)], names)
|
Check feature names out for IncrementalPCA.
|
test_incremental_pca_feature_names_out
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/tests/test_incremental_pca.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/tests/test_incremental_pca.py
|
BSD-3-Clause
|
def test_kernel_pca(global_random_seed):
"""Nominal test for all solvers and all known kernels + a custom one
It tests
- that fit_transform is equivalent to fit+transform
- that the shapes of transforms and inverse transforms are correct
"""
rng = np.random.RandomState(global_random_seed)
X_fit = rng.random_sample((5, 4))
X_pred = rng.random_sample((2, 4))
def histogram(x, y, **kwargs):
# Histogram kernel implemented as a callable.
assert kwargs == {} # no kernel_params that we didn't ask for
return np.minimum(x, y).sum()
for eigen_solver in ("auto", "dense", "arpack", "randomized"):
for kernel in ("linear", "rbf", "poly", histogram):
# histogram kernel produces singular matrix inside linalg.solve
# XXX use a least-squares approximation?
inv = not callable(kernel)
# transform fit data
kpca = KernelPCA(
4, kernel=kernel, eigen_solver=eigen_solver, fit_inverse_transform=inv
)
X_fit_transformed = kpca.fit_transform(X_fit)
X_fit_transformed2 = kpca.fit(X_fit).transform(X_fit)
assert_array_almost_equal(
np.abs(X_fit_transformed), np.abs(X_fit_transformed2)
)
# non-regression test: previously, gamma would be 0 by default,
# forcing all eigenvalues to 0 under the poly kernel
assert X_fit_transformed.size != 0
# transform new data
X_pred_transformed = kpca.transform(X_pred)
assert X_pred_transformed.shape[1] == X_fit_transformed.shape[1]
# inverse transform
if inv:
X_pred2 = kpca.inverse_transform(X_pred_transformed)
assert X_pred2.shape == X_pred.shape
|
Nominal test for all solvers and all known kernels + a custom one
It tests
- that fit_transform is equivalent to fit+transform
- that the shapes of transforms and inverse transforms are correct
|
test_kernel_pca
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/tests/test_kernel_pca.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/tests/test_kernel_pca.py
|
BSD-3-Clause
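A standalone sketch of the callable-kernel case exercised by the test above (assuming the public `KernelPCA` API; the histogram kernel and array shapes mirror the test).

import numpy as np
from sklearn.decomposition import KernelPCA
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
X_new = rng.random_sample((2, 4))
def histogram(x, y, **kwargs):
    # Histogram-intersection kernel between two individual samples.
    return np.minimum(x, y).sum()
# fit_inverse_transform is left at False: the resulting Gram matrix can be
# singular, as the test comment notes.
kpca = KernelPCA(n_components=3, kernel=histogram, eigen_solver="dense")
Z_fit = kpca.fit_transform(X_fit)
Z_new = kpca.transform(X_new)
print(Z_fit.shape, Z_new.shape)           # (5, 3) (2, 3)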
|
def test_kernel_pca_invalid_parameters():
"""Check that kPCA raises an error if the parameters are invalid
Tests fitting inverse transform with a precomputed kernel raises a
ValueError.
"""
estimator = KernelPCA(
n_components=10, fit_inverse_transform=True, kernel="precomputed"
)
err_ms = "Cannot fit_inverse_transform with a precomputed kernel"
with pytest.raises(ValueError, match=err_ms):
estimator.fit(np.random.randn(10, 10))
|
Check that kPCA raises an error if the parameters are invalid
Tests fitting inverse transform with a precomputed kernel raises a
ValueError.
|
test_kernel_pca_invalid_parameters
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/tests/test_kernel_pca.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/tests/test_kernel_pca.py
|
BSD-3-Clause
|
def test_kernel_pca_consistent_transform(global_random_seed):
"""Check robustness to mutations in the original training array
Test that after fitting a kPCA model, it stays independent of any
mutation of the values of the original data object by relying on an
internal copy.
"""
# X_fit_ needs to retain the old, unmodified copy of X
state = np.random.RandomState(global_random_seed)
X = state.rand(10, 10)
kpca = KernelPCA(random_state=state).fit(X)
transformed1 = kpca.transform(X)
X_copy = X.copy()
X[:, 0] = 666
transformed2 = kpca.transform(X_copy)
assert_array_almost_equal(transformed1, transformed2)
|
Check robustness to mutations in the original training array
Test that after fitting a kPCA model, it stays independent of any
mutation of the values of the original data object by relying on an
internal copy.
|
test_kernel_pca_consistent_transform
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/tests/test_kernel_pca.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/tests/test_kernel_pca.py
|
BSD-3-Clause
|
def test_kernel_pca_deterministic_output(global_random_seed):
"""Test that Kernel PCA produces deterministic output
Tests that the same inputs and random state produce the same output.
"""
rng = np.random.RandomState(global_random_seed)
X = rng.rand(10, 10)
eigen_solver = ("arpack", "dense")
for solver in eigen_solver:
transformed_X = np.zeros((20, 2))
for i in range(20):
kpca = KernelPCA(n_components=2, eigen_solver=solver, random_state=rng)
transformed_X[i, :] = kpca.fit_transform(X)[0]
assert_allclose(transformed_X, np.tile(transformed_X[0, :], 20).reshape(20, 2))
|
Test that Kernel PCA produces deterministic output
Tests that the same inputs and random state produce the same output.
|
test_kernel_pca_deterministic_output
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/tests/test_kernel_pca.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/tests/test_kernel_pca.py
|
BSD-3-Clause
|
def test_kernel_pca_sparse(csr_container, global_random_seed):
"""Test that kPCA works on a sparse data input.
    Same test as ``test_kernel_pca`` except ``inverse_transform``, since it's
    not implemented for sparse matrices.
"""
rng = np.random.RandomState(global_random_seed)
X_fit = csr_container(rng.random_sample((5, 4)))
X_pred = csr_container(rng.random_sample((2, 4)))
for eigen_solver in ("auto", "arpack", "randomized"):
for kernel in ("linear", "rbf", "poly"):
# transform fit data
kpca = KernelPCA(
4,
kernel=kernel,
eigen_solver=eigen_solver,
fit_inverse_transform=False,
random_state=0,
)
X_fit_transformed = kpca.fit_transform(X_fit)
X_fit_transformed2 = kpca.fit(X_fit).transform(X_fit)
assert_array_almost_equal(
np.abs(X_fit_transformed), np.abs(X_fit_transformed2)
)
# transform new data
X_pred_transformed = kpca.transform(X_pred)
assert X_pred_transformed.shape[1] == X_fit_transformed.shape[1]
# inverse transform: not available for sparse matrices
# XXX: should we raise another exception type here? For instance:
# NotImplementedError.
with pytest.raises(NotFittedError):
kpca.inverse_transform(X_pred_transformed)
|
Test that kPCA works on a sparse data input.
Same test as ``test_kernel_pca`` except ``inverse_transform``, since it's not
implemented for sparse matrices.
|
test_kernel_pca_sparse
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/tests/test_kernel_pca.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/tests/test_kernel_pca.py
|
BSD-3-Clause
|
def test_kernel_pca_linear_kernel(solver, n_features, global_random_seed):
"""Test that kPCA with linear kernel is equivalent to PCA for all solvers.
KernelPCA with linear kernel should produce the same output as PCA.
"""
rng = np.random.RandomState(global_random_seed)
X_fit = rng.random_sample((5, n_features))
X_pred = rng.random_sample((2, n_features))
# for a linear kernel, kernel PCA should find the same projection as PCA
# modulo the sign (direction)
# fit only the first four components: fifth is near zero eigenvalue, so
# can be trimmed due to roundoff error
n_comps = 3 if solver == "arpack" else 4
assert_array_almost_equal(
np.abs(KernelPCA(n_comps, eigen_solver=solver).fit(X_fit).transform(X_pred)),
np.abs(
PCA(n_comps, svd_solver=solver if solver != "dense" else "full")
.fit(X_fit)
.transform(X_pred)
),
)
|
Test that kPCA with linear kernel is equivalent to PCA for all solvers.
KernelPCA with linear kernel should produce the same output as PCA.
|
test_kernel_pca_linear_kernel
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/tests/test_kernel_pca.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/tests/test_kernel_pca.py
|
BSD-3-Clause
|
def test_kernel_pca_n_components():
"""Test that `n_components` is correctly taken into account for projections
For all solvers this tests that the output has the correct shape depending
on the selected number of components.
"""
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
X_pred = rng.random_sample((2, 4))
for eigen_solver in ("dense", "arpack", "randomized"):
for c in [1, 2, 4]:
kpca = KernelPCA(n_components=c, eigen_solver=eigen_solver)
shape = kpca.fit(X_fit).transform(X_pred).shape
assert shape == (2, c)
|
Test that `n_components` is correctly taken into account for projections
For all solvers this tests that the output has the correct shape depending
on the selected number of components.
|
test_kernel_pca_n_components
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/tests/test_kernel_pca.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/tests/test_kernel_pca.py
|
BSD-3-Clause
|
def test_remove_zero_eig():
"""Check that the ``remove_zero_eig`` parameter works correctly.
Tests that the null-space (Zero) eigenvalues are removed when
remove_zero_eig=True, whereas they are not by default.
"""
X = np.array([[1 - 1e-30, 1], [1, 1], [1, 1 - 1e-20]])
# n_components=None (default) => remove_zero_eig is True
kpca = KernelPCA()
Xt = kpca.fit_transform(X)
assert Xt.shape == (3, 0)
kpca = KernelPCA(n_components=2)
Xt = kpca.fit_transform(X)
assert Xt.shape == (3, 2)
kpca = KernelPCA(n_components=2, remove_zero_eig=True)
Xt = kpca.fit_transform(X)
assert Xt.shape == (3, 0)
|
Check that the ``remove_zero_eig`` parameter works correctly.
Tests that the null-space (Zero) eigenvalues are removed when
remove_zero_eig=True, whereas they are not by default.
|
test_remove_zero_eig
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/tests/test_kernel_pca.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/tests/test_kernel_pca.py
|
BSD-3-Clause
|
def test_leave_zero_eig():
"""Non-regression test for issue #12141 (PR #12143)
This test checks that fit().transform() returns the same result as
fit_transform() in case of non-removed zero eigenvalue.
"""
X_fit = np.array([[1, 1], [0, 0]])
# Assert that even with all np warnings on, there is no div by zero warning
with warnings.catch_warnings():
# There might be warnings about the kernel being badly conditioned,
# but there should not be warnings about division by zero.
# (Numpy division by zero warning can have many message variants, but
        # at least we know that it is a RuntimeWarning so let's check only this)
warnings.simplefilter("error", RuntimeWarning)
with np.errstate(all="warn"):
k = KernelPCA(n_components=2, remove_zero_eig=False, eigen_solver="dense")
# Fit, then transform
A = k.fit(X_fit).transform(X_fit)
# Do both at once
B = k.fit_transform(X_fit)
# Compare
assert_array_almost_equal(np.abs(A), np.abs(B))
|
Non-regression test for issue #12141 (PR #12143)
This test checks that fit().transform() returns the same result as
fit_transform() in case of non-removed zero eigenvalue.
|
test_leave_zero_eig
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/tests/test_kernel_pca.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/tests/test_kernel_pca.py
|
BSD-3-Clause
|
def test_kernel_pca_precomputed(global_random_seed):
"""Test that kPCA works with a precomputed kernel, for all solvers"""
rng = np.random.RandomState(global_random_seed)
X_fit = rng.random_sample((5, 4))
X_pred = rng.random_sample((2, 4))
for eigen_solver in ("dense", "arpack", "randomized"):
X_kpca = (
KernelPCA(4, eigen_solver=eigen_solver, random_state=0)
.fit(X_fit)
.transform(X_pred)
)
X_kpca2 = (
KernelPCA(
4, eigen_solver=eigen_solver, kernel="precomputed", random_state=0
)
.fit(np.dot(X_fit, X_fit.T))
.transform(np.dot(X_pred, X_fit.T))
)
X_kpca_train = KernelPCA(
4, eigen_solver=eigen_solver, kernel="precomputed", random_state=0
).fit_transform(np.dot(X_fit, X_fit.T))
X_kpca_train2 = (
KernelPCA(
4, eigen_solver=eigen_solver, kernel="precomputed", random_state=0
)
.fit(np.dot(X_fit, X_fit.T))
.transform(np.dot(X_fit, X_fit.T))
)
assert_array_almost_equal(np.abs(X_kpca), np.abs(X_kpca2))
assert_array_almost_equal(np.abs(X_kpca_train), np.abs(X_kpca_train2))
|
Test that kPCA works with a precomputed kernel, for all solvers
|
test_kernel_pca_precomputed
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/tests/test_kernel_pca.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/tests/test_kernel_pca.py
|
BSD-3-Clause
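With kernel="precomputed", the caller supplies the Gram matrices directly; the convention the test exercises is K_fit of shape (n_train, n_train) and K_test of shape (n_test, n_train). A sketch for the linear case, assuming only the public `KernelPCA` API:

import numpy as np
from sklearn.decomposition import KernelPCA
rng = np.random.RandomState(0)
X_train = rng.random_sample((6, 4))
X_test = rng.random_sample((3, 4))
# Linear Gram matrices for fit and transform.
K_train = X_train @ X_train.T
K_test = X_test @ X_train.T
kpca = KernelPCA(n_components=2, kernel="precomputed", random_state=0)
Z_train = kpca.fit_transform(K_train)
Z_test = kpca.transform(K_test)
# Up to sign, this matches KernelPCA with kernel="linear" on the raw data.
kpca_lin = KernelPCA(n_components=2, kernel="linear", random_state=0)
Z_ref = kpca_lin.fit(X_train).transform(X_test)
np.testing.assert_allclose(np.abs(Z_test), np.abs(Z_ref), atol=1e-10)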
|
def test_kernel_pca_precomputed_non_symmetric(solver):
"""Check that the kernel centerer works.
Tests that a non symmetric precomputed kernel is actually accepted
because the kernel centerer does its job correctly.
"""
# a non symmetric gram matrix
K = [[1, 2], [3, 40]]
kpca = KernelPCA(
kernel="precomputed", eigen_solver=solver, n_components=1, random_state=0
)
kpca.fit(K) # no error
# same test with centered kernel
Kc = [[9, -9], [-9, 9]]
kpca_c = KernelPCA(
kernel="precomputed", eigen_solver=solver, n_components=1, random_state=0
)
kpca_c.fit(Kc)
# comparison between the non-centered and centered versions
assert_array_equal(kpca.eigenvectors_, kpca_c.eigenvectors_)
assert_array_equal(kpca.eigenvalues_, kpca_c.eigenvalues_)
|
Check that the kernel centerer works.
Tests that a non symmetric precomputed kernel is actually accepted
because the kernel centerer does its job correctly.
|
test_kernel_pca_precomputed_non_symmetric
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/tests/test_kernel_pca.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/tests/test_kernel_pca.py
|
BSD-3-Clause
|
def test_gridsearch_pipeline():
"""Check that kPCA works as expected in a grid search pipeline
Test if we can do a grid-search to find parameters to separate
circles with a perceptron model.
"""
X, y = make_circles(n_samples=400, factor=0.3, noise=0.05, random_state=0)
kpca = KernelPCA(kernel="rbf", n_components=2)
pipeline = Pipeline([("kernel_pca", kpca), ("Perceptron", Perceptron(max_iter=5))])
param_grid = dict(kernel_pca__gamma=2.0 ** np.arange(-2, 2))
grid_search = GridSearchCV(pipeline, cv=3, param_grid=param_grid)
grid_search.fit(X, y)
assert grid_search.best_score_ == 1
|
Check that kPCA works as expected in a grid search pipeline
Test if we can do a grid-search to find parameters to separate
circles with a perceptron model.
|
test_gridsearch_pipeline
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/tests/test_kernel_pca.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/tests/test_kernel_pca.py
|
BSD-3-Clause
|
def test_gridsearch_pipeline_precomputed():
"""Check that kPCA works as expected in a grid search pipeline (2)
Test if we can do a grid-search to find parameters to separate
circles with a perceptron model. This test uses a precomputed kernel.
"""
X, y = make_circles(n_samples=400, factor=0.3, noise=0.05, random_state=0)
kpca = KernelPCA(kernel="precomputed", n_components=2)
pipeline = Pipeline([("kernel_pca", kpca), ("Perceptron", Perceptron(max_iter=5))])
param_grid = dict(Perceptron__max_iter=np.arange(1, 5))
grid_search = GridSearchCV(pipeline, cv=3, param_grid=param_grid)
X_kernel = rbf_kernel(X, gamma=2.0)
grid_search.fit(X_kernel, y)
assert grid_search.best_score_ == 1
|
Check that kPCA works as expected in a grid search pipeline (2)
Test if we can do a grid-search to find parameters to separate
circles with a perceptron model. This test uses a precomputed kernel.
|
test_gridsearch_pipeline_precomputed
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/tests/test_kernel_pca.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/tests/test_kernel_pca.py
|
BSD-3-Clause
|
def test_nested_circles():
"""Check that kPCA projects in a space where nested circles are separable
Tests that 2D nested circles become separable with a perceptron when
projected in the first 2 kPCA using an RBF kernel, while raw samples
are not directly separable in the original space.
"""
X, y = make_circles(n_samples=400, factor=0.3, noise=0.05, random_state=0)
# 2D nested circles are not linearly separable
train_score = Perceptron(max_iter=5).fit(X, y).score(X, y)
assert train_score < 0.8
# Project the circles data into the first 2 components of a RBF Kernel
# PCA model.
# Note that the gamma value is data dependent. If this test breaks
# and the gamma value has to be updated, the Kernel PCA example will
# have to be updated too.
kpca = KernelPCA(
kernel="rbf", n_components=2, fit_inverse_transform=True, gamma=2.0
)
X_kpca = kpca.fit_transform(X)
# The data is perfectly linearly separable in that space
train_score = Perceptron(max_iter=5).fit(X_kpca, y).score(X_kpca, y)
assert train_score == 1.0
|
Check that kPCA projects in a space where nested circles are separable
Tests that 2D nested circles become separable with a perceptron when
projected onto the first 2 kernel PCA components using an RBF kernel, while raw samples
are not directly separable in the original space.
|
test_nested_circles
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/tests/test_kernel_pca.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/tests/test_kernel_pca.py
|
BSD-3-Clause
|
def test_kernel_conditioning():
"""Check that ``_check_psd_eigenvalues`` is correctly called in kPCA
Non-regression test for issue #12140 (PR #12145).
"""
# create a pathological X leading to small non-zero eigenvalue
X = [[5, 1], [5 + 1e-8, 1e-8], [5 + 1e-8, 0]]
kpca = KernelPCA(kernel="linear", n_components=2, fit_inverse_transform=True)
kpca.fit(X)
# check that the small non-zero eigenvalue was correctly set to zero
assert kpca.eigenvalues_.min() == 0
assert np.all(kpca.eigenvalues_ == _check_psd_eigenvalues(kpca.eigenvalues_))
|
Check that ``_check_psd_eigenvalues`` is correctly called in kPCA
Non-regression test for issue #12140 (PR #12145).
|
test_kernel_conditioning
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/tests/test_kernel_pca.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/tests/test_kernel_pca.py
|
BSD-3-Clause
|
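A rough sketch of the clipping behaviour pinned down above, assuming the private helper `sklearn.utils.validation._check_psd_eigenvalues` keeps its current semantics (insignificant negative eigenvalues are zeroed, significant ones raise a ValueError); being a private API, the exact thresholds and message may change.

import numpy as np
from sklearn.utils.validation import _check_psd_eigenvalues

# A tiny negative eigenvalue (relative to the largest one) is treated as
# numerical noise and clipped to zero, as in test_kernel_conditioning above.
print(_check_psd_eigenvalues(np.array([5.0, -1e-12])))  # roughly [5., 0.]

# A clearly negative eigenvalue raises the error that the next test,
# test_precomputed_kernel_not_psd, checks for.
try:
    _check_psd_eigenvalues(np.array([5.0, -1.0]))
except ValueError as exc:
    print(exc)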
def test_precomputed_kernel_not_psd(solver):
"""Check how KernelPCA works with non-PSD kernels depending on n_components
Tests, for all eigen solvers, what happens with a non-PSD Gram matrix (this
can happen in an isomap scenario, or with custom kernel functions, or
maybe with ill-posed datasets).
When ``n_components`` is large enough to capture a significant negative
eigenvalue, an error should be raised. Otherwise, KernelPCA should run
without error since the negative eigenvalues are not selected.
"""
# a non-PSD kernel with large eigenvalues, already centered
# it was captured from an isomap call and multiplied by 100 for compactness
K = [
[4.48, -1.0, 8.07, 2.33, 2.33, 2.33, -5.76, -12.78],
[-1.0, -6.48, 4.5, -1.24, -1.24, -1.24, -0.81, 7.49],
[8.07, 4.5, 15.48, 2.09, 2.09, 2.09, -11.1, -23.23],
[2.33, -1.24, 2.09, 4.0, -3.65, -3.65, 1.02, -0.9],
[2.33, -1.24, 2.09, -3.65, 4.0, -3.65, 1.02, -0.9],
[2.33, -1.24, 2.09, -3.65, -3.65, 4.0, 1.02, -0.9],
[-5.76, -0.81, -11.1, 1.02, 1.02, 1.02, 4.86, 9.75],
[-12.78, 7.49, -23.23, -0.9, -0.9, -0.9, 9.75, 21.46],
]
# this gram matrix has 5 positive eigenvalues and 3 negative ones
# [ 52.72, 7.65, 7.65, 5.02, 0. , -0. , -6.13, -15.11]
# 1. ask for enough components to get a significant negative one
kpca = KernelPCA(kernel="precomputed", eigen_solver=solver, n_components=7)
# make sure that the appropriate error is raised
with pytest.raises(ValueError, match="There are significant negative eigenvalues"):
kpca.fit(K)
# 2. ask for a small enough n_components to get only positive ones
kpca = KernelPCA(kernel="precomputed", eigen_solver=solver, n_components=2)
if solver == "randomized":
# the randomized solver is still inconsistent with the others on this
# matrix, since it selects the eigenvalues with the largest 2 moduli
# (absolute values), not the largest 2 algebraic values.
#
# At least we can ensure that we return an error instead of returning
# the wrong eigenvalues
with pytest.raises(
ValueError, match="There are significant negative eigenvalues"
):
kpca.fit(K)
else:
# general case: make sure that it works
kpca.fit(K)
|
Check how KernelPCA works with non-PSD kernels depending on n_components
Tests, for all eigen solvers, what happens with a non-PSD Gram matrix (this
can happen in an isomap scenario, or with custom kernel functions, or
maybe with ill-posed datasets).
When ``n_components`` is large enough to capture a significant negative
eigenvalue, an error should be raised. Otherwise, KernelPCA should run
without error since the negative eigenvalues are not selected.
|
test_precomputed_kernel_not_psd
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/tests/test_kernel_pca.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/tests/test_kernel_pca.py
|
BSD-3-Clause
|
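The spectrum quoted in the comments above can be reproduced with plain NumPy; a small sketch (illustrative, rounding to two decimals as in the comment) showing why n_components=7 reaches a significantly negative eigenvalue while n_components=2 stays in the positive part of the spectrum.

import numpy as np

# Same non-PSD Gram matrix as in test_precomputed_kernel_not_psd above.
K = np.array([
    [4.48, -1.0, 8.07, 2.33, 2.33, 2.33, -5.76, -12.78],
    [-1.0, -6.48, 4.5, -1.24, -1.24, -1.24, -0.81, 7.49],
    [8.07, 4.5, 15.48, 2.09, 2.09, 2.09, -11.1, -23.23],
    [2.33, -1.24, 2.09, 4.0, -3.65, -3.65, 1.02, -0.9],
    [2.33, -1.24, 2.09, -3.65, 4.0, -3.65, 1.02, -0.9],
    [2.33, -1.24, 2.09, -3.65, -3.65, 4.0, 1.02, -0.9],
    [-5.76, -0.81, -11.1, 1.02, 1.02, 1.02, 4.86, 9.75],
    [-12.78, 7.49, -23.23, -0.9, -0.9, -0.9, 9.75, 21.46],
])

# Eigenvalues in descending order; roughly
# [52.72, 7.65, 7.65, 5.02, 0., -0., -6.13, -15.11] as noted in the comment.
eigenvalues = np.linalg.eigvalsh(K)[::-1]
print(np.round(eigenvalues, 2))

# The 7 largest eigenvalues include ~-6.13, a significant negative value,
# which triggers the ValueError; the 2 largest are safely positive.
assert eigenvalues[6] < -1.0 and eigenvalues[1] > 1.0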
def test_kernel_pca_solvers_equivalence(n_components):
"""Check that 'dense' 'arpack' & 'randomized' solvers give similar results"""
# Generate random data
n_train, n_test = 1_000, 100
X, _ = make_circles(
n_samples=(n_train + n_test), factor=0.3, noise=0.05, random_state=0
)
X_fit, X_pred = X[:n_train, :], X[n_train:, :]
# reference (full)
ref_pred = (
KernelPCA(n_components, eigen_solver="dense", random_state=0)
.fit(X_fit)
.transform(X_pred)
)
# arpack
a_pred = (
KernelPCA(n_components, eigen_solver="arpack", random_state=0)
.fit(X_fit)
.transform(X_pred)
)
# check that the result is still correct despite the approx
assert_array_almost_equal(np.abs(a_pred), np.abs(ref_pred))
# randomized
r_pred = (
KernelPCA(n_components, eigen_solver="randomized", random_state=0)
.fit(X_fit)
.transform(X_pred)
)
# check that the result is still correct despite the approximation
assert_array_almost_equal(np.abs(r_pred), np.abs(ref_pred))
|
Check that 'dense', 'arpack' and 'randomized' solvers give similar results
|
test_kernel_pca_solvers_equivalence
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/tests/test_kernel_pca.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/tests/test_kernel_pca.py
|
BSD-3-Clause
|
def test_kernel_pca_inverse_transform_reconstruction():
"""Test if the reconstruction is a good approximation.
Note that in general it is not possible to get an arbitrarily good
reconstruction, because kernel centering does not preserve all of the
information in the original data.
"""
X, *_ = make_blobs(n_samples=100, n_features=4, random_state=0)
kpca = KernelPCA(
n_components=20, kernel="rbf", fit_inverse_transform=True, alpha=1e-3
)
X_trans = kpca.fit_transform(X)
X_reconst = kpca.inverse_transform(X_trans)
assert np.linalg.norm(X - X_reconst) / np.linalg.norm(X) < 1e-1
|
Test if the reconstruction is a good approximation.
Note that in general it is not possible to get an arbitrarily good
reconstruction, because kernel centering does not preserve all of the
information in the original data.
|
test_kernel_pca_inverse_transform_reconstruction
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/tests/test_kernel_pca.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/tests/test_kernel_pca.py
|
BSD-3-Clause
|
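A hedged usage sketch of the same reconstruction check, varying n_components to illustrate that the relative error typically shrinks as more components are kept; the data and the printed values are illustrative, not taken from the test.

import numpy as np
from sklearn.datasets import make_blobs
from sklearn.decomposition import KernelPCA

X, *_ = make_blobs(n_samples=100, n_features=4, random_state=0)

def relative_reconstruction_error(n_components):
    # fit_inverse_transform=True learns an approximate pre-image map from the
    # kPCA space back to the input space via kernel ridge regression.
    kpca = KernelPCA(
        n_components=n_components,
        kernel="rbf",
        fit_inverse_transform=True,
        alpha=1e-3,
    )
    X_reconst = kpca.inverse_transform(kpca.fit_transform(X))
    return np.linalg.norm(X - X_reconst) / np.linalg.norm(X)

# More components generally give a better, but never perfect, reconstruction.
print([round(relative_reconstruction_error(k), 3) for k in (2, 5, 20)])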
def test_32_64_decomposition_shape():
"""Test that the decomposition is similar for 32 and 64 bits data
Non regression test for
https://github.com/scikit-learn/scikit-learn/issues/18146
"""
X, y = make_blobs(
n_samples=30, centers=[[0, 0, 0], [1, 1, 1]], random_state=0, cluster_std=0.1
)
X = StandardScaler().fit_transform(X)
X -= X.min()
# Compare the shapes (corresponds to the number of non-zero eigenvalues)
kpca = KernelPCA()
assert kpca.fit_transform(X).shape == kpca.fit_transform(X.astype(np.float32)).shape
|
Test that the decomposition is similar for 32-bit and 64-bit data
Non-regression test for
https://github.com/scikit-learn/scikit-learn/issues/18146
|
test_32_64_decomposition_shape
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/tests/test_kernel_pca.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/tests/test_kernel_pca.py
|
BSD-3-Clause
|
def test_kernel_pca_feature_names_out():
"""Check feature names out for KernelPCA."""
X, *_ = make_blobs(n_samples=100, n_features=4, random_state=0)
kpca = KernelPCA(n_components=2).fit(X)
names = kpca.get_feature_names_out()
assert_array_equal([f"kernelpca{i}" for i in range(2)], names)
|
Check feature names out for KernelPCA.
|
test_kernel_pca_feature_names_out
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/tests/test_kernel_pca.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/tests/test_kernel_pca.py
|
BSD-3-Clause
|
def test_kernel_pca_inverse_correct_gamma(global_random_seed):
"""Check that gamma is set correctly when not provided.
Non-regression test for #26280
"""
rng = np.random.RandomState(global_random_seed)
X = rng.random_sample((5, 4))
kwargs = {
"n_components": 2,
"random_state": rng,
"fit_inverse_transform": True,
"kernel": "rbf",
}
expected_gamma = 1 / X.shape[1]
kpca1 = KernelPCA(gamma=None, **kwargs).fit(X)
kpca2 = KernelPCA(gamma=expected_gamma, **kwargs).fit(X)
assert kpca1.gamma_ == expected_gamma
assert kpca2.gamma_ == expected_gamma
X1_recon = kpca1.inverse_transform(kpca1.transform(X))
X2_recon = kpca2.inverse_transform(kpca1.transform(X))
assert_allclose(X1_recon, X2_recon)
|
Check that gamma is set correctly when not provided.
Non-regression test for #26280
|
test_kernel_pca_inverse_correct_gamma
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/tests/test_kernel_pca.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/tests/test_kernel_pca.py
|
BSD-3-Clause
|
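A small sketch of the default being pinned down above: when gamma=None the RBF kernel falls back to 1 / n_features, exposed after fitting through the gamma_ attribute that this non-regression test relies on (illustrative data).

import numpy as np
from sklearn.decomposition import KernelPCA

rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))  # n_features = 4

kpca = KernelPCA(n_components=2, kernel="rbf", gamma=None).fit(X)

assert kpca.gamma is None             # the constructor parameter is left untouched
assert kpca.gamma_ == 1 / X.shape[1]  # the fitted value defaults to 1 / n_features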
def test_kernel_pca_pandas_output():
"""Check that KernelPCA works with pandas output when the solver is arpack.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/27579
"""
pytest.importorskip("pandas")
X, _ = load_iris(as_frame=True, return_X_y=True)
with sklearn.config_context(transform_output="pandas"):
KernelPCA(n_components=2, eigen_solver="arpack").fit_transform(X)
|
Check that KernelPCA works with pandas output when the solver is arpack.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/27579
|
test_kernel_pca_pandas_output
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/tests/test_kernel_pca.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/tests/test_kernel_pca.py
|
BSD-3-Clause
|
def _beta_divergence_dense(X, W, H, beta):
"""Compute the beta-divergence of X and W.H for dense array only.
Used as a reference for testing nmf._beta_divergence.
"""
WH = np.dot(W, H)
if beta == 2:
return squared_norm(X - WH) / 2
WH_Xnonzero = WH[X != 0]
X_nonzero = X[X != 0]
np.maximum(WH_Xnonzero, 1e-9, out=WH_Xnonzero)
if beta == 1:
res = np.sum(X_nonzero * np.log(X_nonzero / WH_Xnonzero))
res += WH.sum() - X.sum()
elif beta == 0:
div = X_nonzero / WH_Xnonzero
res = np.sum(div) - X.size - np.sum(np.log(div))
else:
res = (X_nonzero**beta).sum()
res += (beta - 1) * (WH**beta).sum()
res -= beta * (X_nonzero * (WH_Xnonzero ** (beta - 1))).sum()
res /= beta * (beta - 1)
return res
|
Compute the beta-divergence of X and W.H for dense array only.
Used as a reference for testing nmf._beta_divergence.
|
_beta_divergence_dense
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/tests/test_nmf.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/tests/test_nmf.py
|
BSD-3-Clause
|
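For reference, the quantity this helper accumulates is the elementwise beta-divergence summed over the entries of X and WH (with the usual convention 0 log 0 = 0 for the zero entries of X); the standard definition below matches the branches of the code:

d_\beta(x, y) =
\begin{cases}
\dfrac{x^\beta + (\beta - 1)\, y^\beta - \beta\, x\, y^{\beta - 1}}{\beta(\beta - 1)}, & \beta \notin \{0, 1\} \\[1ex]
x \log\dfrac{x}{y} - x + y, & \beta = 1 \ \text{(generalized Kullback-Leibler)} \\[1ex]
\dfrac{x}{y} - \log\dfrac{x}{y} - 1, & \beta = 0 \ \text{(Itakura-Saito)}
\end{cases}

so that the helper returns \sum_{ij} d_\beta\big(X_{ij}, (WH)_{ij}\big); for \beta = 2 this reduces to \tfrac{1}{2}\lVert X - WH \rVert_F^2, the case handled first in the code.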
def test_minibatch_nmf_negative_beta_loss(beta_loss):
"""Check that an error is raised if beta_loss < 0 and X contains zeros."""
rng = np.random.RandomState(0)
X = rng.normal(size=(6, 5))
X[X < 0] = 0
nmf = MiniBatchNMF(beta_loss=beta_loss, random_state=0)
msg = "When beta_loss <= 0 and X contains zeros, the solver may diverge."
with pytest.raises(ValueError, match=msg):
nmf.fit(X)
|
Check that an error is raised if beta_loss < 0 and X contains zeros.
|
test_minibatch_nmf_negative_beta_loss
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/tests/test_nmf.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/tests/test_nmf.py
|
BSD-3-Clause
|
def test_feature_names_out():
"""Check feature names out for NMF."""
random_state = np.random.RandomState(0)
X = np.abs(random_state.randn(10, 4))
nmf = NMF(n_components=3).fit(X)
names = nmf.get_feature_names_out()
assert_array_equal([f"nmf{i}" for i in range(3)], names)
|
Check feature names out for NMF.
|
test_feature_names_out
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/tests/test_nmf.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/tests/test_nmf.py
|
BSD-3-Clause
|
def test_lda_empty_docs(csr_container):
"""Test LDA on empty document (all-zero rows)."""
Z = np.zeros((5, 4))
for X in [Z, csr_container(Z)]:
lda = LatentDirichletAllocation(max_iter=750).fit(X)
assert_almost_equal(
lda.components_.sum(axis=0), np.ones(lda.components_.shape[1])
)
|
Test LDA on empty documents (all-zero rows).
|
test_lda_empty_docs
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/tests/test_online_lda.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/tests/test_online_lda.py
|
BSD-3-Clause
|
def test_dirichlet_expectation():
"""Test Cython version of Dirichlet expectation calculation."""
x = np.logspace(-100, 10, 10000)
expectation = np.empty_like(x)
_dirichlet_expectation_1d(x, 0, expectation)
assert_allclose(expectation, np.exp(psi(x) - psi(np.sum(x))), atol=1e-19)
x = x.reshape(100, 100)
assert_allclose(
_dirichlet_expectation_2d(x),
psi(x) - psi(np.sum(x, axis=1)[:, np.newaxis]),
rtol=1e-11,
atol=3e-9,
)
|
Test Cython version of Dirichlet expectation calculation.
|
test_dirichlet_expectation
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/tests/test_online_lda.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/tests/test_online_lda.py
|
BSD-3-Clause
|
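The identity being verified is the standard Dirichlet expectation used in the variational updates of online LDA; with \psi the digamma function and a Dirichlet parameter vector \alpha,

\mathbb{E}_q\!\left[\log \theta_k\right] \;=\; \psi(\alpha_k) \;-\; \psi\!\Big(\sum_{k'} \alpha_{k'}\Big),

which is what `_dirichlet_expectation_2d` returns row-wise, while `_dirichlet_expectation_1d` returns the exponentiated version \exp\big(\psi(\alpha_k) - \psi(\sum_{k'} \alpha_{k'})\big), as the comparison against SciPy's `psi` in the test above shows.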
def test_lda_feature_names_out(csr_container):
"""Check feature names out for LatentDirichletAllocation."""
n_components, X = _build_sparse_array(csr_container)
lda = LatentDirichletAllocation(n_components=n_components).fit(X)
names = lda.get_feature_names_out()
assert_array_equal(
[f"latentdirichletallocation{i}" for i in range(n_components)], names
)
|
Check feature names out for LatentDirichletAllocation.
|
test_lda_feature_names_out
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/tests/test_online_lda.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/tests/test_online_lda.py
|
BSD-3-Clause
|
def test_lda_dtype_match(learning_method, global_dtype):
"""Check data type preservation of fitted attributes."""
rng = np.random.RandomState(0)
X = rng.uniform(size=(20, 10)).astype(global_dtype, copy=False)
lda = LatentDirichletAllocation(
n_components=5, random_state=0, learning_method=learning_method
)
lda.fit(X)
assert lda.components_.dtype == global_dtype
assert lda.exp_dirichlet_component_.dtype == global_dtype
|
Check data type preservation of fitted attributes.
|
test_lda_dtype_match
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/tests/test_online_lda.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/tests/test_online_lda.py
|
BSD-3-Clause
|
def test_lda_numerical_consistency(learning_method, global_random_seed):
"""Check numerical consistency between np.float32 and np.float64."""
rng = np.random.RandomState(global_random_seed)
X64 = rng.uniform(size=(20, 10))
X32 = X64.astype(np.float32)
lda_64 = LatentDirichletAllocation(
n_components=5, random_state=global_random_seed, learning_method=learning_method
).fit(X64)
lda_32 = LatentDirichletAllocation(
n_components=5, random_state=global_random_seed, learning_method=learning_method
).fit(X32)
assert_allclose(lda_32.components_, lda_64.components_)
assert_allclose(lda_32.transform(X32), lda_64.transform(X64))
|
Check numerical consistency between np.float32 and np.float64.
|
test_lda_numerical_consistency
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/tests/test_online_lda.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/tests/test_online_lda.py
|
BSD-3-Clause
|
def test_pca_sparse(
global_random_seed, svd_solver, sparse_container, n_components, density, scale
):
"""Check that the results are the same for sparse and dense input."""
# Set atol in addition to the default rtol to account for the very wide range of
# result values (1e-8 to 1e0).
atol = 1e-12
transform_atol = 1e-10
random_state = np.random.default_rng(global_random_seed)
X = sparse_container(
sp.sparse.random(
SPARSE_M,
SPARSE_N,
random_state=random_state,
density=density,
)
)
# Scale the data + vary the column means
scale_vector = random_state.random(X.shape[1]) * scale
X = X.multiply(scale_vector)
pca = PCA(
n_components=n_components,
svd_solver=svd_solver,
random_state=global_random_seed,
)
pca.fit(X)
Xd = X.toarray()
pcad = PCA(
n_components=n_components,
svd_solver=svd_solver,
random_state=global_random_seed,
)
pcad.fit(Xd)
# Fitted attributes equality
_check_fitted_pca_close(pca, pcad, atol=atol)
# Test transform
X2 = sparse_container(
sp.sparse.random(
SPARSE_M,
SPARSE_N,
random_state=random_state,
density=density,
)
)
X2d = X2.toarray()
assert_allclose(pca.transform(X2), pca.transform(X2d), atol=transform_atol)
assert_allclose(pca.transform(X2), pcad.transform(X2d), atol=transform_atol)
|
Check that the results are the same for sparse and dense input.
|
test_pca_sparse
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/tests/test_pca.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/tests/test_pca.py
|
BSD-3-Clause
|
def test_sparse_pca_auto_arpack_singluar_values_consistency(
global_random_seed, sparse_container
):
"""Check that "auto" and "arpack" solvers are equivalent for sparse inputs."""
random_state = np.random.RandomState(global_random_seed)
X = sparse_container(
sp.sparse.random(
SPARSE_M,
SPARSE_N,
random_state=random_state,
)
)
pca_arpack = PCA(n_components=10, svd_solver="arpack").fit(X)
pca_auto = PCA(n_components=10, svd_solver="auto").fit(X)
assert_allclose(pca_arpack.singular_values_, pca_auto.singular_values_, rtol=5e-3)
|
Check that "auto" and "arpack" solvers are equivalent for sparse inputs.
|
test_sparse_pca_auto_arpack_singluar_values_consistency
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/tests/test_pca.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/tests/test_pca.py
|
BSD-3-Clause
|
def test_pca_randomized_svd_n_oversamples():
"""Check that exposing and setting `n_oversamples` will provide accurate results
even when `X` has a large number of features.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/20589
"""
rng = np.random.RandomState(0)
n_features = 100
X = rng.randn(1_000, n_features)
# The default value of `n_oversamples` will lead to inaccurate results
# We force it to the number of features.
pca_randomized = PCA(
n_components=1,
svd_solver="randomized",
n_oversamples=n_features,
random_state=0,
).fit(X)
pca_full = PCA(n_components=1, svd_solver="full").fit(X)
pca_arpack = PCA(n_components=1, svd_solver="arpack", random_state=0).fit(X)
assert_allclose(np.abs(pca_full.components_), np.abs(pca_arpack.components_))
assert_allclose(np.abs(pca_randomized.components_), np.abs(pca_arpack.components_))
|
Check that exposing and setting `n_oversamples` will provide accurate results
even when `X` has a large number of features.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/20589
|
test_pca_randomized_svd_n_oversamples
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/tests/test_pca.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/tests/test_pca.py
|
BSD-3-Clause
|
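A hedged sketch of the knob being tested, calling `sklearn.utils.extmath.randomized_svd` directly (the randomized PCA solver forwards n_oversamples to it); on a wide random matrix with a nearly flat spectrum, more oversampling brings the leading singular vector closer to the exact one. The specific values printed are illustrative.

import numpy as np
from sklearn.utils.extmath import randomized_svd

rng = np.random.RandomState(0)
X = rng.randn(1_000, 100)
Xc = X - X.mean(axis=0)  # PCA operates on centered data

# Exact leading right singular vector (= first principal axis).
_, _, Vt_exact = np.linalg.svd(Xc, full_matrices=False)

def alignment(n_oversamples):
    _, _, Vt = randomized_svd(
        Xc, n_components=1, n_oversamples=n_oversamples, random_state=0
    )
    # Cosine similarity with the exact axis, up to sign.
    return abs(np.dot(Vt[0], Vt_exact[0]))

# The default oversampling (10) can be noticeably off on this matrix, while
# n_oversamples=n_features essentially recovers the exact axis.
print(alignment(10), alignment(100))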
def test_feature_names_out():
"""Check feature names out for PCA."""
pca = PCA(n_components=2).fit(iris.data)
names = pca.get_feature_names_out()
assert_array_equal([f"pca{i}" for i in range(2)], names)
|
Check feature names out for PCA.
|
test_feature_names_out
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/tests/test_pca.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/tests/test_pca.py
|
BSD-3-Clause
|