Dataset columns and string statistics:
  code        string, lengths 66 to 870k
  docstring   string, lengths 19 to 26.7k
  func_name   string, lengths 1 to 138
  language    string, 1 distinct value
  repo        string, lengths 7 to 68
  path        string, lengths 5 to 324
  url         string, lengths 46 to 389
  license     string, 7 distinct values
def test_metadata_routing_for_column_transformer(method): """Test that metadata is routed correctly for column transformer.""" X = np.array([[0, 1, 2], [2, 4, 6]]).T y = [1, 2, 3] registry = _Registry() sample_weight, metadata = [1], "a" trs = ColumnTransformer( [ ( "trans", ConsumingTransformer(registry=registry) .set_fit_request(sample_weight=True, metadata=True) .set_transform_request(sample_weight=True, metadata=True), [0], ) ] ) if method == "transform": trs.fit(X, y, sample_weight=sample_weight, metadata=metadata) trs.transform(X, sample_weight=sample_weight, metadata=metadata) else: getattr(trs, method)(X, y, sample_weight=sample_weight, metadata=metadata) assert len(registry) for _trs in registry: check_recorded_metadata( obj=_trs, method=method, parent=method, sample_weight=sample_weight, metadata=metadata, )
Test that metadata is routed correctly for column transformer.
test_metadata_routing_for_column_transformer
python
scikit-learn/scikit-learn
sklearn/compose/tests/test_column_transformer.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/compose/tests/test_column_transformer.py
BSD-3-Clause
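The routing test above exercises scikit-learn's metadata routing through ColumnTransformer. A minimal usage sketch, assuming scikit-learn >= 1.5 with routing enabled and using StandardScaler (whose fit accepts sample_weight) as the consuming sub-transformer; the data and weights are made up for illustration:

import numpy as np
from sklearn import set_config
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import StandardScaler

# Routing must be enabled globally before fit-time metadata can be passed through.
set_config(enable_metadata_routing=True)

X = np.array([[0.0, 2.0], [1.0, 4.0], [2.0, 6.0]])
sample_weight = np.array([1.0, 2.0, 1.0])

ct = ColumnTransformer(
    [
        (
            "scale",
            # A sub-estimator must explicitly request the metadata it consumes.
            StandardScaler().set_fit_request(sample_weight=True),
            [0],
        )
    ]
)
ct.fit(X, sample_weight=sample_weight)  # sample_weight is routed to StandardScaler.fit

set_config(enable_metadata_routing=False)  # restore the default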
def test_metadata_routing_no_fit_transform(): """Test metadata routing when the sub-estimator doesn't implement ``fit_transform``.""" class NoFitTransform(BaseEstimator): def fit(self, X, y=None, sample_weight=None, metadata=None): assert sample_weight assert metadata return self def transform(self, X, sample_weight=None, metadata=None): assert sample_weight assert metadata return X X = np.array([[0, 1, 2], [2, 4, 6]]).T y = [1, 2, 3] sample_weight, metadata = [1], "a" trs = ColumnTransformer( [ ( "trans", NoFitTransform() .set_fit_request(sample_weight=True, metadata=True) .set_transform_request(sample_weight=True, metadata=True), [0], ) ] ) trs.fit(X, y, sample_weight=sample_weight, metadata=metadata) trs.fit_transform(X, y, sample_weight=sample_weight, metadata=metadata)
Test metadata routing when the sub-estimator doesn't implement ``fit_transform``.
test_metadata_routing_no_fit_transform
python
scikit-learn/scikit-learn
sklearn/compose/tests/test_column_transformer.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/compose/tests/test_column_transformer.py
BSD-3-Clause
def test_metadata_routing_error_for_column_transformer(method): """Test that the right error is raised when metadata is not requested.""" X = np.array([[0, 1, 2], [2, 4, 6]]).T y = [1, 2, 3] sample_weight, metadata = [1], "a" trs = ColumnTransformer([("trans", ConsumingTransformer(), [0])]) error_message = ( "[sample_weight, metadata] are passed but are not explicitly set as requested" f" or not requested for ConsumingTransformer.{method}" ) with pytest.raises(ValueError, match=re.escape(error_message)): if method == "transform": trs.fit(X, y) trs.transform(X, sample_weight=sample_weight, metadata=metadata) else: getattr(trs, method)(X, y, sample_weight=sample_weight, metadata=metadata)
Test that the right error is raised when metadata is not requested.
test_metadata_routing_error_for_column_transformer
python
scikit-learn/scikit-learn
sklearn/compose/tests/test_column_transformer.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/compose/tests/test_column_transformer.py
BSD-3-Clause
def test_transform_target_regressor_not_warns_with_global_output_set(output_format): """Test that TransformedTargetRegressor will not raise warnings if set_config(transform_output="pandas"/"polars") is set globally; regression test for issue #29361.""" X, y = datasets.make_regression() y = np.abs(y) + 1 with config_context(transform_output=output_format): with warnings.catch_warnings(): warnings.simplefilter("error") TransformedTargetRegressor( regressor=LinearRegression(), func=np.log, inverse_func=np.exp ).fit(X, y)
Test that TransformedTargetRegressor will not raise warnings if set_config(transform_output="pandas"/"polars") is set globally; regression test for issue #29361.
test_transform_target_regressor_not_warns_with_global_output_set
python
scikit-learn/scikit-learn
sklearn/compose/tests/test_target.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/compose/tests/test_target.py
BSD-3-Clause
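The regression test above pins down that a globally configured transform output does not trigger spurious warnings. A minimal sketch of that configuration, assuming pandas is installed (required for transform_output="pandas"):

import numpy as np
from sklearn import config_context
from sklearn.compose import TransformedTargetRegressor
from sklearn.datasets import make_regression
from sklearn.linear_model import LinearRegression

X, y = make_regression(random_state=0)
y = np.abs(y) + 1  # keep the target strictly positive so np.log is well defined

with config_context(transform_output="pandas"):
    reg = TransformedTargetRegressor(
        regressor=LinearRegression(), func=np.log, inverse_func=np.exp
    ).fit(X, y)
    print(reg.predict(X[:3]))  # predictions come back on the original target scale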
def fit(self, X, y=None): """Fit the EllipticEnvelope model. Parameters ---------- X : array-like of shape (n_samples, n_features) Training data. y : Ignored Not used, present for API consistency by convention. Returns ------- self : object Returns the instance itself. """ super().fit(X) self.offset_ = np.percentile(-self.dist_, 100.0 * self.contamination) return self
Fit the EllipticEnvelope model. Parameters ---------- X : array-like of shape (n_samples, n_features) Training data. y : Ignored Not used, present for API consistency by convention. Returns ------- self : object Returns the instance itself.
fit
python
scikit-learn/scikit-learn
sklearn/covariance/_elliptic_envelope.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/covariance/_elliptic_envelope.py
BSD-3-Clause
def decision_function(self, X): """Compute the decision function of the given observations. Parameters ---------- X : array-like of shape (n_samples, n_features) The data matrix. Returns ------- decision : ndarray of shape (n_samples,) Decision function of the samples. It is equal to the shifted Mahalanobis distances. The threshold for being an outlier is 0, which ensures compatibility with other outlier detection algorithms. """ check_is_fitted(self) negative_mahal_dist = self.score_samples(X) return negative_mahal_dist - self.offset_
Compute the decision function of the given observations. Parameters ---------- X : array-like of shape (n_samples, n_features) The data matrix. Returns ------- decision : ndarray of shape (n_samples,) Decision function of the samples. It is equal to the shifted Mahalanobis distances. The threshold for being an outlier is 0, which ensures compatibility with other outlier detection algorithms.
decision_function
python
scikit-learn/scikit-learn
sklearn/covariance/_elliptic_envelope.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/covariance/_elliptic_envelope.py
BSD-3-Clause
def predict(self, X): """ Predict labels (1 inlier, -1 outlier) of X according to fitted model. Parameters ---------- X : array-like of shape (n_samples, n_features) The data matrix. Returns ------- is_inlier : ndarray of shape (n_samples,) Returns -1 for anomalies/outliers and +1 for inliers. """ values = self.decision_function(X) is_inlier = np.full(values.shape[0], -1, dtype=int) is_inlier[values >= 0] = 1 return is_inlier
Predict labels (1 inlier, -1 outlier) of X according to fitted model. Parameters ---------- X : array-like of shape (n_samples, n_features) The data matrix. Returns ------- is_inlier : ndarray of shape (n_samples,) Returns -1 for anomalies/outliers and +1 for inliers.
predict
python
scikit-learn/scikit-learn
sklearn/covariance/_elliptic_envelope.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/covariance/_elliptic_envelope.py
BSD-3-Clause
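Taken together, the three EllipticEnvelope methods above are consistent by construction: decision_function shifts the negated Mahalanobis-based score by offset_, and predict thresholds the result at zero. A small sketch on synthetic Gaussian data (values depend on the random sample):

import numpy as np
from sklearn.covariance import EllipticEnvelope

rng = np.random.RandomState(0)
X = rng.randn(200, 2)

env = EllipticEnvelope(contamination=0.1, random_state=0).fit(X)

# predict() is exactly decision_function() thresholded at 0.
assert np.array_equal(env.predict(X) == 1, env.decision_function(X) >= 0)

# With contamination=0.1, roughly 10% of the training points are flagged.
print((env.predict(X) == -1).mean())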
def log_likelihood(emp_cov, precision): """Compute the sample mean of the log_likelihood under a covariance model. Computes the empirical expected log-likelihood, allowing for universal comparison (beyond this software package), and accounts for normalization terms and scaling. Parameters ---------- emp_cov : ndarray of shape (n_features, n_features) Maximum Likelihood Estimator of covariance. precision : ndarray of shape (n_features, n_features) The precision matrix of the covariance model to be tested. Returns ------- log_likelihood_ : float Sample mean of the log-likelihood. """ p = precision.shape[0] log_likelihood_ = -np.sum(emp_cov * precision) + fast_logdet(precision) log_likelihood_ -= p * np.log(2 * np.pi) log_likelihood_ /= 2.0 return log_likelihood_
Compute the sample mean of the log_likelihood under a covariance model. Computes the empirical expected log-likelihood, allowing for universal comparison (beyond this software package), and accounts for normalization terms and scaling. Parameters ---------- emp_cov : ndarray of shape (n_features, n_features) Maximum Likelihood Estimator of covariance. precision : ndarray of shape (n_features, n_features) The precision matrix of the covariance model to be tested. Returns ------- log_likelihood_ : float Sample mean of the log-likelihood.
log_likelihood
python
scikit-learn/scikit-learn
sklearn/covariance/_empirical_covariance.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/covariance/_empirical_covariance.py
BSD-3-Clause
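Since log_likelihood returns the per-sample average Gaussian log-density, it can be cross-checked against SciPy. A sketch assuming centered data and SciPy available:

import numpy as np
from scipy.stats import multivariate_normal
from sklearn.covariance import empirical_covariance, log_likelihood

rng = np.random.RandomState(0)
cov = np.array([[2.0, 0.3], [0.3, 1.0]])
X = rng.multivariate_normal(np.zeros(2), cov, size=500)

emp_cov = empirical_covariance(X, assume_centered=True)
precision = np.linalg.inv(cov)

# Sample mean of log N(x; 0, cov), computed two ways; they agree to numerical precision.
ll_sklearn = log_likelihood(emp_cov, precision)
ll_scipy = multivariate_normal(mean=np.zeros(2), cov=cov).logpdf(X).mean()
print(ll_sklearn, ll_scipy)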
def empirical_covariance(X, *, assume_centered=False): """Compute the Maximum likelihood covariance estimator. Parameters ---------- X : ndarray of shape (n_samples, n_features) Data from which to compute the covariance estimate. assume_centered : bool, default=False If `True`, data will not be centered before computation. Useful when working with data whose mean is almost, but not exactly zero. If `False`, data will be centered before computation. Returns ------- covariance : ndarray of shape (n_features, n_features) Empirical covariance (Maximum Likelihood Estimator). Examples -------- >>> from sklearn.covariance import empirical_covariance >>> X = [[1,1,1],[1,1,1],[1,1,1], ... [0,0,0],[0,0,0],[0,0,0]] >>> empirical_covariance(X) array([[0.25, 0.25, 0.25], [0.25, 0.25, 0.25], [0.25, 0.25, 0.25]]) """ X = check_array(X, ensure_2d=False, ensure_all_finite=False) if X.ndim == 1: X = np.reshape(X, (1, -1)) if X.shape[0] == 1: warnings.warn( "Only one sample available. You may want to reshape your data array" ) if assume_centered: covariance = np.dot(X.T, X) / X.shape[0] else: covariance = np.cov(X.T, bias=1) if covariance.ndim == 0: covariance = np.array([[covariance]]) return covariance
Compute the Maximum likelihood covariance estimator. Parameters ---------- X : ndarray of shape (n_samples, n_features) Data from which to compute the covariance estimate. assume_centered : bool, default=False If `True`, data will not be centered before computation. Useful when working with data whose mean is almost, but not exactly zero. If `False`, data will be centered before computation. Returns ------- covariance : ndarray of shape (n_features, n_features) Empirical covariance (Maximum Likelihood Estimator). Examples -------- >>> from sklearn.covariance import empirical_covariance >>> X = [[1,1,1],[1,1,1],[1,1,1], ... [0,0,0],[0,0,0],[0,0,0]] >>> empirical_covariance(X) array([[0.25, 0.25, 0.25], [0.25, 0.25, 0.25], [0.25, 0.25, 0.25]])
empirical_covariance
python
scikit-learn/scikit-learn
sklearn/covariance/_empirical_covariance.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/covariance/_empirical_covariance.py
BSD-3-Clause
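For 2-D input with more than one sample, the default path (centering, then the biased 1/n normalisation) matches NumPy's biased estimator, and assume_centered=True reduces to X.T @ X / n_samples; a quick check:

import numpy as np
from sklearn.covariance import empirical_covariance

rng = np.random.RandomState(0)
X = rng.randn(100, 3)

# Default: center the data, then use the maximum-likelihood normalisation 1/n.
assert np.allclose(empirical_covariance(X), np.cov(X.T, bias=1))

# assume_centered=True skips centering entirely.
assert np.allclose(
    empirical_covariance(X, assume_centered=True), X.T @ X / X.shape[0]
)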
def _set_covariance(self, covariance): """Save the covariance and precision estimates. Storage is done according to `self.store_precision`. The precision is stored only if invertible. Parameters ---------- covariance : array-like of shape (n_features, n_features) Estimated covariance matrix to be stored, and from which precision is computed. """ covariance = check_array(covariance) # set covariance self.covariance_ = covariance # set precision if self.store_precision: self.precision_ = linalg.pinvh(covariance, check_finite=False) else: self.precision_ = None
Save the covariance and precision estimates. Storage is done according to `self.store_precision`. The precision is stored only if invertible. Parameters ---------- covariance : array-like of shape (n_features, n_features) Estimated covariance matrix to be stored, and from which precision is computed.
_set_covariance
python
scikit-learn/scikit-learn
sklearn/covariance/_empirical_covariance.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/covariance/_empirical_covariance.py
BSD-3-Clause
def get_precision(self): """Getter for the precision matrix. Returns ------- precision_ : array-like of shape (n_features, n_features) The precision matrix associated to the current covariance object. """ if self.store_precision: precision = self.precision_ else: precision = linalg.pinvh(self.covariance_, check_finite=False) return precision
Getter for the precision matrix. Returns ------- precision_ : array-like of shape (n_features, n_features) The precision matrix associated to the current covariance object.
get_precision
python
scikit-learn/scikit-learn
sklearn/covariance/_empirical_covariance.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/covariance/_empirical_covariance.py
BSD-3-Clause
def fit(self, X, y=None): """Fit the maximum likelihood covariance estimator to X. Parameters ---------- X : array-like of shape (n_samples, n_features) Training data, where `n_samples` is the number of samples and `n_features` is the number of features. y : Ignored Not used, present for API consistency by convention. Returns ------- self : object Returns the instance itself. """ X = validate_data(self, X) if self.assume_centered: self.location_ = np.zeros(X.shape[1]) else: self.location_ = X.mean(0) covariance = empirical_covariance(X, assume_centered=self.assume_centered) self._set_covariance(covariance) return self
Fit the maximum likelihood covariance estimator to X. Parameters ---------- X : array-like of shape (n_samples, n_features) Training data, where `n_samples` is the number of samples and `n_features` is the number of features. y : Ignored Not used, present for API consistency by convention. Returns ------- self : object Returns the instance itself.
fit
python
scikit-learn/scikit-learn
sklearn/covariance/_empirical_covariance.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/covariance/_empirical_covariance.py
BSD-3-Clause
def score(self, X_test, y=None): """Compute the log-likelihood of `X_test` under the estimated Gaussian model. The Gaussian model is defined by its mean and covariance matrix which are represented respectively by `self.location_` and `self.covariance_`. Parameters ---------- X_test : array-like of shape (n_samples, n_features) Test data of which we compute the likelihood, where `n_samples` is the number of samples and `n_features` is the number of features. `X_test` is assumed to be drawn from the same distribution than the data used in fit (including centering). y : Ignored Not used, present for API consistency by convention. Returns ------- res : float The log-likelihood of `X_test` with `self.location_` and `self.covariance_` as estimators of the Gaussian model mean and covariance matrix respectively. """ X_test = validate_data(self, X_test, reset=False) # compute empirical covariance of the test set test_cov = empirical_covariance(X_test - self.location_, assume_centered=True) # compute log likelihood res = log_likelihood(test_cov, self.get_precision()) return res
Compute the log-likelihood of `X_test` under the estimated Gaussian model. The Gaussian model is defined by its mean and covariance matrix which are represented respectively by `self.location_` and `self.covariance_`. Parameters ---------- X_test : array-like of shape (n_samples, n_features) Test data of which we compute the likelihood, where `n_samples` is the number of samples and `n_features` is the number of features. `X_test` is assumed to be drawn from the same distribution than the data used in fit (including centering). y : Ignored Not used, present for API consistency by convention. Returns ------- res : float The log-likelihood of `X_test` with `self.location_` and `self.covariance_` as estimators of the Gaussian model mean and covariance matrix respectively.
score
python
scikit-learn/scikit-learn
sklearn/covariance/_empirical_covariance.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/covariance/_empirical_covariance.py
BSD-3-Clause
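The score method above is log_likelihood evaluated on the held-out empirical covariance; a sketch verifying that relationship on toy data:

import numpy as np
from sklearn.covariance import (
    EmpiricalCovariance,
    empirical_covariance,
    log_likelihood,
)

rng = np.random.RandomState(0)
X_train, X_test = rng.randn(200, 3), rng.randn(50, 3)

est = EmpiricalCovariance().fit(X_train)

# Center the test data with the training location, as score() does internally.
test_cov = empirical_covariance(X_test - est.location_, assume_centered=True)
assert np.isclose(est.score(X_test), log_likelihood(test_cov, est.get_precision()))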
def error_norm(self, comp_cov, norm="frobenius", scaling=True, squared=True): """Compute the Mean Squared Error between two covariance estimators. Parameters ---------- comp_cov : array-like of shape (n_features, n_features) The covariance to compare with. norm : {"frobenius", "spectral"}, default="frobenius" The type of norm used to compute the error. Available error types: - 'frobenius' (default): sqrt(tr(A^t.A)) - 'spectral': sqrt(max(eigenvalues(A^t.A)) where A is the error ``(comp_cov - self.covariance_)``. scaling : bool, default=True If True (default), the squared error norm is divided by n_features. If False, the squared error norm is not rescaled. squared : bool, default=True Whether to compute the squared error norm or the error norm. If True (default), the squared error norm is returned. If False, the error norm is returned. Returns ------- result : float The Mean Squared Error (in the sense of the Frobenius norm) between `self` and `comp_cov` covariance estimators. """ # compute the error error = comp_cov - self.covariance_ # compute the error norm if norm == "frobenius": squared_norm = np.sum(error**2) elif norm == "spectral": squared_norm = np.amax(linalg.svdvals(np.dot(error.T, error))) else: raise NotImplementedError( "Only spectral and frobenius norms are implemented" ) # optionally scale the error norm if scaling: squared_norm = squared_norm / error.shape[0] # finally get either the squared norm or the norm if squared: result = squared_norm else: result = np.sqrt(squared_norm) return result
Compute the Mean Squared Error between two covariance estimators. Parameters ---------- comp_cov : array-like of shape (n_features, n_features) The covariance to compare with. norm : {"frobenius", "spectral"}, default="frobenius" The type of norm used to compute the error. Available error types: - 'frobenius' (default): sqrt(tr(A^t.A)) - 'spectral': sqrt(max(eigenvalues(A^t.A)) where A is the error ``(comp_cov - self.covariance_)``. scaling : bool, default=True If True (default), the squared error norm is divided by n_features. If False, the squared error norm is not rescaled. squared : bool, default=True Whether to compute the squared error norm or the error norm. If True (default), the squared error norm is returned. If False, the error norm is returned. Returns ------- result : float The Mean Squared Error (in the sense of the Frobenius norm) between `self` and `comp_cov` covariance estimators.
error_norm
python
scikit-learn/scikit-learn
sklearn/covariance/_empirical_covariance.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/covariance/_empirical_covariance.py
BSD-3-Clause
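With the default settings, error_norm is the squared Frobenius norm of the difference divided by n_features; the spectral variant uses the largest singular value. A short check against direct NumPy computations (comp_cov is an arbitrary comparison matrix chosen for illustration):

import numpy as np
from sklearn.covariance import EmpiricalCovariance

rng = np.random.RandomState(0)
X = rng.randn(200, 4)
est = EmpiricalCovariance().fit(X)

comp_cov = np.eye(4)
error = comp_cov - est.covariance_
n_features = error.shape[0]

# Default: squared Frobenius norm, scaled by n_features.
assert np.isclose(est.error_norm(comp_cov), (error**2).sum() / n_features)

# Spectral norm, unscaled and not squared.
spectral = np.sqrt(np.linalg.svd(error.T @ error, compute_uv=False).max())
assert np.isclose(
    est.error_norm(comp_cov, norm="spectral", scaling=False, squared=False), spectral
)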
def mahalanobis(self, X): """Compute the squared Mahalanobis distances of given observations. Parameters ---------- X : array-like of shape (n_samples, n_features) The observations whose Mahalanobis distances are computed. Observations are assumed to be drawn from the same distribution as the data used in fit. Returns ------- dist : ndarray of shape (n_samples,) Squared Mahalanobis distances of the observations. """ X = validate_data(self, X, reset=False) precision = self.get_precision() with config_context(assume_finite=True): # compute mahalanobis distances dist = pairwise_distances( X, self.location_[np.newaxis, :], metric="mahalanobis", VI=precision ) return np.reshape(dist, (len(X),)) ** 2
Compute the squared Mahalanobis distances of given observations. Parameters ---------- X : array-like of shape (n_samples, n_features) The observations whose Mahalanobis distances are computed. Observations are assumed to be drawn from the same distribution as the data used in fit. Returns ------- dist : ndarray of shape (n_samples,) Squared Mahalanobis distances of the observations.
mahalanobis
python
scikit-learn/scikit-learn
sklearn/covariance/_empirical_covariance.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/covariance/_empirical_covariance.py
BSD-3-Clause
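mahalanobis returns the squared form (x - location_)^T precision (x - location_); a sketch verifying it against a direct NumPy computation:

import numpy as np
from sklearn.covariance import EmpiricalCovariance

rng = np.random.RandomState(0)
X = rng.randn(300, 3)
est = EmpiricalCovariance().fit(X)

X_new = rng.randn(5, 3)
centered = X_new - est.location_
manual = np.einsum("ij,jk,ik->i", centered, est.get_precision(), centered)
assert np.allclose(est.mahalanobis(X_new), manual)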
def _objective(mle, precision_, alpha): """Evaluation of the graphical-lasso objective function. The objective function is made of a shifted, scaled version of the normalized log-likelihood (i.e. its empirical mean over the samples) and a penalisation term to promote sparsity. """ p = precision_.shape[0] cost = -2.0 * log_likelihood(mle, precision_) + p * np.log(2 * np.pi) cost += alpha * (np.abs(precision_).sum() - np.abs(np.diag(precision_)).sum()) return cost
Evaluation of the graphical-lasso objective function. The objective function is made of a shifted, scaled version of the normalized log-likelihood (i.e. its empirical mean over the samples) and a penalisation term to promote sparsity.
_objective
python
scikit-learn/scikit-learn
sklearn/covariance/_graph_lasso.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/covariance/_graph_lasso.py
BSD-3-Clause
def _dual_gap(emp_cov, precision_, alpha): """Expression of the dual gap convergence criterion. The specific definition is given in Duchi, "Projected Subgradient Methods for Learning Sparse Gaussians". """ gap = np.sum(emp_cov * precision_) gap -= precision_.shape[0] gap += alpha * (np.abs(precision_).sum() - np.abs(np.diag(precision_)).sum()) return gap
Expression of the dual gap convergence criterion. The specific definition is given in Duchi, "Projected Subgradient Methods for Learning Sparse Gaussians".
_dual_gap
python
scikit-learn/scikit-learn
sklearn/covariance/_graph_lasso.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/covariance/_graph_lasso.py
BSD-3-Clause
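Both helpers above are private, but the quantities they compute can be reproduced from public pieces: up to the p*log(2*pi) constants, the objective is tr(S @ Theta) - logdet(Theta) plus the l1 penalty on the off-diagonal entries, and the dual gap tr(S @ Theta) - p + penalty drops below tol at convergence. A sketch under those definitions:

import numpy as np
from sklearn.covariance import GraphicalLasso, empirical_covariance

rng = np.random.RandomState(0)
X = rng.multivariate_normal(np.zeros(3), np.eye(3), size=500)

alpha = 0.05
model = GraphicalLasso(alpha=alpha).fit(X)

S = empirical_covariance(X)  # same (centered) empirical covariance the model uses
Theta = model.precision_
l1_offdiag = np.abs(Theta).sum() - np.abs(np.diag(Theta)).sum()

objective = np.sum(S * Theta) - np.linalg.slogdet(Theta)[1] + alpha * l1_offdiag
dual_gap = np.sum(S * Theta) - Theta.shape[0] + alpha * l1_offdiag
print(objective)
print(dual_gap)  # expected to be below the default tol=1e-4 after convergence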
def alpha_max(emp_cov): """Find the maximum alpha for which there are some non-zero off-diagonal entries. Parameters ---------- emp_cov : ndarray of shape (n_features, n_features) The sample covariance matrix. Notes ----- This results from the bound for all the Lasso problems that are solved in GraphicalLasso: each time, the row of cov corresponds to Xy. As the bound for alpha is given by `max(abs(Xy))`, the result follows. """ A = np.copy(emp_cov) A.flat[:: A.shape[0] + 1] = 0 return np.max(np.abs(A))
Find the maximum alpha for which there are some non-zero off-diagonal entries. Parameters ---------- emp_cov : ndarray of shape (n_features, n_features) The sample covariance matrix. Notes ----- This results from the bound for all the Lasso problems that are solved in GraphicalLasso: each time, the row of cov corresponds to Xy. As the bound for alpha is given by `max(abs(Xy))`, the result follows.
alpha_max
python
scikit-learn/scikit-learn
sklearn/covariance/_graph_lasso.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/covariance/_graph_lasso.py
BSD-3-Clause
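The bound in alpha_max implies that for any penalty at or above the largest off-diagonal entry of the empirical covariance, the estimated precision matrix is diagonal. A sketch computing the bound directly (alpha_max itself is a module-level helper rather than part of the public API):

import numpy as np
from sklearn.covariance import GraphicalLasso, empirical_covariance
from sklearn.datasets import make_sparse_spd_matrix

rng = np.random.RandomState(0)
true_cov = make_sparse_spd_matrix(n_dim=4, random_state=0)
X = rng.multivariate_normal(np.zeros(4), true_cov, size=300)

emp_cov = empirical_covariance(X)
A = emp_cov.copy()
np.fill_diagonal(A, 0.0)
alpha_bound = np.abs(A).max()  # the same quantity alpha_max(emp_cov) returns

model = GraphicalLasso(alpha=1.01 * alpha_bound).fit(X)
offdiag = model.precision_ - np.diag(np.diag(model.precision_))
print(np.abs(offdiag).max())  # expected to be (numerically) zero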
def graphical_lasso( emp_cov, alpha, *, mode="cd", tol=1e-4, enet_tol=1e-4, max_iter=100, verbose=False, return_costs=False, eps=np.finfo(np.float64).eps, return_n_iter=False, ): """L1-penalized covariance estimator. Read more in the :ref:`User Guide <sparse_inverse_covariance>`. .. versionchanged:: v0.20 graph_lasso has been renamed to graphical_lasso Parameters ---------- emp_cov : array-like of shape (n_features, n_features) Empirical covariance from which to compute the covariance estimate. alpha : float The regularization parameter: the higher alpha, the more regularization, the sparser the inverse covariance. Range is (0, inf]. mode : {'cd', 'lars'}, default='cd' The Lasso solver to use: coordinate descent or LARS. Use LARS for very sparse underlying graphs, where p > n. Elsewhere prefer cd which is more numerically stable. tol : float, default=1e-4 The tolerance to declare convergence: if the dual gap goes below this value, iterations are stopped. Range is (0, inf]. enet_tol : float, default=1e-4 The tolerance for the elastic net solver used to calculate the descent direction. This parameter controls the accuracy of the search direction for a given column update, not of the overall parameter estimate. Only used for mode='cd'. Range is (0, inf]. max_iter : int, default=100 The maximum number of iterations. verbose : bool, default=False If verbose is True, the objective function and dual gap are printed at each iteration. return_costs : bool, default=False If return_costs is True, the objective function and dual gap at each iteration are returned. eps : float, default=eps The machine-precision regularization in the computation of the Cholesky diagonal factors. Increase this for very ill-conditioned systems. Default is `np.finfo(np.float64).eps`. return_n_iter : bool, default=False Whether or not to return the number of iterations. Returns ------- covariance : ndarray of shape (n_features, n_features) The estimated covariance matrix. precision : ndarray of shape (n_features, n_features) The estimated (sparse) precision matrix. costs : list of (objective, dual_gap) pairs The list of values of the objective function and the dual gap at each iteration. Returned only if return_costs is True. n_iter : int Number of iterations. Returned only if `return_n_iter` is set to True. See Also -------- GraphicalLasso : Sparse inverse covariance estimation with an l1-penalized estimator. GraphicalLassoCV : Sparse inverse covariance with cross-validated choice of the l1 penalty. Notes ----- The algorithm employed to solve this problem is the GLasso algorithm, from the Friedman 2008 Biostatistics paper. It is the same algorithm as in the R `glasso` package. One possible difference with the `glasso` R package is that the diagonal coefficients are not penalized. 
Examples -------- >>> import numpy as np >>> from sklearn.datasets import make_sparse_spd_matrix >>> from sklearn.covariance import empirical_covariance, graphical_lasso >>> true_cov = make_sparse_spd_matrix(n_dim=3,random_state=42) >>> rng = np.random.RandomState(42) >>> X = rng.multivariate_normal(mean=np.zeros(3), cov=true_cov, size=3) >>> emp_cov = empirical_covariance(X, assume_centered=True) >>> emp_cov, _ = graphical_lasso(emp_cov, alpha=0.05) >>> emp_cov array([[ 1.687, 0.212, -0.209], [ 0.212, 0.221, -0.0817], [-0.209, -0.0817, 0.232]]) """ model = GraphicalLasso( alpha=alpha, mode=mode, covariance="precomputed", tol=tol, enet_tol=enet_tol, max_iter=max_iter, verbose=verbose, eps=eps, assume_centered=True, ).fit(emp_cov) output = [model.covariance_, model.precision_] if return_costs: output.append(model.costs_) if return_n_iter: output.append(model.n_iter_) return tuple(output)
L1-penalized covariance estimator. Read more in the :ref:`User Guide <sparse_inverse_covariance>`. .. versionchanged:: v0.20 graph_lasso has been renamed to graphical_lasso Parameters ---------- emp_cov : array-like of shape (n_features, n_features) Empirical covariance from which to compute the covariance estimate. alpha : float The regularization parameter: the higher alpha, the more regularization, the sparser the inverse covariance. Range is (0, inf]. mode : {'cd', 'lars'}, default='cd' The Lasso solver to use: coordinate descent or LARS. Use LARS for very sparse underlying graphs, where p > n. Elsewhere prefer cd which is more numerically stable. tol : float, default=1e-4 The tolerance to declare convergence: if the dual gap goes below this value, iterations are stopped. Range is (0, inf]. enet_tol : float, default=1e-4 The tolerance for the elastic net solver used to calculate the descent direction. This parameter controls the accuracy of the search direction for a given column update, not of the overall parameter estimate. Only used for mode='cd'. Range is (0, inf]. max_iter : int, default=100 The maximum number of iterations. verbose : bool, default=False If verbose is True, the objective function and dual gap are printed at each iteration. return_costs : bool, default=False If return_costs is True, the objective function and dual gap at each iteration are returned. eps : float, default=eps The machine-precision regularization in the computation of the Cholesky diagonal factors. Increase this for very ill-conditioned systems. Default is `np.finfo(np.float64).eps`. return_n_iter : bool, default=False Whether or not to return the number of iterations. Returns ------- covariance : ndarray of shape (n_features, n_features) The estimated covariance matrix. precision : ndarray of shape (n_features, n_features) The estimated (sparse) precision matrix. costs : list of (objective, dual_gap) pairs The list of values of the objective function and the dual gap at each iteration. Returned only if return_costs is True. n_iter : int Number of iterations. Returned only if `return_n_iter` is set to True. See Also -------- GraphicalLasso : Sparse inverse covariance estimation with an l1-penalized estimator. GraphicalLassoCV : Sparse inverse covariance with cross-validated choice of the l1 penalty. Notes ----- The algorithm employed to solve this problem is the GLasso algorithm, from the Friedman 2008 Biostatistics paper. It is the same algorithm as in the R `glasso` package. One possible difference with the `glasso` R package is that the diagonal coefficients are not penalized. Examples -------- >>> import numpy as np >>> from sklearn.datasets import make_sparse_spd_matrix >>> from sklearn.covariance import empirical_covariance, graphical_lasso >>> true_cov = make_sparse_spd_matrix(n_dim=3,random_state=42) >>> rng = np.random.RandomState(42) >>> X = rng.multivariate_normal(mean=np.zeros(3), cov=true_cov, size=3) >>> emp_cov = empirical_covariance(X, assume_centered=True) >>> emp_cov, _ = graphical_lasso(emp_cov, alpha=0.05) >>> emp_cov array([[ 1.687, 0.212, -0.209], [ 0.212, 0.221, -0.0817], [-0.209, -0.0817, 0.232]])
graphical_lasso
python
scikit-learn/scikit-learn
sklearn/covariance/_graph_lasso.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/covariance/_graph_lasso.py
BSD-3-Clause
def fit(self, X, y=None): """Fit the GraphicalLasso model to X. Parameters ---------- X : array-like of shape (n_samples, n_features) Data from which to compute the covariance estimate. y : Ignored Not used, present for API consistency by convention. Returns ------- self : object Returns the instance itself. """ # Covariance does not make sense for a single feature X = validate_data(self, X, ensure_min_features=2, ensure_min_samples=2) if self.covariance == "precomputed": emp_cov = X.copy() self.location_ = np.zeros(X.shape[1]) else: emp_cov = empirical_covariance(X, assume_centered=self.assume_centered) if self.assume_centered: self.location_ = np.zeros(X.shape[1]) else: self.location_ = X.mean(0) self.covariance_, self.precision_, self.costs_, self.n_iter_ = _graphical_lasso( emp_cov, alpha=self.alpha, cov_init=None, mode=self.mode, tol=self.tol, enet_tol=self.enet_tol, max_iter=self.max_iter, verbose=self.verbose, eps=self.eps, ) return self
Fit the GraphicalLasso model to X. Parameters ---------- X : array-like of shape (n_samples, n_features) Data from which to compute the covariance estimate. y : Ignored Not used, present for API consistency by convention. Returns ------- self : object Returns the instance itself.
fit
python
scikit-learn/scikit-learn
sklearn/covariance/_graph_lasso.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/covariance/_graph_lasso.py
BSD-3-Clause
def graphical_lasso_path( X, alphas, cov_init=None, X_test=None, mode="cd", tol=1e-4, enet_tol=1e-4, max_iter=100, verbose=False, eps=np.finfo(np.float64).eps, ): """l1-penalized covariance estimator along a path of decreasing alphas Read more in the :ref:`User Guide <sparse_inverse_covariance>`. Parameters ---------- X : ndarray of shape (n_samples, n_features) Data from which to compute the covariance estimate. alphas : array-like of shape (n_alphas,) The list of regularization parameters, decreasing order. cov_init : array of shape (n_features, n_features), default=None The initial guess for the covariance. X_test : array of shape (n_test_samples, n_features), default=None Optional test matrix to measure generalisation error. mode : {'cd', 'lars'}, default='cd' The Lasso solver to use: coordinate descent or LARS. Use LARS for very sparse underlying graphs, where p > n. Elsewhere prefer cd which is more numerically stable. tol : float, default=1e-4 The tolerance to declare convergence: if the dual gap goes below this value, iterations are stopped. The tolerance must be a positive number. enet_tol : float, default=1e-4 The tolerance for the elastic net solver used to calculate the descent direction. This parameter controls the accuracy of the search direction for a given column update, not of the overall parameter estimate. Only used for mode='cd'. The tolerance must be a positive number. max_iter : int, default=100 The maximum number of iterations. This parameter should be a strictly positive integer. verbose : int or bool, default=False The higher the verbosity flag, the more information is printed during the fitting. eps : float, default=eps The machine-precision regularization in the computation of the Cholesky diagonal factors. Increase this for very ill-conditioned systems. Default is `np.finfo(np.float64).eps`. .. versionadded:: 1.3 Returns ------- covariances_ : list of shape (n_alphas,) of ndarray of shape \ (n_features, n_features) The estimated covariance matrices. precisions_ : list of shape (n_alphas,) of ndarray of shape \ (n_features, n_features) The estimated (sparse) precision matrices. scores_ : list of shape (n_alphas,), dtype=float The generalisation error (log-likelihood) on the test data. Returned only if test data is passed. """ inner_verbose = max(0, verbose - 1) emp_cov = empirical_covariance(X) if cov_init is None: covariance_ = emp_cov.copy() else: covariance_ = cov_init covariances_ = list() precisions_ = list() scores_ = list() if X_test is not None: test_emp_cov = empirical_covariance(X_test) for alpha in alphas: try: # Capture the errors, and move on covariance_, precision_, _, _ = _graphical_lasso( emp_cov, alpha=alpha, cov_init=covariance_, mode=mode, tol=tol, enet_tol=enet_tol, max_iter=max_iter, verbose=inner_verbose, eps=eps, ) covariances_.append(covariance_) precisions_.append(precision_) if X_test is not None: this_score = log_likelihood(test_emp_cov, precision_) except FloatingPointError: this_score = -np.inf covariances_.append(np.nan) precisions_.append(np.nan) if X_test is not None: if not np.isfinite(this_score): this_score = -np.inf scores_.append(this_score) if verbose == 1: sys.stderr.write(".") elif verbose > 1: if X_test is not None: print( "[graphical_lasso_path] alpha: %.2e, score: %.2e" % (alpha, this_score) ) else: print("[graphical_lasso_path] alpha: %.2e" % alpha) if X_test is not None: return covariances_, precisions_, scores_ return covariances_, precisions_
l1-penalized covariance estimator along a path of decreasing alphas Read more in the :ref:`User Guide <sparse_inverse_covariance>`. Parameters ---------- X : ndarray of shape (n_samples, n_features) Data from which to compute the covariance estimate. alphas : array-like of shape (n_alphas,) The list of regularization parameters, decreasing order. cov_init : array of shape (n_features, n_features), default=None The initial guess for the covariance. X_test : array of shape (n_test_samples, n_features), default=None Optional test matrix to measure generalisation error. mode : {'cd', 'lars'}, default='cd' The Lasso solver to use: coordinate descent or LARS. Use LARS for very sparse underlying graphs, where p > n. Elsewhere prefer cd which is more numerically stable. tol : float, default=1e-4 The tolerance to declare convergence: if the dual gap goes below this value, iterations are stopped. The tolerance must be a positive number. enet_tol : float, default=1e-4 The tolerance for the elastic net solver used to calculate the descent direction. This parameter controls the accuracy of the search direction for a given column update, not of the overall parameter estimate. Only used for mode='cd'. The tolerance must be a positive number. max_iter : int, default=100 The maximum number of iterations. This parameter should be a strictly positive integer. verbose : int or bool, default=False The higher the verbosity flag, the more information is printed during the fitting. eps : float, default=eps The machine-precision regularization in the computation of the Cholesky diagonal factors. Increase this for very ill-conditioned systems. Default is `np.finfo(np.float64).eps`. .. versionadded:: 1.3 Returns ------- covariances_ : list of shape (n_alphas,) of ndarray of shape (n_features, n_features) The estimated covariance matrices. precisions_ : list of shape (n_alphas,) of ndarray of shape (n_features, n_features) The estimated (sparse) precision matrices. scores_ : list of shape (n_alphas,), dtype=float The generalisation error (log-likelihood) on the test data. Returned only if test data is passed.
graphical_lasso_path
python
scikit-learn/scikit-learn
sklearn/covariance/_graph_lasso.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/covariance/_graph_lasso.py
BSD-3-Clause
def fit(self, X, y=None, **params): """Fit the GraphicalLasso covariance model to X. Parameters ---------- X : array-like of shape (n_samples, n_features) Data from which to compute the covariance estimate. y : Ignored Not used, present for API consistency by convention. **params : dict, default=None Parameters to be passed to the CV splitter and the cross_val_score function. .. versionadded:: 1.5 Only available if `enable_metadata_routing=True`, which can be set by using ``sklearn.set_config(enable_metadata_routing=True)``. See :ref:`Metadata Routing User Guide <metadata_routing>` for more details. Returns ------- self : object Returns the instance itself. """ # Covariance does not make sense for a single feature _raise_for_params(params, self, "fit") X = validate_data(self, X, ensure_min_features=2) if self.assume_centered: self.location_ = np.zeros(X.shape[1]) else: self.location_ = X.mean(0) emp_cov = empirical_covariance(X, assume_centered=self.assume_centered) cv = check_cv(self.cv, y, classifier=False) # List of (alpha, scores, covs) path = list() n_alphas = self.alphas inner_verbose = max(0, self.verbose - 1) if _is_arraylike_not_scalar(n_alphas): for alpha in self.alphas: check_scalar( alpha, "alpha", Real, min_val=0, max_val=np.inf, include_boundaries="right", ) alphas = self.alphas n_refinements = 1 else: n_refinements = self.n_refinements alpha_1 = alpha_max(emp_cov) alpha_0 = 1e-2 * alpha_1 alphas = np.logspace(np.log10(alpha_0), np.log10(alpha_1), n_alphas)[::-1] if _routing_enabled(): routed_params = process_routing(self, "fit", **params) else: routed_params = Bunch(splitter=Bunch(split={})) t0 = time.time() for i in range(n_refinements): with warnings.catch_warnings(): # No need to see the convergence warnings on this grid: # they will always be points that will not converge # during the cross-validation warnings.simplefilter("ignore", ConvergenceWarning) # Compute the cross-validated loss on the current grid # NOTE: Warm-restarting graphical_lasso_path has been tried, # and this did not allow to gain anything # (same execution time with or without). 
this_path = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)( delayed(graphical_lasso_path)( X[train], alphas=alphas, X_test=X[test], mode=self.mode, tol=self.tol, enet_tol=self.enet_tol, max_iter=int(0.1 * self.max_iter), verbose=inner_verbose, eps=self.eps, ) for train, test in cv.split(X, y, **routed_params.splitter.split) ) # Little danse to transform the list in what we need covs, _, scores = zip(*this_path) covs = zip(*covs) scores = zip(*scores) path.extend(zip(alphas, scores, covs)) path = sorted(path, key=operator.itemgetter(0), reverse=True) # Find the maximum (avoid using built in 'max' function to # have a fully-reproducible selection of the smallest alpha # in case of equality) best_score = -np.inf last_finite_idx = 0 for index, (alpha, scores, _) in enumerate(path): this_score = np.mean(scores) if this_score >= 0.1 / np.finfo(np.float64).eps: this_score = np.nan if np.isfinite(this_score): last_finite_idx = index if this_score >= best_score: best_score = this_score best_index = index # Refine the grid if best_index == 0: # We do not need to go back: we have chosen # the highest value of alpha for which there are # non-zero coefficients alpha_1 = path[0][0] alpha_0 = path[1][0] elif best_index == last_finite_idx and not best_index == len(path) - 1: # We have non-converged models on the upper bound of the # grid, we need to refine the grid there alpha_1 = path[best_index][0] alpha_0 = path[best_index + 1][0] elif best_index == len(path) - 1: alpha_1 = path[best_index][0] alpha_0 = 0.01 * path[best_index][0] else: alpha_1 = path[best_index - 1][0] alpha_0 = path[best_index + 1][0] if not _is_arraylike_not_scalar(n_alphas): alphas = np.logspace(np.log10(alpha_1), np.log10(alpha_0), n_alphas + 2) alphas = alphas[1:-1] if self.verbose and n_refinements > 1: print( "[GraphicalLassoCV] Done refinement % 2i out of %i: % 3is" % (i + 1, n_refinements, time.time() - t0) ) path = list(zip(*path)) grid_scores = list(path[1]) alphas = list(path[0]) # Finally, compute the score with alpha = 0 alphas.append(0) grid_scores.append( cross_val_score( EmpiricalCovariance(), X, cv=cv, n_jobs=self.n_jobs, verbose=inner_verbose, params=params, ) ) grid_scores = np.array(grid_scores) self.cv_results_ = {"alphas": np.array(alphas)} for i in range(grid_scores.shape[1]): self.cv_results_[f"split{i}_test_score"] = grid_scores[:, i] self.cv_results_["mean_test_score"] = np.mean(grid_scores, axis=1) self.cv_results_["std_test_score"] = np.std(grid_scores, axis=1) best_alpha = alphas[best_index] self.alpha_ = best_alpha # Finally fit the model with the selected alpha self.covariance_, self.precision_, self.costs_, self.n_iter_ = _graphical_lasso( emp_cov, alpha=best_alpha, mode=self.mode, tol=self.tol, enet_tol=self.enet_tol, max_iter=self.max_iter, verbose=inner_verbose, eps=self.eps, ) return self
Fit the GraphicalLasso covariance model to X. Parameters ---------- X : array-like of shape (n_samples, n_features) Data from which to compute the covariance estimate. y : Ignored Not used, present for API consistency by convention. **params : dict, default=None Parameters to be passed to the CV splitter and the cross_val_score function. .. versionadded:: 1.5 Only available if `enable_metadata_routing=True`, which can be set by using ``sklearn.set_config(enable_metadata_routing=True)``. See :ref:`Metadata Routing User Guide <metadata_routing>` for more details. Returns ------- self : object Returns the instance itself.
fit
python
scikit-learn/scikit-learn
sklearn/covariance/_graph_lasso.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/covariance/_graph_lasso.py
BSD-3-Clause
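A compact usage sketch for the cross-validated fit above, on synthetic data, showing the selected penalty and the cv_results_ layout it populates:

import numpy as np
from sklearn.covariance import GraphicalLassoCV
from sklearn.datasets import make_sparse_spd_matrix

rng = np.random.RandomState(0)
true_cov = make_sparse_spd_matrix(n_dim=5, random_state=0)
X = rng.multivariate_normal(np.zeros(5), true_cov, size=200)

model = GraphicalLassoCV(cv=3).fit(X)

print(model.alpha_)  # penalty selected on the refined grid
print(model.cv_results_["alphas"])
print(model.cv_results_["mean_test_score"])  # per-alpha cross-validated log-likelihood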
def get_metadata_routing(self): """Get metadata routing of this object. Please check :ref:`User Guide <metadata_routing>` on how the routing mechanism works. .. versionadded:: 1.5 Returns ------- routing : MetadataRouter A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating routing information. """ router = MetadataRouter(owner=self.__class__.__name__).add( splitter=check_cv(self.cv), method_mapping=MethodMapping().add(callee="split", caller="fit"), ) return router
Get metadata routing of this object. Please check :ref:`User Guide <metadata_routing>` on how the routing mechanism works. .. versionadded:: 1.5 Returns ------- routing : MetadataRouter A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating routing information.
get_metadata_routing
python
scikit-learn/scikit-learn
sklearn/covariance/_graph_lasso.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/covariance/_graph_lasso.py
BSD-3-Clause
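Because this router maps the splitter's split method onto fit, enabling metadata routing lets group-aware splitters receive groups passed to GraphicalLassoCV.fit. A sketch assuming scikit-learn >= 1.5 and that GroupKFold requests groups by default:

import numpy as np
from sklearn import set_config
from sklearn.covariance import GraphicalLassoCV
from sklearn.model_selection import GroupKFold

set_config(enable_metadata_routing=True)

rng = np.random.RandomState(0)
X = rng.multivariate_normal(np.zeros(3), np.eye(3), size=120)
groups = np.repeat(np.arange(6), 20)  # six groups of twenty samples

# groups is routed to GroupKFold.split through the router defined above.
model = GraphicalLassoCV(cv=GroupKFold(n_splits=3)).fit(X, groups=groups)
print(model.alpha_)

set_config(enable_metadata_routing=False)  # restore the default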
def c_step( X, n_support, remaining_iterations=30, initial_estimates=None, verbose=False, cov_computation_method=empirical_covariance, random_state=None, ): """C_step procedure described in [Rouseeuw1984]_ aiming at computing MCD. Parameters ---------- X : array-like of shape (n_samples, n_features) Data set in which we look for the n_support observations whose scatter matrix has minimum determinant. n_support : int Number of observations to compute the robust estimates of location and covariance from. This parameter must be greater than `n_samples / 2`. remaining_iterations : int, default=30 Number of iterations to perform. According to [Rouseeuw1999]_, two iterations are sufficient to get close to the minimum, and we never need more than 30 to reach convergence. initial_estimates : tuple of shape (2,), default=None Initial estimates of location and shape from which to run the c_step procedure: - initial_estimates[0]: an initial location estimate - initial_estimates[1]: an initial covariance estimate verbose : bool, default=False Verbose mode. cov_computation_method : callable, \ default=:func:`sklearn.covariance.empirical_covariance` The function which will be used to compute the covariance. Must return array of shape (n_features, n_features). random_state : int, RandomState instance or None, default=None Determines the pseudo random number generator for shuffling the data. Pass an int for reproducible results across multiple function calls. See :term:`Glossary <random_state>`. Returns ------- location : ndarray of shape (n_features,) Robust location estimates. covariance : ndarray of shape (n_features, n_features) Robust covariance estimates. support : ndarray of shape (n_samples,) A mask for the `n_support` observations whose scatter matrix has minimum determinant. References ---------- .. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance Determinant Estimator, 1999, American Statistical Association and the American Society for Quality, TECHNOMETRICS """ X = np.asarray(X) random_state = check_random_state(random_state) return _c_step( X, n_support, remaining_iterations=remaining_iterations, initial_estimates=initial_estimates, verbose=verbose, cov_computation_method=cov_computation_method, random_state=random_state, )
C_step procedure described in [Rouseeuw1984]_ aiming at computing MCD. Parameters ---------- X : array-like of shape (n_samples, n_features) Data set in which we look for the n_support observations whose scatter matrix has minimum determinant. n_support : int Number of observations to compute the robust estimates of location and covariance from. This parameter must be greater than `n_samples / 2`. remaining_iterations : int, default=30 Number of iterations to perform. According to [Rouseeuw1999]_, two iterations are sufficient to get close to the minimum, and we never need more than 30 to reach convergence. initial_estimates : tuple of shape (2,), default=None Initial estimates of location and shape from which to run the c_step procedure: - initial_estimates[0]: an initial location estimate - initial_estimates[1]: an initial covariance estimate verbose : bool, default=False Verbose mode. cov_computation_method : callable, default=:func:`sklearn.covariance.empirical_covariance` The function which will be used to compute the covariance. Must return array of shape (n_features, n_features). random_state : int, RandomState instance or None, default=None Determines the pseudo random number generator for shuffling the data. Pass an int for reproducible results across multiple function calls. See :term:`Glossary <random_state>`. Returns ------- location : ndarray of shape (n_features,) Robust location estimates. covariance : ndarray of shape (n_features, n_features) Robust covariance estimates. support : ndarray of shape (n_samples,) A mask for the `n_support` observations whose scatter matrix has minimum determinant. References ---------- .. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance Determinant Estimator, 1999, American Statistical Association and the American Society for Quality, TECHNOMETRICS
c_step
python
scikit-learn/scikit-learn
sklearn/covariance/_robust_covariance.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/covariance/_robust_covariance.py
BSD-3-Clause
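c_step is the low-level building block behind the public MinCovDet estimator; its practical effect is easiest to see by comparing the robust estimate with the plain empirical covariance on contaminated data. A sketch with made-up contamination:

import numpy as np
from sklearn.covariance import EmpiricalCovariance, MinCovDet

rng = np.random.RandomState(0)
X = rng.multivariate_normal(np.zeros(2), [[1.0, 0.6], [0.6, 1.0]], size=200)
X[:20] += 10.0  # shift 10% of the samples far away to simulate outliers

mcd = MinCovDet(random_state=0).fit(X)
emp = EmpiricalCovariance().fit(X)

print(emp.location_)  # pulled toward the outliers
print(mcd.location_)  # stays close to the origin
print(mcd.support_.sum(), "samples kept in the robust support")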
def select_candidates( X, n_support, n_trials, select=1, n_iter=30, verbose=False, cov_computation_method=empirical_covariance, random_state=None, ): """Finds the best pure subset of observations to compute MCD from it. The purpose of this function is to find the best sets of n_support observations with respect to a minimization of their covariance matrix determinant. Equivalently, it removes n_samples-n_support observations to construct what we call a pure data set (i.e. not containing outliers). The list of the observations of the pure data set is referred to as the `support`. Starting from a random support, the pure data set is found by the c_step procedure introduced by Rousseeuw and Van Driessen in [RV]_. Parameters ---------- X : array-like of shape (n_samples, n_features) Data (sub)set in which we look for the n_support purest observations. n_support : int The number of samples the pure data set must contain. This parameter must be in the range `[(n + p + 1)/2] < n_support < n`. n_trials : int or tuple of shape (2,) Number of different initial sets of observations from which to run the algorithm. This parameter should be a strictly positive integer. Instead of giving a number of trials to perform, one can provide a list of initial estimates that will be used to iteratively run c_step procedures. In this case: - n_trials[0]: array-like, shape (n_trials, n_features) is the list of `n_trials` initial location estimates - n_trials[1]: array-like, shape (n_trials, n_features, n_features) is the list of `n_trials` initial covariances estimates select : int, default=1 Number of best candidates results to return. This parameter must be a strictly positive integer. n_iter : int, default=30 Maximum number of iterations for the c_step procedure. (2 is enough to be close to the final solution. "Never" exceeds 20). This parameter must be a strictly positive integer. verbose : bool, default=False Control the output verbosity. cov_computation_method : callable, \ default=:func:`sklearn.covariance.empirical_covariance` The function which will be used to compute the covariance. Must return an array of shape (n_features, n_features). random_state : int, RandomState instance or None, default=None Determines the pseudo random number generator for shuffling the data. Pass an int for reproducible results across multiple function calls. See :term:`Glossary <random_state>`. See Also --------- c_step Returns ------- best_locations : ndarray of shape (select, n_features) The `select` location estimates computed from the `select` best supports found in the data set (`X`). best_covariances : ndarray of shape (select, n_features, n_features) The `select` covariance estimates computed from the `select` best supports found in the data set (`X`). best_supports : ndarray of shape (select, n_samples) The `select` best supports found in the data set (`X`). References ---------- .. 
[RV] A Fast Algorithm for the Minimum Covariance Determinant Estimator, 1999, American Statistical Association and the American Society for Quality, TECHNOMETRICS """ random_state = check_random_state(random_state) if isinstance(n_trials, Integral): run_from_estimates = False elif isinstance(n_trials, tuple): run_from_estimates = True estimates_list = n_trials n_trials = estimates_list[0].shape[0] else: raise TypeError( "Invalid 'n_trials' parameter, expected tuple or integer, got %s (%s)" % (n_trials, type(n_trials)) ) # compute `n_trials` location and shape estimates candidates in the subset all_estimates = [] if not run_from_estimates: # perform `n_trials` computations from random initial supports for j in range(n_trials): all_estimates.append( _c_step( X, n_support, remaining_iterations=n_iter, verbose=verbose, cov_computation_method=cov_computation_method, random_state=random_state, ) ) else: # perform computations from every given initial estimates for j in range(n_trials): initial_estimates = (estimates_list[0][j], estimates_list[1][j]) all_estimates.append( _c_step( X, n_support, remaining_iterations=n_iter, initial_estimates=initial_estimates, verbose=verbose, cov_computation_method=cov_computation_method, random_state=random_state, ) ) all_locs_sub, all_covs_sub, all_dets_sub, all_supports_sub, all_ds_sub = zip( *all_estimates ) # find the `n_best` best results among the `n_trials` ones index_best = np.argsort(all_dets_sub)[:select] best_locations = np.asarray(all_locs_sub)[index_best] best_covariances = np.asarray(all_covs_sub)[index_best] best_supports = np.asarray(all_supports_sub)[index_best] best_ds = np.asarray(all_ds_sub)[index_best] return best_locations, best_covariances, best_supports, best_ds
Finds the best pure subset of observations to compute MCD from it. The purpose of this function is to find the best sets of n_support observations with respect to a minimization of their covariance matrix determinant. Equivalently, it removes n_samples-n_support observations to construct what we call a pure data set (i.e. not containing outliers). The list of the observations of the pure data set is referred to as the `support`. Starting from a random support, the pure data set is found by the c_step procedure introduced by Rousseeuw and Van Driessen in [RV]_. Parameters ---------- X : array-like of shape (n_samples, n_features) Data (sub)set in which we look for the n_support purest observations. n_support : int The number of samples the pure data set must contain. This parameter must be in the range `[(n + p + 1)/2] < n_support < n`. n_trials : int or tuple of shape (2,) Number of different initial sets of observations from which to run the algorithm. This parameter should be a strictly positive integer. Instead of giving a number of trials to perform, one can provide a list of initial estimates that will be used to iteratively run c_step procedures. In this case: - n_trials[0]: array-like, shape (n_trials, n_features) is the list of `n_trials` initial location estimates - n_trials[1]: array-like, shape (n_trials, n_features, n_features) is the list of `n_trials` initial covariances estimates select : int, default=1 Number of best candidates results to return. This parameter must be a strictly positive integer. n_iter : int, default=30 Maximum number of iterations for the c_step procedure. (2 is enough to be close to the final solution. "Never" exceeds 20). This parameter must be a strictly positive integer. verbose : bool, default=False Control the output verbosity. cov_computation_method : callable, default=:func:`sklearn.covariance.empirical_covariance` The function which will be used to compute the covariance. Must return an array of shape (n_features, n_features). random_state : int, RandomState instance or None, default=None Determines the pseudo random number generator for shuffling the data. Pass an int for reproducible results across multiple function calls. See :term:`Glossary <random_state>`. See Also --------- c_step Returns ------- best_locations : ndarray of shape (select, n_features) The `select` location estimates computed from the `select` best supports found in the data set (`X`). best_covariances : ndarray of shape (select, n_features, n_features) The `select` covariance estimates computed from the `select` best supports found in the data set (`X`). best_supports : ndarray of shape (select, n_samples) The `select` best supports found in the data set (`X`). References ---------- .. [RV] A Fast Algorithm for the Minimum Covariance Determinant Estimator, 1999, American Statistical Association and the American Society for Quality, TECHNOMETRICS
select_candidates
python
scikit-learn/scikit-learn
sklearn/covariance/_robust_covariance.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/covariance/_robust_covariance.py
BSD-3-Clause
def fast_mcd( X, support_fraction=None, cov_computation_method=empirical_covariance, random_state=None, ): """Estimate the Minimum Covariance Determinant matrix. Read more in the :ref:`User Guide <robust_covariance>`. Parameters ---------- X : array-like of shape (n_samples, n_features) The data matrix, with p features and n samples. support_fraction : float, default=None The proportion of points to be included in the support of the raw MCD estimate. Default is `None`, which implies that the minimum value of `support_fraction` will be used within the algorithm: `(n_samples + n_features + 1) / 2 * n_samples`. This parameter must be in the range (0, 1). cov_computation_method : callable, \ default=:func:`sklearn.covariance.empirical_covariance` The function which will be used to compute the covariance. Must return an array of shape (n_features, n_features). random_state : int, RandomState instance or None, default=None Determines the pseudo random number generator for shuffling the data. Pass an int for reproducible results across multiple function calls. See :term:`Glossary <random_state>`. Returns ------- location : ndarray of shape (n_features,) Robust location of the data. covariance : ndarray of shape (n_features, n_features) Robust covariance of the features. support : ndarray of shape (n_samples,), dtype=bool A mask of the observations that have been used to compute the robust location and covariance estimates of the data set. Notes ----- The FastMCD algorithm has been introduced by Rousseuw and Van Driessen in "A Fast Algorithm for the Minimum Covariance Determinant Estimator, 1999, American Statistical Association and the American Society for Quality, TECHNOMETRICS". The principle is to compute robust estimates and random subsets before pooling them into a larger subsets, and finally into the full data set. Depending on the size of the initial sample, we have one, two or three such computation levels. Note that only raw estimates are returned. If one is interested in the correction and reweighting steps described in [RouseeuwVan]_, see the MinCovDet object. References ---------- .. [RouseeuwVan] A Fast Algorithm for the Minimum Covariance Determinant Estimator, 1999, American Statistical Association and the American Society for Quality, TECHNOMETRICS .. [Butler1993] R. W. Butler, P. L. Davies and M. Jhun, Asymptotics For The Minimum Covariance Determinant Estimator, The Annals of Statistics, 1993, Vol. 21, No. 3, 1385-1400 """ random_state = check_random_state(random_state) X = check_array(X, ensure_min_samples=2, estimator="fast_mcd") n_samples, n_features = X.shape # minimum breakdown value if support_fraction is None: n_support = min(int(np.ceil(0.5 * (n_samples + n_features + 1))), n_samples) else: n_support = int(support_fraction * n_samples) # 1-dimensional case quick computation # (Rousseeuw, P. J. and Leroy, A. M. 
(2005) References, in Robust # Regression and Outlier Detection, John Wiley & Sons, chapter 4) if n_features == 1: if n_support < n_samples: # find the sample shortest halves X_sorted = np.sort(np.ravel(X)) diff = X_sorted[n_support:] - X_sorted[: (n_samples - n_support)] halves_start = np.where(diff == np.min(diff))[0] # take the middle points' mean to get the robust location estimate location = ( 0.5 * (X_sorted[n_support + halves_start] + X_sorted[halves_start]).mean() ) support = np.zeros(n_samples, dtype=bool) X_centered = X - location support[np.argsort(np.abs(X_centered), 0)[:n_support]] = True covariance = np.asarray([[np.var(X[support])]]) location = np.array([location]) # get precision matrix in an optimized way precision = linalg.pinvh(covariance) dist = (np.dot(X_centered, precision) * (X_centered)).sum(axis=1) else: support = np.ones(n_samples, dtype=bool) covariance = np.asarray([[np.var(X)]]) location = np.asarray([np.mean(X)]) X_centered = X - location # get precision matrix in an optimized way precision = linalg.pinvh(covariance) dist = (np.dot(X_centered, precision) * (X_centered)).sum(axis=1) # Starting FastMCD algorithm for p-dimensional case if (n_samples > 500) and (n_features > 1): # 1. Find candidate supports on subsets # a. split the set in subsets of size ~ 300 n_subsets = n_samples // 300 n_samples_subsets = n_samples // n_subsets samples_shuffle = random_state.permutation(n_samples) h_subset = int(np.ceil(n_samples_subsets * (n_support / float(n_samples)))) # b. perform a total of 500 trials n_trials_tot = 500 # c. select 10 best (location, covariance) for each subset n_best_sub = 10 n_trials = max(10, n_trials_tot // n_subsets) n_best_tot = n_subsets * n_best_sub all_best_locations = np.zeros((n_best_tot, n_features)) try: all_best_covariances = np.zeros((n_best_tot, n_features, n_features)) except MemoryError: # The above is too big. Let's try with something much small # (and less optimal) n_best_tot = 10 all_best_covariances = np.zeros((n_best_tot, n_features, n_features)) n_best_sub = 2 for i in range(n_subsets): low_bound = i * n_samples_subsets high_bound = low_bound + n_samples_subsets current_subset = X[samples_shuffle[low_bound:high_bound]] best_locations_sub, best_covariances_sub, _, _ = select_candidates( current_subset, h_subset, n_trials, select=n_best_sub, n_iter=2, cov_computation_method=cov_computation_method, random_state=random_state, ) subset_slice = np.arange(i * n_best_sub, (i + 1) * n_best_sub) all_best_locations[subset_slice] = best_locations_sub all_best_covariances[subset_slice] = best_covariances_sub # 2. Pool the candidate supports into a merged set # (possibly the full dataset) n_samples_merged = min(1500, n_samples) h_merged = int(np.ceil(n_samples_merged * (n_support / float(n_samples)))) if n_samples > 1500: n_best_merged = 10 else: n_best_merged = 1 # find the best couples (location, covariance) on the merged set selection = random_state.permutation(n_samples)[:n_samples_merged] locations_merged, covariances_merged, supports_merged, d = select_candidates( X[selection], h_merged, n_trials=(all_best_locations, all_best_covariances), select=n_best_merged, cov_computation_method=cov_computation_method, random_state=random_state, ) # 3. 
Finally get the overall best (locations, covariance) couple if n_samples < 1500: # directly get the best couple (location, covariance) location = locations_merged[0] covariance = covariances_merged[0] support = np.zeros(n_samples, dtype=bool) dist = np.zeros(n_samples) support[selection] = supports_merged[0] dist[selection] = d[0] else: # select the best couple on the full dataset locations_full, covariances_full, supports_full, d = select_candidates( X, n_support, n_trials=(locations_merged, covariances_merged), select=1, cov_computation_method=cov_computation_method, random_state=random_state, ) location = locations_full[0] covariance = covariances_full[0] support = supports_full[0] dist = d[0] elif n_features > 1: # 1. Find the 10 best couples (location, covariance) # considering two iterations n_trials = 30 n_best = 10 locations_best, covariances_best, _, _ = select_candidates( X, n_support, n_trials=n_trials, select=n_best, n_iter=2, cov_computation_method=cov_computation_method, random_state=random_state, ) # 2. Select the best couple on the full dataset amongst the 10 locations_full, covariances_full, supports_full, d = select_candidates( X, n_support, n_trials=(locations_best, covariances_best), select=1, cov_computation_method=cov_computation_method, random_state=random_state, ) location = locations_full[0] covariance = covariances_full[0] support = supports_full[0] dist = d[0] return location, covariance, support, dist
Estimate the Minimum Covariance Determinant matrix. Read more in the :ref:`User Guide <robust_covariance>`. Parameters ---------- X : array-like of shape (n_samples, n_features) The data matrix, with p features and n samples. support_fraction : float, default=None The proportion of points to be included in the support of the raw MCD estimate. Default is `None`, which implies that the minimum value of `support_fraction` will be used within the algorithm: `(n_samples + n_features + 1) / (2 * n_samples)`. This parameter must be in the range (0, 1). cov_computation_method : callable, default=:func:`sklearn.covariance.empirical_covariance` The function which will be used to compute the covariance. Must return an array of shape (n_features, n_features). random_state : int, RandomState instance or None, default=None Determines the pseudo random number generator for shuffling the data. Pass an int for reproducible results across multiple function calls. See :term:`Glossary <random_state>`. Returns ------- location : ndarray of shape (n_features,) Robust location of the data. covariance : ndarray of shape (n_features, n_features) Robust covariance of the features. support : ndarray of shape (n_samples,), dtype=bool A mask of the observations that have been used to compute the robust location and covariance estimates of the data set. dist : ndarray of shape (n_samples,) Mahalanobis distances of the observations, computed from the raw estimates. Notes ----- The FastMCD algorithm has been introduced by Rousseeuw and Van Driessen in "A Fast Algorithm for the Minimum Covariance Determinant Estimator, 1999, American Statistical Association and the American Society for Quality, TECHNOMETRICS". The principle is to compute robust estimates on random subsets before pooling them into larger subsets, and finally into the full data set. Depending on the size of the initial sample, we have one, two or three such computation levels. Note that only raw estimates are returned. If one is interested in the correction and reweighting steps described in [RouseeuwVan]_, see the MinCovDet object. References ---------- .. [RouseeuwVan] A Fast Algorithm for the Minimum Covariance Determinant Estimator, 1999, American Statistical Association and the American Society for Quality, TECHNOMETRICS .. [Butler1993] R. W. Butler, P. L. Davies and M. Jhun, Asymptotics For The Minimum Covariance Determinant Estimator, The Annals of Statistics, 1993, Vol. 21, No. 3, 1385-1400
fast_mcd
python
scikit-learn/scikit-learn
sklearn/covariance/_robust_covariance.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/covariance/_robust_covariance.py
BSD-3-Clause
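A short usage sketch for fast_mcd, added for illustration and not part of the original record. It unpacks four values (location, covariance, support, dist), matching the return statement in the code above and the way MinCovDet.fit consumes it below. The contaminated toy data is an assumption.

import numpy as np
from sklearn.covariance import fast_mcd

rng = np.random.RandomState(42)
X = rng.multivariate_normal(mean=[0.0, 0.0], cov=[[1.0, 0.3], [0.3, 1.0]], size=500)
X[:25] += 10.0  # plant a few gross outliers

location, covariance, support, dist = fast_mcd(X, random_state=0)
print(location)       # raw robust location estimate
print(support.sum())  # number of observations kept in the support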
def fit(self, X, y=None): """Fit a Minimum Covariance Determinant with the FastMCD algorithm. Parameters ---------- X : array-like of shape (n_samples, n_features) Training data, where `n_samples` is the number of samples and `n_features` is the number of features. y : Ignored Not used, present for API consistency by convention. Returns ------- self : object Returns the instance itself. """ X = validate_data(self, X, ensure_min_samples=2, estimator="MinCovDet") random_state = check_random_state(self.random_state) n_samples, n_features = X.shape # check that the empirical covariance is full rank if (linalg.svdvals(np.dot(X.T, X)) > 1e-8).sum() != n_features: warnings.warn( "The covariance matrix associated to your dataset is not full rank" ) # compute and store raw estimates raw_location, raw_covariance, raw_support, raw_dist = fast_mcd( X, support_fraction=self.support_fraction, cov_computation_method=self._nonrobust_covariance, random_state=random_state, ) if self.assume_centered: raw_location = np.zeros(n_features) raw_covariance = self._nonrobust_covariance( X[raw_support], assume_centered=True ) # get precision matrix in an optimized way precision = linalg.pinvh(raw_covariance) raw_dist = np.sum(np.dot(X, precision) * X, 1) self.raw_location_ = raw_location self.raw_covariance_ = raw_covariance self.raw_support_ = raw_support self.location_ = raw_location self.support_ = raw_support self.dist_ = raw_dist # obtain consistency at normal models self.correct_covariance(X) # re-weight estimator self.reweight_covariance(X) return self
Fit a Minimum Covariance Determinant with the FastMCD algorithm. Parameters ---------- X : array-like of shape (n_samples, n_features) Training data, where `n_samples` is the number of samples and `n_features` is the number of features. y : Ignored Not used, present for API consistency by convention. Returns ------- self : object Returns the instance itself.
fit
python
scikit-learn/scikit-learn
sklearn/covariance/_robust_covariance.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/covariance/_robust_covariance.py
BSD-3-Clause
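A minimal end-to-end sketch of the fit method documented above; the contaminated Gaussian sample and the parameter values are illustrative assumptions.

import numpy as np
from sklearn.covariance import MinCovDet

rng = np.random.RandomState(0)
X = rng.multivariate_normal(mean=[0.0, 0.0], cov=[[0.8, 0.3], [0.3, 0.4]], size=500)
X[:20] = rng.uniform(low=5.0, high=10.0, size=(20, 2))  # contaminate with outliers

mcd = MinCovDet(random_state=0).fit(X)
print(mcd.location_)       # reweighted robust location
print(mcd.covariance_)     # reweighted robust covariance
print(mcd.support_.sum())  # observations used for the final estimates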
def correct_covariance(self, data): """Apply a correction to raw Minimum Covariance Determinant estimates. Correction using the empirical correction factor suggested by Rousseeuw and Van Driessen in [RVD]_. Parameters ---------- data : array-like of shape (n_samples, n_features) The data matrix, with p features and n samples. The data set must be the one which was used to compute the raw estimates. Returns ------- covariance_corrected : ndarray of shape (n_features, n_features) Corrected robust covariance estimate. References ---------- .. [RVD] A Fast Algorithm for the Minimum Covariance Determinant Estimator, 1999, American Statistical Association and the American Society for Quality, TECHNOMETRICS """ # Check that the covariance of the support data is not equal to 0. # Otherwise self.dist_ = 0 and thus correction = 0. n_samples = len(self.dist_) n_support = np.sum(self.support_) if n_support < n_samples and np.allclose(self.raw_covariance_, 0): raise ValueError( "The covariance matrix of the support data " "is equal to 0, try to increase support_fraction" ) correction = np.median(self.dist_) / chi2(data.shape[1]).isf(0.5) covariance_corrected = self.raw_covariance_ * correction self.dist_ /= correction return covariance_corrected
Apply a correction to raw Minimum Covariance Determinant estimates. Correction using the empirical correction factor suggested by Rousseeuw and Van Driessen in [RVD]_. Parameters ---------- data : array-like of shape (n_samples, n_features) The data matrix, with p features and n samples. The data set must be the one which was used to compute the raw estimates. Returns ------- covariance_corrected : ndarray of shape (n_features, n_features) Corrected robust covariance estimate. References ---------- .. [RVD] A Fast Algorithm for the Minimum Covariance Determinant Estimator, 1999, American Statistical Association and the American Society for Quality, TECHNOMETRICS
correct_covariance
python
scikit-learn/scikit-learn
sklearn/covariance/_robust_covariance.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/covariance/_robust_covariance.py
BSD-3-Clause
def reweight_covariance(self, data): """Re-weight raw Minimum Covariance Determinant estimates. Re-weight observations using Rousseeuw's method (equivalent to deleting outlying observations from the data set before computing location and covariance estimates) described in [RVDriessen]_. Parameters ---------- data : array-like of shape (n_samples, n_features) The data matrix, with p features and n samples. The data set must be the one which was used to compute the raw estimates. Returns ------- location_reweighted : ndarray of shape (n_features,) Re-weighted robust location estimate. covariance_reweighted : ndarray of shape (n_features, n_features) Re-weighted robust covariance estimate. support_reweighted : ndarray of shape (n_samples,), dtype=bool A mask of the observations that have been used to compute the re-weighted robust location and covariance estimates. References ---------- .. [RVDriessen] A Fast Algorithm for the Minimum Covariance Determinant Estimator, 1999, American Statistical Association and the American Society for Quality, TECHNOMETRICS """ n_samples, n_features = data.shape mask = self.dist_ < chi2(n_features).isf(0.025) if self.assume_centered: location_reweighted = np.zeros(n_features) else: location_reweighted = data[mask].mean(0) covariance_reweighted = self._nonrobust_covariance( data[mask], assume_centered=self.assume_centered ) support_reweighted = np.zeros(n_samples, dtype=bool) support_reweighted[mask] = True self._set_covariance(covariance_reweighted) self.location_ = location_reweighted self.support_ = support_reweighted X_centered = data - self.location_ self.dist_ = np.sum(np.dot(X_centered, self.get_precision()) * X_centered, 1) return location_reweighted, covariance_reweighted, support_reweighted
Re-weight raw Minimum Covariance Determinant estimates. Re-weight observations using Rousseeuw's method (equivalent to deleting outlying observations from the data set before computing location and covariance estimates) described in [RVDriessen]_. Parameters ---------- data : array-like of shape (n_samples, n_features) The data matrix, with p features and n samples. The data set must be the one which was used to compute the raw estimates. Returns ------- location_reweighted : ndarray of shape (n_features,) Re-weighted robust location estimate. covariance_reweighted : ndarray of shape (n_features, n_features) Re-weighted robust covariance estimate. support_reweighted : ndarray of shape (n_samples,), dtype=bool A mask of the observations that have been used to compute the re-weighted robust location and covariance estimates. References ---------- .. [RVDriessen] A Fast Algorithm for the Minimum Covariance Determinant Estimator, 1999, American Statistical Association and the American Society for Quality, TECHNOMETRICS
reweight_covariance
python
scikit-learn/scikit-learn
sklearn/covariance/_robust_covariance.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/covariance/_robust_covariance.py
BSD-3-Clause
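Since correct_covariance and reweight_covariance are both applied automatically at the end of MinCovDet.fit (see the fit record above), their combined effect is easiest to observe by comparing the raw and final attributes of a fitted estimator. A small illustrative sketch, with synthetic data as an assumption:

import numpy as np
from sklearn.covariance import MinCovDet

rng = np.random.RandomState(1)
X = rng.multivariate_normal(mean=[0.0, 0.0], cov=[[1.0, 0.2], [0.2, 0.5]], size=300)

mcd = MinCovDet(random_state=1).fit(X)
# raw_covariance_ holds the uncorrected FastMCD output; covariance_ holds the
# consistency-corrected and reweighted estimate produced by the two methods above.
print(mcd.raw_covariance_)
print(mcd.covariance_)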
def _ledoit_wolf(X, *, assume_centered, block_size): """Estimate the shrunk Ledoit-Wolf covariance matrix.""" # for only one feature, the result is the same whatever the shrinkage if len(X.shape) == 2 and X.shape[1] == 1: if not assume_centered: X = X - X.mean() return np.atleast_2d((X**2).mean()), 0.0 n_features = X.shape[1] # get Ledoit-Wolf shrinkage shrinkage = ledoit_wolf_shrinkage( X, assume_centered=assume_centered, block_size=block_size ) emp_cov = empirical_covariance(X, assume_centered=assume_centered) mu = np.sum(np.trace(emp_cov)) / n_features shrunk_cov = (1.0 - shrinkage) * emp_cov shrunk_cov.flat[:: n_features + 1] += shrinkage * mu return shrunk_cov, shrinkage
Estimate the shrunk Ledoit-Wolf covariance matrix.
_ledoit_wolf
python
scikit-learn/scikit-learn
sklearn/covariance/_shrunk_covariance.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/covariance/_shrunk_covariance.py
BSD-3-Clause
def _oas(X, *, assume_centered=False): """Estimate covariance with the Oracle Approximating Shrinkage algorithm. The formulation is based on [1]_. [1] "Shrinkage algorithms for MMSE covariance estimation.", Chen, Y., Wiesel, A., Eldar, Y. C., & Hero, A. O. IEEE Transactions on Signal Processing, 58(10), 5016-5029, 2010. https://arxiv.org/pdf/0907.4698.pdf """ if len(X.shape) == 2 and X.shape[1] == 1: # for only one feature, the result is the same whatever the shrinkage if not assume_centered: X = X - X.mean() return np.atleast_2d((X**2).mean()), 0.0 n_samples, n_features = X.shape emp_cov = empirical_covariance(X, assume_centered=assume_centered) # The shrinkage is defined as: # shrinkage = min( # trace(S @ S.T) + trace(S)**2) / ((n + 1) (trace(S @ S.T) - trace(S)**2 / p), 1 # ) # where n and p are n_samples and n_features, respectively (cf. Eq. 23 in [1]). # The factor 2 / p is omitted since it does not impact the value of the estimator # for large p. # Instead of computing trace(S)**2, we can compute the average of the squared # elements of S that is equal to trace(S)**2 / p**2. # See the definition of the Frobenius norm: # https://en.wikipedia.org/wiki/Matrix_norm#Frobenius_norm alpha = np.mean(emp_cov**2) mu = np.trace(emp_cov) / n_features mu_squared = mu**2 # The factor 1 / p**2 will cancel out since it is in both the numerator and # denominator num = alpha + mu_squared den = (n_samples + 1) * (alpha - mu_squared / n_features) shrinkage = 1.0 if den == 0 else min(num / den, 1.0) # The shrunk covariance is defined as: # (1 - shrinkage) * S + shrinkage * F (cf. Eq. 4 in [1]) # where S is the empirical covariance and F is the shrinkage target defined as # F = trace(S) / n_features * np.identity(n_features) (cf. Eq. 3 in [1]) shrunk_cov = (1.0 - shrinkage) * emp_cov shrunk_cov.flat[:: n_features + 1] += shrinkage * mu return shrunk_cov, shrinkage
Estimate covariance with the Oracle Approximating Shrinkage algorithm. The formulation is based on [1]_. [1] "Shrinkage algorithms for MMSE covariance estimation.", Chen, Y., Wiesel, A., Eldar, Y. C., & Hero, A. O. IEEE Transactions on Signal Processing, 58(10), 5016-5029, 2010. https://arxiv.org/pdf/0907.4698.pdf
_oas
python
scikit-learn/scikit-learn
sklearn/covariance/_shrunk_covariance.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/covariance/_shrunk_covariance.py
BSD-3-Clause
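The _oas helper above is private, but the same computation is exposed through the public sklearn.covariance.oas function and the OAS estimator. A brief sketch using the public function; the toy covariance is an assumption.

import numpy as np
from sklearn.covariance import oas

rng = np.random.RandomState(0)
real_cov = np.array([[0.8, 0.3], [0.3, 0.4]])
X = rng.multivariate_normal(mean=[0, 0], cov=real_cov, size=500)

shrunk_cov, shrinkage = oas(X)
print(shrinkage)   # OAS shrinkage coefficient in [0, 1]
print(shrunk_cov)  # convex combination of the empirical covariance and a scaled identity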
def shrunk_covariance(emp_cov, shrinkage=0.1): """Calculate covariance matrices shrunk on the diagonal. Read more in the :ref:`User Guide <shrunk_covariance>`. Parameters ---------- emp_cov : array-like of shape (..., n_features, n_features) Covariance matrices to be shrunk, at least 2D ndarray. shrinkage : float, default=0.1 Coefficient in the convex combination used for the computation of the shrunk estimate. Range is [0, 1]. Returns ------- shrunk_cov : ndarray of shape (..., n_features, n_features) Shrunk covariance matrices. Notes ----- The regularized (shrunk) covariance is given by:: (1 - shrinkage) * cov + shrinkage * mu * np.identity(n_features) where `mu = trace(cov) / n_features`. Examples -------- >>> import numpy as np >>> from sklearn.datasets import make_gaussian_quantiles >>> from sklearn.covariance import empirical_covariance, shrunk_covariance >>> real_cov = np.array([[.8, .3], [.3, .4]]) >>> rng = np.random.RandomState(0) >>> X = rng.multivariate_normal(mean=[0, 0], cov=real_cov, size=500) >>> shrunk_covariance(empirical_covariance(X)) array([[0.739, 0.254], [0.254, 0.411]]) """ emp_cov = check_array(emp_cov, allow_nd=True) n_features = emp_cov.shape[-1] shrunk_cov = (1.0 - shrinkage) * emp_cov mu = np.trace(emp_cov, axis1=-2, axis2=-1) / n_features mu = np.expand_dims(mu, axis=tuple(range(mu.ndim, emp_cov.ndim))) shrunk_cov += shrinkage * mu * np.eye(n_features) return shrunk_cov
Calculate covariance matrices shrunk on the diagonal. Read more in the :ref:`User Guide <shrunk_covariance>`. Parameters ---------- emp_cov : array-like of shape (..., n_features, n_features) Covariance matrices to be shrunk, at least 2D ndarray. shrinkage : float, default=0.1 Coefficient in the convex combination used for the computation of the shrunk estimate. Range is [0, 1]. Returns ------- shrunk_cov : ndarray of shape (..., n_features, n_features) Shrunk covariance matrices. Notes ----- The regularized (shrunk) covariance is given by:: (1 - shrinkage) * cov + shrinkage * mu * np.identity(n_features) where `mu = trace(cov) / n_features`. Examples -------- >>> import numpy as np >>> from sklearn.datasets import make_gaussian_quantiles >>> from sklearn.covariance import empirical_covariance, shrunk_covariance >>> real_cov = np.array([[.8, .3], [.3, .4]]) >>> rng = np.random.RandomState(0) >>> X = rng.multivariate_normal(mean=[0, 0], cov=real_cov, size=500) >>> shrunk_covariance(empirical_covariance(X)) array([[0.739, 0.254], [0.254, 0.411]])
shrunk_covariance
python
scikit-learn/scikit-learn
sklearn/covariance/_shrunk_covariance.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/covariance/_shrunk_covariance.py
BSD-3-Clause
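Besides the 2D example already included in the docstring above, the allow_nd / trace-over-last-axes handling in the code suggests that stacks of covariance matrices can be shrunk in one call. A small sketch under that assumption:

import numpy as np
from sklearn.covariance import shrunk_covariance

# A stack of three 2x2 covariance matrices, shrunk towards a scaled identity in one call
covs = np.stack([np.eye(2), 2.0 * np.eye(2), np.array([[1.0, 0.5], [0.5, 1.0]])])
shrunk = shrunk_covariance(covs, shrinkage=0.2)
print(shrunk.shape)  # (3, 2, 2)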
def fit(self, X, y=None): """Fit the shrunk covariance model to X. Parameters ---------- X : array-like of shape (n_samples, n_features) Training data, where `n_samples` is the number of samples and `n_features` is the number of features. y : Ignored Not used, present for API consistency by convention. Returns ------- self : object Returns the instance itself. """ X = validate_data(self, X) # Not calling the parent object to fit, to avoid a potential # matrix inversion when setting the precision if self.assume_centered: self.location_ = np.zeros(X.shape[1]) else: self.location_ = X.mean(0) covariance = empirical_covariance(X, assume_centered=self.assume_centered) covariance = shrunk_covariance(covariance, self.shrinkage) self._set_covariance(covariance) return self
Fit the shrunk covariance model to X. Parameters ---------- X : array-like of shape (n_samples, n_features) Training data, where `n_samples` is the number of samples and `n_features` is the number of features. y : Ignored Not used, present for API consistency by convention. Returns ------- self : object Returns the instance itself.
fit
python
scikit-learn/scikit-learn
sklearn/covariance/_shrunk_covariance.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/covariance/_shrunk_covariance.py
BSD-3-Clause
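A minimal usage sketch of the estimator whose fit method is documented above; data and shrinkage value are illustrative assumptions.

import numpy as np
from sklearn.covariance import ShrunkCovariance

rng = np.random.RandomState(0)
X = rng.multivariate_normal(mean=[0, 0], cov=[[0.8, 0.3], [0.3, 0.4]], size=500)

cov = ShrunkCovariance(shrinkage=0.1).fit(X)
print(cov.location_)    # per-feature mean (zeros if assume_centered=True)
print(cov.covariance_)  # shrunk covariance estimate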
def ledoit_wolf_shrinkage(X, assume_centered=False, block_size=1000): """Estimate the shrunk Ledoit-Wolf covariance matrix. Read more in the :ref:`User Guide <shrunk_covariance>`. Parameters ---------- X : array-like of shape (n_samples, n_features) Data from which to compute the Ledoit-Wolf shrunk covariance shrinkage. assume_centered : bool, default=False If True, data will not be centered before computation. Useful to work with data whose mean is significantly equal to zero but is not exactly zero. If False, data will be centered before computation. block_size : int, default=1000 Size of blocks into which the covariance matrix will be split. Returns ------- shrinkage : float Coefficient in the convex combination used for the computation of the shrunk estimate. Notes ----- The regularized (shrunk) covariance is: (1 - shrinkage) * cov + shrinkage * mu * np.identity(n_features) where mu = trace(cov) / n_features Examples -------- >>> import numpy as np >>> from sklearn.covariance import ledoit_wolf_shrinkage >>> real_cov = np.array([[.4, .2], [.2, .8]]) >>> rng = np.random.RandomState(0) >>> X = rng.multivariate_normal(mean=[0, 0], cov=real_cov, size=50) >>> shrinkage_coefficient = ledoit_wolf_shrinkage(X) >>> shrinkage_coefficient np.float64(0.23) """ X = check_array(X) # for only one feature, the result is the same whatever the shrinkage if len(X.shape) == 2 and X.shape[1] == 1: return 0.0 if X.ndim == 1: X = np.reshape(X, (1, -1)) if X.shape[0] == 1: warnings.warn( "Only one sample available. You may want to reshape your data array" ) n_samples, n_features = X.shape # optionally center data if not assume_centered: X = X - X.mean(0) # A non-blocked version of the computation is present in the tests # in tests/test_covariance.py # number of blocks to split the covariance matrix into n_splits = int(n_features / block_size) X2 = X**2 emp_cov_trace = np.sum(X2, axis=0) / n_samples mu = np.sum(emp_cov_trace) / n_features beta_ = 0.0 # sum of the coefficients of <X2.T, X2> delta_ = 0.0 # sum of the *squared* coefficients of <X.T, X> # starting block computation for i in range(n_splits): for j in range(n_splits): rows = slice(block_size * i, block_size * (i + 1)) cols = slice(block_size * j, block_size * (j + 1)) beta_ += np.sum(np.dot(X2.T[rows], X2[:, cols])) delta_ += np.sum(np.dot(X.T[rows], X[:, cols]) ** 2) rows = slice(block_size * i, block_size * (i + 1)) beta_ += np.sum(np.dot(X2.T[rows], X2[:, block_size * n_splits :])) delta_ += np.sum(np.dot(X.T[rows], X[:, block_size * n_splits :]) ** 2) for j in range(n_splits): cols = slice(block_size * j, block_size * (j + 1)) beta_ += np.sum(np.dot(X2.T[block_size * n_splits :], X2[:, cols])) delta_ += np.sum(np.dot(X.T[block_size * n_splits :], X[:, cols]) ** 2) delta_ += np.sum( np.dot(X.T[block_size * n_splits :], X[:, block_size * n_splits :]) ** 2 ) delta_ /= n_samples**2 beta_ += np.sum( np.dot(X2.T[block_size * n_splits :], X2[:, block_size * n_splits :]) ) # use delta_ to compute beta beta = 1.0 / (n_features * n_samples) * (beta_ / n_samples - delta_) # delta is the sum of the squared coefficients of (<X.T,X> - mu*Id) / p delta = delta_ - 2.0 * mu * emp_cov_trace.sum() + n_features * mu**2 delta /= n_features # get final beta as the min between beta and delta # We do this to prevent shrinking more than "1", which would invert # the value of covariances beta = min(beta, delta) # finally get shrinkage shrinkage = 0 if beta == 0 else beta / delta return shrinkage
Estimate the shrunk Ledoit-Wolf covariance matrix. Read more in the :ref:`User Guide <shrunk_covariance>`. Parameters ---------- X : array-like of shape (n_samples, n_features) Data from which to compute the Ledoit-Wolf shrunk covariance shrinkage. assume_centered : bool, default=False If True, data will not be centered before computation. Useful to work with data whose mean is significantly equal to zero but is not exactly zero. If False, data will be centered before computation. block_size : int, default=1000 Size of blocks into which the covariance matrix will be split. Returns ------- shrinkage : float Coefficient in the convex combination used for the computation of the shrunk estimate. Notes ----- The regularized (shrunk) covariance is: (1 - shrinkage) * cov + shrinkage * mu * np.identity(n_features) where mu = trace(cov) / n_features Examples -------- >>> import numpy as np >>> from sklearn.covariance import ledoit_wolf_shrinkage >>> real_cov = np.array([[.4, .2], [.2, .8]]) >>> rng = np.random.RandomState(0) >>> X = rng.multivariate_normal(mean=[0, 0], cov=real_cov, size=50) >>> shrinkage_coefficient = ledoit_wolf_shrinkage(X) >>> shrinkage_coefficient np.float64(0.23)
ledoit_wolf_shrinkage
python
scikit-learn/scikit-learn
sklearn/covariance/_shrunk_covariance.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/covariance/_shrunk_covariance.py
BSD-3-Clause
def ledoit_wolf(X, *, assume_centered=False, block_size=1000): """Estimate the shrunk Ledoit-Wolf covariance matrix. Read more in the :ref:`User Guide <shrunk_covariance>`. Parameters ---------- X : array-like of shape (n_samples, n_features) Data from which to compute the covariance estimate. assume_centered : bool, default=False If True, data will not be centered before computation. Useful to work with data whose mean is significantly equal to zero but is not exactly zero. If False, data will be centered before computation. block_size : int, default=1000 Size of blocks into which the covariance matrix will be split. This is purely a memory optimization and does not affect results. Returns ------- shrunk_cov : ndarray of shape (n_features, n_features) Shrunk covariance. shrinkage : float Coefficient in the convex combination used for the computation of the shrunk estimate. Notes ----- The regularized (shrunk) covariance is: (1 - shrinkage) * cov + shrinkage * mu * np.identity(n_features) where mu = trace(cov) / n_features Examples -------- >>> import numpy as np >>> from sklearn.covariance import empirical_covariance, ledoit_wolf >>> real_cov = np.array([[.4, .2], [.2, .8]]) >>> rng = np.random.RandomState(0) >>> X = rng.multivariate_normal(mean=[0, 0], cov=real_cov, size=50) >>> covariance, shrinkage = ledoit_wolf(X) >>> covariance array([[0.44, 0.16], [0.16, 0.80]]) >>> shrinkage np.float64(0.23) """ estimator = LedoitWolf( assume_centered=assume_centered, block_size=block_size, store_precision=False, ).fit(X) return estimator.covariance_, estimator.shrinkage_
Estimate the shrunk Ledoit-Wolf covariance matrix. Read more in the :ref:`User Guide <shrunk_covariance>`. Parameters ---------- X : array-like of shape (n_samples, n_features) Data from which to compute the covariance estimate. assume_centered : bool, default=False If True, data will not be centered before computation. Useful to work with data whose mean is significantly equal to zero but is not exactly zero. If False, data will be centered before computation. block_size : int, default=1000 Size of blocks into which the covariance matrix will be split. This is purely a memory optimization and does not affect results. Returns ------- shrunk_cov : ndarray of shape (n_features, n_features) Shrunk covariance. shrinkage : float Coefficient in the convex combination used for the computation of the shrunk estimate. Notes ----- The regularized (shrunk) covariance is: (1 - shrinkage) * cov + shrinkage * mu * np.identity(n_features) where mu = trace(cov) / n_features Examples -------- >>> import numpy as np >>> from sklearn.covariance import empirical_covariance, ledoit_wolf >>> real_cov = np.array([[.4, .2], [.2, .8]]) >>> rng = np.random.RandomState(0) >>> X = rng.multivariate_normal(mean=[0, 0], cov=real_cov, size=50) >>> covariance, shrinkage = ledoit_wolf(X) >>> covariance array([[0.44, 0.16], [0.16, 0.80]]) >>> shrinkage np.float64(0.23)
ledoit_wolf
python
scikit-learn/scikit-learn
sklearn/covariance/_shrunk_covariance.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/covariance/_shrunk_covariance.py
BSD-3-Clause
def fit(self, X, y=None): """Fit the Ledoit-Wolf shrunk covariance model to X. Parameters ---------- X : array-like of shape (n_samples, n_features) Training data, where `n_samples` is the number of samples and `n_features` is the number of features. y : Ignored Not used, present for API consistency by convention. Returns ------- self : object Returns the instance itself. """ # Not calling the parent object to fit, to avoid computing the # covariance matrix (and potentially the precision) X = validate_data(self, X) if self.assume_centered: self.location_ = np.zeros(X.shape[1]) else: self.location_ = X.mean(0) covariance, shrinkage = _ledoit_wolf( X - self.location_, assume_centered=True, block_size=self.block_size ) self.shrinkage_ = shrinkage self._set_covariance(covariance) return self
Fit the Ledoit-Wolf shrunk covariance model to X. Parameters ---------- X : array-like of shape (n_samples, n_features) Training data, where `n_samples` is the number of samples and `n_features` is the number of features. y : Ignored Not used, present for API consistency by convention. Returns ------- self : object Returns the instance itself.
fit
python
scikit-learn/scikit-learn
sklearn/covariance/_shrunk_covariance.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/covariance/_shrunk_covariance.py
BSD-3-Clause
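A short usage sketch of the LedoitWolf estimator whose fit method appears above; the sample is an illustrative assumption.

import numpy as np
from sklearn.covariance import LedoitWolf

rng = np.random.RandomState(0)
X = rng.multivariate_normal(mean=[0, 0], cov=[[0.4, 0.2], [0.2, 0.8]], size=50)

lw = LedoitWolf().fit(X)
print(lw.shrinkage_)   # data-driven shrinkage coefficient
print(lw.covariance_)  # shrunk covariance estimate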
def fit(self, X, y=None): """Fit the Oracle Approximating Shrinkage covariance model to X. Parameters ---------- X : array-like of shape (n_samples, n_features) Training data, where `n_samples` is the number of samples and `n_features` is the number of features. y : Ignored Not used, present for API consistency by convention. Returns ------- self : object Returns the instance itself. """ X = validate_data(self, X) # Not calling the parent object to fit, to avoid computing the # covariance matrix (and potentially the precision) if self.assume_centered: self.location_ = np.zeros(X.shape[1]) else: self.location_ = X.mean(0) covariance, shrinkage = _oas(X - self.location_, assume_centered=True) self.shrinkage_ = shrinkage self._set_covariance(covariance) return self
Fit the Oracle Approximating Shrinkage covariance model to X. Parameters ---------- X : array-like of shape (n_samples, n_features) Training data, where `n_samples` is the number of samples and `n_features` is the number of features. y : Ignored Not used, present for API consistency by convention. Returns ------- self : object Returns the instance itself.
fit
python
scikit-learn/scikit-learn
sklearn/covariance/_shrunk_covariance.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/covariance/_shrunk_covariance.py
BSD-3-Clause
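And the estimator counterpart of the oas function sketched earlier; again, the data is an illustrative assumption.

import numpy as np
from sklearn.covariance import OAS

rng = np.random.RandomState(0)
X = rng.multivariate_normal(mean=[0, 0], cov=[[0.8, 0.3], [0.3, 0.4]], size=500)

est = OAS().fit(X)
print(est.shrinkage_)   # matches the shrinkage returned by the oas function
print(est.covariance_)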
def test_ledoit_wolf_empty_array(ledoit_wolf_fitting_function):
    """Check that we validate X and raise proper error with 0-sample array."""
    X_empty = np.zeros((0, 2))
    with pytest.raises(ValueError, match="Found array with 0 sample"):
        ledoit_wolf_fitting_function(X_empty)
Check that we validate X and raise proper error with 0-sample array.
test_ledoit_wolf_empty_array
python
scikit-learn/scikit-learn
sklearn/covariance/tests/test_covariance.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/covariance/tests/test_covariance.py
BSD-3-Clause
def test_EmpiricalCovariance_validates_mahalanobis():
    """Checks that EmpiricalCovariance validates data with mahalanobis."""
    cov = EmpiricalCovariance().fit(X)
    msg = f"X has 2 features, but \\w+ is expecting {X.shape[1]} features as input"
    with pytest.raises(ValueError, match=msg):
        cov.mahalanobis(X[:, :2])
Checks that EmpiricalCovariance validates data with mahalanobis.
test_EmpiricalCovariance_validates_mahalanobis
python
scikit-learn/scikit-learn
sklearn/covariance/tests/test_covariance.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/covariance/tests/test_covariance.py
BSD-3-Clause
def test_graphical_lassos(random_state=1): """Test the graphical lasso solvers. This checks is unstable for some random seeds where the covariance found with "cd" and "lars" solvers are different (4 cases / 100 tries). """ # Sample data from a sparse multivariate normal dim = 20 n_samples = 100 random_state = check_random_state(random_state) prec = make_sparse_spd_matrix(dim, alpha=0.95, random_state=random_state) cov = linalg.inv(prec) X = random_state.multivariate_normal(np.zeros(dim), cov, size=n_samples) emp_cov = empirical_covariance(X) for alpha in (0.0, 0.1, 0.25): covs = dict() icovs = dict() for method in ("cd", "lars"): cov_, icov_, costs = graphical_lasso( emp_cov, return_costs=True, alpha=alpha, mode=method ) covs[method] = cov_ icovs[method] = icov_ costs, dual_gap = np.array(costs).T # Check that the costs always decrease (doesn't hold if alpha == 0) if not alpha == 0: # use 1e-12 since the cost can be exactly 0 assert_array_less(np.diff(costs), 1e-12) # Check that the 2 approaches give similar results assert_allclose(covs["cd"], covs["lars"], atol=5e-4) assert_allclose(icovs["cd"], icovs["lars"], atol=5e-4) # Smoke test the estimator model = GraphicalLasso(alpha=0.25).fit(X) model.score(X) assert_array_almost_equal(model.covariance_, covs["cd"], decimal=4) assert_array_almost_equal(model.covariance_, covs["lars"], decimal=4) # For a centered matrix, assume_centered could be chosen True or False # Check that this returns indeed the same result for centered data Z = X - X.mean(0) precs = list() for assume_centered in (False, True): prec_ = GraphicalLasso(assume_centered=assume_centered).fit(Z).precision_ precs.append(prec_) assert_array_almost_equal(precs[0], precs[1])
Test the graphical lasso solvers. This check is unstable for some random seeds, where the covariances found with the "cd" and "lars" solvers differ (4 cases out of 100 tries).
test_graphical_lassos
python
scikit-learn/scikit-learn
sklearn/covariance/tests/test_graphical_lasso.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/covariance/tests/test_graphical_lasso.py
BSD-3-Clause
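The test above exercises both graphical lasso solvers; a plain usage sketch of the public estimator on similar sparse-precision data may help. The data generation and the alpha value are assumptions.

import numpy as np
from sklearn.covariance import GraphicalLasso
from sklearn.datasets import make_sparse_spd_matrix

rng = np.random.RandomState(1)
prec = make_sparse_spd_matrix(10, alpha=0.9, random_state=rng)
cov = np.linalg.inv(prec)
X = rng.multivariate_normal(np.zeros(10), cov, size=200)

model = GraphicalLasso(alpha=0.1).fit(X)
print(np.round(model.precision_, 2))  # sparse-ish estimated precision matrix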
def test_graphical_lasso_when_alpha_equals_0():
    """Test graphical_lasso's early return condition when alpha=0."""
    X = np.random.randn(100, 10)
    emp_cov = empirical_covariance(X, assume_centered=True)
    model = GraphicalLasso(alpha=0, covariance="precomputed").fit(emp_cov)
    assert_allclose(model.precision_, np.linalg.inv(emp_cov))
    _, precision = graphical_lasso(emp_cov, alpha=0)
    assert_allclose(precision, np.linalg.inv(emp_cov))
Test graphical_lasso's early return condition when alpha=0.
test_graphical_lasso_when_alpha_equals_0
python
scikit-learn/scikit-learn
sklearn/covariance/tests/test_graphical_lasso.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/covariance/tests/test_graphical_lasso.py
BSD-3-Clause
def test_graphical_lasso_cv_alphas_iterable(alphas_container_type): """Check that we can pass an array-like to `alphas`. Non-regression test for: https://github.com/scikit-learn/scikit-learn/issues/22489 """ true_cov = np.array( [ [0.8, 0.0, 0.2, 0.0], [0.0, 0.4, 0.0, 0.0], [0.2, 0.0, 0.3, 0.1], [0.0, 0.0, 0.1, 0.7], ] ) rng = np.random.RandomState(0) X = rng.multivariate_normal(mean=[0, 0, 0, 0], cov=true_cov, size=200) alphas = _convert_container([0.02, 0.03], alphas_container_type) GraphicalLassoCV(alphas=alphas, tol=1e-1, n_jobs=1).fit(X)
Check that we can pass an array-like to `alphas`. Non-regression test for: https://github.com/scikit-learn/scikit-learn/issues/22489
test_graphical_lasso_cv_alphas_iterable
python
scikit-learn/scikit-learn
sklearn/covariance/tests/test_graphical_lasso.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/covariance/tests/test_graphical_lasso.py
BSD-3-Clause
def test_graphical_lasso_cv_alphas_invalid_array(alphas, err_type, err_msg): """Check that if an array-like containing a value outside of (0, inf] is passed to `alphas`, a ValueError is raised. Check if a string is passed, a TypeError is raised. """ true_cov = np.array( [ [0.8, 0.0, 0.2, 0.0], [0.0, 0.4, 0.0, 0.0], [0.2, 0.0, 0.3, 0.1], [0.0, 0.0, 0.1, 0.7], ] ) rng = np.random.RandomState(0) X = rng.multivariate_normal(mean=[0, 0, 0, 0], cov=true_cov, size=200) with pytest.raises(err_type, match=err_msg): GraphicalLassoCV(alphas=alphas, tol=1e-1, n_jobs=1).fit(X)
Check that a ValueError is raised if an array-like containing a value outside of (0, inf] is passed to `alphas`, and that a TypeError is raised if a string is passed.
test_graphical_lasso_cv_alphas_invalid_array
python
scikit-learn/scikit-learn
sklearn/covariance/tests/test_graphical_lasso.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/covariance/tests/test_graphical_lasso.py
BSD-3-Clause
def test_graphical_lasso_cv_scores_with_routing(global_random_seed): """Check that `GraphicalLassoCV` internally dispatches metadata to the splitter. """ splits = 5 n_alphas = 5 n_refinements = 3 true_cov = np.array( [ [0.8, 0.0, 0.2, 0.0], [0.0, 0.4, 0.0, 0.0], [0.2, 0.0, 0.3, 0.1], [0.0, 0.0, 0.1, 0.7], ] ) rng = np.random.RandomState(global_random_seed) X = rng.multivariate_normal(mean=[0, 0, 0, 0], cov=true_cov, size=300) n_samples = X.shape[0] groups = rng.randint(0, 5, n_samples) params = {"groups": groups} cv = GroupKFold(n_splits=splits) cv.set_split_request(groups=True) cov = GraphicalLassoCV(cv=cv, alphas=n_alphas, n_refinements=n_refinements).fit( X, **params ) _assert_graphical_lasso_cv_scores( cov=cov, n_splits=splits, n_refinements=n_refinements, n_alphas=n_alphas, )
Check that `GraphicalLassoCV` internally dispatches metadata to the splitter.
test_graphical_lasso_cv_scores_with_routing
python
scikit-learn/scikit-learn
sklearn/covariance/tests/test_graphical_lasso.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/covariance/tests/test_graphical_lasso.py
BSD-3-Clause
def _get_first_singular_vectors_power_method( X, y, mode="A", max_iter=500, tol=1e-06, norm_y_weights=False ): """Return the first left and right singular vectors of X'y. Provides an alternative to the svd(X'y) and uses the power method instead. With norm_y_weights to True and in mode A, this corresponds to the algorithm section 11.3 of the Wegelin's review, except this starts at the "update saliences" part. """ eps = np.finfo(X.dtype).eps try: y_score = next(col for col in y.T if np.any(np.abs(col) > eps)) except StopIteration as e: raise StopIteration("y residual is constant") from e x_weights_old = 100 # init to big value for first convergence check if mode == "B": # Precompute pseudo inverse matrices # Basically: X_pinv = (X.T X)^-1 X.T # Which requires inverting a (n_features, n_features) matrix. # As a result, and as detailed in the Wegelin's review, CCA (i.e. mode # B) will be unstable if n_features > n_samples or n_targets > # n_samples X_pinv, y_pinv = _pinv2_old(X), _pinv2_old(y) for i in range(max_iter): if mode == "B": x_weights = np.dot(X_pinv, y_score) else: x_weights = np.dot(X.T, y_score) / np.dot(y_score, y_score) x_weights /= np.sqrt(np.dot(x_weights, x_weights)) + eps x_score = np.dot(X, x_weights) if mode == "B": y_weights = np.dot(y_pinv, x_score) else: y_weights = np.dot(y.T, x_score) / np.dot(x_score.T, x_score) if norm_y_weights: y_weights /= np.sqrt(np.dot(y_weights, y_weights)) + eps y_score = np.dot(y, y_weights) / (np.dot(y_weights, y_weights) + eps) x_weights_diff = x_weights - x_weights_old if np.dot(x_weights_diff, x_weights_diff) < tol or y.shape[1] == 1: break x_weights_old = x_weights n_iter = i + 1 if n_iter == max_iter: warnings.warn("Maximum number of iterations reached", ConvergenceWarning) return x_weights, y_weights, n_iter
Return the first left and right singular vectors of X'y. Provides an alternative to svd(X'y) by using the power method instead. With norm_y_weights set to True and in mode A, this corresponds to the algorithm in section 11.3 of Wegelin's review, except that it starts at the "update saliences" part.
_get_first_singular_vectors_power_method
python
scikit-learn/scikit-learn
sklearn/cross_decomposition/_pls.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cross_decomposition/_pls.py
BSD-3-Clause
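The record above describes a private NIPALS-style helper. The core of its mode-A power iteration can be sketched in plain NumPy, alternating between the left and right directions of C = X.T @ y and comparing against the full SVD. This is an illustrative sketch with made-up data, not the library routine itself.

import numpy as np

rng = np.random.RandomState(0)
X = rng.randn(50, 5)
y = rng.randn(50, 3)
eps = np.finfo(X.dtype).eps

y_score = y[:, 0].copy()           # start from one column of y
x_weights_old = np.full(5, 100.0)  # large init so the first convergence check fails

for _ in range(500):
    # mode A update: regress on the current scores, then normalise the x weights
    x_weights = X.T @ y_score / (y_score @ y_score)
    x_weights /= np.sqrt(x_weights @ x_weights) + eps
    x_score = X @ x_weights
    y_weights = y.T @ x_score / (x_score @ x_score)
    y_score = y @ y_weights / (y_weights @ y_weights + eps)
    diff = x_weights - x_weights_old
    if diff @ diff < 1e-10:
        break
    x_weights_old = x_weights

# The converged x_weights approximate the first left singular vector of X.T @ y
u_first = np.linalg.svd(X.T @ y)[0][:, 0]
print(np.allclose(np.abs(x_weights), np.abs(u_first), atol=1e-4))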
def _get_first_singular_vectors_svd(X, y):
    """Return the first left and right singular vectors of X'y.

    Here the whole SVD is computed.
    """
    C = np.dot(X.T, y)
    U, _, Vt = svd(C, full_matrices=False)
    return U[:, 0], Vt[0, :]
Return the first left and right singular vectors of X'y. Here the whole SVD is computed.
_get_first_singular_vectors_svd
python
scikit-learn/scikit-learn
sklearn/cross_decomposition/_pls.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cross_decomposition/_pls.py
BSD-3-Clause
def _center_scale_xy(X, y, scale=True):
    """Center X and y, and scale them if the `scale` parameter is True.

    Returns
    -------
    X, y, x_mean, y_mean, x_std, y_std
    """
    # center
    x_mean = X.mean(axis=0)
    X -= x_mean
    y_mean = y.mean(axis=0)
    y -= y_mean
    # scale
    if scale:
        x_std = X.std(axis=0, ddof=1)
        x_std[x_std == 0.0] = 1.0
        X /= x_std
        y_std = y.std(axis=0, ddof=1)
        y_std[y_std == 0.0] = 1.0
        y /= y_std
    else:
        x_std = np.ones(X.shape[1])
        y_std = np.ones(y.shape[1])
    return X, y, x_mean, y_mean, x_std, y_std
Center X and y, and scale them if the `scale` parameter is True. Returns ------- X, y, x_mean, y_mean, x_std, y_std
_center_scale_xy
python
scikit-learn/scikit-learn
sklearn/cross_decomposition/_pls.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cross_decomposition/_pls.py
BSD-3-Clause
def _svd_flip_1d(u, v):
    """Same as svd_flip, but works on 1d arrays and operates in place."""
    # svd_flip would force us to convert to 2d array and would also return 2d
    # arrays. We don't want that.
    biggest_abs_val_idx = np.argmax(np.abs(u))
    sign = np.sign(u[biggest_abs_val_idx])
    u *= sign
    v *= sign
Same as svd_flip, but works on 1d arrays and operates in place.
_svd_flip_1d
python
scikit-learn/scikit-learn
sklearn/cross_decomposition/_pls.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cross_decomposition/_pls.py
BSD-3-Clause
def fit(self, X, y): """Fit model to data. Parameters ---------- X : array-like of shape (n_samples, n_features) Training vectors, where `n_samples` is the number of samples and `n_features` is the number of predictors. y : array-like of shape (n_samples,) or (n_samples, n_targets) Target vectors, where `n_samples` is the number of samples and `n_targets` is the number of response variables. Returns ------- self : object Fitted model. """ check_consistent_length(X, y) X = validate_data( self, X, dtype=np.float64, force_writeable=True, copy=self.copy, ensure_min_samples=2, ) y = check_array( y, input_name="y", dtype=np.float64, force_writeable=True, copy=self.copy, ensure_2d=False, ) if y.ndim == 1: self._predict_1d = True y = y.reshape(-1, 1) else: self._predict_1d = False n = X.shape[0] p = X.shape[1] q = y.shape[1] n_components = self.n_components # With PLSRegression n_components is bounded by the rank of (X.T X) see # Wegelin page 25. With CCA and PLSCanonical, n_components is bounded # by the rank of X and the rank of y: see Wegelin page 12 rank_upper_bound = ( min(n, p) if self.deflation_mode == "regression" else min(n, p, q) ) if n_components > rank_upper_bound: raise ValueError( f"`n_components` upper bound is {rank_upper_bound}. " f"Got {n_components} instead. Reduce `n_components`." ) self._norm_y_weights = self.deflation_mode == "canonical" # 1.1 norm_y_weights = self._norm_y_weights # Scale (in place) Xk, yk, self._x_mean, self._y_mean, self._x_std, self._y_std = _center_scale_xy( X, y, self.scale ) self.x_weights_ = np.zeros((p, n_components)) # U self.y_weights_ = np.zeros((q, n_components)) # V self._x_scores = np.zeros((n, n_components)) # Xi self._y_scores = np.zeros((n, n_components)) # Omega self.x_loadings_ = np.zeros((p, n_components)) # Gamma self.y_loadings_ = np.zeros((q, n_components)) # Delta self.n_iter_ = [] # This whole thing corresponds to the algorithm in section 4.1 of the # review from Wegelin. See above for a notation mapping from code to # paper. y_eps = np.finfo(yk.dtype).eps for k in range(n_components): # Find first left and right singular vectors of the X.T.dot(y) # cross-covariance matrix. if self.algorithm == "nipals": # Replace columns that are all close to zero with zeros yk_mask = np.all(np.abs(yk) < 10 * y_eps, axis=0) yk[:, yk_mask] = 0.0 try: ( x_weights, y_weights, n_iter_, ) = _get_first_singular_vectors_power_method( Xk, yk, mode=self.mode, max_iter=self.max_iter, tol=self.tol, norm_y_weights=norm_y_weights, ) except StopIteration as e: if str(e) != "y residual is constant": raise warnings.warn(f"y residual is constant at iteration {k}") break self.n_iter_.append(n_iter_) elif self.algorithm == "svd": x_weights, y_weights = _get_first_singular_vectors_svd(Xk, yk) # inplace sign flip for consistency across solvers and archs _svd_flip_1d(x_weights, y_weights) # compute scores, i.e. 
the projections of X and y x_scores = np.dot(Xk, x_weights) if norm_y_weights: y_ss = 1 else: y_ss = np.dot(y_weights, y_weights) y_scores = np.dot(yk, y_weights) / y_ss # Deflation: subtract rank-one approx to obtain Xk+1 and yk+1 x_loadings = np.dot(x_scores, Xk) / np.dot(x_scores, x_scores) Xk -= np.outer(x_scores, x_loadings) if self.deflation_mode == "canonical": # regress yk on y_score y_loadings = np.dot(y_scores, yk) / np.dot(y_scores, y_scores) yk -= np.outer(y_scores, y_loadings) if self.deflation_mode == "regression": # regress yk on x_score y_loadings = np.dot(x_scores, yk) / np.dot(x_scores, x_scores) yk -= np.outer(x_scores, y_loadings) self.x_weights_[:, k] = x_weights self.y_weights_[:, k] = y_weights self._x_scores[:, k] = x_scores self._y_scores[:, k] = y_scores self.x_loadings_[:, k] = x_loadings self.y_loadings_[:, k] = y_loadings # X was approximated as Xi . Gamma.T + X_(R+1) # Xi . Gamma.T is a sum of n_components rank-1 matrices. X_(R+1) is # whatever is left to fully reconstruct X, and can be 0 if X is of rank # n_components. # Similarly, y was approximated as Omega . Delta.T + y_(R+1) # Compute transformation matrices (rotations_). See User Guide. self.x_rotations_ = np.dot( self.x_weights_, pinv(np.dot(self.x_loadings_.T, self.x_weights_), check_finite=False), ) self.y_rotations_ = np.dot( self.y_weights_, pinv(np.dot(self.y_loadings_.T, self.y_weights_), check_finite=False), ) self.coef_ = np.dot(self.x_rotations_, self.y_loadings_.T) self.coef_ = (self.coef_ * self._y_std).T / self._x_std self.intercept_ = self._y_mean self._n_features_out = self.x_rotations_.shape[1] return self
Fit model to data. Parameters ---------- X : array-like of shape (n_samples, n_features) Training vectors, where `n_samples` is the number of samples and `n_features` is the number of predictors. y : array-like of shape (n_samples,) or (n_samples, n_targets) Target vectors, where `n_samples` is the number of samples and `n_targets` is the number of response variables. Returns ------- self : object Fitted model.
fit
python
scikit-learn/scikit-learn
sklearn/cross_decomposition/_pls.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cross_decomposition/_pls.py
BSD-3-Clause
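A usage sketch of the fit method documented above, through PLSRegression; the synthetic low-rank regression data and printed shapes are illustrative assumptions.

import numpy as np
from sklearn.cross_decomposition import PLSRegression

rng = np.random.RandomState(0)
X = rng.randn(100, 10)
y = X[:, :2] @ rng.randn(2, 3) + 0.1 * rng.randn(100, 3)

pls = PLSRegression(n_components=2).fit(X, y)
print(pls.x_weights_.shape)   # (10, 2)
print(pls.y_loadings_.shape)  # (3, 2)
print(pls.coef_.shape)        # (3, 10): (n_targets, n_features), see the transposition above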
def transform(self, X, y=None, copy=True): """Apply the dimension reduction. Parameters ---------- X : array-like of shape (n_samples, n_features) Samples to transform. y : array-like of shape (n_samples, n_targets), default=None Target vectors. copy : bool, default=True Whether to copy `X` and `y`, or perform in-place normalization. Returns ------- x_scores, y_scores : array-like or tuple of array-like Return `x_scores` if `y` is not given, `(x_scores, y_scores)` otherwise. """ check_is_fitted(self) X = validate_data(self, X, copy=copy, dtype=FLOAT_DTYPES, reset=False) # Normalize X -= self._x_mean X /= self._x_std # Apply rotation x_scores = np.dot(X, self.x_rotations_) if y is not None: y = check_array( y, input_name="y", ensure_2d=False, copy=copy, dtype=FLOAT_DTYPES ) if y.ndim == 1: y = y.reshape(-1, 1) y -= self._y_mean y /= self._y_std y_scores = np.dot(y, self.y_rotations_) return x_scores, y_scores return x_scores
Apply the dimension reduction. Parameters ---------- X : array-like of shape (n_samples, n_features) Samples to transform. y : array-like of shape (n_samples, n_targets), default=None Target vectors. copy : bool, default=True Whether to copy `X` and `y`, or perform in-place normalization. Returns ------- x_scores, y_scores : array-like or tuple of array-like Return `x_scores` if `y` is not given, `(x_scores, y_scores)` otherwise.
transform
python
scikit-learn/scikit-learn
sklearn/cross_decomposition/_pls.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cross_decomposition/_pls.py
BSD-3-Clause
def inverse_transform(self, X, y=None): """Transform data back to its original space. Parameters ---------- X : array-like of shape (n_samples, n_components) New data, where `n_samples` is the number of samples and `n_components` is the number of pls components. y : array-like of shape (n_samples,) or (n_samples, n_components) New target, where `n_samples` is the number of samples and `n_components` is the number of pls components. Returns ------- X_original : ndarray of shape (n_samples, n_features) Return the reconstructed `X` data. y_original : ndarray of shape (n_samples, n_targets) Return the reconstructed `X` target. Only returned when `y` is given. Notes ----- This transformation will only be exact if `n_components=n_features`. """ check_is_fitted(self) X = check_array(X, input_name="X", dtype=FLOAT_DTYPES) # From pls space to original space X_reconstructed = np.matmul(X, self.x_loadings_.T) # Denormalize X_reconstructed *= self._x_std X_reconstructed += self._x_mean if y is not None: y = check_array(y, input_name="y", dtype=FLOAT_DTYPES) # From pls space to original space y_reconstructed = np.matmul(y, self.y_loadings_.T) # Denormalize y_reconstructed *= self._y_std y_reconstructed += self._y_mean return X_reconstructed, y_reconstructed return X_reconstructed
Transform data back to its original space. Parameters ---------- X : array-like of shape (n_samples, n_components) New data, where `n_samples` is the number of samples and `n_components` is the number of pls components. y : array-like of shape (n_samples,) or (n_samples, n_components) New target, where `n_samples` is the number of samples and `n_components` is the number of pls components. Returns ------- X_original : ndarray of shape (n_samples, n_features) Return the reconstructed `X` data. y_original : ndarray of shape (n_samples, n_targets) Return the reconstructed `y` target. Only returned when `y` is given. Notes ----- This transformation will only be exact if `n_components=n_features`.
inverse_transform
python
scikit-learn/scikit-learn
sklearn/cross_decomposition/_pls.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cross_decomposition/_pls.py
BSD-3-Clause
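A round-trip sketch for transform / inverse_transform; per the Notes above, the reconstruction is only near exact here because n_components equals n_features. The data is an illustrative assumption.

import numpy as np
from sklearn.cross_decomposition import PLSRegression

rng = np.random.RandomState(0)
X = rng.randn(60, 4)
y = rng.randn(60, 2)

pls = PLSRegression(n_components=4).fit(X, y)  # n_components == n_features
X_scores = pls.transform(X)
X_back = pls.inverse_transform(X_scores)
print(np.abs(X - X_back).max())  # close to 0 only because n_components == n_features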
def predict(self, X, copy=True): """Predict targets of given samples. Parameters ---------- X : array-like of shape (n_samples, n_features) Samples. copy : bool, default=True Whether to copy `X` or perform in-place normalization. Returns ------- y_pred : ndarray of shape (n_samples,) or (n_samples, n_targets) Returns predicted values. Notes ----- This call requires the estimation of a matrix of shape `(n_features, n_targets)`, which may be an issue in high dimensional space. """ check_is_fitted(self) X = validate_data(self, X, copy=copy, dtype=FLOAT_DTYPES, reset=False) # Only center X but do not scale it since the coefficients are already scaled X -= self._x_mean y_pred = X @ self.coef_.T + self.intercept_ return y_pred.ravel() if self._predict_1d else y_pred
Predict targets of given samples. Parameters ---------- X : array-like of shape (n_samples, n_features) Samples. copy : bool, default=True Whether to copy `X` or perform in-place normalization. Returns ------- y_pred : ndarray of shape (n_samples,) or (n_samples, n_targets) Returns predicted values. Notes ----- This call requires the estimation of a matrix of shape `(n_features, n_targets)`, which may be an issue in high dimensional space.
predict
python
scikit-learn/scikit-learn
sklearn/cross_decomposition/_pls.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cross_decomposition/_pls.py
BSD-3-Clause
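A sketch of predict with a one-dimensional target; as the _predict_1d flag in the fit code above indicates, the prediction shape follows the shape of the y passed to fit. The data is an illustrative assumption.

import numpy as np
from sklearn.cross_decomposition import PLSRegression

rng = np.random.RandomState(0)
X = rng.randn(80, 6)
y = X @ rng.randn(6) + 0.05 * rng.randn(80)  # 1d target

pls = PLSRegression(n_components=3).fit(X, y)
y_pred = pls.predict(X[:5])
print(y_pred.shape)  # (5,) because fit saw a 1d target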
def fit(self, X, y): """Fit model to data. Parameters ---------- X : array-like of shape (n_samples, n_features) Training vectors, where `n_samples` is the number of samples and `n_features` is the number of predictors. y : array-like of shape (n_samples,) or (n_samples, n_targets) Target vectors, where `n_samples` is the number of samples and `n_targets` is the number of response variables. Returns ------- self : object Fitted model. """ super().fit(X, y) # expose the fitted attributes `x_scores_` and `y_scores_` self.x_scores_ = self._x_scores self.y_scores_ = self._y_scores return self
Fit model to data. Parameters ---------- X : array-like of shape (n_samples, n_features) Training vectors, where `n_samples` is the number of samples and `n_features` is the number of predictors. y : array-like of shape (n_samples,) or (n_samples, n_targets) Target vectors, where `n_samples` is the number of samples and `n_targets` is the number of response variables. Returns ------- self : object Fitted model.
fit
python
scikit-learn/scikit-learn
sklearn/cross_decomposition/_pls.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cross_decomposition/_pls.py
BSD-3-Clause
def fit(self, X, y): """Fit model to data. Parameters ---------- X : array-like of shape (n_samples, n_features) Training samples. y : array-like of shape (n_samples,) or (n_samples, n_targets) Targets. Returns ------- self : object Fitted estimator. """ check_consistent_length(X, y) X = validate_data( self, X, dtype=np.float64, force_writeable=True, copy=self.copy, ensure_min_samples=2, ) y = check_array( y, input_name="y", dtype=np.float64, force_writeable=True, copy=self.copy, ensure_2d=False, ) if y.ndim == 1: y = y.reshape(-1, 1) # we'll compute the SVD of the cross-covariance matrix = X.T.dot(y) # This matrix rank is at most min(n_samples, n_features, n_targets) so # n_components cannot be bigger than that. n_components = self.n_components rank_upper_bound = min(X.shape[0], X.shape[1], y.shape[1]) if n_components > rank_upper_bound: raise ValueError( f"`n_components` upper bound is {rank_upper_bound}. " f"Got {n_components} instead. Reduce `n_components`." ) X, y, self._x_mean, self._y_mean, self._x_std, self._y_std = _center_scale_xy( X, y, self.scale ) # Compute SVD of cross-covariance matrix C = np.dot(X.T, y) U, s, Vt = svd(C, full_matrices=False) U = U[:, :n_components] Vt = Vt[:n_components] U, Vt = svd_flip(U, Vt) V = Vt.T self.x_weights_ = U self.y_weights_ = V self._n_features_out = self.x_weights_.shape[1] return self
Fit model to data. Parameters ---------- X : array-like of shape (n_samples, n_features) Training samples. y : array-like of shape (n_samples,) or (n_samples, n_targets) Targets. Returns ------- self : object Fitted estimator.
fit
python
scikit-learn/scikit-learn
sklearn/cross_decomposition/_pls.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cross_decomposition/_pls.py
BSD-3-Clause
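A minimal usage sketch of PLSSVD, whose fit method is documented above; the random data and component count are illustrative assumptions.

import numpy as np
from sklearn.cross_decomposition import PLSSVD

rng = np.random.RandomState(0)
X = rng.randn(40, 5)
y = rng.randn(40, 3)

plssvd = PLSSVD(n_components=2).fit(X, y)
print(plssvd.x_weights_.shape)  # (5, 2)
print(plssvd.y_weights_.shape)  # (3, 2)
X_scores, y_scores = plssvd.transform(X, y)
print(X_scores.shape, y_scores.shape)  # (40, 2) (40, 2)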
def transform(self, X, y=None): """ Apply the dimensionality reduction. Parameters ---------- X : array-like of shape (n_samples, n_features) Samples to be transformed. y : array-like of shape (n_samples,) or (n_samples, n_targets), \ default=None Targets. Returns ------- x_scores : array-like or tuple of array-like The transformed data `X_transformed` if `y is not None`, `(X_transformed, y_transformed)` otherwise. """ check_is_fitted(self) X = validate_data(self, X, dtype=np.float64, reset=False) Xr = (X - self._x_mean) / self._x_std x_scores = np.dot(Xr, self.x_weights_) if y is not None: y = check_array(y, input_name="y", ensure_2d=False, dtype=np.float64) if y.ndim == 1: y = y.reshape(-1, 1) yr = (y - self._y_mean) / self._y_std y_scores = np.dot(yr, self.y_weights_) return x_scores, y_scores return x_scores
Apply the dimensionality reduction. Parameters ---------- X : array-like of shape (n_samples, n_features) Samples to be transformed. y : array-like of shape (n_samples,) or (n_samples, n_targets), default=None Targets. Returns ------- x_scores : array-like or tuple of array-like The transformed data `X_transformed` if `y` is `None`, `(X_transformed, y_transformed)` otherwise.
transform
python
scikit-learn/scikit-learn
sklearn/cross_decomposition/_pls.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cross_decomposition/_pls.py
BSD-3-Clause
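And a sketch of the corresponding transform: passing y returns the pair of score matrices, omitting it returns only the X scores (per the corrected Returns description above).

import numpy as np
from sklearn.cross_decomposition import PLSSVD

rng = np.random.RandomState(0)
X, y = rng.randn(30, 5), rng.randn(30, 3)
plssvd = PLSSVD(n_components=2).fit(X, y)

x_scores = plssvd.transform(X)               # ndarray of shape (30, 2)
x_scores, y_scores = plssvd.transform(X, y)  # both of shape (30, 2)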
def test_scale_and_stability(Est, X, y): """scale=True is equivalent to scale=False on centered/scaled data. This also allows checking numerical stability across platforms.""" X_s, y_s, *_ = _center_scale_xy(X, y) X_score, y_score = Est(scale=True).fit_transform(X, y) X_s_score, y_s_score = Est(scale=False).fit_transform(X_s, y_s) assert_allclose(X_s_score, X_score, atol=1e-4) assert_allclose(y_s_score, y_score, atol=1e-4)
scale=True is equivalent to scale=False on centered/scaled data. This also allows checking numerical stability across platforms.
test_scale_and_stability
python
scikit-learn/scikit-learn
sklearn/cross_decomposition/tests/test_pls.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cross_decomposition/tests/test_pls.py
BSD-3-Clause
def test_n_components_upper_bounds(Estimator): """Check the validation of `n_components` upper bounds for `PLS` regressors.""" rng = np.random.RandomState(0) X = rng.randn(10, 5) y = rng.randn(10, 3) est = Estimator(n_components=10) err_msg = "`n_components` upper bound is .*. Got 10 instead. Reduce `n_components`." with pytest.raises(ValueError, match=err_msg): est.fit(X, y)
Check the validation of `n_components` upper bounds for `PLS` regressors.
test_n_components_upper_bounds
python
scikit-learn/scikit-learn
sklearn/cross_decomposition/tests/test_pls.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cross_decomposition/tests/test_pls.py
BSD-3-Clause
def test_n_components_upper_PLSRegression(): """Check the validation of `n_components` upper bounds for PLSRegression.""" rng = np.random.RandomState(0) X = rng.randn(20, 64) y = rng.randn(20, 3) est = PLSRegression(n_components=30) err_msg = "`n_components` upper bound is 20. Got 30 instead. Reduce `n_components`." with pytest.raises(ValueError, match=err_msg): est.fit(X, y)
Check the validation of `n_components` upper bounds for PLSRegression.
test_n_components_upper_PLSRegression
python
scikit-learn/scikit-learn
sklearn/cross_decomposition/tests/test_pls.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cross_decomposition/tests/test_pls.py
BSD-3-Clause
def test_loadings_converges(global_random_seed): """Test that CCA converges. Non-regression test for #19549.""" X, y = make_regression( n_samples=200, n_features=20, n_targets=20, random_state=global_random_seed ) cca = CCA(n_components=10, max_iter=500) with warnings.catch_warnings(): warnings.simplefilter("error", ConvergenceWarning) cca.fit(X, y) # Loadings converges to reasonable values assert np.all(np.abs(cca.x_loadings_) < 1)
Test that CCA converges. Non-regression test for #19549.
test_loadings_converges
python
scikit-learn/scikit-learn
sklearn/cross_decomposition/tests/test_pls.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cross_decomposition/tests/test_pls.py
BSD-3-Clause
def test_pls_constant_y(): """Check that a warning is raised when `y` is constant. Non-regression test for #19831.""" rng = np.random.RandomState(42) x = rng.rand(100, 3) y = np.zeros(100) pls = PLSRegression() msg = "y residual is constant at iteration" with pytest.warns(UserWarning, match=msg): pls.fit(x, y) assert_allclose(pls.x_rotations_, 0)
Check that a warning is raised when `y` is constant. Non-regression test for #19831.
test_pls_constant_y
python
scikit-learn/scikit-learn
sklearn/cross_decomposition/tests/test_pls.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cross_decomposition/tests/test_pls.py
BSD-3-Clause
def test_pls_coef_shape(PLSEstimator): """Check the shape of `coef_` attribute. Non-regression test for: https://github.com/scikit-learn/scikit-learn/issues/12410 """ d = load_linnerud() X = d.data y = d.target pls = PLSEstimator(copy=True).fit(X, y) n_targets, n_features = y.shape[1], X.shape[1] assert pls.coef_.shape == (n_targets, n_features)
Check the shape of `coef_` attribute. Non-regression test for: https://github.com/scikit-learn/scikit-learn/issues/12410
test_pls_coef_shape
python
scikit-learn/scikit-learn
sklearn/cross_decomposition/tests/test_pls.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cross_decomposition/tests/test_pls.py
BSD-3-Clause
def test_pls_prediction(PLSEstimator, scale): """Check the behaviour of the prediction function.""" d = load_linnerud() X = d.data y = d.target pls = PLSEstimator(copy=True, scale=scale).fit(X, y) y_pred = pls.predict(X, copy=True) y_mean = y.mean(axis=0) X_trans = X - X.mean(axis=0) assert_allclose(pls.intercept_, y_mean) assert_allclose(y_pred, X_trans @ pls.coef_.T + pls.intercept_)
Check the behaviour of the prediction function.
test_pls_prediction
python
scikit-learn/scikit-learn
sklearn/cross_decomposition/tests/test_pls.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cross_decomposition/tests/test_pls.py
BSD-3-Clause
def test_pls_regression_fit_1d_y(): """Check that when fitting with 1d `y`, prediction should also be 1d. Non-regression test for Issue #26549. """ X = np.array([[1, 1], [2, 4], [3, 9], [4, 16], [5, 25], [6, 36]]) y = np.array([2, 6, 12, 20, 30, 42]) expected = y.copy() plsr = PLSRegression().fit(X, y) y_pred = plsr.predict(X) assert y_pred.shape == expected.shape # Check that it works in VotingRegressor lr = LinearRegression().fit(X, y) vr = VotingRegressor([("lr", lr), ("plsr", plsr)]) y_pred = vr.fit(X, y).predict(X) assert y_pred.shape == expected.shape assert_allclose(y_pred, expected)
Check that when fitting with 1d `y`, prediction should also be 1d. Non-regression test for Issue #26549.
test_pls_regression_fit_1d_y
python
scikit-learn/scikit-learn
sklearn/cross_decomposition/tests/test_pls.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cross_decomposition/tests/test_pls.py
BSD-3-Clause
def test_pls_regression_scaling_coef(): """Check that when using `scale=True`, the coefficients are using the std. dev. from both `X` and `y`. Non-regression test for: https://github.com/scikit-learn/scikit-learn/issues/27964 """ # handcrafted data where we can predict y from X with an additional scaling factor rng = np.random.RandomState(0) coef = rng.uniform(size=(3, 5)) X = rng.normal(scale=10, size=(30, 5)) # add a std of 10 y = X @ coef.T # we need to make sure that the dimension of the latent space is large enough to # perfectly predict `y` from `X` (no information loss) pls = PLSRegression(n_components=5, scale=True).fit(X, y) assert_allclose(pls.coef_, coef) # we therefore should be able to predict `y` from `X` assert_allclose(pls.predict(X), y)
Check that when using `scale=True`, the coefficients are using the std. dev. from both `X` and `y`. Non-regression test for: https://github.com/scikit-learn/scikit-learn/issues/27964
test_pls_regression_scaling_coef
python
scikit-learn/scikit-learn
sklearn/cross_decomposition/tests/test_pls.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cross_decomposition/tests/test_pls.py
BSD-3-Clause
def _split_sparse_columns( arff_data: ArffSparseDataType, include_columns: List ) -> ArffSparseDataType: """Obtain several columns from a sparse ARFF representation. The column indices are also re-labelled to account for the columns that are not included (e.g., when including [1, 2, 3], the columns are relabelled to [0, 1, 2]). Parameters ---------- arff_data : tuple A tuple of three lists of equal size; the first list holds the values, the second the row indices and the third the column indices. include_columns : list A list of columns to include. Returns ------- arff_data_new : tuple Subset of the ARFF data containing only the columns indicated by the include_columns argument. """ arff_data_new: ArffSparseDataType = (list(), list(), list()) reindexed_columns = { column_idx: array_idx for array_idx, column_idx in enumerate(include_columns) } for val, row_idx, col_idx in zip(arff_data[0], arff_data[1], arff_data[2]): if col_idx in include_columns: arff_data_new[0].append(val) arff_data_new[1].append(row_idx) arff_data_new[2].append(reindexed_columns[col_idx]) return arff_data_new
Obtain several columns from a sparse ARFF representation. The column indices are also re-labelled to account for the columns that are not included (e.g., when including [1, 2, 3], the columns are relabelled to [0, 1, 2]). Parameters ---------- arff_data : tuple A tuple of three lists of equal size; the first list holds the values, the second the row indices and the third the column indices. include_columns : list A list of columns to include. Returns ------- arff_data_new : tuple Subset of the ARFF data containing only the columns indicated by the include_columns argument.
_split_sparse_columns
python
scikit-learn/scikit-learn
sklearn/datasets/_arff_parser.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/datasets/_arff_parser.py
BSD-3-Clause
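A self-contained sketch of the re-labelling performed by the helper above, on a tiny hand-made (values, row indices, column indices) triplet; the logic is replicated inline rather than importing the private module.

# sparse ARFF payload: values, row indices, column indices
arff_data = ([1.0, 2.0, 3.0], [0, 0, 1], [0, 2, 3])
include_columns = [2, 3]

# map each kept column index to its new position
reindexed = {col: new_idx for new_idx, col in enumerate(include_columns)}
filtered = ([], [], [])
for value, row, col in zip(*arff_data):
    if col in include_columns:
        filtered[0].append(value)
        filtered[1].append(row)
        filtered[2].append(reindexed[col])

print(filtered)  # ([2.0, 3.0], [0, 1], [0, 1])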
def _post_process_frame(frame, feature_names, target_names): """Post process a dataframe to select the desired columns in `X` and `y`. Parameters ---------- frame : dataframe The dataframe to split into `X` and `y`. feature_names : list of str The list of feature names to populate `X`. target_names : list of str The list of target names to populate `y`. Returns ------- X : dataframe The dataframe containing the features. y : {series, dataframe} or None The series or dataframe containing the target. """ X = frame[feature_names] if len(target_names) >= 2: y = frame[target_names] elif len(target_names) == 1: y = frame[target_names[0]] else: y = None return X, y
Post process a dataframe to select the desired columns in `X` and `y`. Parameters ---------- frame : dataframe The dataframe to split into `X` and `y`. feature_names : list of str The list of feature names to populate `X`. target_names : list of str The list of target names to populate `y`. Returns ------- X : dataframe The dataframe containing the features. y : {series, dataframe} or None The series or dataframe containing the target.
_post_process_frame
python
scikit-learn/scikit-learn
sklearn/datasets/_arff_parser.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/datasets/_arff_parser.py
BSD-3-Clause
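A small pandas sketch of the same X/y split rule, shown standalone since the helper above is private; the column names are made up for illustration.

import pandas as pd

frame = pd.DataFrame({"f1": [1, 2], "f2": [3, 4], "label": ["a", "b"]})
feature_names, target_names = ["f1", "f2"], ["label"]

X = frame[feature_names]
if len(target_names) >= 2:
    y = frame[target_names]       # DataFrame for multi-output targets
elif len(target_names) == 1:
    y = frame[target_names[0]]    # Series for a single target
else:
    y = None                      # no target columns requested
print(type(X).__name__, type(y).__name__)  # DataFrame Series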
def _liac_arff_parser( gzip_file, output_arrays_type, openml_columns_info, feature_names_to_select, target_names_to_select, shape=None, ): """ARFF parser using the LIAC-ARFF library coded purely in Python. This parser is quite slow but consumes a generator. Currently it is needed to parse sparse datasets. For dense datasets, it is recommended to instead use the pandas-based parser, although it does not always handles the dtypes exactly the same. Parameters ---------- gzip_file : GzipFile instance The file compressed to be read. output_arrays_type : {"numpy", "sparse", "pandas"} The type of the arrays that will be returned. The possibilities ara: - `"numpy"`: both `X` and `y` will be NumPy arrays; - `"sparse"`: `X` will be sparse matrix and `y` will be a NumPy array; - `"pandas"`: `X` will be a pandas DataFrame and `y` will be either a pandas Series or DataFrame. columns_info : dict The information provided by OpenML regarding the columns of the ARFF file. feature_names_to_select : list of str A list of the feature names to be selected. target_names_to_select : list of str A list of the target names to be selected. Returns ------- X : {ndarray, sparse matrix, dataframe} The data matrix. y : {ndarray, dataframe, series} The target. frame : dataframe or None A dataframe containing both `X` and `y`. `None` if `output_array_type != "pandas"`. categories : list of str or None The names of the features that are categorical. `None` if `output_array_type == "pandas"`. """ def _io_to_generator(gzip_file): for line in gzip_file: yield line.decode("utf-8") stream = _io_to_generator(gzip_file) # find which type (dense or sparse) ARFF type we will have to deal with return_type = _arff.COO if output_arrays_type == "sparse" else _arff.DENSE_GEN # we should not let LIAC-ARFF to encode the nominal attributes with NumPy # arrays to have only numerical values. encode_nominal = not (output_arrays_type == "pandas") arff_container = _arff.load( stream, return_type=return_type, encode_nominal=encode_nominal ) columns_to_select = feature_names_to_select + target_names_to_select categories = { name: cat for name, cat in arff_container["attributes"] if isinstance(cat, list) and name in columns_to_select } if output_arrays_type == "pandas": pd = check_pandas_support("fetch_openml with as_frame=True") columns_info = OrderedDict(arff_container["attributes"]) columns_names = list(columns_info.keys()) # calculate chunksize first_row = next(arff_container["data"]) first_df = pd.DataFrame([first_row], columns=columns_names, copy=False) row_bytes = first_df.memory_usage(deep=True).sum() chunksize = get_chunk_n_rows(row_bytes) # read arff data with chunks columns_to_keep = [col for col in columns_names if col in columns_to_select] dfs = [first_df[columns_to_keep]] for data in chunk_generator(arff_container["data"], chunksize): dfs.append( pd.DataFrame(data, columns=columns_names, copy=False)[columns_to_keep] ) # dfs[0] contains only one row, which may not have enough data to infer to # column's dtype. Here we use `dfs[1]` to configure the dtype in dfs[0] if len(dfs) >= 2: dfs[0] = dfs[0].astype(dfs[1].dtypes) # liac-arff parser does not depend on NumPy and uses None to represent # missing values. To be consistent with the pandas parser, we replace # None with np.nan. 
frame = pd.concat(dfs, ignore_index=True) frame = pd_fillna(pd, frame) del dfs, first_df # cast the columns frame dtypes = {} for name in frame.columns: column_dtype = openml_columns_info[name]["data_type"] if column_dtype.lower() == "integer": # Use a pandas extension array instead of np.int64 to be able # to support missing values. dtypes[name] = "Int64" elif column_dtype.lower() == "nominal": dtypes[name] = "category" else: dtypes[name] = frame.dtypes[name] frame = frame.astype(dtypes) X, y = _post_process_frame( frame, feature_names_to_select, target_names_to_select ) else: arff_data = arff_container["data"] feature_indices_to_select = [ int(openml_columns_info[col_name]["index"]) for col_name in feature_names_to_select ] target_indices_to_select = [ int(openml_columns_info[col_name]["index"]) for col_name in target_names_to_select ] if isinstance(arff_data, Generator): if shape is None: raise ValueError( "shape must be provided when arr['data'] is a Generator" ) if shape[0] == -1: count = -1 else: count = shape[0] * shape[1] data = np.fromiter( itertools.chain.from_iterable(arff_data), dtype="float64", count=count, ) data = data.reshape(*shape) X = data[:, feature_indices_to_select] y = data[:, target_indices_to_select] elif isinstance(arff_data, tuple): arff_data_X = _split_sparse_columns(arff_data, feature_indices_to_select) num_obs = max(arff_data[1]) + 1 X_shape = (num_obs, len(feature_indices_to_select)) X = sp.sparse.coo_matrix( (arff_data_X[0], (arff_data_X[1], arff_data_X[2])), shape=X_shape, dtype=np.float64, ) X = X.tocsr() y = _sparse_data_to_array(arff_data, target_indices_to_select) else: # This should never happen raise ValueError( f"Unexpected type for data obtained from arff: {type(arff_data)}" ) is_classification = { col_name in categories for col_name in target_names_to_select } if not is_classification: # No target pass elif all(is_classification): y = np.hstack( [ np.take( np.asarray(categories.pop(col_name), dtype="O"), y[:, i : i + 1].astype(int, copy=False), ) for i, col_name in enumerate(target_names_to_select) ] ) elif any(is_classification): raise ValueError( "Mix of nominal and non-nominal targets is not currently supported" ) # reshape y back to 1-D array, if there is only 1 target column; # back to None if there are not target columns if y.shape[1] == 1: y = y.reshape((-1,)) elif y.shape[1] == 0: y = None if output_arrays_type == "pandas": return X, y, frame, None return X, y, None, categories
ARFF parser using the LIAC-ARFF library coded purely in Python. This parser is quite slow but it consumes a generator, and it is currently needed to parse sparse datasets. For dense datasets, it is recommended to use the pandas-based parser instead, although it does not always handle the dtypes in exactly the same way. Parameters ---------- gzip_file : GzipFile instance The compressed file to be read. output_arrays_type : {"numpy", "sparse", "pandas"} The type of the arrays that will be returned. The possibilities are: - `"numpy"`: both `X` and `y` will be NumPy arrays; - `"sparse"`: `X` will be a sparse matrix and `y` will be a NumPy array; - `"pandas"`: `X` will be a pandas DataFrame and `y` will be either a pandas Series or DataFrame. openml_columns_info : dict The information provided by OpenML regarding the columns of the ARFF file. feature_names_to_select : list of str A list of the feature names to be selected. target_names_to_select : list of str A list of the target names to be selected. shape : tuple of int, default=None The shape of the full dataset, required when the ARFF payload is provided as a generator (dense data). Returns ------- X : {ndarray, sparse matrix, dataframe} The data matrix. y : {ndarray, dataframe, series} The target. frame : dataframe or None A dataframe containing both `X` and `y`. `None` if `output_arrays_type != "pandas"`. categories : list of str or None The names of the features that are categorical. `None` if `output_arrays_type == "pandas"`.
_liac_arff_parser
python
scikit-learn/scikit-learn
sklearn/datasets/_arff_parser.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/datasets/_arff_parser.py
BSD-3-Clause
def _pandas_arff_parser( gzip_file, output_arrays_type, openml_columns_info, feature_names_to_select, target_names_to_select, read_csv_kwargs=None, ): """ARFF parser using `pandas.read_csv`. This parser uses the metadata fetched directly from OpenML and skips the metadata headers of ARFF file itself. The data is loaded as a CSV file. Parameters ---------- gzip_file : GzipFile instance The GZip compressed file with the ARFF formatted payload. output_arrays_type : {"numpy", "sparse", "pandas"} The type of the arrays that will be returned. The possibilities are: - `"numpy"`: both `X` and `y` will be NumPy arrays; - `"sparse"`: `X` will be sparse matrix and `y` will be a NumPy array; - `"pandas"`: `X` will be a pandas DataFrame and `y` will be either a pandas Series or DataFrame. openml_columns_info : dict The information provided by OpenML regarding the columns of the ARFF file. feature_names_to_select : list of str A list of the feature names to be selected to build `X`. target_names_to_select : list of str A list of the target names to be selected to build `y`. read_csv_kwargs : dict, default=None Keyword arguments to pass to `pandas.read_csv`. It allows to overwrite the default options. Returns ------- X : {ndarray, sparse matrix, dataframe} The data matrix. y : {ndarray, dataframe, series} The target. frame : dataframe or None A dataframe containing both `X` and `y`. `None` if `output_array_type != "pandas"`. categories : list of str or None The names of the features that are categorical. `None` if `output_array_type == "pandas"`. """ import pandas as pd # read the file until the data section to skip the ARFF metadata headers for line in gzip_file: if line.decode("utf-8").lower().startswith("@data"): break dtypes = {} for name in openml_columns_info: column_dtype = openml_columns_info[name]["data_type"] if column_dtype.lower() == "integer": # Use Int64 to infer missing values from data # XXX: this line is not covered by our tests. Is this really needed? dtypes[name] = "Int64" elif column_dtype.lower() == "nominal": dtypes[name] = "category" # since we will not pass `names` when reading the ARFF file, we need to translate # `dtypes` from column names to column indices to pass to `pandas.read_csv` dtypes_positional = { col_idx: dtypes[name] for col_idx, name in enumerate(openml_columns_info) if name in dtypes } default_read_csv_kwargs = { "header": None, "index_col": False, # always force pandas to not use the first column as index "na_values": ["?"], # missing values are represented by `?` "keep_default_na": False, # only `?` is a missing value given the ARFF specs "comment": "%", # skip line starting by `%` since they are comments "quotechar": '"', # delimiter to use for quoted strings "skipinitialspace": True, # skip spaces after delimiter to follow ARFF specs "escapechar": "\\", "dtype": dtypes_positional, } read_csv_kwargs = {**default_read_csv_kwargs, **(read_csv_kwargs or {})} frame = pd.read_csv(gzip_file, **read_csv_kwargs) try: # Setting the columns while reading the file will select the N first columns # and not raise a ParserError. Instead, we set the columns after reading the # file and raise a ParserError if the number of columns does not match the # number of columns in the metadata given by OpenML. frame.columns = [name for name in openml_columns_info] except ValueError as exc: raise pd.errors.ParserError( "The number of columns provided by OpenML does not match the number of " "columns inferred by pandas when reading the file." 
) from exc columns_to_select = feature_names_to_select + target_names_to_select columns_to_keep = [col for col in frame.columns if col in columns_to_select] frame = frame[columns_to_keep] # `pd.read_csv` automatically handles double quotes for quoting non-numeric # CSV cell values. Contrary to LIAC-ARFF, `pd.read_csv` cannot be configured to # consider either single quotes and double quotes as valid quoting chars at # the same time since this case does not occur in regular (non-ARFF) CSV files. # To mimic the behavior of LIAC-ARFF parser, we manually strip single quotes # on categories as a post-processing steps if needed. # # Note however that we intentionally do not attempt to do this kind of manual # post-processing of (non-categorical) string-typed columns because we cannot # resolve the ambiguity of the case of CSV cell with nesting quoting such as # `"'some string value'"` with pandas. single_quote_pattern = re.compile(r"^'(?P<contents>.*)'$") def strip_single_quotes(input_string): match = re.search(single_quote_pattern, input_string) if match is None: return input_string return match.group("contents") categorical_columns = [ name for name, dtype in frame.dtypes.items() if isinstance(dtype, pd.CategoricalDtype) ] for col in categorical_columns: frame[col] = frame[col].cat.rename_categories(strip_single_quotes) X, y = _post_process_frame(frame, feature_names_to_select, target_names_to_select) if output_arrays_type == "pandas": return X, y, frame, None else: X, y = X.to_numpy(), y.to_numpy() categories = { name: dtype.categories.tolist() for name, dtype in frame.dtypes.items() if isinstance(dtype, pd.CategoricalDtype) } return X, y, None, categories
ARFF parser using `pandas.read_csv`. This parser uses the metadata fetched directly from OpenML and skips the metadata headers of ARFF file itself. The data is loaded as a CSV file. Parameters ---------- gzip_file : GzipFile instance The GZip compressed file with the ARFF formatted payload. output_arrays_type : {"numpy", "sparse", "pandas"} The type of the arrays that will be returned. The possibilities are: - `"numpy"`: both `X` and `y` will be NumPy arrays; - `"sparse"`: `X` will be sparse matrix and `y` will be a NumPy array; - `"pandas"`: `X` will be a pandas DataFrame and `y` will be either a pandas Series or DataFrame. openml_columns_info : dict The information provided by OpenML regarding the columns of the ARFF file. feature_names_to_select : list of str A list of the feature names to be selected to build `X`. target_names_to_select : list of str A list of the target names to be selected to build `y`. read_csv_kwargs : dict, default=None Keyword arguments to pass to `pandas.read_csv`. It allows to overwrite the default options. Returns ------- X : {ndarray, sparse matrix, dataframe} The data matrix. y : {ndarray, dataframe, series} The target. frame : dataframe or None A dataframe containing both `X` and `y`. `None` if `output_array_type != "pandas"`. categories : list of str or None The names of the features that are categorical. `None` if `output_array_type == "pandas"`.
_pandas_arff_parser
python
scikit-learn/scikit-learn
sklearn/datasets/_arff_parser.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/datasets/_arff_parser.py
BSD-3-Clause
def load_arff_from_gzip_file( gzip_file, parser, output_type, openml_columns_info, feature_names_to_select, target_names_to_select, shape=None, read_csv_kwargs=None, ): """Load a compressed ARFF file using a given parser. Parameters ---------- gzip_file : GzipFile instance The file compressed to be read. parser : {"pandas", "liac-arff"} The parser used to parse the ARFF file. "pandas" is recommended but only supports loading dense datasets. output_type : {"numpy", "sparse", "pandas"} The type of the arrays that will be returned. The possibilities ara: - `"numpy"`: both `X` and `y` will be NumPy arrays; - `"sparse"`: `X` will be sparse matrix and `y` will be a NumPy array; - `"pandas"`: `X` will be a pandas DataFrame and `y` will be either a pandas Series or DataFrame. openml_columns_info : dict The information provided by OpenML regarding the columns of the ARFF file. feature_names_to_select : list of str A list of the feature names to be selected. target_names_to_select : list of str A list of the target names to be selected. read_csv_kwargs : dict, default=None Keyword arguments to pass to `pandas.read_csv`. It allows to overwrite the default options. Returns ------- X : {ndarray, sparse matrix, dataframe} The data matrix. y : {ndarray, dataframe, series} The target. frame : dataframe or None A dataframe containing both `X` and `y`. `None` if `output_array_type != "pandas"`. categories : list of str or None The names of the features that are categorical. `None` if `output_array_type == "pandas"`. """ if parser == "liac-arff": return _liac_arff_parser( gzip_file, output_type, openml_columns_info, feature_names_to_select, target_names_to_select, shape, ) elif parser == "pandas": return _pandas_arff_parser( gzip_file, output_type, openml_columns_info, feature_names_to_select, target_names_to_select, read_csv_kwargs, ) else: raise ValueError( f"Unknown parser: '{parser}'. Should be 'liac-arff' or 'pandas'." )
Load a compressed ARFF file using a given parser. Parameters ---------- gzip_file : GzipFile instance The compressed file to be read. parser : {"pandas", "liac-arff"} The parser used to parse the ARFF file. "pandas" is recommended but only supports loading dense datasets. output_type : {"numpy", "sparse", "pandas"} The type of the arrays that will be returned. The possibilities are: - `"numpy"`: both `X` and `y` will be NumPy arrays; - `"sparse"`: `X` will be a sparse matrix and `y` will be a NumPy array; - `"pandas"`: `X` will be a pandas DataFrame and `y` will be either a pandas Series or DataFrame. openml_columns_info : dict The information provided by OpenML regarding the columns of the ARFF file. feature_names_to_select : list of str A list of the feature names to be selected. target_names_to_select : list of str A list of the target names to be selected. shape : tuple of int, default=None The shape of the full dataset, only used by the "liac-arff" parser when the data is provided as a generator. read_csv_kwargs : dict, default=None Keyword arguments to pass to `pandas.read_csv`. It allows overwriting the default options. Returns ------- X : {ndarray, sparse matrix, dataframe} The data matrix. y : {ndarray, dataframe, series} The target. frame : dataframe or None A dataframe containing both `X` and `y`. `None` if `output_type != "pandas"`. categories : list of str or None The names of the features that are categorical. `None` if `output_type == "pandas"`.
load_arff_from_gzip_file
python
scikit-learn/scikit-learn
sklearn/datasets/_arff_parser.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/datasets/_arff_parser.py
BSD-3-Clause
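A hypothetical call sketch for the dispatcher above, which routes to either the pandas-based or the LIAC-ARFF parser. The gzip path and the openml_columns_info dictionary below are placeholders (in fetch_openml this metadata comes from the OpenML API), and the import is from a private module, so treat this only as an illustration of the expected arguments.

import gzip
from sklearn.datasets._arff_parser import load_arff_from_gzip_file

# placeholder column metadata in the shape the parsers expect
columns_info = {
    "sepallength": {"index": 0, "data_type": "numeric"},
    "sepalwidth": {"index": 1, "data_type": "numeric"},
    "class": {"index": 2, "data_type": "nominal"},
}

with gzip.open("dataset.arff.gz", "rb") as gzip_file:  # hypothetical file
    X, y, frame, categories = load_arff_from_gzip_file(
        gzip_file,
        parser="pandas",          # or "liac-arff" for sparse ARFF data
        output_type="pandas",
        openml_columns_info=columns_info,
        feature_names_to_select=["sepallength", "sepalwidth"],
        target_names_to_select=["class"],
    )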
def get_data_home(data_home=None) -> str: """Return the path of the scikit-learn data directory. This folder is used by some large dataset loaders to avoid downloading the data several times. By default the data directory is set to a folder named 'scikit_learn_data' in the user home folder. Alternatively, it can be set by the 'SCIKIT_LEARN_DATA' environment variable or programmatically by giving an explicit folder path. The '~' symbol is expanded to the user home folder. If the folder does not already exist, it is automatically created. Parameters ---------- data_home : str or path-like, default=None The path to scikit-learn data directory. If `None`, the default path is `~/scikit_learn_data`. Returns ------- data_home: str The path to scikit-learn data directory. Examples -------- >>> import os >>> from sklearn.datasets import get_data_home >>> data_home_path = get_data_home() >>> os.path.exists(data_home_path) True """ if data_home is None: data_home = environ.get("SCIKIT_LEARN_DATA", join("~", "scikit_learn_data")) data_home = expanduser(data_home) makedirs(data_home, exist_ok=True) return data_home
Return the path of the scikit-learn data directory. This folder is used by some large dataset loaders to avoid downloading the data several times. By default the data directory is set to a folder named 'scikit_learn_data' in the user home folder. Alternatively, it can be set by the 'SCIKIT_LEARN_DATA' environment variable or programmatically by giving an explicit folder path. The '~' symbol is expanded to the user home folder. If the folder does not already exist, it is automatically created. Parameters ---------- data_home : str or path-like, default=None The path to scikit-learn data directory. If `None`, the default path is `~/scikit_learn_data`. Returns ------- data_home: str The path to scikit-learn data directory. Examples -------- >>> import os >>> from sklearn.datasets import get_data_home >>> data_home_path = get_data_home() >>> os.path.exists(data_home_path) True
get_data_home
python
scikit-learn/scikit-learn
sklearn/datasets/_base.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/datasets/_base.py
BSD-3-Clause
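A quick usage sketch for get_data_home; the explicit path below is just an example and will be created if it does not exist.

from sklearn.datasets import get_data_home

print(get_data_home())                      # default: ~/scikit_learn_data
print(get_data_home("/tmp/sklearn_cache"))  # explicit folder, created on demand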
def load_files( container_path, *, description=None, categories=None, load_content=True, shuffle=True, encoding=None, decode_error="strict", random_state=0, allowed_extensions=None, ): """Load text files with categories as subfolder names. Individual samples are assumed to be files stored a two levels folder structure such as the following: .. code-block:: text container_folder/ category_1_folder/ file_1.txt file_2.txt ... file_42.txt category_2_folder/ file_43.txt file_44.txt ... The folder names are used as supervised signal label names. The individual file names are not important. This function does not try to extract features into a numpy array or scipy sparse matrix. In addition, if load_content is false it does not try to load the files in memory. To use text files in a scikit-learn classification or clustering algorithm, you will need to use the :mod:`~sklearn.feature_extraction.text` module to build a feature extraction transformer that suits your problem. If you set load_content=True, you should also specify the encoding of the text using the 'encoding' parameter. For many modern text files, 'utf-8' will be the correct encoding. If you leave encoding equal to None, then the content will be made of bytes instead of Unicode, and you will not be able to use most functions in :mod:`~sklearn.feature_extraction.text`. Similar feature extractors should be built for other kind of unstructured data input such as images, audio, video, ... If you want files with a specific file extension (e.g. `.txt`) then you can pass a list of those file extensions to `allowed_extensions`. Read more in the :ref:`User Guide <datasets>`. Parameters ---------- container_path : str Path to the main folder holding one subfolder per category. description : str, default=None A paragraph describing the characteristic of the dataset: its source, reference, etc. categories : list of str, default=None If None (default), load all the categories. If not None, list of category names to load (other categories ignored). load_content : bool, default=True Whether to load or not the content of the different files. If true a 'data' attribute containing the text information is present in the data structure returned. If not, a filenames attribute gives the path to the files. shuffle : bool, default=True Whether or not to shuffle the data: might be important for models that make the assumption that the samples are independent and identically distributed (i.i.d.), such as stochastic gradient descent. encoding : str, default=None If None, do not try to decode the content of the files (e.g. for images or other non-text content). If not None, encoding to use to decode text files to Unicode if load_content is True. decode_error : {'strict', 'ignore', 'replace'}, default='strict' Instruction on what to do if a byte sequence is given to analyze that contains characters not of the given `encoding`. Passed as keyword argument 'errors' to bytes.decode. random_state : int, RandomState instance or None, default=0 Determines random number generation for dataset shuffling. Pass an int for reproducible output across multiple function calls. See :term:`Glossary <random_state>`. allowed_extensions : list of str, default=None List of desired file extensions to filter the files to be loaded. Returns ------- data : :class:`~sklearn.utils.Bunch` Dictionary-like object, with the following attributes. data : list of str Only present when `load_content=True`. The raw text data to learn. target : ndarray The target labels (integer index). 
target_names : list The names of target classes. DESCR : str The full description of the dataset. filenames: ndarray The filenames holding the dataset. Examples -------- >>> from sklearn.datasets import load_files >>> container_path = "./" >>> load_files(container_path) # doctest: +SKIP """ target = [] target_names = [] filenames = [] folders = [ f for f in sorted(listdir(container_path)) if isdir(join(container_path, f)) ] if categories is not None: folders = [f for f in folders if f in categories] if allowed_extensions is not None: allowed_extensions = frozenset(allowed_extensions) for label, folder in enumerate(folders): target_names.append(folder) folder_path = join(container_path, folder) files = sorted(listdir(folder_path)) if allowed_extensions is not None: documents = [ join(folder_path, file) for file in files if os.path.splitext(file)[1] in allowed_extensions ] else: documents = [join(folder_path, file) for file in files] target.extend(len(documents) * [label]) filenames.extend(documents) # convert to array for fancy indexing filenames = np.array(filenames) target = np.array(target) if shuffle: random_state = check_random_state(random_state) indices = np.arange(filenames.shape[0]) random_state.shuffle(indices) filenames = filenames[indices] target = target[indices] if load_content: data = [] for filename in filenames: data.append(Path(filename).read_bytes()) if encoding is not None: data = [d.decode(encoding, decode_error) for d in data] return Bunch( data=data, filenames=filenames, target_names=target_names, target=target, DESCR=description, ) return Bunch( filenames=filenames, target_names=target_names, target=target, DESCR=description )
Load text files with categories as subfolder names. Individual samples are assumed to be files stored in a two-level folder structure such as the following: .. code-block:: text container_folder/ category_1_folder/ file_1.txt file_2.txt ... file_42.txt category_2_folder/ file_43.txt file_44.txt ... The folder names are used as supervised signal label names. The individual file names are not important. This function does not try to extract features into a numpy array or scipy sparse matrix. In addition, if load_content is False it does not try to load the files into memory. To use text files in a scikit-learn classification or clustering algorithm, you will need to use the :mod:`~sklearn.feature_extraction.text` module to build a feature extraction transformer that suits your problem. If you set load_content=True, you should also specify the encoding of the text using the 'encoding' parameter. For many modern text files, 'utf-8' will be the correct encoding. If you leave encoding equal to None, then the content will be made of bytes instead of Unicode, and you will not be able to use most functions in :mod:`~sklearn.feature_extraction.text`. Similar feature extractors should be built for other kinds of unstructured data input such as images, audio, video, ... If you want files with a specific file extension (e.g. `.txt`) then you can pass a list of those file extensions to `allowed_extensions`. Read more in the :ref:`User Guide <datasets>`. Parameters ---------- container_path : str Path to the main folder holding one subfolder per category. description : str, default=None A paragraph describing the characteristics of the dataset: its source, reference, etc. categories : list of str, default=None If None (default), load all the categories. If not None, list of category names to load (other categories ignored). load_content : bool, default=True Whether or not to load the content of the different files. If True, a 'data' attribute containing the text information is present in the data structure returned. If not, a filenames attribute gives the path to the files. shuffle : bool, default=True Whether or not to shuffle the data: might be important for models that make the assumption that the samples are independent and identically distributed (i.i.d.), such as stochastic gradient descent. encoding : str, default=None If None, do not try to decode the content of the files (e.g. for images or other non-text content). If not None, encoding to use to decode text files to Unicode if load_content is True. decode_error : {'strict', 'ignore', 'replace'}, default='strict' Instruction on what to do if a byte sequence is given to analyze that contains characters not of the given `encoding`. Passed as keyword argument 'errors' to bytes.decode. random_state : int, RandomState instance or None, default=0 Determines random number generation for dataset shuffling. Pass an int for reproducible output across multiple function calls. See :term:`Glossary <random_state>`. allowed_extensions : list of str, default=None List of desired file extensions to filter the files to be loaded. Returns ------- data : :class:`~sklearn.utils.Bunch` Dictionary-like object, with the following attributes. data : list of str Only present when `load_content=True`. The raw text data to learn. target : ndarray The target labels (integer index). target_names : list The names of target classes. DESCR : str The full description of the dataset. filenames: ndarray The filenames holding the dataset.
Examples -------- >>> from sklearn.datasets import load_files >>> container_path = "./" >>> load_files(container_path) # doctest: +SKIP
load_files
python
scikit-learn/scikit-learn
sklearn/datasets/_base.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/datasets/_base.py
BSD-3-Clause
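A hedged usage sketch for load_files; "./text_corpus" is a hypothetical folder laid out with one subfolder per category, as described in the docstring above.

from sklearn.datasets import load_files

bunch = load_files(
    "./text_corpus",             # hypothetical container folder
    encoding="utf-8",            # decode file contents to str
    allowed_extensions=[".txt"],
    random_state=0,
)
print(bunch.target_names)        # subfolder names, used as class names
print(len(bunch.data), bunch.target[:5])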
def load_csv_data( data_file_name, *, data_module=DATA_MODULE, descr_file_name=None, descr_module=DESCR_MODULE, encoding="utf-8", ): """Loads `data_file_name` from `data_module with `importlib.resources`. Parameters ---------- data_file_name : str Name of csv file to be loaded from `data_module/data_file_name`. For example `'wine_data.csv'`. data_module : str or module, default='sklearn.datasets.data' Module where data lives. The default is `'sklearn.datasets.data'`. descr_file_name : str, default=None Name of rst file to be loaded from `descr_module/descr_file_name`. For example `'wine_data.rst'`. See also :func:`load_descr`. If not None, also returns the corresponding description of the dataset. descr_module : str or module, default='sklearn.datasets.descr' Module where `descr_file_name` lives. See also :func:`load_descr`. The default is `'sklearn.datasets.descr'`. Returns ------- data : ndarray of shape (n_samples, n_features) A 2D array with each row representing one sample and each column representing the features of a given sample. target : ndarry of shape (n_samples,) A 1D array holding target variables for all the samples in `data`. For example target[0] is the target variable for data[0]. target_names : ndarry of shape (n_samples,) A 1D array containing the names of the classifications. For example target_names[0] is the name of the target[0] class. descr : str, optional Description of the dataset (the content of `descr_file_name`). Only returned if `descr_file_name` is not None. encoding : str, optional Text encoding of the CSV file. .. versionadded:: 1.4 """ data_path = resources.files(data_module) / data_file_name with data_path.open("r", encoding="utf-8") as csv_file: data_file = csv.reader(csv_file) temp = next(data_file) n_samples = int(temp[0]) n_features = int(temp[1]) target_names = np.array(temp[2:]) data = np.empty((n_samples, n_features)) target = np.empty((n_samples,), dtype=int) for i, ir in enumerate(data_file): data[i] = np.asarray(ir[:-1], dtype=np.float64) target[i] = np.asarray(ir[-1], dtype=int) if descr_file_name is None: return data, target, target_names else: assert descr_module is not None descr = load_descr(descr_module=descr_module, descr_file_name=descr_file_name) return data, target, target_names, descr
Loads `data_file_name` from `data_module` with `importlib.resources`. Parameters ---------- data_file_name : str Name of the CSV file to be loaded from `data_module/data_file_name`. For example `'wine_data.csv'`. data_module : str or module, default='sklearn.datasets.data' Module where data lives. The default is `'sklearn.datasets.data'`. descr_file_name : str, default=None Name of the RST file to be loaded from `descr_module/descr_file_name`. For example `'wine_data.rst'`. See also :func:`load_descr`. If not None, also returns the corresponding description of the dataset. descr_module : str or module, default='sklearn.datasets.descr' Module where `descr_file_name` lives. See also :func:`load_descr`. The default is `'sklearn.datasets.descr'`. encoding : str, default='utf-8' Text encoding of the CSV file. .. versionadded:: 1.4 Returns ------- data : ndarray of shape (n_samples, n_features) A 2D array with each row representing one sample and each column representing the features of a given sample. target : ndarray of shape (n_samples,) A 1D array holding target variables for all the samples in `data`. For example target[0] is the target variable for data[0]. target_names : ndarray of shape (n_classes,) A 1D array containing the names of the classifications. For example target_names[0] is the name of the target[0] class. descr : str, optional Description of the dataset (the content of `descr_file_name`). Only returned if `descr_file_name` is not None.
load_csv_data
python
scikit-learn/scikit-learn
sklearn/datasets/_base.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/datasets/_base.py
BSD-3-Clause
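A sketch of calling load_csv_data on one of the CSV files bundled with scikit-learn; note it lives in the private sklearn.datasets._base module, so this is for illustration only.

from sklearn.datasets._base import load_csv_data

data, target, target_names = load_csv_data(data_file_name="wine_data.csv")
print(data.shape, target.shape, target_names)  # (178, 13) (178,) and the class names

# requesting the description as well returns a fourth value
data, target, target_names, descr = load_csv_data(
    data_file_name="wine_data.csv", descr_file_name="wine_data.rst"
)
print(descr.splitlines()[0])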
def load_wine(*, return_X_y=False, as_frame=False): """Load and return the wine dataset (classification). .. versionadded:: 0.18 The wine dataset is a classic and very easy multi-class classification dataset. ================= ============== Classes 3 Samples per class [59,71,48] Samples total 178 Dimensionality 13 Features real, positive ================= ============== The copy of UCI ML Wine Data Set dataset is downloaded and modified to fit standard format from: https://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data Read more in the :ref:`User Guide <wine_dataset>`. Parameters ---------- return_X_y : bool, default=False If True, returns ``(data, target)`` instead of a Bunch object. See below for more information about the `data` and `target` object. as_frame : bool, default=False If True, the data is a pandas DataFrame including columns with appropriate dtypes (numeric). The target is a pandas DataFrame or Series depending on the number of target columns. If `return_X_y` is True, then (`data`, `target`) will be pandas DataFrames or Series as described below. .. versionadded:: 0.23 Returns ------- data : :class:`~sklearn.utils.Bunch` Dictionary-like object, with the following attributes. data : {ndarray, dataframe} of shape (178, 13) The data matrix. If `as_frame=True`, `data` will be a pandas DataFrame. target: {ndarray, Series} of shape (178,) The classification target. If `as_frame=True`, `target` will be a pandas Series. feature_names: list The names of the dataset columns. target_names: list The names of target classes. frame: DataFrame of shape (178, 14) Only present when `as_frame=True`. DataFrame with `data` and `target`. .. versionadded:: 0.23 DESCR: str The full description of the dataset. (data, target) : tuple if ``return_X_y`` is True A tuple of two ndarrays by default. The first contains a 2D array of shape (178, 13) with each row representing one sample and each column representing the features. The second array of shape (178,) contains the target samples. Examples -------- Let's say you are interested in the samples 10, 80, and 140, and want to know their class name. >>> from sklearn.datasets import load_wine >>> data = load_wine() >>> data.target[[10, 80, 140]] array([0, 1, 2]) >>> list(data.target_names) [np.str_('class_0'), np.str_('class_1'), np.str_('class_2')] """ data, target, target_names, fdescr = load_csv_data( data_file_name="wine_data.csv", descr_file_name="wine_data.rst" ) feature_names = [ "alcohol", "malic_acid", "ash", "alcalinity_of_ash", "magnesium", "total_phenols", "flavanoids", "nonflavanoid_phenols", "proanthocyanins", "color_intensity", "hue", "od280/od315_of_diluted_wines", "proline", ] frame = None target_columns = [ "target", ] if as_frame: frame, data, target = _convert_data_dataframe( "load_wine", data, target, feature_names, target_columns ) if return_X_y: return data, target return Bunch( data=data, target=target, frame=frame, target_names=target_names, DESCR=fdescr, feature_names=feature_names, )
Load and return the wine dataset (classification). .. versionadded:: 0.18 The wine dataset is a classic and very easy multi-class classification dataset. ================= ============== Classes 3 Samples per class [59,71,48] Samples total 178 Dimensionality 13 Features real, positive ================= ============== The copy of UCI ML Wine Data Set dataset is downloaded and modified to fit standard format from: https://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data Read more in the :ref:`User Guide <wine_dataset>`. Parameters ---------- return_X_y : bool, default=False If True, returns ``(data, target)`` instead of a Bunch object. See below for more information about the `data` and `target` object. as_frame : bool, default=False If True, the data is a pandas DataFrame including columns with appropriate dtypes (numeric). The target is a pandas DataFrame or Series depending on the number of target columns. If `return_X_y` is True, then (`data`, `target`) will be pandas DataFrames or Series as described below. .. versionadded:: 0.23 Returns ------- data : :class:`~sklearn.utils.Bunch` Dictionary-like object, with the following attributes. data : {ndarray, dataframe} of shape (178, 13) The data matrix. If `as_frame=True`, `data` will be a pandas DataFrame. target: {ndarray, Series} of shape (178,) The classification target. If `as_frame=True`, `target` will be a pandas Series. feature_names: list The names of the dataset columns. target_names: list The names of target classes. frame: DataFrame of shape (178, 14) Only present when `as_frame=True`. DataFrame with `data` and `target`. .. versionadded:: 0.23 DESCR: str The full description of the dataset. (data, target) : tuple if ``return_X_y`` is True A tuple of two ndarrays by default. The first contains a 2D array of shape (178, 13) with each row representing one sample and each column representing the features. The second array of shape (178,) contains the target samples. Examples -------- Let's say you are interested in the samples 10, 80, and 140, and want to know their class name. >>> from sklearn.datasets import load_wine >>> data = load_wine() >>> data.target[[10, 80, 140]] array([0, 1, 2]) >>> list(data.target_names) [np.str_('class_0'), np.str_('class_1'), np.str_('class_2')]
load_wine
python
scikit-learn/scikit-learn
sklearn/datasets/_base.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/datasets/_base.py
BSD-3-Clause
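A short sketch complementing the docstring example above: loading the wine data as pandas objects (this assumes pandas is installed).

from sklearn.datasets import load_wine

X, y = load_wine(return_X_y=True, as_frame=True)
print(X.shape)           # (178, 13), X is a pandas DataFrame
print(y.value_counts())  # class counts: 71, 59 and 48 samples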
def load_iris(*, return_X_y=False, as_frame=False): """Load and return the iris dataset (classification). The iris dataset is a classic and very easy multi-class classification dataset. ================= ============== Classes 3 Samples per class 50 Samples total 150 Dimensionality 4 Features real, positive ================= ============== Read more in the :ref:`User Guide <iris_dataset>`. .. versionchanged:: 0.20 Fixed two wrong data points according to Fisher's paper. The new version is the same as in R, but not as in the UCI Machine Learning Repository. Parameters ---------- return_X_y : bool, default=False If True, returns ``(data, target)`` instead of a Bunch object. See below for more information about the `data` and `target` object. .. versionadded:: 0.18 as_frame : bool, default=False If True, the data is a pandas DataFrame including columns with appropriate dtypes (numeric). The target is a pandas DataFrame or Series depending on the number of target columns. If `return_X_y` is True, then (`data`, `target`) will be pandas DataFrames or Series as described below. .. versionadded:: 0.23 Returns ------- data : :class:`~sklearn.utils.Bunch` Dictionary-like object, with the following attributes. data : {ndarray, dataframe} of shape (150, 4) The data matrix. If `as_frame=True`, `data` will be a pandas DataFrame. target: {ndarray, Series} of shape (150,) The classification target. If `as_frame=True`, `target` will be a pandas Series. feature_names: list The names of the dataset columns. target_names: ndarray of shape (3, ) The names of target classes. frame: DataFrame of shape (150, 5) Only present when `as_frame=True`. DataFrame with `data` and `target`. .. versionadded:: 0.23 DESCR: str The full description of the dataset. filename: str The path to the location of the data. .. versionadded:: 0.20 (data, target) : tuple if ``return_X_y`` is True A tuple of two ndarray. The first containing a 2D array of shape (n_samples, n_features) with each row representing one sample and each column representing the features. The second ndarray of shape (n_samples,) containing the target samples. .. versionadded:: 0.18 Examples -------- Let's say you are interested in the samples 10, 25, and 50, and want to know their class name. >>> from sklearn.datasets import load_iris >>> data = load_iris() >>> data.target[[10, 25, 50]] array([0, 0, 1]) >>> list(data.target_names) [np.str_('setosa'), np.str_('versicolor'), np.str_('virginica')] See :ref:`sphx_glr_auto_examples_decomposition_plot_pca_iris.py` for a more detailed example of how to work with the iris dataset. """ data_file_name = "iris.csv" data, target, target_names, fdescr = load_csv_data( data_file_name=data_file_name, descr_file_name="iris.rst" ) feature_names = [ "sepal length (cm)", "sepal width (cm)", "petal length (cm)", "petal width (cm)", ] frame = None target_columns = [ "target", ] if as_frame: frame, data, target = _convert_data_dataframe( "load_iris", data, target, feature_names, target_columns ) if return_X_y: return data, target return Bunch( data=data, target=target, frame=frame, target_names=target_names, DESCR=fdescr, feature_names=feature_names, filename=data_file_name, data_module=DATA_MODULE, )
Load and return the iris dataset (classification). The iris dataset is a classic and very easy multi-class classification dataset. ================= ============== Classes 3 Samples per class 50 Samples total 150 Dimensionality 4 Features real, positive ================= ============== Read more in the :ref:`User Guide <iris_dataset>`. .. versionchanged:: 0.20 Fixed two wrong data points according to Fisher's paper. The new version is the same as in R, but not as in the UCI Machine Learning Repository. Parameters ---------- return_X_y : bool, default=False If True, returns ``(data, target)`` instead of a Bunch object. See below for more information about the `data` and `target` object. .. versionadded:: 0.18 as_frame : bool, default=False If True, the data is a pandas DataFrame including columns with appropriate dtypes (numeric). The target is a pandas DataFrame or Series depending on the number of target columns. If `return_X_y` is True, then (`data`, `target`) will be pandas DataFrames or Series as described below. .. versionadded:: 0.23 Returns ------- data : :class:`~sklearn.utils.Bunch` Dictionary-like object, with the following attributes. data : {ndarray, dataframe} of shape (150, 4) The data matrix. If `as_frame=True`, `data` will be a pandas DataFrame. target: {ndarray, Series} of shape (150,) The classification target. If `as_frame=True`, `target` will be a pandas Series. feature_names: list The names of the dataset columns. target_names: ndarray of shape (3, ) The names of target classes. frame: DataFrame of shape (150, 5) Only present when `as_frame=True`. DataFrame with `data` and `target`. .. versionadded:: 0.23 DESCR: str The full description of the dataset. filename: str The path to the location of the data. .. versionadded:: 0.20 (data, target) : tuple if ``return_X_y`` is True A tuple of two ndarray. The first containing a 2D array of shape (n_samples, n_features) with each row representing one sample and each column representing the features. The second ndarray of shape (n_samples,) containing the target samples. .. versionadded:: 0.18 Examples -------- Let's say you are interested in the samples 10, 25, and 50, and want to know their class name. >>> from sklearn.datasets import load_iris >>> data = load_iris() >>> data.target[[10, 25, 50]] array([0, 0, 1]) >>> list(data.target_names) [np.str_('setosa'), np.str_('versicolor'), np.str_('virginica')] See :ref:`sphx_glr_auto_examples_decomposition_plot_pca_iris.py` for a more detailed example of how to work with the iris dataset.
load_iris
python
scikit-learn/scikit-learn
sklearn/datasets/_base.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/datasets/_base.py
BSD-3-Clause
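Similarly for the iris loader, a sketch of the two return styles used throughout these dataset loaders.

from sklearn.datasets import load_iris

X, y = load_iris(return_X_y=True)
print(X.shape, y.shape)   # (150, 4) (150,)

iris = load_iris(as_frame=True)
print(iris.frame.shape)   # (150, 5): four features plus the target column
print(list(iris.target_names))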
def load_breast_cancer(*, return_X_y=False, as_frame=False): """Load and return the breast cancer Wisconsin dataset (classification). The breast cancer dataset is a classic and very easy binary classification dataset. ================= ============== Classes 2 Samples per class 212(M),357(B) Samples total 569 Dimensionality 30 Features real, positive ================= ============== The copy of UCI ML Breast Cancer Wisconsin (Diagnostic) dataset is downloaded from: https://archive.ics.uci.edu/dataset/17/breast+cancer+wisconsin+diagnostic Read more in the :ref:`User Guide <breast_cancer_dataset>`. Parameters ---------- return_X_y : bool, default=False If True, returns ``(data, target)`` instead of a Bunch object. See below for more information about the `data` and `target` object. .. versionadded:: 0.18 as_frame : bool, default=False If True, the data is a pandas DataFrame including columns with appropriate dtypes (numeric). The target is a pandas DataFrame or Series depending on the number of target columns. If `return_X_y` is True, then (`data`, `target`) will be pandas DataFrames or Series as described below. .. versionadded:: 0.23 Returns ------- data : :class:`~sklearn.utils.Bunch` Dictionary-like object, with the following attributes. data : {ndarray, dataframe} of shape (569, 30) The data matrix. If `as_frame=True`, `data` will be a pandas DataFrame. target : {ndarray, Series} of shape (569,) The classification target. If `as_frame=True`, `target` will be a pandas Series. feature_names : ndarray of shape (30,) The names of the dataset columns. target_names : ndarray of shape (2,) The names of target classes. frame : DataFrame of shape (569, 31) Only present when `as_frame=True`. DataFrame with `data` and `target`. .. versionadded:: 0.23 DESCR : str The full description of the dataset. filename : str The path to the location of the data. .. versionadded:: 0.20 (data, target) : tuple if ``return_X_y`` is True A tuple of two ndarrays by default. The first contains a 2D ndarray of shape (569, 30) with each row representing one sample and each column representing the features. The second ndarray of shape (569,) contains the target samples. If `as_frame=True`, both arrays are pandas objects, i.e. `X` a dataframe and `y` a series. .. versionadded:: 0.18 Examples -------- Let's say you are interested in the samples 10, 50, and 85, and want to know their class name. 
>>> from sklearn.datasets import load_breast_cancer >>> data = load_breast_cancer() >>> data.target[[10, 50, 85]] array([0, 1, 0]) >>> list(data.target_names) [np.str_('malignant'), np.str_('benign')] """ data_file_name = "breast_cancer.csv" data, target, target_names, fdescr = load_csv_data( data_file_name=data_file_name, descr_file_name="breast_cancer.rst" ) feature_names = np.array( [ "mean radius", "mean texture", "mean perimeter", "mean area", "mean smoothness", "mean compactness", "mean concavity", "mean concave points", "mean symmetry", "mean fractal dimension", "radius error", "texture error", "perimeter error", "area error", "smoothness error", "compactness error", "concavity error", "concave points error", "symmetry error", "fractal dimension error", "worst radius", "worst texture", "worst perimeter", "worst area", "worst smoothness", "worst compactness", "worst concavity", "worst concave points", "worst symmetry", "worst fractal dimension", ] ) frame = None target_columns = [ "target", ] if as_frame: frame, data, target = _convert_data_dataframe( "load_breast_cancer", data, target, feature_names, target_columns ) if return_X_y: return data, target return Bunch( data=data, target=target, frame=frame, target_names=target_names, DESCR=fdescr, feature_names=feature_names, filename=data_file_name, data_module=DATA_MODULE, )
Load and return the breast cancer Wisconsin dataset (classification). The breast cancer dataset is a classic and very easy binary classification dataset. ================= ============== Classes 2 Samples per class 212(M),357(B) Samples total 569 Dimensionality 30 Features real, positive ================= ============== The copy of UCI ML Breast Cancer Wisconsin (Diagnostic) dataset is downloaded from: https://archive.ics.uci.edu/dataset/17/breast+cancer+wisconsin+diagnostic Read more in the :ref:`User Guide <breast_cancer_dataset>`. Parameters ---------- return_X_y : bool, default=False If True, returns ``(data, target)`` instead of a Bunch object. See below for more information about the `data` and `target` object. .. versionadded:: 0.18 as_frame : bool, default=False If True, the data is a pandas DataFrame including columns with appropriate dtypes (numeric). The target is a pandas DataFrame or Series depending on the number of target columns. If `return_X_y` is True, then (`data`, `target`) will be pandas DataFrames or Series as described below. .. versionadded:: 0.23 Returns ------- data : :class:`~sklearn.utils.Bunch` Dictionary-like object, with the following attributes. data : {ndarray, dataframe} of shape (569, 30) The data matrix. If `as_frame=True`, `data` will be a pandas DataFrame. target : {ndarray, Series} of shape (569,) The classification target. If `as_frame=True`, `target` will be a pandas Series. feature_names : ndarray of shape (30,) The names of the dataset columns. target_names : ndarray of shape (2,) The names of target classes. frame : DataFrame of shape (569, 31) Only present when `as_frame=True`. DataFrame with `data` and `target`. .. versionadded:: 0.23 DESCR : str The full description of the dataset. filename : str The path to the location of the data. .. versionadded:: 0.20 (data, target) : tuple if ``return_X_y`` is True A tuple of two ndarrays by default. The first contains a 2D ndarray of shape (569, 30) with each row representing one sample and each column representing the features. The second ndarray of shape (569,) contains the target samples. If `as_frame=True`, both arrays are pandas objects, i.e. `X` a dataframe and `y` a series. .. versionadded:: 0.18 Examples -------- Let's say you are interested in the samples 10, 50, and 85, and want to know their class name. >>> from sklearn.datasets import load_breast_cancer >>> data = load_breast_cancer() >>> data.target[[10, 50, 85]] array([0, 1, 0]) >>> list(data.target_names) [np.str_('malignant'), np.str_('benign')]
load_breast_cancer
python
scikit-learn/scikit-learn
sklearn/datasets/_base.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/datasets/_base.py
BSD-3-Clause
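To show how `load_breast_cancer` plugs into a typical workflow, a minimal sketch fitting a baseline classifier on the 30 numeric features; the estimator choice here is illustrative, not prescribed by the dataset:

from sklearn.datasets import load_breast_cancer
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split

X, y = load_breast_cancer(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

# Raising max_iter helps the solver converge on the unscaled features.
clf = LogisticRegression(max_iter=5000).fit(X_train, y_train)
print(clf.score(X_test, y_test))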
def load_digits(*, n_class=10, return_X_y=False, as_frame=False): """Load and return the digits dataset (classification). Each datapoint is a 8x8 image of a digit. ================= ============== Classes 10 Samples per class ~180 Samples total 1797 Dimensionality 64 Features integers 0-16 ================= ============== This is a copy of the test set of the UCI ML hand-written digits datasets https://archive.ics.uci.edu/ml/datasets/Optical+Recognition+of+Handwritten+Digits Read more in the :ref:`User Guide <digits_dataset>`. Parameters ---------- n_class : int, default=10 The number of classes to return. Between 0 and 10. return_X_y : bool, default=False If True, returns ``(data, target)`` instead of a Bunch object. See below for more information about the `data` and `target` object. .. versionadded:: 0.18 as_frame : bool, default=False If True, the data is a pandas DataFrame including columns with appropriate dtypes (numeric). The target is a pandas DataFrame or Series depending on the number of target columns. If `return_X_y` is True, then (`data`, `target`) will be pandas DataFrames or Series as described below. .. versionadded:: 0.23 Returns ------- data : :class:`~sklearn.utils.Bunch` Dictionary-like object, with the following attributes. data : {ndarray, dataframe} of shape (1797, 64) The flattened data matrix. If `as_frame=True`, `data` will be a pandas DataFrame. target: {ndarray, Series} of shape (1797,) The classification target. If `as_frame=True`, `target` will be a pandas Series. feature_names: list The names of the dataset columns. target_names: list The names of target classes. .. versionadded:: 0.20 frame: DataFrame of shape (1797, 65) Only present when `as_frame=True`. DataFrame with `data` and `target`. .. versionadded:: 0.23 images: {ndarray} of shape (1797, 8, 8) The raw image data. DESCR: str The full description of the dataset. (data, target) : tuple if ``return_X_y`` is True A tuple of two ndarrays by default. The first contains a 2D ndarray of shape (1797, 64) with each row representing one sample and each column representing the features. The second ndarray of shape (1797) contains the target samples. If `as_frame=True`, both arrays are pandas objects, i.e. `X` a dataframe and `y` a series. .. versionadded:: 0.18 Examples -------- To load the data and visualize the images:: >>> from sklearn.datasets import load_digits >>> digits = load_digits() >>> print(digits.data.shape) (1797, 64) >>> import matplotlib.pyplot as plt >>> plt.matshow(digits.images[0], cmap="gray") <...> >>> plt.show() """ data, fdescr = load_gzip_compressed_csv_data( data_file_name="digits.csv.gz", descr_file_name="digits.rst", delimiter="," ) target = data[:, -1].astype(int, copy=False) flat_data = data[:, :-1] images = flat_data.view() images.shape = (-1, 8, 8) if n_class < 10: idx = target < n_class flat_data, target = flat_data[idx], target[idx] images = images[idx] feature_names = [ "pixel_{}_{}".format(row_idx, col_idx) for row_idx in range(8) for col_idx in range(8) ] frame = None target_columns = [ "target", ] if as_frame: frame, flat_data, target = _convert_data_dataframe( "load_digits", flat_data, target, feature_names, target_columns ) if return_X_y: return flat_data, target return Bunch( data=flat_data, target=target, frame=frame, feature_names=feature_names, target_names=np.arange(10), images=images, DESCR=fdescr, )
Load and return the digits dataset (classification). Each datapoint is a 8x8 image of a digit. ================= ============== Classes 10 Samples per class ~180 Samples total 1797 Dimensionality 64 Features integers 0-16 ================= ============== This is a copy of the test set of the UCI ML hand-written digits datasets https://archive.ics.uci.edu/ml/datasets/Optical+Recognition+of+Handwritten+Digits Read more in the :ref:`User Guide <digits_dataset>`. Parameters ---------- n_class : int, default=10 The number of classes to return. Between 0 and 10. return_X_y : bool, default=False If True, returns ``(data, target)`` instead of a Bunch object. See below for more information about the `data` and `target` object. .. versionadded:: 0.18 as_frame : bool, default=False If True, the data is a pandas DataFrame including columns with appropriate dtypes (numeric). The target is a pandas DataFrame or Series depending on the number of target columns. If `return_X_y` is True, then (`data`, `target`) will be pandas DataFrames or Series as described below. .. versionadded:: 0.23 Returns ------- data : :class:`~sklearn.utils.Bunch` Dictionary-like object, with the following attributes. data : {ndarray, dataframe} of shape (1797, 64) The flattened data matrix. If `as_frame=True`, `data` will be a pandas DataFrame. target: {ndarray, Series} of shape (1797,) The classification target. If `as_frame=True`, `target` will be a pandas Series. feature_names: list The names of the dataset columns. target_names: list The names of target classes. .. versionadded:: 0.20 frame: DataFrame of shape (1797, 65) Only present when `as_frame=True`. DataFrame with `data` and `target`. .. versionadded:: 0.23 images: {ndarray} of shape (1797, 8, 8) The raw image data. DESCR: str The full description of the dataset. (data, target) : tuple if ``return_X_y`` is True A tuple of two ndarrays by default. The first contains a 2D ndarray of shape (1797, 64) with each row representing one sample and each column representing the features. The second ndarray of shape (1797) contains the target samples. If `as_frame=True`, both arrays are pandas objects, i.e. `X` a dataframe and `y` a series. .. versionadded:: 0.18 Examples -------- To load the data and visualize the images:: >>> from sklearn.datasets import load_digits >>> digits = load_digits() >>> print(digits.data.shape) (1797, 64) >>> import matplotlib.pyplot as plt >>> plt.matshow(digits.images[0], cmap="gray") <...> >>> plt.show()
load_digits
python
scikit-learn/scikit-learn
sklearn/datasets/_base.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/datasets/_base.py
BSD-3-Clause
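A short sketch of the `n_class` parameter of `load_digits`, which restricts the returned samples to the lowest digit classes:

import numpy as np
from sklearn.datasets import load_digits

# Keep only the digits 0-3; fewer rows than the full 1797 samples.
digits = load_digits(n_class=4)
print(digits.data.shape)
print(np.unique(digits.target))   # [0 1 2 3]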
def load_diabetes(*, return_X_y=False, as_frame=False, scaled=True): """Load and return the diabetes dataset (regression). ============== ================== Samples total 442 Dimensionality 10 Features real, -.2 < x < .2 Targets integer 25 - 346 ============== ================== .. note:: The meaning of each feature (i.e. `feature_names`) might be unclear (especially for `ltg`) as the documentation of the original dataset is not explicit. We provide information that seems correct in regard with the scientific literature in this field of research. Read more in the :ref:`User Guide <diabetes_dataset>`. Parameters ---------- return_X_y : bool, default=False If True, returns ``(data, target)`` instead of a Bunch object. See below for more information about the `data` and `target` object. .. versionadded:: 0.18 as_frame : bool, default=False If True, the data is a pandas DataFrame including columns with appropriate dtypes (numeric). The target is a pandas DataFrame or Series depending on the number of target columns. If `return_X_y` is True, then (`data`, `target`) will be pandas DataFrames or Series as described below. .. versionadded:: 0.23 scaled : bool, default=True If True, the feature variables are mean centered and scaled by the standard deviation times the square root of `n_samples`. If False, raw data is returned for the feature variables. .. versionadded:: 1.1 Returns ------- data : :class:`~sklearn.utils.Bunch` Dictionary-like object, with the following attributes. data : {ndarray, dataframe} of shape (442, 10) The data matrix. If `as_frame=True`, `data` will be a pandas DataFrame. target: {ndarray, Series} of shape (442,) The regression target. If `as_frame=True`, `target` will be a pandas Series. feature_names: list The names of the dataset columns. frame: DataFrame of shape (442, 11) Only present when `as_frame=True`. DataFrame with `data` and `target`. .. versionadded:: 0.23 DESCR: str The full description of the dataset. data_filename: str The path to the location of the data. target_filename: str The path to the location of the target. (data, target) : tuple if ``return_X_y`` is True Returns a tuple of two ndarray of shape (n_samples, n_features) A 2D array with each row representing one sample and each column representing the features and/or target of a given sample. .. versionadded:: 0.18 Examples -------- >>> from sklearn.datasets import load_diabetes >>> diabetes = load_diabetes() >>> diabetes.target[:3] array([151., 75., 141.]) >>> diabetes.data.shape (442, 10) """ data_filename = "diabetes_data_raw.csv.gz" target_filename = "diabetes_target.csv.gz" data = load_gzip_compressed_csv_data(data_filename) target = load_gzip_compressed_csv_data(target_filename) if scaled: data = scale(data, copy=False) data /= data.shape[0] ** 0.5 fdescr = load_descr("diabetes.rst") feature_names = ["age", "sex", "bmi", "bp", "s1", "s2", "s3", "s4", "s5", "s6"] frame = None target_columns = [ "target", ] if as_frame: frame, data, target = _convert_data_dataframe( "load_diabetes", data, target, feature_names, target_columns ) if return_X_y: return data, target return Bunch( data=data, target=target, frame=frame, DESCR=fdescr, feature_names=feature_names, data_filename=data_filename, target_filename=target_filename, data_module=DATA_MODULE, )
Load and return the diabetes dataset (regression). ============== ================== Samples total 442 Dimensionality 10 Features real, -.2 < x < .2 Targets integer 25 - 346 ============== ================== .. note:: The meaning of each feature (i.e. `feature_names`) might be unclear (especially for `ltg`) as the documentation of the original dataset is not explicit. We provide information that seems correct in regard with the scientific literature in this field of research. Read more in the :ref:`User Guide <diabetes_dataset>`. Parameters ---------- return_X_y : bool, default=False If True, returns ``(data, target)`` instead of a Bunch object. See below for more information about the `data` and `target` object. .. versionadded:: 0.18 as_frame : bool, default=False If True, the data is a pandas DataFrame including columns with appropriate dtypes (numeric). The target is a pandas DataFrame or Series depending on the number of target columns. If `return_X_y` is True, then (`data`, `target`) will be pandas DataFrames or Series as described below. .. versionadded:: 0.23 scaled : bool, default=True If True, the feature variables are mean centered and scaled by the standard deviation times the square root of `n_samples`. If False, raw data is returned for the feature variables. .. versionadded:: 1.1 Returns ------- data : :class:`~sklearn.utils.Bunch` Dictionary-like object, with the following attributes. data : {ndarray, dataframe} of shape (442, 10) The data matrix. If `as_frame=True`, `data` will be a pandas DataFrame. target: {ndarray, Series} of shape (442,) The regression target. If `as_frame=True`, `target` will be a pandas Series. feature_names: list The names of the dataset columns. frame: DataFrame of shape (442, 11) Only present when `as_frame=True`. DataFrame with `data` and `target`. .. versionadded:: 0.23 DESCR: str The full description of the dataset. data_filename: str The path to the location of the data. target_filename: str The path to the location of the target. (data, target) : tuple if ``return_X_y`` is True Returns a tuple of two ndarray of shape (n_samples, n_features) A 2D array with each row representing one sample and each column representing the features and/or target of a given sample. .. versionadded:: 0.18 Examples -------- >>> from sklearn.datasets import load_diabetes >>> diabetes = load_diabetes() >>> diabetes.target[:3] array([151., 75., 141.]) >>> diabetes.data.shape (442, 10)
load_diabetes
python
scikit-learn/scikit-learn
sklearn/datasets/_base.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/datasets/_base.py
BSD-3-Clause
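The `scaled` parameter of `load_diabetes` is easy to misread; a small sketch contrasting the default standardized features with the raw measurements:

from sklearn.datasets import load_diabetes

X_scaled, y = load_diabetes(return_X_y=True)               # default: scaled=True
X_raw, _ = load_diabetes(return_X_y=True, scaled=False)    # original measurements

print(X_scaled.shape, X_raw.shape)   # (442, 10) (442, 10)
print(X_scaled[0, :3])               # small, centered values
print(X_raw[0, :3])                  # e.g. age, sex code, bmi on their natural scales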
def load_linnerud(*, return_X_y=False, as_frame=False): """Load and return the physical exercise Linnerud dataset. This dataset is suitable for multi-output regression tasks. ============== ============================ Samples total 20 Dimensionality 3 (for both data and target) Features integer Targets integer ============== ============================ Read more in the :ref:`User Guide <linnerrud_dataset>`. Parameters ---------- return_X_y : bool, default=False If True, returns ``(data, target)`` instead of a Bunch object. See below for more information about the `data` and `target` object. .. versionadded:: 0.18 as_frame : bool, default=False If True, the data is a pandas DataFrame including columns with appropriate dtypes (numeric, string or categorical). The target is a pandas DataFrame or Series depending on the number of target columns. If `return_X_y` is True, then (`data`, `target`) will be pandas DataFrames or Series as described below. .. versionadded:: 0.23 Returns ------- data : :class:`~sklearn.utils.Bunch` Dictionary-like object, with the following attributes. data : {ndarray, dataframe} of shape (20, 3) The data matrix. If `as_frame=True`, `data` will be a pandas DataFrame. target: {ndarray, dataframe} of shape (20, 3) The regression targets. If `as_frame=True`, `target` will be a pandas DataFrame. feature_names: list The names of the dataset columns. target_names: list The names of the target columns. frame: DataFrame of shape (20, 6) Only present when `as_frame=True`. DataFrame with `data` and `target`. .. versionadded:: 0.23 DESCR: str The full description of the dataset. data_filename: str The path to the location of the data. target_filename: str The path to the location of the target. .. versionadded:: 0.20 (data, target) : tuple if ``return_X_y`` is True Returns a tuple of two ndarrays or dataframe of shape `(20, 3)`. Each row represents one sample and each column represents the features in `X` and a target in `y` of a given sample. .. versionadded:: 0.18 Examples -------- >>> from sklearn.datasets import load_linnerud >>> linnerud = load_linnerud() >>> linnerud.data.shape (20, 3) >>> linnerud.target.shape (20, 3) """ data_filename = "linnerud_exercise.csv" target_filename = "linnerud_physiological.csv" data_module_path = resources.files(DATA_MODULE) # Read header and data data_path = data_module_path / data_filename with data_path.open("r", encoding="utf-8") as f: header_exercise = f.readline().split() f.seek(0) # reset file obj data_exercise = np.loadtxt(f, skiprows=1) target_path = data_module_path / target_filename with target_path.open("r", encoding="utf-8") as f: header_physiological = f.readline().split() f.seek(0) # reset file obj data_physiological = np.loadtxt(f, skiprows=1) fdescr = load_descr("linnerud.rst") frame = None if as_frame: (frame, data_exercise, data_physiological) = _convert_data_dataframe( "load_linnerud", data_exercise, data_physiological, header_exercise, header_physiological, ) if return_X_y: return data_exercise, data_physiological return Bunch( data=data_exercise, feature_names=header_exercise, target=data_physiological, target_names=header_physiological, frame=frame, DESCR=fdescr, data_filename=data_filename, target_filename=target_filename, data_module=DATA_MODULE, )
Load and return the physical exercise Linnerud dataset. This dataset is suitable for multi-output regression tasks. ============== ============================ Samples total 20 Dimensionality 3 (for both data and target) Features integer Targets integer ============== ============================ Read more in the :ref:`User Guide <linnerrud_dataset>`. Parameters ---------- return_X_y : bool, default=False If True, returns ``(data, target)`` instead of a Bunch object. See below for more information about the `data` and `target` object. .. versionadded:: 0.18 as_frame : bool, default=False If True, the data is a pandas DataFrame including columns with appropriate dtypes (numeric, string or categorical). The target is a pandas DataFrame or Series depending on the number of target columns. If `return_X_y` is True, then (`data`, `target`) will be pandas DataFrames or Series as described below. .. versionadded:: 0.23 Returns ------- data : :class:`~sklearn.utils.Bunch` Dictionary-like object, with the following attributes. data : {ndarray, dataframe} of shape (20, 3) The data matrix. If `as_frame=True`, `data` will be a pandas DataFrame. target: {ndarray, dataframe} of shape (20, 3) The regression targets. If `as_frame=True`, `target` will be a pandas DataFrame. feature_names: list The names of the dataset columns. target_names: list The names of the target columns. frame: DataFrame of shape (20, 6) Only present when `as_frame=True`. DataFrame with `data` and `target`. .. versionadded:: 0.23 DESCR: str The full description of the dataset. data_filename: str The path to the location of the data. target_filename: str The path to the location of the target. .. versionadded:: 0.20 (data, target) : tuple if ``return_X_y`` is True Returns a tuple of two ndarrays or dataframe of shape `(20, 3)`. Each row represents one sample and each column represents the features in `X` and a target in `y` of a given sample. .. versionadded:: 0.18 Examples -------- >>> from sklearn.datasets import load_linnerud >>> linnerud = load_linnerud() >>> linnerud.data.shape (20, 3) >>> linnerud.target.shape (20, 3)
load_linnerud
python
scikit-learn/scikit-learn
sklearn/datasets/_base.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/datasets/_base.py
BSD-3-Clause
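Because Linnerud has three targets, it is a convenient smoke test for multi-output regression; a minimal sketch (the estimator is an arbitrary choice):

from sklearn.datasets import load_linnerud
from sklearn.linear_model import LinearRegression

X, Y = load_linnerud(return_X_y=True)   # X: exercise features, Y: physiological targets
model = LinearRegression().fit(X, Y)    # native multi-output fit
print(model.predict(X[:2]).shape)       # (2, 3): one row per sample, one column per target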
def load_sample_images():
    """Load sample images for image manipulation.

    Loads both, ``china`` and ``flower``.

    Read more in the :ref:`User Guide <sample_images>`.

    Returns
    -------
    data : :class:`~sklearn.utils.Bunch`
        Dictionary-like object, with the following attributes.

        images : list of ndarray of shape (427, 640, 3)
            The two sample image.
        filenames : list
            The filenames for the images.
        DESCR : str
            The full description of the dataset.

    Examples
    --------
    To load the data and visualize the images:

    >>> from sklearn.datasets import load_sample_images
    >>> dataset = load_sample_images()     #doctest: +SKIP
    >>> len(dataset.images)                #doctest: +SKIP
    2
    >>> first_img_data = dataset.images[0] #doctest: +SKIP
    >>> first_img_data.shape               #doctest: +SKIP
    (427, 640, 3)
    >>> first_img_data.dtype               #doctest: +SKIP
    dtype('uint8')
    """
    try:
        from PIL import Image
    except ImportError:
        raise ImportError(
            "The Python Imaging Library (PIL) is required to load data "
            "from jpeg files. Please refer to "
            "https://pillow.readthedocs.io/en/stable/installation.html "
            "for installing PIL."
        )

    descr = load_descr("README.txt", descr_module=IMAGES_MODULE)

    filenames, images = [], []

    jpg_paths = sorted(
        resource
        for resource in resources.files(IMAGES_MODULE).iterdir()
        if resource.is_file() and resource.match("*.jpg")
    )

    for path in jpg_paths:
        filenames.append(str(path))
        with path.open("rb") as image_file:
            pil_image = Image.open(image_file)
            image = np.asarray(pil_image)
        images.append(image)

    return Bunch(images=images, filenames=filenames, DESCR=descr)
Load sample images for image manipulation. Loads both, ``china`` and ``flower``. Read more in the :ref:`User Guide <sample_images>`. Returns ------- data : :class:`~sklearn.utils.Bunch` Dictionary-like object, with the following attributes. images : list of ndarray of shape (427, 640, 3) The two sample image. filenames : list The filenames for the images. DESCR : str The full description of the dataset. Examples -------- To load the data and visualize the images: >>> from sklearn.datasets import load_sample_images >>> dataset = load_sample_images() #doctest: +SKIP >>> len(dataset.images) #doctest: +SKIP 2 >>> first_img_data = dataset.images[0] #doctest: +SKIP >>> first_img_data.shape #doctest: +SKIP (427, 640, 3) >>> first_img_data.dtype #doctest: +SKIP dtype('uint8')
load_sample_images
python
scikit-learn/scikit-learn
sklearn/datasets/_base.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/datasets/_base.py
BSD-3-Clause
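A short usage sketch for `load_sample_images`; it assumes Pillow is installed, as required by the loader:

from sklearn.datasets import load_sample_images

dataset = load_sample_images()
for filename, image in zip(dataset.filenames, dataset.images):
    print(filename, image.shape, image.dtype)   # (427, 640, 3) uint8 for both images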
def load_sample_image(image_name):
    """Load the numpy array of a single sample image.

    Read more in the :ref:`User Guide <sample_images>`.

    Parameters
    ----------
    image_name : {`china.jpg`, `flower.jpg`}
        The name of the sample image loaded.

    Returns
    -------
    img : 3D array
        The image as a numpy array: height x width x color.

    Examples
    --------

    >>> from sklearn.datasets import load_sample_image
    >>> china = load_sample_image('china.jpg')   # doctest: +SKIP
    >>> china.dtype                              # doctest: +SKIP
    dtype('uint8')
    >>> china.shape                              # doctest: +SKIP
    (427, 640, 3)
    >>> flower = load_sample_image('flower.jpg') # doctest: +SKIP
    >>> flower.dtype                             # doctest: +SKIP
    dtype('uint8')
    >>> flower.shape                             # doctest: +SKIP
    (427, 640, 3)
    """
    images = load_sample_images()
    index = None
    for i, filename in enumerate(images.filenames):
        if filename.endswith(image_name):
            index = i
            break
    if index is None:
        raise AttributeError("Cannot find sample image: %s" % image_name)
    return images.images[index]
Load the numpy array of a single sample image. Read more in the :ref:`User Guide <sample_images>`. Parameters ---------- image_name : {`china.jpg`, `flower.jpg`} The name of the sample image loaded. Returns ------- img : 3D array The image as a numpy array: height x width x color. Examples -------- >>> from sklearn.datasets import load_sample_image >>> china = load_sample_image('china.jpg') # doctest: +SKIP >>> china.dtype # doctest: +SKIP dtype('uint8') >>> china.shape # doctest: +SKIP (427, 640, 3) >>> flower = load_sample_image('flower.jpg') # doctest: +SKIP >>> flower.dtype # doctest: +SKIP dtype('uint8') >>> flower.shape # doctest: +SKIP (427, 640, 3)
load_sample_image
python
scikit-learn/scikit-learn
sklearn/datasets/_base.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/datasets/_base.py
BSD-3-Clause
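A minimal display sketch for `load_sample_image`, assuming Pillow and matplotlib are available:

import matplotlib.pyplot as plt
from sklearn.datasets import load_sample_image

china = load_sample_image("china.jpg")   # ndarray of shape (427, 640, 3), dtype uint8
plt.imshow(china)
plt.axis("off")
plt.show()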
def _pkl_filepath(*args, **kwargs):
    """Return filename for Python 3 pickles

    args[-1] is expected to be the ".pkl" filename. For compatibility with
    older scikit-learn versions, a suffix is inserted before the extension.

    _pkl_filepath('/path/to/folder', 'filename.pkl') returns
    '/path/to/folder/filename_py3.pkl'
    """
    py3_suffix = kwargs.get("py3_suffix", "_py3")
    basename, ext = splitext(args[-1])
    basename += py3_suffix
    new_args = args[:-1] + (basename + ext,)
    return join(*new_args)
Return filename for Python 3 pickles args[-1] is expected to be the ".pkl" filename. For compatibility with older scikit-learn versions, a suffix is inserted before the extension. _pkl_filepath('/path/to/folder', 'filename.pkl') returns '/path/to/folder/filename_py3.pkl'
_pkl_filepath
python
scikit-learn/scikit-learn
sklearn/datasets/_base.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/datasets/_base.py
BSD-3-Clause
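A quick sketch of what `_pkl_filepath` produces; note that it is a private, underscore-prefixed utility, so the import path is an implementation detail that may change:

from sklearn.datasets._base import _pkl_filepath  # private helper

# The "_py3" suffix is inserted before the ".pkl" extension.
print(_pkl_filepath("/path/to/folder", "filename.pkl"))
# -> /path/to/folder/filename_py3.pkl (path separator depends on the platform)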
def _sha256(path):
    """Calculate the sha256 hash of the file at path."""
    sha256hash = hashlib.sha256()
    chunk_size = 8192
    with open(path, "rb") as f:
        while True:
            buffer = f.read(chunk_size)
            if not buffer:
                break
            sha256hash.update(buffer)
    return sha256hash.hexdigest()
Calculate the sha256 hash of the file at path.
_sha256
python
scikit-learn/scikit-learn
sklearn/datasets/_base.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/datasets/_base.py
BSD-3-Clause
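A small self-check of `_sha256` against hashlib on the same bytes; again a private function, so treat the import as an implementation detail:

import hashlib
from tempfile import NamedTemporaryFile

from sklearn.datasets._base import _sha256  # private helper

with NamedTemporaryFile(delete=False, suffix=".bin") as f:
    f.write(b"hello world")

expected = hashlib.sha256(b"hello world").hexdigest()
assert _sha256(f.name) == expected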
def _fetch_remote(remote, dirname=None, n_retries=3, delay=1): """Helper function to download a remote dataset. Fetch a dataset pointed by remote's url, save into path using remote's filename and ensure its integrity based on the SHA256 checksum of the downloaded file. .. versionchanged:: 1.6 If the file already exists locally and the SHA256 checksums match, the path to the local file is returned without re-downloading. Parameters ---------- remote : RemoteFileMetadata Named tuple containing remote dataset meta information: url, filename and checksum. dirname : str or Path, default=None Directory to save the file to. If None, the current working directory is used. n_retries : int, default=3 Number of retries when HTTP errors are encountered. .. versionadded:: 1.5 delay : int, default=1 Number of seconds between retries. .. versionadded:: 1.5 Returns ------- file_path: Path Full path of the created file. """ if dirname is None: folder_path = Path(".") else: folder_path = Path(dirname) file_path = folder_path / remote.filename if file_path.exists(): if remote.checksum is None: return file_path checksum = _sha256(file_path) if checksum == remote.checksum: return file_path else: warnings.warn( f"SHA256 checksum of existing local file {file_path.name} " f"({checksum}) differs from expected ({remote.checksum}): " f"re-downloading from {remote.url} ." ) # We create a temporary file dedicated to this particular download to avoid # conflicts with parallel downloads. If the download is successful, the # temporary file is atomically renamed to the final file path (with # `shutil.move`). We therefore pass `delete=False` to `NamedTemporaryFile`. # Otherwise, garbage collecting temp_file would raise an error when # attempting to delete a file that was already renamed. If the download # fails or the result does not match the expected SHA256 digest, the # temporary file is removed manually in the except block. temp_file = NamedTemporaryFile( prefix=remote.filename + ".part_", dir=folder_path, delete=False ) # Note that Python 3.12's `delete_on_close=True` is ignored as we set # `delete=False` explicitly. So after this line the empty temporary file still # exists on disk to make sure that it's uniquely reserved for this specific call of # `_fetch_remote` and therefore it protects against any corruption by parallel # calls. temp_file.close() try: temp_file_path = Path(temp_file.name) while True: try: urlretrieve(remote.url, temp_file_path) break except (URLError, TimeoutError): if n_retries == 0: # If no more retries are left, re-raise the caught exception. raise warnings.warn(f"Retry downloading from url: {remote.url}") n_retries -= 1 time.sleep(delay) checksum = _sha256(temp_file_path) if remote.checksum is not None and remote.checksum != checksum: raise OSError( f"The SHA256 checksum of {remote.filename} ({checksum}) " f"differs from expected ({remote.checksum})." ) except (Exception, KeyboardInterrupt): os.unlink(temp_file.name) raise # The following renaming is atomic whenever temp_file_path and # file_path are on the same filesystem. This should be the case most of # the time, but we still use shutil.move instead of os.rename in case # they are not. shutil.move(temp_file_path, file_path) return file_path
Helper function to download a remote dataset. Fetch a dataset pointed by remote's url, save into path using remote's filename and ensure its integrity based on the SHA256 checksum of the downloaded file. .. versionchanged:: 1.6 If the file already exists locally and the SHA256 checksums match, the path to the local file is returned without re-downloading. Parameters ---------- remote : RemoteFileMetadata Named tuple containing remote dataset meta information: url, filename and checksum. dirname : str or Path, default=None Directory to save the file to. If None, the current working directory is used. n_retries : int, default=3 Number of retries when HTTP errors are encountered. .. versionadded:: 1.5 delay : int, default=1 Number of seconds between retries. .. versionadded:: 1.5 Returns ------- file_path: Path Full path of the created file.
_fetch_remote
python
scikit-learn/scikit-learn
sklearn/datasets/_base.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/datasets/_base.py
BSD-3-Clause
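A sketch of how `_fetch_remote` is driven by a `RemoteFileMetadata` named tuple (the same construction appears in `fetch_file` below); the URL is a placeholder, not a real dataset location, and both names are private API:

from sklearn.datasets._base import RemoteFileMetadata, _fetch_remote  # private API

remote = RemoteFileMetadata(
    filename="example.csv",
    url="https://example.com/example.csv",  # placeholder URL
    checksum=None,                          # no integrity check in this sketch
)
# Uncomment to actually download into the current directory:
# path = _fetch_remote(remote, dirname=".", n_retries=2, delay=1)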
def _filter_filename(value, filter_dots=True):
    """Derive a name that is safe to use as filename from the given string.

    Adapted from the `slugify` function of django:
    https://github.com/django/django/blob/master/django/utils/text.py

    Convert spaces or repeated dashes to single dashes. Replace characters that
    aren't alphanumerics, underscores, hyphens or dots by underscores. Convert
    to lowercase. Also strip leading and trailing whitespace, dashes, and
    underscores.
    """
    value = unicodedata.normalize("NFKD", value).lower()
    if filter_dots:
        value = re.sub(r"[^\w\s-]+", "_", value)
    else:
        value = re.sub(r"[^.\w\s-]+", "_", value)
    value = re.sub(r"[\s-]+", "-", value)
    return value.strip("-_.")
Derive a name that is safe to use as filename from the given string. Adapted from the `slugify` function of django: https://github.com/django/django/blob/master/django/utils/text.py Convert spaces or repeated dashes to single dashes. Replace characters that aren't alphanumerics, underscores, hyphens or dots by underscores. Convert to lowercase. Also strip leading and trailing whitespace, dashes, and underscores.
_filter_filename
python
scikit-learn/scikit-learn
sklearn/datasets/_base.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/datasets/_base.py
BSD-3-Clause
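Two illustrative calls to `_filter_filename` (a private helper; the expected outputs are traced from the regular expressions above, not taken from official documentation):

from sklearn.datasets._base import _filter_filename  # private helper

print(_filter_filename("Hello World"))                  # hello-world
print(_filter_filename("data.csv", filter_dots=False))  # data.csv (dots preserved)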
def fetch_file( url, folder=None, local_filename=None, sha256=None, n_retries=3, delay=1 ): """Fetch a file from the web if not already present in the local folder. If the file already exists locally (and the SHA256 checksums match when provided), the path to the local file is returned without re-downloading. .. versionadded:: 1.6 Parameters ---------- url : str URL of the file to download. folder : str or Path, default=None Directory to save the file to. If None, the file is downloaded in a folder with a name derived from the URL host name and path under scikit-learn data home folder. local_filename : str, default=None Name of the file to save. If None, the filename is inferred from the URL. sha256 : str, default=None SHA256 checksum of the file. If None, no checksum is verified. n_retries : int, default=3 Number of retries when HTTP errors are encountered. delay : int, default=1 Number of seconds between retries. Returns ------- file_path : Path Full path of the downloaded file. """ folder_from_url, filename_from_url = _derive_folder_and_filename_from_url(url) if local_filename is None: local_filename = filename_from_url if folder is None: folder = Path(get_data_home()) / folder_from_url makedirs(folder, exist_ok=True) remote_metadata = RemoteFileMetadata( filename=local_filename, url=url, checksum=sha256 ) return _fetch_remote( remote_metadata, dirname=folder, n_retries=n_retries, delay=delay )
Fetch a file from the web if not already present in the local folder. If the file already exists locally (and the SHA256 checksums match when provided), the path to the local file is returned without re-downloading. .. versionadded:: 1.6 Parameters ---------- url : str URL of the file to download. folder : str or Path, default=None Directory to save the file to. If None, the file is downloaded in a folder with a name derived from the URL host name and path under scikit-learn data home folder. local_filename : str, default=None Name of the file to save. If None, the filename is inferred from the URL. sha256 : str, default=None SHA256 checksum of the file. If None, no checksum is verified. n_retries : int, default=3 Number of retries when HTTP errors are encountered. delay : int, default=1 Number of seconds between retries. Returns ------- file_path : Path Full path of the downloaded file.
fetch_file
python
scikit-learn/scikit-learn
sklearn/datasets/_base.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/datasets/_base.py
BSD-3-Clause
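A hedged usage sketch for `fetch_file`; the URL is a placeholder to be replaced with a real location, and without `sha256` no integrity check is performed:

from sklearn.datasets import fetch_file

path = fetch_file(
    "https://example.com/datasets/my_data.csv",  # placeholder URL
    sha256=None,      # pass the real digest to enable verification
    n_retries=2,
    delay=2,
)
print(path)  # cached under the scikit-learn data home on subsequent calls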
def fetch_california_housing( *, data_home=None, download_if_missing=True, return_X_y=False, as_frame=False, n_retries=3, delay=1.0, ): """Load the California housing dataset (regression). ============== ============== Samples total 20640 Dimensionality 8 Features real Target real 0.15 - 5. ============== ============== Read more in the :ref:`User Guide <california_housing_dataset>`. Parameters ---------- data_home : str or path-like, default=None Specify another download and cache folder for the datasets. By default all scikit-learn data is stored in '~/scikit_learn_data' subfolders. download_if_missing : bool, default=True If False, raise an OSError if the data is not locally available instead of trying to download the data from the source site. return_X_y : bool, default=False If True, returns ``(data.data, data.target)`` instead of a Bunch object. .. versionadded:: 0.20 as_frame : bool, default=False If True, the data is a pandas DataFrame including columns with appropriate dtypes (numeric, string or categorical). The target is a pandas DataFrame or Series depending on the number of target_columns. .. versionadded:: 0.23 n_retries : int, default=3 Number of retries when HTTP errors are encountered. .. versionadded:: 1.5 delay : float, default=1.0 Number of seconds between retries. .. versionadded:: 1.5 Returns ------- dataset : :class:`~sklearn.utils.Bunch` Dictionary-like object, with the following attributes. data : ndarray, shape (20640, 8) Each row corresponding to the 8 feature values in order. If ``as_frame`` is True, ``data`` is a pandas object. target : numpy array of shape (20640,) Each value corresponds to the average house value in units of 100,000. If ``as_frame`` is True, ``target`` is a pandas object. feature_names : list of length 8 Array of ordered feature names used in the dataset. DESCR : str Description of the California housing dataset. frame : pandas DataFrame Only present when `as_frame=True`. DataFrame with ``data`` and ``target``. .. versionadded:: 0.23 (data, target) : tuple if ``return_X_y`` is True A tuple of two ndarray. The first containing a 2D array of shape (n_samples, n_features) with each row representing one sample and each column representing the features. The second ndarray of shape (n_samples,) containing the target samples. .. versionadded:: 0.20 Notes ----- This dataset consists of 20,640 samples and 9 features. Examples -------- >>> from sklearn.datasets import fetch_california_housing >>> housing = fetch_california_housing() >>> print(housing.data.shape, housing.target.shape) (20640, 8) (20640,) >>> print(housing.feature_names[0:6]) ['MedInc', 'HouseAge', 'AveRooms', 'AveBedrms', 'Population', 'AveOccup'] """ data_home = get_data_home(data_home=data_home) if not exists(data_home): makedirs(data_home) filepath = _pkl_filepath(data_home, "cal_housing.pkz") if not exists(filepath): if not download_if_missing: raise OSError("Data not found and `download_if_missing` is False") logger.info( "Downloading Cal. 
housing from {} to {}".format(ARCHIVE.url, data_home) ) archive_path = _fetch_remote( ARCHIVE, dirname=data_home, n_retries=n_retries, delay=delay, ) with tarfile.open(mode="r:gz", name=archive_path) as f: cal_housing = np.loadtxt( f.extractfile("CaliforniaHousing/cal_housing.data"), delimiter="," ) # Columns are not in the same order compared to the previous # URL resource on lib.stat.cmu.edu columns_index = [8, 7, 2, 3, 4, 5, 6, 1, 0] cal_housing = cal_housing[:, columns_index] joblib.dump(cal_housing, filepath, compress=6) remove(archive_path) else: cal_housing = joblib.load(filepath) feature_names = [ "MedInc", "HouseAge", "AveRooms", "AveBedrms", "Population", "AveOccup", "Latitude", "Longitude", ] target, data = cal_housing[:, 0], cal_housing[:, 1:] # avg rooms = total rooms / households data[:, 2] /= data[:, 5] # avg bed rooms = total bed rooms / households data[:, 3] /= data[:, 5] # avg occupancy = population / households data[:, 5] = data[:, 4] / data[:, 5] # target in units of 100,000 target = target / 100000.0 descr = load_descr("california_housing.rst") X = data y = target frame = None target_names = [ "MedHouseVal", ] if as_frame: frame, X, y = _convert_data_dataframe( "fetch_california_housing", data, target, feature_names, target_names ) if return_X_y: return X, y return Bunch( data=X, target=y, frame=frame, target_names=target_names, feature_names=feature_names, DESCR=descr, )
Load the California housing dataset (regression). ============== ============== Samples total 20640 Dimensionality 8 Features real Target real 0.15 - 5. ============== ============== Read more in the :ref:`User Guide <california_housing_dataset>`. Parameters ---------- data_home : str or path-like, default=None Specify another download and cache folder for the datasets. By default all scikit-learn data is stored in '~/scikit_learn_data' subfolders. download_if_missing : bool, default=True If False, raise an OSError if the data is not locally available instead of trying to download the data from the source site. return_X_y : bool, default=False If True, returns ``(data.data, data.target)`` instead of a Bunch object. .. versionadded:: 0.20 as_frame : bool, default=False If True, the data is a pandas DataFrame including columns with appropriate dtypes (numeric, string or categorical). The target is a pandas DataFrame or Series depending on the number of target_columns. .. versionadded:: 0.23 n_retries : int, default=3 Number of retries when HTTP errors are encountered. .. versionadded:: 1.5 delay : float, default=1.0 Number of seconds between retries. .. versionadded:: 1.5 Returns ------- dataset : :class:`~sklearn.utils.Bunch` Dictionary-like object, with the following attributes. data : ndarray, shape (20640, 8) Each row corresponding to the 8 feature values in order. If ``as_frame`` is True, ``data`` is a pandas object. target : numpy array of shape (20640,) Each value corresponds to the average house value in units of 100,000. If ``as_frame`` is True, ``target`` is a pandas object. feature_names : list of length 8 Array of ordered feature names used in the dataset. DESCR : str Description of the California housing dataset. frame : pandas DataFrame Only present when `as_frame=True`. DataFrame with ``data`` and ``target``. .. versionadded:: 0.23 (data, target) : tuple if ``return_X_y`` is True A tuple of two ndarray. The first containing a 2D array of shape (n_samples, n_features) with each row representing one sample and each column representing the features. The second ndarray of shape (n_samples,) containing the target samples. .. versionadded:: 0.20 Notes ----- This dataset consists of 20,640 samples and 9 features. Examples -------- >>> from sklearn.datasets import fetch_california_housing >>> housing = fetch_california_housing() >>> print(housing.data.shape, housing.target.shape) (20640, 8) (20640,) >>> print(housing.feature_names[0:6]) ['MedInc', 'HouseAge', 'AveRooms', 'AveBedrms', 'Population', 'AveOccup']
fetch_california_housing
python
scikit-learn/scikit-learn
sklearn/datasets/_california_housing.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/datasets/_california_housing.py
BSD-3-Clause
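A short sketch of the frame-based interface of `fetch_california_housing`; the first call downloads and caches the data under the scikit-learn data home:

from sklearn.datasets import fetch_california_housing

housing = fetch_california_housing(as_frame=True)
print(housing.frame.shape)                      # (20640, 9): 8 features + target
print(housing.frame["MedHouseVal"].describe())  # target in units of 100,000 USD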
def fetch_covtype( *, data_home=None, download_if_missing=True, random_state=None, shuffle=False, return_X_y=False, as_frame=False, n_retries=3, delay=1.0, ): """Load the covertype dataset (classification). Download it if necessary. ================= ============ Classes 7 Samples total 581012 Dimensionality 54 Features int ================= ============ Read more in the :ref:`User Guide <covtype_dataset>`. Parameters ---------- data_home : str or path-like, default=None Specify another download and cache folder for the datasets. By default all scikit-learn data is stored in '~/scikit_learn_data' subfolders. download_if_missing : bool, default=True If False, raise an OSError if the data is not locally available instead of trying to download the data from the source site. random_state : int, RandomState instance or None, default=None Determines random number generation for dataset shuffling. Pass an int for reproducible output across multiple function calls. See :term:`Glossary <random_state>`. shuffle : bool, default=False Whether to shuffle dataset. return_X_y : bool, default=False If True, returns ``(data.data, data.target)`` instead of a Bunch object. .. versionadded:: 0.20 as_frame : bool, default=False If True, the data is a pandas DataFrame including columns with appropriate dtypes (numeric). The target is a pandas DataFrame or Series depending on the number of target columns. If `return_X_y` is True, then (`data`, `target`) will be pandas DataFrames or Series as described below. .. versionadded:: 0.24 n_retries : int, default=3 Number of retries when HTTP errors are encountered. .. versionadded:: 1.5 delay : float, default=1.0 Number of seconds between retries. .. versionadded:: 1.5 Returns ------- dataset : :class:`~sklearn.utils.Bunch` Dictionary-like object, with the following attributes. data : ndarray of shape (581012, 54) Each row corresponds to the 54 features in the dataset. target : ndarray of shape (581012,) Each value corresponds to one of the 7 forest covertypes with values ranging between 1 to 7. frame : dataframe of shape (581012, 55) Only present when `as_frame=True`. Contains `data` and `target`. DESCR : str Description of the forest covertype dataset. feature_names : list The names of the dataset columns. target_names: list The names of the target columns. (data, target) : tuple if ``return_X_y`` is True A tuple of two ndarray. The first containing a 2D array of shape (n_samples, n_features) with each row representing one sample and each column representing the features. The second ndarray of shape (n_samples,) containing the target samples. .. versionadded:: 0.20 Examples -------- >>> from sklearn.datasets import fetch_covtype >>> cov_type = fetch_covtype() >>> cov_type.data.shape (581012, 54) >>> cov_type.target.shape (581012,) >>> # Let's check the 4 first feature names >>> cov_type.feature_names[:4] ['Elevation', 'Aspect', 'Slope', 'Horizontal_Distance_To_Hydrology'] """ data_home = get_data_home(data_home=data_home) covtype_dir = join(data_home, "covertype") samples_path = _pkl_filepath(covtype_dir, "samples") targets_path = _pkl_filepath(covtype_dir, "targets") available = exists(samples_path) and exists(targets_path) if download_if_missing and not available: os.makedirs(covtype_dir, exist_ok=True) # Creating temp_dir as a direct subdirectory of the target directory # guarantees that both reside on the same filesystem, so that we can use # os.rename to atomically move the data files to their target location. 
with TemporaryDirectory(dir=covtype_dir) as temp_dir: logger.info(f"Downloading {ARCHIVE.url}") archive_path = _fetch_remote( ARCHIVE, dirname=temp_dir, n_retries=n_retries, delay=delay ) Xy = np.genfromtxt(GzipFile(filename=archive_path), delimiter=",") X = Xy[:, :-1] y = Xy[:, -1].astype(np.int32, copy=False) samples_tmp_path = _pkl_filepath(temp_dir, "samples") joblib.dump(X, samples_tmp_path, compress=9) os.rename(samples_tmp_path, samples_path) targets_tmp_path = _pkl_filepath(temp_dir, "targets") joblib.dump(y, targets_tmp_path, compress=9) os.rename(targets_tmp_path, targets_path) elif not available and not download_if_missing: raise OSError("Data not found and `download_if_missing` is False") try: X, y except NameError: X = joblib.load(samples_path) y = joblib.load(targets_path) if shuffle: ind = np.arange(X.shape[0]) rng = check_random_state(random_state) rng.shuffle(ind) X = X[ind] y = y[ind] fdescr = load_descr("covtype.rst") frame = None if as_frame: frame, X, y = _convert_data_dataframe( caller_name="fetch_covtype", data=X, target=y, feature_names=FEATURE_NAMES, target_names=TARGET_NAMES, ) if return_X_y: return X, y return Bunch( data=X, target=y, frame=frame, target_names=TARGET_NAMES, feature_names=FEATURE_NAMES, DESCR=fdescr, )
Load the covertype dataset (classification). Download it if necessary. ================= ============ Classes 7 Samples total 581012 Dimensionality 54 Features int ================= ============ Read more in the :ref:`User Guide <covtype_dataset>`. Parameters ---------- data_home : str or path-like, default=None Specify another download and cache folder for the datasets. By default all scikit-learn data is stored in '~/scikit_learn_data' subfolders. download_if_missing : bool, default=True If False, raise an OSError if the data is not locally available instead of trying to download the data from the source site. random_state : int, RandomState instance or None, default=None Determines random number generation for dataset shuffling. Pass an int for reproducible output across multiple function calls. See :term:`Glossary <random_state>`. shuffle : bool, default=False Whether to shuffle dataset. return_X_y : bool, default=False If True, returns ``(data.data, data.target)`` instead of a Bunch object. .. versionadded:: 0.20 as_frame : bool, default=False If True, the data is a pandas DataFrame including columns with appropriate dtypes (numeric). The target is a pandas DataFrame or Series depending on the number of target columns. If `return_X_y` is True, then (`data`, `target`) will be pandas DataFrames or Series as described below. .. versionadded:: 0.24 n_retries : int, default=3 Number of retries when HTTP errors are encountered. .. versionadded:: 1.5 delay : float, default=1.0 Number of seconds between retries. .. versionadded:: 1.5 Returns ------- dataset : :class:`~sklearn.utils.Bunch` Dictionary-like object, with the following attributes. data : ndarray of shape (581012, 54) Each row corresponds to the 54 features in the dataset. target : ndarray of shape (581012,) Each value corresponds to one of the 7 forest covertypes with values ranging between 1 to 7. frame : dataframe of shape (581012, 55) Only present when `as_frame=True`. Contains `data` and `target`. DESCR : str Description of the forest covertype dataset. feature_names : list The names of the dataset columns. target_names: list The names of the target columns. (data, target) : tuple if ``return_X_y`` is True A tuple of two ndarray. The first containing a 2D array of shape (n_samples, n_features) with each row representing one sample and each column representing the features. The second ndarray of shape (n_samples,) containing the target samples. .. versionadded:: 0.20 Examples -------- >>> from sklearn.datasets import fetch_covtype >>> cov_type = fetch_covtype() >>> cov_type.data.shape (581012, 54) >>> cov_type.target.shape (581012,) >>> # Let's check the 4 first feature names >>> cov_type.feature_names[:4] ['Elevation', 'Aspect', 'Slope', 'Horizontal_Distance_To_Hydrology']
fetch_covtype
python
scikit-learn/scikit-learn
sklearn/datasets/_covtype.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/datasets/_covtype.py
BSD-3-Clause
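A minimal sketch for `fetch_covtype`; the dataset is downloaded and cached on first use, and `shuffle` with a fixed `random_state` gives a reproducible ordering:

import numpy as np
from sklearn.datasets import fetch_covtype

cov = fetch_covtype(shuffle=True, random_state=0)
print(cov.data.shape, cov.target.shape)  # (581012, 54) (581012,)
print(np.unique(cov.target))             # cover types are labelled 1 through 7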
def fetch_kddcup99( *, subset=None, data_home=None, shuffle=False, random_state=None, percent10=True, download_if_missing=True, return_X_y=False, as_frame=False, n_retries=3, delay=1.0, ): """Load the kddcup99 dataset (classification). Download it if necessary. ================= ==================================== Classes 23 Samples total 4898431 Dimensionality 41 Features discrete (int) or continuous (float) ================= ==================================== Read more in the :ref:`User Guide <kddcup99_dataset>`. .. versionadded:: 0.18 Parameters ---------- subset : {'SA', 'SF', 'http', 'smtp'}, default=None To return the corresponding classical subsets of kddcup 99. If None, return the entire kddcup 99 dataset. data_home : str or path-like, default=None Specify another download and cache folder for the datasets. By default all scikit-learn data is stored in '~/scikit_learn_data' subfolders. .. versionadded:: 0.19 shuffle : bool, default=False Whether to shuffle dataset. random_state : int, RandomState instance or None, default=None Determines random number generation for dataset shuffling and for selection of abnormal samples if `subset='SA'`. Pass an int for reproducible output across multiple function calls. See :term:`Glossary <random_state>`. percent10 : bool, default=True Whether to load only 10 percent of the data. download_if_missing : bool, default=True If False, raise an OSError if the data is not locally available instead of trying to download the data from the source site. return_X_y : bool, default=False If True, returns ``(data, target)`` instead of a Bunch object. See below for more information about the `data` and `target` object. .. versionadded:: 0.20 as_frame : bool, default=False If `True`, returns a pandas Dataframe for the ``data`` and ``target`` objects in the `Bunch` returned object; `Bunch` return object will also have a ``frame`` member. .. versionadded:: 0.24 n_retries : int, default=3 Number of retries when HTTP errors are encountered. .. versionadded:: 1.5 delay : float, default=1.0 Number of seconds between retries. .. versionadded:: 1.5 Returns ------- data : :class:`~sklearn.utils.Bunch` Dictionary-like object, with the following attributes. data : {ndarray, dataframe} of shape (494021, 41) The data matrix to learn. If `as_frame=True`, `data` will be a pandas DataFrame. target : {ndarray, series} of shape (494021,) The regression target for each sample. If `as_frame=True`, `target` will be a pandas Series. frame : dataframe of shape (494021, 42) Only present when `as_frame=True`. Contains `data` and `target`. DESCR : str The full description of the dataset. feature_names : list The names of the dataset columns target_names: list The names of the target columns (data, target) : tuple if ``return_X_y`` is True A tuple of two ndarray. The first containing a 2D array of shape (n_samples, n_features) with each row representing one sample and each column representing the features. The second ndarray of shape (n_samples,) containing the target samples. .. versionadded:: 0.20 """ data_home = get_data_home(data_home=data_home) kddcup99 = _fetch_brute_kddcup99( data_home=data_home, percent10=percent10, download_if_missing=download_if_missing, n_retries=n_retries, delay=delay, ) data = kddcup99.data target = kddcup99.target feature_names = kddcup99.feature_names target_names = kddcup99.target_names if subset == "SA": s = target == b"normal." 
t = np.logical_not(s) normal_samples = data[s, :] normal_targets = target[s] abnormal_samples = data[t, :] abnormal_targets = target[t] n_samples_abnormal = abnormal_samples.shape[0] # selected abnormal samples: random_state = check_random_state(random_state) r = random_state.randint(0, n_samples_abnormal, 3377) abnormal_samples = abnormal_samples[r] abnormal_targets = abnormal_targets[r] data = np.r_[normal_samples, abnormal_samples] target = np.r_[normal_targets, abnormal_targets] if subset == "SF" or subset == "http" or subset == "smtp": # select all samples with positive logged_in attribute: s = data[:, 11] == 1 data = np.c_[data[s, :11], data[s, 12:]] feature_names = feature_names[:11] + feature_names[12:] target = target[s] data[:, 0] = np.log((data[:, 0] + 0.1).astype(float, copy=False)) data[:, 4] = np.log((data[:, 4] + 0.1).astype(float, copy=False)) data[:, 5] = np.log((data[:, 5] + 0.1).astype(float, copy=False)) if subset == "http": s = data[:, 2] == b"http" data = data[s] target = target[s] data = np.c_[data[:, 0], data[:, 4], data[:, 5]] feature_names = [feature_names[0], feature_names[4], feature_names[5]] if subset == "smtp": s = data[:, 2] == b"smtp" data = data[s] target = target[s] data = np.c_[data[:, 0], data[:, 4], data[:, 5]] feature_names = [feature_names[0], feature_names[4], feature_names[5]] if subset == "SF": data = np.c_[data[:, 0], data[:, 2], data[:, 4], data[:, 5]] feature_names = [ feature_names[0], feature_names[2], feature_names[4], feature_names[5], ] if shuffle: data, target = shuffle_method(data, target, random_state=random_state) fdescr = load_descr("kddcup99.rst") frame = None if as_frame: frame, data, target = _convert_data_dataframe( "fetch_kddcup99", data, target, feature_names, target_names ) if return_X_y: return data, target return Bunch( data=data, target=target, frame=frame, target_names=target_names, feature_names=feature_names, DESCR=fdescr, )
Load the kddcup99 dataset (classification). Download it if necessary. ================= ==================================== Classes 23 Samples total 4898431 Dimensionality 41 Features discrete (int) or continuous (float) ================= ==================================== Read more in the :ref:`User Guide <kddcup99_dataset>`. .. versionadded:: 0.18 Parameters ---------- subset : {'SA', 'SF', 'http', 'smtp'}, default=None To return the corresponding classical subsets of kddcup 99. If None, return the entire kddcup 99 dataset. data_home : str or path-like, default=None Specify another download and cache folder for the datasets. By default all scikit-learn data is stored in '~/scikit_learn_data' subfolders. .. versionadded:: 0.19 shuffle : bool, default=False Whether to shuffle dataset. random_state : int, RandomState instance or None, default=None Determines random number generation for dataset shuffling and for selection of abnormal samples if `subset='SA'`. Pass an int for reproducible output across multiple function calls. See :term:`Glossary <random_state>`. percent10 : bool, default=True Whether to load only 10 percent of the data. download_if_missing : bool, default=True If False, raise an OSError if the data is not locally available instead of trying to download the data from the source site. return_X_y : bool, default=False If True, returns ``(data, target)`` instead of a Bunch object. See below for more information about the `data` and `target` object. .. versionadded:: 0.20 as_frame : bool, default=False If `True`, returns a pandas Dataframe for the ``data`` and ``target`` objects in the `Bunch` returned object; `Bunch` return object will also have a ``frame`` member. .. versionadded:: 0.24 n_retries : int, default=3 Number of retries when HTTP errors are encountered. .. versionadded:: 1.5 delay : float, default=1.0 Number of seconds between retries. .. versionadded:: 1.5 Returns ------- data : :class:`~sklearn.utils.Bunch` Dictionary-like object, with the following attributes. data : {ndarray, dataframe} of shape (494021, 41) The data matrix to learn. If `as_frame=True`, `data` will be a pandas DataFrame. target : {ndarray, series} of shape (494021,) The regression target for each sample. If `as_frame=True`, `target` will be a pandas Series. frame : dataframe of shape (494021, 42) Only present when `as_frame=True`. Contains `data` and `target`. DESCR : str The full description of the dataset. feature_names : list The names of the dataset columns target_names: list The names of the target columns (data, target) : tuple if ``return_X_y`` is True A tuple of two ndarray. The first containing a 2D array of shape (n_samples, n_features) with each row representing one sample and each column representing the features. The second ndarray of shape (n_samples,) containing the target samples. .. versionadded:: 0.20
fetch_kddcup99
python
scikit-learn/scikit-learn
sklearn/datasets/_kddcup99.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/datasets/_kddcup99.py
BSD-3-Clause
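A minimal usage sketch for the loader above. It assumes network access (or a warm local cache), since the archive is downloaded on first use; the subset choices shown are only illustrative.

from sklearn.datasets import fetch_kddcup99

# Load the 'SA' subset (mostly normal traffic plus a small resampled set of
# anomalies), shuffled reproducibly; percent10=True keeps the download small.
kdd = fetch_kddcup99(subset="SA", percent10=True, shuffle=True, random_state=0)
print(kdd.data.shape, kdd.target.shape)
print(kdd.feature_names[:5], kdd.target_names)

# return_X_y=True skips the Bunch wrapper entirely.
X, y = fetch_kddcup99(subset="http", return_X_y=True)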
def _fetch_brute_kddcup99( data_home=None, download_if_missing=True, percent10=True, n_retries=3, delay=1.0 ): """Load the kddcup99 dataset, downloading it if necessary. Parameters ---------- data_home : str, default=None Specify another download and cache folder for the datasets. By default all scikit-learn data is stored in '~/scikit_learn_data' subfolders. download_if_missing : bool, default=True If False, raise an OSError if the data is not locally available instead of trying to download the data from the source site. percent10 : bool, default=True Whether to load only 10 percent of the data. n_retries : int, default=3 Number of retries when HTTP errors are encountered. delay : float, default=1.0 Number of seconds between retries. Returns ------- dataset : :class:`~sklearn.utils.Bunch` Dictionary-like object, with the following attributes. data : ndarray of shape (494021, 41) Each row corresponds to the 41 features in the dataset. target : ndarray of shape (494021,) Each value corresponds to one of the 21 attack types or to the label 'normal.'. feature_names : list The names of the dataset columns target_names: list The names of the target columns DESCR : str Description of the kddcup99 dataset. """ data_home = get_data_home(data_home=data_home) dir_suffix = "-py3" if percent10: kddcup_dir = join(data_home, "kddcup99_10" + dir_suffix) archive = ARCHIVE_10_PERCENT else: kddcup_dir = join(data_home, "kddcup99" + dir_suffix) archive = ARCHIVE samples_path = join(kddcup_dir, "samples") targets_path = join(kddcup_dir, "targets") available = exists(samples_path) dt = [ ("duration", int), ("protocol_type", "S4"), ("service", "S11"), ("flag", "S6"), ("src_bytes", int), ("dst_bytes", int), ("land", int), ("wrong_fragment", int), ("urgent", int), ("hot", int), ("num_failed_logins", int), ("logged_in", int), ("num_compromised", int), ("root_shell", int), ("su_attempted", int), ("num_root", int), ("num_file_creations", int), ("num_shells", int), ("num_access_files", int), ("num_outbound_cmds", int), ("is_host_login", int), ("is_guest_login", int), ("count", int), ("srv_count", int), ("serror_rate", float), ("srv_serror_rate", float), ("rerror_rate", float), ("srv_rerror_rate", float), ("same_srv_rate", float), ("diff_srv_rate", float), ("srv_diff_host_rate", float), ("dst_host_count", int), ("dst_host_srv_count", int), ("dst_host_same_srv_rate", float), ("dst_host_diff_srv_rate", float), ("dst_host_same_src_port_rate", float), ("dst_host_srv_diff_host_rate", float), ("dst_host_serror_rate", float), ("dst_host_srv_serror_rate", float), ("dst_host_rerror_rate", float), ("dst_host_srv_rerror_rate", float), ("labels", "S16"), ] column_names = [c[0] for c in dt] target_names = column_names[-1] feature_names = column_names[:-1] if available: try: X = joblib.load(samples_path) y = joblib.load(targets_path) except Exception as e: raise OSError( "The cache for fetch_kddcup99 is invalid, please delete " f"{kddcup_dir} and run the fetch_kddcup99 again" ) from e elif download_if_missing: _mkdirp(kddcup_dir) logger.info("Downloading %s" % archive.url) _fetch_remote(archive, dirname=kddcup_dir, n_retries=n_retries, delay=delay) DT = np.dtype(dt) logger.debug("extracting archive") archive_path = join(kddcup_dir, archive.filename) file_ = GzipFile(filename=archive_path, mode="r") Xy = [] for line in file_.readlines(): line = line.decode() Xy.append(line.replace("\n", "").split(",")) file_.close() logger.debug("extraction done") os.remove(archive_path) Xy = np.asarray(Xy, dtype=object) for j in range(42): Xy[:, j] 
= Xy[:, j].astype(DT[j]) X = Xy[:, :-1] y = Xy[:, -1] # XXX bug when compress!=0: # (error: 'Incorrect data length while decompressing[...] the file # could be corrupted.') joblib.dump(X, samples_path, compress=0) joblib.dump(y, targets_path, compress=0) else: raise OSError("Data not found and `download_if_missing` is False") return Bunch( data=X, target=y, feature_names=feature_names, target_names=[target_names], )
Load the kddcup99 dataset, downloading it if necessary. Parameters ---------- data_home : str, default=None Specify another download and cache folder for the datasets. By default all scikit-learn data is stored in '~/scikit_learn_data' subfolders. download_if_missing : bool, default=True If False, raise an OSError if the data is not locally available instead of trying to download the data from the source site. percent10 : bool, default=True Whether to load only 10 percent of the data. n_retries : int, default=3 Number of retries when HTTP errors are encountered. delay : float, default=1.0 Number of seconds between retries. Returns ------- dataset : :class:`~sklearn.utils.Bunch` Dictionary-like object, with the following attributes. data : ndarray of shape (494021, 41) Each row corresponds to the 41 features in the dataset. target : ndarray of shape (494021,) Each value corresponds to one of the 21 attack types or to the label 'normal.'. feature_names : list The names of the dataset columns target_names: list The names of the target columns DESCR : str Description of the kddcup99 dataset.
_fetch_brute_kddcup99
python
scikit-learn/scikit-learn
sklearn/datasets/_kddcup99.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/datasets/_kddcup99.py
BSD-3-Clause
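The loader above parses the raw comma-separated rows into an object array and then casts each column through a structured dtype. A minimal sketch of that per-column casting idea on toy data (the rows and the shortened dtype list here are made up for illustration):

import numpy as np

# Toy rows mimicking the mixed string/int layout of the KDD records.
rows = [
    ["0", "tcp", "http", "181", "5450", "normal."],
    ["2", "udp", "domain", "146", "0", "smurf."],
]
dt = [
    ("duration", int), ("protocol_type", "S4"), ("service", "S11"),
    ("src_bytes", int), ("dst_bytes", int), ("labels", "S16"),
]
DT = np.dtype(dt)

Xy = np.asarray(rows, dtype=object)
for j in range(Xy.shape[1]):
    # Cast column j to the dtype declared for that field.
    Xy[:, j] = Xy[:, j].astype(DT[j])

X, y = Xy[:, :-1], Xy[:, -1]
print(X[0], y[0])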
def _mkdirp(d): """Ensure directory d exists (like mkdir -p on Unix) No guarantee that the directory is writable. """ try: os.makedirs(d) except OSError as e: if e.errno != errno.EEXIST: raise
Ensure directory d exists (like mkdir -p on Unix). No guarantee that the directory is writable.
_mkdirp
python
scikit-learn/scikit-learn
sklearn/datasets/_kddcup99.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/datasets/_kddcup99.py
BSD-3-Clause
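On Python 3 the same effect can be had without catching EEXIST; a small sketch of the modern equivalent (not how the helper above is written, and the path is a placeholder):

import os

def mkdirp(d):
    # exist_ok=True makes makedirs a no-op when the directory already exists,
    # while still raising for other OS errors (e.g. permission problems).
    os.makedirs(d, exist_ok=True)

mkdirp("/tmp/example_cache_dir")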
def _check_fetch_lfw( data_home=None, funneled=True, download_if_missing=True, n_retries=3, delay=1.0 ): """Helper function to download any missing LFW data""" data_home = get_data_home(data_home=data_home) lfw_home = join(data_home, "lfw_home") if not exists(lfw_home): makedirs(lfw_home) for target in TARGETS: target_filepath = join(lfw_home, target.filename) if not exists(target_filepath): if download_if_missing: logger.info("Downloading LFW metadata: %s", target.url) _fetch_remote( target, dirname=lfw_home, n_retries=n_retries, delay=delay ) else: raise OSError("%s is missing" % target_filepath) if funneled: data_folder_path = join(lfw_home, "lfw_funneled") archive = FUNNELED_ARCHIVE else: data_folder_path = join(lfw_home, "lfw") archive = ARCHIVE if not exists(data_folder_path): archive_path = join(lfw_home, archive.filename) if not exists(archive_path): if download_if_missing: logger.info("Downloading LFW data (~200MB): %s", archive.url) _fetch_remote( archive, dirname=lfw_home, n_retries=n_retries, delay=delay ) else: raise OSError("%s is missing" % archive_path) import tarfile logger.debug("Decompressing the data archive to %s", data_folder_path) with tarfile.open(archive_path, "r:gz") as fp: # Use filter="data" to prevent the most dangerous security issues. # For more details, see # https://docs.python.org/3.9/library/tarfile.html#tarfile.TarFile.extractall fp.extractall(path=lfw_home, filter="data") remove(archive_path) return lfw_home, data_folder_path
Helper function to download any missing LFW data
_check_fetch_lfw
python
scikit-learn/scikit-learn
sklearn/datasets/_lfw.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/datasets/_lfw.py
BSD-3-Clause
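The extraction step above relies on tarfile's filter="data" mode to reject absolute paths, links escaping the target tree, and other unsafe members. A standalone sketch of that pattern; the archive path and destination are placeholders, and filter="data" requires Python 3.12+ (or a recent patch release of 3.8-3.11 where it was backported):

import tarfile

archive_path = "lfw-funneled.tgz"   # placeholder path
destination = "lfw_home"            # placeholder extraction root

with tarfile.open(archive_path, "r:gz") as fp:
    # filter="data" sanitises member names and permissions before extraction.
    fp.extractall(path=destination, filter="data")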
def _fetch_lfw_people( data_folder_path, slice_=None, color=False, resize=None, min_faces_per_person=0 ): """Perform the actual data loading for the lfw people dataset This operation is meant to be cached by a joblib wrapper. """ # scan the data folder content to retain people with more that # `min_faces_per_person` face pictures person_names, file_paths = [], [] for person_name in sorted(listdir(data_folder_path)): folder_path = join(data_folder_path, person_name) if not isdir(folder_path): continue paths = [join(folder_path, f) for f in sorted(listdir(folder_path))] n_pictures = len(paths) if n_pictures >= min_faces_per_person: person_name = person_name.replace("_", " ") person_names.extend([person_name] * n_pictures) file_paths.extend(paths) n_faces = len(file_paths) if n_faces == 0: raise ValueError( "min_faces_per_person=%d is too restrictive" % min_faces_per_person ) target_names = np.unique(person_names) target = np.searchsorted(target_names, person_names) faces = _load_imgs(file_paths, slice_, color, resize) # shuffle the faces with a deterministic RNG scheme to avoid having # all faces of the same person in a row, as it would break some # cross validation and learning algorithms such as SGD and online # k-means that make an IID assumption indices = np.arange(n_faces) np.random.RandomState(42).shuffle(indices) faces, target = faces[indices], target[indices] return faces, target, target_names
Perform the actual data loading for the LFW people dataset. This operation is meant to be cached by a joblib wrapper.
_fetch_lfw_people
python
scikit-learn/scikit-learn
sklearn/datasets/_lfw.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/datasets/_lfw.py
BSD-3-Clause
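The fixed-seed shuffle at the end of the loader above is a small but important detail: it breaks the person-by-person ordering without introducing run-to-run nondeterminism. A sketch of the same pattern on toy arrays:

import numpy as np

faces = np.arange(12).reshape(6, 2)          # stand-in for the image matrix
target = np.array([0, 0, 0, 1, 1, 1])        # three images per "person"

indices = np.arange(len(faces))
np.random.RandomState(42).shuffle(indices)   # deterministic permutation

faces, target = faces[indices], target[indices]
print(target)   # same ordering on every run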
def fetch_lfw_people( *, data_home=None, funneled=True, resize=0.5, min_faces_per_person=0, color=False, slice_=(slice(70, 195), slice(78, 172)), download_if_missing=True, return_X_y=False, n_retries=3, delay=1.0, ): """Load the Labeled Faces in the Wild (LFW) people dataset \ (classification). Download it if necessary. ================= ======================= Classes 5749 Samples total 13233 Dimensionality 5828 Features real, between 0 and 255 ================= ======================= For a usage example of this dataset, see :ref:`sphx_glr_auto_examples_applications_plot_face_recognition.py`. Read more in the :ref:`User Guide <labeled_faces_in_the_wild_dataset>`. Parameters ---------- data_home : str or path-like, default=None Specify another download and cache folder for the datasets. By default all scikit-learn data is stored in '~/scikit_learn_data' subfolders. funneled : bool, default=True Download and use the funneled variant of the dataset. resize : float or None, default=0.5 Ratio used to resize the each face picture. If `None`, no resizing is performed. min_faces_per_person : int, default=None The extracted dataset will only retain pictures of people that have at least `min_faces_per_person` different pictures. color : bool, default=False Keep the 3 RGB channels instead of averaging them to a single gray level channel. If color is True the shape of the data has one more dimension than the shape with color = False. slice_ : tuple of slice, default=(slice(70, 195), slice(78, 172)) Provide a custom 2D slice (height, width) to extract the 'interesting' part of the jpeg files and avoid use statistical correlation from the background. download_if_missing : bool, default=True If False, raise an OSError if the data is not locally available instead of trying to download the data from the source site. return_X_y : bool, default=False If True, returns ``(dataset.data, dataset.target)`` instead of a Bunch object. See below for more information about the `dataset.data` and `dataset.target` object. .. versionadded:: 0.20 n_retries : int, default=3 Number of retries when HTTP errors are encountered. .. versionadded:: 1.5 delay : float, default=1.0 Number of seconds between retries. .. versionadded:: 1.5 Returns ------- dataset : :class:`~sklearn.utils.Bunch` Dictionary-like object, with the following attributes. data : numpy array of shape (13233, 2914) Each row corresponds to a ravelled face image of original size 62 x 47 pixels. Changing the ``slice_`` or resize parameters will change the shape of the output. images : numpy array of shape (13233, 62, 47) Each row is a face image corresponding to one of the 5749 people in the dataset. Changing the ``slice_`` or resize parameters will change the shape of the output. target : numpy array of shape (13233,) Labels associated to each face image. Those labels range from 0-5748 and correspond to the person IDs. target_names : numpy array of shape (5749,) Names of all persons in the dataset. Position in array corresponds to the person ID in the target array. DESCR : str Description of the Labeled Faces in the Wild (LFW) dataset. (data, target) : tuple if ``return_X_y`` is True A tuple of two ndarray. The first containing a 2D array of shape (n_samples, n_features) with each row representing one sample and each column representing the features. The second ndarray of shape (n_samples,) containing the target samples. .. 
versionadded:: 0.20 Examples -------- >>> from sklearn.datasets import fetch_lfw_people >>> lfw_people = fetch_lfw_people() >>> lfw_people.data.shape (13233, 2914) >>> lfw_people.target.shape (13233,) >>> for name in lfw_people.target_names[:5]: ... print(name) AJ Cook AJ Lamas Aaron Eckhart Aaron Guiel Aaron Patterson """ lfw_home, data_folder_path = _check_fetch_lfw( data_home=data_home, funneled=funneled, download_if_missing=download_if_missing, n_retries=n_retries, delay=delay, ) logger.debug("Loading LFW people faces from %s", lfw_home) # wrap the loader in a memoizing function that will return memmaped data # arrays for optimal memory usage m = Memory(location=lfw_home, compress=6, verbose=0) load_func = m.cache(_fetch_lfw_people) # load and memoize the pairs as np arrays faces, target, target_names = load_func( data_folder_path, resize=resize, min_faces_per_person=min_faces_per_person, color=color, slice_=slice_, ) X = faces.reshape(len(faces), -1) fdescr = load_descr("lfw.rst") if return_X_y: return X, target # pack the results as a Bunch instance return Bunch( data=X, images=faces, target=target, target_names=target_names, DESCR=fdescr )
Load the Labeled Faces in the Wild (LFW) people dataset (classification). Download it if necessary. ================= ======================= Classes 5749 Samples total 13233 Dimensionality 5828 Features real, between 0 and 255 ================= ======================= For a usage example of this dataset, see :ref:`sphx_glr_auto_examples_applications_plot_face_recognition.py`. Read more in the :ref:`User Guide <labeled_faces_in_the_wild_dataset>`. Parameters ---------- data_home : str or path-like, default=None Specify another download and cache folder for the datasets. By default all scikit-learn data is stored in '~/scikit_learn_data' subfolders. funneled : bool, default=True Download and use the funneled variant of the dataset. resize : float or None, default=0.5 Ratio used to resize the each face picture. If `None`, no resizing is performed. min_faces_per_person : int, default=None The extracted dataset will only retain pictures of people that have at least `min_faces_per_person` different pictures. color : bool, default=False Keep the 3 RGB channels instead of averaging them to a single gray level channel. If color is True the shape of the data has one more dimension than the shape with color = False. slice_ : tuple of slice, default=(slice(70, 195), slice(78, 172)) Provide a custom 2D slice (height, width) to extract the 'interesting' part of the jpeg files and avoid use statistical correlation from the background. download_if_missing : bool, default=True If False, raise an OSError if the data is not locally available instead of trying to download the data from the source site. return_X_y : bool, default=False If True, returns ``(dataset.data, dataset.target)`` instead of a Bunch object. See below for more information about the `dataset.data` and `dataset.target` object. .. versionadded:: 0.20 n_retries : int, default=3 Number of retries when HTTP errors are encountered. .. versionadded:: 1.5 delay : float, default=1.0 Number of seconds between retries. .. versionadded:: 1.5 Returns ------- dataset : :class:`~sklearn.utils.Bunch` Dictionary-like object, with the following attributes. data : numpy array of shape (13233, 2914) Each row corresponds to a ravelled face image of original size 62 x 47 pixels. Changing the ``slice_`` or resize parameters will change the shape of the output. images : numpy array of shape (13233, 62, 47) Each row is a face image corresponding to one of the 5749 people in the dataset. Changing the ``slice_`` or resize parameters will change the shape of the output. target : numpy array of shape (13233,) Labels associated to each face image. Those labels range from 0-5748 and correspond to the person IDs. target_names : numpy array of shape (5749,) Names of all persons in the dataset. Position in array corresponds to the person ID in the target array. DESCR : str Description of the Labeled Faces in the Wild (LFW) dataset. (data, target) : tuple if ``return_X_y`` is True A tuple of two ndarray. The first containing a 2D array of shape (n_samples, n_features) with each row representing one sample and each column representing the features. The second ndarray of shape (n_samples,) containing the target samples. .. versionadded:: 0.20 Examples -------- >>> from sklearn.datasets import fetch_lfw_people >>> lfw_people = fetch_lfw_people() >>> lfw_people.data.shape (13233, 2914) >>> lfw_people.target.shape (13233,) >>> for name in lfw_people.target_names[:5]: ... print(name) AJ Cook AJ Lamas Aaron Eckhart Aaron Guiel Aaron Patterson
fetch_lfw_people
python
scikit-learn/scikit-learn
sklearn/datasets/_lfw.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/datasets/_lfw.py
BSD-3-Clause
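The public loader above wraps the private worker in joblib's Memory.cache, so the expensive JPEG decoding runs once per parameter combination and later calls hit the on-disk cache. A minimal sketch of that caching pattern with a stand-in function; the cached function and cache directory here are illustrative, not scikit-learn's:

from joblib import Memory
import numpy as np

memory = Memory(location="./joblib_cache", compress=6, verbose=0)

@memory.cache
def load_faces(resize=0.5):
    # Pretend this is the slow JPEG-decoding step.
    return np.random.RandomState(0).rand(10, int(62 * resize), int(47 * resize))

a = load_faces(resize=0.5)   # computed and written to ./joblib_cache
b = load_faces(resize=0.5)   # served from the on-disk cache
assert np.array_equal(a, b)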
def _fetch_lfw_pairs( index_file_path, data_folder_path, slice_=None, color=False, resize=None ): """Perform the actual data loading for the LFW pairs dataset This operation is meant to be cached by a joblib wrapper. """ # parse the index file to find the number of pairs to be able to allocate # the right amount of memory before starting to decode the jpeg files with open(index_file_path, "rb") as index_file: split_lines = [ln.decode().strip().split("\t") for ln in index_file] pair_specs = [sl for sl in split_lines if len(sl) > 2] n_pairs = len(pair_specs) # iterating over the metadata lines for each pair to find the filename to # decode and load in memory target = np.zeros(n_pairs, dtype=int) file_paths = list() for i, components in enumerate(pair_specs): if len(components) == 3: target[i] = 1 pair = ( (components[0], int(components[1]) - 1), (components[0], int(components[2]) - 1), ) elif len(components) == 4: target[i] = 0 pair = ( (components[0], int(components[1]) - 1), (components[2], int(components[3]) - 1), ) else: raise ValueError("invalid line %d: %r" % (i + 1, components)) for j, (name, idx) in enumerate(pair): try: person_folder = join(data_folder_path, name) except TypeError: person_folder = join(data_folder_path, str(name, "UTF-8")) filenames = list(sorted(listdir(person_folder))) file_path = join(person_folder, filenames[idx]) file_paths.append(file_path) pairs = _load_imgs(file_paths, slice_, color, resize) shape = list(pairs.shape) n_faces = shape.pop(0) shape.insert(0, 2) shape.insert(0, n_faces // 2) pairs.shape = shape return pairs, target, np.array(["Different persons", "Same person"])
Perform the actual data loading for the LFW pairs dataset. This operation is meant to be cached by a joblib wrapper.
_fetch_lfw_pairs
python
scikit-learn/scikit-learn
sklearn/datasets/_lfw.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/datasets/_lfw.py
BSD-3-Clause
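The pairs index files follow the LFW convention parsed above: three tab-separated fields mean two images of the same person, four fields mean images of two different people. A tiny parsing sketch on hand-written lines (the sample lines are fabricated):

# Fabricated lines in the pairsDevTrain.txt format.
lines = [
    "Aaron_Peirsol\t1\t2",           # same person, images #1 and #2
    "Aaron_Peirsol\t1\tZico\t1",     # two different people
]

for line in lines:
    parts = line.strip().split("\t")
    if len(parts) == 3:
        name, i, j = parts
        print("match:", (name, int(i) - 1), (name, int(j) - 1))
    elif len(parts) == 4:
        n1, i, n2, j = parts
        print("non-match:", (n1, int(i) - 1), (n2, int(j) - 1))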
def fetch_lfw_pairs( *, subset="train", data_home=None, funneled=True, resize=0.5, color=False, slice_=(slice(70, 195), slice(78, 172)), download_if_missing=True, n_retries=3, delay=1.0, ): """Load the Labeled Faces in the Wild (LFW) pairs dataset (classification). Download it if necessary. ================= ======================= Classes 2 Samples total 13233 Dimensionality 5828 Features real, between 0 and 255 ================= ======================= In the `original paper <https://people.cs.umass.edu/~elm/papers/lfw.pdf>`_ the "pairs" version corresponds to the "restricted task", where the experimenter should not use the name of a person to infer the equivalence or non-equivalence of two face images that are not explicitly given in the training set. The original images are 250 x 250 pixels, but the default slice and resize arguments reduce them to 62 x 47. Read more in the :ref:`User Guide <labeled_faces_in_the_wild_dataset>`. Parameters ---------- subset : {'train', 'test', '10_folds'}, default='train' Select the dataset to load: 'train' for the development training set, 'test' for the development test set, and '10_folds' for the official evaluation set that is meant to be used with a 10-folds cross validation. data_home : str or path-like, default=None Specify another download and cache folder for the datasets. By default all scikit-learn data is stored in '~/scikit_learn_data' subfolders. funneled : bool, default=True Download and use the funneled variant of the dataset. resize : float, default=0.5 Ratio used to resize the each face picture. color : bool, default=False Keep the 3 RGB channels instead of averaging them to a single gray level channel. If color is True the shape of the data has one more dimension than the shape with color = False. slice_ : tuple of slice, default=(slice(70, 195), slice(78, 172)) Provide a custom 2D slice (height, width) to extract the 'interesting' part of the jpeg files and avoid use statistical correlation from the background. download_if_missing : bool, default=True If False, raise an OSError if the data is not locally available instead of trying to download the data from the source site. n_retries : int, default=3 Number of retries when HTTP errors are encountered. .. versionadded:: 1.5 delay : float, default=1.0 Number of seconds between retries. .. versionadded:: 1.5 Returns ------- data : :class:`~sklearn.utils.Bunch` Dictionary-like object, with the following attributes. data : ndarray of shape (2200, 5828). Shape depends on ``subset``. Each row corresponds to 2 ravel'd face images of original size 62 x 47 pixels. Changing the ``slice_``, ``resize`` or ``subset`` parameters will change the shape of the output. pairs : ndarray of shape (2200, 2, 62, 47). Shape depends on ``subset`` Each row has 2 face images corresponding to same or different person from the dataset containing 5749 people. Changing the ``slice_``, ``resize`` or ``subset`` parameters will change the shape of the output. target : numpy array of shape (2200,). Shape depends on ``subset``. Labels associated to each pair of images. The two label values being different persons or the same person. target_names : numpy array of shape (2,) Explains the target values of the target array. 0 corresponds to "Different person", 1 corresponds to "same person". DESCR : str Description of the Labeled Faces in the Wild (LFW) dataset. 
Examples -------- >>> from sklearn.datasets import fetch_lfw_pairs >>> lfw_pairs_train = fetch_lfw_pairs(subset='train') >>> list(lfw_pairs_train.target_names) [np.str_('Different persons'), np.str_('Same person')] >>> lfw_pairs_train.pairs.shape (2200, 2, 62, 47) >>> lfw_pairs_train.data.shape (2200, 5828) >>> lfw_pairs_train.target.shape (2200,) """ lfw_home, data_folder_path = _check_fetch_lfw( data_home=data_home, funneled=funneled, download_if_missing=download_if_missing, n_retries=n_retries, delay=delay, ) logger.debug("Loading %s LFW pairs from %s", subset, lfw_home) # wrap the loader in a memoizing function that will return memmaped data # arrays for optimal memory usage m = Memory(location=lfw_home, compress=6, verbose=0) load_func = m.cache(_fetch_lfw_pairs) # select the right metadata file according to the requested subset label_filenames = { "train": "pairsDevTrain.txt", "test": "pairsDevTest.txt", "10_folds": "pairs.txt", } if subset not in label_filenames: raise ValueError( "subset='%s' is invalid: should be one of %r" % (subset, list(sorted(label_filenames.keys()))) ) index_file_path = join(lfw_home, label_filenames[subset]) # load and memoize the pairs as np arrays pairs, target, target_names = load_func( index_file_path, data_folder_path, resize=resize, color=color, slice_=slice_ ) fdescr = load_descr("lfw.rst") # pack the results as a Bunch instance return Bunch( data=pairs.reshape(len(pairs), -1), pairs=pairs, target=target, target_names=target_names, DESCR=fdescr, )
Load the Labeled Faces in the Wild (LFW) pairs dataset (classification). Download it if necessary. ================= ======================= Classes 2 Samples total 13233 Dimensionality 5828 Features real, between 0 and 255 ================= ======================= In the `original paper <https://people.cs.umass.edu/~elm/papers/lfw.pdf>`_ the "pairs" version corresponds to the "restricted task", where the experimenter should not use the name of a person to infer the equivalence or non-equivalence of two face images that are not explicitly given in the training set. The original images are 250 x 250 pixels, but the default slice and resize arguments reduce them to 62 x 47. Read more in the :ref:`User Guide <labeled_faces_in_the_wild_dataset>`. Parameters ---------- subset : {'train', 'test', '10_folds'}, default='train' Select the dataset to load: 'train' for the development training set, 'test' for the development test set, and '10_folds' for the official evaluation set that is meant to be used with a 10-folds cross validation. data_home : str or path-like, default=None Specify another download and cache folder for the datasets. By default all scikit-learn data is stored in '~/scikit_learn_data' subfolders. funneled : bool, default=True Download and use the funneled variant of the dataset. resize : float, default=0.5 Ratio used to resize the each face picture. color : bool, default=False Keep the 3 RGB channels instead of averaging them to a single gray level channel. If color is True the shape of the data has one more dimension than the shape with color = False. slice_ : tuple of slice, default=(slice(70, 195), slice(78, 172)) Provide a custom 2D slice (height, width) to extract the 'interesting' part of the jpeg files and avoid use statistical correlation from the background. download_if_missing : bool, default=True If False, raise an OSError if the data is not locally available instead of trying to download the data from the source site. n_retries : int, default=3 Number of retries when HTTP errors are encountered. .. versionadded:: 1.5 delay : float, default=1.0 Number of seconds between retries. .. versionadded:: 1.5 Returns ------- data : :class:`~sklearn.utils.Bunch` Dictionary-like object, with the following attributes. data : ndarray of shape (2200, 5828). Shape depends on ``subset``. Each row corresponds to 2 ravel'd face images of original size 62 x 47 pixels. Changing the ``slice_``, ``resize`` or ``subset`` parameters will change the shape of the output. pairs : ndarray of shape (2200, 2, 62, 47). Shape depends on ``subset`` Each row has 2 face images corresponding to same or different person from the dataset containing 5749 people. Changing the ``slice_``, ``resize`` or ``subset`` parameters will change the shape of the output. target : numpy array of shape (2200,). Shape depends on ``subset``. Labels associated to each pair of images. The two label values being different persons or the same person. target_names : numpy array of shape (2,) Explains the target values of the target array. 0 corresponds to "Different person", 1 corresponds to "same person". DESCR : str Description of the Labeled Faces in the Wild (LFW) dataset. Examples -------- >>> from sklearn.datasets import fetch_lfw_pairs >>> lfw_pairs_train = fetch_lfw_pairs(subset='train') >>> list(lfw_pairs_train.target_names) [np.str_('Different persons'), np.str_('Same person')] >>> lfw_pairs_train.pairs.shape (2200, 2, 62, 47) >>> lfw_pairs_train.data.shape (2200, 5828) >>> lfw_pairs_train.target.shape (2200,)
fetch_lfw_pairs
python
scikit-learn/scikit-learn
sklearn/datasets/_lfw.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/datasets/_lfw.py
BSD-3-Clause
def fetch_olivetti_faces( *, data_home=None, shuffle=False, random_state=0, download_if_missing=True, return_X_y=False, n_retries=3, delay=1.0, ): """Load the Olivetti faces data-set from AT&T (classification). Download it if necessary. ================= ===================== Classes 40 Samples total 400 Dimensionality 4096 Features real, between 0 and 1 ================= ===================== Read more in the :ref:`User Guide <olivetti_faces_dataset>`. Parameters ---------- data_home : str or path-like, default=None Specify another download and cache folder for the datasets. By default all scikit-learn data is stored in '~/scikit_learn_data' subfolders. shuffle : bool, default=False If True the order of the dataset is shuffled to avoid having images of the same person grouped. random_state : int, RandomState instance or None, default=0 Determines random number generation for dataset shuffling. Pass an int for reproducible output across multiple function calls. See :term:`Glossary <random_state>`. download_if_missing : bool, default=True If False, raise an OSError if the data is not locally available instead of trying to download the data from the source site. return_X_y : bool, default=False If True, returns `(data, target)` instead of a `Bunch` object. See below for more information about the `data` and `target` object. .. versionadded:: 0.22 n_retries : int, default=3 Number of retries when HTTP errors are encountered. .. versionadded:: 1.5 delay : float, default=1.0 Number of seconds between retries. .. versionadded:: 1.5 Returns ------- data : :class:`~sklearn.utils.Bunch` Dictionary-like object, with the following attributes. data: ndarray, shape (400, 4096) Each row corresponds to a ravelled face image of original size 64 x 64 pixels. images : ndarray, shape (400, 64, 64) Each row is a face image corresponding to one of the 40 subjects of the dataset. target : ndarray, shape (400,) Labels associated to each face image. Those labels are ranging from 0-39 and correspond to the Subject IDs. DESCR : str Description of the modified Olivetti Faces Dataset. (data, target) : tuple if `return_X_y=True` Tuple with the `data` and `target` objects described above. .. versionadded:: 0.22 Examples -------- >>> from sklearn.datasets import fetch_olivetti_faces >>> olivetti_faces = fetch_olivetti_faces() >>> olivetti_faces.data.shape (400, 4096) >>> olivetti_faces.target.shape (400,) >>> olivetti_faces.images.shape (400, 64, 64) """ data_home = get_data_home(data_home=data_home) if not exists(data_home): makedirs(data_home) filepath = _pkl_filepath(data_home, "olivetti.pkz") if not exists(filepath): if not download_if_missing: raise OSError("Data not found and `download_if_missing` is False") print("downloading Olivetti faces from %s to %s" % (FACES.url, data_home)) mat_path = _fetch_remote( FACES, dirname=data_home, n_retries=n_retries, delay=delay ) mfile = loadmat(file_name=mat_path) # delete raw .mat data remove(mat_path) faces = mfile["faces"].T.copy() joblib.dump(faces, filepath, compress=6) del mfile else: faces = joblib.load(filepath) # We want floating point data, but float32 is enough (there is only # one byte of precision in the original uint8s anyway) faces = np.float32(faces) faces = faces - faces.min() faces /= faces.max() faces = faces.reshape((400, 64, 64)).transpose(0, 2, 1) # 10 images per class, 400 images total, each class is contiguous. 
target = np.array([i // 10 for i in range(400)]) if shuffle: random_state = check_random_state(random_state) order = random_state.permutation(len(faces)) faces = faces[order] target = target[order] faces_vectorized = faces.reshape(len(faces), -1) fdescr = load_descr("olivetti_faces.rst") if return_X_y: return faces_vectorized, target return Bunch(data=faces_vectorized, images=faces, target=target, DESCR=fdescr)
Load the Olivetti faces data-set from AT&T (classification). Download it if necessary. ================= ===================== Classes 40 Samples total 400 Dimensionality 4096 Features real, between 0 and 1 ================= ===================== Read more in the :ref:`User Guide <olivetti_faces_dataset>`. Parameters ---------- data_home : str or path-like, default=None Specify another download and cache folder for the datasets. By default all scikit-learn data is stored in '~/scikit_learn_data' subfolders. shuffle : bool, default=False If True the order of the dataset is shuffled to avoid having images of the same person grouped. random_state : int, RandomState instance or None, default=0 Determines random number generation for dataset shuffling. Pass an int for reproducible output across multiple function calls. See :term:`Glossary <random_state>`. download_if_missing : bool, default=True If False, raise an OSError if the data is not locally available instead of trying to download the data from the source site. return_X_y : bool, default=False If True, returns `(data, target)` instead of a `Bunch` object. See below for more information about the `data` and `target` object. .. versionadded:: 0.22 n_retries : int, default=3 Number of retries when HTTP errors are encountered. .. versionadded:: 1.5 delay : float, default=1.0 Number of seconds between retries. .. versionadded:: 1.5 Returns ------- data : :class:`~sklearn.utils.Bunch` Dictionary-like object, with the following attributes. data: ndarray, shape (400, 4096) Each row corresponds to a ravelled face image of original size 64 x 64 pixels. images : ndarray, shape (400, 64, 64) Each row is a face image corresponding to one of the 40 subjects of the dataset. target : ndarray, shape (400,) Labels associated to each face image. Those labels are ranging from 0-39 and correspond to the Subject IDs. DESCR : str Description of the modified Olivetti Faces Dataset. (data, target) : tuple if `return_X_y=True` Tuple with the `data` and `target` objects described above. .. versionadded:: 0.22 Examples -------- >>> from sklearn.datasets import fetch_olivetti_faces >>> olivetti_faces = fetch_olivetti_faces() >>> olivetti_faces.data.shape (400, 4096) >>> olivetti_faces.target.shape (400,) >>> olivetti_faces.images.shape (400, 64, 64)
fetch_olivetti_faces
python
scikit-learn/scikit-learn
sklearn/datasets/_olivetti_faces.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/datasets/_olivetti_faces.py
BSD-3-Clause
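The label construction in the Olivetti loader leans on the fact that the 400 images are stored ten-per-subject in contiguous blocks, so i // 10 maps image index to subject id. The same array can be written more directly with np.repeat, as this small check shows:

import numpy as np

target = np.array([i // 10 for i in range(400)])
assert np.array_equal(target, np.repeat(np.arange(40), 10))
print(target[:12])   # [0 0 0 0 0 0 0 0 0 0 1 1]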
def _retry_with_clean_cache( openml_path: str, data_home: Optional[str], no_retry_exception: Optional[Exception] = None, ) -> Callable: """If the first call to the decorated function fails, the local cached file is removed, and the function is called again. If ``data_home`` is ``None``, then the function is called once. We can provide a specific exception to not retry on using `no_retry_exception` parameter. """ def decorator(f): @wraps(f) def wrapper(*args, **kw): if data_home is None: return f(*args, **kw) try: return f(*args, **kw) except URLError: raise except Exception as exc: if no_retry_exception is not None and isinstance( exc, no_retry_exception ): raise warn("Invalid cache, redownloading file", RuntimeWarning) local_path = _get_local_path(openml_path, data_home) if os.path.exists(local_path): os.unlink(local_path) return f(*args, **kw) return wrapper return decorator
If the first call to the decorated function fails, the local cached file is removed and the function is called again. If ``data_home`` is ``None``, the function is only called once. A specific exception can be excluded from the retry via the ``no_retry_exception`` parameter.
_retry_with_clean_cache
python
scikit-learn/scikit-learn
sklearn/datasets/_openml.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/datasets/_openml.py
BSD-3-Clause
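The decorator above encodes a simple "retry once after clearing the cache" policy. A self-contained sketch of the same idea, with a dummy loader and an in-memory dict standing in for the OpenML file cache (all names here are illustrative, not the sklearn internals):

import functools

def retry_with_clean_cache(cache):
    def decorator(f):
        @functools.wraps(f)
        def wrapper(*args, **kwargs):
            try:
                return f(*args, **kwargs)
            except Exception:
                cache.clear()              # drop the possibly-corrupt cache
                return f(*args, **kwargs)  # second and final attempt
        return wrapper
    return decorator

cache = {"blob": b"corrupted bytes"}

@retry_with_clean_cache(cache)
def load():
    if "blob" in cache:
        raise ValueError("cache is corrupt")
    return "fresh download"

print(load())   # -> 'fresh download'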
def _retry_on_network_error( n_retries: int = 3, delay: float = 1.0, url: str = "" ) -> Callable: """If the function call results in a network error, call the function again up to ``n_retries`` times with a ``delay`` between each call. If the error has a 412 status code, don't call the function again as this is a specific OpenML error. The url parameter is used to give more information to the user about the error. """ def decorator(f): @wraps(f) def wrapper(*args, **kwargs): retry_counter = n_retries while True: try: return f(*args, **kwargs) except (URLError, TimeoutError) as e: # 412 is a specific OpenML error code. if isinstance(e, HTTPError) and e.code == 412: raise if retry_counter == 0: raise warn( f"A network error occurred while downloading {url}. Retrying..." ) retry_counter -= 1 time.sleep(delay) return wrapper return decorator
If the function call results in a network error, call the function again up to ``n_retries`` times with a ``delay`` between each call. If the error has a 412 status code, don't retry, as this is a specific OpenML error code. The ``url`` parameter is used to give more information to the user about the error.
_retry_on_network_error
python
scikit-learn/scikit-learn
sklearn/datasets/_openml.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/datasets/_openml.py
BSD-3-Clause
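A small demonstration of the retry-loop shape used above, with a deliberately flaky callable instead of a real network request. The decorator is re-sketched here so the snippet stands alone; it is not the sklearn-internal one and handles a plain OSError rather than URLError/TimeoutError:

import functools
import time

def retry(n_retries=3, delay=0.1):
    def decorator(f):
        @functools.wraps(f)
        def wrapper(*args, **kwargs):
            remaining = n_retries
            while True:
                try:
                    return f(*args, **kwargs)
                except OSError:
                    if remaining == 0:
                        raise
                    remaining -= 1
                    time.sleep(delay)
        return wrapper
    return decorator

attempts = {"n": 0}

@retry(n_retries=3, delay=0.01)
def flaky():
    attempts["n"] += 1
    if attempts["n"] < 3:
        raise OSError("simulated network hiccup")
    return "ok"

print(flaky(), "after", attempts["n"], "attempts")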
def _open_openml_url( url: str, data_home: Optional[str], n_retries: int = 3, delay: float = 1.0 ): """ Returns a resource from OpenML.org. Caches it to data_home if required. Parameters ---------- url : str OpenML URL that will be downloaded and cached locally. The path component of the URL is used to replicate the tree structure as sub-folders of the local cache folder. data_home : str Directory to which the files will be cached. If None, no caching will be applied. n_retries : int, default=3 Number of retries when HTTP errors are encountered. Error with status code 412 won't be retried as they represent OpenML generic errors. delay : float, default=1.0 Number of seconds between retries. Returns ------- result : stream A stream to the OpenML resource. """ def is_gzip_encoded(_fsrc): return _fsrc.info().get("Content-Encoding", "") == "gzip" req = Request(url) req.add_header("Accept-encoding", "gzip") if data_home is None: fsrc = _retry_on_network_error(n_retries, delay, req.full_url)(urlopen)(req) if is_gzip_encoded(fsrc): return gzip.GzipFile(fileobj=fsrc, mode="rb") return fsrc openml_path = urlparse(url).path.lstrip("/") local_path = _get_local_path(openml_path, data_home) dir_name, file_name = os.path.split(local_path) if not os.path.exists(local_path): os.makedirs(dir_name, exist_ok=True) try: # Create a tmpdir as a subfolder of dir_name where the final file will # be moved to if the download is successful. This guarantees that the # renaming operation to the final location is atomic to ensure the # concurrence safety of the dataset caching mechanism. with TemporaryDirectory(dir=dir_name) as tmpdir: with closing( _retry_on_network_error(n_retries, delay, req.full_url)(urlopen)( req ) ) as fsrc: opener: Callable if is_gzip_encoded(fsrc): opener = open else: opener = gzip.GzipFile with opener(os.path.join(tmpdir, file_name), "wb") as fdst: shutil.copyfileobj(fsrc, fdst) shutil.move(fdst.name, local_path) except Exception: if os.path.exists(local_path): os.unlink(local_path) raise # XXX: First time, decompression will not be necessary (by using fsrc), but # it will happen nonetheless return gzip.GzipFile(local_path, "rb")
Returns a resource from OpenML.org. Caches it to data_home if required. Parameters ---------- url : str OpenML URL that will be downloaded and cached locally. The path component of the URL is used to replicate the tree structure as sub-folders of the local cache folder. data_home : str Directory to which the files will be cached. If None, no caching will be applied. n_retries : int, default=3 Number of retries when HTTP errors are encountered. Error with status code 412 won't be retried as they represent OpenML generic errors. delay : float, default=1.0 Number of seconds between retries. Returns ------- result : stream A stream to the OpenML resource.
_open_openml_url
python
scikit-learn/scikit-learn
sklearn/datasets/_openml.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/datasets/_openml.py
BSD-3-Clause
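Two details in the function above are worth calling out: responses are requested gzip-encoded, and downloads land in a temporary directory before being moved into the cache so readers never see a half-written file. A compact sketch of the atomic-write half using only the standard library (the payload and target path are placeholders):

import os
import shutil
from tempfile import TemporaryDirectory

def atomic_write(data: bytes, local_path: str) -> None:
    dir_name, file_name = os.path.split(local_path)
    os.makedirs(dir_name, exist_ok=True)
    # Write into a sibling temp dir, then move into place: the final rename
    # is atomic on the same filesystem, even if the process is interrupted
    # mid-download.
    with TemporaryDirectory(dir=dir_name) as tmpdir:
        tmp_path = os.path.join(tmpdir, file_name)
        with open(tmp_path, "wb") as fdst:
            fdst.write(data)
        shutil.move(tmp_path, local_path)

atomic_write(b"payload", "./openml_cache/api/v1/json/data/61")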
def _get_json_content_from_openml_api( url: str, error_message: Optional[str], data_home: Optional[str], n_retries: int = 3, delay: float = 1.0, ) -> Dict: """ Loads json data from the openml api. Parameters ---------- url : str The URL to load from. Should be an official OpenML endpoint. error_message : str or None The error message to raise if an acceptable OpenML error is thrown (acceptable error is, e.g., data id not found. Other errors, like 404's will throw the native error message). data_home : str or None Location to cache the response. None if no cache is required. n_retries : int, default=3 Number of retries when HTTP errors are encountered. Error with status code 412 won't be retried as they represent OpenML generic errors. delay : float, default=1.0 Number of seconds between retries. Returns ------- json_data : json the json result from the OpenML server if the call was successful. An exception otherwise. """ @_retry_with_clean_cache(url, data_home=data_home) def _load_json(): with closing( _open_openml_url(url, data_home, n_retries=n_retries, delay=delay) ) as response: return json.loads(response.read().decode("utf-8")) try: return _load_json() except HTTPError as error: # 412 is an OpenML specific error code, indicating a generic error # (e.g., data not found) if error.code != 412: raise error # 412 error, not in except for nicer traceback raise OpenMLError(error_message)
Loads json data from the openml api. Parameters ---------- url : str The URL to load from. Should be an official OpenML endpoint. error_message : str or None The error message to raise if an acceptable OpenML error is thrown (acceptable error is, e.g., data id not found. Other errors, like 404's will throw the native error message). data_home : str or None Location to cache the response. None if no cache is required. n_retries : int, default=3 Number of retries when HTTP errors are encountered. Error with status code 412 won't be retried as they represent OpenML generic errors. delay : float, default=1.0 Number of seconds between retries. Returns ------- json_data : json the json result from the OpenML server if the call was successful. An exception otherwise.
_get_json_content_from_openml_api
python
scikit-learn/scikit-learn
sklearn/datasets/_openml.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/datasets/_openml.py
BSD-3-Clause
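For completeness, a sketch of how such a helper is typically driven: build the endpoint URL, fetch the JSON, and translate OpenML's 412 status into a friendlier error. The URL and response layout below follow OpenML's public API as I understand it (dataset 61 is iris), and the snippet needs network access to actually run:

import json
from urllib.error import HTTPError
from urllib.request import urlopen

url = "https://api.openml.org/api/v1/json/data/61"   # iris metadata (assumed endpoint)

try:
    with urlopen(url) as response:
        payload = json.loads(response.read().decode("utf-8"))
    print(payload["data_set_description"]["name"])
except HTTPError as error:
    if error.code == 412:
        # 412 is OpenML's generic "acceptable" error, e.g. dataset not found.
        raise ValueError("Dataset not found on OpenML") from error
    raise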