Dataset schema (one record per function below): code (string, 66 to 870k chars), docstring (string, 19 to 26.7k chars), func_name (string, 1 to 138 chars), language (1 class), repo (string, 7 to 68 chars), path (string, 5 to 324 chars), url (string, 46 to 389 chars), license (7 classes).
def add_dummy_feature(X, value=1.0):
    """Augment dataset with an additional dummy feature.

    This is useful for fitting an intercept term with implementations which
    cannot otherwise fit it directly.

    Parameters
    ----------
    X : {array-like, sparse matrix} of shape (n_samples, n_features)
        Data.

    value : float
        Value to use for the dummy feature.

    Returns
    -------
    X : {ndarray, sparse matrix} of shape (n_samples, n_features + 1)
        Same data with dummy feature added as first column.

    Examples
    --------
    >>> from sklearn.preprocessing import add_dummy_feature
    >>> add_dummy_feature([[0, 1], [1, 0]])
    array([[1., 0., 1.],
           [1., 1., 0.]])
    """
    X = check_array(X, accept_sparse=["csc", "csr", "coo"], dtype=FLOAT_DTYPES)
    n_samples, n_features = X.shape
    shape = (n_samples, n_features + 1)
    if sparse.issparse(X):
        if X.format == "coo":
            # Shift columns to the right.
            col = X.col + 1
            # Column indices of dummy feature are 0 everywhere.
            col = np.concatenate((np.zeros(n_samples), col))
            # Row indices of dummy feature are 0, ..., n_samples-1.
            row = np.concatenate((np.arange(n_samples), X.row))
            # Prepend the dummy feature n_samples times.
            data = np.concatenate((np.full(n_samples, value), X.data))
            return sparse.coo_matrix((data, (row, col)), shape)
        elif X.format == "csc":
            # Shift index pointers since we need to add n_samples elements.
            indptr = X.indptr + n_samples
            # indptr[0] must be 0.
            indptr = np.concatenate((np.array([0]), indptr))
            # Row indices of dummy feature are 0, ..., n_samples-1.
            indices = np.concatenate((np.arange(n_samples), X.indices))
            # Prepend the dummy feature n_samples times.
            data = np.concatenate((np.full(n_samples, value), X.data))
            return sparse.csc_matrix((data, indices, indptr), shape)
        else:
            klass = X.__class__
            return klass(add_dummy_feature(X.tocoo(), value))
    else:
        return np.hstack((np.full((n_samples, 1), value), X))
Augment dataset with an additional dummy feature.

This is useful for fitting an intercept term with implementations which
cannot otherwise fit it directly.

Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
    Data.

value : float
    Value to use for the dummy feature.

Returns
-------
X : {ndarray, sparse matrix} of shape (n_samples, n_features + 1)
    Same data with dummy feature added as first column.

Examples
--------
>>> from sklearn.preprocessing import add_dummy_feature
>>> add_dummy_feature([[0, 1], [1, 0]])
array([[1., 0., 1.],
       [1., 1., 0.]])
add_dummy_feature
python
scikit-learn/scikit-learn
sklearn/preprocessing/_data.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/_data.py
BSD-3-Clause
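A quick usage sketch of the function above (not part of the dataset row; it assumes NumPy, SciPy, and scikit-learn are installed):

import numpy as np
from scipy import sparse
from sklearn.preprocessing import add_dummy_feature

X = np.array([[0.0, 1.0], [1.0, 0.0]])
print(add_dummy_feature(X))             # dummy column of 1.0 prepended
print(add_dummy_feature(X, value=5.0))  # custom dummy value

# Sparse inputs are supported; CSR is routed through the COO branch
# and converted back to the original sparse class.
print(add_dummy_feature(sparse.csr_matrix(X)).toarray())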
def _dense_fit(self, X, random_state):
    """Compute percentiles for dense matrices.

    Parameters
    ----------
    X : ndarray of shape (n_samples, n_features)
        The data used to scale along the features axis.
    """
    if self.ignore_implicit_zeros:
        warnings.warn(
            "'ignore_implicit_zeros' takes effect only with"
            " sparse matrix. This parameter has no effect."
        )

    n_samples, n_features = X.shape
    references = self.references_ * 100

    if self.subsample is not None and self.subsample < n_samples:
        # Take a subsample of `X`
        X = resample(
            X, replace=False, n_samples=self.subsample, random_state=random_state
        )

    self.quantiles_ = np.nanpercentile(X, references, axis=0)
    # Due to floating-point precision error in `np.nanpercentile`,
    # make sure that quantiles are monotonically increasing.
    # Upstream issue in numpy:
    # https://github.com/numpy/numpy/issues/14685
    self.quantiles_ = np.maximum.accumulate(self.quantiles_)
Compute percentiles for dense matrices.

Parameters
----------
X : ndarray of shape (n_samples, n_features)
    The data used to scale along the features axis.
_dense_fit
python
scikit-learn/scikit-learn
sklearn/preprocessing/_data.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/_data.py
BSD-3-Clause
def _sparse_fit(self, X, random_state):
    """Compute percentiles for sparse matrices.

    Parameters
    ----------
    X : sparse matrix of shape (n_samples, n_features)
        The data used to scale along the features axis. The sparse matrix
        needs to be nonnegative. If a sparse matrix is provided,
        it will be converted into a sparse ``csc_matrix``.
    """
    n_samples, n_features = X.shape
    references = self.references_ * 100

    self.quantiles_ = []
    for feature_idx in range(n_features):
        column_nnz_data = X.data[X.indptr[feature_idx] : X.indptr[feature_idx + 1]]
        if self.subsample is not None and len(column_nnz_data) > self.subsample:
            column_subsample = self.subsample * len(column_nnz_data) // n_samples
            if self.ignore_implicit_zeros:
                column_data = np.zeros(shape=column_subsample, dtype=X.dtype)
            else:
                column_data = np.zeros(shape=self.subsample, dtype=X.dtype)
            column_data[:column_subsample] = random_state.choice(
                column_nnz_data, size=column_subsample, replace=False
            )
        else:
            if self.ignore_implicit_zeros:
                column_data = np.zeros(shape=len(column_nnz_data), dtype=X.dtype)
            else:
                column_data = np.zeros(shape=n_samples, dtype=X.dtype)
            column_data[: len(column_nnz_data)] = column_nnz_data

        if not column_data.size:
            # if no nnz, an error will be raised for computing the
            # quantiles. Force the quantiles to be zeros.
            self.quantiles_.append([0] * len(references))
        else:
            self.quantiles_.append(np.nanpercentile(column_data, references))
    self.quantiles_ = np.transpose(self.quantiles_)
    # due to floating-point precision error in `np.nanpercentile`,
    # make sure the quantiles are monotonically increasing
    # Upstream issue in numpy:
    # https://github.com/numpy/numpy/issues/14685
    self.quantiles_ = np.maximum.accumulate(self.quantiles_)
Compute percentiles for sparse matrices.

Parameters
----------
X : sparse matrix of shape (n_samples, n_features)
    The data used to scale along the features axis. The sparse matrix
    needs to be nonnegative. If a sparse matrix is provided,
    it will be converted into a sparse ``csc_matrix``.
_sparse_fit
python
scikit-learn/scikit-learn
sklearn/preprocessing/_data.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/_data.py
BSD-3-Clause
def fit(self, X, y=None):
    """Compute the quantiles used for transforming.

    Parameters
    ----------
    X : {array-like, sparse matrix} of shape (n_samples, n_features)
        The data used to scale along the features axis. If a sparse
        matrix is provided, it will be converted into a sparse
        ``csc_matrix``. Additionally, the sparse matrix needs to be
        nonnegative if `ignore_implicit_zeros` is False.

    y : None
        Ignored.

    Returns
    -------
    self : object
        Fitted transformer.
    """
    if self.subsample is not None and self.n_quantiles > self.subsample:
        raise ValueError(
            "The number of quantiles cannot be greater than"
            " the number of samples used. Got {} quantiles"
            " and {} samples.".format(self.n_quantiles, self.subsample)
        )

    X = self._check_inputs(X, in_fit=True, copy=False)
    n_samples = X.shape[0]

    if self.n_quantiles > n_samples:
        warnings.warn(
            "n_quantiles (%s) is greater than the total number "
            "of samples (%s). n_quantiles is set to "
            "n_samples." % (self.n_quantiles, n_samples)
        )
    self.n_quantiles_ = max(1, min(self.n_quantiles, n_samples))

    rng = check_random_state(self.random_state)

    # Create the quantiles of reference
    self.references_ = np.linspace(0, 1, self.n_quantiles_, endpoint=True)
    if sparse.issparse(X):
        self._sparse_fit(X, rng)
    else:
        self._dense_fit(X, rng)

    return self
Compute the quantiles used for transforming.

Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
    The data used to scale along the features axis. If a sparse
    matrix is provided, it will be converted into a sparse
    ``csc_matrix``. Additionally, the sparse matrix needs to be
    nonnegative if `ignore_implicit_zeros` is False.

y : None
    Ignored.

Returns
-------
self : object
    Fitted transformer.
fit
python
scikit-learn/scikit-learn
sklearn/preprocessing/_data.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/_data.py
BSD-3-Clause
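A minimal fitting sketch (an editorial aside, assuming scikit-learn is installed) showing the attributes that `fit` sets and the clipping of `n_quantiles` to `n_samples`:

import numpy as np
from sklearn.preprocessing import QuantileTransformer

rng = np.random.RandomState(0)
X = rng.lognormal(size=(50, 2))

qt = QuantileTransformer(n_quantiles=10, random_state=0).fit(X)
print(qt.n_quantiles_)       # 10 (would be clipped to 50 with a warning if larger)
print(qt.references_.shape)  # (10,): evenly spaced reference quantiles in [0, 1]
print(qt.quantiles_.shape)   # (10, 2): one column of quantile landmarks per feature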
def _transform_col(self, X_col, quantiles, inverse):
    """Private function to transform a single feature."""
    output_distribution = self.output_distribution

    if not inverse:
        lower_bound_x = quantiles[0]
        upper_bound_x = quantiles[-1]
        lower_bound_y = 0
        upper_bound_y = 1
    else:
        lower_bound_x = 0
        upper_bound_x = 1
        lower_bound_y = quantiles[0]
        upper_bound_y = quantiles[-1]
        # for inverse transform, match a uniform distribution
        with np.errstate(invalid="ignore"):  # hide NaN comparison warnings
            if output_distribution == "normal":
                X_col = stats.norm.cdf(X_col)
            # else output distribution is already a uniform distribution

    # find index for lower and higher bounds
    with np.errstate(invalid="ignore"):  # hide NaN comparison warnings
        if output_distribution == "normal":
            lower_bounds_idx = X_col - BOUNDS_THRESHOLD < lower_bound_x
            upper_bounds_idx = X_col + BOUNDS_THRESHOLD > upper_bound_x
        if output_distribution == "uniform":
            lower_bounds_idx = X_col == lower_bound_x
            upper_bounds_idx = X_col == upper_bound_x

    isfinite_mask = ~np.isnan(X_col)
    X_col_finite = X_col[isfinite_mask]
    if not inverse:
        # Interpolate in one direction and in the other and take the
        # mean. This is in case of repeated values in the features
        # and hence repeated quantiles
        #
        # If we don't do this, only one extreme of the duplicated is
        # used (the upper when we do ascending, and the
        # lower for descending). We take the mean of these two
        X_col[isfinite_mask] = 0.5 * (
            np.interp(X_col_finite, quantiles, self.references_)
            - np.interp(-X_col_finite, -quantiles[::-1], -self.references_[::-1])
        )
    else:
        X_col[isfinite_mask] = np.interp(X_col_finite, self.references_, quantiles)

    X_col[upper_bounds_idx] = upper_bound_y
    X_col[lower_bounds_idx] = lower_bound_y
    # for forward transform, match the output distribution
    if not inverse:
        with np.errstate(invalid="ignore"):  # hide NaN comparison warnings
            if output_distribution == "normal":
                X_col = stats.norm.ppf(X_col)
                # find the value to clip the data to avoid mapping to
                # infinity. Clip such that the inverse transform will be
                # consistent
                clip_min = stats.norm.ppf(BOUNDS_THRESHOLD - np.spacing(1))
                clip_max = stats.norm.ppf(1 - (BOUNDS_THRESHOLD - np.spacing(1)))
                X_col = np.clip(X_col, clip_min, clip_max)
            # else output distribution is uniform and the ppf is the
            # identity function so we let X_col unchanged

    return X_col
Private function to transform a single feature.
_transform_col
python
scikit-learn/scikit-learn
sklearn/preprocessing/_data.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/_data.py
BSD-3-Clause
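The non-negativity check in `_check_inputs` surfaces through the public API; a sketch (assumes scikit-learn and SciPy):

import numpy as np
from scipy import sparse
from sklearn.preprocessing import QuantileTransformer

X_neg = sparse.csc_matrix(np.array([[-1.0, 2.0], [0.0, 3.0]]))
try:
    QuantileTransformer(n_quantiles=2).fit(X_neg)
except ValueError as exc:
    print(exc)  # QuantileTransformer only accepts non-negative sparse matrices.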
def _check_inputs(self, X, in_fit, accept_sparse_negative=False, copy=False):
    """Check inputs before fit and transform."""
    X = validate_data(
        self,
        X,
        reset=in_fit,
        accept_sparse="csc",
        copy=copy,
        dtype=FLOAT_DTYPES,
        # only set force_writeable for the validation at transform time because
        # it's the only place where QuantileTransformer performs inplace operations.
        force_writeable=True if not in_fit else None,
        ensure_all_finite="allow-nan",
    )
    # we only accept positive sparse matrices when ignore_implicit_zeros is
    # False and we call fit or transform.
    with np.errstate(invalid="ignore"):  # hide NaN comparison warnings
        if (
            not accept_sparse_negative
            and not self.ignore_implicit_zeros
            and (sparse.issparse(X) and np.any(X.data < 0))
        ):
            raise ValueError(
                "QuantileTransformer only accepts non-negative sparse matrices."
            )

    return X
Check inputs before fit and transform.
_check_inputs
python
scikit-learn/scikit-learn
sklearn/preprocessing/_data.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/_data.py
BSD-3-Clause
def _transform(self, X, inverse=False):
    """Forward and inverse transform.

    Parameters
    ----------
    X : ndarray of shape (n_samples, n_features)
        The data used to scale along the features axis.

    inverse : bool, default=False
        If False, apply forward transform. If True, apply
        inverse transform.

    Returns
    -------
    X : ndarray of shape (n_samples, n_features)
        Projected data.
    """
    if sparse.issparse(X):
        for feature_idx in range(X.shape[1]):
            column_slice = slice(X.indptr[feature_idx], X.indptr[feature_idx + 1])
            X.data[column_slice] = self._transform_col(
                X.data[column_slice], self.quantiles_[:, feature_idx], inverse
            )
    else:
        for feature_idx in range(X.shape[1]):
            X[:, feature_idx] = self._transform_col(
                X[:, feature_idx], self.quantiles_[:, feature_idx], inverse
            )

    return X
Forward and inverse transform.

Parameters
----------
X : ndarray of shape (n_samples, n_features)
    The data used to scale along the features axis.

inverse : bool, default=False
    If False, apply forward transform. If True, apply
    inverse transform.

Returns
-------
X : ndarray of shape (n_samples, n_features)
    Projected data.
_transform
python
scikit-learn/scikit-learn
sklearn/preprocessing/_data.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/_data.py
BSD-3-Clause
def transform(self, X):
    """Feature-wise transformation of the data.

    Parameters
    ----------
    X : {array-like, sparse matrix} of shape (n_samples, n_features)
        The data used to scale along the features axis. If a sparse
        matrix is provided, it will be converted into a sparse
        ``csc_matrix``. Additionally, the sparse matrix needs to be
        nonnegative if `ignore_implicit_zeros` is False.

    Returns
    -------
    Xt : {ndarray, sparse matrix} of shape (n_samples, n_features)
        The projected data.
    """
    check_is_fitted(self)
    X = self._check_inputs(X, in_fit=False, copy=self.copy)

    return self._transform(X, inverse=False)
Feature-wise transformation of the data.

Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
    The data used to scale along the features axis. If a sparse
    matrix is provided, it will be converted into a sparse
    ``csc_matrix``. Additionally, the sparse matrix needs to be
    nonnegative if `ignore_implicit_zeros` is False.

Returns
-------
Xt : {ndarray, sparse matrix} of shape (n_samples, n_features)
    The projected data.
transform
python
scikit-learn/scikit-learn
sklearn/preprocessing/_data.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/_data.py
BSD-3-Clause
def inverse_transform(self, X):
    """Back-projection to the original space.

    Parameters
    ----------
    X : {array-like, sparse matrix} of shape (n_samples, n_features)
        The data used to scale along the features axis. If a sparse
        matrix is provided, it will be converted into a sparse
        ``csc_matrix``. Additionally, the sparse matrix needs to be
        nonnegative if `ignore_implicit_zeros` is False.

    Returns
    -------
    X_original : {ndarray, sparse matrix} of shape (n_samples, n_features)
        The projected data.
    """
    check_is_fitted(self)
    X = self._check_inputs(
        X, in_fit=False, accept_sparse_negative=True, copy=self.copy
    )

    return self._transform(X, inverse=True)
Back-projection to the original space.

Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
    The data used to scale along the features axis. If a sparse
    matrix is provided, it will be converted into a sparse
    ``csc_matrix``. Additionally, the sparse matrix needs to be
    nonnegative if `ignore_implicit_zeros` is False.

Returns
-------
X_original : {ndarray, sparse matrix} of shape (n_samples, n_features)
    The projected data.
inverse_transform
python
scikit-learn/scikit-learn
sklearn/preprocessing/_data.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/_data.py
BSD-3-Clause
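A round-trip sketch (editorial aside, assuming scikit-learn): with `n_quantiles` equal to `n_samples`, training values sit on the quantile landmarks, so `inverse_transform` recovers them up to interpolation error:

import numpy as np
from sklearn.preprocessing import QuantileTransformer

rng = np.random.RandomState(0)
X = rng.normal(size=(100, 1))

qt = QuantileTransformer(n_quantiles=100, random_state=0).fit(X)
Xt = qt.transform(X)                     # mapped to roughly U(0, 1)
print(float(Xt.min()), float(Xt.max()))  # 0.0 and 1.0 at the fitted bounds
print(np.allclose(qt.inverse_transform(Xt), X))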
def quantile_transform(
    X,
    *,
    axis=0,
    n_quantiles=1000,
    output_distribution="uniform",
    ignore_implicit_zeros=False,
    subsample=int(1e5),
    random_state=None,
    copy=True,
):
    """Transform features using quantiles information.

    This method transforms the features to follow a uniform or a normal
    distribution. Therefore, for a given feature, this transformation tends
    to spread out the most frequent values. It also reduces the impact of
    (marginal) outliers: this is therefore a robust preprocessing scheme.

    The transformation is applied on each feature independently. First an
    estimate of the cumulative distribution function of a feature is
    used to map the original values to a uniform distribution. The obtained
    values are then mapped to the desired output distribution using the
    associated quantile function. Features values of new/unseen data that fall
    below or above the fitted range will be mapped to the bounds of the output
    distribution. Note that this transform is non-linear. It may distort linear
    correlations between variables measured at the same scale but renders
    variables measured at different scales more directly comparable.

    Read more in the :ref:`User Guide <preprocessing_transformer>`.

    Parameters
    ----------
    X : {array-like, sparse matrix} of shape (n_samples, n_features)
        The data to transform.

    axis : int, default=0
        Axis used to compute the means and standard deviations along. If 0,
        transform each feature, otherwise (if 1) transform each sample.

    n_quantiles : int, default=1000 or n_samples
        Number of quantiles to be computed. It corresponds to the number
        of landmarks used to discretize the cumulative distribution function.
        If n_quantiles is larger than the number of samples, n_quantiles is set
        to the number of samples as a larger number of quantiles does not give
        a better approximation of the cumulative distribution function
        estimator.

    output_distribution : {'uniform', 'normal'}, default='uniform'
        Marginal distribution for the transformed data. The choices are
        'uniform' (default) or 'normal'.

    ignore_implicit_zeros : bool, default=False
        Only applies to sparse matrices. If True, the sparse entries of the
        matrix are discarded to compute the quantile statistics. If False,
        these entries are treated as zeros.

    subsample : int or None, default=1e5
        Maximum number of samples used to estimate the quantiles for
        computational efficiency. Note that the subsampling procedure may
        differ for value-identical sparse and dense matrices. Disable
        subsampling by setting `subsample=None`.

        .. versionadded:: 1.5
           The option `None` to disable subsampling was added.

    random_state : int, RandomState instance or None, default=None
        Determines random number generation for subsampling and smoothing
        noise. Please see ``subsample`` for more details.
        Pass an int for reproducible results across multiple function calls.
        See :term:`Glossary <random_state>`.

    copy : bool, default=True
        If False, try to avoid a copy and transform in place.
        This is not guaranteed to always work in place; e.g. if the data is
        a numpy array with an int dtype, a copy will be returned even with
        copy=False.

        .. versionchanged:: 0.23
            The default value of `copy` changed from False to True in 0.23.

    Returns
    -------
    Xt : {ndarray, sparse matrix} of shape (n_samples, n_features)
        The transformed data.

    See Also
    --------
    QuantileTransformer : Performs quantile-based scaling using the
        Transformer API (e.g. as part of a preprocessing
        :class:`~sklearn.pipeline.Pipeline`).
    power_transform : Maps data to a normal distribution using a
        power transformation.
    scale : Performs standardization that is faster, but less robust to
        outliers.
    robust_scale : Performs robust standardization that removes the influence
        of outliers but does not put outliers and inliers on the same scale.

    Notes
    -----
    NaNs are treated as missing values: disregarded in fit, and maintained in
    transform.

    .. warning:: Risk of data leak

        Do not use :func:`~sklearn.preprocessing.quantile_transform` unless
        you know what you are doing. A common mistake is to apply it to the
        entire data *before* splitting into training and test sets. This will
        bias the model evaluation because information would have leaked from
        the test set to the training set.

        In general, we recommend using
        :class:`~sklearn.preprocessing.QuantileTransformer` within a
        :ref:`Pipeline <pipeline>` in order to prevent most risks of data
        leaking: `pipe = make_pipeline(QuantileTransformer(),
        LogisticRegression())`.

    For a comparison of the different scalers, transformers, and normalizers,
    see: :ref:`sphx_glr_auto_examples_preprocessing_plot_all_scaling.py`.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.preprocessing import quantile_transform
    >>> rng = np.random.RandomState(0)
    >>> X = np.sort(rng.normal(loc=0.5, scale=0.25, size=(25, 1)), axis=0)
    >>> quantile_transform(X, n_quantiles=10, random_state=0, copy=True)
    array([...])
    """
    n = QuantileTransformer(
        n_quantiles=n_quantiles,
        output_distribution=output_distribution,
        subsample=subsample,
        ignore_implicit_zeros=ignore_implicit_zeros,
        random_state=random_state,
        copy=copy,
    )
    if axis == 0:
        X = n.fit_transform(X)
    else:  # axis == 1
        X = n.fit_transform(X.T).T
    return X
Transform features using quantiles information.

This method transforms the features to follow a uniform or a normal
distribution. Therefore, for a given feature, this transformation tends
to spread out the most frequent values. It also reduces the impact of
(marginal) outliers: this is therefore a robust preprocessing scheme.

The transformation is applied on each feature independently. First an
estimate of the cumulative distribution function of a feature is
used to map the original values to a uniform distribution. The obtained
values are then mapped to the desired output distribution using the
associated quantile function. Features values of new/unseen data that fall
below or above the fitted range will be mapped to the bounds of the output
distribution. Note that this transform is non-linear. It may distort linear
correlations between variables measured at the same scale but renders
variables measured at different scales more directly comparable.

Read more in the :ref:`User Guide <preprocessing_transformer>`.

Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
    The data to transform.

axis : int, default=0
    Axis used to compute the means and standard deviations along. If 0,
    transform each feature, otherwise (if 1) transform each sample.

n_quantiles : int, default=1000 or n_samples
    Number of quantiles to be computed. It corresponds to the number
    of landmarks used to discretize the cumulative distribution function.
    If n_quantiles is larger than the number of samples, n_quantiles is set
    to the number of samples as a larger number of quantiles does not give
    a better approximation of the cumulative distribution function
    estimator.

output_distribution : {'uniform', 'normal'}, default='uniform'
    Marginal distribution for the transformed data. The choices are
    'uniform' (default) or 'normal'.

ignore_implicit_zeros : bool, default=False
    Only applies to sparse matrices. If True, the sparse entries of the
    matrix are discarded to compute the quantile statistics. If False,
    these entries are treated as zeros.

subsample : int or None, default=1e5
    Maximum number of samples used to estimate the quantiles for
    computational efficiency. Note that the subsampling procedure may
    differ for value-identical sparse and dense matrices. Disable
    subsampling by setting `subsample=None`.

    .. versionadded:: 1.5
       The option `None` to disable subsampling was added.

random_state : int, RandomState instance or None, default=None
    Determines random number generation for subsampling and smoothing
    noise. Please see ``subsample`` for more details.
    Pass an int for reproducible results across multiple function calls.
    See :term:`Glossary <random_state>`.

copy : bool, default=True
    If False, try to avoid a copy and transform in place.
    This is not guaranteed to always work in place; e.g. if the data is
    a numpy array with an int dtype, a copy will be returned even with
    copy=False.

    .. versionchanged:: 0.23
        The default value of `copy` changed from False to True in 0.23.

Returns
-------
Xt : {ndarray, sparse matrix} of shape (n_samples, n_features)
    The transformed data.

See Also
--------
QuantileTransformer : Performs quantile-based scaling using the
    Transformer API (e.g. as part of a preprocessing
    :class:`~sklearn.pipeline.Pipeline`).
power_transform : Maps data to a normal distribution using a
    power transformation.
scale : Performs standardization that is faster, but less robust to
    outliers.
robust_scale : Performs robust standardization that removes the influence
    of outliers but does not put outliers and inliers on the same scale.

Notes
-----
NaNs are treated as missing values: disregarded in fit, and maintained in
transform.

.. warning:: Risk of data leak

    Do not use :func:`~sklearn.preprocessing.quantile_transform` unless
    you know what you are doing. A common mistake is to apply it to the
    entire data *before* splitting into training and test sets. This will
    bias the model evaluation because information would have leaked from
    the test set to the training set.

    In general, we recommend using
    :class:`~sklearn.preprocessing.QuantileTransformer` within a
    :ref:`Pipeline <pipeline>` in order to prevent most risks of data
    leaking: `pipe = make_pipeline(QuantileTransformer(),
    LogisticRegression())`.

For a comparison of the different scalers, transformers, and normalizers,
see: :ref:`sphx_glr_auto_examples_preprocessing_plot_all_scaling.py`.

Examples
--------
>>> import numpy as np
>>> from sklearn.preprocessing import quantile_transform
>>> rng = np.random.RandomState(0)
>>> X = np.sort(rng.normal(loc=0.5, scale=0.25, size=(25, 1)), axis=0)
>>> quantile_transform(X, n_quantiles=10, random_state=0, copy=True)
array([...])
quantile_transform
python
scikit-learn/scikit-learn
sklearn/preprocessing/_data.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/_data.py
BSD-3-Clause
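A function-style sketch mirroring the Examples block above, additionally mapping to a normal output distribution (assumes scikit-learn):

import numpy as np
from sklearn.preprocessing import quantile_transform

rng = np.random.RandomState(0)
X = rng.exponential(size=(25, 3))

Xn = quantile_transform(
    X, n_quantiles=10, output_distribution="normal", random_state=0, copy=True
)
print(Xn.shape)                  # (25, 3)
print(Xn.mean(axis=0).round(2))  # roughly centred after the normal mapping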
def transform(self, X):
    """Apply the power transform to each feature using the fitted lambdas.

    Parameters
    ----------
    X : array-like of shape (n_samples, n_features)
        The data to be transformed using a power transformation.

    Returns
    -------
    X_trans : ndarray of shape (n_samples, n_features)
        The transformed data.
    """
    check_is_fitted(self)
    X = self._check_input(X, in_fit=False, check_positive=True, check_shape=True)

    transform_function = {
        "box-cox": boxcox,
        "yeo-johnson": self._yeo_johnson_transform,
    }[self.method]
    for i, lmbda in enumerate(self.lambdas_):
        with np.errstate(invalid="ignore"):  # hide NaN warnings
            X[:, i] = transform_function(X[:, i], lmbda)

    if self.standardize:
        X = self._scaler.transform(X)

    return X
Apply the power transform to each feature using the fitted lambdas.

Parameters
----------
X : array-like of shape (n_samples, n_features)
    The data to be transformed using a power transformation.

Returns
-------
X_trans : ndarray of shape (n_samples, n_features)
    The transformed data.
transform
python
scikit-learn/scikit-learn
sklearn/preprocessing/_data.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/_data.py
BSD-3-Clause
def inverse_transform(self, X):
    """Apply the inverse power transformation using the fitted lambdas.

    The inverse of the Box-Cox transformation is given by::

        if lambda_ == 0:
            X_original = exp(X_trans)
        else:
            X_original = (X_trans * lambda_ + 1) ** (1 / lambda_)

    The inverse of the Yeo-Johnson transformation is given by::

        if X >= 0 and lambda_ == 0:
            X_original = exp(X) - 1
        elif X >= 0 and lambda_ != 0:
            X_original = (X * lambda_ + 1) ** (1 / lambda_) - 1
        elif X < 0 and lambda_ != 2:
            X_original = 1 - (-(2 - lambda_) * X + 1) ** (1 / (2 - lambda_))
        elif X < 0 and lambda_ == 2:
            X_original = 1 - exp(-X)

    Parameters
    ----------
    X : array-like of shape (n_samples, n_features)
        The transformed data.

    Returns
    -------
    X_original : ndarray of shape (n_samples, n_features)
        The original data.
    """
    check_is_fitted(self)
    X = self._check_input(X, in_fit=False, check_shape=True)

    if self.standardize:
        X = self._scaler.inverse_transform(X)

    inv_fun = {
        "box-cox": inv_boxcox,
        "yeo-johnson": self._yeo_johnson_inverse_transform,
    }[self.method]
    for i, lmbda in enumerate(self.lambdas_):
        with np.errstate(invalid="ignore"):  # hide NaN warnings
            X[:, i] = inv_fun(X[:, i], lmbda)

    return X
Apply the inverse power transformation using the fitted lambdas.

The inverse of the Box-Cox transformation is given by::

    if lambda_ == 0:
        X_original = exp(X_trans)
    else:
        X_original = (X_trans * lambda_ + 1) ** (1 / lambda_)

The inverse of the Yeo-Johnson transformation is given by::

    if X >= 0 and lambda_ == 0:
        X_original = exp(X) - 1
    elif X >= 0 and lambda_ != 0:
        X_original = (X * lambda_ + 1) ** (1 / lambda_) - 1
    elif X < 0 and lambda_ != 2:
        X_original = 1 - (-(2 - lambda_) * X + 1) ** (1 / (2 - lambda_))
    elif X < 0 and lambda_ == 2:
        X_original = 1 - exp(-X)

Parameters
----------
X : array-like of shape (n_samples, n_features)
    The transformed data.

Returns
-------
X_original : ndarray of shape (n_samples, n_features)
    The original data.
inverse_transform
python
scikit-learn/scikit-learn
sklearn/preprocessing/_data.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/_data.py
BSD-3-Clause
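A round-trip sketch for PowerTransformer (editorial aside, assuming scikit-learn); Box-Cox is valid here because lognormal data is strictly positive:

import numpy as np
from sklearn.preprocessing import PowerTransformer

rng = np.random.RandomState(0)
X = rng.lognormal(size=(100, 2))  # strictly positive, valid for Box-Cox

pt = PowerTransformer(method="box-cox", standardize=True).fit(X)
Xt = pt.transform(X)
print(pt.lambdas_.shape)  # (2,): one fitted lambda per feature
print(np.allclose(pt.inverse_transform(Xt), X))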
def _yeo_johnson_inverse_transform(self, x, lmbda):
    """Return inverse-transformed input x following Yeo-Johnson inverse
    transform with parameter lambda.
    """
    x_inv = np.zeros_like(x)
    pos = x >= 0

    # when x >= 0
    if abs(lmbda) < np.spacing(1.0):
        x_inv[pos] = np.exp(x[pos]) - 1
    else:  # lmbda != 0
        x_inv[pos] = np.power(x[pos] * lmbda + 1, 1 / lmbda) - 1

    # when x < 0
    if abs(lmbda - 2) > np.spacing(1.0):
        x_inv[~pos] = 1 - np.power(-(2 - lmbda) * x[~pos] + 1, 1 / (2 - lmbda))
    else:  # lmbda == 2
        x_inv[~pos] = 1 - np.exp(-x[~pos])

    return x_inv
Return inverse-transformed input x following Yeo-Johnson inverse transform with parameter lambda.
_yeo_johnson_inverse_transform
python
scikit-learn/scikit-learn
sklearn/preprocessing/_data.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/_data.py
BSD-3-Clause
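A numeric sanity sketch (not from the source) checking that the inverse formulas above undo SciPy's forward Yeo-Johnson transform, for a lambda away from 0 and 2:

import numpy as np
from scipy import stats

x = np.array([-3.0, -1.0, 0.0, 1.0, 3.0])
lmbda = 0.7
y = stats.yeojohnson(x, lmbda)  # forward transform; sign of y matches sign of x

with np.errstate(invalid="ignore"):  # np.where evaluates both branches
    x_back = np.where(
        y >= 0,
        (y * lmbda + 1) ** (1 / lmbda) - 1,               # x >= 0 branch
        1 - (-(2 - lmbda) * y + 1) ** (1 / (2 - lmbda)),  # x < 0 branch
    )
print(np.allclose(x, x_back))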
def _yeo_johnson_transform(self, x, lmbda):
    """Return transformed input x following Yeo-Johnson transform with
    parameter lambda.
    """
    out = np.zeros_like(x)
    pos = x >= 0  # binary mask

    # when x >= 0
    if abs(lmbda) < np.spacing(1.0):
        out[pos] = np.log1p(x[pos])
    else:  # lmbda != 0
        out[pos] = (np.power(x[pos] + 1, lmbda) - 1) / lmbda

    # when x < 0
    if abs(lmbda - 2) > np.spacing(1.0):
        out[~pos] = -(np.power(-x[~pos] + 1, 2 - lmbda) - 1) / (2 - lmbda)
    else:  # lmbda == 2
        out[~pos] = -np.log1p(-x[~pos])

    return out
Return transformed input x following Yeo-Johnson transform with parameter lambda.
_yeo_johnson_transform
python
scikit-learn/scikit-learn
sklearn/preprocessing/_data.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/_data.py
BSD-3-Clause
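A complementary cross-check sketch of the piecewise forward formula against scipy.stats.yeojohnson (again an aside, with lambda away from 0 and 2):

import numpy as np
from scipy import stats

x = np.array([-2.0, -0.5, 0.0, 0.5, 2.0])
lmbda = 1.5
with np.errstate(invalid="ignore"):  # np.where evaluates both branches
    manual = np.where(
        x >= 0,
        ((x + 1) ** lmbda - 1) / lmbda,                # lmbda != 0 branch
        -((-x + 1) ** (2 - lmbda) - 1) / (2 - lmbda),  # lmbda != 2 branch
    )
print(np.allclose(manual, stats.yeojohnson(x, lmbda)))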
def _box_cox_optimize(self, x):
    """Find and return optimal lambda parameter of the Box-Cox transform by
    MLE, for observed data x.

    We here use scipy builtins, which use the Brent optimizer.
    """
    mask = np.isnan(x)
    if np.all(mask):
        raise ValueError("Column must not be all nan.")

    # the computation of lambda is influenced by NaNs so we need to
    # get rid of them
    _, lmbda = stats.boxcox(x[~mask], lmbda=None)

    return lmbda
Find and return optimal lambda parameter of the Box-Cox transform by MLE, for observed data x. We here use scipy builtins, which use the Brent optimizer.
_box_cox_optimize
python
scikit-learn/scikit-learn
sklearn/preprocessing/_data.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/_data.py
BSD-3-Clause
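The scipy builtin referenced above returns both the transformed data and the MLE lambda when called with `lmbda=None`; a sketch:

import numpy as np
from scipy import stats

x = np.random.RandomState(0).lognormal(size=200)
x_trans, lmbda = stats.boxcox(x, lmbda=None)
print(round(float(lmbda), 3))  # near 0 for lognormal data (log transform)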
def _yeo_johnson_optimize(self, x):
    """Find and return optimal lambda parameter of the Yeo-Johnson
    transform by MLE, for observed data x.

    Like for Box-Cox, MLE is done via the Brent optimizer.
    """
    x_tiny = np.finfo(np.float64).tiny

    def _neg_log_likelihood(lmbda):
        """Return the negative log likelihood of the observed data x as a
        function of lambda."""
        x_trans = self._yeo_johnson_transform(x, lmbda)
        n_samples = x.shape[0]
        x_trans_var = x_trans.var()

        # Reject transformed data that would raise a RuntimeWarning in np.log
        if x_trans_var < x_tiny:
            return np.inf

        log_var = np.log(x_trans_var)
        loglike = -n_samples / 2 * log_var
        loglike += (lmbda - 1) * (np.sign(x) * np.log1p(np.abs(x))).sum()

        return -loglike

    # the computation of lambda is influenced by NaNs so we need to
    # get rid of them
    x = x[~np.isnan(x)]

    return _yeojohnson_lambda(_neg_log_likelihood, x)
Find and return optimal lambda parameter of the Yeo-Johnson transform by MLE, for observed data x. Like for Box-Cox, MLE is done via the Brent optimizer.
_yeo_johnson_optimize
python
scikit-learn/scikit-learn
sklearn/preprocessing/_data.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/_data.py
BSD-3-Clause
def _neg_log_likelihood(lmbda):
    """Return the negative log likelihood of the observed data x as a
    function of lambda."""
    x_trans = self._yeo_johnson_transform(x, lmbda)
    n_samples = x.shape[0]
    x_trans_var = x_trans.var()

    # Reject transformed data that would raise a RuntimeWarning in np.log
    if x_trans_var < x_tiny:
        return np.inf

    log_var = np.log(x_trans_var)
    loglike = -n_samples / 2 * log_var
    loglike += (lmbda - 1) * (np.sign(x) * np.log1p(np.abs(x))).sum()

    return -loglike
Return the negative log likelihood of the observed data x as a function of lambda.
_neg_log_likelihood
python
scikit-learn/scikit-learn
sklearn/preprocessing/_data.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/_data.py
BSD-3-Clause
def _check_input(self, X, in_fit, check_positive=False, check_shape=False):
    """Validate the input before fit and transform.

    Parameters
    ----------
    X : array-like of shape (n_samples, n_features)

    in_fit : bool
        Whether or not `_check_input` is called from `fit` or other
        methods, e.g. `predict`, `transform`, etc.

    check_positive : bool, default=False
        If True, check that all data is positive and non-zero (only if
        ``self.method=='box-cox'``).

    check_shape : bool, default=False
        If True, check that n_features matches the length of self.lambdas_
    """
    X = validate_data(
        self,
        X,
        ensure_2d=True,
        dtype=FLOAT_DTYPES,
        force_writeable=True,
        copy=self.copy,
        ensure_all_finite="allow-nan",
        reset=in_fit,
    )

    with warnings.catch_warnings():
        warnings.filterwarnings("ignore", r"All-NaN (slice|axis) encountered")
        if check_positive and self.method == "box-cox" and np.nanmin(X) <= 0:
            raise ValueError(
                "The Box-Cox transformation can only be "
                "applied to strictly positive data"
            )

    if check_shape and not X.shape[1] == len(self.lambdas_):
        raise ValueError(
            "Input data has a different number of features "
            "than fitting data. Should have {n}, data has {m}".format(
                n=len(self.lambdas_), m=X.shape[1]
            )
        )

    return X
Validate the input before fit and transform.

Parameters
----------
X : array-like of shape (n_samples, n_features)

in_fit : bool
    Whether or not `_check_input` is called from `fit` or other
    methods, e.g. `predict`, `transform`, etc.

check_positive : bool, default=False
    If True, check that all data is positive and non-zero (only if
    ``self.method=='box-cox'``).

check_shape : bool, default=False
    If True, check that n_features matches the length of self.lambdas_
_check_input
python
scikit-learn/scikit-learn
sklearn/preprocessing/_data.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/_data.py
BSD-3-Clause
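The positivity check surfaces through the public API; a minimal sketch (assumes scikit-learn):

import numpy as np
from sklearn.preprocessing import PowerTransformer

try:
    PowerTransformer(method="box-cox").fit(np.array([[0.0], [1.0]]))
except ValueError as exc:
    print(exc)  # Box-Cox can only be applied to strictly positive data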
def fit(self, X, y=None, sample_weight=None):
    """
    Fit the estimator.

    Parameters
    ----------
    X : array-like of shape (n_samples, n_features)
        Data to be discretized.

    y : None
        Ignored. This parameter exists only for compatibility with
        :class:`~sklearn.pipeline.Pipeline`.

    sample_weight : ndarray of shape (n_samples,)
        Contains weight values to be associated with each sample.

        .. versionadded:: 1.3

        .. versionchanged:: 1.7
            Added support for strategy="uniform".

    Returns
    -------
    self : object
        Returns the instance itself.
    """
    X = validate_data(self, X, dtype="numeric")

    if self.dtype in (np.float64, np.float32):
        output_dtype = self.dtype
    else:  # self.dtype is None
        output_dtype = X.dtype

    n_samples, n_features = X.shape

    if sample_weight is not None:
        sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)

    if self.subsample is not None and n_samples > self.subsample:
        # Take a subsample of `X`
        # When resampling, it is important to subsample **with replacement** to
        # preserve the distribution, in particular in the presence of a few data
        # points with large weights. You can check this by setting `replace=False`
        # in sklearn.utils.test.test_indexing.test_resample_weighted and check that
        # it fails as a justification for this claim.
        X = resample(
            X,
            replace=True,
            n_samples=self.subsample,
            random_state=self.random_state,
            sample_weight=sample_weight,
        )
        # Since we already used the weights when resampling when provided,
        # we set them back to `None` to avoid accounting for the weights twice
        # in subsequent operations to compute weight-aware bin edges with
        # quantiles or k-means.
        sample_weight = None

    n_features = X.shape[1]
    n_bins = self._validate_n_bins(n_features)

    bin_edges = np.zeros(n_features, dtype=object)

    # TODO(1.9): remove and switch to quantile_method="averaged_inverted_cdf"
    # by default.
    quantile_method = self.quantile_method
    if self.strategy == "quantile" and quantile_method == "warn":
        warnings.warn(
            "The current default behavior, quantile_method='linear', will be "
            "changed to quantile_method='averaged_inverted_cdf' in "
            "scikit-learn version 1.9 to naturally support sample weight "
            "equivalence properties by default. Pass "
            "quantile_method='averaged_inverted_cdf' explicitly to silence this "
            "warning.",
            FutureWarning,
        )
        quantile_method = "linear"

    if (
        self.strategy == "quantile"
        and quantile_method not in ["inverted_cdf", "averaged_inverted_cdf"]
        and sample_weight is not None
    ):
        raise ValueError(
            "When fitting with strategy='quantile' and sample weights, "
            "quantile_method should either be set to 'averaged_inverted_cdf' or "
            f"'inverted_cdf', got quantile_method='{quantile_method}' instead."
        )

    if self.strategy != "quantile" and sample_weight is not None:
        # Prepare a mask to filter out zero-weight samples when extracting
        # the min and max values of each columns which are needed for the
        # "uniform" and "kmeans" strategies.
        nnz_weight_mask = sample_weight != 0
    else:
        # Otherwise, all samples are used. Use a slice to avoid creating a
        # new array.
        nnz_weight_mask = slice(None)

    for jj in range(n_features):
        column = X[:, jj]
        col_min = column[nnz_weight_mask].min()
        col_max = column[nnz_weight_mask].max()

        if col_min == col_max:
            warnings.warn(
                "Feature %d is constant and will be replaced with 0." % jj
            )
            n_bins[jj] = 1
            bin_edges[jj] = np.array([-np.inf, np.inf])
            continue

        if self.strategy == "uniform":
            bin_edges[jj] = np.linspace(col_min, col_max, n_bins[jj] + 1)

        elif self.strategy == "quantile":
            percentile_levels = np.linspace(0, 100, n_bins[jj] + 1)

            # method="linear" is the implicit default for any numpy
            # version. So we keep it version independent in that case by
            # using an empty param dict.
            percentile_kwargs = {}
            if quantile_method != "linear" and sample_weight is None:
                percentile_kwargs["method"] = quantile_method

            if sample_weight is None:
                bin_edges[jj] = np.asarray(
                    np.percentile(column, percentile_levels, **percentile_kwargs),
                    dtype=np.float64,
                )
            else:
                # TODO: make _weighted_percentile and
                # _averaged_weighted_percentile accept an array of
                # quantiles instead of calling it multiple times and
                # sorting the column multiple times as a result.
                percentile_func = {
                    "inverted_cdf": _weighted_percentile,
                    "averaged_inverted_cdf": _averaged_weighted_percentile,
                }[quantile_method]
                bin_edges[jj] = np.asarray(
                    [
                        percentile_func(column, sample_weight, percentile_rank=p)
                        for p in percentile_levels
                    ],
                    dtype=np.float64,
                )

        elif self.strategy == "kmeans":
            from ..cluster import KMeans  # fixes import loops

            # Deterministic initialization with uniform spacing
            uniform_edges = np.linspace(col_min, col_max, n_bins[jj] + 1)
            init = (uniform_edges[1:] + uniform_edges[:-1])[:, None] * 0.5

            # 1D k-means procedure
            km = KMeans(n_clusters=n_bins[jj], init=init, n_init=1)
            centers = km.fit(
                column[:, None], sample_weight=sample_weight
            ).cluster_centers_[:, 0]
            # Must sort, centers may be unsorted even with sorted init
            centers.sort()
            bin_edges[jj] = (centers[1:] + centers[:-1]) * 0.5
            bin_edges[jj] = np.r_[col_min, bin_edges[jj], col_max]

        # Remove bins whose width are too small (i.e., <= 1e-8)
        if self.strategy in ("quantile", "kmeans"):
            mask = np.ediff1d(bin_edges[jj], to_begin=np.inf) > 1e-8
            bin_edges[jj] = bin_edges[jj][mask]
            if len(bin_edges[jj]) - 1 != n_bins[jj]:
                warnings.warn(
                    "Bins whose width are too small (i.e., <= "
                    "1e-8) in feature %d are removed. Consider "
                    "decreasing the number of bins." % jj
                )
                n_bins[jj] = len(bin_edges[jj]) - 1

    self.bin_edges_ = bin_edges
    self.n_bins_ = n_bins

    if "onehot" in self.encode:
        self._encoder = OneHotEncoder(
            categories=[np.arange(i) for i in self.n_bins_],
            sparse_output=self.encode == "onehot",
            dtype=output_dtype,
        )
        # Fit the OneHotEncoder with toy datasets
        # so that it's ready for use after the KBinsDiscretizer is fitted
        self._encoder.fit(np.zeros((1, len(self.n_bins_))))

    return self
Fit the estimator.

Parameters
----------
X : array-like of shape (n_samples, n_features)
    Data to be discretized.

y : None
    Ignored. This parameter exists only for compatibility with
    :class:`~sklearn.pipeline.Pipeline`.

sample_weight : ndarray of shape (n_samples,)
    Contains weight values to be associated with each sample.

    .. versionadded:: 1.3

    .. versionchanged:: 1.7
        Added support for strategy="uniform".

Returns
-------
self : object
    Returns the instance itself.
fit
python
scikit-learn/scikit-learn
sklearn/preprocessing/_discretization.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/_discretization.py
BSD-3-Clause
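A fit/transform sketch for the discretizer above (assumes scikit-learn); strategy="uniform" sidesteps the quantile_method FutureWarning mentioned in the code:

import numpy as np
from sklearn.preprocessing import KBinsDiscretizer

X = np.array([[-2.0, 1.0], [-1.0, 2.0], [0.0, 3.0], [1.0, 4.0]])
est = KBinsDiscretizer(n_bins=3, encode="ordinal", strategy="uniform").fit(X)
print(est.bin_edges_[0])  # per-feature edge array: [-2., -1., 0., 1.]
print(est.transform(X))   # ordinal bin codes per feature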
def _validate_n_bins(self, n_features):
    """Returns n_bins_, the number of bins per feature."""
    orig_bins = self.n_bins
    if isinstance(orig_bins, Integral):
        return np.full(n_features, orig_bins, dtype=int)

    n_bins = check_array(orig_bins, dtype=int, copy=True, ensure_2d=False)

    if n_bins.ndim > 1 or n_bins.shape[0] != n_features:
        raise ValueError("n_bins must be a scalar or array of shape (n_features,).")

    bad_nbins_value = (n_bins < 2) | (n_bins != orig_bins)

    violating_indices = np.where(bad_nbins_value)[0]
    if violating_indices.shape[0] > 0:
        indices = ", ".join(str(i) for i in violating_indices)
        raise ValueError(
            "{} received an invalid number "
            "of bins at indices {}. Number of bins "
            "must be at least 2, and must be an int.".format(
                KBinsDiscretizer.__name__, indices
            )
        )
    return n_bins
Returns n_bins_, the number of bins per feature.
_validate_n_bins
python
scikit-learn/scikit-learn
sklearn/preprocessing/_discretization.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/_discretization.py
BSD-3-Clause
def transform(self, X):
    """
    Discretize the data.

    Parameters
    ----------
    X : array-like of shape (n_samples, n_features)
        Data to be discretized.

    Returns
    -------
    Xt : {ndarray, sparse matrix}, dtype={np.float32, np.float64}
        Data in the binned space. Will be a sparse matrix if
        `self.encode='onehot'` and ndarray otherwise.
    """
    check_is_fitted(self)

    # check input and attribute dtypes
    dtype = (np.float64, np.float32) if self.dtype is None else self.dtype
    Xt = validate_data(self, X, copy=True, dtype=dtype, reset=False)

    bin_edges = self.bin_edges_
    for jj in range(Xt.shape[1]):
        Xt[:, jj] = np.searchsorted(bin_edges[jj][1:-1], Xt[:, jj], side="right")

    if self.encode == "ordinal":
        return Xt

    dtype_init = None
    if "onehot" in self.encode:
        dtype_init = self._encoder.dtype
        self._encoder.dtype = Xt.dtype
    try:
        Xt_enc = self._encoder.transform(Xt)
    finally:
        # revert the initial dtype to avoid modifying self.
        self._encoder.dtype = dtype_init
    return Xt_enc
Discretize the data.

Parameters
----------
X : array-like of shape (n_samples, n_features)
    Data to be discretized.

Returns
-------
Xt : {ndarray, sparse matrix}, dtype={np.float32, np.float64}
    Data in the binned space. Will be a sparse matrix if
    `self.encode='onehot'` and ndarray otherwise.
transform
python
scikit-learn/scikit-learn
sklearn/preprocessing/_discretization.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/_discretization.py
BSD-3-Clause
def inverse_transform(self, X):
    """
    Transform discretized data back to original feature space.

    Note that this function does not regenerate the original data
    due to discretization rounding.

    Parameters
    ----------
    X : array-like of shape (n_samples, n_features)
        Transformed data in the binned space.

    Returns
    -------
    X_original : ndarray, dtype={np.float32, np.float64}
        Data in the original feature space.
    """
    check_is_fitted(self)

    if "onehot" in self.encode:
        X = self._encoder.inverse_transform(X)

    Xinv = check_array(X, copy=True, dtype=(np.float64, np.float32))
    n_features = self.n_bins_.shape[0]
    if Xinv.shape[1] != n_features:
        raise ValueError(
            "Incorrect number of features. Expecting {}, received {}.".format(
                n_features, Xinv.shape[1]
            )
        )

    for jj in range(n_features):
        bin_edges = self.bin_edges_[jj]
        bin_centers = (bin_edges[1:] + bin_edges[:-1]) * 0.5
        Xinv[:, jj] = bin_centers[(Xinv[:, jj]).astype(np.int64)]

    return Xinv
Transform discretized data back to original feature space.

Note that this function does not regenerate the original data
due to discretization rounding.

Parameters
----------
X : array-like of shape (n_samples, n_features)
    Transformed data in the binned space.

Returns
-------
X_original : ndarray, dtype={np.float32, np.float64}
    Data in the original feature space.
inverse_transform
python
scikit-learn/scikit-learn
sklearn/preprocessing/_discretization.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/_discretization.py
BSD-3-Clause
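As the docstring warns, the inverse only recovers bin centers; a sketch (assumes scikit-learn):

import numpy as np
from sklearn.preprocessing import KBinsDiscretizer

X = np.array([[0.0], [1.0], [2.0], [3.0]])
est = KBinsDiscretizer(n_bins=2, encode="ordinal", strategy="uniform").fit(X)
Xt = est.transform(X)
print(est.inverse_transform(Xt))  # bin centers 0.75 / 2.25, not the originals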
def get_feature_names_out(self, input_features=None):
    """Get output feature names.

    Parameters
    ----------
    input_features : array-like of str or None, default=None
        Input features.

        - If `input_features` is `None`, then `feature_names_in_` is
          used as feature names in. If `feature_names_in_` is not defined,
          then the following input feature names are generated:
          `["x0", "x1", ..., "x(n_features_in_ - 1)"]`.
        - If `input_features` is an array-like, then `input_features` must
          match `feature_names_in_` if `feature_names_in_` is defined.

    Returns
    -------
    feature_names_out : ndarray of str objects
        Transformed feature names.
    """
    check_is_fitted(self, "n_features_in_")
    input_features = _check_feature_names_in(self, input_features)
    if hasattr(self, "_encoder"):
        return self._encoder.get_feature_names_out(input_features)

    # ordinal encoding
    return input_features
Get output feature names.

Parameters
----------
input_features : array-like of str or None, default=None
    Input features.

    - If `input_features` is `None`, then `feature_names_in_` is
      used as feature names in. If `feature_names_in_` is not defined,
      then the following input feature names are generated:
      `["x0", "x1", ..., "x(n_features_in_ - 1)"]`.
    - If `input_features` is an array-like, then `input_features` must
      match `feature_names_in_` if `feature_names_in_` is defined.

Returns
-------
feature_names_out : ndarray of str objects
    Transformed feature names.
get_feature_names_out
python
scikit-learn/scikit-learn
sklearn/preprocessing/_discretization.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/_discretization.py
BSD-3-Clause
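With a one-hot encode, the names are delegated to the internal OneHotEncoder; a sketch (assumes scikit-learn):

import numpy as np
from sklearn.preprocessing import KBinsDiscretizer

X = np.arange(8.0).reshape(4, 2)
est = KBinsDiscretizer(n_bins=2, encode="onehot", strategy="uniform").fit(X)
print(est.get_feature_names_out(["a", "b"]))  # one name per one-hot output column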
def _check_X(self, X, ensure_all_finite=True):
    """
    Perform custom check_array:

    - convert list of strings to object dtype
    - check for missing values for object dtype data (check_array does
      not do that)
    - return list of features (arrays): this list of features is
      constructed feature by feature to preserve the data types
      of pandas DataFrame columns, as otherwise information is lost
      and cannot be used, e.g. for the `categories_` attribute.
    """
    if not (hasattr(X, "iloc") and getattr(X, "ndim", 0) == 2):
        # if not a dataframe, do normal check_array validation
        X_temp = check_array(X, dtype=None, ensure_all_finite=ensure_all_finite)
        if not hasattr(X, "dtype") and np.issubdtype(X_temp.dtype, np.str_):
            X = check_array(X, dtype=object, ensure_all_finite=ensure_all_finite)
        else:
            X = X_temp
        needs_validation = False
    else:
        # pandas dataframe, do validation later column by column, in order
        # to keep the dtype information to be used in the encoder.
        needs_validation = ensure_all_finite

    n_samples, n_features = X.shape
    X_columns = []

    for i in range(n_features):
        Xi = _safe_indexing(X, indices=i, axis=1)
        Xi = check_array(
            Xi, ensure_2d=False, dtype=None, ensure_all_finite=needs_validation
        )
        X_columns.append(Xi)

    return X_columns, n_samples, n_features
Perform custom check_array:

- convert list of strings to object dtype
- check for missing values for object dtype data (check_array does
  not do that)
- return list of features (arrays): this list of features is
  constructed feature by feature to preserve the data types
  of pandas DataFrame columns, as otherwise information is lost
  and cannot be used, e.g. for the `categories_` attribute.
_check_X
python
scikit-learn/scikit-learn
sklearn/preprocessing/_encoders.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/_encoders.py
BSD-3-Clause
def _check_infrequent_enabled(self):
    """
    This function determines whether infrequent-category handling is
    enabled and sets the `_infrequent_enabled` attribute accordingly.
    This has to be called after parameter validation in the fit function.
    """
    max_categories = getattr(self, "max_categories", None)
    min_frequency = getattr(self, "min_frequency", None)
    self._infrequent_enabled = (
        max_categories is not None and max_categories >= 1
    ) or min_frequency is not None
This function determines whether infrequent-category handling is enabled and sets the `_infrequent_enabled` attribute accordingly. This has to be called after parameter validation in the fit function.
_check_infrequent_enabled
python
scikit-learn/scikit-learn
sklearn/preprocessing/_encoders.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/_encoders.py
BSD-3-Clause
def _identify_infrequent(self, category_count, n_samples, col_idx):
    """Compute the infrequent indices.

    Parameters
    ----------
    category_count : ndarray of shape (n_cardinality,)
        Category counts.

    n_samples : int
        Number of samples.

    col_idx : int
        Index of the current category. Only used for the error message.

    Returns
    -------
    output : ndarray of shape (n_infrequent_categories,) or None
        If there are infrequent categories, indices of infrequent
        categories. Otherwise None.
    """
    if isinstance(self.min_frequency, numbers.Integral):
        infrequent_mask = category_count < self.min_frequency
    elif isinstance(self.min_frequency, numbers.Real):
        min_frequency_abs = n_samples * self.min_frequency
        infrequent_mask = category_count < min_frequency_abs
    else:
        infrequent_mask = np.zeros(category_count.shape[0], dtype=bool)

    n_current_features = category_count.size - infrequent_mask.sum() + 1
    if self.max_categories is not None and self.max_categories < n_current_features:
        # max_categories includes the one infrequent category
        frequent_category_count = self.max_categories - 1
        if frequent_category_count == 0:
            # All categories are infrequent
            infrequent_mask[:] = True
        else:
            # stable sort to preserve original count order
            smallest_levels = np.argsort(category_count, kind="mergesort")[
                :-frequent_category_count
            ]
            infrequent_mask[smallest_levels] = True

    output = np.flatnonzero(infrequent_mask)
    return output if output.size > 0 else None
Compute the infrequent indices.

Parameters
----------
category_count : ndarray of shape (n_cardinality,)
    Category counts.

n_samples : int
    Number of samples.

col_idx : int
    Index of the current category. Only used for the error message.

Returns
-------
output : ndarray of shape (n_infrequent_categories,) or None
    If there are infrequent categories, indices of infrequent
    categories. Otherwise None.
_identify_infrequent
python
scikit-learn/scikit-learn
sklearn/preprocessing/_encoders.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/_encoders.py
BSD-3-Clause
def _fit_infrequent_category_mapping(
    self, n_samples, category_counts, missing_indices
):
    """Fit infrequent categories.

    Defines the private attribute: `_default_to_infrequent_mappings`. For
    feature `i`, `_default_to_infrequent_mappings[i]` defines the mapping from
    the integer encoding returned by `super().transform()` into infrequent
    categories. If `_default_to_infrequent_mappings[i]` is None, there were no
    infrequent categories in the training set.

    For example if categories 0, 2 and 4 were frequent, while categories
    1, 3, 5 were infrequent for feature 7, then these categories are mapped
    to a single output:
    `_default_to_infrequent_mappings[7] = array([0, 3, 1, 3, 2, 3])`

    Defines private attribute: `_infrequent_indices`. `_infrequent_indices[i]`
    is an array of indices such that
    `categories_[i][_infrequent_indices[i]]` are all the infrequent category
    labels. If the feature `i` has no infrequent categories
    `_infrequent_indices[i]` is None.

    .. versionadded:: 1.1

    Parameters
    ----------
    n_samples : int
        Number of samples in training set.

    category_counts : list of ndarray
        `category_counts[i]` is the category counts corresponding to
        `self.categories_[i]`.

    missing_indices : dict
        Dict mapping from feature_idx to category index with a missing value.
    """
    # Remove missing value from counts, so it is not considered as infrequent
    if missing_indices:
        category_counts_ = []
        for feature_idx, count in enumerate(category_counts):
            if feature_idx in missing_indices:
                category_counts_.append(
                    np.delete(count, missing_indices[feature_idx])
                )
            else:
                category_counts_.append(count)
    else:
        category_counts_ = category_counts

    self._infrequent_indices = [
        self._identify_infrequent(category_count, n_samples, col_idx)
        for col_idx, category_count in enumerate(category_counts_)
    ]

    # compute mapping from default mapping to infrequent mapping
    self._default_to_infrequent_mappings = []

    for feature_idx, infreq_idx in enumerate(self._infrequent_indices):
        cats = self.categories_[feature_idx]
        # no infrequent categories
        if infreq_idx is None:
            self._default_to_infrequent_mappings.append(None)
            continue

        n_cats = len(cats)
        if feature_idx in missing_indices:
            # Missing index was removed from this category when computing
            # infrequent indices, thus we need to decrease the number of
            # total categories when considering the infrequent mapping.
            n_cats -= 1

        # infrequent indices exist
        mapping = np.empty(n_cats, dtype=np.int64)
        n_infrequent_cats = infreq_idx.size

        # infrequent categories are mapped to the last element.
        n_frequent_cats = n_cats - n_infrequent_cats
        mapping[infreq_idx] = n_frequent_cats

        frequent_indices = np.setdiff1d(np.arange(n_cats), infreq_idx)
        mapping[frequent_indices] = np.arange(n_frequent_cats)

        self._default_to_infrequent_mappings.append(mapping)
Fit infrequent categories.

Defines the private attribute: `_default_to_infrequent_mappings`. For
feature `i`, `_default_to_infrequent_mappings[i]` defines the mapping from
the integer encoding returned by `super().transform()` into infrequent
categories. If `_default_to_infrequent_mappings[i]` is None, there were no
infrequent categories in the training set.

For example if categories 0, 2 and 4 were frequent, while categories
1, 3, 5 were infrequent for feature 7, then these categories are mapped
to a single output:
`_default_to_infrequent_mappings[7] = array([0, 3, 1, 3, 2, 3])`

Defines private attribute: `_infrequent_indices`. `_infrequent_indices[i]`
is an array of indices such that
`categories_[i][_infrequent_indices[i]]` are all the infrequent category
labels. If the feature `i` has no infrequent categories
`_infrequent_indices[i]` is None.

.. versionadded:: 1.1

Parameters
----------
n_samples : int
    Number of samples in training set.

category_counts : list of ndarray
    `category_counts[i]` is the category counts corresponding to
    `self.categories_[i]`.

missing_indices : dict
    Dict mapping from feature_idx to category index with a missing value.
_fit_infrequent_category_mapping
python
scikit-learn/scikit-learn
sklearn/preprocessing/_encoders.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/_encoders.py
BSD-3-Clause
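The mapping machinery above is driven by the public `min_frequency` / `max_categories` options; a sketch (assumes scikit-learn >= 1.2 for `sparse_output`):

import numpy as np
from sklearn.preprocessing import OneHotEncoder

X = np.array(["cat"] * 20 + ["dog"] * 10 + ["snake"] * 3 + ["lizard"] * 2).reshape(-1, 1)
enc = OneHotEncoder(min_frequency=5, sparse_output=False).fit(X)
print(enc.infrequent_categories_)  # categories with count < 5 are grouped
print(enc.transform([["snake"]]))  # mapped to the single infrequent column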
def _map_infrequent_categories(self, X_int, X_mask, ignore_category_indices):
    """Map infrequent categories to integer representing the infrequent category.

    This modifies X_int in-place. Values that were invalid based on `X_mask`
    are mapped to the infrequent category if there was an infrequent
    category for that feature.

    Parameters
    ----------
    X_int : ndarray of shape (n_samples, n_features)
        Integer encoded categories.

    X_mask : ndarray of shape (n_samples, n_features)
        Bool mask for valid values in `X_int`.

    ignore_category_indices : dict
        Dictionary mapping from feature_idx to category index to ignore.
        Ignored indexes will not be grouped and the original ordinal encoding
        will remain.
    """
    if not self._infrequent_enabled:
        return

    ignore_category_indices = ignore_category_indices or {}

    for col_idx in range(X_int.shape[1]):
        infrequent_idx = self._infrequent_indices[col_idx]
        if infrequent_idx is None:
            continue

        X_int[~X_mask[:, col_idx], col_idx] = infrequent_idx[0]
        if self.handle_unknown == "infrequent_if_exist":
            # All the unknown values are now mapped to the
            # infrequent_idx[0], which makes the unknown values valid
            # This is needed in `transform` when the encoding is formed
            # using `X_mask`.
            X_mask[:, col_idx] = True

    # Remaps encoding in `X_int` where the infrequent categories are
    # grouped together.
    for i, mapping in enumerate(self._default_to_infrequent_mappings):
        if mapping is None:
            continue

        if i in ignore_category_indices:
            # Update rows that are **not** ignored
            rows_to_update = X_int[:, i] != ignore_category_indices[i]
        else:
            rows_to_update = slice(None)

        X_int[rows_to_update, i] = np.take(mapping, X_int[rows_to_update, i])
Map infrequent categories to integer representing the infrequent category. This modifies X_int in-place. Values that were invalid based on `X_mask` are mapped to the infrequent category if there was an infrequent category for that feature. Parameters ---------- X_int: ndarray of shape (n_samples, n_features) Integer encoded categories. X_mask: ndarray of shape (n_samples, n_features) Bool mask for valid values in `X_int`. ignore_category_indices : dict Dictionary mapping from feature_idx to category index to ignore. Ignored indexes will not be grouped and the original ordinal encoding will remain.
_map_infrequent_categories
python
scikit-learn/scikit-learn
sklearn/preprocessing/_encoders.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/_encoders.py
BSD-3-Clause
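With `handle_unknown="infrequent_if_exist"`, the unknown-to-infrequent routing implemented above is observable from the public API. A short sketch (illustrative, not from the source record):

import numpy as np
from sklearn.preprocessing import OneHotEncoder

X = np.array([["a"] * 5 + ["b"] * 5 + ["c"]]).T  # "c" is infrequent
enc = OneHotEncoder(
    min_frequency=2, handle_unknown="infrequent_if_exist", sparse_output=False
).fit(X)
# "z" was never seen during fit: it lands in the infrequent column
# instead of raising or producing an all-zero row.
print(enc.transform(np.array([["z"]])))  # [[0. 0. 1.]]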
def _map_drop_idx_to_infrequent(self, feature_idx, drop_idx): """Convert `drop_idx` into the index for infrequent categories. If there are no infrequent categories, then `drop_idx` is returned. This method is called in `_set_drop_idx` when the `drop` parameter is an array-like. """ if not self._infrequent_enabled: return drop_idx default_to_infrequent = self._default_to_infrequent_mappings[feature_idx] if default_to_infrequent is None: return drop_idx # Raise error when explicitly dropping a category that is infrequent infrequent_indices = self._infrequent_indices[feature_idx] if infrequent_indices is not None and drop_idx in infrequent_indices: categories = self.categories_[feature_idx] raise ValueError( f"Unable to drop category {categories[drop_idx].item()!r} from" f" feature {feature_idx} because it is infrequent" ) return default_to_infrequent[drop_idx]
Convert `drop_idx` into the index for infrequent categories. If there are no infrequent categories, then `drop_idx` is returned. This method is called in `_set_drop_idx` when the `drop` parameter is an array-like.
_map_drop_idx_to_infrequent
python
scikit-learn/scikit-learn
sklearn/preprocessing/_encoders.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/_encoders.py
BSD-3-Clause
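The guard above surfaces as a `ValueError` when an explicit `drop` list names a category that ended up infrequent. A sketch of that failure mode (values are illustrative):

import numpy as np
from sklearn.preprocessing import OneHotEncoder

X = np.array([["a"] * 5 + ["b"] * 5 + ["c"]]).T  # "c" becomes infrequent
try:
    OneHotEncoder(min_frequency=2, drop=["c"]).fit(X)
except ValueError as exc:
    print(exc)  # Unable to drop category 'c' from feature 0 because it is infrequent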
def _set_drop_idx(self):
        """Compute the drop indices associated with `self.categories_`.

        If `self.drop` is:
        - `None`, No categories have been dropped.
        - `'first'`, All zeros to drop the first category.
        - `'if_binary'`, All zeros if the category is binary and `None`
          otherwise.
        - array-like, The indices of the categories that match the
          categories in `self.drop`. If the dropped category is an infrequent
          category, then the index for the infrequent category is used. This
          means that the entire infrequent category is dropped.

        This method defines a public `drop_idx_` and a private
        `_drop_idx_after_grouping`.

        - `drop_idx_`: Public facing API that references the drop category in
          `self.categories_`.
        - `_drop_idx_after_grouping`: Used internally to drop categories *after* the
          infrequent categories are grouped together.

        If there are no infrequent categories or drop is `None`, then
        `drop_idx_=_drop_idx_after_grouping`.
        """
        if self.drop is None:
            drop_idx_after_grouping = None
        elif isinstance(self.drop, str):
            if self.drop == "first":
                drop_idx_after_grouping = np.zeros(len(self.categories_), dtype=object)
            elif self.drop == "if_binary":
                n_features_out_no_drop = [len(cat) for cat in self.categories_]
                if self._infrequent_enabled:
                    for i, infreq_idx in enumerate(self._infrequent_indices):
                        if infreq_idx is None:
                            continue
                        n_features_out_no_drop[i] -= infreq_idx.size - 1

                drop_idx_after_grouping = np.array(
                    [
                        0 if n_features_out == 2 else None
                        for n_features_out in n_features_out_no_drop
                    ],
                    dtype=object,
                )
        else:
            drop_array = np.asarray(self.drop, dtype=object)
            droplen = len(drop_array)

            if droplen != len(self.categories_):
                msg = (
                    "`drop` should have length equal to the number "
                    "of features ({}), got {}"
                )
                raise ValueError(msg.format(len(self.categories_), droplen))
            missing_drops = []
            drop_indices = []

            for feature_idx, (drop_val, cat_list) in enumerate(
                zip(drop_array, self.categories_)
            ):
                if not is_scalar_nan(drop_val):
                    drop_idx = np.where(cat_list == drop_val)[0]
                    if drop_idx.size:  # found drop idx
                        drop_indices.append(
                            self._map_drop_idx_to_infrequent(feature_idx, drop_idx[0])
                        )
                    else:
                        missing_drops.append((feature_idx, drop_val))
                    continue

                # drop_val is nan, find nan in categories manually
                if is_scalar_nan(cat_list[-1]):
                    drop_indices.append(
                        self._map_drop_idx_to_infrequent(feature_idx, cat_list.size - 1)
                    )
                else:  # nan is missing
                    missing_drops.append((feature_idx, drop_val))

            if any(missing_drops):
                msg = (
                    "The following categories were supposed to be "
                    "dropped, but were not found in the training "
                    "data.\n{}".format(
                        "\n".join(
                            [
                                "Category: {}, Feature: {}".format(c, v)
                                for c, v in missing_drops
                            ]
                        )
                    )
                )
                raise ValueError(msg)
            drop_idx_after_grouping = np.array(drop_indices, dtype=object)

        # `_drop_idx_after_grouping` are the categories to drop *after* the infrequent
        # categories are grouped together. If needed, we remap `drop_idx` back
        # to the categories seen in `self.categories_`.
        self._drop_idx_after_grouping = drop_idx_after_grouping

        if not self._infrequent_enabled or drop_idx_after_grouping is None:
            self.drop_idx_ = self._drop_idx_after_grouping
        else:
            drop_idx_ = []
            for feature_idx, drop_idx in enumerate(drop_idx_after_grouping):
                default_to_infrequent = self._default_to_infrequent_mappings[
                    feature_idx
                ]
                if drop_idx is None or default_to_infrequent is None:
                    orig_drop_idx = drop_idx
                else:
                    orig_drop_idx = np.flatnonzero(default_to_infrequent == drop_idx)[0]

                drop_idx_.append(orig_drop_idx)

            self.drop_idx_ = np.asarray(drop_idx_, dtype=object)
Compute the drop indices associated with `self.categories_`.

        If `self.drop` is:
        - `None`, No categories have been dropped.
        - `'first'`, All zeros to drop the first category.
        - `'if_binary'`, All zeros if the category is binary and `None`
          otherwise.
        - array-like, The indices of the categories that match the
          categories in `self.drop`. If the dropped category is an infrequent
          category, then the index for the infrequent category is used. This
          means that the entire infrequent category is dropped.

        This method defines a public `drop_idx_` and a private
        `_drop_idx_after_grouping`.

        - `drop_idx_`: Public facing API that references the drop category in
          `self.categories_`.
        - `_drop_idx_after_grouping`: Used internally to drop categories *after* the
          infrequent categories are grouped together.

        If there are no infrequent categories or drop is `None`, then
        `drop_idx_=_drop_idx_after_grouping`.
_set_drop_idx
python
scikit-learn/scikit-learn
sklearn/preprocessing/_encoders.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/_encoders.py
BSD-3-Clause
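The public outcome of `_set_drop_idx` is the fitted `drop_idx_` attribute. A quick sketch contrasting `drop='first'` with `drop='if_binary'` (illustrative, not from the source record):

import numpy as np
from sklearn.preprocessing import OneHotEncoder

X = np.array([["a", "x"], ["b", "y"], ["c", "x"]], dtype=object)
# 'first' drops one column per feature; 'if_binary' only drops for
# two-category features, so feature 0 (three categories) keeps all columns.
print(OneHotEncoder(drop="first").fit(X).drop_idx_)      # [0 0]
print(OneHotEncoder(drop="if_binary").fit(X).drop_idx_)  # [None 0]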
def _compute_transformed_categories(self, i, remove_dropped=True): """Compute the transformed categories used for column `i`. 1. If there are infrequent categories, the category is named 'infrequent_sklearn'. 2. Dropped columns are removed when remove_dropped=True. """ cats = self.categories_[i] if self._infrequent_enabled: infreq_map = self._default_to_infrequent_mappings[i] if infreq_map is not None: frequent_mask = infreq_map < infreq_map.max() infrequent_cat = "infrequent_sklearn" # infrequent category is always at the end cats = np.concatenate( (cats[frequent_mask], np.array([infrequent_cat], dtype=object)) ) if remove_dropped: cats = self._remove_dropped_categories(cats, i) return cats
Compute the transformed categories used for column `i`. 1. If there are infrequent categories, the category is named 'infrequent_sklearn'. 2. Dropped columns are removed when remove_dropped=True.
_compute_transformed_categories
python
scikit-learn/scikit-learn
sklearn/preprocessing/_encoders.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/_encoders.py
BSD-3-Clause
def _compute_n_features_outs(self): """Compute the n_features_out for each input feature.""" output = [len(cats) for cats in self.categories_] if self._drop_idx_after_grouping is not None: for i, drop_idx in enumerate(self._drop_idx_after_grouping): if drop_idx is not None: output[i] -= 1 if not self._infrequent_enabled: return output # infrequent is enabled, the number of features out are reduced # because the infrequent categories are grouped together for i, infreq_idx in enumerate(self._infrequent_indices): if infreq_idx is None: continue output[i] -= infreq_idx.size - 1 return output
Compute the n_features_out for each input feature.
_compute_n_features_outs
python
scikit-learn/scikit-learn
sklearn/preprocessing/_encoders.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/_encoders.py
BSD-3-Clause
def fit(self, X, y=None): """ Fit OneHotEncoder to X. Parameters ---------- X : array-like of shape (n_samples, n_features) The data to determine the categories of each feature. y : None Ignored. This parameter exists only for compatibility with :class:`~sklearn.pipeline.Pipeline`. Returns ------- self Fitted encoder. """ self._fit( X, handle_unknown=self.handle_unknown, ensure_all_finite="allow-nan", ) self._set_drop_idx() self._n_features_outs = self._compute_n_features_outs() return self
Fit OneHotEncoder to X. Parameters ---------- X : array-like of shape (n_samples, n_features) The data to determine the categories of each feature. y : None Ignored. This parameter exists only for compatibility with :class:`~sklearn.pipeline.Pipeline`. Returns ------- self Fitted encoder.
fit
python
scikit-learn/scikit-learn
sklearn/preprocessing/_encoders.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/_encoders.py
BSD-3-Clause
def transform(self, X):
        """
        Transform X using one-hot encoding.

        If `sparse_output=True` (default), it returns an instance of
        :class:`scipy.sparse._csr.csr_matrix` (CSR format).

        If there are infrequent categories for a feature, set by specifying
        `max_categories` or `min_frequency`, the infrequent categories are
        grouped into a single category.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            The data to encode.

        Returns
        -------
        X_out : {ndarray, sparse matrix} of shape \
                (n_samples, n_encoded_features)
            Transformed input. If `sparse_output=True`, a sparse matrix will be
            returned.
        """
        check_is_fitted(self)
        transform_output = _get_output_config("transform", estimator=self)["dense"]
        if transform_output != "default" and self.sparse_output:
            capitalize_transform_output = transform_output.capitalize()
            raise ValueError(
                f"{capitalize_transform_output} output does not support sparse data."
                f" Set sparse_output=False to output {transform_output} dataframes or"
                f" disable {capitalize_transform_output} output via"
                ' `ohe.set_output(transform="default")`.'
            )

        # validation of X happens in _check_X called by _transform
        if self.handle_unknown == "warn":
            warn_on_unknown, handle_unknown = True, "infrequent_if_exist"
        else:
            warn_on_unknown = self.drop is not None and self.handle_unknown in {
                "ignore",
                "infrequent_if_exist",
            }
            handle_unknown = self.handle_unknown
        X_int, X_mask = self._transform(
            X,
            handle_unknown=handle_unknown,
            ensure_all_finite="allow-nan",
            warn_on_unknown=warn_on_unknown,
        )

        n_samples, n_features = X_int.shape

        if self._drop_idx_after_grouping is not None:
            to_drop = self._drop_idx_after_grouping.copy()
            # We remove all the dropped categories from mask, and decrement all
            # categories that occur after them to avoid an empty column.
            keep_cells = X_int != to_drop
            for i, cats in enumerate(self.categories_):
                # drop='if_binary' but feature isn't binary
                if to_drop[i] is None:
                    # set to cardinality to not drop from X_int
                    to_drop[i] = len(cats)

            to_drop = to_drop.reshape(1, -1)
            X_int[X_int > to_drop] -= 1
            X_mask &= keep_cells

        mask = X_mask.ravel()
        feature_indices = np.cumsum([0] + self._n_features_outs)
        indices = (X_int + feature_indices[:-1]).ravel()[mask]

        indptr = np.empty(n_samples + 1, dtype=int)
        indptr[0] = 0
        np.sum(X_mask, axis=1, out=indptr[1:], dtype=indptr.dtype)
        np.cumsum(indptr[1:], out=indptr[1:])
        data = np.ones(indptr[-1])

        out = sparse.csr_matrix(
            (data, indices, indptr),
            shape=(n_samples, feature_indices[-1]),
            dtype=self.dtype,
        )
        if not self.sparse_output:
            return out.toarray()
        else:
            return out
Transform X using one-hot encoding. If `sparse_output=True` (default), it returns an instance of :class:`scipy.sparse._csr.csr_matrix` (CSR format). If there are infrequent categories for a feature, set by specifying `max_categories` or `min_frequency`, the infrequent categories are grouped into a single category. Parameters ---------- X : array-like of shape (n_samples, n_features) The data to encode. Returns ------- X_out : {ndarray, sparse matrix} of shape (n_samples, n_encoded_features) Transformed input. If `sparse_output=True`, a sparse matrix will be returned.
transform
python
scikit-learn/scikit-learn
sklearn/preprocessing/_encoders.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/_encoders.py
BSD-3-Clause
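A usage sketch of the CSR assembly above, seen from the public side (illustrative): the default output is a sparse CSR matrix, and `sparse_output=False` yields the dense equivalent.

import numpy as np
from scipy import sparse
from sklearn.preprocessing import OneHotEncoder

X = np.array([["red"], ["green"], ["red"]], dtype=object)
Xt = OneHotEncoder().fit_transform(X)  # sparse_output=True by default
print(sparse.issparse(Xt), Xt.shape)   # True (3, 2)
print(Xt.toarray())                    # columns follow sorted categories
# [[0. 1.]                             # ['green', 'red']
#  [1. 0.]
#  [0. 1.]]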
def inverse_transform(self, X): """ Convert the data back to the original representation. When unknown categories are encountered (all zeros in the one-hot encoding), ``None`` is used to represent this category. If the feature with the unknown category has a dropped category, the dropped category will be its inverse. For a given input feature, if there is an infrequent category, 'infrequent_sklearn' will be used to represent the infrequent category. Parameters ---------- X : {array-like, sparse matrix} of shape \ (n_samples, n_encoded_features) The transformed data. Returns ------- X_original : ndarray of shape (n_samples, n_features) Inverse transformed array. """ check_is_fitted(self) X = check_array(X, accept_sparse="csr") n_samples, _ = X.shape n_features = len(self.categories_) n_features_out = np.sum(self._n_features_outs) # validate shape of passed X msg = ( "Shape of the passed X data is not correct. Expected {0} columns, got {1}." ) if X.shape[1] != n_features_out: raise ValueError(msg.format(n_features_out, X.shape[1])) transformed_features = [ self._compute_transformed_categories(i, remove_dropped=False) for i, _ in enumerate(self.categories_) ] # create resulting array of appropriate dtype dt = np.result_type(*[cat.dtype for cat in transformed_features]) X_tr = np.empty((n_samples, n_features), dtype=dt) j = 0 found_unknown = {} if self._infrequent_enabled: infrequent_indices = self._infrequent_indices else: infrequent_indices = [None] * n_features for i in range(n_features): cats_wo_dropped = self._remove_dropped_categories( transformed_features[i], i ) n_categories = cats_wo_dropped.shape[0] # Only happens if there was a column with a unique # category. In this case we just fill the column with this # unique category value. if n_categories == 0: X_tr[:, i] = self.categories_[i][self._drop_idx_after_grouping[i]] j += n_categories continue sub = X[:, j : j + n_categories] # for sparse X argmax returns 2D matrix, ensure 1D array labels = np.asarray(sub.argmax(axis=1)).flatten() X_tr[:, i] = cats_wo_dropped[labels] if self.handle_unknown == "ignore" or ( self.handle_unknown in ("infrequent_if_exist", "warn") and infrequent_indices[i] is None ): unknown = np.asarray(sub.sum(axis=1) == 0).flatten() # ignored unknown categories: we have a row of all zero if unknown.any(): # if categories were dropped then unknown categories will # be mapped to the dropped category if ( self._drop_idx_after_grouping is None or self._drop_idx_after_grouping[i] is None ): found_unknown[i] = unknown else: X_tr[unknown, i] = self.categories_[i][ self._drop_idx_after_grouping[i] ] else: dropped = np.asarray(sub.sum(axis=1) == 0).flatten() if dropped.any(): if self._drop_idx_after_grouping is None: all_zero_samples = np.flatnonzero(dropped) raise ValueError( f"Samples {all_zero_samples} can not be inverted " "when drop=None and handle_unknown='error' " "because they contain all zeros" ) # we can safely assume that all of the nulls in each column # are the dropped value drop_idx = self._drop_idx_after_grouping[i] X_tr[dropped, i] = transformed_features[i][drop_idx] j += n_categories # if ignored are found: potentially need to upcast result to # insert None values if found_unknown: if X_tr.dtype != object: X_tr = X_tr.astype(object) for idx, mask in found_unknown.items(): X_tr[mask, idx] = None return X_tr
Convert the data back to the original representation. When unknown categories are encountered (all zeros in the one-hot encoding), ``None`` is used to represent this category. If the feature with the unknown category has a dropped category, the dropped category will be its inverse. For a given input feature, if there is an infrequent category, 'infrequent_sklearn' will be used to represent the infrequent category. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_encoded_features) The transformed data. Returns ------- X_original : ndarray of shape (n_samples, n_features) Inverse transformed array.
inverse_transform
python
scikit-learn/scikit-learn
sklearn/preprocessing/_encoders.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/_encoders.py
BSD-3-Clause
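The all-zero-row handling above means unknown samples invert to `None` when nothing was dropped. A round-trip sketch (illustrative):

import numpy as np
from sklearn.preprocessing import OneHotEncoder

X = np.array([["a"], ["b"]], dtype=object)
enc = OneHotEncoder(handle_unknown="ignore", sparse_output=False).fit(X)
Xt = enc.transform(np.array([["a"], ["z"]], dtype=object))  # "z" -> all zeros
print(Xt)                         # [[1. 0.] [0. 0.]]
print(enc.inverse_transform(Xt))  # [['a'] [None]]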
def get_feature_names_out(self, input_features=None): """Get output feature names for transformation. Parameters ---------- input_features : array-like of str or None, default=None Input features. - If `input_features` is `None`, then `feature_names_in_` is used as feature names in. If `feature_names_in_` is not defined, then the following input feature names are generated: `["x0", "x1", ..., "x(n_features_in_ - 1)"]`. - If `input_features` is an array-like, then `input_features` must match `feature_names_in_` if `feature_names_in_` is defined. Returns ------- feature_names_out : ndarray of str objects Transformed feature names. """ check_is_fitted(self) input_features = _check_feature_names_in(self, input_features) cats = [ self._compute_transformed_categories(i) for i, _ in enumerate(self.categories_) ] name_combiner = self._check_get_feature_name_combiner() feature_names = [] for i in range(len(cats)): names = [name_combiner(input_features[i], t) for t in cats[i]] feature_names.extend(names) return np.array(feature_names, dtype=object)
Get output feature names for transformation. Parameters ---------- input_features : array-like of str or None, default=None Input features. - If `input_features` is `None`, then `feature_names_in_` is used as feature names in. If `feature_names_in_` is not defined, then the following input feature names are generated: `["x0", "x1", ..., "x(n_features_in_ - 1)"]`. - If `input_features` is an array-like, then `input_features` must match `feature_names_in_` if `feature_names_in_` is defined. Returns ------- feature_names_out : ndarray of str objects Transformed feature names.
get_feature_names_out
python
scikit-learn/scikit-learn
sklearn/preprocessing/_encoders.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/_encoders.py
BSD-3-Clause
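Each output name is the input feature name combined with a (possibly grouped) category. A short sketch using a pandas DataFrame so the column names flow through (illustrative):

import pandas as pd
from sklearn.preprocessing import OneHotEncoder

df = pd.DataFrame({"color": ["red", "green"], "size": ["S", "M"]})
print(OneHotEncoder().fit(df).get_feature_names_out())
# ['color_green' 'color_red' 'size_M' 'size_S']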
def fit(self, X, y=None): """ Fit the OrdinalEncoder to X. Parameters ---------- X : array-like of shape (n_samples, n_features) The data to determine the categories of each feature. y : None Ignored. This parameter exists only for compatibility with :class:`~sklearn.pipeline.Pipeline`. Returns ------- self : object Fitted encoder. """ if self.handle_unknown == "use_encoded_value": if is_scalar_nan(self.unknown_value): if np.dtype(self.dtype).kind != "f": raise ValueError( "When unknown_value is np.nan, the dtype " "parameter should be " f"a float dtype. Got {self.dtype}." ) elif not isinstance(self.unknown_value, numbers.Integral): raise TypeError( "unknown_value should be an integer or " "np.nan when " "handle_unknown is 'use_encoded_value', " f"got {self.unknown_value}." ) elif self.unknown_value is not None: raise TypeError( "unknown_value should only be set when " "handle_unknown is 'use_encoded_value', " f"got {self.unknown_value}." ) # `_fit` will only raise an error when `self.handle_unknown="error"` fit_results = self._fit( X, handle_unknown=self.handle_unknown, ensure_all_finite="allow-nan", return_and_ignore_missing_for_infrequent=True, ) self._missing_indices = fit_results["missing_indices"] cardinalities = [len(categories) for categories in self.categories_] if self._infrequent_enabled: # Cardinality decreases because the infrequent categories are grouped # together for feature_idx, infrequent in enumerate(self.infrequent_categories_): if infrequent is not None: cardinalities[feature_idx] -= len(infrequent) # missing values are not considered part of the cardinality # when considering unknown categories or encoded_missing_value for cat_idx, categories_for_idx in enumerate(self.categories_): if is_scalar_nan(categories_for_idx[-1]): cardinalities[cat_idx] -= 1 if self.handle_unknown == "use_encoded_value": for cardinality in cardinalities: if 0 <= self.unknown_value < cardinality: raise ValueError( "The used value for unknown_value " f"{self.unknown_value} is one of the " "values already used for encoding the " "seen categories." ) if self._missing_indices: if np.dtype(self.dtype).kind != "f" and is_scalar_nan( self.encoded_missing_value ): raise ValueError( "There are missing values in features " f"{list(self._missing_indices)}. For OrdinalEncoder to " f"encode missing values with dtype: {self.dtype}, set " "encoded_missing_value to a non-nan value, or " "set dtype to a float" ) if not is_scalar_nan(self.encoded_missing_value): # Features are invalid when they contain a missing category # and encoded_missing_value was already used to encode a # known category invalid_features = [ cat_idx for cat_idx, cardinality in enumerate(cardinalities) if cat_idx in self._missing_indices and 0 <= self.encoded_missing_value < cardinality ] if invalid_features: # Use feature names if they are available if hasattr(self, "feature_names_in_"): invalid_features = self.feature_names_in_[invalid_features] raise ValueError( f"encoded_missing_value ({self.encoded_missing_value}) " "is already used to encode a known category in features: " f"{invalid_features}" ) return self
Fit the OrdinalEncoder to X. Parameters ---------- X : array-like of shape (n_samples, n_features) The data to determine the categories of each feature. y : None Ignored. This parameter exists only for compatibility with :class:`~sklearn.pipeline.Pipeline`. Returns ------- self : object Fitted encoder.
fit
python
scikit-learn/scikit-learn
sklearn/preprocessing/_encoders.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/_encoders.py
BSD-3-Clause
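The `unknown_value` validation above pairs with `handle_unknown='use_encoded_value'` at transform time. A minimal sketch (illustrative):

import numpy as np
from sklearn.preprocessing import OrdinalEncoder

X = np.array([["high"], ["low"], ["low"]], dtype=object)
enc = OrdinalEncoder(handle_unknown="use_encoded_value", unknown_value=-1).fit(X)
# "medium" was never seen, so it receives unknown_value instead of erroring.
print(enc.transform(np.array([["high"], ["medium"]], dtype=object)))
# [[ 0.] [-1.]]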
def transform(self, X): """ Transform X to ordinal codes. Parameters ---------- X : array-like of shape (n_samples, n_features) The data to encode. Returns ------- X_out : ndarray of shape (n_samples, n_features) Transformed input. """ check_is_fitted(self, "categories_") X_int, X_mask = self._transform( X, handle_unknown=self.handle_unknown, ensure_all_finite="allow-nan", ignore_category_indices=self._missing_indices, ) X_trans = X_int.astype(self.dtype, copy=False) for cat_idx, missing_idx in self._missing_indices.items(): X_missing_mask = X_int[:, cat_idx] == missing_idx X_trans[X_missing_mask, cat_idx] = self.encoded_missing_value # create separate category for unknown values if self.handle_unknown == "use_encoded_value": X_trans[~X_mask] = self.unknown_value return X_trans
Transform X to ordinal codes. Parameters ---------- X : array-like of shape (n_samples, n_features) The data to encode. Returns ------- X_out : ndarray of shape (n_samples, n_features) Transformed input.
transform
python
scikit-learn/scikit-learn
sklearn/preprocessing/_encoders.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/_encoders.py
BSD-3-Clause
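Missing values follow the `encoded_missing_value` branch above rather than the ordinary category codes. A sketch (illustrative; missing-value support assumes scikit-learn >= 1.1):

import numpy as np
from sklearn.preprocessing import OrdinalEncoder

X = np.array([["a"], ["b"], [np.nan]], dtype=object)
enc = OrdinalEncoder(encoded_missing_value=-2).fit(X)
print(enc.transform(X))
# [[ 0.] [ 1.] [-2.]]   # np.nan is encoded apart from the seen categories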
def inverse_transform(self, X): """ Convert the data back to the original representation. Parameters ---------- X : array-like of shape (n_samples, n_encoded_features) The transformed data. Returns ------- X_original : ndarray of shape (n_samples, n_features) Inverse transformed array. """ check_is_fitted(self) X = check_array(X, ensure_all_finite="allow-nan") n_samples, _ = X.shape n_features = len(self.categories_) # validate shape of passed X msg = ( "Shape of the passed X data is not correct. Expected {0} columns, got {1}." ) if X.shape[1] != n_features: raise ValueError(msg.format(n_features, X.shape[1])) # create resulting array of appropriate dtype dt = np.result_type(*[cat.dtype for cat in self.categories_]) X_tr = np.empty((n_samples, n_features), dtype=dt) found_unknown = {} infrequent_masks = {} infrequent_indices = getattr(self, "_infrequent_indices", None) for i in range(n_features): labels = X[:, i] # replace values of X[:, i] that were nan with actual indices if i in self._missing_indices: X_i_mask = _get_mask(labels, self.encoded_missing_value) labels[X_i_mask] = self._missing_indices[i] rows_to_update = slice(None) categories = self.categories_[i] if infrequent_indices is not None and infrequent_indices[i] is not None: # Compute mask for frequent categories infrequent_encoding_value = len(categories) - len(infrequent_indices[i]) infrequent_masks[i] = labels == infrequent_encoding_value rows_to_update = ~infrequent_masks[i] # Remap categories to be only frequent categories. The infrequent # categories will be mapped to "infrequent_sklearn" later frequent_categories_mask = np.ones_like(categories, dtype=bool) frequent_categories_mask[infrequent_indices[i]] = False categories = categories[frequent_categories_mask] if self.handle_unknown == "use_encoded_value": unknown_labels = _get_mask(labels, self.unknown_value) found_unknown[i] = unknown_labels known_labels = ~unknown_labels if isinstance(rows_to_update, np.ndarray): rows_to_update &= known_labels else: rows_to_update = known_labels labels_int = labels[rows_to_update].astype("int64", copy=False) X_tr[rows_to_update, i] = categories[labels_int] if found_unknown or infrequent_masks: X_tr = X_tr.astype(object, copy=False) # insert None values for unknown values if found_unknown: for idx, mask in found_unknown.items(): X_tr[mask, idx] = None if infrequent_masks: for idx, mask in infrequent_masks.items(): X_tr[mask, idx] = "infrequent_sklearn" return X_tr
Convert the data back to the original representation. Parameters ---------- X : array-like of shape (n_samples, n_encoded_features) The transformed data. Returns ------- X_original : ndarray of shape (n_samples, n_features) Inverse transformed array.
inverse_transform
python
scikit-learn/scikit-learn
sklearn/preprocessing/_encoders.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/_encoders.py
BSD-3-Clause
def _check_inverse_transform(self, X):
        """Check that func and inverse_func are inverses of each other."""
        idx_selected = slice(None, None, max(1, X.shape[0] // 100))
        X_round_trip = self.inverse_transform(self.transform(X[idx_selected]))

        if hasattr(X, "dtype"):
            dtypes = [X.dtype]
        elif hasattr(X, "dtypes"):
            # Dataframes can have multiple dtypes
            dtypes = X.dtypes

        if not all(np.issubdtype(d, np.number) for d in dtypes):
            raise ValueError(
                "'check_inverse' is only supported when all the elements in `X` are"
                " numerical."
            )

        if not _allclose_dense_sparse(X[idx_selected], X_round_trip):
            warnings.warn(
                (
                    "The provided functions are not strictly"
                    " inverse of each other. If you are sure you"
                    " want to proceed regardless, set"
                    " 'check_inverse=False'."
                ),
                UserWarning,
            )
Check that func and inverse_func are inverses of each other.
_check_inverse_transform
python
scikit-learn/scikit-learn
sklearn/preprocessing/_function_transformer.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/_function_transformer.py
BSD-3-Clause
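The round-trip check above is what `check_inverse=True` triggers during `fit`. A sketch of both outcomes (illustrative):

import warnings
import numpy as np
from sklearn.preprocessing import FunctionTransformer

X = np.arange(1.0, 6.0).reshape(-1, 1)
# log1p/expm1 are exact inverses, so this fit passes the check silently.
FunctionTransformer(np.log1p, np.expm1, check_inverse=True).fit(X)
# exp is not the inverse of log1p, so the UserWarning above is emitted.
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    FunctionTransformer(np.log1p, np.exp, check_inverse=True).fit(X)
print(caught[0].message)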
def fit(self, X, y=None): """Fit transformer by checking X. If ``validate`` is ``True``, ``X`` will be checked. Parameters ---------- X : {array-like, sparse-matrix} of shape (n_samples, n_features) \ if `validate=True` else any object that `func` can handle Input array. y : Ignored Not used, present here for API consistency by convention. Returns ------- self : object FunctionTransformer class instance. """ X = self._check_input(X, reset=True) if self.check_inverse and not (self.func is None or self.inverse_func is None): self._check_inverse_transform(X) return self
Fit transformer by checking X. If ``validate`` is ``True``, ``X`` will be checked. Parameters ---------- X : {array-like, sparse-matrix} of shape (n_samples, n_features) if `validate=True` else any object that `func` can handle Input array. y : Ignored Not used, present here for API consistency by convention. Returns ------- self : object FunctionTransformer class instance.
fit
python
scikit-learn/scikit-learn
sklearn/preprocessing/_function_transformer.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/_function_transformer.py
BSD-3-Clause
def transform(self, X):
        """Transform X using the forward function.

        Parameters
        ----------
        X : {array-like, sparse-matrix} of shape (n_samples, n_features) \
                if `validate=True` else any object that `func` can handle
            Input array.

        Returns
        -------
        X_out : array-like, shape (n_samples, n_features)
            Transformed input.
        """
        X = self._check_input(X, reset=False)
        out = self._transform(X, func=self.func, kw_args=self.kw_args)
        output_config = _get_output_config("transform", self)["dense"]

        if hasattr(out, "columns") and self.feature_names_out is not None:
            # check the consistency between the column provided by `transform` and
            # the column names provided by `get_feature_names_out`.
            feature_names_out = self.get_feature_names_out()
            if list(out.columns) != list(feature_names_out):
                # we can override the column names of the output if it is inconsistent
                # with the column names provided by `get_feature_names_out` in the
                # following cases:
                # * `func` preserved the column names between the input and the output
                # * the input column names are all numbers
                # * the output is requested to be a DataFrame (pandas or polars)
                feature_names_in = getattr(
                    X, "feature_names_in_", _get_feature_names(X)
                )
                same_feature_names_in_out = feature_names_in is not None and list(
                    feature_names_in
                ) == list(out.columns)
                not_all_str_columns = not all(
                    isinstance(col, str) for col in out.columns
                )
                if same_feature_names_in_out or not_all_str_columns:
                    adapter = _get_adapter_from_container(out)
                    out = adapter.create_container(
                        X_output=out,
                        X_original=out,
                        columns=feature_names_out,
                        inplace=False,
                    )
                else:
                    raise ValueError(
                        "The output generated by `func` has different column names "
                        "than the ones provided by `get_feature_names_out`. "
                        f"Got output with column names: {list(out.columns)} and "
                        "`get_feature_names_out` returned: "
                        f"{list(self.get_feature_names_out())}. "
                        "The column names can be overridden by setting "
                        "`set_output(transform='pandas')` or "
                        "`set_output(transform='polars')` such that the column names "
                        "are set to the names provided by `get_feature_names_out`."
                    )

        if self.feature_names_out is None:
            warn_msg = (
                "When `set_output` is configured to be '{0}', `func` should return "
                "a {0} DataFrame to follow the `set_output` API or `feature_names_out`"
                " should be defined."
            )
            if output_config == "pandas" and not _is_pandas_df(out):
                warnings.warn(warn_msg.format("pandas"))
            elif output_config == "polars" and not _is_polars_df(out):
                warnings.warn(warn_msg.format("polars"))

        return out
Transform X using the forward function. Parameters ---------- X : {array-like, sparse-matrix} of shape (n_samples, n_features) if `validate=True` else any object that `func` can handle Input array. Returns ------- X_out : array-like, shape (n_samples, n_features) Transformed input.
transform
python
scikit-learn/scikit-learn
sklearn/preprocessing/_function_transformer.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/_function_transformer.py
BSD-3-Clause
def inverse_transform(self, X): """Transform X using the inverse function. Parameters ---------- X : {array-like, sparse-matrix} of shape (n_samples, n_features) \ if `validate=True` else any object that `inverse_func` can handle Input array. Returns ------- X_original : array-like, shape (n_samples, n_features) Transformed input. """ if self.validate: X = check_array(X, accept_sparse=self.accept_sparse) return self._transform(X, func=self.inverse_func, kw_args=self.inv_kw_args)
Transform X using the inverse function. Parameters ---------- X : {array-like, sparse-matrix} of shape (n_samples, n_features) if `validate=True` else any object that `inverse_func` can handle Input array. Returns ------- X_original : array-like, shape (n_samples, n_features) Transformed input.
inverse_transform
python
scikit-learn/scikit-learn
sklearn/preprocessing/_function_transformer.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/_function_transformer.py
BSD-3-Clause
def get_feature_names_out(self, input_features=None): """Get output feature names for transformation. This method is only defined if `feature_names_out` is not None. Parameters ---------- input_features : array-like of str or None, default=None Input feature names. - If `input_features` is None, then `feature_names_in_` is used as the input feature names. If `feature_names_in_` is not defined, then names are generated: `[x0, x1, ..., x(n_features_in_ - 1)]`. - If `input_features` is array-like, then `input_features` must match `feature_names_in_` if `feature_names_in_` is defined. Returns ------- feature_names_out : ndarray of str objects Transformed feature names. - If `feature_names_out` is 'one-to-one', the input feature names are returned (see `input_features` above). This requires `feature_names_in_` and/or `n_features_in_` to be defined, which is done automatically if `validate=True`. Alternatively, you can set them in `func`. - If `feature_names_out` is a callable, then it is called with two arguments, `self` and `input_features`, and its return value is returned by this method. """ if hasattr(self, "n_features_in_") or input_features is not None: input_features = _check_feature_names_in(self, input_features) if self.feature_names_out == "one-to-one": names_out = input_features elif callable(self.feature_names_out): names_out = self.feature_names_out(self, input_features) else: raise ValueError( f"feature_names_out={self.feature_names_out!r} is invalid. " 'It must either be "one-to-one" or a callable with two ' "arguments: the function transformer and an array-like of " "input feature names. The callable must return an array-like " "of output feature names." ) return np.asarray(names_out, dtype=object)
Get output feature names for transformation. This method is only defined if `feature_names_out` is not None. Parameters ---------- input_features : array-like of str or None, default=None Input feature names. - If `input_features` is None, then `feature_names_in_` is used as the input feature names. If `feature_names_in_` is not defined, then names are generated: `[x0, x1, ..., x(n_features_in_ - 1)]`. - If `input_features` is array-like, then `input_features` must match `feature_names_in_` if `feature_names_in_` is defined. Returns ------- feature_names_out : ndarray of str objects Transformed feature names. - If `feature_names_out` is 'one-to-one', the input feature names are returned (see `input_features` above). This requires `feature_names_in_` and/or `n_features_in_` to be defined, which is done automatically if `validate=True`. Alternatively, you can set them in `func`. - If `feature_names_out` is a callable, then it is called with two arguments, `self` and `input_features`, and its return value is returned by this method.
get_feature_names_out
python
scikit-learn/scikit-learn
sklearn/preprocessing/_function_transformer.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/_function_transformer.py
BSD-3-Clause
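Both `feature_names_out` modes are visible from the public API. A sketch (illustrative):

import numpy as np
from sklearn.preprocessing import FunctionTransformer

X = np.arange(6).reshape(3, 2)
ft = FunctionTransformer(np.log1p, feature_names_out="one-to-one").fit(X)
print(ft.get_feature_names_out())  # ['x0' 'x1']
# A callable receives (transformer, input_features) and returns the new names.
ft2 = FunctionTransformer(
    np.log1p,
    feature_names_out=lambda transformer, names: [f"log1p_{n}" for n in names],
).fit(X)
print(ft2.get_feature_names_out())  # ['log1p_x0' 'log1p_x1']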
def set_output(self, *, transform=None): """Set output container. See :ref:`sphx_glr_auto_examples_miscellaneous_plot_set_output.py` for an example on how to use the API. Parameters ---------- transform : {"default", "pandas", "polars"}, default=None Configure output of `transform` and `fit_transform`. - `"default"`: Default output format of a transformer - `"pandas"`: DataFrame output - `"polars"`: Polars output - `None`: Transform configuration is unchanged .. versionadded:: 1.4 `"polars"` option was added. Returns ------- self : estimator instance Estimator instance. """ if not hasattr(self, "_sklearn_output_config"): self._sklearn_output_config = {} self._sklearn_output_config["transform"] = transform return self
Set output container. See :ref:`sphx_glr_auto_examples_miscellaneous_plot_set_output.py` for an example on how to use the API. Parameters ---------- transform : {"default", "pandas", "polars"}, default=None Configure output of `transform` and `fit_transform`. - `"default"`: Default output format of a transformer - `"pandas"`: DataFrame output - `"polars"`: Polars output - `None`: Transform configuration is unchanged .. versionadded:: 1.4 `"polars"` option was added. Returns ------- self : estimator instance Estimator instance.
set_output
python
scikit-learn/scikit-learn
sklearn/preprocessing/_function_transformer.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/_function_transformer.py
BSD-3-Clause
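A usage sketch of the stored configuration (illustrative): with `transform="pandas"`, the transformer's output is a DataFrame whose columns come from `get_feature_names_out`.

import numpy as np
import pandas as pd
from sklearn.preprocessing import FunctionTransformer

df = pd.DataFrame({"a": [1.0, 2.0], "b": [3.0, 4.0]})
ft = FunctionTransformer(np.log1p, feature_names_out="one-to-one")
out = ft.set_output(transform="pandas").fit_transform(df)
print(type(out).__name__, list(out.columns))  # DataFrame ['a', 'b']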
def _get_function_name(self):
        """Get the display name of `func` used in the HTML representation."""
        if hasattr(self.func, "__name__"):
            return self.func.__name__
        if isinstance(self.func, partial):
            return self.func.func.__name__
        return f"{self.func.__class__.__name__}(...)"
Get the display name of `func` used in the HTML representation.
_get_function_name
python
scikit-learn/scikit-learn
sklearn/preprocessing/_function_transformer.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/_function_transformer.py
BSD-3-Clause
def fit(self, y): """Fit label encoder. Parameters ---------- y : array-like of shape (n_samples,) Target values. Returns ------- self : returns an instance of self. Fitted label encoder. """ y = column_or_1d(y, warn=True) self.classes_ = _unique(y) return self
Fit label encoder. Parameters ---------- y : array-like of shape (n_samples,) Target values. Returns ------- self : returns an instance of self. Fitted label encoder.
fit
python
scikit-learn/scikit-learn
sklearn/preprocessing/_label.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/_label.py
BSD-3-Clause
def fit_transform(self, y): """Fit label encoder and return encoded labels. Parameters ---------- y : array-like of shape (n_samples,) Target values. Returns ------- y : array-like of shape (n_samples,) Encoded labels. """ y = column_or_1d(y, warn=True) self.classes_, y = _unique(y, return_inverse=True) return y
Fit label encoder and return encoded labels. Parameters ---------- y : array-like of shape (n_samples,) Target values. Returns ------- y : array-like of shape (n_samples,) Encoded labels.
fit_transform
python
scikit-learn/scikit-learn
sklearn/preprocessing/_label.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/_label.py
BSD-3-Clause
def transform(self, y): """Transform labels to normalized encoding. Parameters ---------- y : array-like of shape (n_samples,) Target values. Returns ------- y : array-like of shape (n_samples,) Labels as normalized encodings. """ check_is_fitted(self) xp, _ = get_namespace(y) y = column_or_1d(y, dtype=self.classes_.dtype, warn=True) # transform of empty array is empty array if _num_samples(y) == 0: return xp.asarray([]) return _encode(y, uniques=self.classes_)
Transform labels to normalized encoding. Parameters ---------- y : array-like of shape (n_samples,) Target values. Returns ------- y : array-like of shape (n_samples,) Labels as normalized encodings.
transform
python
scikit-learn/scikit-learn
sklearn/preprocessing/_label.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/_label.py
BSD-3-Clause
def inverse_transform(self, y): """Transform labels back to original encoding. Parameters ---------- y : array-like of shape (n_samples,) Target values. Returns ------- y_original : ndarray of shape (n_samples,) Original encoding. """ check_is_fitted(self) xp, _ = get_namespace(y) y = column_or_1d(y, warn=True) # inverse transform of empty array is empty array if _num_samples(y) == 0: return xp.asarray([]) diff = xpx.setdiff1d( y, xp.arange(self.classes_.shape[0], device=device(y)), xp=xp, ) if diff.shape[0]: raise ValueError("y contains previously unseen labels: %s" % str(diff)) y = xp.asarray(y) return xp.take(self.classes_, y, axis=0)
Transform labels back to original encoding. Parameters ---------- y : array-like of shape (n_samples,) Target values. Returns ------- y_original : ndarray of shape (n_samples,) Original encoding.
inverse_transform
python
scikit-learn/scikit-learn
sklearn/preprocessing/_label.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/_label.py
BSD-3-Clause
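Putting the four LabelEncoder methods together, a round-trip sketch (illustrative):

from sklearn.preprocessing import LabelEncoder

le = LabelEncoder()
codes = le.fit_transform(["paris", "tokyo", "paris", "amsterdam"])
print(le.classes_)                  # ['amsterdam' 'paris' 'tokyo']
print(codes)                        # [1 2 1 0]
print(le.inverse_transform(codes))  # ['paris' 'tokyo' 'paris' 'amsterdam']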
def fit(self, y):
        """Fit label binarizer.

        Parameters
        ----------
        y : ndarray of shape (n_samples,) or (n_samples, n_classes)
            Target values. The 2-d matrix should only contain 0 and 1,
            representing multilabel classification.

        Returns
        -------
        self : object
            Returns the instance itself.
        """
        if self.neg_label >= self.pos_label:
            raise ValueError(
                f"neg_label={self.neg_label} must be strictly less than "
                f"pos_label={self.pos_label}."
            )

        if self.sparse_output and (self.pos_label == 0 or self.neg_label != 0):
            raise ValueError(
                "Sparse binarization is only supported with non "
                "zero pos_label and zero neg_label, got "
                f"pos_label={self.pos_label} and neg_label={self.neg_label}"
            )

        self.y_type_ = type_of_target(y, input_name="y")

        if "multioutput" in self.y_type_:
            raise ValueError(
                "Multioutput target data is not supported with label binarization"
            )
        if _num_samples(y) == 0:
            raise ValueError("y has 0 samples: %r" % y)

        self.sparse_input_ = sp.issparse(y)
        self.classes_ = unique_labels(y)
        return self
Fit label binarizer.

        Parameters
        ----------
        y : ndarray of shape (n_samples,) or (n_samples, n_classes)
            Target values. The 2-d matrix should only contain 0 and 1,
            representing multilabel classification.

        Returns
        -------
        self : object
            Returns the instance itself.
fit
python
scikit-learn/scikit-learn
sklearn/preprocessing/_label.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/_label.py
BSD-3-Clause
def transform(self, y):
        """Transform multi-class labels to binary labels.

        The output of transform is sometimes referred to by some authors as
        the 1-of-K coding scheme.

        Parameters
        ----------
        y : {array, sparse matrix} of shape (n_samples,) or \
                (n_samples, n_classes)
            Target values. The 2-d matrix should only contain 0 and 1,
            representing multilabel classification. Sparse matrix can be
            CSR, CSC, COO, DOK, or LIL.

        Returns
        -------
        Y : {ndarray, sparse matrix} of shape (n_samples, n_classes)
            Shape will be (n_samples, 1) for binary problems. Sparse matrix
            will be of CSR format.
        """
        check_is_fitted(self)

        y_is_multilabel = type_of_target(y).startswith("multilabel")
        if y_is_multilabel and not self.y_type_.startswith("multilabel"):
            raise ValueError("The object was not fitted with multilabel input.")

        return label_binarize(
            y,
            classes=self.classes_,
            pos_label=self.pos_label,
            neg_label=self.neg_label,
            sparse_output=self.sparse_output,
        )
Transform multi-class labels to binary labels.

        The output of transform is sometimes referred to by some authors as
        the 1-of-K coding scheme.

        Parameters
        ----------
        y : {array, sparse matrix} of shape (n_samples,) or (n_samples, n_classes)
            Target values. The 2-d matrix should only contain 0 and 1,
            representing multilabel classification. Sparse matrix can be
            CSR, CSC, COO, DOK, or LIL.

        Returns
        -------
        Y : {ndarray, sparse matrix} of shape (n_samples, n_classes)
            Shape will be (n_samples, 1) for binary problems. Sparse matrix
            will be of CSR format.
transform
python
scikit-learn/scikit-learn
sklearn/preprocessing/_label.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/_label.py
BSD-3-Clause
def inverse_transform(self, Y, threshold=None):
        """Transform binary labels back to multi-class labels.

        Parameters
        ----------
        Y : {ndarray, sparse matrix} of shape (n_samples, n_classes)
            Target values. All sparse matrices are converted to CSR before
            inverse transformation.

        threshold : float, default=None
            Threshold used in the binary and multi-label cases.

            Use 0 when ``Y`` contains the output of :term:`decision_function`
            (classifier).
            Use 0.5 when ``Y`` contains the output of :term:`predict_proba`.

            If None, the threshold is assumed to be half way between
            neg_label and pos_label.

        Returns
        -------
        y_original : {ndarray, sparse matrix} of shape (n_samples,)
            Target values. Sparse matrix will be of CSR format.

        Notes
        -----
        In the case when the binary labels are fractional
        (probabilistic), :meth:`inverse_transform` chooses the class with the
        greatest value. Typically, this allows using the output of a
        linear model's :term:`decision_function` method directly as the input
        of :meth:`inverse_transform`.
        """
        check_is_fitted(self)

        if threshold is None:
            threshold = (self.pos_label + self.neg_label) / 2.0

        if self.y_type_ == "multiclass":
            y_inv = _inverse_binarize_multiclass(Y, self.classes_)
        else:
            y_inv = _inverse_binarize_thresholding(
                Y, self.y_type_, self.classes_, threshold
            )

        if self.sparse_input_:
            y_inv = sp.csr_matrix(y_inv)
        elif sp.issparse(y_inv):
            y_inv = y_inv.toarray()

        return y_inv
Transform binary labels back to multi-class labels.

        Parameters
        ----------
        Y : {ndarray, sparse matrix} of shape (n_samples, n_classes)
            Target values. All sparse matrices are converted to CSR before
            inverse transformation.

        threshold : float, default=None
            Threshold used in the binary and multi-label cases.

            Use 0 when ``Y`` contains the output of :term:`decision_function`
            (classifier).
            Use 0.5 when ``Y`` contains the output of :term:`predict_proba`.

            If None, the threshold is assumed to be half way between
            neg_label and pos_label.

        Returns
        -------
        y_original : {ndarray, sparse matrix} of shape (n_samples,)
            Target values. Sparse matrix will be of CSR format.

        Notes
        -----
        In the case when the binary labels are fractional
        (probabilistic), :meth:`inverse_transform` chooses the class with the
        greatest value. Typically, this allows using the output of a
        linear model's :term:`decision_function` method directly as the input
        of :meth:`inverse_transform`.
inverse_transform
python
scikit-learn/scikit-learn
sklearn/preprocessing/_label.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/_label.py
BSD-3-Clause
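A LabelBinarizer round trip tying the methods above together (illustrative): the multiclass inverse takes the argmax, so raw decision scores invert directly.

import numpy as np
from sklearn.preprocessing import LabelBinarizer

lb = LabelBinarizer(neg_label=-1, pos_label=1)
Y = lb.fit_transform([1, 2, 6, 4, 2])
print(lb.classes_)  # [1 2 4 6]
print(Y[0])         # [ 1 -1 -1 -1]
scores = np.array([[0.1, 0.9, -0.2, 0.0]])  # e.g. decision_function output
print(lb.inverse_transform(scores))         # [2]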
def label_binarize(y, *, classes, neg_label=0, pos_label=1, sparse_output=False):
    """Binarize labels in a one-vs-all fashion.

    Several regression and binary classification algorithms are
    available in scikit-learn. A simple way to extend these algorithms
    to the multi-class classification case is to use the so-called
    one-vs-all scheme.

    This function makes it possible to compute this transformation for a
    fixed set of class labels known ahead of time.

    Parameters
    ----------
    y : array-like or sparse matrix
        Sequence of integer labels or multilabel data to encode.

    classes : array-like of shape (n_classes,)
        Uniquely holds the label for each class.

    neg_label : int, default=0
        Value with which negative labels must be encoded.

    pos_label : int, default=1
        Value with which positive labels must be encoded.

    sparse_output : bool, default=False,
        Set to true if output binary array is desired in CSR sparse format.

    Returns
    -------
    Y : {ndarray, sparse matrix} of shape (n_samples, n_classes)
        Shape will be (n_samples, 1) for binary problems. Sparse matrix will
        be of CSR format.

    See Also
    --------
    LabelBinarizer : Class used to wrap the functionality of label_binarize
        and allow for fitting to classes independently of the transform
        operation.

    Examples
    --------
    >>> from sklearn.preprocessing import label_binarize
    >>> label_binarize([1, 6], classes=[1, 2, 4, 6])
    array([[1, 0, 0, 0],
           [0, 0, 0, 1]])

    The class ordering is preserved:

    >>> label_binarize([1, 6], classes=[1, 6, 4, 2])
    array([[1, 0, 0, 0],
           [0, 1, 0, 0]])

    Binary targets transform to a column vector

    >>> label_binarize(['yes', 'no', 'no', 'yes'], classes=['no', 'yes'])
    array([[1],
           [0],
           [0],
           [1]])
    """
    if not isinstance(y, list):
        # XXX Workaround that will be removed when list of list format is
        # dropped
        y = check_array(
            y, input_name="y", accept_sparse="csr", ensure_2d=False, dtype=None
        )
    else:
        if _num_samples(y) == 0:
            raise ValueError("y has 0 samples: %r" % y)
    if neg_label >= pos_label:
        raise ValueError(
            "neg_label={0} must be strictly less than pos_label={1}.".format(
                neg_label, pos_label
            )
        )

    if sparse_output and (pos_label == 0 or neg_label != 0):
        raise ValueError(
            "Sparse binarization is only supported with non "
            "zero pos_label and zero neg_label, got "
            "pos_label={0} and neg_label={1}"
            "".format(pos_label, neg_label)
        )

    # To account for pos_label == 0 in the dense case
    pos_switch = pos_label == 0
    if pos_switch:
        pos_label = -neg_label

    y_type = type_of_target(y)
    if "multioutput" in y_type:
        raise ValueError(
            "Multioutput target data is not supported with label binarization"
        )
    if y_type == "unknown":
        raise ValueError("The type of target data is not known")

    n_samples = y.shape[0] if sp.issparse(y) else len(y)
    n_classes = len(classes)
    classes = np.asarray(classes)

    if y_type == "binary":
        if n_classes == 1:
            if sparse_output:
                return sp.csr_matrix((n_samples, 1), dtype=int)
            else:
                Y = np.zeros((len(y), 1), dtype=int)
                Y += neg_label
                return Y
        elif len(classes) >= 3:
            y_type = "multiclass"

    sorted_class = np.sort(classes)
    if y_type == "multilabel-indicator":
        y_n_classes = y.shape[1] if hasattr(y, "shape") else len(y[0])
        if classes.size != y_n_classes:
            raise ValueError(
                "classes {0} mismatch with the labels {1} found in the data".format(
                    classes, unique_labels(y)
                )
            )

    if y_type in ("binary", "multiclass"):
        y = column_or_1d(y)

        # pick out the known labels from y
        y_in_classes = np.isin(y, classes)
        y_seen = y[y_in_classes]
        indices = np.searchsorted(sorted_class, y_seen)
        indptr = np.hstack((0, np.cumsum(y_in_classes)))

        data = np.empty_like(indices)
        data.fill(pos_label)
        Y = sp.csr_matrix((data, indices, indptr), shape=(n_samples, n_classes))
    elif y_type == "multilabel-indicator":
        Y = sp.csr_matrix(y)
        if pos_label != 1:
            data = np.empty_like(Y.data)
            data.fill(pos_label)
            Y.data = data
    else:
        raise ValueError(
            "%s target data is not supported with label binarization" % y_type
        )

    if not sparse_output:
        Y = Y.toarray()
        Y = Y.astype(int, copy=False)

        if neg_label != 0:
            Y[Y == 0] = neg_label

        if pos_switch:
            Y[Y == pos_label] = 0
    else:
        Y.data = Y.data.astype(int, copy=False)

    # preserve label ordering
    if np.any(classes != sorted_class):
        indices = np.searchsorted(sorted_class, classes)
        Y = Y[:, indices]

    if y_type == "binary":
        if sparse_output:
            Y = Y[:, [-1]]
        else:
            Y = Y[:, -1].reshape((-1, 1))

    return Y
Binarize labels in a one-vs-all fashion. Several regression and binary classification algorithms are available in scikit-learn. A simple way to extend these algorithms to the multi-class classification case is to use the so-called one-vs-all scheme. This function makes it possible to compute this transformation for a fixed set of class labels known ahead of time. Parameters ---------- y : array-like or sparse matrix Sequence of integer labels or multilabel data to encode. classes : array-like of shape (n_classes,) Uniquely holds the label for each class. neg_label : int, default=0 Value with which negative labels must be encoded. pos_label : int, default=1 Value with which positive labels must be encoded. sparse_output : bool, default=False, Set to true if output binary array is desired in CSR sparse format. Returns ------- Y : {ndarray, sparse matrix} of shape (n_samples, n_classes) Shape will be (n_samples, 1) for binary problems. Sparse matrix will be of CSR format. See Also -------- LabelBinarizer : Class used to wrap the functionality of label_binarize and allow for fitting to classes independently of the transform operation. Examples -------- >>> from sklearn.preprocessing import label_binarize >>> label_binarize([1, 6], classes=[1, 2, 4, 6]) array([[1, 0, 0, 0], [0, 0, 0, 1]]) The class ordering is preserved: >>> label_binarize([1, 6], classes=[1, 6, 4, 2]) array([[1, 0, 0, 0], [0, 1, 0, 0]]) Binary targets transform to a column vector >>> label_binarize(['yes', 'no', 'no', 'yes'], classes=['no', 'yes']) array([[1], [0], [0], [1]])
label_binarize
python
scikit-learn/scikit-learn
sklearn/preprocessing/_label.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/_label.py
BSD-3-Clause
def _inverse_binarize_multiclass(y, classes): """Inverse label binarization transformation for multiclass. Multiclass uses the maximal score instead of a threshold. """ classes = np.asarray(classes) if sp.issparse(y): # Find the argmax for each row in y where y is a CSR matrix y = y.tocsr() n_samples, n_outputs = y.shape outputs = np.arange(n_outputs) row_max = min_max_axis(y, 1)[1] row_nnz = np.diff(y.indptr) y_data_repeated_max = np.repeat(row_max, row_nnz) # picks out all indices obtaining the maximum per row y_i_all_argmax = np.flatnonzero(y_data_repeated_max == y.data) # For corner case where last row has a max of 0 if row_max[-1] == 0: y_i_all_argmax = np.append(y_i_all_argmax, [len(y.data)]) # Gets the index of the first argmax in each row from y_i_all_argmax index_first_argmax = np.searchsorted(y_i_all_argmax, y.indptr[:-1]) # first argmax of each row y_ind_ext = np.append(y.indices, [0]) y_i_argmax = y_ind_ext[y_i_all_argmax[index_first_argmax]] # Handle rows of all 0 y_i_argmax[np.where(row_nnz == 0)[0]] = 0 # Handles rows with max of 0 that contain negative numbers samples = np.arange(n_samples)[(row_nnz > 0) & (row_max.ravel() == 0)] for i in samples: ind = y.indices[y.indptr[i] : y.indptr[i + 1]] y_i_argmax[i] = classes[np.setdiff1d(outputs, ind)][0] return classes[y_i_argmax] else: return classes.take(y.argmax(axis=1), mode="clip")
Inverse label binarization transformation for multiclass. Multiclass uses the maximal score instead of a threshold.
_inverse_binarize_multiclass
python
scikit-learn/scikit-learn
sklearn/preprocessing/_label.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/_label.py
BSD-3-Clause
def _inverse_binarize_thresholding(y, output_type, classes, threshold):
    """Inverse label binarization transformation using thresholding."""

    if output_type == "binary" and y.ndim == 2 and y.shape[1] > 2:
        raise ValueError("output_type='binary', but y.shape = {0}".format(y.shape))

    if output_type != "binary" and y.shape[1] != len(classes):
        raise ValueError(
            "The number of classes is not equal to the number of dimensions of y."
        )

    classes = np.asarray(classes)

    # Perform thresholding
    if sp.issparse(y):
        if threshold > 0:
            if y.format not in ("csr", "csc"):
                y = y.tocsr()
            y.data = np.array(y.data > threshold, dtype=int)
            y.eliminate_zeros()
        else:
            y = np.array(y.toarray() > threshold, dtype=int)
    else:
        y = np.array(y > threshold, dtype=int)

    # Inverse transform data
    if output_type == "binary":
        if sp.issparse(y):
            y = y.toarray()
        if y.ndim == 2 and y.shape[1] == 2:
            return classes[y[:, 1]]
        else:
            if len(classes) == 1:
                return np.repeat(classes[0], len(y))
            else:
                return classes[y.ravel()]

    elif output_type == "multilabel-indicator":
        return y

    else:
        raise ValueError("{0} format is not supported".format(output_type))
Inverse label binarization transformation using thresholding.
_inverse_binarize_thresholding
python
scikit-learn/scikit-learn
sklearn/preprocessing/_label.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/_label.py
BSD-3-Clause
def fit(self, y): """Fit the label sets binarizer, storing :term:`classes_`. Parameters ---------- y : iterable of iterables A set of labels (any orderable and hashable object) for each sample. If the `classes` parameter is set, `y` will not be iterated. Returns ------- self : object Fitted estimator. """ self._cached_dict = None if self.classes is None: classes = sorted(set(itertools.chain.from_iterable(y))) elif len(set(self.classes)) < len(self.classes): raise ValueError( "The classes argument contains duplicate " "classes. Remove these duplicates before passing " "them to MultiLabelBinarizer." ) else: classes = self.classes dtype = int if all(isinstance(c, int) for c in classes) else object self.classes_ = np.empty(len(classes), dtype=dtype) self.classes_[:] = classes return self
Fit the label sets binarizer, storing :term:`classes_`. Parameters ---------- y : iterable of iterables A set of labels (any orderable and hashable object) for each sample. If the `classes` parameter is set, `y` will not be iterated. Returns ------- self : object Fitted estimator.
fit
python
scikit-learn/scikit-learn
sklearn/preprocessing/_label.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/_label.py
BSD-3-Clause
def fit_transform(self, y): """Fit the label sets binarizer and transform the given label sets. Parameters ---------- y : iterable of iterables A set of labels (any orderable and hashable object) for each sample. If the `classes` parameter is set, `y` will not be iterated. Returns ------- y_indicator : {ndarray, sparse matrix} of shape (n_samples, n_classes) A matrix such that `y_indicator[i, j] = 1` iff `classes_[j]` is in `y[i]`, and 0 otherwise. Sparse matrix will be of CSR format. """ if self.classes is not None: return self.fit(y).transform(y) self._cached_dict = None # Automatically increment on new class class_mapping = defaultdict(int) class_mapping.default_factory = class_mapping.__len__ yt = self._transform(y, class_mapping) # sort classes and reorder columns tmp = sorted(class_mapping, key=class_mapping.get) # (make safe for tuples) dtype = int if all(isinstance(c, int) for c in tmp) else object class_mapping = np.empty(len(tmp), dtype=dtype) class_mapping[:] = tmp self.classes_, inverse = np.unique(class_mapping, return_inverse=True) # ensure yt.indices keeps its current dtype yt.indices = np.asarray(inverse[yt.indices], dtype=yt.indices.dtype) if not self.sparse_output: yt = yt.toarray() return yt
Fit the label sets binarizer and transform the given label sets. Parameters ---------- y : iterable of iterables A set of labels (any orderable and hashable object) for each sample. If the `classes` parameter is set, `y` will not be iterated. Returns ------- y_indicator : {ndarray, sparse matrix} of shape (n_samples, n_classes) A matrix such that `y_indicator[i, j] = 1` iff `classes_[j]` is in `y[i]`, and 0 otherwise. Sparse matrix will be of CSR format.
fit_transform
python
scikit-learn/scikit-learn
sklearn/preprocessing/_label.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/_label.py
BSD-3-Clause
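A hedged sketch of the incremental-mapping-then-reorder behaviour described above, with made-up label sets:

from sklearn.preprocessing import MultiLabelBinarizer

mlb = MultiLabelBinarizer()
# Labels are mapped to columns on the fly, then columns are reordered
# so that `classes_` comes out sorted, as the code above does.
yt = mlb.fit_transform([(1, 2), (3,)])
print(mlb.classes_)  # [1 2 3]
print(yt)
# [[1 1 0]
#  [0 0 1]]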
def transform(self, y): """Transform the given label sets. Parameters ---------- y : iterable of iterables A set of labels (any orderable and hashable object) for each sample. If the `classes` parameter is set, `y` will not be iterated. Returns ------- y_indicator : array or CSR matrix, shape (n_samples, n_classes) A matrix such that `y_indicator[i, j] = 1` iff `classes_[j]` is in `y[i]`, and 0 otherwise. """ check_is_fitted(self) class_to_index = self._build_cache() yt = self._transform(y, class_to_index) if not self.sparse_output: yt = yt.toarray() return yt
Transform the given label sets. Parameters ---------- y : iterable of iterables A set of labels (any orderable and hashable object) for each sample. If the `classes` parameter is set, `y` will not be iterated. Returns ------- y_indicator : array or CSR matrix, shape (n_samples, n_classes) A matrix such that `y_indicator[i, j] = 1` iff `classes_[j]` is in `y[i]`, and 0 otherwise.
transform
python
scikit-learn/scikit-learn
sklearn/preprocessing/_label.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/_label.py
BSD-3-Clause
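A small sketch of the unknown-label handling that `transform` inherits from `_transform`; the labels are illustrative:

import warnings
from sklearn.preprocessing import MultiLabelBinarizer

mlb = MultiLabelBinarizer(classes=[1, 2, 3]).fit([(1, 2)])
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    yt = mlb.transform([(1, 4)])  # label 4 was never declared
print(yt)                 # [[1 0 0]]
print(caught[0].message)  # unknown class(es) [4] will be ignored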
def _transform(self, y, class_mapping): """Transforms the label sets with a given mapping. Parameters ---------- y : iterable of iterables A set of labels (any orderable and hashable object) for each sample. If the `classes` parameter is set, `y` will not be iterated. class_mapping : Mapping Maps from label to column index in label indicator matrix. Returns ------- y_indicator : sparse matrix of shape (n_samples, n_classes) Label indicator matrix. Will be of CSR format. """ indices = array.array("i") indptr = array.array("i", [0]) unknown = set() for labels in y: index = set() for label in labels: try: index.add(class_mapping[label]) except KeyError: unknown.add(label) indices.extend(index) indptr.append(len(indices)) if unknown: warnings.warn( "unknown class(es) {0} will be ignored".format(sorted(unknown, key=str)) ) data = np.ones(len(indices), dtype=int) return sp.csr_matrix( (data, indices, indptr), shape=(len(indptr) - 1, len(class_mapping)) )
Transforms the label sets with a given mapping. Parameters ---------- y : iterable of iterables A set of labels (any orderable and hashable object) for each sample. If the `classes` parameter is set, `y` will not be iterated. class_mapping : Mapping Maps from label to column index in label indicator matrix. Returns ------- y_indicator : sparse matrix of shape (n_samples, n_classes) Label indicator matrix. Will be of CSR format.
_transform
python
scikit-learn/scikit-learn
sklearn/preprocessing/_label.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/_label.py
BSD-3-Clause
def inverse_transform(self, yt): """Transform the given indicator matrix into label sets. Parameters ---------- yt : {ndarray, sparse matrix} of shape (n_samples, n_classes) A matrix containing only 1s ands 0s. Returns ------- y_original : list of tuples The set of labels for each sample such that `y[i]` consists of `classes_[j]` for each `yt[i, j] == 1`. """ check_is_fitted(self) if yt.shape[1] != len(self.classes_): raise ValueError( "Expected indicator for {0} classes, but got {1}".format( len(self.classes_), yt.shape[1] ) ) if sp.issparse(yt): yt = yt.tocsr() if len(yt.data) != 0 and len(np.setdiff1d(yt.data, [0, 1])) > 0: raise ValueError("Expected only 0s and 1s in label indicator.") return [ tuple(self.classes_.take(yt.indices[start:end])) for start, end in zip(yt.indptr[:-1], yt.indptr[1:]) ] else: unexpected = np.setdiff1d(yt, [0, 1]) if len(unexpected) > 0: raise ValueError( "Expected only 0s and 1s in label indicator. Also got {0}".format( unexpected ) ) return [tuple(self.classes_.compress(indicators)) for indicators in yt]
Transform the given indicator matrix into label sets. Parameters ---------- yt : {ndarray, sparse matrix} of shape (n_samples, n_classes) A matrix containing only 1s and 0s. Returns ------- y_original : list of tuples The set of labels for each sample such that `y[i]` consists of `classes_[j]` for each `yt[i, j] == 1`.
inverse_transform
python
scikit-learn/scikit-learn
sklearn/preprocessing/_label.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/_label.py
BSD-3-Clause
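A round-trip sketch of the inversion described above, with illustrative labels:

from sklearn.preprocessing import MultiLabelBinarizer

mlb = MultiLabelBinarizer()
yt = mlb.fit_transform([("a", "b"), ("c",)])
# Back to one tuple of labels per sample.
print(mlb.inverse_transform(yt))  # [('a', 'b'), ('c',)]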
def _create_expansion(X, interaction_only, deg, n_features, cumulative_size=0): """Helper function for creating and appending sparse expansion matrices""" total_nnz = _calc_total_nnz(X.indptr, interaction_only, deg) expanded_col = _calc_expanded_nnz(n_features, interaction_only, deg) if expanded_col == 0: return None # This only checks whether each block needs 64bit integers upon # expansion. We prefer to keep int32 indexing where we can, # since currently SciPy's CSR construction downcasts when possible, # so we prefer to avoid an unnecessary cast. The dtype may still # change in the concatenation process if needed. # See: https://github.com/scipy/scipy/issues/16569 max_indices = expanded_col - 1 max_indptr = total_nnz max_int32 = np.iinfo(np.int32).max needs_int64 = max(max_indices, max_indptr) > max_int32 index_dtype = np.int64 if needs_int64 else np.int32 # Result of the expansion, modified in place by the # `_csr_polynomial_expansion` routine. expanded_data = np.empty(shape=total_nnz, dtype=X.data.dtype) expanded_indices = np.empty(shape=total_nnz, dtype=index_dtype) expanded_indptr = np.empty(shape=X.indptr.shape[0], dtype=index_dtype) _csr_polynomial_expansion( X.data, X.indices, X.indptr, X.shape[1], expanded_data, expanded_indices, expanded_indptr, interaction_only, deg, ) return sparse.csr_matrix( (expanded_data, expanded_indices, expanded_indptr), shape=(X.indptr.shape[0] - 1, expanded_col), dtype=X.dtype, )
Helper function for creating and appending sparse expansion matrices
_create_expansion
python
scikit-learn/scikit-learn
sklearn/preprocessing/_polynomial.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/_polynomial.py
BSD-3-Clause
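The int32/int64 decision above can be illustrated in isolation; the sizes below are made up for the sketch:

import numpy as np

# Indices stay int32 unless the largest column index or the total nnz
# would overflow a 32bit signed integer, mirroring the check above.
expanded_col, total_nnz = 3_000_000_000, 10_000
needs_int64 = max(expanded_col - 1, total_nnz) > np.iinfo(np.int32).max
index_dtype = np.int64 if needs_int64 else np.int32
print(index_dtype)  # <class 'numpy.int64'>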
def _num_combinations( n_features, min_degree, max_degree, interaction_only, include_bias ): """Calculate number of terms in polynomial expansion This should be equivalent to counting the number of terms returned by _combinations(...) but much faster. """ if interaction_only: combinations = sum( [ comb(n_features, i, exact=True) for i in range(max(1, min_degree), min(max_degree, n_features) + 1) ] ) else: combinations = comb(n_features + max_degree, max_degree, exact=True) - 1 if min_degree > 0: d = min_degree - 1 combinations -= comb(n_features + d, d, exact=True) - 1 if include_bias: combinations += 1 return combinations
Calculate number of terms in polynomial expansion. This should be equivalent to counting the number of terms returned by _combinations(...) but much faster.
_num_combinations
python
scikit-learn/scikit-learn
sklearn/preprocessing/_polynomial.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/_polynomial.py
BSD-3-Clause
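A hypothetical brute-force cross-check of the closed form above, for n_features=3, degrees 1..3, interaction_only=False, include_bias=True:

from itertools import combinations_with_replacement
from math import comb

n_features, max_degree = 3, 3
brute = 1 + sum(  # the leading 1 counts the bias column
    len(list(combinations_with_replacement(range(n_features), d)))
    for d in range(1, max_degree + 1)
)
closed = comb(n_features + max_degree, max_degree, exact=True) - 1 + 1
print(brute, closed)  # 20 20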
def powers_(self): """Exponent for each of the inputs in the output.""" check_is_fitted(self) combinations = self._combinations( n_features=self.n_features_in_, min_degree=self._min_degree, max_degree=self._max_degree, interaction_only=self.interaction_only, include_bias=self.include_bias, ) return np.vstack( [np.bincount(c, minlength=self.n_features_in_) for c in combinations] )
Exponent for each of the inputs in the output.
powers_
python
scikit-learn/scikit-learn
sklearn/preprocessing/_polynomial.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/_polynomial.py
BSD-3-Clause
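A short sketch of what the `powers_` property above returns, with illustrative data:

import numpy as np
from sklearn.preprocessing import PolynomialFeatures

pf = PolynomialFeatures(degree=2).fit(np.arange(4).reshape(2, 2))
# One row of per-input exponents per output column:
# 1, x0, x1, x0^2, x0*x1, x1^2
print(pf.powers_)
# [[0 0]
#  [1 0]
#  [0 1]
#  [2 0]
#  [1 1]
#  [0 2]]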
def get_feature_names_out(self, input_features=None): """Get output feature names for transformation. Parameters ---------- input_features : array-like of str or None, default=None Input features. - If `input_features is None`, then `feature_names_in_` is used as feature names in. If `feature_names_in_` is not defined, then the following input feature names are generated: `["x0", "x1", ..., "x(n_features_in_ - 1)"]`. - If `input_features` is an array-like, then `input_features` must match `feature_names_in_` if `feature_names_in_` is defined. Returns ------- feature_names_out : ndarray of str objects Transformed feature names. """ powers = self.powers_ input_features = _check_feature_names_in(self, input_features) feature_names = [] for row in powers: inds = np.where(row)[0] if len(inds): name = " ".join( ( "%s^%d" % (input_features[ind], exp) if exp != 1 else input_features[ind] ) for ind, exp in zip(inds, row[inds]) ) else: name = "1" feature_names.append(name) return np.asarray(feature_names, dtype=object)
Get output feature names for transformation. Parameters ---------- input_features : array-like of str or None, default=None Input features. - If `input_features is None`, then `feature_names_in_` is used as feature names in. If `feature_names_in_` is not defined, then the following input feature names are generated: `["x0", "x1", ..., "x(n_features_in_ - 1)"]`. - If `input_features` is an array-like, then `input_features` must match `feature_names_in_` if `feature_names_in_` is defined. Returns ------- feature_names_out : ndarray of str objects Transformed feature names.
get_feature_names_out
python
scikit-learn/scikit-learn
sklearn/preprocessing/_polynomial.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/_polynomial.py
BSD-3-Clause
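A minimal sketch of the naming scheme implemented above (exponent 1 is printed bare, higher powers use `^`); the feature names are illustrative:

import numpy as np
from sklearn.preprocessing import PolynomialFeatures

pf = PolynomialFeatures(degree=2, include_bias=False).fit(np.ones((2, 2)))
print(pf.get_feature_names_out(["a", "b"]))
# ['a' 'b' 'a^2' 'a b' 'b^2']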
def fit(self, X, y=None): """ Compute number of output features. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The data. y : Ignored Not used, present here for API consistency by convention. Returns ------- self : object Fitted transformer. """ _, n_features = validate_data(self, X, accept_sparse=True).shape if isinstance(self.degree, Integral): if self.degree == 0 and not self.include_bias: raise ValueError( "Setting degree to zero and include_bias to False would result in" " an empty output array." ) self._min_degree = 0 self._max_degree = self.degree elif ( isinstance(self.degree, collections.abc.Iterable) and len(self.degree) == 2 ): self._min_degree, self._max_degree = self.degree if not ( isinstance(self._min_degree, Integral) and isinstance(self._max_degree, Integral) and self._min_degree >= 0 and self._min_degree <= self._max_degree ): raise ValueError( "degree=(min_degree, max_degree) must " "be non-negative integers that fulfil " "min_degree <= max_degree, got " f"{self.degree}." ) elif self._max_degree == 0 and not self.include_bias: raise ValueError( "Setting both min_degree and max_degree to zero and include_bias to" " False would result in an empty output array." ) else: raise ValueError( "degree must be a non-negative int or tuple " "(min_degree, max_degree), got " f"{self.degree}." ) self.n_output_features_ = self._num_combinations( n_features=n_features, min_degree=self._min_degree, max_degree=self._max_degree, interaction_only=self.interaction_only, include_bias=self.include_bias, ) if self.n_output_features_ > np.iinfo(np.intp).max: msg = ( "The output that would result from the current configuration would" f" have {self.n_output_features_} features which is too large to be" f" indexed by {np.intp().dtype.name}. Please change some or all of the" " following:\n- The number of features in the input, currently" f" {n_features=}\n- The range of degrees to calculate, currently" f" [{self._min_degree}, {self._max_degree}]\n- Whether to include only" f" interaction terms, currently {self.interaction_only}\n- Whether to" f" include a bias term, currently {self.include_bias}." ) if ( np.intp == np.int32 and self.n_output_features_ <= np.iinfo(np.int64).max ): # pragma: nocover msg += ( "\nNote that the current Python runtime has a limited 32 bit " "address space and that this configuration would have been " "admissible if run on a 64 bit Python runtime." ) raise ValueError(msg) # We also record the number of output features for # _min_degree = 0 self._n_out_full = self._num_combinations( n_features=n_features, min_degree=0, max_degree=self._max_degree, interaction_only=self.interaction_only, include_bias=self.include_bias, ) return self
Compute number of output features. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The data. y : Ignored Not used, present here for API consistency by convention. Returns ------- self : object Fitted transformer.
fit
python
scikit-learn/scikit-learn
sklearn/preprocessing/_polynomial.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/_polynomial.py
BSD-3-Clause
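A small sketch of the `(min_degree, max_degree)` tuple handling validated in `fit` above, with illustrative data:

import numpy as np
from sklearn.preprocessing import PolynomialFeatures

pf = PolynomialFeatures(degree=(2, 3), include_bias=False).fit(np.ones((3, 2)))
# Only the degree-2 and degree-3 terms are counted:
# x0^2, x0 x1, x1^2, x0^3, x0^2 x1, x0 x1^2, x1^3
print(pf.n_output_features_)  # 7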
def transform(self, X): """Transform data to polynomial features. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The data to transform, row by row. Prefer CSR over CSC for sparse input (for speed), but CSC is required if the degree is 4 or higher. If the degree is less than 4 and the input format is CSC, it will be converted to CSR, have its polynomial features generated, then converted back to CSC. If the degree is 2 or 3, the method described in "Leveraging Sparsity to Speed Up Polynomial Feature Expansions of CSR Matrices Using K-Simplex Numbers" by Andrew Nystrom and John Hughes is used, which is much faster than the method used on CSC input. For this reason, a CSC input will be converted to CSR, and the output will be converted back to CSC prior to being returned, hence the preference of CSR. Returns ------- XP : {ndarray, sparse matrix} of shape (n_samples, NP) The matrix of features, where `NP` is the number of polynomial features generated from the combination of inputs. If a sparse matrix is provided, it will be converted into a sparse `csr_matrix`. """ check_is_fitted(self) X = validate_data( self, X, order="F", dtype=FLOAT_DTYPES, reset=False, accept_sparse=("csr", "csc"), ) n_samples, n_features = X.shape max_int32 = np.iinfo(np.int32).max if sparse.issparse(X) and X.format == "csr": if self._max_degree > 3: return self.transform(X.tocsc()).tocsr() to_stack = [] if self.include_bias: to_stack.append( sparse.csr_matrix(np.ones(shape=(n_samples, 1), dtype=X.dtype)) ) if self._min_degree <= 1 and self._max_degree > 0: to_stack.append(X) cumulative_size = sum(mat.shape[1] for mat in to_stack) for deg in range(max(2, self._min_degree), self._max_degree + 1): expanded = _create_expansion( X=X, interaction_only=self.interaction_only, deg=deg, n_features=n_features, cumulative_size=cumulative_size, ) if expanded is not None: to_stack.append(expanded) cumulative_size += expanded.shape[1] if len(to_stack) == 0: # edge case: deal with empty matrix XP = sparse.csr_matrix((n_samples, 0), dtype=X.dtype) else: # `scipy.sparse.hstack` breaks in scipy<1.9.2 # when `n_output_features_ > max_int32` all_int32 = all(mat.indices.dtype == np.int32 for mat in to_stack) if ( sp_version < parse_version("1.9.2") and self.n_output_features_ > max_int32 and all_int32 ): raise ValueError( # pragma: no cover "In scipy versions `<1.9.2`, the function `scipy.sparse.hstack`" " produces negative columns when:\n1. The output shape contains" " `n_cols` too large to be represented by a 32bit signed" " integer.\n2. All sub-matrices to be stacked have indices of" " dtype `np.int32`.\nTo avoid this error, either use a version" " of scipy `>=1.9.2` or alter the `PolynomialFeatures`" " transformer to produce fewer than 2^31 output features" ) XP = sparse.hstack(to_stack, dtype=X.dtype, format="csr") elif sparse.issparse(X) and X.format == "csc" and self._max_degree < 4: return self.transform(X.tocsr()).tocsc() elif sparse.issparse(X): combinations = self._combinations( n_features=n_features, min_degree=self._min_degree, max_degree=self._max_degree, interaction_only=self.interaction_only, include_bias=self.include_bias, ) columns = [] for combi in combinations: if combi: out_col = 1 for col_idx in combi: out_col = X[:, [col_idx]].multiply(out_col) columns.append(out_col) else: bias = sparse.csc_matrix(np.ones((X.shape[0], 1))) columns.append(bias) XP = sparse.hstack(columns, dtype=X.dtype).tocsc() else: # Do as if _min_degree = 0 and cut down array after the # computation, i.e. 
use _n_out_full instead of n_output_features_. XP = np.empty( shape=(n_samples, self._n_out_full), dtype=X.dtype, order=self.order ) # What follows is a faster implementation of: # for i, comb in enumerate(combinations): # XP[:, i] = X[:, comb].prod(1) # This implementation uses two optimisations. # First one is broadcasting, # multiply ([X1, ..., Xn], X1) -> [X1 X1, ..., Xn X1] # multiply ([X2, ..., Xn], X2) -> [X2 X2, ..., Xn X2] # ... # multiply ([X[:, start:end], X[:, start]) -> ... # Second optimisation happens for degrees >= 3. # Xi^3 is computed reusing previous computation: # Xi^3 = Xi^2 * Xi. # degree 0 term if self.include_bias: XP[:, 0] = 1 current_col = 1 else: current_col = 0 if self._max_degree == 0: return XP # degree 1 term XP[:, current_col : current_col + n_features] = X index = list(range(current_col, current_col + n_features)) current_col += n_features index.append(current_col) # loop over degree >= 2 terms for _ in range(2, self._max_degree + 1): new_index = [] end = index[-1] for feature_idx in range(n_features): start = index[feature_idx] new_index.append(current_col) if self.interaction_only: start += index[feature_idx + 1] - index[feature_idx] next_col = current_col + end - start if next_col <= current_col: break # XP[:, start:end] are terms of degree d - 1 # that exclude feature #feature_idx. np.multiply( XP[:, start:end], X[:, feature_idx : feature_idx + 1], out=XP[:, current_col:next_col], casting="no", ) current_col = next_col new_index.append(current_col) index = new_index if self._min_degree > 1: n_XP, n_Xout = self._n_out_full, self.n_output_features_ if self.include_bias: Xout = np.empty( shape=(n_samples, n_Xout), dtype=XP.dtype, order=self.order ) Xout[:, 0] = 1 Xout[:, 1:] = XP[:, n_XP - n_Xout + 1 :] else: Xout = XP[:, n_XP - n_Xout :].copy() XP = Xout return XP
Transform data to polynomial features. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The data to transform, row by row. Prefer CSR over CSC for sparse input (for speed), but CSC is required if the degree is 4 or higher. If the degree is less than 4 and the input format is CSC, it will be converted to CSR, have its polynomial features generated, then converted back to CSC. If the degree is 2 or 3, the method described in "Leveraging Sparsity to Speed Up Polynomial Feature Expansions of CSR Matrices Using K-Simplex Numbers" by Andrew Nystrom and John Hughes is used, which is much faster than the method used on CSC input. For this reason, a CSC input will be converted to CSR, and the output will be converted back to CSC prior to being returned, hence the preference of CSR. Returns ------- XP : {ndarray, sparse matrix} of shape (n_samples, NP) The matrix of features, where `NP` is the number of polynomial features generated from the combination of inputs. If a sparse matrix is provided, it will be converted into a sparse `csr_matrix`.
transform
python
scikit-learn/scikit-learn
sklearn/preprocessing/_polynomial.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/_polynomial.py
BSD-3-Clause
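A quick sketch of the sparse fast path documented above; the input matrix is random and illustrative:

import numpy as np
from scipy import sparse
from sklearn.preprocessing import PolynomialFeatures

X = sparse.random(100, 5, density=0.1, format="csr", random_state=0)
# CSR input with degree <= 3 goes through the fast K-simplex expansion.
XP = PolynomialFeatures(degree=2).fit_transform(X)
print(XP.format, XP.shape)  # csr (100, 21)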
def _get_base_knot_positions(X, n_knots=10, knots="uniform", sample_weight=None): """Calculate base knot positions. Base knots such that first knot <= feature <= last knot. For the B-spline construction with scipy.interpolate.BSpline, 2*degree knots beyond the base interval are added. Returns ------- knots : ndarray of shape (n_knots, n_features), dtype=np.float64 Knot positions (points) of base interval. """ if knots == "quantile": percentile_ranks = 100 * np.linspace( start=0, stop=1, num=n_knots, dtype=np.float64 ) if sample_weight is None: knots = np.percentile(X, percentile_ranks, axis=0) else: knots = np.array( [ _weighted_percentile(X, sample_weight, percentile_rank) for percentile_rank in percentile_ranks ] ) else: # knots == 'uniform': # Note that the variable `knots` has already been validated and # `else` is therefore safe. # Disregard observations with zero weight. mask = slice(None, None, 1) if sample_weight is None else sample_weight > 0 x_min = np.amin(X[mask], axis=0) x_max = np.amax(X[mask], axis=0) knots = np.linspace( start=x_min, stop=x_max, num=n_knots, endpoint=True, dtype=np.float64, ) return knots
Calculate base knot positions. Base knots such that first knot <= feature <= last knot. For the B-spline construction with scipy.interpolate.BSpline, 2*degree knots beyond the base interval are added. Returns ------- knots : ndarray of shape (n_knots, n_features), dtype=np.float64 Knot positions (points) of base interval.
_get_base_knot_positions
python
scikit-learn/scikit-learn
sklearn/preprocessing/_polynomial.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/_polynomial.py
BSD-3-Clause
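An illustration of the "quantile" strategy above, reduced to its NumPy core; the data is made up:

import numpy as np

# Base knots are the per-feature percentiles at n_knots evenly spaced
# ranks in [0, 100], as in the unweighted branch above.
X = np.random.RandomState(0).normal(size=(100, 2))
percentile_ranks = 100 * np.linspace(0, 1, num=5)
knots = np.percentile(X, percentile_ranks, axis=0)
print(knots.shape)  # (5, 2): n_knots rows, one column per feature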
def get_feature_names_out(self, input_features=None): """Get output feature names for transformation. Parameters ---------- input_features : array-like of str or None, default=None Input features. - If `input_features` is `None`, then `feature_names_in_` is used as feature names in. If `feature_names_in_` is not defined, then the following input feature names are generated: `["x0", "x1", ..., "x(n_features_in_ - 1)"]`. - If `input_features` is an array-like, then `input_features` must match `feature_names_in_` if `feature_names_in_` is defined. Returns ------- feature_names_out : ndarray of str objects Transformed feature names. """ check_is_fitted(self, "n_features_in_") n_splines = self.bsplines_[0].c.shape[1] input_features = _check_feature_names_in(self, input_features) feature_names = [] for i in range(self.n_features_in_): for j in range(n_splines - 1 + self.include_bias): feature_names.append(f"{input_features[i]}_sp_{j}") return np.asarray(feature_names, dtype=object)
Get output feature names for transformation. Parameters ---------- input_features : array-like of str or None, default=None Input features. - If `input_features` is `None`, then `feature_names_in_` is used as feature names in. If `feature_names_in_` is not defined, then the following input feature names are generated: `["x0", "x1", ..., "x(n_features_in_ - 1)"]`. - If `input_features` is an array-like, then `input_features` must match `feature_names_in_` if `feature_names_in_` is defined. Returns ------- feature_names_out : ndarray of str objects Transformed feature names.
get_feature_names_out
python
scikit-learn/scikit-learn
sklearn/preprocessing/_polynomial.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/_polynomial.py
BSD-3-Clause
def fit(self, X, y=None, sample_weight=None):
    """Compute knot positions of splines.

    Parameters
    ----------
    X : array-like of shape (n_samples, n_features)
        The data.

    y : None
        Ignored.

    sample_weight : array-like of shape (n_samples,), default = None
        Individual weights for each sample. Used to calculate quantiles if
        `knots="quantile"`. For `knots="uniform"`, zero weighted
        observations are ignored for finding the min and max of `X`.

    Returns
    -------
    self : object
        Fitted transformer.
    """
    X = validate_data(
        self,
        X,
        reset=True,
        accept_sparse=False,
        ensure_min_samples=2,
        ensure_2d=True,
    )
    if sample_weight is not None:
        sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)

    _, n_features = X.shape

    if isinstance(self.knots, str):
        base_knots = self._get_base_knot_positions(
            X, n_knots=self.n_knots, knots=self.knots, sample_weight=sample_weight
        )
    else:
        base_knots = check_array(self.knots, dtype=np.float64)
        if base_knots.shape[0] < 2:
            raise ValueError("Number of knots, knots.shape[0], must be >= 2.")
        elif base_knots.shape[1] != n_features:
            raise ValueError("knots.shape[1] == n_features is violated.")
        elif not np.all(np.diff(base_knots, axis=0) > 0):
            raise ValueError("knots must be sorted without duplicates.")

    # number of knots for base interval
    n_knots = base_knots.shape[0]

    if self.extrapolation == "periodic" and n_knots <= self.degree:
        raise ValueError(
            "Periodic splines require degree < n_knots. Got n_knots="
            f"{n_knots} and degree={self.degree}."
        )

    # number of splines basis functions
    if self.extrapolation != "periodic":
        n_splines = n_knots + self.degree - 1
    else:
        # periodic splines have self.degree less degrees of freedom
        n_splines = n_knots - 1

    degree = self.degree
    n_out = n_features * n_splines
    # We have to add degree number of knots below, and degree number knots
    # above the base knots in order to make the spline basis complete.
    if self.extrapolation == "periodic":
        # For periodic splines the spacing of the first / last degree knots
        # needs to be a continuation of the spacing of the last / first
        # base knots.
        period = base_knots[-1] - base_knots[0]
        knots = np.r_[
            base_knots[-(degree + 1) : -1] - period,
            base_knots,
            base_knots[1 : (degree + 1)] + period,
        ]
    else:
        # Eilers & Marx in "Flexible smoothing with B-splines and
        # penalties" https://doi.org/10.1214/ss/1038425655 advise
        # against repeating first and last knot several times, which
        # would have inferior behaviour at boundaries if combined with
        # a penalty (hence P-Spline). We follow this advice even if our
        # splines are unpenalized. Meaning we do not:
        # knots = np.r_[
        #     np.tile(base_knots.min(axis=0), reps=[degree, 1]),
        #     base_knots,
        #     np.tile(base_knots.max(axis=0), reps=[degree, 1])
        # ]
        # Instead, we reuse the distance of the 2 first/last knots.
        dist_min = base_knots[1] - base_knots[0]
        dist_max = base_knots[-1] - base_knots[-2]

        knots = np.r_[
            np.linspace(
                base_knots[0] - degree * dist_min,
                base_knots[0] - dist_min,
                num=degree,
            ),
            base_knots,
            np.linspace(
                base_knots[-1] + dist_max,
                base_knots[-1] + degree * dist_max,
                num=degree,
            ),
        ]

    # With a diagonal coefficient matrix, we get back the spline basis
    # elements, i.e. the design matrix of the spline.
    # Note, BSpline appreciates C-contiguous float64 arrays as c=coef.
    coef = np.eye(n_splines, dtype=np.float64)
    if self.extrapolation == "periodic":
        coef = np.concatenate((coef, coef[:degree, :]))

    extrapolate = self.extrapolation in ["periodic", "continue"]

    bsplines = [
        BSpline.construct_fast(
            knots[:, i], coef, self.degree, extrapolate=extrapolate
        )
        for i in range(n_features)
    ]
    self.bsplines_ = bsplines

    self.n_features_out_ = n_out - n_features * (1 - self.include_bias)
    return self
Compute knot positions of splines. Parameters ---------- X : array-like of shape (n_samples, n_features) The data. y : None Ignored. sample_weight : array-like of shape (n_samples,), default = None Individual weights for each sample. Used to calculate quantiles if `knots="quantile"`. For `knots="uniform"`, zero weighted observations are ignored for finding the min and max of `X`. Returns ------- self : object Fitted transformer.
fit
python
scikit-learn/scikit-learn
sklearn/preprocessing/_polynomial.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/_polynomial.py
BSD-3-Clause
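A small sketch of the periodic degrees-of-freedom bookkeeping computed in `fit` above, on illustrative data:

import numpy as np
from sklearn.preprocessing import SplineTransformer

X = np.linspace(0, 2 * np.pi, 50).reshape(-1, 1)
st = SplineTransformer(n_knots=5, degree=3, extrapolation="periodic").fit(X)
# Periodic splines have n_knots - 1 basis functions per feature,
# i.e. `degree` fewer degrees of freedom than the non-periodic case.
print(st.n_features_out_)  # 4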
def transform(self, X): """Transform each feature data to B-splines. Parameters ---------- X : array-like of shape (n_samples, n_features) The data to transform. Returns ------- XBS : {ndarray, sparse matrix} of shape (n_samples, n_features * n_splines) The matrix of features, where n_splines is the number of bases elements of the B-splines, n_knots + degree - 1. """ check_is_fitted(self) X = validate_data(self, X, reset=False, accept_sparse=False, ensure_2d=True) n_samples, n_features = X.shape n_splines = self.bsplines_[0].c.shape[1] degree = self.degree # TODO: Remove this condition, once scipy 1.10 is the minimum version. # Only scipy => 1.10 supports design_matrix(.., extrapolate=..). # The default (implicit in scipy < 1.10) is extrapolate=False. scipy_1_10 = sp_version >= parse_version("1.10.0") # Note: self.bsplines_[0].extrapolate is True for extrapolation in # ["periodic", "continue"] if scipy_1_10: use_sparse = self.sparse_output kwargs_extrapolate = {"extrapolate": self.bsplines_[0].extrapolate} else: use_sparse = self.sparse_output and not self.bsplines_[0].extrapolate kwargs_extrapolate = dict() # Note that scipy BSpline returns float64 arrays and converts input # x=X[:, i] to c-contiguous float64. n_out = self.n_features_out_ + n_features * (1 - self.include_bias) if X.dtype in FLOAT_DTYPES: dtype = X.dtype else: dtype = np.float64 if use_sparse: output_list = [] else: XBS = np.zeros((n_samples, n_out), dtype=dtype, order=self.order) for i in range(n_features): spl = self.bsplines_[i] if self.extrapolation in ("continue", "error", "periodic"): if self.extrapolation == "periodic": # With periodic extrapolation we map x to the segment # [spl.t[k], spl.t[n]]. # This is equivalent to BSpline(.., extrapolate="periodic") # for scipy>=1.0.0. n = spl.t.size - spl.k - 1 # Assign to new array to avoid inplace operation x = spl.t[spl.k] + (X[:, i] - spl.t[spl.k]) % ( spl.t[n] - spl.t[spl.k] ) else: x = X[:, i] if use_sparse: XBS_sparse = BSpline.design_matrix( x, spl.t, spl.k, **kwargs_extrapolate ) if self.extrapolation == "periodic": # See the construction of coef in fit. We need to add the last # degree spline basis function to the first degree ones and # then drop the last ones. # Note: See comment about SparseEfficiencyWarning below. XBS_sparse = XBS_sparse.tolil() XBS_sparse[:, :degree] += XBS_sparse[:, -degree:] XBS_sparse = XBS_sparse[:, :-degree] else: XBS[:, (i * n_splines) : ((i + 1) * n_splines)] = spl(x) else: # extrapolation in ("constant", "linear") xmin, xmax = spl.t[degree], spl.t[-degree - 1] # spline values at boundaries f_min, f_max = spl(xmin), spl(xmax) mask = (xmin <= X[:, i]) & (X[:, i] <= xmax) if use_sparse: mask_inv = ~mask x = X[:, i].copy() # Set some arbitrary values outside boundary that will be reassigned # later. x[mask_inv] = spl.t[self.degree] XBS_sparse = BSpline.design_matrix(x, spl.t, spl.k) # Note: Without converting to lil_matrix we would get: # scipy.sparse._base.SparseEfficiencyWarning: Changing the sparsity # structure of a csr_matrix is expensive. lil_matrix is more # efficient. if np.any(mask_inv): XBS_sparse = XBS_sparse.tolil() XBS_sparse[mask_inv, :] = 0 else: XBS[mask, (i * n_splines) : ((i + 1) * n_splines)] = spl(X[mask, i]) # Note for extrapolation: # 'continue' is already returned as is by scipy BSplines if self.extrapolation == "error": # BSpline with extrapolate=False does not raise an error, but # outputs np.nan. 
if (use_sparse and np.any(np.isnan(XBS_sparse.data))) or ( not use_sparse and np.any( np.isnan(XBS[:, (i * n_splines) : ((i + 1) * n_splines)]) ) ): raise ValueError( "X contains values beyond the limits of the knots." ) elif self.extrapolation == "constant": # Set all values beyond xmin and xmax to the value of the # spline basis functions at those two positions. # Only the first degree and last degree number of splines # have non-zero values at the boundaries. mask = X[:, i] < xmin if np.any(mask): if use_sparse: # Note: See comment about SparseEfficiencyWarning above. XBS_sparse = XBS_sparse.tolil() XBS_sparse[mask, :degree] = f_min[:degree] else: XBS[mask, (i * n_splines) : (i * n_splines + degree)] = f_min[ :degree ] mask = X[:, i] > xmax if np.any(mask): if use_sparse: # Note: See comment about SparseEfficiencyWarning above. XBS_sparse = XBS_sparse.tolil() XBS_sparse[mask, -degree:] = f_max[-degree:] else: XBS[ mask, ((i + 1) * n_splines - degree) : ((i + 1) * n_splines), ] = f_max[-degree:] elif self.extrapolation == "linear": # Continue the degree first and degree last spline bases # linearly beyond the boundaries, with slope = derivative at # the boundary. # Note that all others have derivative = value = 0 at the # boundaries. # spline derivatives = slopes at boundaries fp_min, fp_max = spl(xmin, nu=1), spl(xmax, nu=1) # Compute the linear continuation. if degree <= 1: # For degree=1, the derivative of 2nd spline is not zero at # boundary. For degree=0 it is the same as 'constant'. degree += 1 for j in range(degree): mask = X[:, i] < xmin if np.any(mask): linear_extr = f_min[j] + (X[mask, i] - xmin) * fp_min[j] if use_sparse: # Note: See comment about SparseEfficiencyWarning above. XBS_sparse = XBS_sparse.tolil() XBS_sparse[mask, j] = linear_extr else: XBS[mask, i * n_splines + j] = linear_extr mask = X[:, i] > xmax if np.any(mask): k = n_splines - 1 - j linear_extr = f_max[k] + (X[mask, i] - xmax) * fp_max[k] if use_sparse: # Note: See comment about SparseEfficiencyWarning above. XBS_sparse = XBS_sparse.tolil() XBS_sparse[mask, k : k + 1] = linear_extr[:, None] else: XBS[mask, i * n_splines + k] = linear_extr if use_sparse: XBS_sparse = XBS_sparse.tocsr() output_list.append(XBS_sparse) if use_sparse: # TODO: Remove this conditional error when the minimum supported version of # SciPy is 1.9.2 # `scipy.sparse.hstack` breaks in scipy<1.9.2 # when `n_features_out_ > max_int32` max_int32 = np.iinfo(np.int32).max all_int32 = True for mat in output_list: all_int32 &= mat.indices.dtype == np.int32 if ( sp_version < parse_version("1.9.2") and self.n_features_out_ > max_int32 and all_int32 ): raise ValueError( "In scipy versions `<1.9.2`, the function `scipy.sparse.hstack`" " produces negative columns when:\n1. The output shape contains" " `n_cols` too large to be represented by a 32bit signed" " integer.\n. All sub-matrices to be stacked have indices of" " dtype `np.int32`.\nTo avoid this error, either use a version" " of scipy `>=1.9.2` or alter the `SplineTransformer`" " transformer to produce fewer than 2^31 output features" ) XBS = sparse.hstack(output_list, format="csr") elif self.sparse_output: # TODO: Remove ones scipy 1.10 is the minimum version. See comments above. XBS = sparse.csr_matrix(XBS) if self.include_bias: return XBS else: # We throw away one spline basis per feature. # We chose the last one. indices = [j for j in range(XBS.shape[1]) if (j + 1) % n_splines != 0] return XBS[:, indices]
Transform each feature data to B-splines. Parameters ---------- X : array-like of shape (n_samples, n_features) The data to transform. Returns ------- XBS : {ndarray, sparse matrix} of shape (n_samples, n_features * n_splines) The matrix of features, where n_splines is the number of bases elements of the B-splines, n_knots + degree - 1.
transform
python
scikit-learn/scikit-learn
sklearn/preprocessing/_polynomial.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/_polynomial.py
BSD-3-Clause
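A small sketch of the `extrapolation="error"` branch above; the query point is deliberately outside the fitted knot range:

import numpy as np
from sklearn.preprocessing import SplineTransformer

X = np.linspace(0, 1, 20).reshape(-1, 1)
st = SplineTransformer(n_knots=4, degree=3, extrapolation="error").fit(X)
try:
    st.transform(np.array([[2.0]]))  # beyond the base knot interval
except ValueError as exc:
    print(exc)  # X contains values beyond the limits of the knots.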
def fit_transform(self, X, y): """Fit :class:`TargetEncoder` and transform X with the target encoding. .. note:: `fit(X, y).transform(X)` does not equal `fit_transform(X, y)` because a :term:`cross fitting` scheme is used in `fit_transform` for encoding. See the :ref:`User Guide <target_encoder>`. for details. Parameters ---------- X : array-like of shape (n_samples, n_features) The data to determine the categories of each feature. y : array-like of shape (n_samples,) The target data used to encode the categories. Returns ------- X_trans : ndarray of shape (n_samples, n_features) or \ (n_samples, (n_features * n_classes)) Transformed input. """ from ..model_selection import KFold, StratifiedKFold # avoid circular import X_ordinal, X_known_mask, y_encoded, n_categories = self._fit_encodings_all(X, y) # The cv splitter is voluntarily restricted to *KFold to enforce non # overlapping validation folds, otherwise the fit_transform output will # not be well-specified. if self.target_type_ == "continuous": cv = KFold(self.cv, shuffle=self.shuffle, random_state=self.random_state) else: cv = StratifiedKFold( self.cv, shuffle=self.shuffle, random_state=self.random_state ) # If 'multiclass' multiply axis=1 by num classes else keep shape the same if self.target_type_ == "multiclass": X_out = np.empty( (X_ordinal.shape[0], X_ordinal.shape[1] * len(self.classes_)), dtype=np.float64, ) else: X_out = np.empty_like(X_ordinal, dtype=np.float64) for train_idx, test_idx in cv.split(X, y): X_train, y_train = X_ordinal[train_idx, :], y_encoded[train_idx] y_train_mean = np.mean(y_train, axis=0) if self.target_type_ == "multiclass": encodings = self._fit_encoding_multiclass( X_train, y_train, n_categories, y_train_mean, ) else: encodings = self._fit_encoding_binary_or_continuous( X_train, y_train, n_categories, y_train_mean, ) self._transform_X_ordinal( X_out, X_ordinal, ~X_known_mask, test_idx, encodings, y_train_mean, ) return X_out
Fit :class:`TargetEncoder` and transform X with the target encoding. .. note:: `fit(X, y).transform(X)` does not equal `fit_transform(X, y)` because a :term:`cross fitting` scheme is used in `fit_transform` for encoding. See the :ref:`User Guide <target_encoder>` for details. Parameters ---------- X : array-like of shape (n_samples, n_features) The data to determine the categories of each feature. y : array-like of shape (n_samples,) The target data used to encode the categories. Returns ------- X_trans : ndarray of shape (n_samples, n_features) or (n_samples, (n_features * n_classes)) Transformed input.
fit_transform
python
scikit-learn/scikit-learn
sklearn/preprocessing/_target_encoder.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/_target_encoder.py
BSD-3-Clause
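A hedged sketch of the cross-fitting caveat noted above; the data is random and illustrative, so the inequality is what one typically observes rather than a guarantee:

import numpy as np
from sklearn.preprocessing import TargetEncoder

rng = np.random.RandomState(0)
X = np.array([["a"] * 10 + ["b"] * 10], dtype=object).T
y = rng.normal(size=20)
enc = TargetEncoder(random_state=0)
# Cross fitting makes fit_transform differ from fit().transform().
print(np.allclose(enc.fit_transform(X, y), enc.fit(X, y).transform(X)))
# typically False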
def transform(self, X): """Transform X with the target encoding. .. note:: `fit(X, y).transform(X)` does not equal `fit_transform(X, y)` because a :term:`cross fitting` scheme is used in `fit_transform` for encoding. See the :ref:`User Guide <target_encoder>`. for details. Parameters ---------- X : array-like of shape (n_samples, n_features) The data to determine the categories of each feature. Returns ------- X_trans : ndarray of shape (n_samples, n_features) or \ (n_samples, (n_features * n_classes)) Transformed input. """ X_ordinal, X_known_mask = self._transform( X, handle_unknown="ignore", ensure_all_finite="allow-nan" ) # If 'multiclass' multiply axis=1 by num of classes else keep shape the same if self.target_type_ == "multiclass": X_out = np.empty( (X_ordinal.shape[0], X_ordinal.shape[1] * len(self.classes_)), dtype=np.float64, ) else: X_out = np.empty_like(X_ordinal, dtype=np.float64) self._transform_X_ordinal( X_out, X_ordinal, ~X_known_mask, slice(None), self.encodings_, self.target_mean_, ) return X_out
Transform X with the target encoding. .. note:: `fit(X, y).transform(X)` does not equal `fit_transform(X, y)` because a :term:`cross fitting` scheme is used in `fit_transform` for encoding. See the :ref:`User Guide <target_encoder>` for details. Parameters ---------- X : array-like of shape (n_samples, n_features) The data to determine the categories of each feature. Returns ------- X_trans : ndarray of shape (n_samples, n_features) or (n_samples, (n_features * n_classes)) Transformed input.
transform
python
scikit-learn/scikit-learn
sklearn/preprocessing/_target_encoder.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/_target_encoder.py
BSD-3-Clause
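A short sketch of the unknown-category fallback implemented via `_transform_X_ordinal` above, on illustrative data:

import numpy as np
from sklearn.preprocessing import TargetEncoder

X = np.array([["a", "b", "a", "b"]], dtype=object).T
y = np.array([1.0, 2.0, 3.0, 4.0])
enc = TargetEncoder(target_type="continuous").fit(X, y)
# Categories unseen during fit fall back to the global target mean.
print(enc.transform(np.array([["c"]], dtype=object)))  # [[2.5]]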
def _fit_encodings_all(self, X, y): """Fit a target encoding with all the data.""" # avoid circular import from ..preprocessing import ( LabelBinarizer, LabelEncoder, ) check_consistent_length(X, y) self._fit(X, handle_unknown="ignore", ensure_all_finite="allow-nan") if self.target_type == "auto": accepted_target_types = ("binary", "multiclass", "continuous") inferred_type_of_target = type_of_target(y, input_name="y") if inferred_type_of_target not in accepted_target_types: raise ValueError( "Unknown label type: Target type was inferred to be " f"{inferred_type_of_target!r}. Only {accepted_target_types} are " "supported." ) self.target_type_ = inferred_type_of_target else: self.target_type_ = self.target_type self.classes_ = None if self.target_type_ == "binary": label_encoder = LabelEncoder() y = label_encoder.fit_transform(y) self.classes_ = label_encoder.classes_ elif self.target_type_ == "multiclass": label_binarizer = LabelBinarizer() y = label_binarizer.fit_transform(y) self.classes_ = label_binarizer.classes_ else: # continuous y = _check_y(y, y_numeric=True, estimator=self) self.target_mean_ = np.mean(y, axis=0) X_ordinal, X_known_mask = self._transform( X, handle_unknown="ignore", ensure_all_finite="allow-nan" ) n_categories = np.fromiter( (len(category_for_feature) for category_for_feature in self.categories_), dtype=np.int64, count=len(self.categories_), ) if self.target_type_ == "multiclass": encodings = self._fit_encoding_multiclass( X_ordinal, y, n_categories, self.target_mean_, ) else: encodings = self._fit_encoding_binary_or_continuous( X_ordinal, y, n_categories, self.target_mean_, ) self.encodings_ = encodings return X_ordinal, X_known_mask, y, n_categories
Fit a target encoding with all the data.
_fit_encodings_all
python
scikit-learn/scikit-learn
sklearn/preprocessing/_target_encoder.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/_target_encoder.py
BSD-3-Clause
def _fit_encoding_multiclass(self, X_ordinal, y, n_categories, target_mean): """Learn multiclass encodings. Learn encodings for each class (c) then reorder encodings such that the same features (f) are grouped together. `reorder_index` enables converting from: f0_c0, f1_c0, f0_c1, f1_c1, f0_c2, f1_c2 to: f0_c0, f0_c1, f0_c2, f1_c0, f1_c1, f1_c2 """ n_features = self.n_features_in_ n_classes = len(self.classes_) encodings = [] for i in range(n_classes): y_class = y[:, i] encoding = self._fit_encoding_binary_or_continuous( X_ordinal, y_class, n_categories, target_mean[i], ) encodings.extend(encoding) reorder_index = ( idx for start in range(n_features) for idx in range(start, (n_classes * n_features), n_features) ) return [encodings[idx] for idx in reorder_index]
Learn multiclass encodings. Learn encodings for each class (c) then reorder encodings such that the same features (f) are grouped together. `reorder_index` enables converting from: f0_c0, f1_c0, f0_c1, f1_c1, f0_c2, f1_c2 to: f0_c0, f0_c1, f0_c2, f1_c0, f1_c1, f1_c2
_fit_encoding_multiclass
python
scikit-learn/scikit-learn
sklearn/preprocessing/_target_encoder.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/_target_encoder.py
BSD-3-Clause
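The column reordering above can be checked in isolation; a tiny sketch with made-up sizes:

# For n_features=2, n_classes=3: encodings arrive class-major
# (f0_c0, f1_c0, f0_c1, f1_c1, f0_c2, f1_c2) and are regrouped
# feature-major, exactly as the generator above does.
n_features, n_classes = 2, 3
reorder_index = [
    idx
    for start in range(n_features)
    for idx in range(start, n_classes * n_features, n_features)
]
print(reorder_index)  # [0, 2, 4, 1, 3, 5]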
def _transform_X_ordinal( self, X_out, X_ordinal, X_unknown_mask, row_indices, encodings, target_mean, ): """Transform X_ordinal using encodings. In the multiclass case, `X_ordinal` and `X_unknown_mask` have column (axis=1) size `n_features`, while `encodings` has length of size `n_features * n_classes`. `feat_idx` deals with this by repeating feature indices by `n_classes` E.g., for 3 features, 2 classes: 0,0,1,1,2,2 Additionally, `target_mean` is of shape (`n_classes`,) so `mean_idx` cycles through 0 to `n_classes` - 1, `n_features` times. """ if self.target_type_ == "multiclass": n_classes = len(self.classes_) for e_idx, encoding in enumerate(encodings): # Repeat feature indices by n_classes feat_idx = e_idx // n_classes # Cycle through each class mean_idx = e_idx % n_classes X_out[row_indices, e_idx] = encoding[X_ordinal[row_indices, feat_idx]] X_out[X_unknown_mask[:, feat_idx], e_idx] = target_mean[mean_idx] else: for e_idx, encoding in enumerate(encodings): X_out[row_indices, e_idx] = encoding[X_ordinal[row_indices, e_idx]] X_out[X_unknown_mask[:, e_idx], e_idx] = target_mean
Transform X_ordinal using encodings. In the multiclass case, `X_ordinal` and `X_unknown_mask` have column (axis=1) size `n_features`, while `encodings` has length `n_features * n_classes`. `feat_idx` deals with this by repeating each feature index `n_classes` times, e.g., for 3 features and 2 classes: 0,0,1,1,2,2. Additionally, `target_mean` is of shape (`n_classes`,) so `mean_idx` cycles through 0 to `n_classes` - 1, `n_features` times.
_transform_X_ordinal
python
scikit-learn/scikit-learn
sklearn/preprocessing/_target_encoder.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/_target_encoder.py
BSD-3-Clause
def get_feature_names_out(self, input_features=None): """Get output feature names for transformation. Parameters ---------- input_features : array-like of str or None, default=None Not used, present here for API consistency by convention. Returns ------- feature_names_out : ndarray of str objects Transformed feature names. `feature_names_in_` is used unless it is not defined, in which case the following input feature names are generated: `["x0", "x1", ..., "x(n_features_in_ - 1)"]`. When `type_of_target_` is "multiclass" the names are of the format '<feature_name>_<class_name>'. """ check_is_fitted(self, "n_features_in_") feature_names = _check_feature_names_in(self, input_features) if self.target_type_ == "multiclass": feature_names = [ f"{feature_name}_{class_name}" for feature_name in feature_names for class_name in self.classes_ ] return np.asarray(feature_names, dtype=object) else: return feature_names
Get output feature names for transformation. Parameters ---------- input_features : array-like of str or None, default=None Not used, present here for API consistency by convention. Returns ------- feature_names_out : ndarray of str objects Transformed feature names. `feature_names_in_` is used unless it is not defined, in which case the following input feature names are generated: `["x0", "x1", ..., "x(n_features_in_ - 1)"]`. When `type_of_target_` is "multiclass" the names are of the format '<feature_name>_<class_name>'.
get_feature_names_out
python
scikit-learn/scikit-learn
sklearn/preprocessing/_target_encoder.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/_target_encoder.py
BSD-3-Clause
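A minimal sketch of the multiclass naming scheme above; the feature name and labels are illustrative:

import numpy as np
from sklearn.preprocessing import TargetEncoder

X = np.array([["dog", "cat", "dog", "cat", "bird", "bird"]], dtype=object).T
y = np.array([0, 1, 2, 0, 1, 2])
enc = TargetEncoder(target_type="multiclass").fit(X, y)
# One output column per (feature, class) pair.
print(enc.get_feature_names_out(["animal"]))
# ['animal_0' 'animal_1' 'animal_2']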
def test_quantile_transform_subsampling_disabled(): """Check the behaviour of `QuantileTransformer` when `subsample=None`.""" X = np.random.RandomState(0).normal(size=(200, 1)) n_quantiles = 5 transformer = QuantileTransformer(n_quantiles=n_quantiles, subsample=None).fit(X) expected_references = np.linspace(0, 1, n_quantiles) assert_allclose(transformer.references_, expected_references) expected_quantiles = np.quantile(X.ravel(), expected_references) assert_allclose(transformer.quantiles_.ravel(), expected_quantiles)
Check the behaviour of `QuantileTransformer` when `subsample=None`.
test_quantile_transform_subsampling_disabled
python
scikit-learn/scikit-learn
sklearn/preprocessing/tests/test_data.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/tests/test_data.py
BSD-3-Clause
def test_power_transformer_box_cox_raise_all_nans_col(): """Check that box-cox raises an informative error when a column contains all nans. Non-regression test for gh-26303 """ X = rng.random_sample((4, 5)) X[:, 0] = np.nan err_msg = "Column must not be all nan." pt = PowerTransformer(method="box-cox") with pytest.raises(ValueError, match=err_msg): pt.fit_transform(X)
Check that box-cox raises an informative error when a column contains all nans. Non-regression test for gh-26303
test_power_transformer_box_cox_raise_all_nans_col
python
scikit-learn/scikit-learn
sklearn/preprocessing/tests/test_data.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/tests/test_data.py
BSD-3-Clause
def test_standard_scaler_raise_error_for_1d_input(): """Check that `inverse_transform` from `StandardScaler` raises an error with 1D array. Non-regression test for: https://github.com/scikit-learn/scikit-learn/issues/19518 """ scaler = StandardScaler().fit(X_2d) err_msg = "Expected 2D array, got 1D array instead" with pytest.raises(ValueError, match=err_msg): scaler.inverse_transform(X_2d[:, 0])
Check that `inverse_transform` from `StandardScaler` raises an error with 1D array. Non-regression test for: https://github.com/scikit-learn/scikit-learn/issues/19518
test_standard_scaler_raise_error_for_1d_input
python
scikit-learn/scikit-learn
sklearn/preprocessing/tests/test_data.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/tests/test_data.py
BSD-3-Clause
def test_power_transformer_significantly_non_gaussian(): """Check that significantly non-Gaussian data is transformed correctly. For some explored lambdas, the transformed data may be constant and will be rejected. Non-regression test for https://github.com/scikit-learn/scikit-learn/issues/14959 """ X_non_gaussian = 1e6 * np.array( [0.6, 2.0, 3.0, 4.0] * 4 + [11, 12, 12, 16, 17, 20, 85, 90], dtype=np.float64 ).reshape(-1, 1) pt = PowerTransformer() with warnings.catch_warnings(): warnings.simplefilter("error", RuntimeWarning) X_trans = pt.fit_transform(X_non_gaussian) assert not np.any(np.isnan(X_trans)) assert X_trans.mean() == pytest.approx(0.0) assert X_trans.std() == pytest.approx(1.0) assert X_trans.min() > -2 assert X_trans.max() < 2
Check that significantly non-Gaussian data is transformed correctly. For some explored lambdas, the transformed data may be constant and will be rejected. Non-regression test for https://github.com/scikit-learn/scikit-learn/issues/14959
test_power_transformer_significantly_non_gaussian
python
scikit-learn/scikit-learn
sklearn/preprocessing/tests/test_data.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/tests/test_data.py
BSD-3-Clause
def test_one_to_one_features(Transformer): """Check one-to-one transformers give correct feature names.""" tr = Transformer().fit(iris.data) names_out = tr.get_feature_names_out(iris.feature_names) assert_array_equal(names_out, iris.feature_names)
Check one-to-one transformers give correct feature names.
test_one_to_one_features
python
scikit-learn/scikit-learn
sklearn/preprocessing/tests/test_data.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/tests/test_data.py
BSD-3-Clause
def test_one_to_one_features_pandas(Transformer): """Check one-to-one transformers give correct feature names.""" pd = pytest.importorskip("pandas") df = pd.DataFrame(iris.data, columns=iris.feature_names) tr = Transformer().fit(df) names_out_df_default = tr.get_feature_names_out() assert_array_equal(names_out_df_default, iris.feature_names) names_out_df_valid_in = tr.get_feature_names_out(iris.feature_names) assert_array_equal(names_out_df_valid_in, iris.feature_names) msg = re.escape("input_features is not equal to feature_names_in_") with pytest.raises(ValueError, match=msg): invalid_names = list("abcd") tr.get_feature_names_out(invalid_names)
Check one-to-one transformers give correct feature names.
test_one_to_one_features_pandas
python
scikit-learn/scikit-learn
sklearn/preprocessing/tests/test_data.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/tests/test_data.py
BSD-3-Clause
def test_power_transformer_constant_feature(standardize): """Check that PowerTransformer leaves constant features unchanged.""" X = [[-2, 0, 2], [-2, 0, 2], [-2, 0, 2]] pt = PowerTransformer(method="yeo-johnson", standardize=standardize).fit(X) assert_allclose(pt.lambdas_, [1, 1, 1]) Xft = pt.fit_transform(X) Xt = pt.transform(X) for Xt_ in [Xft, Xt]: if standardize: assert_allclose(Xt_, np.zeros_like(X)) else: assert_allclose(Xt_, X)
Check that PowerTransformer leaves constant features unchanged.
test_power_transformer_constant_feature
python
scikit-learn/scikit-learn
sklearn/preprocessing/tests/test_data.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/tests/test_data.py
BSD-3-Clause
def test_power_transformer_no_warnings(): """Verify that PowerTransformer operates without raising any warnings on valid data. This test addresses numerical issues with floating point numbers (mostly overflows) with the Yeo-Johnson transform, see https://github.com/scikit-learn/scikit-learn/issues/23319#issuecomment-1464933635 """ x = np.array( [ 2003.0, 1950.0, 1997.0, 2000.0, 2009.0, 2009.0, 1980.0, 1999.0, 2007.0, 1991.0, ] ) def _test_no_warnings(data): """Internal helper to test for unexpected warnings.""" with warnings.catch_warnings(record=True) as caught_warnings: warnings.simplefilter("always") # Ensure all warnings are captured PowerTransformer(method="yeo-johnson", standardize=True).fit_transform(data) assert not caught_warnings, "Unexpected warnings were raised:\n" + "\n".join( str(w.message) for w in caught_warnings ) # Full dataset: Should not trigger overflow in variance calculation. _test_no_warnings(x.reshape(-1, 1)) # Subset of data: Should not trigger overflow in power calculation. _test_no_warnings(x[:5].reshape(-1, 1))
Verify that PowerTransformer operates without raising any warnings on valid data. This test addresses numerical issues with floating point numbers (mostly overflows) with the Yeo-Johnson transform, see https://github.com/scikit-learn/scikit-learn/issues/23319#issuecomment-1464933635
test_power_transformer_no_warnings
python
scikit-learn/scikit-learn
sklearn/preprocessing/tests/test_data.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/tests/test_data.py
BSD-3-Clause
def _test_no_warnings(data): """Internal helper to test for unexpected warnings.""" with warnings.catch_warnings(record=True) as caught_warnings: warnings.simplefilter("always") # Ensure all warnings are captured PowerTransformer(method="yeo-johnson", standardize=True).fit_transform(data) assert not caught_warnings, "Unexpected warnings were raised:\n" + "\n".join( str(w.message) for w in caught_warnings )
Internal helper to test for unexpected warnings.
_test_no_warnings
python
scikit-learn/scikit-learn
sklearn/preprocessing/tests/test_data.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/tests/test_data.py
BSD-3-Clause
def test_yeojohnson_for_different_scipy_version(): """Check that the results are consistent across different SciPy versions.""" pt = PowerTransformer(method="yeo-johnson").fit(X_1col) assert pt.lambdas_[0] == pytest.approx(0.99546157, rel=1e-7)
Check that the results are consistent across different SciPy versions.
test_yeojohnson_for_different_scipy_version
python
scikit-learn/scikit-learn
sklearn/preprocessing/tests/test_data.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/tests/test_data.py
BSD-3-Clause
def test_kbinsdiscretizer_effect_sample_weight(): """Check the impact of `sample_weight` on the computed quantiles.""" X = np.array([[-2], [-1], [1], [3], [500], [1000]]) # add a large number of bins such that each sample with a non-null weight # will be used as bin edge est = KBinsDiscretizer( n_bins=10, encode="ordinal", strategy="quantile", quantile_method="averaged_inverted_cdf", ) est.fit(X, sample_weight=[1, 1, 1, 1, 0, 0]) assert_allclose(est.bin_edges_[0], [-2, -1, 0, 1, 3]) assert_allclose(est.transform(X), [[0.0], [1.0], [3.0], [3.0], [3.0], [3.0]])
Check the impact of `sample_weight` on the computed quantiles.
test_kbinsdiscretizer_effect_sample_weight
python
scikit-learn/scikit-learn
sklearn/preprocessing/tests/test_discretization.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/tests/test_discretization.py
BSD-3-Clause
def test_kbinsdiscretizer_no_mutating_sample_weight(strategy): """Make sure that `sample_weight` is not changed in place.""" if strategy == "quantile": est = KBinsDiscretizer( n_bins=3, encode="ordinal", strategy=strategy, quantile_method="averaged_inverted_cdf", ) else: est = KBinsDiscretizer(n_bins=3, encode="ordinal", strategy=strategy) sample_weight = np.array([1, 3, 1, 2], dtype=np.float64) sample_weight_copy = np.copy(sample_weight) est.fit(X, sample_weight=sample_weight) assert_allclose(sample_weight, sample_weight_copy)
Make sure that `sample_weight` is not changed in place.
test_kbinsdiscretizer_no_mutating_sample_weight
python
scikit-learn/scikit-learn
sklearn/preprocessing/tests/test_discretization.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/tests/test_discretization.py
BSD-3-Clause
def test_kbinsdiscrtizer_get_feature_names_out(encode, expected_names): """Check get_feature_names_out for different settings. Non-regression test for #22731 """ X = [[-2, 1, -4], [-1, 2, -3], [0, 3, -2], [1, 4, -1]] kbd = KBinsDiscretizer( n_bins=4, encode=encode, quantile_method="averaged_inverted_cdf" ).fit(X) Xt = kbd.transform(X) input_features = [f"feat{i}" for i in range(3)] output_names = kbd.get_feature_names_out(input_features) assert Xt.shape[1] == output_names.shape[0] assert_array_equal(output_names, expected_names)
Check get_feature_names_out for different settings. Non-regression test for #22731
test_kbinsdiscrtizer_get_feature_names_out
python
scikit-learn/scikit-learn
sklearn/preprocessing/tests/test_discretization.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/tests/test_discretization.py
BSD-3-Clause
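A standalone illustration for one plausible setting (encode="ordinal" is an assumption about the parametrization): ordinal encoding keeps one output column per input feature, so the names should pass through unchanged.

from sklearn.preprocessing import KBinsDiscretizer

X = [[-2, 1, -4], [-1, 2, -3], [0, 3, -2], [1, 4, -1]]
kbd = KBinsDiscretizer(
    n_bins=4, encode="ordinal", quantile_method="averaged_inverted_cdf"
).fit(X)
kbd.get_feature_names_out([f"feat{i}" for i in range(3)])
# expected: array(['feat0', 'feat1', 'feat2'], dtype=object)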
def test_one_hot_encoder_custom_feature_name_combiner(): """Check the behaviour of `feature_name_combiner` as a callable.""" def name_combiner(feature, category): return feature + "_" + repr(category) enc = OneHotEncoder(feature_name_combiner=name_combiner) X = np.array([["None", None]], dtype=object).T enc.fit(X) feature_names = enc.get_feature_names_out() assert_array_equal(["x0_'None'", "x0_None"], feature_names) feature_names = enc.get_feature_names_out(input_features=["a"]) assert_array_equal(["a_'None'", "a_None"], feature_names) def wrong_combiner(feature, category): # returning a non-string must trigger a TypeError return 0 enc = OneHotEncoder(feature_name_combiner=wrong_combiner).fit(X) err_msg = ( "When `feature_name_combiner` is a callable, it should return a Python string." ) with pytest.raises(TypeError, match=err_msg): enc.get_feature_names_out()
Check the behaviour of `feature_name_combiner` as a callable.
test_one_hot_encoder_custom_feature_name_combiner
python
scikit-learn/scikit-learn
sklearn/preprocessing/tests/test_encoders.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/tests/test_encoders.py
BSD-3-Clause
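Why a repr-based combiner matters for this fixture (a sketch; the lambda below is a simplification of the built-in "concat" behaviour, not its actual implementation):

# Default-style name construction collapses the string "None" and the value
# None onto the same output name, which the repr-based combiner avoids:
default_name = lambda feature, category: f"{feature}_{category}"
assert default_name("x0", "None") == default_name("x0", None)  # ambiguous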
def test_one_hot_encoder_inverse_transform_raise_error_with_unknown( X, X_trans, sparse_ ): """Check that `inverse_transform` raises an error with unknown samples, no dropped features, and `handle_unknown="error"`. Non-regression test for: https://github.com/scikit-learn/scikit-learn/issues/14934 """ enc = OneHotEncoder(sparse_output=sparse_).fit(X) msg = ( r"Samples \[(\d )*\d\] can not be inverted when drop=None and " r"handle_unknown='error' because they contain all zeros" ) if sparse_: # emulate the sparse output produced by a OneHotEncoder with # sparse_output=True X_trans = _convert_container(X_trans, "sparse") with pytest.raises(ValueError, match=msg): enc.inverse_transform(X_trans)
Check that `inverse_transform` raises an error with unknown samples, no dropped features, and `handle_unknown="error"`. Non-regression test for: https://github.com/scikit-learn/scikit-learn/issues/14934
test_one_hot_encoder_inverse_transform_raise_error_with_unknown
python
scikit-learn/scikit-learn
sklearn/preprocessing/tests/test_encoders.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/tests/test_encoders.py
BSD-3-Clause
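A concrete instance of the parametrized inputs (an assumption consistent with the error message: an all-zero row activates no category and therefore cannot be inverted):

import numpy as np
from sklearn.preprocessing import OneHotEncoder

enc = OneHotEncoder(sparse_output=False).fit(np.array([["a"], ["b"]], dtype=object))
X_trans = np.array([[0.0, 0.0]])  # no category is active
enc.inverse_transform(X_trans)  # raises ValueError: "Samples [0] can not be inverted ..."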
def test_encoder_nan_ending_specified_categories(Encoder): """Check that the encoder raises an error when NaN is not the last element of the user-specified categories. Non-regression test for: https://github.com/scikit-learn/scikit-learn/issues/27088 """ cats = [np.array([0, np.nan, 1])] enc = Encoder(categories=cats) X = np.array([[0, 1]], dtype=object).T with pytest.raises(ValueError, match="Nan should be the last element"): enc.fit(X)
Check that the encoder raises an error when NaN is not the last element of the user-specified categories. Non-regression test for: https://github.com/scikit-learn/scikit-learn/issues/27088
test_encoder_nan_ending_specified_categories
python
scikit-learn/scikit-learn
sklearn/preprocessing/tests/test_encoders.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/tests/test_encoders.py
BSD-3-Clause
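The complementary, accepted ordering (a sketch; OrdinalEncoder stands in for the parametrized Encoder):

import numpy as np
from sklearn.preprocessing import OrdinalEncoder

X = np.array([[0, 1]], dtype=object).T
enc = OrdinalEncoder(categories=[np.array([0, 1, np.nan])])
enc.fit(X)  # accepted: NaN is the last element of the specified categories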
def test_ohe_infrequent_two_levels(kwargs, categories): """Test that different parameters for combining 'a', 'c', and 'd' into the infrequent category work as expected.""" X_train = np.array([["a"] * 5 + ["b"] * 20 + ["c"] * 10 + ["d"] * 3]).T ohe = OneHotEncoder( categories=categories, handle_unknown="infrequent_if_exist", sparse_output=False, **kwargs, ).fit(X_train) assert_array_equal(ohe.infrequent_categories_, [["a", "c", "d"]]) X_test = [["b"], ["a"], ["c"], ["d"], ["e"]] expected = np.array([[1, 0], [0, 1], [0, 1], [0, 1], [0, 1]]) X_trans = ohe.transform(X_test) assert_allclose(expected, X_trans) expected_inv = [[col] for col in ["b"] + ["infrequent_sklearn"] * 4] X_inv = ohe.inverse_transform(X_trans) assert_array_equal(expected_inv, X_inv) feature_names = ohe.get_feature_names_out() assert_array_equal(["x0_b", "x0_infrequent_sklearn"], feature_names)
Test that different parameters for combining 'a', 'c', and 'd' into the infrequent category work as expected.
test_ohe_infrequent_two_levels
python
scikit-learn/scikit-learn
sklearn/preprocessing/tests/test_encoders.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/tests/test_encoders.py
BSD-3-Clause
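Plausible parametrizations (assumptions, since the decorator is outside this excerpt): with training counts a=5, b=20, c=10, d=3, either setting leaves only 'b' frequent.

import pytest

@pytest.mark.parametrize(
    "kwargs, categories",
    [
        ({"max_categories": 2}, "auto"),  # keep one frequent category + infrequent
        ({"min_frequency": 11}, "auto"),  # only 'b' (count 20) reaches the threshold
    ],
)
def test_ohe_infrequent_two_levels(kwargs, categories):
    ...  # body as above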
def test_ohe_infrequent_two_levels_drop_frequent(drop): """Test two levels and dropping the frequent category.""" X_train = np.array([["a"] * 5 + ["b"] * 20 + ["c"] * 10 + ["d"] * 3]).T ohe = OneHotEncoder( handle_unknown="infrequent_if_exist", sparse_output=False, max_categories=2, drop=drop, ).fit(X_train) assert ohe.categories_[0][ohe.drop_idx_[0]] == "b" X_test = np.array([["b"], ["c"]]) X_trans = ohe.transform(X_test) assert_allclose([[0], [1]], X_trans) feature_names = ohe.get_feature_names_out() assert_array_equal(["x0_infrequent_sklearn"], feature_names) X_inverse = ohe.inverse_transform(X_trans) assert_array_equal([["b"], ["infrequent_sklearn"]], X_inverse)
Test two levels and dropping the frequent category.
test_ohe_infrequent_two_levels_drop_frequent
python
scikit-learn/scikit-learn
sklearn/preprocessing/tests/test_encoders.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/tests/test_encoders.py
BSD-3-Clause
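Plausible drop values (assumptions): the encoded columns are [b, infrequent], so any spelling that drops 'b' satisfies the assertions.

import pytest

@pytest.mark.parametrize("drop", ["first", "if_binary", ["b"]])
def test_ohe_infrequent_two_levels_drop_frequent(drop):
    ...  # body as above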
def test_ohe_infrequent_two_levels_drop_infrequent_errors(drop): """Test that, with two levels, trying to drop an infrequent category raises an error.""" X_train = np.array([["a"] * 5 + ["b"] * 20 + ["c"] * 10 + ["d"] * 3]).T ohe = OneHotEncoder( handle_unknown="infrequent_if_exist", sparse_output=False, max_categories=2, drop=drop, ) msg = f"Unable to drop category {drop[0]!r} from feature 0 because it is infrequent" with pytest.raises(ValueError, match=msg): ohe.fit(X_train)
Test that, with two levels, trying to drop an infrequent category raises an error.
test_ohe_infrequent_two_levels_drop_infrequent_errors
python
scikit-learn/scikit-learn
sklearn/preprocessing/tests/test_encoders.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/tests/test_encoders.py
BSD-3-Clause
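Plausible drop values (assumptions): with max_categories=2 the infrequent bucket holds 'a', 'c' and 'd', so dropping any of them must fail.

import pytest

@pytest.mark.parametrize("drop", [["a"], ["c"], ["d"]])
def test_ohe_infrequent_two_levels_drop_infrequent_errors(drop):
    ...  # body as above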
def test_ohe_infrequent_three_levels(kwargs): """Test that different parameters for combining 'a' and 'd' into the infrequent category work as expected.""" X_train = np.array([["a"] * 5 + ["b"] * 20 + ["c"] * 10 + ["d"] * 3]).T ohe = OneHotEncoder( handle_unknown="infrequent_if_exist", sparse_output=False, **kwargs ).fit(X_train) assert_array_equal(ohe.infrequent_categories_, [["a", "d"]]) X_test = [["b"], ["a"], ["c"], ["d"], ["e"]] expected = np.array([[1, 0, 0], [0, 0, 1], [0, 1, 0], [0, 0, 1], [0, 0, 1]]) X_trans = ohe.transform(X_test) assert_allclose(expected, X_trans) expected_inv = [ ["b"], ["infrequent_sklearn"], ["c"], ["infrequent_sklearn"], ["infrequent_sklearn"], ] X_inv = ohe.inverse_transform(X_trans) assert_array_equal(expected_inv, X_inv) feature_names = ohe.get_feature_names_out() assert_array_equal(["x0_b", "x0_c", "x0_infrequent_sklearn"], feature_names)
Test that different parameters for combining 'a' and 'd' into the infrequent category work as expected.
test_ohe_infrequent_three_levels
python
scikit-learn/scikit-learn
sklearn/preprocessing/tests/test_encoders.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/tests/test_encoders.py
BSD-3-Clause
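Plausible parametrizations (assumptions): with counts a=5, b=20, c=10, d=3, each setting keeps 'b' and 'c' frequent and groups 'a' and 'd'.

import pytest

@pytest.mark.parametrize("kwargs", [{"max_categories": 3}, {"min_frequency": 6}])
def test_ohe_infrequent_three_levels(kwargs):
    ...  # body as above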
def test_ohe_infrequent_three_levels_drop_frequent(drop): """Test three levels and dropping the frequent category.""" X_train = np.array([["a"] * 5 + ["b"] * 20 + ["c"] * 10 + ["d"] * 3]).T ohe = OneHotEncoder( handle_unknown="infrequent_if_exist", sparse_output=False, max_categories=3, drop=drop, ).fit(X_train) X_test = np.array([["b"], ["c"], ["d"]]) assert_allclose([[0, 0], [1, 0], [0, 1]], ohe.transform(X_test)) # Check handle_unknown="ignore" ohe.set_params(handle_unknown="ignore").fit(X_train) msg = "Found unknown categories" with pytest.warns(UserWarning, match=msg): X_trans = ohe.transform([["b"], ["e"]]) assert_allclose([[0, 0], [0, 0]], X_trans)
Test three levels and dropping the frequent category.
test_ohe_infrequent_three_levels_drop_frequent
python
scikit-learn/scikit-learn
sklearn/preprocessing/tests/test_encoders.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/tests/test_encoders.py
BSD-3-Clause
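Plausible drop values (assumptions): the encoded columns are [b, c, infrequent] and the assertions only hold when 'b' is the dropped column.

import pytest

@pytest.mark.parametrize("drop", ["first", ["b"]])
def test_ohe_infrequent_three_levels_drop_frequent(drop):
    ...  # body as above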
def test_ohe_infrequent_three_levels_drop_infrequent_errors(drop): """Test that, with three levels, trying to drop an infrequent category raises an error.""" X_train = np.array([["a"] * 5 + ["b"] * 20 + ["c"] * 10 + ["d"] * 3]).T ohe = OneHotEncoder( handle_unknown="infrequent_if_exist", sparse_output=False, max_categories=3, drop=drop, ) msg = f"Unable to drop category {drop[0]!r} from feature 0 because it is infrequent" with pytest.raises(ValueError, match=msg): ohe.fit(X_train)
Test that, with three levels, trying to drop an infrequent category raises an error.
test_ohe_infrequent_three_levels_drop_infrequent_errors
python
scikit-learn/scikit-learn
sklearn/preprocessing/tests/test_encoders.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/tests/test_encoders.py
BSD-3-Clause
def test_ohe_infrequent_handle_unknown_error(): """Test that `handle_unknown="error"` works with infrequent categories: known categories, frequent or infrequent, are encoded, while unknown categories raise an error.""" X_train = np.array([["a"] * 5 + ["b"] * 20 + ["c"] * 10 + ["d"] * 3]).T ohe = OneHotEncoder( handle_unknown="error", sparse_output=False, max_categories=3 ).fit(X_train) assert_array_equal(ohe.infrequent_categories_, [["a", "d"]]) # all categories are known X_test = [["b"], ["a"], ["c"], ["d"]] expected = np.array([[1, 0, 0], [0, 0, 1], [0, 1, 0], [0, 0, 1]]) X_trans = ohe.transform(X_test) assert_allclose(expected, X_trans) # 'bad' is not known and will error X_test = [["bad"]] msg = r"Found unknown categories \['bad'\] in column 0" with pytest.raises(ValueError, match=msg): ohe.transform(X_test)
Test that `handle_unknown="error"` works with infrequent categories: known categories, frequent or infrequent, are encoded, while unknown categories raise an error.
test_ohe_infrequent_handle_unknown_error
python
scikit-learn/scikit-learn
sklearn/preprocessing/tests/test_encoders.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/tests/test_encoders.py
BSD-3-Clause
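For contrast (a sketch, not part of the test above): switching to handle_unknown="infrequent_if_exist" maps the unknown category onto the infrequent column instead of raising.

import numpy as np
from sklearn.preprocessing import OneHotEncoder

X_train = np.array([["a"] * 5 + ["b"] * 20 + ["c"] * 10 + ["d"] * 3]).T
ohe = OneHotEncoder(
    handle_unknown="infrequent_if_exist", sparse_output=False, max_categories=3
).fit(X_train)
ohe.transform([["bad"]])  # -> [[0., 0., 1.]]: encoded as infrequent, no error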